Project import
diff --git a/libchrome/Android.mk b/libchrome/Android.mk
new file mode 100644
index 0000000..e769fef
--- /dev/null
+++ b/libchrome/Android.mk
@@ -0,0 +1,635 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Default values for the USE flags. Override these USE flags from your product
+# by setting BRILLO_USE_* values. Note that we define local variables like
+# local_use_* to prevent leaking our default setting for other packages.
+local_use_dbus := $(if $(BRILLO_USE_DBUS),$(BRILLO_USE_DBUS),1)
+
+LOCAL_PATH := $(call my-dir)
+
+# Common variables
+# ========================================================
+
+# Set libchromeUseClang to "true" to force clang or "false" to force gcc.
+libchromeUseClang :=
+libchromeCommonCppExtension := .cc
+libchromeTestCFlags := -Wno-unused-parameter -Wno-unused-function \
+ -Wno-missing-field-initializers
+libchromeCommonCFlags := -Wall -Werror
+libchromeCommonCIncludes := \
+ external/valgrind/include \
+ external/valgrind \
+
+libchromeExportedCIncludes := $(LOCAL_PATH)
+
+libchromeCommonSrc := \
+ base/at_exit.cc \
+ base/base64.cc \
+ base/base64url.cc \
+ base/base_switches.cc \
+ base/bind_helpers.cc \
+ base/build_time.cc \
+ base/callback_helpers.cc \
+ base/callback_internal.cc \
+ base/command_line.cc \
+ base/cpu.cc \
+ base/debug/alias.cc \
+ base/debug/debugger.cc \
+ base/debug/debugger_posix.cc \
+ base/debug/stack_trace.cc \
+ base/debug/stack_trace_posix.cc \
+ base/debug/task_annotator.cc \
+ base/environment.cc \
+ base/files/file.cc \
+ base/files/file_enumerator.cc \
+ base/files/file_enumerator_posix.cc \
+ base/files/file_path.cc \
+ base/files/file_path_constants.cc \
+ base/files/file_path_watcher.cc \
+ base/files/file_posix.cc \
+ base/files/file_tracing.cc \
+ base/files/file_util.cc \
+ base/files/file_util_posix.cc \
+ base/files/important_file_writer.cc \
+ base/files/memory_mapped_file.cc \
+ base/files/memory_mapped_file_posix.cc \
+ base/files/scoped_file.cc \
+ base/files/scoped_temp_dir.cc \
+ base/guid.cc \
+ base/hash.cc \
+ base/json/json_file_value_serializer.cc \
+ base/json/json_parser.cc \
+ base/json/json_reader.cc \
+ base/json/json_string_value_serializer.cc \
+ base/json/json_value_converter.cc \
+ base/json/json_writer.cc \
+ base/json/string_escape.cc \
+ base/lazy_instance.cc \
+ base/location.cc \
+ base/logging.cc \
+ base/md5.cc \
+ base/memory/aligned_memory.cc \
+ base/memory/ref_counted.cc \
+ base/memory/ref_counted_memory.cc \
+ base/memory/singleton.cc \
+ base/memory/weak_ptr.cc \
+ base/message_loop/incoming_task_queue.cc \
+ base/message_loop/message_loop.cc \
+ base/message_loop/message_loop_task_runner.cc \
+ base/message_loop/message_pump.cc \
+ base/message_loop/message_pump_default.cc \
+ base/message_loop/message_pump_libevent.cc \
+ base/metrics/bucket_ranges.cc \
+ base/metrics/field_trial.cc \
+ base/metrics/metrics_hashes.cc \
+ base/metrics/histogram_base.cc \
+ base/metrics/histogram.cc \
+ base/metrics/histogram_samples.cc \
+ base/metrics/histogram_snapshot_manager.cc \
+ base/metrics/persistent_histogram_allocator.cc \
+ base/metrics/persistent_memory_allocator.cc \
+ base/metrics/persistent_sample_map.cc \
+ base/metrics/sample_map.cc \
+ base/metrics/sample_vector.cc \
+ base/metrics/sparse_histogram.cc \
+ base/metrics/statistics_recorder.cc \
+ base/pending_task.cc \
+ base/pickle.cc \
+ base/posix/file_descriptor_shuffle.cc \
+ base/posix/safe_strerror.cc \
+ base/process/kill.cc \
+ base/process/kill_posix.cc \
+ base/process/launch.cc \
+ base/process/launch_posix.cc \
+ base/process/process_handle.cc \
+ base/process/process_handle_posix.cc \
+ base/process/process_iterator.cc \
+ base/process/process_metrics.cc \
+ base/process/process_metrics_posix.cc \
+ base/process/process_posix.cc \
+ base/profiler/scoped_profile.cc \
+ base/profiler/scoped_tracker.cc \
+ base/profiler/tracked_time.cc \
+ base/rand_util.cc \
+ base/rand_util_posix.cc \
+ base/run_loop.cc \
+ base/sequence_checker_impl.cc \
+ base/sequenced_task_runner.cc \
+ base/sha1_portable.cc \
+ base/strings/pattern.cc \
+ base/strings/safe_sprintf.cc \
+ base/strings/string16.cc \
+ base/strings/string_number_conversions.cc \
+ base/strings/string_piece.cc \
+ base/strings/stringprintf.cc \
+ base/strings/string_split.cc \
+ base/strings/string_util.cc \
+ base/strings/string_util_constants.cc \
+ base/strings/utf_string_conversions.cc \
+ base/strings/utf_string_conversion_utils.cc \
+ base/synchronization/cancellation_flag.cc \
+ base/synchronization/condition_variable_posix.cc \
+ base/synchronization/lock.cc \
+ base/synchronization/lock_impl_posix.cc \
+ base/synchronization/read_write_lock_posix.cc \
+ base/synchronization/waitable_event_posix.cc \
+ base/sync_socket_posix.cc \
+ base/sys_info.cc \
+ base/sys_info_posix.cc \
+ base/task/cancelable_task_tracker.cc \
+ base/task_runner.cc \
+ base/task_scheduler/scheduler_lock_impl.cc \
+ base/task_scheduler/sequence.cc \
+ base/task_scheduler/sequence_sort_key.cc \
+ base/task_scheduler/task.cc \
+ base/task_scheduler/task_traits.cc \
+ base/third_party/icu/icu_utf.cc \
+ base/third_party/nspr/prtime.cc \
+ base/threading/non_thread_safe_impl.cc \
+ base/threading/platform_thread_posix.cc \
+ base/threading/post_task_and_reply_impl.cc \
+ base/threading/sequenced_task_runner_handle.cc \
+ base/threading/sequenced_worker_pool.cc \
+ base/threading/simple_thread.cc \
+ base/threading/thread.cc \
+ base/threading/thread_checker_impl.cc \
+ base/threading/thread_collision_warner.cc \
+ base/threading/thread_id_name_manager.cc \
+ base/threading/thread_local_posix.cc \
+ base/threading/thread_local_storage.cc \
+ base/threading/thread_local_storage_posix.cc \
+ base/threading/thread_restrictions.cc \
+ base/threading/thread_task_runner_handle.cc \
+ base/threading/worker_pool.cc \
+ base/threading/worker_pool_posix.cc \
+ base/time/clock.cc \
+ base/time/default_clock.cc \
+ base/time/default_tick_clock.cc \
+ base/time/tick_clock.cc \
+ base/time/time.cc \
+ base/time/time_posix.cc \
+ base/timer/elapsed_timer.cc \
+ base/timer/timer.cc \
+ base/trace_event/heap_profiler_allocation_context.cc \
+ base/trace_event/heap_profiler_allocation_context_tracker.cc \
+ base/trace_event/heap_profiler_allocation_register.cc \
+ base/trace_event/heap_profiler_allocation_register_posix.cc \
+ base/trace_event/heap_profiler_heap_dump_writer.cc \
+ base/trace_event/heap_profiler_stack_frame_deduplicator.cc \
+ base/trace_event/heap_profiler_type_name_deduplicator.cc \
+ base/trace_event/malloc_dump_provider.cc \
+ base/trace_event/memory_allocator_dump.cc \
+ base/trace_event/memory_allocator_dump_guid.cc \
+ base/trace_event/memory_dump_manager.cc \
+ base/trace_event/memory_dump_request_args.cc \
+ base/trace_event/memory_dump_session_state.cc \
+ base/trace_event/memory_infra_background_whitelist.cc \
+ base/trace_event/process_memory_dump.cc \
+ base/trace_event/process_memory_maps.cc \
+ base/trace_event/process_memory_totals.cc \
+ base/trace_event/trace_buffer.cc \
+ base/trace_event/trace_config.cc \
+ base/trace_event/trace_event_argument.cc \
+ base/trace_event/trace_event_impl.cc \
+ base/trace_event/trace_event_memory_overhead.cc \
+ base/trace_event/trace_event_synthetic_delay.cc \
+ base/trace_event/trace_log.cc \
+ base/trace_event/trace_log_constants.cc \
+ base/trace_event/trace_sampling_thread.cc \
+ base/tracked_objects.cc \
+ base/tracking_info.cc \
+ base/values.cc \
+ base/version.cc \
+ base/vlog.cc \
+
+libchromeLinuxSrc := \
+ base/allocator/allocator_shim.cc \
+ base/files/file_path_watcher_linux.cc \
+ base/files/file_util_linux.cc \
+ base/memory/shared_memory_posix.cc \
+ base/posix/unix_domain_socket_linux.cc \
+ base/process/internal_linux.cc \
+ base/process/process_handle_linux.cc \
+ base/process/process_iterator_linux.cc \
+ base/process/process_metrics_linux.cc \
+ base/strings/sys_string_conversions_posix.cc \
+ base/sys_info_linux.cc \
+ base/threading/platform_thread_internal_posix.cc \
+ base/threading/platform_thread_linux.cc \
+ components/timers/alarm_timer_chromeos.cc \
+
+libchromeMacSrc := \
+ base/files/file_path_watcher_fsevents.cc \
+ base/files/file_path_watcher_kqueue.cc \
+ base/files/file_path_watcher_mac.cc \
+ base/files/file_util_mac.mm \
+ base/mac/bundle_locations.mm \
+ base/mac/foundation_util.mm \
+ base/mac/mach_logging.cc \
+ base/mac/scoped_mach_port.cc \
+ base/mac/scoped_mach_vm.cc \
+ base/mac/scoped_nsautorelease_pool.mm \
+ base/mac/sdk_forward_declarations.mm \
+ base/memory/shared_memory_mac.cc \
+ base/memory/shared_memory_handle_mac.cc \
+ base/message_loop/message_pump_mac.mm \
+ base/process/launch_mac.cc \
+ base/process/port_provider_mac.cc \
+ base/process/process_handle_mac.cc \
+ base/process/process_iterator_mac.cc \
+ base/process/process_metrics_mac.cc \
+ base/strings/sys_string_conversions_mac.mm \
+ base/sys_info_mac.mm \
+ base/time/time_mac.cc \
+ base/threading/platform_thread_mac.mm \
+
+libchromeCommonUnittestSrc := \
+ base/at_exit_unittest.cc \
+ base/atomicops_unittest.cc \
+ base/base64_unittest.cc \
+ base/base64url_unittest.cc \
+ base/bind_unittest.cc \
+ base/bits_unittest.cc \
+ base/build_time_unittest.cc \
+ base/callback_helpers_unittest.cc \
+ base/callback_list_unittest.cc \
+ base/callback_unittest.cc \
+ base/cancelable_callback_unittest.cc \
+ base/command_line_unittest.cc \
+ base/cpu_unittest.cc \
+ base/debug/debugger_unittest.cc \
+ base/debug/leak_tracker_unittest.cc \
+ base/debug/task_annotator_unittest.cc \
+ base/environment_unittest.cc \
+ base/file_version_info_unittest.cc \
+ base/files/dir_reader_posix_unittest.cc \
+ base/files/file_path_watcher_unittest.cc \
+ base/files/file_path_unittest.cc \
+ base/files/file_unittest.cc \
+ base/files/important_file_writer_unittest.cc \
+ base/files/scoped_temp_dir_unittest.cc \
+ base/gmock_unittest.cc \
+ base/guid_unittest.cc \
+ base/id_map_unittest.cc \
+ base/json/json_parser_unittest.cc \
+ base/json/json_reader_unittest.cc \
+ base/json/json_value_converter_unittest.cc \
+ base/json/json_value_serializer_unittest.cc \
+ base/json/json_writer_unittest.cc \
+ base/json/string_escape_unittest.cc \
+ base/lazy_instance_unittest.cc \
+ base/logging_unittest.cc \
+ base/md5_unittest.cc \
+ base/memory/aligned_memory_unittest.cc \
+ base/memory/linked_ptr_unittest.cc \
+ base/memory/ref_counted_memory_unittest.cc \
+ base/memory/ref_counted_unittest.cc \
+ base/memory/scoped_vector_unittest.cc \
+ base/memory/singleton_unittest.cc \
+ base/memory/weak_ptr_unittest.cc \
+ base/message_loop/message_loop_test.cc \
+ base/message_loop/message_loop_task_runner_unittest.cc \
+ base/message_loop/message_loop_unittest.cc \
+ base/metrics/bucket_ranges_unittest.cc \
+ base/metrics/field_trial_unittest.cc \
+ base/metrics/metrics_hashes_unittest.cc \
+ base/metrics/histogram_base_unittest.cc \
+ base/metrics/histogram_macros_unittest.cc \
+ base/metrics/histogram_snapshot_manager_unittest.cc \
+ base/metrics/histogram_unittest.cc \
+ base/metrics/persistent_histogram_allocator_unittest.cc \
+ base/metrics/persistent_memory_allocator_unittest.cc \
+ base/metrics/persistent_sample_map_unittest.cc \
+ base/metrics/sample_map_unittest.cc \
+ base/metrics/sample_vector_unittest.cc \
+ base/metrics/sparse_histogram_unittest.cc \
+ base/metrics/statistics_recorder_unittest.cc \
+ base/numerics/safe_numerics_unittest.cc \
+ base/observer_list_unittest.cc \
+ base/optional_unittest.cc \
+ base/pickle_unittest.cc \
+ base/posix/file_descriptor_shuffle_unittest.cc \
+ base/posix/unix_domain_socket_linux_unittest.cc \
+ base/process/process_metrics_unittest.cc \
+ base/profiler/tracked_time_unittest.cc \
+ base/rand_util_unittest.cc \
+ base/scoped_clear_errno_unittest.cc \
+ base/scoped_generic_unittest.cc \
+ base/security_unittest.cc \
+ base/sequence_checker_unittest.cc \
+ base/sha1_unittest.cc \
+ base/stl_util_unittest.cc \
+ base/strings/pattern_unittest.cc \
+ base/strings/string16_unittest.cc \
+ base/strings/string_number_conversions_unittest.cc \
+ base/strings/string_piece_unittest.cc \
+ base/strings/stringprintf_unittest.cc \
+ base/strings/string_split_unittest.cc \
+ base/strings/string_util_unittest.cc \
+ base/strings/sys_string_conversions_unittest.cc \
+ base/strings/utf_string_conversions_unittest.cc \
+ base/synchronization/cancellation_flag_unittest.cc \
+ base/synchronization/condition_variable_unittest.cc \
+ base/synchronization/lock_unittest.cc \
+ base/synchronization/waitable_event_unittest.cc \
+ base/sync_socket_unittest.cc \
+ base/sys_info_unittest.cc \
+ base/task/cancelable_task_tracker_unittest.cc \
+ base/task_runner_util_unittest.cc \
+ base/task_scheduler/scheduler_lock_unittest.cc \
+ base/task_scheduler/sequence_sort_key_unittest.cc \
+ base/task_scheduler/sequence_unittest.cc \
+ base/task_scheduler/task_traits.cc \
+ base/template_util_unittest.cc \
+ base/test/multiprocess_test.cc \
+ base/test/multiprocess_test_android.cc \
+ base/test/opaque_ref_counted.cc \
+ base/test/scoped_locale.cc \
+ base/test/sequenced_worker_pool_owner.cc \
+ base/test/test_file_util.cc \
+ base/test/test_file_util_linux.cc \
+ base/test/test_file_util_posix.cc \
+ base/test/test_io_thread.cc \
+ base/test/test_pending_task.cc \
+ base/test/test_simple_task_runner.cc \
+ base/test/test_switches.cc \
+ base/test/test_timeouts.cc \
+ base/test/trace_event_analyzer.cc \
+ base/threading/non_thread_safe_unittest.cc \
+ base/threading/platform_thread_unittest.cc \
+ base/threading/simple_thread_unittest.cc \
+ base/threading/thread_checker_unittest.cc \
+ base/threading/thread_collision_warner_unittest.cc \
+ base/threading/thread_id_name_manager_unittest.cc \
+ base/threading/thread_local_storage_unittest.cc \
+ base/threading/thread_local_unittest.cc \
+ base/threading/thread_unittest.cc \
+ base/threading/worker_pool_posix_unittest.cc \
+ base/threading/worker_pool_unittest.cc \
+ base/time/pr_time_unittest.cc \
+ base/time/time_unittest.cc \
+ base/timer/hi_res_timer_manager_unittest.cc \
+ base/timer/timer_unittest.cc \
+ base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc \
+ base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc \
+ base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc \
+ base/trace_event/memory_allocator_dump_unittest.cc \
+ base/trace_event/memory_dump_manager_unittest.cc \
+ base/trace_event/process_memory_dump_unittest.cc \
+ base/trace_event/trace_config_unittest.cc \
+ base/trace_event/trace_event_argument_unittest.cc \
+ base/trace_event/trace_event_synthetic_delay_unittest.cc \
+ base/trace_event/trace_event_unittest.cc \
+ base/tracked_objects_unittest.cc \
+ base/tuple_unittest.cc \
+ base/values_unittest.cc \
+ base/version_unittest.cc \
+ base/vlog_unittest.cc \
+ testing/multiprocess_func_list.cc \
+ testrunner.cc \
+
+libchromeCryptoUnittestSrc := \
+ crypto/secure_hash_unittest.cc \
+ crypto/sha2_unittest.cc \
+
+libchromeHostCFlags := -D__ANDROID_HOST__ -DDONT_EMBED_BUILD_METADATA
+
+ifeq ($(HOST_OS),linux)
+libchromeHostSrc := $(libchromeLinuxSrc) \
+ base/allocator/allocator_shim_default_dispatch_to_glibc.cc
+libchromeHostLdFlags :=
+endif
+
+ifeq ($(HOST_OS),darwin)
+libchromeHostSrc := $(libchromeMacSrc)
+libchromeHostCFlags += -D_FILE_OFFSET_BITS=64 -Wno-deprecated-declarations
+libchromeHostLdFlags := \
+ -framework AppKit \
+ -framework CoreFoundation \
+ -framework Foundation \
+ -framework Security
+endif
+
+# libchrome shared library for target
+# ========================================================
+include $(CLEAR_VARS)
+LOCAL_MODULE := libchrome
+LOCAL_SRC_FILES := \
+ $(libchromeCommonSrc) \
+ $(libchromeLinuxSrc) \
+ base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc \
+ base/memory/shared_memory_android.cc \
+ base/sys_info_chromeos.cc \
+
+LOCAL_CPP_EXTENSION := $(libchromeCommonCppExtension)
+LOCAL_CFLAGS := $(libchromeCommonCFlags)
+LOCAL_CLANG := $(libchromeUseClang)
+LOCAL_C_INCLUDES := $(libchromeCommonCIncludes)
+LOCAL_LDFLAGS := -Wl,-wrap,calloc -Wl,-wrap,free -Wl,-wrap,malloc \
+ -Wl,-wrap,memalign -Wl,-wrap,realloc
+LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libbase
+LOCAL_EXPORT_STATIC_LIBRARY_HEADERS := libgtest_prod
+LOCAL_SHARED_LIBRARIES := libbase libevent liblog libcutils
+LOCAL_STATIC_LIBRARIES := libmodpb64 libgtest_prod
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(libchromeExportedCIncludes)
+include $(BUILD_SHARED_LIBRARY)
+
+# libchrome static library for target
+# ========================================================
+include $(CLEAR_VARS)
+LOCAL_MODULE := libchrome
+LOCAL_SRC_FILES := \
+ $(libchromeCommonSrc) \
+ $(libchromeLinuxSrc) \
+ base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc \
+ base/memory/shared_memory_android.cc \
+ base/sys_info_chromeos.cc \
+
+LOCAL_CPP_EXTENSION := $(libchromeCommonCppExtension)
+LOCAL_CFLAGS := $(libchromeCommonCFlags)
+LOCAL_CLANG := $(libchromeUseClang)
+LOCAL_C_INCLUDES := $(libchromeCommonCIncludes)
+LOCAL_LDFLAGS := -Wl,-wrap,calloc -Wl,-wrap,free -Wl,-wrap,malloc \
+ -Wl,-wrap,memalign -Wl,-wrap,realloc
+LOCAL_STATIC_LIBRARIES := libmodpb64 libgtest_prod \
+ libbase libevent liblog libcutils
+LOCAL_EXPORT_STATIC_LIBRARY_HEADERS := $(LOCAL_STATIC_LIBRARIES)
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(libchromeExportedCIncludes)
+include $(BUILD_STATIC_LIBRARY)
+
+# libchrome shared library for host
+# ========================================================
+include $(CLEAR_VARS)
+LOCAL_MODULE := libchrome
+LOCAL_CFLAGS := $(libchromeCommonCFlags) $(libchromeHostCFlags)
+LOCAL_CLANG := $(libchromeUseClang)
+LOCAL_CPP_EXTENSION := $(libchromeCommonCppExtension)
+LOCAL_C_INCLUDES := $(libchromeCommonCIncludes)
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(libchromeExportedCIncludes)
+LOCAL_EXPORT_STATIC_LIBRARY_HEADERS := libgtest_prod
+LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libbase
+LOCAL_SHARED_LIBRARIES := libbase libevent
+LOCAL_STATIC_LIBRARIES := libmodpb64 libgtest_prod
+LOCAL_SRC_FILES := \
+ $(libchromeCommonSrc) \
+ $(libchromeHostSrc) \
+
+LOCAL_LDFLAGS := $(libchromeHostLdFlags)
+include $(BUILD_HOST_SHARED_LIBRARY)
+
+ifeq ($(local_use_dbus),1)
+
+# libchrome-dbus shared library for target
+# ========================================================
+include $(CLEAR_VARS)
+LOCAL_MODULE := libchrome-dbus
+LOCAL_SRC_FILES := \
+ dbus/bus.cc \
+ dbus/dbus_statistics.cc \
+ dbus/exported_object.cc \
+ dbus/file_descriptor.cc \
+ dbus/message.cc \
+ dbus/object_manager.cc \
+ dbus/object_path.cc \
+ dbus/object_proxy.cc \
+ dbus/property.cc \
+ dbus/scoped_dbus_error.cc \
+ dbus/string_util.cc \
+ dbus/util.cc \
+ dbus/values_util.cc \
+
+LOCAL_CPP_EXTENSION := $(libchromeCommonCppExtension)
+LOCAL_CFLAGS := $(libchromeCommonCFlags)
+LOCAL_CLANG := $(libchromeUseClang)
+LOCAL_C_INCLUDES := $(libchromeCommonCIncludes)
+LOCAL_SHARED_LIBRARIES := \
+ libchrome \
+ libdbus \
+
+LOCAL_STATIC_LIBRARIES := libgtest_prod
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(libchromeExportedCIncludes)
+LOCAL_EXPORT_STATIC_LIBRARY_HEADERS := libgtest_prod
+LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libchrome
+include $(BUILD_SHARED_LIBRARY)
+
+endif # local_use_dbus == 1
+
+# libchrome-crypto shared library for target
+# ========================================================
+include $(CLEAR_VARS)
+LOCAL_MODULE := libchrome-crypto
+LOCAL_SRC_FILES := \
+ crypto/openssl_util.cc \
+ crypto/random.cc \
+ crypto/secure_hash.cc \
+ crypto/secure_util.cc \
+ crypto/sha2.cc \
+
+LOCAL_CPP_EXTENSION := $(libchromeCommonCppExtension)
+LOCAL_CFLAGS := $(libchromeCommonCFlags) -Wno-unused-parameter
+LOCAL_CPPFLAGS := $(libchromeCommonCppFlags)
+LOCAL_CLANG := $(libchromeUseClang)
+LOCAL_C_INCLUDES := $(libchromeCommonCIncludes)
+LOCAL_SHARED_LIBRARIES := \
+ libchrome \
+ libcrypto \
+ libssl \
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(libchromeExportedCIncludes)
+include $(BUILD_SHARED_LIBRARY)
+
+# Helpers needed for unit tests.
+# ========================================================
+include $(CLEAR_VARS)
+LOCAL_MODULE := libchrome_test_helpers
+LOCAL_SHARED_LIBRARIES := libchrome
+LOCAL_CPP_EXTENSION := $(libchromeCommonCppExtension)
+LOCAL_CFLAGS := $(libchromeCommonCFlags) $(libchromeTestCFlags)
+LOCAL_CLANG := $(libchromeUseClang)
+LOCAL_C_INCLUDES := $(libchromeCommonCIncludes)
+LOCAL_SRC_FILES := \
+ base/test/simple_test_clock.cc \
+ base/test/simple_test_tick_clock.cc \
+ base/test/test_file_util.cc \
+ base/test/test_file_util_linux.cc \
+ base/test/test_switches.cc \
+ base/test/test_timeouts.cc \
+
+include $(BUILD_STATIC_LIBRARY)
+
+ifeq ($(local_use_dbus),1)
+
+# Helpers needed for D-Bus unit tests.
+# ========================================================
+include $(CLEAR_VARS)
+LOCAL_MODULE := libchrome_dbus_test_helpers
+LOCAL_SHARED_LIBRARIES := libdbus libchrome-dbus
+LOCAL_STATIC_LIBRARIES := libgmock
+LOCAL_CPP_EXTENSION := $(libchromeCommonCppExtension)
+LOCAL_CFLAGS := $(libchromeCommonCFlags) $(libchromeTestCFlags)
+LOCAL_CLANG := $(libchromeUseClang)
+LOCAL_C_INCLUDES := $(libchromeCommonCIncludes)
+LOCAL_SRC_FILES := \
+ dbus/mock_bus.cc \
+ dbus/mock_exported_object.cc \
+ dbus/mock_object_manager.cc \
+ dbus/mock_object_proxy.cc \
+
+include $(BUILD_STATIC_LIBRARY)
+
+endif # local_use_dbus == 1
+
+# Helpers needed for unit tests (for host).
+# ========================================================
+ifeq ($(HOST_OS),linux)
+include $(CLEAR_VARS)
+LOCAL_MODULE := libchrome_test_helpers-host
+LOCAL_SHARED_LIBRARIES := libchrome
+LOCAL_CPP_EXTENSION := $(libchromeCommonCppExtension)
+LOCAL_CFLAGS := $(libchromeCommonCFlags) $(libchromeTestCFlags)
+LOCAL_CLANG := $(libchromeUseClang)
+LOCAL_C_INCLUDES := $(libchromeCommonCIncludes)
+LOCAL_SRC_FILES := base/test/simple_test_clock.cc
+include $(BUILD_HOST_STATIC_LIBRARY)
+
+# Host unit tests. Run (from repo root) with:
+# ./out/host/<arch>/bin/libchrome_test
+# ========================================================
+include $(CLEAR_VARS)
+LOCAL_MODULE := libchrome_test
+LOCAL_SRC_FILES := $(libchromeCommonUnittestSrc)
+LOCAL_CPP_EXTENSION := $(libchromeCommonCppExtension)
+LOCAL_CFLAGS := $(libchromeCommonCFlags) $(libchromeTestCFlags) $(libchromeHostCFlags) -DUNIT_TEST
+LOCAL_CLANG := $(libchromeUseClang)
+LOCAL_C_INCLUDES := $(libchromeCommonCIncludes)
+LOCAL_SHARED_LIBRARIES := libchrome libevent
+LOCAL_STATIC_LIBRARIES := libgmock_host libgtest_host
+LOCAL_LDLIBS := -lrt
+include $(BUILD_HOST_NATIVE_TEST)
+endif
+
+# Native unit tests. Run with:
+# adb shell /data/nativetest/libchrome_test/libchrome_test
+# ========================================================
+include $(CLEAR_VARS)
+LOCAL_MODULE := libchrome_test
+LOCAL_SRC_FILES := $(libchromeCryptoUnittestSrc) $(libchromeCommonUnittestSrc)
+LOCAL_CPP_EXTENSION := $(libchromeCommonCppExtension)
+LOCAL_CFLAGS := $(libchromeCommonCFlags) $(libchromeTestCFlags) -DUNIT_TEST -DDONT_EMBED_BUILD_METADATA
+LOCAL_CLANG := $(libchromeUseClang)
+LOCAL_C_INCLUDES := $(libchromeCommonCIncludes)
+LOCAL_SHARED_LIBRARIES := libchrome libchrome-crypto libevent
+LOCAL_STATIC_LIBRARIES := libgmock libgtest
+include $(BUILD_NATIVE_TEST)
diff --git a/libchrome/MODULE_LICENSE_BSD b/libchrome/MODULE_LICENSE_BSD
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/libchrome/MODULE_LICENSE_BSD
diff --git a/libchrome/NOTICE b/libchrome/NOTICE
new file mode 100644
index 0000000..972bb2e
--- /dev/null
+++ b/libchrome/NOTICE
@@ -0,0 +1,27 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/libchrome/SConstruct b/libchrome/SConstruct
new file mode 100644
index 0000000..72e022e
--- /dev/null
+++ b/libchrome/SConstruct
@@ -0,0 +1,540 @@
+# -*- python -*-
+
+# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is used to build the libchrome package for Chrome OS:
+# https://www.chromium.org/chromium-os/packages/libchrome
+
+import os
+
+env = Environment()
+
+BASE_VER = os.environ.get('BASE_VER', '0')
+PKG_CONFIG = os.environ.get('PKG_CONFIG', 'pkg-config')
+CHROME_INCLUDE_PATH = os.environ.get('CHROME_INCLUDE_PATH', '.')
+
+# This block will need updating whenever libchrome gets updated. The order of
+# the libs below doesn't matter (as scons will take care of building things in
+# the required order). The split between them is purely to reduce excess
+# linking of third-party libraries, i.e. 'core' should require only a minimal
+# set of libraries, and other third-party libraries should get a unique 'xxx'
+# name.
+base_name = 'base'
+base_libs = [
+ {
+ 'name' : 'core',
+ 'sources' : """
+ allocator/allocator_extension.cc
+ allocator/allocator_shim.cc
+ allocator/allocator_shim_default_dispatch_to_glibc.cc
+ at_exit.cc
+ base64.cc
+ base64url.cc
+ base_switches.cc
+ bind_helpers.cc
+ build_time.cc
+ callback_helpers.cc
+ callback_internal.cc
+ command_line.cc
+ cpu.cc
+ debug/alias.cc
+ debug/debugger.cc
+ debug/debugger_posix.cc
+ debug/stack_trace.cc
+ debug/stack_trace_posix.cc
+ debug/task_annotator.cc
+ environment.cc
+ files/file.cc
+ files/file_enumerator.cc
+ files/file_enumerator_posix.cc
+ files/file_path.cc
+ files/file_path_constants.cc
+ files/file_path_watcher.cc
+ files/file_path_watcher_linux.cc
+ files/file_posix.cc
+ files/file_tracing.cc
+ files/file_util.cc
+ files/file_util_linux.cc
+ files/file_util_posix.cc
+ files/important_file_writer.cc
+ files/memory_mapped_file.cc
+ files/memory_mapped_file_posix.cc
+ files/scoped_file.cc
+ files/scoped_temp_dir.cc
+ guid.cc
+ hash.cc
+ json/json_file_value_serializer.cc
+ json/json_parser.cc
+ json/json_reader.cc
+ json/json_string_value_serializer.cc
+ json/json_value_converter.cc
+ json/json_writer.cc
+ json/string_escape.cc
+ lazy_instance.cc
+ location.cc
+ logging.cc
+ md5.cc
+ memory/aligned_memory.cc
+ memory/ref_counted.cc
+ memory/ref_counted_memory.cc
+ memory/shared_memory_posix.cc
+ memory/singleton.cc
+ memory/weak_ptr.cc
+ message_loop/incoming_task_queue.cc
+ message_loop/message_loop.cc
+ message_loop/message_loop_task_runner.cc
+ message_loop/message_pump.cc
+ message_loop/message_pump_default.cc
+ message_loop/message_pump_glib.cc
+ message_loop/message_pump_libevent.cc
+ metrics/bucket_ranges.cc
+ metrics/field_trial.cc
+ metrics/metrics_hashes.cc
+ metrics/histogram_base.cc
+ metrics/histogram.cc
+ metrics/histogram_samples.cc
+ metrics/histogram_snapshot_manager.cc
+ metrics/persistent_histogram_allocator.cc
+ metrics/persistent_memory_allocator.cc
+ metrics/persistent_sample_map.cc
+ metrics/sample_map.cc
+ metrics/sample_vector.cc
+ metrics/sparse_histogram.cc
+ metrics/statistics_recorder.cc
+ pending_task.cc
+ pickle.cc
+ posix/file_descriptor_shuffle.cc
+ posix/global_descriptors.cc
+ posix/safe_strerror.cc
+ posix/unix_domain_socket_linux.cc
+ process/internal_linux.cc
+ process/kill.cc
+ process/kill_posix.cc
+ process/launch.cc
+ process/launch_posix.cc
+ process/process_handle_linux.cc
+ process/process_iterator.cc
+ process/process_iterator_linux.cc
+ process/process_handle_posix.cc
+ process/process_metrics.cc
+ process/process_metrics_linux.cc
+ process/process_metrics_posix.cc
+ process/process_posix.cc
+ profiler/scoped_profile.cc
+ profiler/scoped_tracker.cc
+ profiler/tracked_time.cc
+ rand_util.cc
+ rand_util_posix.cc
+ run_loop.cc
+ sequence_checker_impl.cc
+ sequenced_task_runner.cc
+ sha1_portable.cc
+ strings/pattern.cc
+ strings/safe_sprintf.cc
+ strings/string16.cc
+ strings/string_number_conversions.cc
+ strings/string_piece.cc
+ strings/stringprintf.cc
+ strings/string_split.cc
+ strings/string_util.cc
+ strings/string_util_constants.cc
+ strings/sys_string_conversions_posix.cc
+ strings/utf_string_conversions.cc
+ strings/utf_string_conversion_utils.cc
+ synchronization/cancellation_flag.cc
+ synchronization/condition_variable_posix.cc
+ synchronization/lock.cc
+ synchronization/lock_impl_posix.cc
+ synchronization/read_write_lock_posix.cc
+ synchronization/waitable_event_posix.cc
+ synchronization/waitable_event_watcher_posix.cc
+ sync_socket_posix.cc
+ sys_info.cc
+ sys_info_chromeos.cc
+ sys_info_linux.cc
+ sys_info_posix.cc
+ task_runner.cc
+ task/cancelable_task_tracker.cc
+ task_scheduler/scheduler_lock_impl.cc
+ task_scheduler/sequence.cc
+ task_scheduler/sequence_sort_key.cc
+ task_scheduler/task.cc
+ task_scheduler/task_traits.cc
+ third_party/icu/icu_utf.cc
+ third_party/nspr/prtime.cc
+ threading/non_thread_safe_impl.cc
+ threading/platform_thread_internal_posix.cc
+ threading/platform_thread_linux.cc
+ threading/platform_thread_posix.cc
+ threading/post_task_and_reply_impl.cc
+ threading/sequenced_task_runner_handle.cc
+ threading/sequenced_worker_pool.cc
+ threading/simple_thread.cc
+ threading/thread.cc
+ threading/thread_checker_impl.cc
+ threading/thread_collision_warner.cc
+ threading/thread_id_name_manager.cc
+ threading/thread_local_posix.cc
+ threading/thread_local_storage.cc
+ threading/thread_local_storage_posix.cc
+ threading/thread_restrictions.cc
+ threading/thread_task_runner_handle.cc
+ threading/worker_pool.cc
+ threading/worker_pool_posix.cc
+ timer/elapsed_timer.cc
+ timer/timer.cc
+ time/clock.cc
+ time/default_clock.cc
+ time/default_tick_clock.cc
+ time/tick_clock.cc
+ time/time.cc
+ time/time_posix.cc
+ trace_event/heap_profiler_allocation_context.cc
+ trace_event/heap_profiler_allocation_context_tracker.cc
+ trace_event/heap_profiler_allocation_register.cc
+ trace_event/heap_profiler_allocation_register_posix.cc
+ trace_event/heap_profiler_heap_dump_writer.cc
+ trace_event/heap_profiler_stack_frame_deduplicator.cc
+ trace_event/heap_profiler_type_name_deduplicator.cc
+ trace_event/malloc_dump_provider.cc
+ trace_event/memory_allocator_dump.cc
+ trace_event/memory_allocator_dump_guid.cc
+ trace_event/memory_dump_manager.cc
+ trace_event/memory_dump_request_args.cc
+ trace_event/memory_dump_session_state.cc
+ trace_event/memory_infra_background_whitelist.cc
+ trace_event/process_memory_dump.cc
+ trace_event/process_memory_maps.cc
+ trace_event/process_memory_totals.cc
+ trace_event/trace_buffer.cc
+ trace_event/trace_config.cc
+ trace_event/trace_event_argument.cc
+ trace_event/trace_event_impl.cc
+ trace_event/trace_event_memory_overhead.cc
+ trace_event/trace_event_synthetic_delay.cc
+ trace_event/trace_log.cc
+ trace_event/trace_log_constants.cc
+ trace_event/trace_sampling_thread.cc
+ tracked_objects.cc
+ tracking_info.cc
+ values.cc
+ version.cc
+ vlog.cc
+ """,
+ 'prefix' : 'base',
+ 'libs' : 'pthread rt libmodp_b64',
+ 'pc_libs' : 'glib-2.0 libevent',
+ },
+ {
+ 'name' : 'dl',
+ 'sources' : """
+ native_library_posix.cc
+ """,
+ 'prefix' : 'base',
+ 'libs' : 'dl',
+ 'pc_libs' : '',
+ },
+ {
+ 'name' : 'dbus',
+ 'sources' : """
+ bus.cc
+ dbus_statistics.cc
+ exported_object.cc
+ file_descriptor.cc
+ message.cc
+ object_manager.cc
+ object_path.cc
+ object_proxy.cc
+ property.cc
+ scoped_dbus_error.cc
+ string_util.cc
+ util.cc
+ values_util.cc
+ """,
+ 'prefix' : 'dbus',
+ 'libs' : '',
+ 'pc_libs' : 'dbus-1 protobuf-lite',
+ },
+ {
+ 'name' : 'timers',
+ 'sources' : """
+ alarm_timer_chromeos.cc
+ """,
+ 'prefix' : 'components/timers',
+ 'libs' : '',
+ 'pc_libs' : '',
+ },
+ {
+ 'name' : 'crypto',
+ 'sources' : """
+ hmac.cc
+ hmac_nss.cc
+ nss_key_util.cc
+ nss_util.cc
+ openssl_util.cc
+ p224.cc
+ p224_spake.cc
+ random.cc
+ rsa_private_key.cc
+ rsa_private_key_nss.cc
+ scoped_test_nss_db.cc
+ secure_hash.cc
+ secure_util.cc
+ sha2.cc
+ signature_creator_nss.cc
+ signature_verifier_nss.cc
+ symmetric_key_nss.cc
+ third_party/nss/rsawrapr.c
+ third_party/nss/sha512.cc
+ """,
+ 'prefix' : 'crypto',
+ 'libs' : '%s-dl-%s' % (base_name, BASE_VER),
+ 'pc_libs' : 'nss openssl',
+ },
+ {
+ 'name' : 'sandbox',
+ 'sources' : """
+ linux/bpf_dsl/bpf_dsl.cc
+ linux/bpf_dsl/codegen.cc
+ linux/bpf_dsl/dump_bpf.cc
+ linux/bpf_dsl/policy.cc
+ linux/bpf_dsl/policy_compiler.cc
+ linux/bpf_dsl/syscall_set.cc
+ linux/bpf_dsl/verifier.cc
+ linux/seccomp-bpf/die.cc
+ linux/seccomp-bpf/sandbox_bpf.cc
+ linux/seccomp-bpf/syscall.cc
+ linux/seccomp-bpf/trap.cc
+
+ linux/seccomp-bpf-helpers/baseline_policy.cc
+ linux/seccomp-bpf-helpers/sigsys_handlers.cc
+ linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
+ linux/seccomp-bpf-helpers/syscall_sets.cc
+
+ linux/services/init_process_reaper.cc
+ linux/services/proc_util.cc
+ linux/services/resource_limits.cc
+ linux/services/scoped_process.cc
+ linux/services/syscall_wrappers.cc
+ linux/services/thread_helpers.cc
+ linux/services/yama.cc
+ linux/syscall_broker/broker_channel.cc
+ linux/syscall_broker/broker_client.cc
+ linux/syscall_broker/broker_file_permission.cc
+ linux/syscall_broker/broker_host.cc
+ linux/syscall_broker/broker_policy.cc
+ linux/syscall_broker/broker_process.cc
+
+ linux/services/credentials.cc
+ linux/services/namespace_sandbox.cc
+ linux/services/namespace_utils.cc
+ """,
+ 'prefix' : 'sandbox',
+ 'libs' : '',
+ 'pc_libs' : '',
+ },
+]
+
+env.Append(
+ CPPPATH=['files'],
+ CCFLAGS=['-g']
+)
+for key in Split('CC CXX AR RANLIB LD NM CFLAGS CXXFLAGS LDFLAGS'):
+ value = os.environ.get(key)
+ if value:
+ env[key] = Split(value)
+if 'CPPFLAGS' in os.environ:  # dict.has_key() was removed in Python 3; 'in' works in both.
+ env['CCFLAGS'] += Split(os.environ['CPPFLAGS'])
+
+env['CCFLAGS'] += ['-DOS_CHROMEOS',
+ '-DUSE_NSS_CERTS',
+ '-DUSE_SYSTEM_LIBEVENT',
+ '-DNO_TCMALLOC',
+ '-fPIC',
+ '-fno-exceptions',
+ '-Wall',
+ '-Werror',
+ '-Wno-deprecated-register',
+ '-Wno-narrowing',
+ '-Wno-psabi',
+ '-Wno-unused-local-typedefs',
+ # Various #defines are hardcoded near the top of
+ # build_config.h to ensure that they'll be set both when
+ # libchrome is built and when other packages include
+ # libchrome's headers.
+ '-I%s' % CHROME_INCLUDE_PATH]
+
+env.Append(
+ CXXFLAGS=['-std=c++11']
+)
+
+# Flags for clang taken from build/common.gypi in the clang==1 section.
+CLANG_FLAGS = (
+ '-Wno-char-subscripts',
+)
+
+env['CCFLAGS'] += ['-Xclang-only=%s' % x for x in CLANG_FLAGS]
+
+# Fix issue with scons not passing some vars through the environment.
+for key in Split('PKG_CONFIG SYSROOT'):
+ if key in os.environ:  # dict.has_key() was removed in Python 3; 'in' works in both.
+ env['ENV'][key] = os.environ[key]
+
+all_base_libs = []
+all_pc_libs = ''
+all_libs = []
+all_scons_libs = []
+
+# Build all the shared libraries.
+for lib in base_libs:
+ pc_libs = lib['pc_libs'].replace('${bslot}', BASE_VER)
+ all_pc_libs += ' ' + pc_libs
+
+ libs = Split(lib['libs'].replace('${bslot}', BASE_VER))
+ all_libs += libs
+
+ name = '%s-%s-%s' % (base_name, lib['name'], BASE_VER)
+ all_base_libs += [name]
+ corename = '%s-core-%s' % (base_name, BASE_VER)
+ # Automatically link the sub-libs against the main core lib.
+ # This is to keep from having to explicitly mention it in the
+ # table above (i.e. lazy).
+ if name != corename:
+ libs += [corename]
+
+ e = env.Clone()
+ e.Append(
+ LIBS = Split(libs),
+ LIBPATH = ['.'],
+ LINKFLAGS = ['-Wl,--as-needed', '-Wl,-z,defs',
+ '-Wl,-soname,lib%s.so' % name],
+ )
+ if pc_libs:
+ e.ParseConfig(PKG_CONFIG + ' --cflags --libs %s' % pc_libs)
+
+ # Prepend prefix to source filenames.
+ sources = [os.path.join(lib['prefix'], x) for x in Split(lib['sources'])]
+
+ all_scons_libs += [ e.SharedLibrary(name, sources) ]
+
+
+# Build a static library of mocks for unittests to link against.
+# Being static allows us to mask this library out of the image.
+
+all_base_test_libs = []
+all_test_pc_libs = ''
+all_test_libs = []
+
+test_libs = [
+ {
+ 'name': 'base_test_support',
+ 'sources': """
+ simple_test_clock.cc
+ simple_test_tick_clock.cc
+ test_file_util.cc
+ test_file_util_linux.cc
+ test_switches.cc
+ test_timeouts.cc
+ """,
+ 'prefix': 'base/test',
+ 'libs': '',
+ 'pc_libs': '',
+ },
+ {
+ 'name': 'dbus_test_support',
+ 'sources': """
+ mock_bus.cc
+ mock_exported_object.cc
+ mock_object_manager.cc
+ mock_object_proxy.cc
+ """,
+ 'prefix': 'dbus',
+ 'libs': '', # TODO(wiley) what should go here?
+ 'pc_libs': 'dbus-1 protobuf-lite',
+ },
+ {
+ 'name': 'timer_test_support',
+ 'sources': """
+ mock_timer.cc
+ """,
+ 'prefix': 'base/timer',
+ 'libs': '',
+ 'pc_libs': '',
+ },
+]
+
+for lib in test_libs:
+ pc_libs = lib['pc_libs'].replace('${bslot}', BASE_VER)
+ all_test_pc_libs += ' ' + pc_libs
+
+ libs = Split(lib['libs'].replace('${bslot}', BASE_VER))
+ all_test_libs += libs
+
+ name = '%s-%s-%s' % (base_name, lib['name'], BASE_VER)
+ all_base_test_libs += [name]
+
+ static_env = env.Clone()
+ if pc_libs:
+ static_env.ParseConfig(PKG_CONFIG + ' --cflags --libs %s' % pc_libs)
+ sources = [os.path.join(lib['prefix'], x)
+ for x in Split(lib['sources'])]
+ static_env.StaticLibrary(name, sources)
+
+# Build the random text files (pkg-config and linker script).
+
+def lib_list(libs):  # Render library names as '-lfoo -lbar' linker arguments.
+ return ' '.join(['-l' + l for l in libs])
+
+prod_subst_dict = {
+ '@BSLOT@': BASE_VER,
+ '@PRIVATE_PC@': all_pc_libs,
+ '@BASE_LIBS@': lib_list(all_base_libs),
+ '@LIBS@': lib_list(all_libs),
+ '@NAME@': 'libchrome',
+ '@PKG_CFG_NAME@': 'libchrome-%s.pc' % BASE_VER,
+ '@LIB_NAME@': 'libbase-%s.so' % BASE_VER,
+ '@DESCRIPTION@': 'chrome base library',
+ # scons, in its infinite wisdom, sees fit to expand this string
+ # if we don't escape the $.
+ '@TARGET_LIB@': 'base-$${bslot}',
+}
+
+# Similarly, build text files related to the test libraries.
+test_subst_dict = {
+ '@BSLOT@': BASE_VER,
+ '@PRIVATE_PC@': all_test_pc_libs,
+ '@BASE_LIBS@': lib_list(all_base_test_libs),
+ '@LIBS@': lib_list(all_test_libs),
+ '@NAME@': 'libchrome-test',
+ '@PKG_CFG_NAME@': 'libchrome-test-%s.pc' % BASE_VER,
+ '@LIB_NAME@': 'libbase-test-%s.a' % BASE_VER,
+ '@DESCRIPTION@': 'chrome base test library',
+ # scons, in its infinite wisdom, sees fit to expand this string
+ # if we don't escape the $.
+ '@TARGET_LIB@': 'base-test-$${bslot}',
+}
+
+pc_file_contents = """
+prefix=/usr
+includedir=${prefix}/include
+bslot=@BSLOT@
+
+Name: @NAME@
+Description: @DESCRIPTION@
+Version: ${bslot}
+Requires:
+Requires.private: @PRIVATE_PC@
+Libs: -l@TARGET_LIB@
+Libs.private: @BASE_LIBS@ @LIBS@
+Cflags: -I${includedir}/@TARGET_LIB@ -Wno-c++11-extensions -Wno-unused-local-typedefs -DBASE_VER=${bslot}
+"""
+
+# https://sourceware.org/binutils/docs/ld/Scripts.html
+so_file_contents = """GROUP ( AS_NEEDED ( @BASE_LIBS@ ) )"""
+
+for subst_dict in (test_subst_dict, prod_subst_dict):
+ env = Environment(tools=['textfile'], SUBST_DICT=subst_dict)
+ env.Substfile(subst_dict['@LIB_NAME@'], [Value(so_file_contents)])
+ env.Substfile(subst_dict['@PKG_CFG_NAME@'], [Value(pc_file_contents)])
diff --git a/libchrome/base/BUILD.gn b/libchrome/base/BUILD.gn
new file mode 100644
index 0000000..c147989
--- /dev/null
+++ b/libchrome/base/BUILD.gn
@@ -0,0 +1,2434 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# HOW TO WRITE CONDITIONALS IN THIS FILE
+# ======================================
+#
+# In many other places, one would write a conditional that expresses all the
+# cases when a source file is used or unused, and then either add or subtract
+# it from the sources list in that case
+#
+# Since base includes so many low-level things that vary widely and
+# unpredictably for the various build types, we prefer a slightly different
+# style. Instead, there are big per-platform blocks of inclusions and
+# exclusions. If a given file has an inclusion or exclusion rule that applies
+# for multiple conditions, prefer to duplicate it in both lists. This makes it
+# a bit easier to see which files apply in which cases rather than having a
+# huge sequence of random-looking conditionals.
+
+import("//build/buildflag_header.gni")
+import("//build/config/allocator.gni")
+import("//build/config/chromecast_build.gni")
+import("//build/config/compiler/compiler.gni")
+import("//build/config/nacl/config.gni")
+import("//build/config/sysroot.gni")
+import("//build/config/ui.gni")
+import("//build/nocompile.gni")
+import("//testing/test.gni")
+
+declare_args() {
+ # Override this value to give a specific build date.
+ # See //base/build_time.cc and //build/write_build_date_header.py for more
+ # details and the expected format.
+ override_build_date = "N/A"
+}
+
+if (is_android) {
+ import("//build/config/android/rules.gni")
+}
+
+if (is_win) {
+ import("//build/config/win/visual_studio_version.gni")
+}
+
+config("base_flags") {
+ if (is_clang) {
+ cflags = [
+ # Don't die on dtoa code that uses a char as an array index.
+ # This is required solely for base/third_party/dmg_fp/dtoa_wrapper.cc.
+ "-Wno-char-subscripts",
+ ]
+ }
+}
+
+config("base_implementation") {
+ defines = [ "BASE_IMPLEMENTATION" ]
+ configs = [ "//build/config/compiler:wexit_time_destructors" ]
+}
+
+if (is_win) {
+ # This is in a separate config so the flags can be applied to dependents.
+ # ldflags in GN aren't automatically inherited.
+ config("base_win_linker_flags") {
+ ldflags = [
+ "/DELAYLOAD:cfgmgr32.dll",
+ "/DELAYLOAD:powrprof.dll",
+ "/DELAYLOAD:setupapi.dll",
+ ]
+ }
+}
+
+if (is_nacl_nonsfi) {
+ # Must be in a config because of how GN orders flags (otherwise -Wall will
+ # appear after this, and turn it back on).
+ config("nacl_nonsfi_warnings") {
+ # file_util_posix.cc contains a function which is not
+ # being used by nacl_helper_nonsfi.
+ cflags = [ "-Wno-unused-function" ]
+ }
+}
+
+if (is_nacl) {
+ # None of the files apply to nacl, and we can't make an empty static library.
+ group("base_paths") {
+ }
+} else {
+ static_library("base_paths") {
+ sources = [
+ "base_paths.cc",
+ "base_paths.h",
+ "base_paths_android.cc",
+ "base_paths_android.h",
+ "base_paths_mac.h",
+ "base_paths_mac.mm",
+ "base_paths_posix.cc",
+ "base_paths_posix.h",
+ "base_paths_win.cc",
+ "base_paths_win.h",
+ ]
+
+ if (is_android || is_mac || is_ios) {
+ sources -= [ "base_paths_posix.cc" ]
+ }
+
+ configs += [ ":base_implementation" ]
+
+ visibility = [ ":base" ]
+ }
+}
+
+if (is_android) {
+ config("android_system_libs") {
+ libs = [ "log" ] # Used by logging.cc.
+ }
+}
+
+# Base and everything it depends on should be a static library rather than
+# a source set. Base is more of a "library" in the classic sense in that many
+# small parts of it are used in many different contexts. This combined with a
+# few static initializers floating around means that dead code stripping
+# still leaves a lot of code behind that isn't always used. For example, this
+# saves more than 40K for a smaller target like chrome_elf.
+#
+# Use static libraries for the helper stuff as well like //base/debug since
+# those things refer back to base code, which will force base compilation units
+# to be linked in where they wouldn't have otherwise. This does not include
+# test code (test support and anything in the test directory) which should use
+# source_set, as is recommended for GN targets.
+component("base") {
+ if (is_nacl_nonsfi) {
+ # TODO(phosek) bug 570839: If field_trial.cc is in a static library,
+ # nacl_helper_nonsfi doesn't link properly on Linux in debug builds. The
+ # reasons for this seem to involve obscure toolchain bugs. This should be
+ # fixed and this target should always be a static_library in the
+ # non-component case.
+ static_component_type = "source_set"
+ }
+
+ sources = [
+ "allocator/allocator_check.cc",
+ "allocator/allocator_check.h",
+ "allocator/allocator_extension.cc",
+ "allocator/allocator_extension.h",
+ "android/animation_frame_time_histogram.cc",
+ "android/animation_frame_time_histogram.h",
+ "android/apk_assets.cc",
+ "android/apk_assets.h",
+ "android/application_status_listener.cc",
+ "android/application_status_listener.h",
+ "android/base_jni_onload.cc",
+ "android/base_jni_onload.h",
+ "android/base_jni_registrar.cc",
+ "android/base_jni_registrar.h",
+ "android/build_info.cc",
+ "android/build_info.h",
+ "android/callback_android.cc",
+ "android/callback_android.h",
+ "android/command_line_android.cc",
+ "android/command_line_android.h",
+ "android/content_uri_utils.cc",
+ "android/content_uri_utils.h",
+ "android/context_utils.cc",
+ "android/context_utils.h",
+ "android/cpu_features.cc",
+ "android/cxa_demangle_stub.cc",
+ "android/event_log.cc",
+ "android/event_log.h",
+ "android/field_trial_list.cc",
+ "android/field_trial_list.h",
+ "android/fifo_utils.cc",
+ "android/fifo_utils.h",
+ "android/important_file_writer_android.cc",
+ "android/important_file_writer_android.h",
+ "android/java_handler_thread.cc",
+ "android/java_handler_thread.h",
+ "android/java_runtime.cc",
+ "android/java_runtime.h",
+ "android/jni_android.cc",
+ "android/jni_android.h",
+ "android/jni_array.cc",
+ "android/jni_array.h",
+ "android/jni_registrar.cc",
+ "android/jni_registrar.h",
+ "android/jni_string.cc",
+ "android/jni_string.h",
+ "android/jni_utils.cc",
+ "android/jni_utils.h",
+ "android/jni_weak_ref.cc",
+ "android/jni_weak_ref.h",
+ "android/library_loader/library_load_from_apk_status_codes.h",
+ "android/library_loader/library_loader_hooks.cc",
+ "android/library_loader/library_loader_hooks.h",
+ "android/library_loader/library_prefetcher.cc",
+ "android/library_loader/library_prefetcher.h",
+ "android/locale_utils.cc",
+ "android/locale_utils.h",
+ "android/memory_pressure_listener_android.cc",
+ "android/memory_pressure_listener_android.h",
+ "android/path_service_android.cc",
+ "android/path_service_android.h",
+ "android/path_utils.cc",
+ "android/path_utils.h",
+ "android/record_histogram.cc",
+ "android/record_histogram.h",
+ "android/record_user_action.cc",
+ "android/record_user_action.h",
+ "android/scoped_java_ref.cc",
+ "android/scoped_java_ref.h",
+ "android/sys_utils.cc",
+ "android/sys_utils.h",
+ "android/thread_utils.h",
+ "android/trace_event_binding.cc",
+ "android/trace_event_binding.h",
+ "at_exit.cc",
+ "at_exit.h",
+ "atomic_ref_count.h",
+ "atomic_sequence_num.h",
+ "atomicops.h",
+ "atomicops_internals_portable.h",
+ "atomicops_internals_x86_msvc.h",
+ "auto_reset.h",
+ "barrier_closure.cc",
+ "barrier_closure.h",
+ "base64.cc",
+ "base64.h",
+ "base64url.cc",
+ "base64url.h",
+ "base_export.h",
+ "base_switches.h",
+ "big_endian.cc",
+ "big_endian.h",
+ "bind.h",
+ "bind_helpers.cc",
+ "bind_helpers.h",
+ "bind_internal.h",
+ "bit_cast.h",
+ "bits.h",
+ "build_time.cc",
+ "build_time.h",
+ "callback.h",
+ "callback_helpers.cc",
+ "callback_helpers.h",
+ "callback_internal.cc",
+ "callback_internal.h",
+ "cancelable_callback.h",
+ "command_line.cc",
+ "command_line.h",
+ "compiler_specific.h",
+ "containers/adapters.h",
+ "containers/hash_tables.h",
+ "containers/linked_list.h",
+ "containers/mru_cache.h",
+ "containers/scoped_ptr_hash_map.h",
+ "containers/small_map.h",
+ "containers/stack_container.h",
+ "cpu.cc",
+ "cpu.h",
+ "critical_closure.h",
+ "critical_closure_internal_ios.mm",
+ "debug/alias.cc",
+ "debug/alias.h",
+ "debug/asan_invalid_access.cc",
+ "debug/asan_invalid_access.h",
+ "debug/close_handle_hook_win.cc",
+ "debug/close_handle_hook_win.h",
+ "debug/crash_logging.cc",
+ "debug/crash_logging.h",
+ "debug/debugger.cc",
+ "debug/debugger.h",
+ "debug/debugger_posix.cc",
+ "debug/debugger_win.cc",
+ "debug/dump_without_crashing.cc",
+ "debug/dump_without_crashing.h",
+ "debug/gdi_debug_util_win.cc",
+ "debug/gdi_debug_util_win.h",
+
+ # This file depends on files from the "debug/allocator" target,
+ # but this target does not depend on "debug/allocator" (see
+ # allocator.gyp for details).
+ "debug/leak_annotations.h",
+ "debug/leak_tracker.h",
+ "debug/proc_maps_linux.cc",
+ "debug/proc_maps_linux.h",
+ "debug/profiler.cc",
+ "debug/profiler.h",
+ "debug/stack_trace.cc",
+ "debug/stack_trace.h",
+ "debug/stack_trace_android.cc",
+ "debug/stack_trace_posix.cc",
+ "debug/stack_trace_win.cc",
+ "debug/task_annotator.cc",
+ "debug/task_annotator.h",
+ "deferred_sequenced_task_runner.cc",
+ "deferred_sequenced_task_runner.h",
+ "environment.cc",
+ "environment.h",
+ "feature_list.cc",
+ "feature_list.h",
+ "file_descriptor_posix.h",
+ "file_version_info.h",
+ "file_version_info_mac.h",
+ "file_version_info_mac.mm",
+ "file_version_info_win.cc",
+ "file_version_info_win.h",
+ "files/dir_reader_fallback.h",
+ "files/dir_reader_linux.h",
+ "files/dir_reader_posix.h",
+ "files/file.cc",
+ "files/file_enumerator.cc",
+ "files/file_enumerator.h",
+ "files/file_enumerator_posix.cc",
+ "files/file_enumerator_win.cc",
+ "files/file_path.cc",
+ "files/file_path.h",
+ "files/file_path_constants.cc",
+ "files/file_path_watcher.cc",
+ "files/file_path_watcher.h",
+ "files/file_path_watcher_fsevents.cc",
+ "files/file_path_watcher_fsevents.h",
+ "files/file_path_watcher_kqueue.cc",
+ "files/file_path_watcher_kqueue.h",
+ "files/file_path_watcher_linux.cc",
+ "files/file_path_watcher_mac.cc",
+ "files/file_path_watcher_win.cc",
+ "files/file_posix.cc",
+ "files/file_proxy.cc",
+ "files/file_proxy.h",
+ "files/file_tracing.cc",
+ "files/file_tracing.h",
+ "files/file_util.cc",
+ "files/file_util.h",
+ "files/file_util_android.cc",
+ "files/file_util_linux.cc",
+ "files/file_util_mac.mm",
+ "files/file_util_posix.cc",
+ "files/file_util_proxy.cc",
+ "files/file_util_proxy.h",
+ "files/file_util_win.cc",
+ "files/file_win.cc",
+ "files/important_file_writer.cc",
+ "files/important_file_writer.h",
+ "files/memory_mapped_file.cc",
+ "files/memory_mapped_file.h",
+ "files/memory_mapped_file_posix.cc",
+ "files/memory_mapped_file_win.cc",
+ "files/scoped_file.cc",
+ "files/scoped_file.h",
+ "files/scoped_temp_dir.cc",
+ "files/scoped_temp_dir.h",
+ "format_macros.h",
+ "gtest_prod_util.h",
+ "guid.cc",
+ "guid.h",
+ "hash.cc",
+ "hash.h",
+ "id_map.h",
+ "ios/crb_protocol_observers.h",
+ "ios/crb_protocol_observers.mm",
+ "ios/device_util.h",
+ "ios/device_util.mm",
+ "ios/ios_util.h",
+ "ios/ios_util.mm",
+ "ios/ns_error_util.h",
+ "ios/ns_error_util.mm",
+ "ios/scoped_critical_action.h",
+ "ios/scoped_critical_action.mm",
+ "ios/weak_nsobject.h",
+ "ios/weak_nsobject.mm",
+ "json/json_file_value_serializer.cc",
+ "json/json_file_value_serializer.h",
+ "json/json_parser.cc",
+ "json/json_parser.h",
+ "json/json_reader.cc",
+ "json/json_reader.h",
+ "json/json_string_value_serializer.cc",
+ "json/json_string_value_serializer.h",
+ "json/json_value_converter.cc",
+ "json/json_value_converter.h",
+ "json/json_writer.cc",
+ "json/json_writer.h",
+ "json/string_escape.cc",
+ "json/string_escape.h",
+ "lazy_instance.cc",
+ "lazy_instance.h",
+ "linux_util.cc",
+ "linux_util.h",
+ "location.cc",
+ "location.h",
+ "logging.cc",
+ "logging.h",
+ "logging_win.cc",
+ "logging_win.h",
+ "mac/authorization_util.h",
+ "mac/authorization_util.mm",
+ "mac/bind_objc_block.h",
+ "mac/bundle_locations.h",
+ "mac/bundle_locations.mm",
+ "mac/call_with_eh_frame.cc",
+ "mac/call_with_eh_frame.h",
+ "mac/call_with_eh_frame_asm.S",
+ "mac/close_nocancel.cc",
+ "mac/cocoa_protocols.h",
+ "mac/dispatch_source_mach.cc",
+ "mac/dispatch_source_mach.h",
+ "mac/foundation_util.h",
+ "mac/foundation_util.mm",
+ "mac/launch_services_util.cc",
+ "mac/launch_services_util.h",
+ "mac/launchd.cc",
+ "mac/launchd.h",
+ "mac/mac_logging.h",
+ "mac/mac_logging.mm",
+ "mac/mac_util.h",
+ "mac/mac_util.mm",
+ "mac/mach_logging.cc",
+ "mac/mach_logging.h",
+ "mac/mach_port_broker.h",
+ "mac/mach_port_broker.mm",
+ "mac/mach_port_util.cc",
+ "mac/mach_port_util.h",
+ "mac/objc_property_releaser.h",
+ "mac/objc_property_releaser.mm",
+ "mac/os_crash_dumps.cc",
+ "mac/os_crash_dumps.h",
+ "mac/scoped_aedesc.h",
+ "mac/scoped_authorizationref.h",
+ "mac/scoped_block.h",
+ "mac/scoped_cftyperef.h",
+ "mac/scoped_dispatch_object.h",
+ "mac/scoped_ioobject.h",
+ "mac/scoped_ioplugininterface.h",
+ "mac/scoped_launch_data.h",
+ "mac/scoped_mach_port.cc",
+ "mac/scoped_mach_port.h",
+ "mac/scoped_mach_vm.cc",
+ "mac/scoped_mach_vm.h",
+ "mac/scoped_nsautorelease_pool.h",
+ "mac/scoped_nsautorelease_pool.mm",
+ "mac/scoped_nsobject.h",
+ "mac/scoped_nsobject.mm",
+ "mac/scoped_objc_class_swizzler.h",
+ "mac/scoped_objc_class_swizzler.mm",
+ "mac/scoped_sending_event.h",
+ "mac/scoped_sending_event.mm",
+ "mac/sdk_forward_declarations.h",
+ "mac/sdk_forward_declarations.mm",
+ "macros.h",
+ "md5.cc",
+ "md5.h",
+ "memory/aligned_memory.cc",
+ "memory/aligned_memory.h",
+ "memory/discardable_memory.cc",
+ "memory/discardable_memory.h",
+ "memory/discardable_memory_allocator.cc",
+ "memory/discardable_memory_allocator.h",
+ "memory/discardable_shared_memory.cc",
+ "memory/discardable_shared_memory.h",
+ "memory/free_deleter.h",
+ "memory/linked_ptr.h",
+ "memory/manual_constructor.h",
+ "memory/memory_pressure_listener.cc",
+ "memory/memory_pressure_listener.h",
+ "memory/memory_pressure_monitor.cc",
+ "memory/memory_pressure_monitor.h",
+ "memory/memory_pressure_monitor_chromeos.cc",
+ "memory/memory_pressure_monitor_chromeos.h",
+ "memory/memory_pressure_monitor_mac.cc",
+ "memory/memory_pressure_monitor_mac.h",
+ "memory/memory_pressure_monitor_win.cc",
+ "memory/memory_pressure_monitor_win.h",
+ "memory/ptr_util.h",
+ "memory/raw_scoped_refptr_mismatch_checker.h",
+ "memory/ref_counted.cc",
+ "memory/ref_counted.h",
+ "memory/ref_counted_delete_on_message_loop.h",
+ "memory/ref_counted_memory.cc",
+ "memory/ref_counted_memory.h",
+ "memory/scoped_policy.h",
+ "memory/scoped_vector.h",
+ "memory/shared_memory.h",
+ "memory/shared_memory_android.cc",
+ "memory/shared_memory_handle.h",
+ "memory/shared_memory_handle_mac.cc",
+ "memory/shared_memory_handle_win.cc",
+ "memory/shared_memory_mac.cc",
+ "memory/shared_memory_nacl.cc",
+ "memory/shared_memory_posix.cc",
+ "memory/shared_memory_win.cc",
+ "memory/singleton.cc",
+ "memory/singleton.h",
+ "memory/weak_ptr.cc",
+ "memory/weak_ptr.h",
+ "message_loop/incoming_task_queue.cc",
+ "message_loop/incoming_task_queue.h",
+ "message_loop/message_loop.cc",
+ "message_loop/message_loop.h",
+ "message_loop/message_loop_task_runner.cc",
+ "message_loop/message_loop_task_runner.h",
+ "message_loop/message_pump.cc",
+ "message_loop/message_pump.h",
+ "message_loop/message_pump_android.cc",
+ "message_loop/message_pump_android.h",
+ "message_loop/message_pump_default.cc",
+ "message_loop/message_pump_default.h",
+ "message_loop/message_pump_glib.cc",
+ "message_loop/message_pump_glib.h",
+ "message_loop/message_pump_io_ios.cc",
+ "message_loop/message_pump_io_ios.h",
+ "message_loop/message_pump_libevent.cc",
+ "message_loop/message_pump_libevent.h",
+ "message_loop/message_pump_mac.h",
+ "message_loop/message_pump_mac.mm",
+ "message_loop/message_pump_win.cc",
+ "message_loop/message_pump_win.h",
+ "metrics/bucket_ranges.cc",
+ "metrics/bucket_ranges.h",
+ "metrics/field_trial.cc",
+ "metrics/field_trial.h",
+ "metrics/histogram.cc",
+ "metrics/histogram.h",
+ "metrics/histogram_base.cc",
+ "metrics/histogram_base.h",
+ "metrics/histogram_delta_serialization.cc",
+ "metrics/histogram_delta_serialization.h",
+ "metrics/histogram_flattener.h",
+ "metrics/histogram_macros.h",
+ "metrics/histogram_samples.cc",
+ "metrics/histogram_samples.h",
+ "metrics/histogram_snapshot_manager.cc",
+ "metrics/histogram_snapshot_manager.h",
+ "metrics/metrics_hashes.cc",
+ "metrics/metrics_hashes.h",
+ "metrics/persistent_histogram_allocator.cc",
+ "metrics/persistent_histogram_allocator.h",
+ "metrics/persistent_memory_allocator.cc",
+ "metrics/persistent_memory_allocator.h",
+ "metrics/persistent_sample_map.cc",
+ "metrics/persistent_sample_map.h",
+ "metrics/sample_map.cc",
+ "metrics/sample_map.h",
+ "metrics/sample_vector.cc",
+ "metrics/sample_vector.h",
+ "metrics/sparse_histogram.cc",
+ "metrics/sparse_histogram.h",
+ "metrics/statistics_recorder.cc",
+ "metrics/statistics_recorder.h",
+ "metrics/user_metrics.cc",
+ "metrics/user_metrics.h",
+ "metrics/user_metrics_action.h",
+ "native_library.h",
+ "native_library_ios.mm",
+ "native_library_mac.mm",
+ "native_library_posix.cc",
+ "native_library_win.cc",
+ "nix/mime_util_xdg.cc",
+ "nix/mime_util_xdg.h",
+ "nix/xdg_util.cc",
+ "nix/xdg_util.h",
+ "numerics/safe_conversions.h",
+ "numerics/safe_conversions_impl.h",
+ "numerics/safe_math.h",
+ "numerics/safe_math_impl.h",
+ "observer_list.h",
+ "observer_list_threadsafe.h",
+ "optional.h",
+ "os_compat_android.cc",
+ "os_compat_android.h",
+ "os_compat_nacl.cc",
+ "os_compat_nacl.h",
+ "path_service.cc",
+ "path_service.h",
+ "pending_task.cc",
+ "pending_task.h",
+ "pickle.cc",
+ "pickle.h",
+ "posix/eintr_wrapper.h",
+ "posix/file_descriptor_shuffle.cc",
+ "posix/global_descriptors.cc",
+ "posix/global_descriptors.h",
+ "posix/safe_strerror.cc",
+ "posix/safe_strerror.h",
+ "posix/unix_domain_socket_linux.cc",
+ "posix/unix_domain_socket_linux.h",
+ "power_monitor/power_monitor.cc",
+ "power_monitor/power_monitor.h",
+ "power_monitor/power_monitor_device_source.cc",
+ "power_monitor/power_monitor_device_source.h",
+ "power_monitor/power_monitor_device_source_android.cc",
+ "power_monitor/power_monitor_device_source_android.h",
+ "power_monitor/power_monitor_device_source_chromeos.cc",
+ "power_monitor/power_monitor_device_source_ios.mm",
+ "power_monitor/power_monitor_device_source_mac.mm",
+ "power_monitor/power_monitor_device_source_posix.cc",
+ "power_monitor/power_monitor_device_source_win.cc",
+ "power_monitor/power_monitor_source.cc",
+ "power_monitor/power_monitor_source.h",
+ "power_monitor/power_observer.h",
+ "process/internal_linux.cc",
+ "process/internal_linux.h",
+ "process/kill.cc",
+ "process/kill.h",
+ "process/kill_mac.cc",
+ "process/kill_posix.cc",
+ "process/kill_win.cc",
+ "process/launch.cc",
+ "process/launch.h",
+ "process/launch_ios.cc",
+ "process/launch_mac.cc",
+ "process/launch_posix.cc",
+ "process/launch_win.cc",
+ "process/memory.cc",
+ "process/memory.h",
+ "process/memory_linux.cc",
+ "process/memory_mac.mm",
+ "process/memory_win.cc",
+ "process/port_provider_mac.cc",
+ "process/port_provider_mac.h",
+ "process/process.h",
+ "process/process_handle.cc",
+
+ #"process/process_handle_freebsd.cc", # Unused in Chromium build.
+ "process/process_handle_linux.cc",
+ "process/process_handle_mac.cc",
+
+ #"process/process_handle_openbsd.cc", # Unused in Chromium build.
+ "process/process_handle_posix.cc",
+ "process/process_handle_win.cc",
+ "process/process_info.h",
+ "process/process_info_linux.cc",
+ "process/process_info_mac.cc",
+ "process/process_info_win.cc",
+ "process/process_iterator.cc",
+ "process/process_iterator.h",
+
+ #"process/process_iterator_freebsd.cc", # Unused in Chromium build.
+ "process/process_iterator_linux.cc",
+ "process/process_iterator_mac.cc",
+
+ #"process/process_iterator_openbsd.cc", # Unused in Chromium build.
+ "process/process_iterator_win.cc",
+ "process/process_linux.cc",
+ "process/process_metrics.cc",
+ "process/process_metrics.h",
+
+ #"process/process_metrics_freebsd.cc", # Unused in Chromium build.
+ "process/process_metrics_ios.cc",
+ "process/process_metrics_linux.cc",
+ "process/process_metrics_mac.cc",
+
+ #"process/process_metrics_openbsd.cc", # Unused in Chromium build.
+ "process/process_metrics_posix.cc",
+ "process/process_metrics_win.cc",
+ "process/process_posix.cc",
+ "process/process_win.cc",
+ "profiler/native_stack_sampler.cc",
+ "profiler/native_stack_sampler.h",
+ "profiler/native_stack_sampler_posix.cc",
+ "profiler/native_stack_sampler_win.cc",
+ "profiler/scoped_profile.cc",
+ "profiler/scoped_profile.h",
+ "profiler/scoped_tracker.cc",
+ "profiler/scoped_tracker.h",
+ "profiler/stack_sampling_profiler.cc",
+ "profiler/stack_sampling_profiler.h",
+ "profiler/tracked_time.cc",
+ "profiler/tracked_time.h",
+ "rand_util.cc",
+ "rand_util.h",
+ "rand_util_nacl.cc",
+ "rand_util_posix.cc",
+ "rand_util_win.cc",
+ "run_loop.cc",
+ "run_loop.h",
+ "scoped_generic.h",
+ "scoped_native_library.cc",
+ "scoped_native_library.h",
+ "scoped_observer.h",
+ "sequence_checker.h",
+ "sequence_checker_impl.cc",
+ "sequence_checker_impl.h",
+ "sequenced_task_runner.cc",
+ "sequenced_task_runner.h",
+ "sequenced_task_runner_helpers.h",
+ "sha1.cc",
+ "sha1.h",
+ "single_thread_task_runner.h",
+ "stl_util.h",
+ "strings/latin1_string_conversions.cc",
+ "strings/latin1_string_conversions.h",
+ "strings/nullable_string16.cc",
+ "strings/nullable_string16.h",
+ "strings/pattern.cc",
+ "strings/pattern.h",
+ "strings/safe_sprintf.cc",
+ "strings/safe_sprintf.h",
+ "strings/string16.cc",
+ "strings/string16.h",
+ "strings/string_number_conversions.cc",
+ "strings/string_number_conversions.h",
+ "strings/string_piece.cc",
+ "strings/string_piece.h",
+ "strings/string_split.cc",
+ "strings/string_split.h",
+ "strings/string_tokenizer.h",
+ "strings/string_util.cc",
+ "strings/string_util.h",
+ "strings/string_util_constants.cc",
+ "strings/string_util_posix.h",
+ "strings/string_util_win.h",
+ "strings/stringize_macros.h",
+ "strings/stringprintf.cc",
+ "strings/stringprintf.h",
+ "strings/sys_string_conversions.h",
+ "strings/sys_string_conversions_mac.mm",
+ "strings/sys_string_conversions_posix.cc",
+ "strings/sys_string_conversions_win.cc",
+ "strings/utf_offset_string_conversions.cc",
+ "strings/utf_offset_string_conversions.h",
+ "strings/utf_string_conversion_utils.cc",
+ "strings/utf_string_conversion_utils.h",
+ "strings/utf_string_conversions.cc",
+ "strings/utf_string_conversions.h",
+ "supports_user_data.cc",
+ "supports_user_data.h",
+ "sync_socket.h",
+ "sync_socket_posix.cc",
+ "sync_socket_win.cc",
+ "synchronization/cancellation_flag.cc",
+ "synchronization/cancellation_flag.h",
+ "synchronization/condition_variable.h",
+ "synchronization/condition_variable_posix.cc",
+ "synchronization/condition_variable_win.cc",
+ "synchronization/lock.cc",
+ "synchronization/lock.h",
+ "synchronization/lock_impl.h",
+ "synchronization/lock_impl_posix.cc",
+ "synchronization/lock_impl_win.cc",
+ "synchronization/read_write_lock.h",
+ "synchronization/read_write_lock_nacl.cc",
+ "synchronization/read_write_lock_posix.cc",
+ "synchronization/read_write_lock_win.cc",
+ "synchronization/spin_wait.h",
+ "synchronization/waitable_event.h",
+ "synchronization/waitable_event_posix.cc",
+ "synchronization/waitable_event_watcher.h",
+ "synchronization/waitable_event_watcher_posix.cc",
+ "synchronization/waitable_event_watcher_win.cc",
+ "synchronization/waitable_event_win.cc",
+ "sys_byteorder.h",
+ "sys_info.cc",
+ "sys_info.h",
+ "sys_info_android.cc",
+ "sys_info_chromeos.cc",
+
+ #"sys_info_freebsd.cc", # Unused in Chromium build.
+ "sys_info_ios.mm",
+ "sys_info_linux.cc",
+ "sys_info_mac.mm",
+
+ #"sys_info_openbsd.cc", # Unused in Chromium build.
+ "sys_info_posix.cc",
+ "sys_info_win.cc",
+ "system_monitor/system_monitor.cc",
+ "system_monitor/system_monitor.h",
+ "task/cancelable_task_tracker.cc",
+ "task/cancelable_task_tracker.h",
+ "task_runner.cc",
+ "task_runner.h",
+ "task_runner_util.h",
+ "task_scheduler/delayed_task_manager.cc",
+ "task_scheduler/delayed_task_manager.h",
+ "task_scheduler/priority_queue.cc",
+ "task_scheduler/priority_queue.h",
+ "task_scheduler/scheduler_lock.h",
+ "task_scheduler/scheduler_lock_impl.cc",
+ "task_scheduler/scheduler_lock_impl.h",
+ "task_scheduler/scheduler_service_thread.cc",
+ "task_scheduler/scheduler_service_thread.h",
+ "task_scheduler/scheduler_worker.cc",
+ "task_scheduler/scheduler_worker.h",
+ "task_scheduler/scheduler_worker_pool.h",
+ "task_scheduler/scheduler_worker_pool_impl.cc",
+ "task_scheduler/scheduler_worker_pool_impl.h",
+ "task_scheduler/scheduler_worker_stack.cc",
+ "task_scheduler/scheduler_worker_stack.h",
+ "task_scheduler/sequence.cc",
+ "task_scheduler/sequence.h",
+ "task_scheduler/sequence_sort_key.cc",
+ "task_scheduler/sequence_sort_key.h",
+ "task_scheduler/task.cc",
+ "task_scheduler/task.h",
+ "task_scheduler/task_scheduler.cc",
+ "task_scheduler/task_scheduler.h",
+ "task_scheduler/task_scheduler_impl.cc",
+ "task_scheduler/task_scheduler_impl.h",
+ "task_scheduler/task_tracker.cc",
+ "task_scheduler/task_tracker.h",
+ "task_scheduler/task_traits.cc",
+ "task_scheduler/task_traits.h",
+ "template_util.h",
+ "third_party/dmg_fp/dmg_fp.h",
+ "third_party/dmg_fp/dtoa_wrapper.cc",
+ "third_party/dmg_fp/g_fmt.cc",
+ "third_party/icu/icu_utf.cc",
+ "third_party/icu/icu_utf.h",
+ "third_party/nspr/prtime.cc",
+ "third_party/nspr/prtime.h",
+ "third_party/superfasthash/superfasthash.c",
+ "threading/non_thread_safe.h",
+ "threading/non_thread_safe_impl.cc",
+ "threading/non_thread_safe_impl.h",
+ "threading/platform_thread.h",
+ "threading/platform_thread_android.cc",
+ "threading/platform_thread_internal_posix.cc",
+ "threading/platform_thread_internal_posix.h",
+ "threading/platform_thread_linux.cc",
+ "threading/platform_thread_mac.mm",
+ "threading/platform_thread_posix.cc",
+ "threading/platform_thread_win.cc",
+ "threading/post_task_and_reply_impl.cc",
+ "threading/post_task_and_reply_impl.h",
+ "threading/sequenced_task_runner_handle.cc",
+ "threading/sequenced_task_runner_handle.h",
+ "threading/sequenced_worker_pool.cc",
+ "threading/sequenced_worker_pool.h",
+ "threading/simple_thread.cc",
+ "threading/simple_thread.h",
+ "threading/thread.cc",
+ "threading/thread.h",
+ "threading/thread_checker.h",
+ "threading/thread_checker_impl.cc",
+ "threading/thread_checker_impl.h",
+ "threading/thread_collision_warner.cc",
+ "threading/thread_collision_warner.h",
+ "threading/thread_id_name_manager.cc",
+ "threading/thread_id_name_manager.h",
+ "threading/thread_local.h",
+ "threading/thread_local_android.cc",
+ "threading/thread_local_posix.cc",
+ "threading/thread_local_storage.cc",
+ "threading/thread_local_storage.h",
+ "threading/thread_local_storage_posix.cc",
+ "threading/thread_local_storage_win.cc",
+ "threading/thread_local_win.cc",
+ "threading/thread_restrictions.cc",
+ "threading/thread_restrictions.h",
+ "threading/thread_task_runner_handle.cc",
+ "threading/thread_task_runner_handle.h",
+ "threading/watchdog.cc",
+ "threading/watchdog.h",
+ "threading/worker_pool.cc",
+ "threading/worker_pool.h",
+ "threading/worker_pool_posix.cc",
+ "threading/worker_pool_posix.h",
+ "threading/worker_pool_win.cc",
+ "time/clock.cc",
+ "time/clock.h",
+ "time/default_clock.cc",
+ "time/default_clock.h",
+ "time/default_tick_clock.cc",
+ "time/default_tick_clock.h",
+ "time/tick_clock.cc",
+ "time/tick_clock.h",
+ "time/time.cc",
+ "time/time.h",
+ "time/time_mac.cc",
+ "time/time_posix.cc",
+ "time/time_win.cc",
+ "timer/elapsed_timer.cc",
+ "timer/elapsed_timer.h",
+ "timer/hi_res_timer_manager.h",
+ "timer/hi_res_timer_manager_posix.cc",
+ "timer/hi_res_timer_manager_win.cc",
+ "timer/mock_timer.cc",
+ "timer/mock_timer.h",
+ "timer/timer.cc",
+ "timer/timer.h",
+ "trace_event/blame_context.cc",
+ "trace_event/blame_context.h",
+ "trace_event/common/trace_event_common.h",
+ "trace_event/heap_profiler.h",
+ "trace_event/heap_profiler_allocation_context.cc",
+ "trace_event/heap_profiler_allocation_context.h",
+ "trace_event/heap_profiler_allocation_context_tracker.cc",
+ "trace_event/heap_profiler_allocation_context_tracker.h",
+ "trace_event/heap_profiler_allocation_register.cc",
+ "trace_event/heap_profiler_allocation_register.h",
+ "trace_event/heap_profiler_allocation_register_posix.cc",
+ "trace_event/heap_profiler_allocation_register_win.cc",
+ "trace_event/heap_profiler_heap_dump_writer.cc",
+ "trace_event/heap_profiler_heap_dump_writer.h",
+ "trace_event/heap_profiler_stack_frame_deduplicator.cc",
+ "trace_event/heap_profiler_stack_frame_deduplicator.h",
+ "trace_event/heap_profiler_type_name_deduplicator.cc",
+ "trace_event/heap_profiler_type_name_deduplicator.h",
+ "trace_event/java_heap_dump_provider_android.cc",
+ "trace_event/java_heap_dump_provider_android.h",
+ "trace_event/memory_allocator_dump.cc",
+ "trace_event/memory_allocator_dump.h",
+ "trace_event/memory_allocator_dump_guid.cc",
+ "trace_event/memory_allocator_dump_guid.h",
+ "trace_event/memory_dump_manager.cc",
+ "trace_event/memory_dump_manager.h",
+ "trace_event/memory_dump_provider.h",
+ "trace_event/memory_dump_request_args.cc",
+ "trace_event/memory_dump_request_args.h",
+ "trace_event/memory_dump_session_state.cc",
+ "trace_event/memory_dump_session_state.h",
+ "trace_event/memory_infra_background_whitelist.cc",
+ "trace_event/memory_infra_background_whitelist.h",
+ "trace_event/process_memory_dump.cc",
+ "trace_event/process_memory_dump.h",
+ "trace_event/process_memory_maps.cc",
+ "trace_event/process_memory_maps.h",
+ "trace_event/process_memory_totals.cc",
+ "trace_event/process_memory_totals.h",
+ "trace_event/trace_buffer.cc",
+ "trace_event/trace_buffer.h",
+ "trace_event/trace_config.cc",
+ "trace_event/trace_config.h",
+ "trace_event/trace_event.h",
+ "trace_event/trace_event_android.cc",
+ "trace_event/trace_event_argument.cc",
+ "trace_event/trace_event_argument.h",
+ "trace_event/trace_event_etw_export_win.cc",
+ "trace_event/trace_event_etw_export_win.h",
+ "trace_event/trace_event_impl.cc",
+ "trace_event/trace_event_impl.h",
+ "trace_event/trace_event_memory_overhead.cc",
+ "trace_event/trace_event_memory_overhead.h",
+ "trace_event/trace_event_synthetic_delay.cc",
+ "trace_event/trace_event_synthetic_delay.h",
+ "trace_event/trace_event_system_stats_monitor.cc",
+ "trace_event/trace_event_system_stats_monitor.h",
+ "trace_event/trace_log.cc",
+ "trace_event/trace_log.h",
+ "trace_event/trace_log_constants.cc",
+ "trace_event/trace_sampling_thread.cc",
+ "trace_event/trace_sampling_thread.h",
+ "trace_event/tracing_agent.cc",
+ "trace_event/tracing_agent.h",
+ "trace_event/winheap_dump_provider_win.cc",
+ "trace_event/winheap_dump_provider_win.h",
+ "tracked_objects.cc",
+ "tracked_objects.h",
+ "tracking_info.cc",
+ "tracking_info.h",
+ "tuple.h",
+ "value_conversions.cc",
+ "value_conversions.h",
+ "values.cc",
+ "values.h",
+ "version.cc",
+ "version.h",
+ "vlog.cc",
+ "vlog.h",
+ "win/enum_variant.cc",
+ "win/enum_variant.h",
+ "win/event_trace_consumer.h",
+ "win/event_trace_controller.cc",
+ "win/event_trace_controller.h",
+ "win/event_trace_provider.cc",
+ "win/event_trace_provider.h",
+ "win/i18n.cc",
+ "win/i18n.h",
+ "win/iat_patch_function.cc",
+ "win/iat_patch_function.h",
+ "win/iunknown_impl.cc",
+ "win/iunknown_impl.h",
+ "win/message_window.cc",
+ "win/message_window.h",
+ "win/object_watcher.cc",
+ "win/object_watcher.h",
+ "win/process_startup_helper.cc",
+ "win/process_startup_helper.h",
+ "win/registry.cc",
+ "win/registry.h",
+ "win/resource_util.cc",
+ "win/resource_util.h",
+ "win/scoped_bstr.cc",
+ "win/scoped_bstr.h",
+ "win/scoped_co_mem.h",
+ "win/scoped_com_initializer.h",
+ "win/scoped_comptr.h",
+ "win/scoped_gdi_object.h",
+ "win/scoped_handle.cc",
+ "win/scoped_handle.h",
+ "win/scoped_hdc.h",
+ "win/scoped_hglobal.h",
+ "win/scoped_process_information.cc",
+ "win/scoped_process_information.h",
+ "win/scoped_propvariant.h",
+ "win/scoped_select_object.h",
+ "win/scoped_variant.cc",
+ "win/scoped_variant.h",
+ "win/shortcut.cc",
+ "win/shortcut.h",
+ "win/startup_information.cc",
+ "win/startup_information.h",
+ "win/wait_chain.cc",
+ "win/wait_chain.h",
+ "win/win_util.cc",
+ "win/win_util.h",
+ "win/windows_version.cc",
+ "win/windows_version.h",
+ "win/wrapped_window_proc.cc",
+ "win/wrapped_window_proc.h",
+ ]
+
+ defines = []
+ data = []
+
+ configs += [
+ ":base_flags",
+ ":base_implementation",
+ "//base/allocator:allocator_shim_define", # for allocator_check.cc.
+ "//build/config:precompiled_headers",
+ ]
+
+ deps = [
+ "//base/allocator",
+ "//base/allocator:features",
+ "//base/third_party/dynamic_annotations",
+ "//third_party/modp_b64",
+ ]
+
+ public_deps = [
+ ":base_paths",
+ ":base_static",
+ ":build_date",
+ ":debugging_flags",
+ ]
+
+ # Needed for <atomic> if using newer C++ library than sysroot
+ if (!use_sysroot && (is_android || is_linux)) {
+ libs = [ "atomic" ]
+ }
+
+ if (use_experimental_allocator_shim) {
+    # The allocator shim is part of the base API. This is to allow clients of
+    # base to install hooks into the allocator path.
+ public_deps += [ "//base/allocator:unified_allocator_shim" ]
+ }
+
+ # Allow more direct string conversions on platforms with native utf8
+ # strings
+ if (is_mac || is_ios || is_chromeos || is_chromecast) {
+ defines += [ "SYSTEM_NATIVE_UTF8" ]
+ }
+
+ # Android.
+ if (is_android) {
+ sources -= [
+ "debug/stack_trace_posix.cc",
+ "power_monitor/power_monitor_device_source_posix.cc",
+ ]
+
+ # Android uses some Linux sources, put those back.
+ set_sources_assignment_filter([])
+ sources += [
+ "debug/proc_maps_linux.cc",
+ "files/file_path_watcher_linux.cc",
+ "posix/unix_domain_socket_linux.cc",
+ "process/internal_linux.cc",
+ "process/memory_linux.cc",
+ "process/process_handle_linux.cc",
+ "process/process_iterator_linux.cc",
+ "process/process_metrics_linux.cc",
+ "sys_info_linux.cc",
+ "trace_event/malloc_dump_provider.cc",
+ "trace_event/malloc_dump_provider.h",
+ ]
+ set_sources_assignment_filter(sources_assignment_filter)
+
+ deps += [
+ ":base_jni_headers",
+ "//third_party/android_tools:cpu_features",
+ "//third_party/ashmem",
+ ]
+
+ # Needs to be a public config so that dependent targets link against it as
+ # well when doing a component build.
+ public_configs = [ ":android_system_libs" ]
+ }
+
+ # Chromeos.
+ if (is_chromeos) {
+ sources -= [ "power_monitor/power_monitor_device_source_posix.cc" ]
+ }
+
+ # NaCl.
+ if (is_nacl) {
+ # We reset sources_assignment_filter in order to explicitly include
+ # the linux file (which would otherwise be filtered out).
+ set_sources_assignment_filter([])
+ sources += [
+ "files/file_path_watcher_stub.cc",
+ "process/process_metrics_nacl.cc",
+ "sync_socket_nacl.cc",
+ "threading/platform_thread_linux.cc",
+ ]
+ set_sources_assignment_filter(sources_assignment_filter)
+
+ sources -= [
+ "cpu.cc",
+ "debug/crash_logging.cc",
+ "debug/crash_logging.h",
+ "debug/stack_trace.cc",
+ "debug/stack_trace_posix.cc",
+ "files/file_enumerator_posix.cc",
+ "files/file_proxy.cc",
+ "files/file_util_proxy.cc",
+ "files/important_file_writer.cc",
+ "files/important_file_writer.h",
+ "files/scoped_temp_dir.cc",
+ "memory/discardable_memory.cc",
+ "memory/discardable_memory.h",
+ "memory/discardable_memory_allocator.cc",
+ "memory/discardable_memory_allocator.h",
+ "memory/discardable_shared_memory.cc",
+ "memory/discardable_shared_memory.h",
+ "memory/shared_memory_posix.cc",
+ "native_library_posix.cc",
+ "path_service.cc",
+ "process/kill.cc",
+ "process/kill.h",
+ "process/memory.cc",
+ "process/memory.h",
+ "process/process_iterator.cc",
+ "process/process_iterator.h",
+ "process/process_metrics.cc",
+ "process/process_metrics_posix.cc",
+ "process/process_posix.cc",
+ "scoped_native_library.cc",
+ "sync_socket_posix.cc",
+ "synchronization/read_write_lock_posix.cc",
+ "sys_info.cc",
+ "sys_info_posix.cc",
+ "trace_event/trace_event_system_stats_monitor.cc",
+ ]
+
+ if (is_nacl_nonsfi) {
+ set_sources_assignment_filter([])
+ sources += [ "posix/unix_domain_socket_linux.cc" ]
+ set_sources_assignment_filter(sources_assignment_filter)
+ sources -= [ "rand_util_nacl.cc" ]
+ configs += [ ":nacl_nonsfi_warnings" ]
+ } else {
+ sources -= [
+ "files/file_util.cc",
+ "files/file_util.h",
+ "files/file_util_posix.cc",
+ "json/json_file_value_serializer.cc",
+ "json/json_file_value_serializer.h",
+ "message_loop/message_pump_libevent.cc",
+ "message_loop/message_pump_libevent.h",
+ "process/kill_posix.cc",
+ "process/launch.cc",
+ "process/launch.h",
+ "process/launch_posix.cc",
+ "rand_util_posix.cc",
+ ]
+ }
+ } else {
+ # Remove NaCl stuff.
+ sources -= [
+ "memory/shared_memory_nacl.cc",
+ "os_compat_nacl.cc",
+ "os_compat_nacl.h",
+ "rand_util_nacl.cc",
+ "synchronization/read_write_lock_nacl.cc",
+ ]
+ }
+
+ # Windows.
+ if (is_win) {
+ sources += [
+ "profiler/win32_stack_frame_unwinder.cc",
+ "profiler/win32_stack_frame_unwinder.h",
+ ]
+
+ sources -= [
+ "message_loop/message_pump_libevent.cc",
+ "strings/string16.cc",
+ ]
+
+ deps += [
+ "//base/trace_event/etw_manifest:chrome_events_win",
+ "//base/win:base_win_features",
+ ]
+
+ if (is_component_build) {
+ # Copy the VS runtime DLLs into the isolate so that they don't have to be
+ # preinstalled on the target machine. The debug runtimes have a "d" at
+ # the end.
+ if (is_debug) {
+ vcrt_suffix = "d"
+ } else {
+ vcrt_suffix = ""
+ }
+
+ # These runtime files are copied to the output directory by the
+ # vs_toolchain script that runs as part of toolchain configuration.
+ if (visual_studio_version == "2015") {
+ data += [
+ "$root_out_dir/msvcp140${vcrt_suffix}.dll",
+ "$root_out_dir/vccorlib140${vcrt_suffix}.dll",
+ "$root_out_dir/vcruntime140${vcrt_suffix}.dll",
+
+ # Universal Windows 10 CRT files
+ "$root_out_dir/api-ms-win-core-console-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-datetime-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-debug-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-errorhandling-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-file-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-file-l1-2-0.dll",
+ "$root_out_dir/api-ms-win-core-file-l2-1-0.dll",
+ "$root_out_dir/api-ms-win-core-handle-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-heap-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-interlocked-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-libraryloader-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-localization-l1-2-0.dll",
+ "$root_out_dir/api-ms-win-core-memory-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-namedpipe-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-processenvironment-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-processthreads-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-processthreads-l1-1-1.dll",
+ "$root_out_dir/api-ms-win-core-profile-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-rtlsupport-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-string-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-synch-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-synch-l1-2-0.dll",
+ "$root_out_dir/api-ms-win-core-sysinfo-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-timezone-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-util-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-conio-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-convert-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-environment-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-filesystem-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-heap-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-locale-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-math-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-multibyte-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-private-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-process-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-runtime-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-stdio-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-string-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-time-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-utility-l1-1-0.dll",
+ "$root_out_dir/ucrtbase${vcrt_suffix}.dll",
+ ]
+ } else {
+ data += [
+ "$root_out_dir/msvcp120${vcrt_suffix}.dll",
+ "$root_out_dir/msvcr120${vcrt_suffix}.dll",
+ ]
+ }
+ if (is_asan) {
+ data += [ "//third_party/llvm-build/Release+Asserts/lib/clang/$clang_version/lib/windows/clang_rt.asan_dynamic-i386.dll" ]
+ }
+ }
+
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
+ libs = [
+ "cfgmgr32.lib",
+ "powrprof.lib",
+ "setupapi.lib",
+ "userenv.lib",
+ "winmm.lib",
+ ]
+ all_dependent_configs = [ ":base_win_linker_flags" ]
+ } else if (!is_nacl || is_nacl_nonsfi) {
+ # Non-Windows.
+ deps += [ "//base/third_party/libevent" ]
+ }
+
+ # Desktop Mac.
+ if (is_mac) {
+ sources += [
+ "trace_event/malloc_dump_provider.cc",
+ "trace_event/malloc_dump_provider.h",
+ ]
+ libs = [
+ "ApplicationServices.framework",
+ "AppKit.framework",
+ "bsm",
+ "CoreFoundation.framework",
+ "IOKit.framework",
+ "Security.framework",
+ ]
+ }
+
+ # Mac or iOS.
+ if (is_mac || is_ios) {
+ sources -= [
+ "memory/shared_memory_posix.cc",
+ "native_library_posix.cc",
+ "strings/sys_string_conversions_posix.cc",
+ "threading/platform_thread_internal_posix.cc",
+ ]
+ } else {
+ # Non-Mac/ios.
+ sources -= [
+ "files/file_path_watcher_fsevents.cc",
+ "files/file_path_watcher_fsevents.h",
+ "files/file_path_watcher_kqueue.cc",
+ "files/file_path_watcher_kqueue.h",
+ ]
+ }
+
+ # Linux.
+ if (is_linux) {
+ sources += [
+ "trace_event/malloc_dump_provider.cc",
+ "trace_event/malloc_dump_provider.h",
+ ]
+
+ if (is_asan || is_lsan || is_msan || is_tsan) {
+ # For llvm-sanitizer.
+ data += [ "//third_party/llvm-build/Release+Asserts/lib/libstdc++.so.6" ]
+ }
+
+ # TODO(brettw) this will need to be parameterized at some point.
+ linux_configs = []
+ if (use_glib) {
+ linux_configs += [ "//build/config/linux:glib" ]
+ }
+
+ defines += [ "USE_SYMBOLIZE" ]
+
+ configs += linux_configs
+ all_dependent_configs = linux_configs
+
+ # These dependencies are not required on Android, and in the case
+ # of xdg_mime must be excluded due to licensing restrictions.
+ deps += [
+ "//base/third_party/symbolize",
+ "//base/third_party/xdg_mime",
+ "//base/third_party/xdg_user_dirs",
+ ]
+ } else {
+ # Non-Linux.
+ sources -= [
+ "nix/mime_util_xdg.cc",
+ "nix/mime_util_xdg.h",
+ "nix/xdg_util.cc",
+ "nix/xdg_util.h",
+ ]
+
+ if (!is_android) {
+ sources -= [
+ "linux_util.cc",
+ "linux_util.h",
+ ]
+ }
+ }
+
+ # iOS
+ if (is_ios) {
+ set_sources_assignment_filter([])
+
+ sources -= [
+ "files/file_path_watcher.cc",
+ "files/file_path_watcher.h",
+ "files/file_path_watcher_fsevents.cc",
+ "files/file_path_watcher_fsevents.h",
+ "files/file_path_watcher_kqueue.cc",
+ "files/file_path_watcher_kqueue.h",
+ "memory/discardable_shared_memory.cc",
+ "memory/discardable_shared_memory.h",
+ "message_loop/message_pump_libevent.cc",
+ "message_loop/message_pump_libevent.h",
+ "process/kill.cc",
+ "process/kill.h",
+ "process/kill_posix.cc",
+ "process/launch.cc",
+ "process/launch.h",
+ "process/launch_posix.cc",
+ "process/memory.cc",
+ "process/memory.h",
+ "process/process_iterator.cc",
+ "process/process_iterator.h",
+ "process/process_metrics_posix.cc",
+ "process/process_posix.cc",
+ "sync_socket.h",
+ "sync_socket_posix.cc",
+ ]
+ sources += [
+ "base_paths_mac.h",
+ "base_paths_mac.mm",
+ "file_version_info_mac.h",
+ "file_version_info_mac.mm",
+ "files/file_util_mac.mm",
+ "mac/bundle_locations.h",
+ "mac/bundle_locations.mm",
+ "mac/call_with_eh_frame.cc",
+ "mac/call_with_eh_frame.h",
+ "mac/foundation_util.h",
+ "mac/foundation_util.mm",
+ "mac/mac_logging.h",
+ "mac/mac_logging.mm",
+ "mac/mach_logging.cc",
+ "mac/mach_logging.h",
+ "mac/objc_property_releaser.h",
+ "mac/objc_property_releaser.mm",
+ "mac/scoped_block.h",
+ "mac/scoped_mach_port.cc",
+ "mac/scoped_mach_port.h",
+ "mac/scoped_mach_vm.cc",
+ "mac/scoped_mach_vm.h",
+ "mac/scoped_nsautorelease_pool.h",
+ "mac/scoped_nsautorelease_pool.mm",
+ "mac/scoped_nsobject.h",
+ "mac/scoped_nsobject.mm",
+ "mac/scoped_objc_class_swizzler.h",
+ "mac/scoped_objc_class_swizzler.mm",
+ "mac/scoped_typeref.h",
+ "memory/shared_memory_posix.cc",
+ "message_loop/message_pump_mac.h",
+ "message_loop/message_pump_mac.mm",
+ "process/memory_stubs.cc",
+ "strings/sys_string_conversions_mac.mm",
+ "threading/platform_thread_mac.mm",
+ "time/time_mac.cc",
+ ]
+
+ set_sources_assignment_filter(sources_assignment_filter)
+ }
+
+ if (!use_glib) {
+ sources -= [
+ "message_loop/message_pump_glib.cc",
+ "message_loop/message_pump_glib.h",
+ ]
+ }
+
+ if (is_asan || is_lsan || is_msan || is_tsan) {
+ data += [ "//tools/valgrind/asan/" ]
+ if (is_win) {
+ data +=
+ [ "//third_party/llvm-build/Release+Asserts/bin/llvm-symbolizer.exe" ]
+ } else {
+ data += [ "//third_party/llvm-build/Release+Asserts/bin/llvm-symbolizer" ]
+ }
+ }
+
+ configs += [ "//build/config/compiler:wexit_time_destructors" ]
+ if (!is_debug) {
+ configs -= [ "//build/config/compiler:default_optimization" ]
+ configs += [ "//build/config/compiler:optimize_max" ]
+ }
+
+ allow_circular_includes_from = public_deps
+}
+
+# Generates the build-flag header "base/debug/debugging_flags.h", exposing the
+# ENABLE_PROFILING flag whose value is taken from the GN arg $enable_profiling.
+buildflag_header("debugging_flags") {
+  header = "debugging_flags.h"
+  header_dir = "base/debug"
+  flags = [ "ENABLE_PROFILING=$enable_profiling" ]
+}
+
+# This is the subset of files from base that should not be used with a dynamic
+# library. Note that this library cannot depend on base because base depends on
+# base_static.
+static_library("base_static") {
+  sources = [
+    "base_switches.cc",
+    "base_switches.h",
+    "win/pe_image.cc",
+    "win/pe_image.h",
+  ]
+
+  # In release builds, swap the default optimization config for optimize_max.
+  if (!is_debug) {
+    configs -= [ "//build/config/compiler:default_optimization" ]
+    configs += [ "//build/config/compiler:optimize_max" ]
+  }
+}
+
+# ICU-backed internationalization support, built as the "base_i18n" component.
+# Kept separate from :base so targets that do not need ICU avoid the
+# dependency.
+component("i18n") {
+  output_name = "base_i18n"
+  sources = [
+    "i18n/base_i18n_export.h",
+    "i18n/base_i18n_switches.cc",
+    "i18n/base_i18n_switches.h",
+    "i18n/bidi_line_iterator.cc",
+    "i18n/bidi_line_iterator.h",
+    "i18n/break_iterator.cc",
+    "i18n/break_iterator.h",
+    "i18n/case_conversion.cc",
+    "i18n/case_conversion.h",
+    "i18n/char_iterator.cc",
+    "i18n/char_iterator.h",
+    "i18n/file_util_icu.cc",
+    "i18n/file_util_icu.h",
+    "i18n/i18n_constants.cc",
+    "i18n/i18n_constants.h",
+    "i18n/icu_encoding_detection.cc",
+    "i18n/icu_encoding_detection.h",
+    "i18n/icu_string_conversions.cc",
+    "i18n/icu_string_conversions.h",
+    "i18n/icu_util.cc",
+    "i18n/icu_util.h",
+    "i18n/message_formatter.cc",
+    "i18n/message_formatter.h",
+    "i18n/number_formatting.cc",
+    "i18n/number_formatting.h",
+    "i18n/rtl.cc",
+    "i18n/rtl.h",
+    "i18n/streaming_utf8_validator.cc",
+    "i18n/streaming_utf8_validator.h",
+    "i18n/string_compare.cc",
+    "i18n/string_compare.h",
+    "i18n/string_search.cc",
+    "i18n/string_search.h",
+    "i18n/time_formatting.cc",
+    "i18n/time_formatting.h",
+    "i18n/timezone.cc",
+    "i18n/timezone.h",
+    "i18n/utf8_validator_tables.cc",
+    "i18n/utf8_validator_tables.h",
+  ]
+  # Presumably drives the export macros in i18n/base_i18n_export.h when
+  # building this component itself — TODO confirm.
+  defines = [ "BASE_I18N_IMPLEMENTATION" ]
+  configs += [ "//build/config/compiler:wexit_time_destructors" ]
+  # ICU is a public dep, so it propagates to everything depending on :i18n.
+  public_deps = [
+    "//third_party/icu",
+  ]
+  deps = [
+    ":base",
+    "//base/third_party/dynamic_annotations",
+  ]
+
+  # In release builds, swap the default optimization config for optimize_max.
+  if (!is_debug) {
+    configs -= [ "//build/config/compiler:default_optimization" ]
+    configs += [ "//build/config/compiler:optimize_max" ]
+  }
+
+  # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+  configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
+  if (is_mac) {
+    libs = [ "CoreFoundation.framework" ]
+  }
+}
+
+# Performance tests for message pump and threading primitives.
+test("base_perftests") {
+  sources = [
+    "message_loop/message_pump_perftest.cc",
+
+    # "test/run_all_unittests.cc",
+    "threading/thread_perftest.cc",
+  ]
+  deps = [
+    ":base",
+    "//base/test:test_support",
+    "//base/test:test_support_perf",
+    "//testing/gtest",
+    "//testing/perf",
+  ]
+
+  # Android needs the native test launcher glue linked in.
+  if (is_android) {
+    deps += [ "//testing/android/native_test:native_test_native_code" ]
+  }
+}
+
+# Performance tests for base_i18n (streaming UTF-8 validator).
+test("base_i18n_perftests") {
+  sources = [
+    "i18n/streaming_utf8_validator_perftest.cc",
+  ]
+  deps = [
+    ":base",
+    ":i18n",
+    "//base/test:test_support",
+    "//base/test:test_support_perf",
+    "//testing/gtest",
+  ]
+}
+
+# Standalone developer executables; not built for iOS.
+if (!is_ios) {
+  # Presumably used to regenerate i18n/utf8_validator_tables.cc (it links
+  # against icuuc directly) — TODO confirm against the source file.
+  executable("build_utf8_validator_tables") {
+    sources = [
+      "i18n/build_utf8_validator_tables.cc",
+    ]
+    deps = [
+      ":base",
+      "//build/config/sanitizers:deps",
+      "//build/win:default_exe_manifest",
+      "//third_party/icu:icuuc",
+    ]
+  }
+
+  # Small example binary exercising check_example.cc.
+  executable("check_example") {
+    sources = [
+      "check_example.cc",
+    ]
+    deps = [
+      ":base",
+      "//build/config/sanitizers:deps",
+      "//build/win:default_exe_manifest",
+    ]
+  }
+}
+
+# Test-only source_set bundling the message-loop test sources with their
+# gtest dependency so other test suites can include them.
+source_set("message_loop_tests") {
+  testonly = true
+  sources = [
+    "message_loop/message_loop_test.cc",
+    "message_loop/message_loop_test.h",
+  ]
+
+  deps = [
+    ":base",
+    "//testing/gtest",
+  ]
+}
+
+# Windows-only helper DLLs for base tests.
+if (is_win) {
+  # Target to manually rebuild pe_image_test.dll which is checked into
+  # base/test/data/pe_image.
+  shared_library("pe_image_test") {
+    sources = [
+      "win/pe_image_test.cc",
+    ]
+    # Delay-load two system DLLs and build as a GUI-subsystem binary.
+    ldflags = [
+      "/DELAYLOAD:cfgmgr32.dll",
+      "/DELAYLOAD:shell32.dll",
+      "/SUBSYSTEM:WINDOWS",
+    ]
+    libs = [
+      "cfgmgr32.lib",
+      "shell32.lib",
+    ]
+    deps = [
+      "//build/config/sanitizers:deps",
+    ]
+  }
+
+  # Module presumably loaded at runtime by the scoped_handle tests — hence
+  # loadable_module rather than shared_library.
+  loadable_module("scoped_handle_test_dll") {
+    sources = [
+      "win/scoped_handle_test_dll.cc",
+    ]
+    deps = [
+      ":base",
+      "//base/win:base_win_features",
+    ]
+  }
+
+  if (current_cpu == "x64") {
+    # Must be a shared library so that it can be unloaded during testing.
+    shared_library("base_profiler_test_support_library") {
+      sources = [
+        "profiler/test_support_library.cc",
+      ]
+      deps = [
+        "//build/config/sanitizers:deps",
+      ]
+    }
+  }
+}
+
+# Test data files copied into the test bundle (effective on bundle-based
+# platforms such as iOS/Mac), preserving each file's source-relative path.
+bundle_data("base_unittests_bundle_data") {
+  testonly = true
+  sources = [
+    "test/data/file_util/binary_file.bin",
+    "test/data/file_util/binary_file_diff.bin",
+    "test/data/file_util/binary_file_same.bin",
+    "test/data/file_util/blank_line.txt",
+    "test/data/file_util/blank_line_crlf.txt",
+    "test/data/file_util/crlf.txt",
+    "test/data/file_util/different.txt",
+    "test/data/file_util/different_first.txt",
+    "test/data/file_util/different_last.txt",
+    "test/data/file_util/empty1.txt",
+    "test/data/file_util/empty2.txt",
+    "test/data/file_util/first1.txt",
+    "test/data/file_util/first2.txt",
+    "test/data/file_util/original.txt",
+    "test/data/file_util/same.txt",
+    "test/data/file_util/same_length.txt",
+    "test/data/file_util/shortened.txt",
+    "test/data/json/bom_feff.json",
+    "test/data/serializer_nested_test.json",
+    "test/data/serializer_test.json",
+    "test/data/serializer_test_nowhitespace.json",
+  ]
+  # Mirror the source layout inside the bundle's resources directory.
+  outputs = [
+    "{{bundle_resources_dir}}/" +
+        "{{source_root_relative_dir}}/{{source_file_part}}",
+  ]
+}
+
+# Unit-test sources that must be compiled with Objective-C ARC enabled. The
+# sources assignment filter is cleared so these .mm files are always kept,
+# then restored afterwards.
+if (is_ios || is_mac) {
+  source_set("base_unittests_arc") {
+    testonly = true
+    set_sources_assignment_filter([])
+    sources = [
+      "mac/bind_objc_block_unittest_arc.mm",
+      "mac/scoped_nsobject_unittest_arc.mm",
+    ]
+    set_sources_assignment_filter(sources_assignment_filter)
+    configs += [ "//build/config/compiler:enable_arc" ]
+    deps = [
+      ":base",
+      "//testing/gtest",
+    ]
+  }
+}
+
+test("base_unittests") {
+ sources = [
+ "allocator/tcmalloc_unittest.cc",
+ "android/application_status_listener_unittest.cc",
+ "android/content_uri_utils_unittest.cc",
+ "android/jni_android_unittest.cc",
+ "android/jni_array_unittest.cc",
+ "android/jni_string_unittest.cc",
+ "android/library_loader/library_prefetcher_unittest.cc",
+ "android/path_utils_unittest.cc",
+ "android/scoped_java_ref_unittest.cc",
+ "android/sys_utils_unittest.cc",
+ "at_exit_unittest.cc",
+ "atomicops_unittest.cc",
+ "barrier_closure_unittest.cc",
+ "base64_unittest.cc",
+ "base64url_unittest.cc",
+ "big_endian_unittest.cc",
+ "bind_unittest.cc",
+ "bit_cast_unittest.cc",
+ "bits_unittest.cc",
+ "build_time_unittest.cc",
+ "callback_helpers_unittest.cc",
+ "callback_list_unittest.cc",
+ "callback_unittest.cc",
+ "cancelable_callback_unittest.cc",
+ "command_line_unittest.cc",
+ "containers/adapters_unittest.cc",
+ "containers/hash_tables_unittest.cc",
+ "containers/linked_list_unittest.cc",
+ "containers/mru_cache_unittest.cc",
+ "containers/scoped_ptr_hash_map_unittest.cc",
+ "containers/small_map_unittest.cc",
+ "containers/stack_container_unittest.cc",
+ "cpu_unittest.cc",
+ "debug/crash_logging_unittest.cc",
+ "debug/debugger_unittest.cc",
+ "debug/leak_tracker_unittest.cc",
+ "debug/proc_maps_linux_unittest.cc",
+ "debug/stack_trace_unittest.cc",
+ "debug/task_annotator_unittest.cc",
+ "deferred_sequenced_task_runner_unittest.cc",
+ "environment_unittest.cc",
+ "feature_list_unittest.cc",
+ "file_version_info_win_unittest.cc",
+ "files/dir_reader_posix_unittest.cc",
+ "files/file_locking_unittest.cc",
+ "files/file_path_unittest.cc",
+ "files/file_path_watcher_unittest.cc",
+ "files/file_proxy_unittest.cc",
+ "files/file_unittest.cc",
+ "files/file_util_proxy_unittest.cc",
+ "files/file_util_unittest.cc",
+ "files/important_file_writer_unittest.cc",
+ "files/memory_mapped_file_unittest.cc",
+ "files/scoped_temp_dir_unittest.cc",
+ "gmock_unittest.cc",
+ "guid_unittest.cc",
+ "hash_unittest.cc",
+ "i18n/break_iterator_unittest.cc",
+ "i18n/case_conversion_unittest.cc",
+ "i18n/char_iterator_unittest.cc",
+ "i18n/file_util_icu_unittest.cc",
+ "i18n/icu_string_conversions_unittest.cc",
+ "i18n/message_formatter_unittest.cc",
+ "i18n/number_formatting_unittest.cc",
+ "i18n/rtl_unittest.cc",
+ "i18n/streaming_utf8_validator_unittest.cc",
+ "i18n/string_search_unittest.cc",
+ "i18n/time_formatting_unittest.cc",
+ "i18n/timezone_unittest.cc",
+ "id_map_unittest.cc",
+ "ios/device_util_unittest.mm",
+ "ios/weak_nsobject_unittest.mm",
+ "json/json_parser_unittest.cc",
+ "json/json_reader_unittest.cc",
+ "json/json_value_converter_unittest.cc",
+ "json/json_value_serializer_unittest.cc",
+ "json/json_writer_unittest.cc",
+ "json/string_escape_unittest.cc",
+ "lazy_instance_unittest.cc",
+ "logging_unittest.cc",
+ "mac/bind_objc_block_unittest.mm",
+ "mac/call_with_eh_frame_unittest.mm",
+ "mac/dispatch_source_mach_unittest.cc",
+ "mac/foundation_util_unittest.mm",
+ "mac/mac_util_unittest.mm",
+ "mac/mach_port_broker_unittest.cc",
+ "mac/objc_property_releaser_unittest.mm",
+ "mac/scoped_nsobject_unittest.mm",
+ "mac/scoped_objc_class_swizzler_unittest.mm",
+ "mac/scoped_sending_event_unittest.mm",
+ "md5_unittest.cc",
+ "memory/aligned_memory_unittest.cc",
+ "memory/discardable_shared_memory_unittest.cc",
+ "memory/linked_ptr_unittest.cc",
+ "memory/memory_pressure_listener_unittest.cc",
+ "memory/memory_pressure_monitor_chromeos_unittest.cc",
+ "memory/memory_pressure_monitor_mac_unittest.cc",
+ "memory/memory_pressure_monitor_win_unittest.cc",
+ "memory/ptr_util_unittest.cc",
+ "memory/ref_counted_memory_unittest.cc",
+ "memory/ref_counted_unittest.cc",
+ "memory/scoped_vector_unittest.cc",
+ "memory/shared_memory_mac_unittest.cc",
+ "memory/shared_memory_unittest.cc",
+ "memory/shared_memory_win_unittest.cc",
+ "memory/singleton_unittest.cc",
+ "memory/weak_ptr_unittest.cc",
+ "message_loop/message_loop_task_runner_unittest.cc",
+ "message_loop/message_loop_unittest.cc",
+ "message_loop/message_pump_glib_unittest.cc",
+ "message_loop/message_pump_io_ios_unittest.cc",
+ "metrics/bucket_ranges_unittest.cc",
+ "metrics/field_trial_unittest.cc",
+ "metrics/histogram_base_unittest.cc",
+ "metrics/histogram_delta_serialization_unittest.cc",
+ "metrics/histogram_macros_unittest.cc",
+ "metrics/histogram_snapshot_manager_unittest.cc",
+ "metrics/histogram_unittest.cc",
+ "metrics/metrics_hashes_unittest.cc",
+ "metrics/persistent_histogram_allocator_unittest.cc",
+ "metrics/persistent_memory_allocator_unittest.cc",
+ "metrics/persistent_sample_map_unittest.cc",
+ "metrics/sample_map_unittest.cc",
+ "metrics/sample_vector_unittest.cc",
+ "metrics/sparse_histogram_unittest.cc",
+ "metrics/statistics_recorder_unittest.cc",
+ "native_library_unittest.cc",
+ "numerics/safe_numerics_unittest.cc",
+ "observer_list_unittest.cc",
+ "optional_unittest.cc",
+ "os_compat_android_unittest.cc",
+ "path_service_unittest.cc",
+ "pickle_unittest.cc",
+ "posix/file_descriptor_shuffle_unittest.cc",
+ "posix/unix_domain_socket_linux_unittest.cc",
+ "power_monitor/power_monitor_unittest.cc",
+ "process/memory_unittest.cc",
+ "process/memory_unittest_mac.h",
+ "process/memory_unittest_mac.mm",
+ "process/process_metrics_unittest.cc",
+ "process/process_metrics_unittest_ios.cc",
+ "process/process_unittest.cc",
+ "process/process_util_unittest.cc",
+ "profiler/stack_sampling_profiler_unittest.cc",
+ "profiler/tracked_time_unittest.cc",
+ "rand_util_unittest.cc",
+ "run_loop_unittest.cc",
+ "scoped_clear_errno_unittest.cc",
+ "scoped_generic_unittest.cc",
+ "scoped_native_library_unittest.cc",
+ "security_unittest.cc",
+ "sequence_checker_unittest.cc",
+ "sha1_unittest.cc",
+ "stl_util_unittest.cc",
+ "strings/nullable_string16_unittest.cc",
+ "strings/pattern_unittest.cc",
+ "strings/safe_sprintf_unittest.cc",
+ "strings/string16_unittest.cc",
+ "strings/string_number_conversions_unittest.cc",
+ "strings/string_piece_unittest.cc",
+ "strings/string_split_unittest.cc",
+ "strings/string_tokenizer_unittest.cc",
+ "strings/string_util_unittest.cc",
+ "strings/stringize_macros_unittest.cc",
+ "strings/stringprintf_unittest.cc",
+ "strings/sys_string_conversions_mac_unittest.mm",
+ "strings/sys_string_conversions_unittest.cc",
+ "strings/utf_offset_string_conversions_unittest.cc",
+ "strings/utf_string_conversions_unittest.cc",
+ "supports_user_data_unittest.cc",
+ "sync_socket_unittest.cc",
+ "synchronization/cancellation_flag_unittest.cc",
+ "synchronization/condition_variable_unittest.cc",
+ "synchronization/lock_unittest.cc",
+ "synchronization/read_write_lock_unittest.cc",
+ "synchronization/waitable_event_unittest.cc",
+ "synchronization/waitable_event_watcher_unittest.cc",
+ "sys_byteorder_unittest.cc",
+ "sys_info_unittest.cc",
+ "system_monitor/system_monitor_unittest.cc",
+ "task/cancelable_task_tracker_unittest.cc",
+ "task_runner_util_unittest.cc",
+ "task_scheduler/delayed_task_manager_unittest.cc",
+ "task_scheduler/priority_queue_unittest.cc",
+ "task_scheduler/scheduler_lock_unittest.cc",
+ "task_scheduler/scheduler_service_thread_unittest.cc",
+ "task_scheduler/scheduler_worker_pool_impl_unittest.cc",
+ "task_scheduler/scheduler_worker_stack_unittest.cc",
+ "task_scheduler/scheduler_worker_unittest.cc",
+ "task_scheduler/sequence_sort_key_unittest.cc",
+ "task_scheduler/sequence_unittest.cc",
+ "task_scheduler/task_scheduler_impl_unittest.cc",
+ "task_scheduler/task_tracker_unittest.cc",
+ "task_scheduler/test_task_factory.cc",
+ "task_scheduler/test_task_factory.h",
+ "task_scheduler/test_utils.h",
+ "template_util_unittest.cc",
+ "test/histogram_tester_unittest.cc",
+ "test/icu_test_util.cc",
+ "test/icu_test_util.h",
+ "test/test_pending_task_unittest.cc",
+ "test/test_reg_util_win_unittest.cc",
+ "test/trace_event_analyzer_unittest.cc",
+ "test/user_action_tester_unittest.cc",
+ "threading/non_thread_safe_unittest.cc",
+ "threading/platform_thread_unittest.cc",
+ "threading/sequenced_task_runner_handle_unittest.cc",
+ "threading/sequenced_worker_pool_unittest.cc",
+ "threading/simple_thread_unittest.cc",
+ "threading/thread_checker_unittest.cc",
+ "threading/thread_collision_warner_unittest.cc",
+ "threading/thread_id_name_manager_unittest.cc",
+ "threading/thread_local_storage_unittest.cc",
+ "threading/thread_local_unittest.cc",
+ "threading/thread_unittest.cc",
+ "threading/watchdog_unittest.cc",
+ "threading/worker_pool_posix_unittest.cc",
+ "threading/worker_pool_unittest.cc",
+ "time/pr_time_unittest.cc",
+ "time/time_unittest.cc",
+ "time/time_win_unittest.cc",
+ "timer/hi_res_timer_manager_unittest.cc",
+ "timer/mock_timer_unittest.cc",
+ "timer/timer_unittest.cc",
+ "tools_sanity_unittest.cc",
+ "trace_event/blame_context_unittest.cc",
+ "trace_event/heap_profiler_allocation_context_tracker_unittest.cc",
+ "trace_event/heap_profiler_allocation_register_unittest.cc",
+ "trace_event/heap_profiler_heap_dump_writer_unittest.cc",
+ "trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc",
+ "trace_event/heap_profiler_type_name_deduplicator_unittest.cc",
+ "trace_event/java_heap_dump_provider_android_unittest.cc",
+ "trace_event/memory_allocator_dump_unittest.cc",
+ "trace_event/memory_dump_manager_unittest.cc",
+ "trace_event/process_memory_dump_unittest.cc",
+ "trace_event/trace_config_unittest.cc",
+ "trace_event/trace_event_argument_unittest.cc",
+ "trace_event/trace_event_synthetic_delay_unittest.cc",
+ "trace_event/trace_event_system_stats_monitor_unittest.cc",
+ "trace_event/trace_event_unittest.cc",
+ "trace_event/winheap_dump_provider_win_unittest.cc",
+ "tracked_objects_unittest.cc",
+ "tuple_unittest.cc",
+ "values_unittest.cc",
+ "version_unittest.cc",
+ "vlog_unittest.cc",
+ "win/dllmain.cc",
+ "win/enum_variant_unittest.cc",
+ "win/event_trace_consumer_unittest.cc",
+ "win/event_trace_controller_unittest.cc",
+ "win/event_trace_provider_unittest.cc",
+ "win/i18n_unittest.cc",
+ "win/iunknown_impl_unittest.cc",
+ "win/message_window_unittest.cc",
+ "win/object_watcher_unittest.cc",
+ "win/pe_image_unittest.cc",
+ "win/registry_unittest.cc",
+ "win/scoped_bstr_unittest.cc",
+ "win/scoped_comptr_unittest.cc",
+ "win/scoped_handle_unittest.cc",
+ "win/scoped_process_information_unittest.cc",
+ "win/scoped_variant_unittest.cc",
+ "win/shortcut_unittest.cc",
+ "win/startup_information_unittest.cc",
+ "win/wait_chain_unittest.cc",
+ "win/win_util_unittest.cc",
+ "win/windows_version_unittest.cc",
+ "win/wrapped_window_proc_unittest.cc",
+ ]
+
+ defines = []
+
+ deps = [
+ ":base",
+ ":i18n",
+ ":message_loop_tests",
+ "//base/test:run_all_unittests",
+ "//base/test:test_support",
+ "//base/third_party/dynamic_annotations",
+ "//testing/gmock",
+ "//testing/gtest",
+ "//third_party/icu",
+ ]
+
+ if (is_ios || is_mac) {
+ deps += [ ":base_unittests_arc" ]
+ }
+
+ public_deps = [
+ ":base_unittests_bundle_data",
+ ]
+
+ # Some unittests depend on the ALLOCATOR_SHIM macro.
+ configs += [ "//base/allocator:allocator_shim_define" ]
+
+ data = [
+ "test/data/",
+ ]
+
+ # Allow more direct string conversions on platforms with native utf8
+ # strings
+ if (is_mac || is_ios || is_chromeos || is_chromecast) {
+ defines += [ "SYSTEM_NATIVE_UTF8" ]
+ }
+
+ if (is_android) {
+ deps += [
+ ":base_java",
+ ":base_java_unittest_support",
+ "//base/android/jni_generator:jni_generator_tests",
+ ]
+ }
+
+ if (is_ios) {
+ sources -= [
+ "files/file_locking_unittest.cc",
+ "files/file_path_watcher_unittest.cc",
+ "memory/discardable_shared_memory_unittest.cc",
+ "memory/shared_memory_unittest.cc",
+ "process/memory_unittest.cc",
+ "process/process_unittest.cc",
+ "process/process_util_unittest.cc",
+ "sync_socket_unittest.cc",
+ ]
+
+ # Pull in specific Mac files for iOS (which have been filtered out by file
+ # name rules).
+ set_sources_assignment_filter([])
+ sources += [
+ "mac/bind_objc_block_unittest.mm",
+ "mac/foundation_util_unittest.mm",
+ "mac/objc_property_releaser_unittest.mm",
+ "mac/scoped_nsobject_unittest.mm",
+ "strings/sys_string_conversions_mac_unittest.mm",
+ ]
+ set_sources_assignment_filter(sources_assignment_filter)
+
+ # TODO(GYP): dep on copy_test_data_ios action.
+ }
+
+ if (is_mac) {
+ libs = [
+ "CoreFoundation.framework",
+ "Foundation.framework",
+ ]
+ }
+
+ if (is_linux) {
+ if (is_desktop_linux) {
+ sources += [ "nix/xdg_util_unittest.cc" ]
+ }
+
+ deps += [ "//base/test:malloc_wrapper" ]
+
+ if (use_glib) {
+ configs += [ "//build/config/linux:glib" ]
+ }
+
+ if (!is_component_build) {
+ # Set rpath to find libmalloc_wrapper.so even in a non-component build.
+ configs += [ "//build/config/gcc:rpath_for_built_shared_libraries" ]
+ }
+ }
+
+ if (!use_glib) {
+ sources -= [ "message_loop/message_pump_glib_unittest.cc" ]
+ }
+
+ if (is_posix && !is_ios) {
+ sources += [ "message_loop/message_pump_libevent_unittest.cc" ]
+ deps += [ "//base/third_party/libevent" ]
+ }
+
+ if (is_android) {
+ deps += [ "//testing/android/native_test:native_test_native_code" ]
+ set_sources_assignment_filter([])
+ sources += [
+ "debug/proc_maps_linux_unittest.cc",
+ "trace_event/trace_event_android_unittest.cc",
+ ]
+ set_sources_assignment_filter(sources_assignment_filter)
+ }
+
+ if (is_win) {
+ deps += [ "//base:scoped_handle_test_dll" ]
+ if (current_cpu == "x64") {
+ sources += [ "profiler/win32_stack_frame_unwinder_unittest.cc" ]
+ deps += [ ":base_profiler_test_support_library" ]
+ }
+ }
+
+ if (use_experimental_allocator_shim) {
+ sources += [ "allocator/allocator_shim_unittest.cc" ]
+ }
+
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
+ # Symbols for crashes when running tests on swarming.
+ if (symbol_level > 0) {
+ if (is_win) {
+ data += [ "$root_out_dir/base_unittests.exe.pdb" ]
+ } else if (is_mac) {
+ # TODO(crbug.com/330301): make this conditional on mac_strip_release.
+ # data += [ "$root_out_dir/base_unittests.dSYM/" ]
+ }
+ }
+
+ if (use_cfi_cast) {
+ # TODO(krasin): remove CFI_CAST_CHECK, see https://crbug.com/626794.
+ defines += [ "CFI_CAST_CHECK" ]
+ }
+}
+
+action("build_date") {
+ script = "//build/write_build_date_header.py"
+
+ # Force recalculation if there's been a change.
+ inputs = [
+ "//build/util/LASTCHANGE",
+ ]
+ outputs = [
+ "$target_gen_dir/generated_build_date.h",
+ ]
+
+ args =
+ [ rebase_path("$target_gen_dir/generated_build_date.h", root_build_dir) ]
+
+ if (is_official_build) {
+ args += [ "official" ]
+ } else {
+ args += [ "default" ]
+ }
+
+ if (override_build_date != "N/A") {
+ args += [ override_build_date ]
+ }
+}
+
+if (enable_nocompile_tests) {
+ nocompile_test("base_nocompile_tests") {
+ sources = [
+ "bind_unittest.nc",
+ "callback_list_unittest.nc",
+ "callback_unittest.nc",
+ "memory/weak_ptr_unittest.nc",
+ ]
+
+ deps = [
+ ":base",
+ "//base/test:run_all_unittests",
+ "//testing/gtest",
+ ]
+ }
+}
+
+if (is_android) {
+ # GYP: //base.gyp:base_jni_headers
+ generate_jni("base_jni_headers") {
+ sources = [
+ "android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java",
+ "android/java/src/org/chromium/base/ApkAssets.java",
+ "android/java/src/org/chromium/base/ApplicationStatus.java",
+ "android/java/src/org/chromium/base/BuildInfo.java",
+ "android/java/src/org/chromium/base/Callback.java",
+ "android/java/src/org/chromium/base/CommandLine.java",
+ "android/java/src/org/chromium/base/ContentUriUtils.java",
+ "android/java/src/org/chromium/base/ContextUtils.java",
+ "android/java/src/org/chromium/base/CpuFeatures.java",
+ "android/java/src/org/chromium/base/EventLog.java",
+ "android/java/src/org/chromium/base/FieldTrialList.java",
+ "android/java/src/org/chromium/base/ImportantFileWriterAndroid.java",
+ "android/java/src/org/chromium/base/JNIUtils.java",
+ "android/java/src/org/chromium/base/JavaHandlerThread.java",
+ "android/java/src/org/chromium/base/LocaleUtils.java",
+ "android/java/src/org/chromium/base/MemoryPressureListener.java",
+ "android/java/src/org/chromium/base/PathService.java",
+ "android/java/src/org/chromium/base/PathUtils.java",
+ "android/java/src/org/chromium/base/PowerMonitor.java",
+ "android/java/src/org/chromium/base/SysUtils.java",
+ "android/java/src/org/chromium/base/SystemMessageHandler.java",
+ "android/java/src/org/chromium/base/ThreadUtils.java",
+ "android/java/src/org/chromium/base/TraceEvent.java",
+ "android/java/src/org/chromium/base/library_loader/LibraryLoader.java",
+ "android/java/src/org/chromium/base/metrics/RecordHistogram.java",
+ "android/java/src/org/chromium/base/metrics/RecordUserAction.java",
+ ]
+
+ public_deps = [
+ ":android_runtime_jni_headers",
+ ]
+
+ jni_package = "base"
+ }
+
+ # GYP: //base.gyp:android_runtime_jni_headers
+ generate_jar_jni("android_runtime_jni_headers") {
+ jni_package = "base"
+ classes = [ "java/lang/Runtime.class" ]
+ }
+
+ # GYP: //base.gyp:base_java
+ android_library("base_java") {
+ srcjar_deps = [
+ ":base_android_java_enums_srcjar",
+ ":base_build_config_gen",
+ ":base_native_libraries_gen",
+ ]
+
+ deps = [
+ "//third_party/android_tools:android_support_multidex_java",
+ "//third_party/jsr-305:jsr_305_javalib",
+ ]
+
+ java_files = [
+ "android/java/src/org/chromium/base/ActivityState.java",
+ "android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java",
+ "android/java/src/org/chromium/base/ApiCompatibilityUtils.java",
+ "android/java/src/org/chromium/base/ApkAssets.java",
+ "android/java/src/org/chromium/base/ApplicationStatus.java",
+ "android/java/src/org/chromium/base/BaseChromiumApplication.java",
+ "android/java/src/org/chromium/base/BaseSwitches.java",
+ "android/java/src/org/chromium/base/BuildInfo.java",
+ "android/java/src/org/chromium/base/Callback.java",
+ "android/java/src/org/chromium/base/CollectionUtil.java",
+ "android/java/src/org/chromium/base/CommandLine.java",
+ "android/java/src/org/chromium/base/CommandLineInitUtil.java",
+ "android/java/src/org/chromium/base/ContentUriUtils.java",
+ "android/java/src/org/chromium/base/ContextUtils.java",
+ "android/java/src/org/chromium/base/CpuFeatures.java",
+ "android/java/src/org/chromium/base/EventLog.java",
+ "android/java/src/org/chromium/base/FieldTrialList.java",
+ "android/java/src/org/chromium/base/FileUtils.java",
+ "android/java/src/org/chromium/base/ImportantFileWriterAndroid.java",
+ "android/java/src/org/chromium/base/JNIUtils.java",
+ "android/java/src/org/chromium/base/JavaHandlerThread.java",
+ "android/java/src/org/chromium/base/LocaleUtils.java",
+ "android/java/src/org/chromium/base/Log.java",
+ "android/java/src/org/chromium/base/MemoryPressureListener.java",
+ "android/java/src/org/chromium/base/ObserverList.java",
+ "android/java/src/org/chromium/base/PackageUtils.java",
+ "android/java/src/org/chromium/base/PathService.java",
+ "android/java/src/org/chromium/base/PathUtils.java",
+ "android/java/src/org/chromium/base/PerfTraceEvent.java",
+ "android/java/src/org/chromium/base/PowerMonitor.java",
+ "android/java/src/org/chromium/base/PowerStatusReceiver.java",
+ "android/java/src/org/chromium/base/Promise.java",
+ "android/java/src/org/chromium/base/ResourceExtractor.java",
+ "android/java/src/org/chromium/base/SecureRandomInitializer.java",
+ "android/java/src/org/chromium/base/StreamUtil.java",
+ "android/java/src/org/chromium/base/SysUtils.java",
+ "android/java/src/org/chromium/base/SystemMessageHandler.java",
+ "android/java/src/org/chromium/base/ThreadUtils.java",
+ "android/java/src/org/chromium/base/TraceEvent.java",
+ "android/java/src/org/chromium/base/VisibleForTesting.java",
+ "android/java/src/org/chromium/base/annotations/AccessedByNative.java",
+ "android/java/src/org/chromium/base/annotations/CalledByNative.java",
+ "android/java/src/org/chromium/base/annotations/CalledByNativeUnchecked.java",
+ "android/java/src/org/chromium/base/annotations/JNIAdditionalImport.java",
+ "android/java/src/org/chromium/base/annotations/JNINamespace.java",
+ "android/java/src/org/chromium/base/annotations/MainDex.java",
+ "android/java/src/org/chromium/base/annotations/NativeCall.java",
+ "android/java/src/org/chromium/base/annotations/NativeClassQualifiedName.java",
+ "android/java/src/org/chromium/base/annotations/RemovableInRelease.java",
+ "android/java/src/org/chromium/base/annotations/SuppressFBWarnings.java",
+ "android/java/src/org/chromium/base/annotations/UsedByReflection.java",
+ "android/java/src/org/chromium/base/library_loader/LegacyLinker.java",
+ "android/java/src/org/chromium/base/library_loader/LibraryLoader.java",
+ "android/java/src/org/chromium/base/library_loader/Linker.java",
+ "android/java/src/org/chromium/base/library_loader/LoaderErrors.java",
+ "android/java/src/org/chromium/base/library_loader/ModernLinker.java",
+ "android/java/src/org/chromium/base/library_loader/NativeLibraryPreloader.java",
+ "android/java/src/org/chromium/base/library_loader/ProcessInitException.java",
+ "android/java/src/org/chromium/base/metrics/RecordHistogram.java",
+ "android/java/src/org/chromium/base/metrics/RecordUserAction.java",
+ "android/java/src/org/chromium/base/multidex/ChromiumMultiDexInstaller.java",
+ ]
+
+ # New versions of BuildConfig.java and NativeLibraries.java
+ # (with the actual correct values) will be created when creating an apk.
+ jar_excluded_patterns = [
+ "*/BuildConfig.class",
+ "*/NativeLibraries.class",
+ "*/NativeLibraries##*.class",
+ ]
+ }
+
+ # GYP: //base.gyp:base_javatests
+ android_library("base_javatests") {
+ deps = [
+ ":base_java",
+ ":base_java_test_support",
+ ]
+ java_files = [
+ "android/javatests/src/org/chromium/base/AdvancedMockContextTest.java",
+ "android/javatests/src/org/chromium/base/ApiCompatibilityUtilsTest.java",
+ "android/javatests/src/org/chromium/base/CommandLineInitUtilTest.java",
+ "android/javatests/src/org/chromium/base/CommandLineTest.java",
+ "android/javatests/src/org/chromium/base/ObserverListTest.java",
+ "android/javatests/src/org/chromium/base/metrics/RecordHistogramTest.java",
+ ]
+ }
+
+ # GYP: //base.gyp:base_java_test_support
+ android_library("base_java_test_support") {
+ deps = [
+ ":base_java",
+ "//testing/android/reporter:reporter_java",
+ ]
+ java_files = [
+ "test/android/javatests/src/org/chromium/base/test/BaseActivityInstrumentationTestCase.java",
+ "test/android/javatests/src/org/chromium/base/test/BaseChromiumInstrumentationTestRunner.java",
+ "test/android/javatests/src/org/chromium/base/test/BaseInstrumentationTestRunner.java",
+ "test/android/javatests/src/org/chromium/base/test/BaseTestResult.java",
+ "test/android/javatests/src/org/chromium/base/test/util/AdvancedMockContext.java",
+ "test/android/javatests/src/org/chromium/base/test/util/CommandLineFlags.java",
+ "test/android/javatests/src/org/chromium/base/test/util/DisableIf.java",
+ "test/android/javatests/src/org/chromium/base/test/util/DisableIfSkipCheck.java",
+ "test/android/javatests/src/org/chromium/base/test/util/DisabledTest.java",
+ "test/android/javatests/src/org/chromium/base/test/util/EnormousTest.java",
+ "test/android/javatests/src/org/chromium/base/test/util/Feature.java",
+ "test/android/javatests/src/org/chromium/base/test/util/FlakyTest.java",
+ "test/android/javatests/src/org/chromium/base/test/util/InMemorySharedPreferences.java",
+ "test/android/javatests/src/org/chromium/base/test/util/InstrumentationUtils.java",
+ "test/android/javatests/src/org/chromium/base/test/util/IntegrationTest.java",
+ "test/android/javatests/src/org/chromium/base/test/util/Manual.java",
+ "test/android/javatests/src/org/chromium/base/test/util/MetricsUtils.java",
+ "test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevel.java",
+ "test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheck.java",
+ "test/android/javatests/src/org/chromium/base/test/util/PerfTest.java",
+ "test/android/javatests/src/org/chromium/base/test/util/Restriction.java",
+ "test/android/javatests/src/org/chromium/base/test/util/RestrictionSkipCheck.java",
+ "test/android/javatests/src/org/chromium/base/test/util/RetryOnFailure.java",
+ "test/android/javatests/src/org/chromium/base/test/util/ScalableTimeout.java",
+ "test/android/javatests/src/org/chromium/base/test/util/SkipCheck.java",
+ "test/android/javatests/src/org/chromium/base/test/util/TestFileUtil.java",
+ "test/android/javatests/src/org/chromium/base/test/util/TestThread.java",
+ "test/android/javatests/src/org/chromium/base/test/util/TimeoutScale.java",
+ "test/android/javatests/src/org/chromium/base/test/util/UrlUtils.java",
+ "test/android/javatests/src/org/chromium/base/test/util/parameter/BaseParameter.java",
+ "test/android/javatests/src/org/chromium/base/test/util/parameter/Parameter.java",
+ "test/android/javatests/src/org/chromium/base/test/util/parameter/Parameterizable.java",
+ "test/android/javatests/src/org/chromium/base/test/util/parameter/ParameterizedTest.java",
+ "test/android/javatests/src/org/chromium/base/test/util/parameter/parameters/MethodParameter.java",
+ ]
+ }
+
+ # TODO(jbudorick): Remove this once we roll to robolectric 3.0 and pull
+ # in the multidex shadow library. crbug.com/522043
+ # GYP: //base.gyp:base_junit_test_support
+ java_library("base_junit_test_support") {
+ testonly = true
+ java_files = [ "test/android/junit/src/org/chromium/base/test/shadows/ShadowMultiDex.java" ]
+ deps = [
+ "//third_party/android_tools:android_support_multidex_java",
+ "//third_party/robolectric:android-all-4.3_r2-robolectric-0",
+ "//third_party/robolectric:robolectric_java",
+ ]
+ srcjar_deps = [ ":base_build_config_gen" ]
+ }
+
+ # GYP: //base.gyp:base_junit_tests
+ junit_binary("base_junit_tests") {
+ java_files = [
+ "android/junit/src/org/chromium/base/BaseChromiumApplicationTest.java",
+ "android/junit/src/org/chromium/base/LogTest.java",
+ "android/junit/src/org/chromium/base/PromiseTest.java",
+ "test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java",
+ "test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java",
+ "test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java",
+ "test/android/junit/src/org/chromium/base/test/util/SkipCheckTest.java",
+ ]
+ deps = [
+ ":base_java",
+ ":base_java_test_support",
+ ":base_junit_test_support",
+ ]
+ }
+
+ # GYP: //base.gyp:base_java_application_state
+ # GYP: //base.gyp:base_java_library_load_from_apk_status_codes
+ # GYP: //base.gyp:base_java_library_process_type
+ # GYP: //base.gyp:base_java_memory_pressure_level
+ java_cpp_enum("base_android_java_enums_srcjar") {
+ sources = [
+ "android/application_status_listener.h",
+ "android/library_loader/library_load_from_apk_status_codes.h",
+ "android/library_loader/library_loader_hooks.h",
+ "memory/memory_pressure_listener.h",
+ ]
+ }
+
+ # GYP: //base/base.gyp:base_build_config_gen
+ java_cpp_template("base_build_config_gen") {
+ sources = [
+ "android/java/templates/BuildConfig.template",
+ ]
+ package_name = "org/chromium/base"
+
+ defines = []
+ if (!is_java_debug) {
+ defines += [ "NDEBUG" ]
+ }
+ }
+
+ # GYP: //base/base.gyp:base_native_libraries_gen
+ java_cpp_template("base_native_libraries_gen") {
+ sources = [
+ "android/java/templates/NativeLibraries.template",
+ ]
+ package_name = "org/chromium/base/library_loader"
+ }
+
+ # GYP: //base.gyp:base_java_unittest_support
+ android_library("base_java_unittest_support") {
+ deps = [
+ ":base_java",
+ ]
+ java_files =
+ [ "test/android/java/src/org/chromium/base/ContentUriTestUtils.java" ]
+ }
+}
diff --git a/libchrome/base/DEPS b/libchrome/base/DEPS
new file mode 100644
index 0000000..c0e95a0
--- /dev/null
+++ b/libchrome/base/DEPS
@@ -0,0 +1,15 @@
+include_rules = [
+ "+jni",
+ "+third_party/ashmem",
+ "+third_party/apple_apsl",
+ "+third_party/lss",
+ "+third_party/modp_b64",
+ "+third_party/tcmalloc",
+
+ # These are implicitly brought in from the root, and we don't want them.
+ "-ipc",
+ "-url",
+
+ # ICU dependencies must be separate from the rest of base.
+ "-i18n",
+]
diff --git a/libchrome/base/OWNERS b/libchrome/base/OWNERS
new file mode 100644
index 0000000..b6cfce4
--- /dev/null
+++ b/libchrome/base/OWNERS
@@ -0,0 +1,46 @@
+# About src/base:
+#
+# Chromium is a very mature project, most things that are generally useful are
+# already here, and things that are not here generally aren't useful.
+#
+# Base is pulled into many projects. For example, various ChromeOS daemons. So
+# the bar for adding stuff is that it must have demonstrated wide
+# applicability. Prefer to add things closer to where they're used (i.e. "not
+# base"), and pull into base only when needed. In a project our size,
+# sometimes even duplication is OK and inevitable.
+#
+# Adding a new logging macro DPVELOG_NE is not more clear than just
+# writing the stuff you want to log in a regular logging statement, even
+# if it makes your calling code longer. Just add it to your own code.
+#
+# If the code in question does not need to be used inside base, but will have
+# multiple consumers across the codebase, consider placing it in a new directory
+# under components/ instead.
+
+mark@chromium.org
+thakis@chromium.org
+danakj@chromium.org
+thestig@chromium.org
+dcheng@chromium.org
+
+# For Bind/Callback:
+per-file bind*=tzik@chromium.org
+per-file callback*=tzik@chromium.org
+
+# For Android-specific changes:
+per-file *android*=nyquist@chromium.org
+per-file *android*=rmcilroy@chromium.org
+per-file *android*=torne@chromium.org
+per-file *android*=yfriedman@chromium.org
+
+# For FeatureList API:
+per-file feature_list*=asvitkine@chromium.org
+per-file feature_list*=isherman@chromium.org
+
+# For bot infrastructure:
+per-file *.isolate=maruel@chromium.org
+per-file *.isolate=tandrii@chromium.org
+per-file *.isolate=vadimsh@chromium.org
+
+# For TCMalloc tests:
+per-file security_unittest.cc=jln@chromium.org
diff --git a/libchrome/base/PRESUBMIT.py b/libchrome/base/PRESUBMIT.py
new file mode 100644
index 0000000..7fc8107
--- /dev/null
+++ b/libchrome/base/PRESUBMIT.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Chromium presubmit script for src/base.
+
+See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
+for more details on the presubmit API built into depot_tools.
+"""
+
+def _CheckNoInterfacesInBase(input_api, output_api):
+ """Checks to make sure no files in libbase.a have |@interface|."""
+ pattern = input_api.re.compile(r'^\s*@interface', input_api.re.MULTILINE)
+ files = []
+ for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
+ if (f.LocalPath().startswith('base/') and
+ not "/ios/" in f.LocalPath() and
+ not "/test/" in f.LocalPath() and
+ not f.LocalPath().endswith('_unittest.mm') and
+ not f.LocalPath().endswith('mac/sdk_forward_declarations.h')):
+ contents = input_api.ReadFile(f)
+ if pattern.search(contents):
+ files.append(f)
+
+ if len(files):
+ return [ output_api.PresubmitError(
+ 'Objective-C interfaces or categories are forbidden in libbase. ' +
+ 'See http://groups.google.com/a/chromium.org/group/chromium-dev/' +
+ 'browse_thread/thread/efb28c10435987fd',
+ files) ]
+ return []
+
+
+def _CommonChecks(input_api, output_api):
+ """Checks common to both upload and commit."""
+ results = []
+ results.extend(_CheckNoInterfacesInBase(input_api, output_api))
+ return results
+
+def CheckChangeOnUpload(input_api, output_api):
+ results = []
+ results.extend(_CommonChecks(input_api, output_api))
+ return results
+
+
+def CheckChangeOnCommit(input_api, output_api):
+ results = []
+ results.extend(_CommonChecks(input_api, output_api))
+ return results
diff --git a/libchrome/base/allocator/BUILD.gn b/libchrome/base/allocator/BUILD.gn
new file mode 100644
index 0000000..490b8e8
--- /dev/null
+++ b/libchrome/base/allocator/BUILD.gn
@@ -0,0 +1,327 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/buildflag_header.gni")
+import("//build/config/allocator.gni")
+import("//build/config/compiler/compiler.gni")
+
+declare_args() {
+ # Provide a way to force disable debugallocation in Debug builds,
+ # e.g. for profiling (it's more rare to profile Debug builds,
+ # but people sometimes need to do that).
+ enable_debugallocation = is_debug
+}
+
+# Allocator shim is only enabled for Release static builds.
+win_use_allocator_shim = is_win && !is_component_build && !is_debug
+
+# This "allocator" meta-target will forward to the default allocator according
+# to the build settings.
+group("allocator") {
+ public_deps = []
+ deps = []
+
+ if (use_allocator == "tcmalloc") {
+ deps += [ ":tcmalloc" ]
+ }
+
+ if (win_use_allocator_shim) {
+ public_deps += [ ":allocator_shim" ]
+ }
+}
+
+# This config defines ALLOCATOR_SHIM in the same conditions that the allocator
+# shim will be used by the allocator target.
+#
+# TODO(brettw) this is only used in one place and is kind of mess, because it
+# assumes that the library using it will eventually be linked with
+# //base/allocator in the default way. Clean this up and delete this.
+config("allocator_shim_define") {
+ if (win_use_allocator_shim) {
+ defines = [ "ALLOCATOR_SHIM" ]
+ }
+}
+
+config("tcmalloc_flags") {
+ defines = []
+ if (enable_debugallocation) {
+ defines += [
+ # Use debugallocation for Debug builds to catch problems early
+ # and cleanly, http://crbug.com/30715 .
+ "TCMALLOC_FOR_DEBUGALLOCATION",
+ ]
+ }
+ if (use_experimental_allocator_shim) {
+ defines += [ "TCMALLOC_DONT_REPLACE_SYSTEM_ALLOC" ]
+ }
+ if (is_clang) {
+ cflags = [
+ # tcmalloc initializes some fields in the wrong order.
+ "-Wno-reorder",
+
+ # tcmalloc contains some unused local template specializations.
+ "-Wno-unused-function",
+
+ # tcmalloc uses COMPILE_ASSERT without static_assert but with
+ # typedefs.
+ "-Wno-unused-local-typedefs",
+
+ # for magic2_ in debugallocation.cc (only built in Debug builds)
+ # typedefs.
+ "-Wno-unused-private-field",
+ ]
+ } else {
+ cflags = []
+ }
+
+ if (is_linux || is_android) {
+ # We enable all warnings by default, but upstream disables a few.
+ # Keep "-Wno-*" flags in sync with upstream by comparing against:
+ # http://code.google.com/p/google-perftools/source/browse/trunk/Makefile.am
+ cflags += [
+ "-Wno-sign-compare",
+ "-Wno-unused-result",
+ ]
+ }
+}
+
+# This config is only used on Windows static release builds for the
+# allocator shim.
+if (win_use_allocator_shim) {
+ source_set("allocator_shim") {
+ sources = [
+ "allocator_shim_win.cc",
+ "allocator_shim_win.h",
+ ]
+ configs += [ ":allocator_shim_define" ]
+ }
+}
+
+if (use_allocator == "tcmalloc") {
+ # tcmalloc currently won't compile on Android.
+ source_set("tcmalloc") {
+ tcmalloc_dir = "//third_party/tcmalloc/chromium"
+
+ # Don't check tcmalloc's includes. These files include various files like
+ # base/foo.h and they actually refer to tcmalloc's forked copy of base
+ # rather than the regular one, which confuses the header checker.
+ check_includes = false
+
+ sources = [
+ # Generated for our configuration from tcmalloc's build
+ # and checked in.
+ "$tcmalloc_dir/src/config.h",
+ "$tcmalloc_dir/src/config_android.h",
+ "$tcmalloc_dir/src/config_linux.h",
+ "$tcmalloc_dir/src/config_win.h",
+
+ # tcmalloc native and forked files.
+ "$tcmalloc_dir/src/base/abort.cc",
+ "$tcmalloc_dir/src/base/abort.h",
+ "$tcmalloc_dir/src/base/arm_instruction_set_select.h",
+ "$tcmalloc_dir/src/base/atomicops-internals-arm-generic.h",
+ "$tcmalloc_dir/src/base/atomicops-internals-arm-v6plus.h",
+ "$tcmalloc_dir/src/base/atomicops-internals-linuxppc.h",
+ "$tcmalloc_dir/src/base/atomicops-internals-macosx.h",
+ "$tcmalloc_dir/src/base/atomicops-internals-windows.h",
+ "$tcmalloc_dir/src/base/atomicops-internals-x86.cc",
+ "$tcmalloc_dir/src/base/atomicops-internals-x86.h",
+ "$tcmalloc_dir/src/base/atomicops.h",
+ "$tcmalloc_dir/src/base/commandlineflags.h",
+ "$tcmalloc_dir/src/base/cycleclock.h",
+
+ # We don't list dynamic_annotations.c since its copy is already
+ # present in the dynamic_annotations target.
+ "$tcmalloc_dir/src/base/elf_mem_image.cc",
+ "$tcmalloc_dir/src/base/elf_mem_image.h",
+ "$tcmalloc_dir/src/base/linuxthreads.cc",
+ "$tcmalloc_dir/src/base/linuxthreads.h",
+ "$tcmalloc_dir/src/base/logging.cc",
+ "$tcmalloc_dir/src/base/logging.h",
+ "$tcmalloc_dir/src/base/low_level_alloc.cc",
+ "$tcmalloc_dir/src/base/low_level_alloc.h",
+ "$tcmalloc_dir/src/base/spinlock.cc",
+ "$tcmalloc_dir/src/base/spinlock.h",
+ "$tcmalloc_dir/src/base/spinlock_internal.cc",
+ "$tcmalloc_dir/src/base/spinlock_internal.h",
+ "$tcmalloc_dir/src/base/synchronization_profiling.h",
+ "$tcmalloc_dir/src/base/sysinfo.cc",
+ "$tcmalloc_dir/src/base/sysinfo.h",
+ "$tcmalloc_dir/src/base/vdso_support.cc",
+ "$tcmalloc_dir/src/base/vdso_support.h",
+ "$tcmalloc_dir/src/central_freelist.cc",
+ "$tcmalloc_dir/src/central_freelist.h",
+ "$tcmalloc_dir/src/common.cc",
+ "$tcmalloc_dir/src/common.h",
+
+ # #included by debugallocation_shim.cc
+ #"$tcmalloc_dir/src/debugallocation.cc",
+ "$tcmalloc_dir/src/free_list.cc",
+ "$tcmalloc_dir/src/free_list.h",
+ "$tcmalloc_dir/src/heap-profile-table.cc",
+ "$tcmalloc_dir/src/heap-profile-table.h",
+ "$tcmalloc_dir/src/heap-profiler.cc",
+ "$tcmalloc_dir/src/internal_logging.cc",
+ "$tcmalloc_dir/src/internal_logging.h",
+ "$tcmalloc_dir/src/linked_list.h",
+ "$tcmalloc_dir/src/malloc_extension.cc",
+ "$tcmalloc_dir/src/malloc_hook-inl.h",
+ "$tcmalloc_dir/src/malloc_hook.cc",
+ "$tcmalloc_dir/src/maybe_threads.cc",
+ "$tcmalloc_dir/src/maybe_threads.h",
+ "$tcmalloc_dir/src/memory_region_map.cc",
+ "$tcmalloc_dir/src/memory_region_map.h",
+ "$tcmalloc_dir/src/page_heap.cc",
+ "$tcmalloc_dir/src/page_heap.h",
+ "$tcmalloc_dir/src/raw_printer.cc",
+ "$tcmalloc_dir/src/raw_printer.h",
+ "$tcmalloc_dir/src/sampler.cc",
+ "$tcmalloc_dir/src/sampler.h",
+ "$tcmalloc_dir/src/span.cc",
+ "$tcmalloc_dir/src/span.h",
+ "$tcmalloc_dir/src/stack_trace_table.cc",
+ "$tcmalloc_dir/src/stack_trace_table.h",
+ "$tcmalloc_dir/src/stacktrace.cc",
+ "$tcmalloc_dir/src/static_vars.cc",
+ "$tcmalloc_dir/src/static_vars.h",
+ "$tcmalloc_dir/src/symbolize.cc",
+ "$tcmalloc_dir/src/symbolize.h",
+ "$tcmalloc_dir/src/system-alloc.cc",
+ "$tcmalloc_dir/src/system-alloc.h",
+
+ # #included by debugallocation_shim.cc
+ #"$tcmalloc_dir/src/tcmalloc.cc",
+ "$tcmalloc_dir/src/thread_cache.cc",
+ "$tcmalloc_dir/src/thread_cache.h",
+ "$tcmalloc_dir/src/windows/port.cc",
+ "$tcmalloc_dir/src/windows/port.h",
+ "debugallocation_shim.cc",
+
+ # These are both #included by allocator_shim for maximal linking.
+ #"generic_allocators.cc",
+ #"win_allocator.cc",
+ ]
+
+ # Disable the heap checker in tcmalloc.
+ defines = [ "NO_HEAP_CHECK" ]
+
+ include_dirs = [
+ ".",
+ "$tcmalloc_dir/src/base",
+ "$tcmalloc_dir/src",
+ ]
+
+ configs -= [ "//build/config/compiler:chromium_code" ]
+ configs += [
+ "//build/config/compiler:no_chromium_code",
+ ":tcmalloc_flags",
+ ]
+
+ deps = []
+
+ if (enable_profiling) {
+ sources += [
+ "$tcmalloc_dir/src/base/thread_lister.c",
+ "$tcmalloc_dir/src/base/thread_lister.h",
+ "$tcmalloc_dir/src/profile-handler.cc",
+ "$tcmalloc_dir/src/profile-handler.h",
+ "$tcmalloc_dir/src/profiledata.cc",
+ "$tcmalloc_dir/src/profiledata.h",
+ "$tcmalloc_dir/src/profiler.cc",
+ ]
+ defines += [ "ENABLE_PROFILING=1" ]
+ }
+
+ if (is_linux || is_android) {
+ sources -= [
+ "$tcmalloc_dir/src/system-alloc.h",
+ "$tcmalloc_dir/src/windows/port.cc",
+ "$tcmalloc_dir/src/windows/port.h",
+ ]
+
+ # Compiling tcmalloc with -fvisibility=default is only necessary when
+ # not using the allocator shim, which provides the correct visibility
+ # annotations for those symbols which need to be exported (see
+ # //base/allocator/allocator_shim_override_glibc_weak_symbols.h and
+ # //base/allocator/allocator_shim_internals.h for the definition of
+ # SHIM_ALWAYS_EXPORT).
+ if (!use_experimental_allocator_shim) {
+ configs -= [ "//build/config/gcc:symbol_visibility_hidden" ]
+ configs += [ "//build/config/gcc:symbol_visibility_default" ]
+ }
+
+ ldflags = [
+ # Don't let linker rip this symbol out, otherwise the heap&cpu
+ # profilers will not initialize properly on startup.
+ "-Wl,-uIsHeapProfilerRunning,-uProfilerStart",
+
+ # Do the same for heap leak checker.
+ "-Wl,-u_Z21InitialMallocHook_NewPKvj,-u_Z22InitialMallocHook_MMapPKvS0_jiiix,-u_Z22InitialMallocHook_SbrkPKvi",
+ "-Wl,-u_Z21InitialMallocHook_NewPKvm,-u_Z22InitialMallocHook_MMapPKvS0_miiil,-u_Z22InitialMallocHook_SbrkPKvl",
+ "-Wl,-u_ZN15HeapLeakChecker12IgnoreObjectEPKv,-u_ZN15HeapLeakChecker14UnIgnoreObjectEPKv",
+ ]
+ }
+
+    # Make sure the allocation library is optimized as much as possible when
+    # we're in release mode.
+ if (!is_debug) {
+ configs -= [ "//build/config/compiler:default_optimization" ]
+ configs += [ "//build/config/compiler:optimize_max" ]
+ }
+
+ deps += [ "//base/third_party/dynamic_annotations" ]
+ }
+} # use_allocator == "tcmalloc"
+
+buildflag_header("features") {
+ header = "features.h"
+ flags = [ "USE_EXPERIMENTAL_ALLOCATOR_SHIM=$use_experimental_allocator_shim" ]
+}
+
+if (use_experimental_allocator_shim) {
+ # Used to shim malloc symbols on Android. see //base/allocator/README.md.
+ config("wrap_malloc_symbols") {
+ ldflags = [
+ "-Wl,-wrap,calloc",
+ "-Wl,-wrap,free",
+ "-Wl,-wrap,malloc",
+ "-Wl,-wrap,memalign",
+ "-Wl,-wrap,posix_memalign",
+ "-Wl,-wrap,pvalloc",
+ "-Wl,-wrap,realloc",
+ "-Wl,-wrap,valloc",
+ ]
+ }
+
+ source_set("unified_allocator_shim") {
+ # TODO(primiano): support other platforms, currently this works only on
+ # Linux/CrOS/Android. http://crbug.com/550886 .
+ configs += [ "//base:base_implementation" ] # for BASE_EXPORT
+ visibility = [ "//base:base" ]
+ sources = [
+ "allocator_shim.cc",
+ "allocator_shim.h",
+ "allocator_shim_internals.h",
+ "allocator_shim_override_cpp_symbols.h",
+ "allocator_shim_override_libc_symbols.h",
+ ]
+ if (is_linux && use_allocator == "tcmalloc") {
+ sources += [
+ "allocator_shim_default_dispatch_to_tcmalloc.cc",
+ "allocator_shim_override_glibc_weak_symbols.h",
+ ]
+ deps = [
+ ":tcmalloc",
+ ]
+ } else if (is_linux && use_allocator == "none") {
+ sources += [ "allocator_shim_default_dispatch_to_glibc.cc" ]
+ } else if (is_android && use_allocator == "none") {
+ sources += [
+ "allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc",
+ "allocator_shim_override_linker_wrapped_symbols.h",
+ ]
+ all_dependent_configs = [ ":wrap_malloc_symbols" ]
+ }
+ }
+}
diff --git a/libchrome/base/allocator/OWNERS b/libchrome/base/allocator/OWNERS
new file mode 100644
index 0000000..f26394a
--- /dev/null
+++ b/libchrome/base/allocator/OWNERS
@@ -0,0 +1,5 @@
+primiano@chromium.org
+wfh@chromium.org
+
+# For changes to tcmalloc it is advisable to ask jar@chromium.org
+# before proceeding.
diff --git a/libchrome/base/allocator/README.md b/libchrome/base/allocator/README.md
new file mode 100644
index 0000000..164df51
--- /dev/null
+++ b/libchrome/base/allocator/README.md
@@ -0,0 +1,196 @@
+This document describes how malloc / new calls are routed in the various Chrome
+platforms.
+
+Bear in mind that the Chromium codebase does not always just use `malloc()`.
+Some examples:
+ - Large parts of the renderer (Blink) use two home-brewed allocators,
+ PartitionAlloc and BlinkGC (Oilpan).
+ - Some subsystems, such as the V8 JavaScript engine, handle memory management
+ autonomously.
+ - Various parts of the codebase use abstractions such as `SharedMemory` or
+ `DiscardableMemory` which, similarly to the above, have their own page-level
+ memory management.
+
+Background
+----------
+The `allocator` target defines at compile-time the platform-specific choice of
+the allocator and extra-hooks which services calls to malloc/new. The relevant
+build-time flags involved are `use_allocator` and `win_use_allocator_shim`.
+
+The default choices are as follows:
+
+**Windows**
+`use_allocator: winheap`, the default Windows heap.
+Additionally, `static_library` (i.e. non-component) builds have a shim
+layer wrapping malloc/new, which is controlled by `win_use_allocator_shim`.
+The shim layer provides extra security features, such as preventing large
+allocations that can hit signed vs. unsigned bugs in third_party code.
+
+**Linux Desktop / CrOS**
+`use_allocator: tcmalloc`, a forked copy of tcmalloc which resides in
+`third_party/tcmalloc/chromium`. Setting `use_allocator: none` causes the build
+to fall back to the system (Glibc) symbols.
+
+**Android**
+`use_allocator: none`, always use the allocator symbols coming from Android's
+libc (Bionic). As it is developed as part of the OS, it is considered to be
+optimized for small devices and more memory-efficient than other choices.
+The actual implementation backing malloc symbols in Bionic is up to the board
+config and can vary (typically *dlmalloc* or *jemalloc* on most Nexus devices).
+
+**Mac/iOS**
+`use_allocator: none`, we always use the system's allocator implementation.
+
+In addition, when building for `asan` / `msan` / `syzyasan` / `valgrind`,
+both the allocator and the shim layer are disabled.
+
+Layering and build deps
+-----------------------
+The `allocator` target provides both the source files for tcmalloc (where
+applicable) and the linker flags required for the Windows shim layer.
+The `base` target is (almost) the only one depending on `allocator`. No other
+targets should depend on it, with the exception of the very few executables /
+dynamic libraries that don't depend, either directly or indirectly, on `base`
+within the scope of a linker unit.
+
+More importantly, **no other place outside of `/base` should depend on the
+specific allocator** (e.g., directly include `third_party/tcmalloc`).
+If such a functional dependency is required that should be achieved using
+abstractions in `base` (see `/base/allocator/allocator_extension.h` and
+`/base/memory/`)
+
+**Why `base` depends on `allocator`?**
+Because it needs to provide services that depend on the actual allocator
+implementation. In the past `base` used to pretend to be allocator-agnostic
+and get the dependencies injected by other layers. This ended up being an
+inconsistent mess.
+See the [allocator cleanup doc][url-allocator-cleanup] for more context.
+
+Linker unit targets (executables and shared libraries) that depend in some way
+on `base` (most of the targets in the codebase) get automatically the correct
+set of linker flags to pull in tcmalloc or the Windows shim-layer.
+
+
+Source code
+-----------
+This directory contains just the allocator (i.e. shim) layer that switches
+between the different underlying memory allocation implementations.
+
+The tcmalloc library originates outside of Chromium and exists in
+`../../third_party/tcmalloc` (currently, the actual location is defined in the
+allocator.gyp file). The third party sources use a vendor-branch SCM pattern to
+track Chromium-specific changes independently from upstream changes.
+
+The general intent is to push local changes upstream so that over
+time we no longer need any forked files.
+
+
+Unified allocator shim
+----------------------
+On most platforms, Chrome overrides the malloc / operator new symbols (and
+corresponding free / delete and other variants). This is to enforce security
+checks and lately to enable the
+[memory-infra heap profiler][url-memory-infra-heap-profiler].
+Historically each platform had its special logic for defining the allocator
+symbols in different places of the codebase. The unified allocator shim is
+a project aimed to unify the symbol definition and allocator routing logic in
+a central place.
+
+ - Full documentation: [Allocator shim design doc][url-allocator-shim].
+ - Current state: Available and enabled by default on Linux, CrOS and Android.
+ - Tracking bug: [crbug.com/550886](https://crbug.com/550886).
+ - Build-time flag: `use_experimental_allocator_shim`.
+
+**Overview of the unified allocator shim**
+The allocator shim consists of three stages:
+```
++-------------------------+ +-----------------------+ +----------------+
+| malloc & friends | -> | shim layer | -> | Routing to |
+| symbols definition | | implementation | | allocator |
++-------------------------+ +-----------------------+ +----------------+
+| - libc symbols (malloc, | | - Security checks | | - tcmalloc |
+| calloc, free, ...) | | - Chain of dispatchers| | - glibc |
+| - C++ symbols (operator | | that can intercept | | - Android |
+| new, delete, ...) | | and override | | bionic |
+| - glibc weak symbols | | allocations | | - WinHeap |
+| (__libc_malloc, ...) | +-----------------------+ +----------------+
++-------------------------+
+```
+
+**1. malloc symbols definition**
+This stage takes care of overriding the symbols `malloc`, `free`,
+`operator new`, `operator delete` and friends and routing those calls inside the
+allocator shim (next point).
+This is taken care of by the headers in `allocator_shim_override_*`.
+
+*On Linux/CrOS*: the allocator symbols are defined as exported global symbols
+in `allocator_shim_override_libc_symbols.h` (for `malloc`, `free` and friends)
+and in `allocator_shim_override_cpp_symbols.h` (for `operator new`,
+`operator delete` and friends).
+This enables proper interposition of malloc symbols referenced by the main
+executable and any third party libraries. Symbol resolution on Linux is a breadth first search that starts from the root link unit, that is the executable
+(see EXECUTABLE AND LINKABLE FORMAT (ELF) - Portable Formats Specification).
+Additionally, when tcmalloc is the default allocator, some extra glibc symbols
+are also defined in `allocator_shim_override_glibc_weak_symbols.h`, for subtle
+reasons explained in that file.
+The Linux/CrOS shim was introduced by
+[crrev.com/1675143004](https://crrev.com/1675143004).
+
+*On Android*: load-time symbol interposition (unlike the Linux/CrOS case) is not
+possible. This is because Android processes are `fork()`-ed from the Android
+zygote, which pre-loads libc.so and only later native code gets loaded via
+`dlopen()` (symbols from `dlopen()`-ed libraries get a different resolution
+scope).
+In this case, the approach is instead to wrap symbol resolution at link time
+(i.e. during the build), via the `-Wl,-wrap,malloc` linker flag.
+The use of this wrapping flag causes:
+ - All references to allocator symbols in the Chrome codebase to be rewritten as
+ references to `__wrap_malloc` and friends. The `__wrap_malloc` symbols are
+ defined in the `allocator_shim_override_linker_wrapped_symbols.h` and
+ route allocator calls inside the shim layer.
+ - The reference to the original `malloc` symbols (which typically is defined by
+ the system's libc.so) are accessible via the special `__real_malloc` and
+ friends symbols (which will be relocated, at load time, against `malloc`).
+
+In summary, this approach is transparent to the dynamic loader, which still sees
+undefined symbol references to malloc symbols.
+These symbols will be resolved against libc.so as usual.
+More details in [crrev.com/1719433002](https://crrev.com/1719433002).
+
+**2. Shim layer implementation**
+This stage contains the actual shim implementation. This consists of:
+- A singly linked list of dispatchers (structs with function pointers to `malloc`-like functions). Dispatchers can be dynamically inserted at runtime
+(using the `InsertAllocatorDispatch` API). They can intercept and override
+allocator calls.
+- The security checks (suicide on malloc-failure via `std::new_handler`, etc).
+This happens inside `allocator_shim.cc`
+
+**3. Final allocator routing**
+The final element of the aforementioned dispatcher chain is statically defined
+at build time and ultimately routes the allocator calls to the actual allocator
+(as described in the *Background* section above). This is taken care of by the
+headers in `allocator_shim_default_dispatch_to_*` files.
+
+
+Appendixes
+----------
+**How does the Windows shim layer replace the malloc symbols?**
+The mechanism for hooking LIBCMT in Windows is rather tricky. The core
+problem is that by default, the Windows library does not declare malloc and
+free as weak symbols. Because of this, they cannot be overridden. To work
+around this, we start with the LIBCMT.LIB, and manually remove all allocator
+related functions from it using the visual studio library tool. Once removed,
+we can now link against the library and provide custom versions of the
+allocator related functionality.
+See the script `prep_libc.py` in this folder.
+
+Related links
+-------------
+- [Unified allocator shim doc - Feb 2016][url-allocator-shim]
+- [Allocator cleanup doc - Jan 2016][url-allocator-cleanup]
+- [Proposal to use PartitionAlloc as default allocator](https://crbug.com/339604)
+- [Memory-Infra: Tools to profile memory usage in Chrome](components/tracing/docs/memory_infra.md)
+
+[url-allocator-cleanup]: https://docs.google.com/document/d/1V77Kgp_4tfaaWPEZVxNevoD02wXiatnAv7Ssgr0hmjg/edit?usp=sharing
+[url-memory-infra-heap-profiler]: components/tracing/docs/heap_profiler.md
+[url-allocator-shim]: https://docs.google.com/document/d/1yKlO1AO4XjpDad9rjcBOI15EKdAGsuGO_IeZy0g0kxo/edit?usp=sharing
diff --git a/libchrome/base/allocator/allocator.gyp b/libchrome/base/allocator/allocator.gyp
new file mode 100644
index 0000000..674d4d6
--- /dev/null
+++ b/libchrome/base/allocator/allocator.gyp
@@ -0,0 +1,450 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'variables': {
+ # This code gets run a lot and debugged rarely, so it should be fast
+ # by default. See http://crbug.com/388949.
+ 'debug_optimize': '2',
+ 'win_debug_Optimization': '0',
+ # Run time checks are incompatible with any level of optimizations.
+ 'win_debug_RuntimeChecks': '0',
+ },
+ },
+ 'variables': {
+ 'tcmalloc_dir': '../../third_party/tcmalloc/chromium',
+ 'use_vtable_verify%': 0,
+ # Provide a way to force disable debugallocation in Debug builds
+ # e.g. for profiling (it's more rare to profile Debug builds,
+ # but people sometimes need to do that).
+ 'disable_debugallocation%': 0,
+ },
+ 'targets': [
+ # The only targets that should depend on allocator are 'base' and
+ # executables that don't depend, directly or indirectly, on base (a few).
+ # All the other targets get a transitive dependency on this target via base.
+ {
+ 'target_name': 'allocator',
+ 'variables': {
+ 'conditions': [
+ ['use_allocator!="none" or (OS=="win" and win_use_allocator_shim==1)', {
+ 'allocator_target_type%': 'static_library',
+ }, {
+ 'allocator_target_type%': 'none',
+ }],
+ ],
+ },
+ 'type': '<(allocator_target_type)',
+ 'toolsets': ['host', 'target'],
+ 'conditions': [
+ ['OS=="win" and win_use_allocator_shim==1', {
+ 'msvs_settings': {
+ # TODO(sgk): merge this with build/common.gypi settings
+ 'VCLibrarianTool': {
+ 'AdditionalOptions': ['/ignore:4006,4221'],
+ },
+ 'VCLinkerTool': {
+ 'AdditionalOptions': ['/ignore:4006'],
+ },
+ },
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'allocator_shim_win.cc',
+ 'allocator_shim_win.h',
+ ],
+ 'configurations': {
+ 'Debug_Base': {
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'RuntimeLibrary': '0',
+ },
+ },
+ },
+ },
+ }], # OS=="win"
+ ['use_allocator=="tcmalloc"', {
+ # Disable the heap checker in tcmalloc.
+ 'defines': [
+ 'NO_HEAP_CHECK',
+ ],
+ 'dependencies': [
+ '../third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+ ],
+ # The order of this include_dirs matters, as tc-malloc has its own
+ # base/ mini-fork. Do not factor these out of this conditions section.
+ 'include_dirs': [
+ '.',
+ '<(tcmalloc_dir)/src/base',
+ '<(tcmalloc_dir)/src',
+ '../..',
+ ],
+ 'sources': [
+ # Generated for our configuration from tcmalloc's build
+ # and checked in.
+ '<(tcmalloc_dir)/src/config.h',
+ '<(tcmalloc_dir)/src/config_android.h',
+ '<(tcmalloc_dir)/src/config_linux.h',
+ '<(tcmalloc_dir)/src/config_win.h',
+
+ # all tcmalloc native and forked files
+ '<(tcmalloc_dir)/src/addressmap-inl.h',
+ '<(tcmalloc_dir)/src/base/abort.cc',
+ '<(tcmalloc_dir)/src/base/abort.h',
+ '<(tcmalloc_dir)/src/base/arm_instruction_set_select.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-arm-generic.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-arm-v6plus.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-linuxppc.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-macosx.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-windows.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-x86.cc',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-x86.h',
+ '<(tcmalloc_dir)/src/base/atomicops.h',
+ '<(tcmalloc_dir)/src/base/commandlineflags.h',
+ '<(tcmalloc_dir)/src/base/cycleclock.h',
+ # We don't list dynamic_annotations.c since its copy is already
+ # present in the dynamic_annotations target.
+ '<(tcmalloc_dir)/src/base/dynamic_annotations.h',
+ '<(tcmalloc_dir)/src/base/elf_mem_image.cc',
+ '<(tcmalloc_dir)/src/base/elf_mem_image.h',
+ '<(tcmalloc_dir)/src/base/elfcore.h',
+ '<(tcmalloc_dir)/src/base/googleinit.h',
+ '<(tcmalloc_dir)/src/base/linux_syscall_support.h',
+ '<(tcmalloc_dir)/src/base/linuxthreads.cc',
+ '<(tcmalloc_dir)/src/base/linuxthreads.h',
+ '<(tcmalloc_dir)/src/base/logging.cc',
+ '<(tcmalloc_dir)/src/base/logging.h',
+ '<(tcmalloc_dir)/src/base/low_level_alloc.cc',
+ '<(tcmalloc_dir)/src/base/low_level_alloc.h',
+ '<(tcmalloc_dir)/src/base/simple_mutex.h',
+ '<(tcmalloc_dir)/src/base/spinlock.cc',
+ '<(tcmalloc_dir)/src/base/spinlock.h',
+ '<(tcmalloc_dir)/src/base/spinlock_internal.cc',
+ '<(tcmalloc_dir)/src/base/spinlock_internal.h',
+ '<(tcmalloc_dir)/src/base/spinlock_linux-inl.h',
+ '<(tcmalloc_dir)/src/base/spinlock_posix-inl.h',
+ '<(tcmalloc_dir)/src/base/spinlock_win32-inl.h',
+ '<(tcmalloc_dir)/src/base/stl_allocator.h',
+ '<(tcmalloc_dir)/src/base/synchronization_profiling.h',
+ '<(tcmalloc_dir)/src/base/sysinfo.cc',
+ '<(tcmalloc_dir)/src/base/sysinfo.h',
+ '<(tcmalloc_dir)/src/base/thread_annotations.h',
+ '<(tcmalloc_dir)/src/base/thread_lister.c',
+ '<(tcmalloc_dir)/src/base/thread_lister.h',
+ '<(tcmalloc_dir)/src/base/vdso_support.cc',
+ '<(tcmalloc_dir)/src/base/vdso_support.h',
+ '<(tcmalloc_dir)/src/central_freelist.cc',
+ '<(tcmalloc_dir)/src/central_freelist.h',
+ '<(tcmalloc_dir)/src/common.cc',
+ '<(tcmalloc_dir)/src/common.h',
+ '<(tcmalloc_dir)/src/debugallocation.cc',
+ '<(tcmalloc_dir)/src/free_list.cc',
+ '<(tcmalloc_dir)/src/free_list.h',
+ '<(tcmalloc_dir)/src/getpc.h',
+ '<(tcmalloc_dir)/src/gperftools/heap-checker.h',
+ '<(tcmalloc_dir)/src/gperftools/heap-profiler.h',
+ '<(tcmalloc_dir)/src/gperftools/malloc_extension.h',
+ '<(tcmalloc_dir)/src/gperftools/malloc_extension_c.h',
+ '<(tcmalloc_dir)/src/gperftools/malloc_hook.h',
+ '<(tcmalloc_dir)/src/gperftools/malloc_hook_c.h',
+ '<(tcmalloc_dir)/src/gperftools/profiler.h',
+ '<(tcmalloc_dir)/src/gperftools/stacktrace.h',
+ '<(tcmalloc_dir)/src/gperftools/tcmalloc.h',
+ '<(tcmalloc_dir)/src/heap-checker-bcad.cc',
+ '<(tcmalloc_dir)/src/heap-checker.cc',
+ '<(tcmalloc_dir)/src/heap-profile-table.cc',
+ '<(tcmalloc_dir)/src/heap-profile-table.h',
+ '<(tcmalloc_dir)/src/heap-profiler.cc',
+ '<(tcmalloc_dir)/src/internal_logging.cc',
+ '<(tcmalloc_dir)/src/internal_logging.h',
+ '<(tcmalloc_dir)/src/libc_override.h',
+ '<(tcmalloc_dir)/src/libc_override_gcc_and_weak.h',
+ '<(tcmalloc_dir)/src/libc_override_glibc.h',
+ '<(tcmalloc_dir)/src/libc_override_osx.h',
+ '<(tcmalloc_dir)/src/libc_override_redefine.h',
+ '<(tcmalloc_dir)/src/linked_list.h',
+ '<(tcmalloc_dir)/src/malloc_extension.cc',
+ '<(tcmalloc_dir)/src/malloc_hook-inl.h',
+ '<(tcmalloc_dir)/src/malloc_hook.cc',
+ '<(tcmalloc_dir)/src/malloc_hook_mmap_freebsd.h',
+ '<(tcmalloc_dir)/src/malloc_hook_mmap_linux.h',
+ '<(tcmalloc_dir)/src/maybe_threads.cc',
+ '<(tcmalloc_dir)/src/maybe_threads.h',
+ '<(tcmalloc_dir)/src/memfs_malloc.cc',
+ '<(tcmalloc_dir)/src/memory_region_map.cc',
+ '<(tcmalloc_dir)/src/memory_region_map.h',
+ '<(tcmalloc_dir)/src/packed-cache-inl.h',
+ '<(tcmalloc_dir)/src/page_heap.cc',
+ '<(tcmalloc_dir)/src/page_heap.h',
+ '<(tcmalloc_dir)/src/page_heap_allocator.h',
+ '<(tcmalloc_dir)/src/pagemap.h',
+ '<(tcmalloc_dir)/src/profile-handler.cc',
+ '<(tcmalloc_dir)/src/profile-handler.h',
+ '<(tcmalloc_dir)/src/profiledata.cc',
+ '<(tcmalloc_dir)/src/profiledata.h',
+ '<(tcmalloc_dir)/src/profiler.cc',
+ '<(tcmalloc_dir)/src/raw_printer.cc',
+ '<(tcmalloc_dir)/src/raw_printer.h',
+ '<(tcmalloc_dir)/src/sampler.cc',
+ '<(tcmalloc_dir)/src/sampler.h',
+ '<(tcmalloc_dir)/src/span.cc',
+ '<(tcmalloc_dir)/src/span.h',
+ '<(tcmalloc_dir)/src/stack_trace_table.cc',
+ '<(tcmalloc_dir)/src/stack_trace_table.h',
+ '<(tcmalloc_dir)/src/stacktrace.cc',
+ '<(tcmalloc_dir)/src/stacktrace_arm-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_config.h',
+ '<(tcmalloc_dir)/src/stacktrace_generic-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_libunwind-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_powerpc-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_win32-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_with_context.cc',
+ '<(tcmalloc_dir)/src/stacktrace_x86-inl.h',
+ '<(tcmalloc_dir)/src/static_vars.cc',
+ '<(tcmalloc_dir)/src/static_vars.h',
+ '<(tcmalloc_dir)/src/symbolize.cc',
+ '<(tcmalloc_dir)/src/symbolize.h',
+ '<(tcmalloc_dir)/src/system-alloc.cc',
+ '<(tcmalloc_dir)/src/system-alloc.h',
+ '<(tcmalloc_dir)/src/tcmalloc.cc',
+ '<(tcmalloc_dir)/src/tcmalloc_guard.h',
+ '<(tcmalloc_dir)/src/thread_cache.cc',
+ '<(tcmalloc_dir)/src/thread_cache.h',
+
+ 'debugallocation_shim.cc',
+ ],
+ # sources! means that these are not compiled directly.
+ 'sources!': [
+ # We simply don't use these, but list them above so that IDE
+ # users can view the full available source for reference, etc.
+ '<(tcmalloc_dir)/src/addressmap-inl.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-linuxppc.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-macosx.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-x86-msvc.h',
+ '<(tcmalloc_dir)/src/base/atomicops-internals-x86.h',
+ '<(tcmalloc_dir)/src/base/atomicops.h',
+ '<(tcmalloc_dir)/src/base/commandlineflags.h',
+ '<(tcmalloc_dir)/src/base/cycleclock.h',
+ '<(tcmalloc_dir)/src/base/elf_mem_image.h',
+ '<(tcmalloc_dir)/src/base/elfcore.h',
+ '<(tcmalloc_dir)/src/base/googleinit.h',
+ '<(tcmalloc_dir)/src/base/linux_syscall_support.h',
+ '<(tcmalloc_dir)/src/base/simple_mutex.h',
+ '<(tcmalloc_dir)/src/base/spinlock_linux-inl.h',
+ '<(tcmalloc_dir)/src/base/spinlock_posix-inl.h',
+ '<(tcmalloc_dir)/src/base/spinlock_win32-inl.h',
+ '<(tcmalloc_dir)/src/base/stl_allocator.h',
+ '<(tcmalloc_dir)/src/base/thread_annotations.h',
+ '<(tcmalloc_dir)/src/getpc.h',
+ '<(tcmalloc_dir)/src/gperftools/heap-checker.h',
+ '<(tcmalloc_dir)/src/gperftools/heap-profiler.h',
+ '<(tcmalloc_dir)/src/gperftools/malloc_extension.h',
+ '<(tcmalloc_dir)/src/gperftools/malloc_extension_c.h',
+ '<(tcmalloc_dir)/src/gperftools/malloc_hook.h',
+ '<(tcmalloc_dir)/src/gperftools/malloc_hook_c.h',
+ '<(tcmalloc_dir)/src/gperftools/profiler.h',
+ '<(tcmalloc_dir)/src/gperftools/stacktrace.h',
+ '<(tcmalloc_dir)/src/gperftools/tcmalloc.h',
+ '<(tcmalloc_dir)/src/heap-checker-bcad.cc',
+ '<(tcmalloc_dir)/src/heap-checker.cc',
+ '<(tcmalloc_dir)/src/libc_override.h',
+ '<(tcmalloc_dir)/src/libc_override_gcc_and_weak.h',
+ '<(tcmalloc_dir)/src/libc_override_glibc.h',
+ '<(tcmalloc_dir)/src/libc_override_osx.h',
+ '<(tcmalloc_dir)/src/libc_override_redefine.h',
+ '<(tcmalloc_dir)/src/malloc_hook_mmap_freebsd.h',
+ '<(tcmalloc_dir)/src/malloc_hook_mmap_linux.h',
+ '<(tcmalloc_dir)/src/memfs_malloc.cc',
+ '<(tcmalloc_dir)/src/packed-cache-inl.h',
+ '<(tcmalloc_dir)/src/page_heap_allocator.h',
+ '<(tcmalloc_dir)/src/pagemap.h',
+ '<(tcmalloc_dir)/src/stacktrace_arm-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_config.h',
+ '<(tcmalloc_dir)/src/stacktrace_generic-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_libunwind-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_powerpc-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_win32-inl.h',
+ '<(tcmalloc_dir)/src/stacktrace_with_context.cc',
+ '<(tcmalloc_dir)/src/stacktrace_x86-inl.h',
+ '<(tcmalloc_dir)/src/tcmalloc_guard.h',
+
+ # Included by debugallocation_shim.cc.
+ '<(tcmalloc_dir)/src/debugallocation.cc',
+ '<(tcmalloc_dir)/src/tcmalloc.cc',
+ ],
+ 'variables': {
+ 'clang_warning_flags': [
+ # tcmalloc initializes some fields in the wrong order.
+ '-Wno-reorder',
+ # tcmalloc contains some unused local template specializations.
+ '-Wno-unused-function',
+ # tcmalloc uses COMPILE_ASSERT without static_assert but with
+ # typedefs.
+ '-Wno-unused-local-typedefs',
+ # for magic2_ in debugallocation.cc (only built in Debug builds)
+ # typedefs.
+ '-Wno-unused-private-field',
+ ],
+ },
+ 'conditions': [
+ ['OS=="linux" or OS=="freebsd" or OS=="solaris" or OS=="android"', {
+ 'sources!': [
+ '<(tcmalloc_dir)/src/system-alloc.h',
+ ],
+ # We enable all warnings by default, but upstream disables a few.
+ # Keep "-Wno-*" flags in sync with upstream by comparing against:
+ # http://code.google.com/p/google-perftools/source/browse/trunk/Makefile.am
+ 'cflags': [
+ '-Wno-sign-compare',
+ '-Wno-unused-result',
+ ],
+ 'link_settings': {
+ 'ldflags': [
+ # Don't let linker rip this symbol out, otherwise the heap&cpu
+ # profilers will not initialize properly on startup.
+ '-Wl,-uIsHeapProfilerRunning,-uProfilerStart',
+ # Do the same for heap leak checker.
+ '-Wl,-u_Z21InitialMallocHook_NewPKvj,-u_Z22InitialMallocHook_MMapPKvS0_jiiix,-u_Z22InitialMallocHook_SbrkPKvi',
+ '-Wl,-u_Z21InitialMallocHook_NewPKvm,-u_Z22InitialMallocHook_MMapPKvS0_miiil,-u_Z22InitialMallocHook_SbrkPKvl',
+ '-Wl,-u_ZN15HeapLeakChecker12IgnoreObjectEPKv,-u_ZN15HeapLeakChecker14UnIgnoreObjectEPKv',
+ ],
+ },
+ # Compiling tcmalloc with -fvisibility=default is only necessary when
+ # not using the allocator shim, which provides the correct visibility
+ # annotations for those symbols which need to be exported (see
+ # //base/allocator/allocator_shim_override_glibc_weak_symbols.h and
+ # //base/allocator/allocator_shim_internals.h for the definition of
+ # SHIM_ALWAYS_EXPORT).
+ 'conditions': [
+ ['use_experimental_allocator_shim==0', {
+ 'cflags!': [
+ '-fvisibility=hidden',
+ ],
+ }],
+ ],
+ }],
+ ['profiling!=1', {
+ 'sources!': [
+ # cpuprofiler
+ '<(tcmalloc_dir)/src/base/thread_lister.c',
+ '<(tcmalloc_dir)/src/base/thread_lister.h',
+ '<(tcmalloc_dir)/src/profile-handler.cc',
+ '<(tcmalloc_dir)/src/profile-handler.h',
+ '<(tcmalloc_dir)/src/profiledata.cc',
+ '<(tcmalloc_dir)/src/profiledata.h',
+ '<(tcmalloc_dir)/src/profiler.cc',
+ ],
+ }],
+ ['use_experimental_allocator_shim==1', {
+ 'defines': [
+ 'TCMALLOC_DONT_REPLACE_SYSTEM_ALLOC',
+ ],
+ }]
+ ],
+ 'configurations': {
+ 'Debug_Base': {
+ 'conditions': [
+ ['disable_debugallocation==0', {
+ 'defines': [
+ # Use debugallocation for Debug builds to catch problems
+ # early and cleanly, http://crbug.com/30715 .
+ 'TCMALLOC_FOR_DEBUGALLOCATION',
+ ],
+ }],
+ ],
+ },
+ },
+      }], # use_allocator=="tcmalloc"
+ # For CrOS builds with vtable verification. According to the author of
+  # crrev.com/10854031 this is used in conjunction with some other CrOS
+ # build flag, to enable verification of any allocator that uses virtual
+ # function calls.
+ ['use_vtable_verify==1', {
+ 'cflags': [
+ '-fvtable-verify=preinit',
+ ],
+ }],
+ ['order_profiling != 0', {
+ 'target_conditions' : [
+ ['_toolset=="target"', {
+ 'cflags!': [ '-finstrument-functions' ],
+ }],
+ ],
+ }],
+ ], # conditions of 'allocator' target.
+ }, # 'allocator' target.
+ {
+ # GN: //base/allocator:features
+ # When referenced from a target that might be compiled in the host
+ # toolchain, always refer to 'allocator_features#target'.
+ 'target_name': 'allocator_features',
+ 'includes': [ '../../build/buildflag_header.gypi' ],
+ 'variables': {
+ 'buildflag_header_path': 'base/allocator/features.h',
+ 'buildflag_flags': [
+ 'USE_EXPERIMENTAL_ALLOCATOR_SHIM=<(use_experimental_allocator_shim)',
+ ],
+ },
+ }, # 'allocator_features' target.
+ ], # targets.
+ 'conditions': [
+ ['use_experimental_allocator_shim==1', {
+ 'targets': [
+ {
+ # GN: //base/allocator:unified_allocator_shim
+ 'target_name': 'unified_allocator_shim',
+ 'toolsets': ['host', 'target'],
+ 'type': 'static_library',
+ 'defines': [ 'BASE_IMPLEMENTATION' ],
+ 'sources': [
+ 'allocator_shim.cc',
+ 'allocator_shim.h',
+ 'allocator_shim_internals.h',
+ 'allocator_shim_override_cpp_symbols.h',
+ 'allocator_shim_override_libc_symbols.h',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'conditions': [
+ ['OS=="linux" and use_allocator=="tcmalloc"', {
+ 'sources': [
+ 'allocator_shim_default_dispatch_to_tcmalloc.cc',
+ 'allocator_shim_override_glibc_weak_symbols.h',
+ ],
+ }],
+ ['use_allocator=="none" and (OS=="linux" or (OS=="android" and _toolset == "host" and host_os == "linux"))', {
+ 'sources': [
+ 'allocator_shim_default_dispatch_to_glibc.cc',
+ ],
+ }],
+ ['OS=="android" and _toolset == "target"', {
+ 'sources': [
+ 'allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc',
+ 'allocator_shim_override_linker_wrapped_symbols.h',
+ ],
+ # On Android all references to malloc & friends symbols are
+ # rewritten, at link time, and routed to the shim.
+ # See //base/allocator/README.md.
+ 'all_dependent_settings': {
+ 'ldflags': [
+ '-Wl,-wrap,calloc',
+ '-Wl,-wrap,free',
+ '-Wl,-wrap,malloc',
+ '-Wl,-wrap,memalign',
+ '-Wl,-wrap,posix_memalign',
+ '-Wl,-wrap,pvalloc',
+ '-Wl,-wrap,realloc',
+ '-Wl,-wrap,valloc',
+ ],
+ },
+ }],
+ ]
+ }, # 'unified_allocator_shim' target.
+ ],
+ }]
+ ],
+}
diff --git a/libchrome/base/allocator/allocator_extension.cc b/libchrome/base/allocator/allocator_extension.cc
new file mode 100644
index 0000000..9a3d114
--- /dev/null
+++ b/libchrome/base/allocator/allocator_extension.cc
@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_extension.h"
+
+#include "base/logging.h"
+
+#if defined(USE_TCMALLOC)
+#include "third_party/tcmalloc/chromium/src/gperftools/heap-profiler.h"
+#include "third_party/tcmalloc/chromium/src/gperftools/malloc_extension.h"
+#include "third_party/tcmalloc/chromium/src/gperftools/malloc_hook.h"
+#endif
+
+namespace base {
+namespace allocator {
+
+void ReleaseFreeMemory() {
+#if defined(USE_TCMALLOC)
+ ::MallocExtension::instance()->ReleaseFreeMemory();
+#endif
+}
+
+bool GetNumericProperty(const char* name, size_t* value) {
+#if defined(USE_TCMALLOC)
+ return ::MallocExtension::instance()->GetNumericProperty(name, value);
+#endif
+ return false;
+}
+
+bool IsHeapProfilerRunning() {
+#if defined(USE_TCMALLOC)
+ return ::IsHeapProfilerRunning();
+#endif
+ return false;
+}
+
+void SetHooks(AllocHookFunc alloc_hook, FreeHookFunc free_hook) {
+// TODO(sque): Use allocator shim layer instead.
+#if defined(USE_TCMALLOC)
+ // Make sure no hooks get overwritten.
+ auto prev_alloc_hook = MallocHook::SetNewHook(alloc_hook);
+ if (alloc_hook)
+ DCHECK(!prev_alloc_hook);
+
+ auto prev_free_hook = MallocHook::SetDeleteHook(free_hook);
+ if (free_hook)
+ DCHECK(!prev_free_hook);
+#endif
+}
+
+int GetCallStack(void** stack, int max_stack_size) {
+#if defined(USE_TCMALLOC)
+ return MallocHook::GetCallerStackTrace(stack, max_stack_size, 0);
+#endif
+ return 0;
+}
+
+} // namespace allocator
+} // namespace base
diff --git a/libchrome/base/allocator/allocator_extension.h b/libchrome/base/allocator/allocator_extension.h
new file mode 100644
index 0000000..9f2775a
--- /dev/null
+++ b/libchrome/base/allocator/allocator_extension.h
@@ -0,0 +1,51 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
+#define BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
+
+#include <stddef.h> // for size_t
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace allocator {
+
+// Callback types for alloc and free.
+using AllocHookFunc = void (*)(const void*, size_t);
+using FreeHookFunc = void (*)(const void*);
+
+// Request that the allocator release any free memory it knows about to the
+// system.
+BASE_EXPORT void ReleaseFreeMemory();
+
+// Get the named property's |value|. Returns true if the property is known.
+// Returns false if the property is not a valid property name for the current
+// allocator implementation.
+// |name| or |value| cannot be NULL
+BASE_EXPORT bool GetNumericProperty(const char* name, size_t* value);
+
+BASE_EXPORT bool IsHeapProfilerRunning();
+
+// Register callbacks for alloc and free. Can only store one callback at a time
+// for each of alloc and free.
+BASE_EXPORT void SetHooks(AllocHookFunc alloc_hook, FreeHookFunc free_hook);
+
+// Attempts to unwind the call stack from the current location where this
+// function is being called from. Must be called from a hook function registered
+// by calling SetSingle{Alloc,Free}Hook, directly or indirectly.
+//
+// Arguments:
+// stack: pointer to a pre-allocated array of void*'s.
+// max_stack_size: indicates the size of the array in |stack|.
+//
+// Returns the number of call stack frames stored in |stack|, or 0 if no call
+// stack information is available.
+BASE_EXPORT int GetCallStack(void** stack, int max_stack_size);
+
+} // namespace allocator
+} // namespace base
+
+#endif // BASE_ALLOCATOR_ALLOCATOR_EXTENSION_H_
diff --git a/libchrome/base/allocator/allocator_shim.cc b/libchrome/base/allocator/allocator_shim.cc
new file mode 100644
index 0000000..09ed45f
--- /dev/null
+++ b/libchrome/base/allocator/allocator_shim.cc
@@ -0,0 +1,260 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_shim.h"
+
+#include <errno.h>
+#include <unistd.h>
+
+#include <new>
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+// No calls to malloc / new in this file. They would cause re-entrancy of
+// the shim, which is hard to deal with. Keep this code as simple as possible
+// and don't use any external C++ object here, not even //base ones. Even if
+// they are safe to use today, in future they might be refactored.
+
+namespace {
+
+using namespace base;
+
+subtle::AtomicWord g_chain_head = reinterpret_cast<subtle::AtomicWord>(
+ &allocator::AllocatorDispatch::default_dispatch);
+
+bool g_call_new_handler_on_malloc_failure = false;
+subtle::Atomic32 g_new_handler_lock = 0;
+
+// In theory this should be just base::ThreadChecker. But we can't afford
+// the luxury of a LazyInstance<ThreadChecker> here as it would cause a new().
+bool CalledOnValidThread() {
+ using subtle::Atomic32;
+ const Atomic32 kInvalidTID = static_cast<Atomic32>(kInvalidThreadId);
+ static Atomic32 g_tid = kInvalidTID;
+ Atomic32 cur_tid = static_cast<Atomic32>(PlatformThread::CurrentId());
+ Atomic32 prev_tid =
+ subtle::NoBarrier_CompareAndSwap(&g_tid, kInvalidTID, cur_tid);
+ return prev_tid == kInvalidTID || prev_tid == cur_tid;
+}
+
+inline size_t GetPageSize() {
+ static size_t pagesize = 0;
+ if (!pagesize)
+ pagesize = sysconf(_SC_PAGESIZE);
+ return pagesize;
+}
+
+// Calls the std::new handler thread-safely. Returns true if a new_handler was
+// set and called, false if no new_handler was set.
+bool CallNewHandler() {
+ // TODO(primiano): C++11 has introduced ::get_new_handler() which is supposed
+ // to be thread safe and would avoid the spinlock boilerplate here. However
+  // it doesn't seem to be available in the Linux chroot headers yet.
+ std::new_handler nh;
+ {
+ while (subtle::Acquire_CompareAndSwap(&g_new_handler_lock, 0, 1))
+ PlatformThread::YieldCurrentThread();
+ nh = std::set_new_handler(0);
+ ignore_result(std::set_new_handler(nh));
+ subtle::Release_Store(&g_new_handler_lock, 0);
+ }
+ if (!nh)
+ return false;
+ (*nh)();
+  // Assume the new_handler will abort if it fails. Exceptions are disabled and
+  // we don't support the case of a new_handler throwing std::bad_alloc.
+ return true;
+}
+
+inline const allocator::AllocatorDispatch* GetChainHead() {
+ // TODO(primiano): Just use NoBarrier_Load once crbug.com/593344 is fixed.
+ // Unfortunately due to that bug NoBarrier_Load() is mistakenly fully
+  // barriered on Linux+Clang, and that causes visible perf regressions.
+ return reinterpret_cast<const allocator::AllocatorDispatch*>(
+#if defined(OS_LINUX) && defined(__clang__)
+ *static_cast<const volatile subtle::AtomicWord*>(&g_chain_head)
+#else
+ subtle::NoBarrier_Load(&g_chain_head)
+#endif
+ );
+}
+
+} // namespace
+
+namespace base {
+namespace allocator {
+
+void SetCallNewHandlerOnMallocFailure(bool value) {
+ g_call_new_handler_on_malloc_failure = value;
+}
+
+void* UncheckedAlloc(size_t size) {
+ const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+ return chain_head->alloc_function(chain_head, size);
+}
+
+void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
+ // Ensure this is always called on the same thread.
+ DCHECK(CalledOnValidThread());
+
+ dispatch->next = GetChainHead();
+
+ // This function does not guarantee to be thread-safe w.r.t. concurrent
+ // insertions, but still has to guarantee that all the threads always
+ // see a consistent chain, hence the MemoryBarrier() below.
+ // InsertAllocatorDispatch() is NOT a fastpath, as opposite to malloc(), so
+ // we don't really want this to be a release-store with a corresponding
+ // acquire-load during malloc().
+ subtle::MemoryBarrier();
+
+ subtle::NoBarrier_Store(&g_chain_head,
+ reinterpret_cast<subtle::AtomicWord>(dispatch));
+}
+
+void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
+ DCHECK(CalledOnValidThread());
+ DCHECK_EQ(GetChainHead(), dispatch);
+ subtle::NoBarrier_Store(&g_chain_head,
+ reinterpret_cast<subtle::AtomicWord>(dispatch->next));
+}
+
+} // namespace allocator
+} // namespace base
+
+// The Shim* functions below are the entry-points into the shim-layer and
+// are supposed to be invoked / aliased by the allocator_shim_override_*
+// headers to route the malloc / new symbols through the shim layer.
+extern "C" {
+
+// The general pattern for allocations is:
+// - Try to allocate, if succeeded return the pointer.
+// - If the allocation failed:
+// - Call the std::new_handler if it was a C++ allocation.
+// - Call the std::new_handler if it was a malloc() (or calloc() or similar)
+// AND SetCallNewHandlerOnMallocFailure(true).
+// - If the std::new_handler is NOT set just return nullptr.
+// - If the std::new_handler is set:
+// - Assume it will abort() if it fails (very likely the new_handler will
+//       just suicide printing a message).
+// - Assume it did succeed if it returns, in which case reattempt the alloc.
+
+void* ShimCppNew(size_t size) {
+ const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+ void* ptr;
+ do {
+ ptr = chain_head->alloc_function(chain_head, size);
+ } while (!ptr && CallNewHandler());
+ return ptr;
+}
+
+void ShimCppDelete(void* address) {
+ const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+ return chain_head->free_function(chain_head, address);
+}
+
+void* ShimMalloc(size_t size) {
+ const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+ void* ptr;
+ do {
+ ptr = chain_head->alloc_function(chain_head, size);
+ } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
+ return ptr;
+}
+
+void* ShimCalloc(size_t n, size_t size) {
+ const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+ void* ptr;
+ do {
+ ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size);
+ } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
+ return ptr;
+}
+
+void* ShimRealloc(void* address, size_t size) {
+ // realloc(size == 0) means free() and might return a nullptr. We should
+ // not call the std::new_handler in that case, though.
+ const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+ void* ptr;
+ do {
+ ptr = chain_head->realloc_function(chain_head, address, size);
+ } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
+ CallNewHandler());
+ return ptr;
+}
+
+void* ShimMemalign(size_t alignment, size_t size) {
+ const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+ void* ptr;
+ do {
+ ptr = chain_head->alloc_aligned_function(chain_head, alignment, size);
+ } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
+ return ptr;
+}
+
+int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
+ // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
+ // in tc_malloc.cc.
+ if (((alignment % sizeof(void*)) != 0) ||
+ ((alignment & (alignment - 1)) != 0) || (alignment == 0)) {
+ return EINVAL;
+ }
+ void* ptr = ShimMemalign(alignment, size);
+ *res = ptr;
+ return ptr ? 0 : ENOMEM;
+}
+
+void* ShimValloc(size_t size) {
+ return ShimMemalign(GetPageSize(), size);
+}
+
+void* ShimPvalloc(size_t size) {
+ // pvalloc(0) should allocate one page, according to its man page.
+ if (size == 0) {
+ size = GetPageSize();
+ } else {
+ size = (size + GetPageSize() - 1) & ~(GetPageSize() - 1);
+ }
+ return ShimMemalign(GetPageSize(), size);
+}
+
+void ShimFree(void* address) {
+ const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+ return chain_head->free_function(chain_head, address);
+}
+
+} // extern "C"
+
+// Cpp symbols (new / delete) should always be routed through the shim layer.
+#include "base/allocator/allocator_shim_override_cpp_symbols.h"
+
+// Android does not support symbol interposition. The way malloc symbols are
+// intercepted on Android is by using link-time -wrap flags.
+#if !defined(OS_ANDROID) && !defined(ANDROID)
+// Ditto for plain malloc() / calloc() / free() etc. symbols.
+#include "base/allocator/allocator_shim_override_libc_symbols.h"
+#else
+#include "base/allocator/allocator_shim_override_linker_wrapped_symbols.h"
+#endif
+
+// In the case of tcmalloc we also want to plumb into the glibc hooks
+// to avoid that allocations made in glibc itself (e.g., strdup()) get
+// accidentally performed on the glibc heap instead of the tcmalloc one.
+#if defined(USE_TCMALLOC)
+#include "base/allocator/allocator_shim_override_glibc_weak_symbols.h"
+#endif
+
+// Cross-checks.
+
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+#error The allocator shim should not be compiled when building for memory tools.
+#endif
+
+#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
+ (defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS)
+#error This code cannot be used when exceptions are turned on.
+#endif
diff --git a/libchrome/base/allocator/allocator_shim.h b/libchrome/base/allocator/allocator_shim.h
new file mode 100644
index 0000000..f1a1e3d
--- /dev/null
+++ b/libchrome/base/allocator/allocator_shim.h
@@ -0,0 +1,96 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace allocator {
+
+// Allocator Shim API. Allows to:
+// - Configure the behavior of the allocator (what to do on OOM failures).
+// - Install new hooks (AllocatorDispatch) in the allocator chain.
+
+// When this shim layer is enabled, the route of an allocation is as-follows:
+//
+// [allocator_shim_override_*.h] Intercept malloc() / operator new calls:
+// The override_* headers define the symbols required to intercept calls to
+// malloc() and operator new (if not overridden by specific C++ classes).
+//
+// [allocator_shim.cc] Routing allocation calls to the shim:
+// The headers above route the calls to the internal ShimMalloc(), ShimFree(),
+// ShimCppNew() etc. methods defined in allocator_shim.cc.
+// These methods will: (1) forward the allocation call to the front of the
+// AllocatorDispatch chain. (2) perform security hardenings (e.g., might
+// call std::new_handler on OOM failure).
+//
+// [allocator_shim_default_dispatch_to_*.cc] The AllocatorDispatch chain:
+// It is a singly linked list where each element is a struct with function
+// pointers (|malloc_function|, |free_function|, etc). Normally the chain
+// consists of a single AllocatorDispatch element, herein called
+// the "default dispatch", which is statically defined at build time and
+// ultimately routes the calls to the actual allocator defined by the build
+// config (tcmalloc, glibc, ...).
+//
+// It is possible to dynamically insert further AllocatorDispatch stages
+// to the front of the chain, for debugging / profiling purposes.
+//
+// All the functions must be thread safe. The shim does not enforce any
+// serialization. This is to route to thread-aware allocators (e.g., tcmalloc)
+// without introducing unnecessary perf hits.
+
+struct AllocatorDispatch {
+ using AllocFn = void*(const AllocatorDispatch* self, size_t size);
+ using AllocZeroInitializedFn = void*(const AllocatorDispatch* self,
+ size_t n,
+ size_t size);
+ using AllocAlignedFn = void*(const AllocatorDispatch* self,
+ size_t alignment,
+ size_t size);
+ using ReallocFn = void*(const AllocatorDispatch* self,
+ void* address,
+ size_t size);
+ using FreeFn = void(const AllocatorDispatch* self, void* address);
+
+ AllocFn* const alloc_function;
+ AllocZeroInitializedFn* const alloc_zero_initialized_function;
+ AllocAlignedFn* const alloc_aligned_function;
+ ReallocFn* const realloc_function;
+ FreeFn* const free_function;
+
+ const AllocatorDispatch* next;
+
+ // |default_dispatch| is statically defined by one (and only one) of the
+ // allocator_shim_default_dispatch_to_*.cc files, depending on the build
+ // configuration.
+ static const AllocatorDispatch default_dispatch;
+};
+
+// When true makes malloc behave like new, w.r.t calling the new_handler if
+// the allocation fails (see set_new_mode() in Windows).
+BASE_EXPORT void SetCallNewHandlerOnMallocFailure(bool value);
+
+// Allocates |size| bytes or returns nullptr. It does NOT call the new_handler,
+// regardless of SetCallNewHandlerOnMallocFailure().
+BASE_EXPORT void* UncheckedAlloc(size_t size);
+
+// Inserts |dispatch| in front of the allocator chain. This method is NOT
+// thread-safe w.r.t concurrent invocations of InsertAllocatorDispatch().
+// The callers have the responsibility of linearizing the changes to the chain
+// (or more likely call these always on the same thread).
+BASE_EXPORT void InsertAllocatorDispatch(AllocatorDispatch* dispatch);
+
+// Test-only. Rationale: (1) lack of use cases; (2) dealing safely with a
+// removal of arbitrary elements from a singly linked list would require a lock
+// in malloc(), which we really don't want.
+BASE_EXPORT void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch);
+
+} // namespace allocator
+} // namespace base
+
+#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
diff --git a/libchrome/base/allocator/allocator_shim_default_dispatch_to_glibc.cc b/libchrome/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
new file mode 100644
index 0000000..02facba
--- /dev/null
+++ b/libchrome/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
@@ -0,0 +1,52 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_shim.h"
+
+// This translation unit defines a default dispatch for the allocator shim which
+// routes allocations to libc functions.
+// The code here is strongly inspired from tcmalloc's libc_override_glibc.h.
+
+extern "C" {
+void* __libc_malloc(size_t size);
+void* __libc_calloc(size_t n, size_t size);
+void* __libc_realloc(void* address, size_t size);
+void* __libc_memalign(size_t alignment, size_t size);
+void __libc_free(void* ptr);
+} // extern "C"
+
+namespace {
+
+using base::allocator::AllocatorDispatch;
+
+void* GlibcMalloc(const AllocatorDispatch*, size_t size) {
+ return __libc_malloc(size);
+}
+
+void* GlibcCalloc(const AllocatorDispatch*, size_t n, size_t size) {
+ return __libc_calloc(n, size);
+}
+
+void* GlibcRealloc(const AllocatorDispatch*, void* address, size_t size) {
+ return __libc_realloc(address, size);
+}
+
+void* GlibcMemalign(const AllocatorDispatch*, size_t alignment, size_t size) {
+ return __libc_memalign(alignment, size);
+}
+
+void GlibcFree(const AllocatorDispatch*, void* address) {
+ __libc_free(address);
+}
+
+} // namespace
+
+const AllocatorDispatch AllocatorDispatch::default_dispatch = {
+ &GlibcMalloc, /* alloc_function */
+ &GlibcCalloc, /* alloc_zero_initialized_function */
+ &GlibcMemalign, /* alloc_aligned_function */
+ &GlibcRealloc, /* realloc_function */
+ &GlibcFree, /* free_function */
+ nullptr, /* next */
+};
diff --git a/libchrome/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc b/libchrome/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
new file mode 100644
index 0000000..7955cb7
--- /dev/null
+++ b/libchrome/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
@@ -0,0 +1,57 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_shim.h"
+
+// This translation unit defines a default dispatch for the allocator shim which
+// routes allocations to the original libc functions when using the link-time
+// -Wl,-wrap,malloc approach (see README.md).
+// The __real_X functions here are special symbols that the linker will relocate
+// against the real "X" undefined symbol, so that __real_malloc becomes the
+// equivalent of what an undefined malloc symbol reference would have been.
+// This is the counterpart of allocator_shim_override_linker_wrapped_symbols.h,
+// which routes the __wrap_X functions into the shim.
+
+extern "C" {
+void* __real_malloc(size_t);
+void* __real_calloc(size_t, size_t);
+void* __real_realloc(void*, size_t);
+void* __real_memalign(size_t, size_t);
+void* __real_free(void*);
+} // extern "C"
+
+namespace {
+
+using base::allocator::AllocatorDispatch;
+
+void* RealMalloc(const AllocatorDispatch*, size_t size) {
+ return __real_malloc(size);
+}
+
+void* RealCalloc(const AllocatorDispatch*, size_t n, size_t size) {
+ return __real_calloc(n, size);
+}
+
+void* RealRealloc(const AllocatorDispatch*, void* address, size_t size) {
+ return __real_realloc(address, size);
+}
+
+void* RealMemalign(const AllocatorDispatch*, size_t alignment, size_t size) {
+ return __real_memalign(alignment, size);
+}
+
+void RealFree(const AllocatorDispatch*, void* address) {
+ __real_free(address);
+}
+
+} // namespace
+
+const AllocatorDispatch AllocatorDispatch::default_dispatch = {
+ &RealMalloc, /* alloc_function */
+ &RealCalloc, /* alloc_zero_initialized_function */
+ &RealMemalign, /* alloc_aligned_function */
+ &RealRealloc, /* realloc_function */
+ &RealFree, /* free_function */
+ nullptr, /* next */
+};
diff --git a/libchrome/base/allocator/allocator_shim_internals.h b/libchrome/base/allocator/allocator_shim_internals.h
new file mode 100644
index 0000000..fc3624c
--- /dev/null
+++ b/libchrome/base/allocator/allocator_shim_internals.h
@@ -0,0 +1,27 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
+
+#if defined(__GNUC__)
+
+#include <sys/cdefs.h> // for __THROW
+
+#ifndef __THROW // Not a glibc system
+#ifdef _NOEXCEPT // LLVM libc++ uses noexcept instead
+#define __THROW _NOEXCEPT
+#else
+#define __THROW
+#endif // !_NOEXCEPT
+#endif
+
+// Shim layer symbols need to be ALWAYS exported, regardless of component build.
+#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default")))
+
+#define SHIM_ALIAS_SYMBOL(fn) __attribute__((alias(#fn)))
+
+#endif // __GNUC__
+
+#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
diff --git a/libchrome/base/allocator/allocator_shim_override_cpp_symbols.h b/libchrome/base/allocator/allocator_shim_override_cpp_symbols.h
new file mode 100644
index 0000000..616716f
--- /dev/null
+++ b/libchrome/base/allocator/allocator_shim_override_cpp_symbols.h
@@ -0,0 +1,42 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
+
+// Alias the default new/delete C++ symbols to the shim entry points.
+// This file is strongly inspired by tcmalloc's libc_override_redefine.h.
+
+#include <new>
+
+#include "base/allocator/allocator_shim_internals.h"
+
+SHIM_ALWAYS_EXPORT void* operator new(size_t size)
+ SHIM_ALIAS_SYMBOL(ShimCppNew);
+
+SHIM_ALWAYS_EXPORT void operator delete(void* p) __THROW
+ SHIM_ALIAS_SYMBOL(ShimCppDelete);
+
+SHIM_ALWAYS_EXPORT void* operator new[](size_t size)
+ SHIM_ALIAS_SYMBOL(ShimCppNew);
+
+SHIM_ALWAYS_EXPORT void operator delete[](void* p) __THROW
+ SHIM_ALIAS_SYMBOL(ShimCppDelete);
+
+SHIM_ALWAYS_EXPORT void* operator new(size_t size,
+ const std::nothrow_t&) __THROW
+ SHIM_ALIAS_SYMBOL(ShimCppNew);
+
+SHIM_ALWAYS_EXPORT void* operator new[](size_t size,
+ const std::nothrow_t&) __THROW
+ SHIM_ALIAS_SYMBOL(ShimCppNew);
+
+SHIM_ALWAYS_EXPORT void operator delete(void* p, const std::nothrow_t&) __THROW
+ SHIM_ALIAS_SYMBOL(ShimCppDelete);
+
+SHIM_ALWAYS_EXPORT void operator delete[](void* p,
+ const std::nothrow_t&) __THROW
+ SHIM_ALIAS_SYMBOL(ShimCppDelete);
diff --git a/libchrome/base/allocator/allocator_shim_override_libc_symbols.h b/libchrome/base/allocator/allocator_shim_override_libc_symbols.h
new file mode 100644
index 0000000..37b3b4e
--- /dev/null
+++ b/libchrome/base/allocator/allocator_shim_override_libc_symbols.h
@@ -0,0 +1,54 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Its purpose is to SHIM_ALIAS_SYMBOL the Libc symbols for malloc/new to the
+// shim layer entry points.
+
+#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
+
+#include <malloc.h>
+
+#include "base/allocator/allocator_shim_internals.h"
+
+extern "C" {
+
+SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW
+ SHIM_ALIAS_SYMBOL(ShimMalloc);
+
+SHIM_ALWAYS_EXPORT void free(void* ptr) __THROW
+ SHIM_ALIAS_SYMBOL(ShimFree);
+
+SHIM_ALWAYS_EXPORT void* realloc(void* ptr, size_t size) __THROW
+ SHIM_ALIAS_SYMBOL(ShimRealloc);
+
+SHIM_ALWAYS_EXPORT void* calloc(size_t n, size_t size) __THROW
+ SHIM_ALIAS_SYMBOL(ShimCalloc);
+
+SHIM_ALWAYS_EXPORT void cfree(void* ptr) __THROW
+ SHIM_ALIAS_SYMBOL(ShimFree);
+
+SHIM_ALWAYS_EXPORT void* memalign(size_t align, size_t s) __THROW
+ SHIM_ALIAS_SYMBOL(ShimMemalign);
+
+SHIM_ALWAYS_EXPORT void* valloc(size_t size) __THROW
+ SHIM_ALIAS_SYMBOL(ShimValloc);
+
+SHIM_ALWAYS_EXPORT void* pvalloc(size_t size) __THROW
+ SHIM_ALIAS_SYMBOL(ShimPvalloc);
+
+SHIM_ALWAYS_EXPORT int posix_memalign(void** r, size_t a, size_t s) __THROW
+ SHIM_ALIAS_SYMBOL(ShimPosixMemalign);
+
+// The default dispatch translation unit has to define also the following
+// symbols (unless they are ultimately routed to the system symbols):
+// void malloc_stats(void);
+// int mallopt(int, int);
+// struct mallinfo mallinfo(void);
+// size_t malloc_size(void*);
+// size_t malloc_usable_size(const void*);
+
+} // extern "C"
diff --git a/libchrome/base/allocator/allocator_shim_override_linker_wrapped_symbols.h b/libchrome/base/allocator/allocator_shim_override_linker_wrapped_symbols.h
new file mode 100644
index 0000000..5b85d6e
--- /dev/null
+++ b/libchrome/base/allocator/allocator_shim_override_linker_wrapped_symbols.h
@@ -0,0 +1,44 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
+
+// This header overrides the __wrap_X symbols when using the link-time
+// -Wl,-wrap,malloc shim-layer approach (see README.md).
+// All references to malloc, free, etc. within the linker unit that gets the
+// -wrap linker flags (e.g., libchrome.so) will be rewritten by the
+// linker as references to __wrap_malloc, __wrap_free, which are defined here.
+
+#include "base/allocator/allocator_shim_internals.h"
+
+extern "C" {
+
+SHIM_ALWAYS_EXPORT void* __wrap_calloc(size_t, size_t)
+ SHIM_ALIAS_SYMBOL(ShimCalloc);
+
+SHIM_ALWAYS_EXPORT void __wrap_free(void*)
+ SHIM_ALIAS_SYMBOL(ShimFree);
+
+SHIM_ALWAYS_EXPORT void* __wrap_malloc(size_t)
+ SHIM_ALIAS_SYMBOL(ShimMalloc);
+
+SHIM_ALWAYS_EXPORT void* __wrap_memalign(size_t, size_t)
+ SHIM_ALIAS_SYMBOL(ShimMemalign);
+
+SHIM_ALWAYS_EXPORT int __wrap_posix_memalign(void**, size_t, size_t)
+ SHIM_ALIAS_SYMBOL(ShimPosixMemalign);
+
+SHIM_ALWAYS_EXPORT void* __wrap_pvalloc(size_t)
+ SHIM_ALIAS_SYMBOL(ShimPvalloc);
+
+SHIM_ALWAYS_EXPORT void* __wrap_realloc(void*, size_t)
+ SHIM_ALIAS_SYMBOL(ShimRealloc);
+
+SHIM_ALWAYS_EXPORT void* __wrap_valloc(size_t)
+ SHIM_ALIAS_SYMBOL(ShimValloc);
+
+} // extern "C"
diff --git a/libchrome/base/allocator/features.h b/libchrome/base/allocator/features.h
new file mode 100644
index 0000000..eedb0b6
--- /dev/null
+++ b/libchrome/base/allocator/features.h
@@ -0,0 +1,15 @@
+// Generated by build/write_buildflag_header.py
+// From "allocator_features"
+
+#ifndef BASE_ALLOCATOR_FEATURES_H_
+#define BASE_ALLOCATOR_FEATURES_H_
+
+#include "build/buildflag.h"
+
+#if defined(__APPLE__)
+#define BUILDFLAG_INTERNAL_USE_EXPERIMENTAL_ALLOCATOR_SHIM() (0)
+#else
+#define BUILDFLAG_INTERNAL_USE_EXPERIMENTAL_ALLOCATOR_SHIM() (1)
+#endif
+
+#endif // BASE_ALLOCATOR_FEATURES_H_
diff --git a/libchrome/base/at_exit.cc b/libchrome/base/at_exit.cc
new file mode 100644
index 0000000..cfe4cf9
--- /dev/null
+++ b/libchrome/base/at_exit.cc
@@ -0,0 +1,97 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/at_exit.h"
+
+#include <stddef.h>
+#include <ostream>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/logging.h"
+
+namespace base {
+
+// Keep a stack of registered AtExitManagers. We always operate on the most
+// recent, and we should never have more than one outside of testing (for a
+// statically linked version of this library). Testing may use the shadow
+// version of the constructor, and if we are building a dynamic library we may
+// end up with multiple AtExitManagers on the same process. We don't protect
+// this for thread-safe access, since it will only be modified in testing.
+static AtExitManager* g_top_manager = NULL;
+
+AtExitManager::AtExitManager()
+ : processing_callbacks_(false), next_manager_(g_top_manager) {
+// If multiple modules instantiate AtExitManagers they'll end up living in this
+// module... they have to coexist.
+#if !defined(COMPONENT_BUILD)
+ DCHECK(!g_top_manager);
+#endif
+ g_top_manager = this;
+}
+
+AtExitManager::~AtExitManager() {
+ if (!g_top_manager) {
+ NOTREACHED() << "Tried to ~AtExitManager without an AtExitManager";
+ return;
+ }
+ DCHECK_EQ(this, g_top_manager);
+
+ ProcessCallbacksNow();
+ g_top_manager = next_manager_;
+}
+
+// static
+void AtExitManager::RegisterCallback(AtExitCallbackType func, void* param) {
+ DCHECK(func);
+ RegisterTask(base::Bind(func, param));
+}
+
+// static
+void AtExitManager::RegisterTask(base::Closure task) {
+ if (!g_top_manager) {
+ NOTREACHED() << "Tried to RegisterCallback without an AtExitManager";
+ return;
+ }
+
+ AutoLock lock(g_top_manager->lock_);
+ DCHECK(!g_top_manager->processing_callbacks_);
+ g_top_manager->stack_.push(std::move(task));
+}
+
+// static
+void AtExitManager::ProcessCallbacksNow() {
+ if (!g_top_manager) {
+ NOTREACHED() << "Tried to ProcessCallbacksNow without an AtExitManager";
+ return;
+ }
+
+ // Callbacks may try to add new callbacks, so run them without holding
+ // |lock_|. This is an error and caught by the DCHECK in RegisterTask(), but
+ // handle it gracefully in release builds so we don't deadlock.
+ std::stack<base::Closure> tasks;
+ {
+ AutoLock lock(g_top_manager->lock_);
+ tasks.swap(g_top_manager->stack_);
+ g_top_manager->processing_callbacks_ = true;
+ }
+
+ while (!tasks.empty()) {
+ base::Closure task = tasks.top();
+ task.Run();
+ tasks.pop();
+ }
+
+ // Expect that all callbacks have been run.
+ DCHECK(g_top_manager->stack_.empty());
+}
+
+AtExitManager::AtExitManager(bool shadow)
+ : processing_callbacks_(false), next_manager_(g_top_manager) {
+ DCHECK(shadow || !g_top_manager);
+ g_top_manager = this;
+}
+
+} // namespace base
diff --git a/libchrome/base/at_exit.h b/libchrome/base/at_exit.h
new file mode 100644
index 0000000..02e18ed
--- /dev/null
+++ b/libchrome/base/at_exit.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_AT_EXIT_H_
+#define BASE_AT_EXIT_H_
+
+#include <stack>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+// This class provides a facility similar to the CRT atexit(), except that
+// we control when the callbacks are executed. Under Windows for a DLL they
+// happen at a really bad time and under the loader lock. This facility is
+// mostly used by base::Singleton.
+//
+// The usage is simple. Early in the main() or WinMain() scope create an
+// AtExitManager object on the stack:
+// int main(...) {
+// base::AtExitManager exit_manager;
+//
+// }
+// When the exit_manager object goes out of scope, all the registered
+// callbacks and singleton destructors will be called.
+
+class BASE_EXPORT AtExitManager {
+ public:
+ typedef void (*AtExitCallbackType)(void*);
+
+ AtExitManager();
+
+ // The dtor calls all the registered callbacks. Do not try to register more
+ // callbacks after this point.
+ ~AtExitManager();
+
+ // Registers the specified function to be called at exit. The prototype of
+ // the callback function is void func(void*).
+ static void RegisterCallback(AtExitCallbackType func, void* param);
+
+ // Registers the specified task to be called at exit.
+ static void RegisterTask(base::Closure task);
+
+ // Calls the functions registered with RegisterCallback in LIFO order. It
+ // is possible to register new callbacks after calling this function.
+ static void ProcessCallbacksNow();
+
+ protected:
+ // This constructor will allow this instance of AtExitManager to be created
+ // even if one already exists. This should only be used for testing!
+ // AtExitManagers are kept on a global stack, and it will be removed during
+ // destruction. This allows you to shadow another AtExitManager.
+ explicit AtExitManager(bool shadow);
+
+ private:
+ base::Lock lock_;
+ std::stack<base::Closure> stack_;
+ bool processing_callbacks_;
+ AtExitManager* next_manager_; // Stack of managers to allow shadowing.
+
+ DISALLOW_COPY_AND_ASSIGN(AtExitManager);
+};
+
+#if defined(UNIT_TEST)
+class ShadowingAtExitManager : public AtExitManager {
+ public:
+ ShadowingAtExitManager() : AtExitManager(true) {}
+};
+#endif // defined(UNIT_TEST)
+
+} // namespace base
+
+#endif // BASE_AT_EXIT_H_
diff --git a/libchrome/base/at_exit_unittest.cc b/libchrome/base/at_exit_unittest.cc
new file mode 100644
index 0000000..cda7340
--- /dev/null
+++ b/libchrome/base/at_exit_unittest.cc
@@ -0,0 +1,87 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/at_exit.h"
+#include "base/bind.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+int g_test_counter_1 = 0;
+int g_test_counter_2 = 0;
+
+void IncrementTestCounter1(void* unused) {
+ ++g_test_counter_1;
+}
+
+void IncrementTestCounter2(void* unused) {
+ ++g_test_counter_2;
+}
+
+void ZeroTestCounters() {
+ g_test_counter_1 = 0;
+ g_test_counter_2 = 0;
+}
+
+void ExpectCounter1IsZero(void* unused) {
+ EXPECT_EQ(0, g_test_counter_1);
+}
+
+void ExpectParamIsNull(void* param) {
+ EXPECT_EQ(static_cast<void*>(NULL), param);
+}
+
+void ExpectParamIsCounter(void* param) {
+ EXPECT_EQ(&g_test_counter_1, param);
+}
+
+} // namespace
+
+class AtExitTest : public testing::Test {
+ private:
+ // Don't test the global AtExitManager, because asking it to process its
+ // AtExit callbacks can ruin the global state that other tests may depend on.
+ base::ShadowingAtExitManager exit_manager_;
+};
+
+TEST_F(AtExitTest, Basic) {
+ ZeroTestCounters();
+ base::AtExitManager::RegisterCallback(&IncrementTestCounter1, NULL);
+ base::AtExitManager::RegisterCallback(&IncrementTestCounter2, NULL);
+ base::AtExitManager::RegisterCallback(&IncrementTestCounter1, NULL);
+
+ EXPECT_EQ(0, g_test_counter_1);
+ EXPECT_EQ(0, g_test_counter_2);
+ base::AtExitManager::ProcessCallbacksNow();
+ EXPECT_EQ(2, g_test_counter_1);
+ EXPECT_EQ(1, g_test_counter_2);
+}
+
+TEST_F(AtExitTest, LIFOOrder) {
+ ZeroTestCounters();
+ base::AtExitManager::RegisterCallback(&IncrementTestCounter1, NULL);
+ base::AtExitManager::RegisterCallback(&ExpectCounter1IsZero, NULL);
+ base::AtExitManager::RegisterCallback(&IncrementTestCounter2, NULL);
+
+ EXPECT_EQ(0, g_test_counter_1);
+ EXPECT_EQ(0, g_test_counter_2);
+ base::AtExitManager::ProcessCallbacksNow();
+ EXPECT_EQ(1, g_test_counter_1);
+ EXPECT_EQ(1, g_test_counter_2);
+}
+
+TEST_F(AtExitTest, Param) {
+ base::AtExitManager::RegisterCallback(&ExpectParamIsNull, NULL);
+ base::AtExitManager::RegisterCallback(&ExpectParamIsCounter,
+ &g_test_counter_1);
+ base::AtExitManager::ProcessCallbacksNow();
+}
+
+TEST_F(AtExitTest, Task) {
+ ZeroTestCounters();
+ base::AtExitManager::RegisterTask(base::Bind(&ExpectParamIsCounter,
+ &g_test_counter_1));
+ base::AtExitManager::ProcessCallbacksNow();
+}
diff --git a/libchrome/base/atomic_ref_count.h b/libchrome/base/atomic_ref_count.h
new file mode 100644
index 0000000..2ab7242
--- /dev/null
+++ b/libchrome/base/atomic_ref_count.h
@@ -0,0 +1,66 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a low level implementation of atomic semantics for reference
+// counting. Please use base/memory/ref_counted.h directly instead.
+
+#ifndef BASE_ATOMIC_REF_COUNT_H_
+#define BASE_ATOMIC_REF_COUNT_H_
+
+#include "base/atomicops.h"
+
+namespace base {
+
+typedef subtle::Atomic32 AtomicRefCount;
+
+// Increment a reference count by "increment", which must exceed 0.
+inline void AtomicRefCountIncN(volatile AtomicRefCount *ptr,
+ AtomicRefCount increment) {
+ subtle::NoBarrier_AtomicIncrement(ptr, increment);
+}
+
+// Decrement a reference count by "decrement", which must exceed 0,
+// and return whether the result is non-zero.
+// Insert barriers to ensure that state written before the reference count
+// became zero will be visible to a thread that has just made the count zero.
+inline bool AtomicRefCountDecN(volatile AtomicRefCount *ptr,
+ AtomicRefCount decrement) {
+ bool res = (subtle::Barrier_AtomicIncrement(ptr, -decrement) != 0);
+ return res;
+}
+
+// Increment a reference count by 1.
+inline void AtomicRefCountInc(volatile AtomicRefCount *ptr) {
+ base::AtomicRefCountIncN(ptr, 1);
+}
+
+// Decrement a reference count by 1 and return whether the result is non-zero.
+// Insert barriers to ensure that state written before the reference count
+// became zero will be visible to a thread that has just made the count zero.
+inline bool AtomicRefCountDec(volatile AtomicRefCount *ptr) {
+ return base::AtomicRefCountDecN(ptr, 1);
+}
+
+// Return whether the reference count is one. If the reference count is used
+// in the conventional way, a reference count of 1 implies that the current
+// thread owns the reference and no other thread shares it. This call performs
+// the test for a reference count of one, and performs the memory barrier
+// needed for the owning thread to act on the object, knowing that it has
+// exclusive access to the object.
+inline bool AtomicRefCountIsOne(volatile AtomicRefCount *ptr) {
+ bool res = (subtle::Acquire_Load(ptr) == 1);
+ return res;
+}
+
+// Return whether the reference count is zero. With conventional object
+// reference counting, the object will be destroyed, so the reference count
+// should never be zero. Hence this is generally used for a debug check.
+inline bool AtomicRefCountIsZero(volatile AtomicRefCount *ptr) {
+ bool res = (subtle::Acquire_Load(ptr) == 0);
+ return res;
+}
+
+} // namespace base
+
+#endif // BASE_ATOMIC_REF_COUNT_H_
diff --git a/libchrome/base/atomic_sequence_num.h b/libchrome/base/atomic_sequence_num.h
new file mode 100644
index 0000000..59b0d25
--- /dev/null
+++ b/libchrome/base/atomic_sequence_num.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ATOMIC_SEQUENCE_NUM_H_
+#define BASE_ATOMIC_SEQUENCE_NUM_H_
+
+#include "base/atomicops.h"
+#include "base/macros.h"
+
+namespace base {
+
+class AtomicSequenceNumber;
+
+// Static (POD) AtomicSequenceNumber that MUST be used in global scope (or
+// non-function scope) ONLY. This implementation does not generate any static
+// initializer. Note that it does not implement any constructor which means
+// that its fields are not initialized except when it is stored in the global
+// data section (.data in ELF). If you want to allocate an atomic sequence
+// number on the stack (or heap), please use the AtomicSequenceNumber class
+// declared below.
+class StaticAtomicSequenceNumber {
+ public:
+ inline int GetNext() {
+ return static_cast<int>(
+ base::subtle::NoBarrier_AtomicIncrement(&seq_, 1) - 1);
+ }
+
+ private:
+ friend class AtomicSequenceNumber;
+
+ inline void Reset() {
+ base::subtle::Release_Store(&seq_, 0);
+ }
+
+ base::subtle::Atomic32 seq_;
+};
+
+// AtomicSequenceNumber that can be stored and used safely (i.e. its fields are
+// always initialized as opposed to StaticAtomicSequenceNumber declared above).
+// Please use StaticAtomicSequenceNumber if you want to declare an atomic
+// sequence number in the global scope.
+class AtomicSequenceNumber {
+ public:
+ AtomicSequenceNumber() {
+ seq_.Reset();
+ }
+
+ inline int GetNext() {
+ return seq_.GetNext();
+ }
+
+ private:
+ StaticAtomicSequenceNumber seq_;
+ DISALLOW_COPY_AND_ASSIGN(AtomicSequenceNumber);
+};
+
+} // namespace base
+
+#endif // BASE_ATOMIC_SEQUENCE_NUM_H_
diff --git a/libchrome/base/atomicops.h b/libchrome/base/atomicops.h
new file mode 100644
index 0000000..3428fe8
--- /dev/null
+++ b/libchrome/base/atomicops.h
@@ -0,0 +1,161 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// For atomic operations on reference counts, see atomic_refcount.h.
+// For atomic operations on sequence numbers, see atomic_sequence_num.h.
+
+// The routines exported by this module are subtle. If you use them, even if
+// you get the code right, it will depend on careful reasoning about atomicity
+// and memory ordering; it will be less readable, and harder to maintain. If
+// you plan to use these routines, you should have a good reason, such as solid
+// evidence that performance would otherwise suffer, or there being no
+// alternative. You should assume only properties explicitly guaranteed by the
+// specifications in this file. You are almost certainly _not_ writing code
+// just for the x86; if you assume x86 semantics, x86 hardware bugs and
+// implementations on other architectures will cause your code to break. If you
+// do not know what you are doing, avoid these routines, and use a Mutex.
+//
+// It is incorrect to make direct assignments to/from an atomic variable.
+// You should use one of the Load or Store routines. The NoBarrier
+// versions are provided when no barriers are needed:
+// NoBarrier_Store()
+// NoBarrier_Load()
+// Although there is currently no compiler enforcement, you are encouraged
+// to use these.
+//
+
+#ifndef BASE_ATOMICOPS_H_
+#define BASE_ATOMICOPS_H_
+
+#include <stdint.h>
+
+// Small C++ header which defines implementation specific macros used to
+// identify the STL implementation.
+// - libc++: captures __config for _LIBCPP_VERSION
+// - libstdc++: captures bits/c++config.h for __GLIBCXX__
+#include <cstddef>
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier as the public name for this fence. So, on
+// X64, undef it, and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
+
+namespace base {
+namespace subtle {
+
+typedef int32_t Atomic32;
+#ifdef ARCH_CPU_64_BITS
+// We need to be able to go between Atomic64 and AtomicWord implicitly. This
+// means Atomic64 and AtomicWord should be the same type on 64-bit.
+#if defined(__ILP32__) || defined(OS_NACL)
+// NaCl's intptr_t is not actually 64-bits on 64-bit!
+// http://code.google.com/p/nativeclient/issues/detail?id=1162
+typedef int64_t Atomic64;
+#else
+typedef intptr_t Atomic64;
+#endif
+#endif
+
+// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
+// Atomic64 routines below, depending on your architecture.
+typedef intptr_t AtomicWord;
+
+// Atomically execute:
+// result = *ptr;
+// if (*ptr == old_value)
+// *ptr = new_value;
+// return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr"
+//
+// This routine implies no memory barriers.
+Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value);
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr. This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
+
+// Atomically increment *ptr by "increment". Returns the new value of
+// *ptr with the increment applied. This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
+
+Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment);
+
+// These following lower-level operations are typically useful only to people
+// implementing higher-level synchronization operations like spinlocks,
+// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
+// a store with appropriate memory-ordering instructions. "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation. "Barrier" operations have both "Acquire" and "Release"
+// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value);
+Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value);
+
+void MemoryBarrier();
+void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
+void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
+void Release_Store(volatile Atomic32* ptr, Atomic32 value);
+
+Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
+Atomic32 Acquire_Load(volatile const Atomic32* ptr);
+Atomic32 Release_Load(volatile const Atomic32* ptr);
+
+// 64-bit atomic operations (only available on 64-bit processors).
+#ifdef ARCH_CPU_64_BITS
+Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value);
+Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
+Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+
+Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value);
+Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value);
+void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
+void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
+void Release_Store(volatile Atomic64* ptr, Atomic64 value);
+Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
+Atomic64 Acquire_Load(volatile const Atomic64* ptr);
+Atomic64 Release_Load(volatile const Atomic64* ptr);
+#endif // ARCH_CPU_64_BITS
+
+} // namespace subtle
+} // namespace base
+
+#if defined(OS_WIN)
+// TODO(jfb): The MSVC header includes windows.h, which other files end up
+// relying on. Fix this as part of crbug.com/559247.
+# include "base/atomicops_internals_x86_msvc.h"
+#else
+# include "base/atomicops_internals_portable.h"
+#endif
+
+// On some platforms we need additional declarations to make
+// AtomicWord compatible with our other Atomic* types.
+#if defined(OS_MACOSX) || defined(OS_OPENBSD)
+#include "base/atomicops_internals_atomicword_compat.h"
+#endif
+
+#endif // BASE_ATOMICOPS_H_
diff --git a/libchrome/base/atomicops_internals_atomicword_compat.h b/libchrome/base/atomicops_internals_atomicword_compat.h
new file mode 100644
index 0000000..8b000d2
--- /dev/null
+++ b/libchrome/base/atomicops_internals_atomicword_compat.h
@@ -0,0 +1,104 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+#define BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+
+#include <stdint.h>
+
+#include "build/build_config.h"
+
+// AtomicWord is a synonym for intptr_t, and Atomic32 is a synonym for int32_t,
+// which in turn means int. On some LP32 platforms, intptr_t is an int, but
+// on others, it's a long. When AtomicWord and Atomic32 are based on different
+// fundamental types, their pointers are incompatible.
+//
+// This file defines function overloads to allow both AtomicWord and Atomic32
+// data to be used with this interface.
+//
+// On LP64 platforms, AtomicWord and Atomic64 are both always long,
+// so this problem doesn't occur.
+
+#if !defined(ARCH_CPU_64_BITS)
+
+namespace base {
+namespace subtle {
+
+inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return NoBarrier_CompareAndSwap(
+ reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
+ AtomicWord new_value) {
+ return NoBarrier_AtomicExchange(
+ reinterpret_cast<volatile Atomic32*>(ptr), new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
+ AtomicWord increment) {
+ return NoBarrier_AtomicIncrement(
+ reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
+ AtomicWord increment) {
+ return Barrier_AtomicIncrement(
+ reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return base::subtle::Acquire_CompareAndSwap(
+ reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return base::subtle::Release_CompareAndSwap(
+ reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+ NoBarrier_Store(
+ reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
+ return base::subtle::Acquire_Store(
+ reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
+ return base::subtle::Release_Store(
+ reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+ return NoBarrier_Load(
+ reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
+ return base::subtle::Acquire_Load(
+ reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
+ return base::subtle::Release_Load(
+ reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+} // namespace subtle
+} // namespace base
+
+#endif // !defined(ARCH_CPU_64_BITS)
+
+#endif // BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
diff --git a/libchrome/base/atomicops_internals_portable.h b/libchrome/base/atomicops_internals_portable.h
new file mode 100644
index 0000000..ee034de
--- /dev/null
+++ b/libchrome/base/atomicops_internals_portable.h
@@ -0,0 +1,229 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+//
+// This implementation uses C++11 atomics' member functions. The code base is
+// currently written assuming atomicity revolves around accesses instead of
+// C++11's memory locations. The burden is on the programmer to ensure that all
+// memory locations accessed atomically are never accessed non-atomically (tsan
+// should help with this).
+//
+// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
+// locations as truly atomic. See the static_assert below.
+//
+// Of note in this implementation:
+// * All NoBarrier variants are implemented as relaxed.
+// * All Barrier variants are implemented as sequentially-consistent.
+// * Compare exchange's failure ordering is always the same as the success one
+// (except for release, which fails as relaxed): using a weaker ordering is
+// only valid under certain uses of compare exchange.
+// * Acquire store doesn't exist in the C11 memory model, it is instead
+// implemented as a relaxed store followed by a sequentially consistent
+// fence.
+// * Release load doesn't exist in the C11 memory model, it is instead
+// implemented as sequentially consistent fence followed by a relaxed load.
+// * Atomic increment is expected to return the post-incremented value, whereas
+// C11 fetch add returns the previous value. The implementation therefore
+// needs to increment twice (which the compiler should be able to detect and
+// optimize).
+
+#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+
+#include <atomic>
+
+#include "build/build_config.h"
+
+namespace base {
+namespace subtle {
+
+// This implementation is transitional and maintains the original API for
+// atomicops.h. This requires casting memory locations to the atomic types, and
+// assumes that the API and the C++11 implementation are layout-compatible,
+// which isn't true for all implementations or hardware platforms. The static
+// assertion should detect this issue, were it to fire then this header
+// shouldn't be used.
+//
+// TODO(jfb) If this header manages to stay committed then the API should be
+// modified, and all call sites updated.
+typedef volatile std::atomic<Atomic32>* AtomicLocation32;
+static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
+ "incompatible 32-bit atomic layout");
+
+inline void MemoryBarrier() {
+#if defined(__GLIBCXX__)
+ // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
+ // not defined, leading to the linker complaining about undefined references.
+ __atomic_thread_fence(std::memory_order_seq_cst);
+#else
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+#endif
+}
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ ((AtomicLocation32)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_relaxed,
+ std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return ((AtomicLocation32)ptr)
+ ->exchange(new_value, std::memory_order_relaxed);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment +
+ ((AtomicLocation32)ptr)
+ ->fetch_add(increment, std::memory_order_relaxed);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ ((AtomicLocation32)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_acquire,
+ std::memory_order_acquire);
+ return old_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ ((AtomicLocation32)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_release,
+ std::memory_order_relaxed);
+ return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ ((AtomicLocation32)ptr)->store(value, std::memory_order_release);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
+}
+
+#if defined(ARCH_CPU_64_BITS)
+
+typedef volatile std::atomic<Atomic64>* AtomicLocation64;
+static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
+ "incompatible 64-bit atomic layout");
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ ((AtomicLocation64)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_relaxed,
+ std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return ((AtomicLocation64)ptr)
+ ->exchange(new_value, std::memory_order_relaxed);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment +
+ ((AtomicLocation64)ptr)
+ ->fetch_add(increment, std::memory_order_relaxed);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ ((AtomicLocation64)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_acquire,
+ std::memory_order_acquire);
+ return old_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ ((AtomicLocation64)ptr)
+ ->compare_exchange_strong(old_value,
+ new_value,
+ std::memory_order_release,
+ std::memory_order_relaxed);
+ return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ ((AtomicLocation64)ptr)->store(value, std::memory_order_release);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
+}
+
+#endif // defined(ARCH_CPU_64_BITS)
+} // namespace subtle
+} // namespace base
+
+#endif // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
diff --git a/libchrome/base/atomicops_internals_x86_msvc.h b/libchrome/base/atomicops_internals_x86_msvc.h
new file mode 100644
index 0000000..9f05b7e
--- /dev/null
+++ b/libchrome/base/atomicops_internals_x86_msvc.h
@@ -0,0 +1,196 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+#define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+
+#include <windows.h>
+
+#include <intrin.h>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(ARCH_CPU_64_BITS)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier as the public name for this fence. So, on
+// X64, undef it, and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
+
+namespace base {
+namespace subtle {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ LONG result = _InterlockedCompareExchange(
+ reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(new_value),
+ static_cast<LONG>(old_value));
+ return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ LONG result = _InterlockedExchange(
+ reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(new_value));
+ return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return _InterlockedExchangeAdd(
+ reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(increment)) + increment;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline void MemoryBarrier() {
+#if defined(ARCH_CPU_64_BITS)
+ // See #undef and note at the top of this file.
+ __faststorefence();
+#else
+ // We use MemoryBarrier from WinNT.h
+ ::MemoryBarrier();
+#endif
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ NoBarrier_AtomicExchange(ptr, value);
+ // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value; // works w/o barrier for current Intel chips as of June 2005
+ // See comments in Atomic64 version of Release_Store() below.
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#if defined(_WIN64)
+
+// 64-bit low-level operations on 64-bit platform.
+
+static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic");
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ PVOID result = InterlockedCompareExchangePointer(
+ reinterpret_cast<volatile PVOID*>(ptr),
+ reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
+ return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ PVOID result = InterlockedExchangePointer(
+ reinterpret_cast<volatile PVOID*>(ptr),
+ reinterpret_cast<PVOID>(new_value));
+ return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return InterlockedExchangeAdd64(
+ reinterpret_cast<volatile LONGLONG*>(ptr),
+ static_cast<LONGLONG>(increment)) + increment;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ NoBarrier_AtomicExchange(ptr, value);
+ // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value; // works w/o barrier for current Intel chips as of June 2005
+
+ // When new chips come out, check:
+ // IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+  //   System Programming Guide, Chapter 7: Multiple-processor management,
+ // Section 7.2, Memory Ordering.
+ // Last seen at:
+ // http://developer.intel.com/design/pentium4/manuals/index_new.htm
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr;
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+
+#endif // defined(_WIN64)
+
+} // namespace subtle
+} // namespace base
+
+#endif // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
diff --git a/libchrome/base/atomicops_unittest.cc b/libchrome/base/atomicops_unittest.cc
new file mode 100644
index 0000000..7298609
--- /dev/null
+++ b/libchrome/base/atomicops_unittest.cc
@@ -0,0 +1,248 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/atomicops.h"
+
+#include <stdint.h>
+#include <string.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+template <class AtomicType>
+static void TestAtomicIncrement() {
+ // For now, we just test single threaded execution
+
+ // use a guard value to make sure the NoBarrier_AtomicIncrement doesn't go
+ // outside the expected address bounds. This is in particular to
+ // test that some future change to the asm code doesn't cause the
+  // 32-bit NoBarrier_AtomicIncrement to do the wrong thing on 64-bit
+ // machines.
+ struct {
+ AtomicType prev_word;
+ AtomicType count;
+ AtomicType next_word;
+ } s;
+
+ AtomicType prev_word_value, next_word_value;
+ memset(&prev_word_value, 0xFF, sizeof(AtomicType));
+ memset(&next_word_value, 0xEE, sizeof(AtomicType));
+
+ s.prev_word = prev_word_value;
+ s.count = 0;
+ s.next_word = next_word_value;
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 1), 1);
+ EXPECT_EQ(s.count, 1);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 2), 3);
+ EXPECT_EQ(s.count, 3);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 3), 6);
+ EXPECT_EQ(s.count, 6);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -3), 3);
+ EXPECT_EQ(s.count, 3);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -2), 1);
+ EXPECT_EQ(s.count, 1);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), 0);
+ EXPECT_EQ(s.count, 0);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), -1);
+ EXPECT_EQ(s.count, -1);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -4), -5);
+ EXPECT_EQ(s.count, -5);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+
+ EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 5), 0);
+ EXPECT_EQ(s.count, 0);
+ EXPECT_EQ(s.prev_word, prev_word_value);
+ EXPECT_EQ(s.next_word, next_word_value);
+}
+
+
+#define NUM_BITS(T) (sizeof(T) * 8)
+
+
+template <class AtomicType>
+static void TestCompareAndSwap() {
+ AtomicType value = 0;
+ AtomicType prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 1);
+ EXPECT_EQ(1, value);
+ EXPECT_EQ(0, prev);
+
+ // Verify that CAS will *not* change "value" if it doesn't match the
+ // expected number. CAS will always return the actual value of the
+ // variable from before any change.
+ AtomicType fail = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 2);
+ EXPECT_EQ(1, value);
+ EXPECT_EQ(1, fail);
+
+ // Use test value that has non-zero bits in both halves, more for testing
+ // 64-bit implementation on 32-bit platforms.
+ const AtomicType k_test_val = (static_cast<uint64_t>(1) <<
+ (NUM_BITS(AtomicType) - 2)) + 11;
+ value = k_test_val;
+ prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 5);
+ EXPECT_EQ(k_test_val, value);
+ EXPECT_EQ(k_test_val, prev);
+
+ value = k_test_val;
+ prev = base::subtle::NoBarrier_CompareAndSwap(&value, k_test_val, 5);
+ EXPECT_EQ(5, value);
+ EXPECT_EQ(k_test_val, prev);
+}
+
+
+template <class AtomicType>
+static void TestAtomicExchange() {
+ AtomicType value = 0;
+ AtomicType new_value = base::subtle::NoBarrier_AtomicExchange(&value, 1);
+ EXPECT_EQ(1, value);
+ EXPECT_EQ(0, new_value);
+
+ // Use test value that has non-zero bits in both halves, more for testing
+ // 64-bit implementation on 32-bit platforms.
+ const AtomicType k_test_val = (static_cast<uint64_t>(1) <<
+ (NUM_BITS(AtomicType) - 2)) + 11;
+ value = k_test_val;
+ new_value = base::subtle::NoBarrier_AtomicExchange(&value, k_test_val);
+ EXPECT_EQ(k_test_val, value);
+ EXPECT_EQ(k_test_val, new_value);
+
+ value = k_test_val;
+ new_value = base::subtle::NoBarrier_AtomicExchange(&value, 5);
+ EXPECT_EQ(5, value);
+ EXPECT_EQ(k_test_val, new_value);
+}
+
+
+template <class AtomicType>
+static void TestAtomicIncrementBounds() {
+ // Test at rollover boundary between int_max and int_min
+ AtomicType test_val = (static_cast<uint64_t>(1) <<
+ (NUM_BITS(AtomicType) - 1));
+ AtomicType value = -1 ^ test_val;
+ AtomicType new_value = base::subtle::NoBarrier_AtomicIncrement(&value, 1);
+ EXPECT_EQ(test_val, value);
+ EXPECT_EQ(value, new_value);
+
+ base::subtle::NoBarrier_AtomicIncrement(&value, -1);
+ EXPECT_EQ(-1 ^ test_val, value);
+
+ // Test at 32-bit boundary for 64-bit atomic type.
+ test_val = static_cast<uint64_t>(1) << (NUM_BITS(AtomicType) / 2);
+ value = test_val - 1;
+ new_value = base::subtle::NoBarrier_AtomicIncrement(&value, 1);
+ EXPECT_EQ(test_val, value);
+ EXPECT_EQ(value, new_value);
+
+ base::subtle::NoBarrier_AtomicIncrement(&value, -1);
+ EXPECT_EQ(test_val - 1, value);
+}
+
+// Return an AtomicType with the value 0xa5a5a5..
+template <class AtomicType>
+static AtomicType TestFillValue() {
+ AtomicType val = 0;
+ memset(&val, 0xa5, sizeof(AtomicType));
+ return val;
+}
+
+// This is a simple sanity check that values are correct. Not testing
+// atomicity
+template <class AtomicType>
+static void TestStore() {
+ const AtomicType kVal1 = TestFillValue<AtomicType>();
+ const AtomicType kVal2 = static_cast<AtomicType>(-1);
+
+ AtomicType value;
+
+ base::subtle::NoBarrier_Store(&value, kVal1);
+ EXPECT_EQ(kVal1, value);
+ base::subtle::NoBarrier_Store(&value, kVal2);
+ EXPECT_EQ(kVal2, value);
+
+ base::subtle::Acquire_Store(&value, kVal1);
+ EXPECT_EQ(kVal1, value);
+ base::subtle::Acquire_Store(&value, kVal2);
+ EXPECT_EQ(kVal2, value);
+
+ base::subtle::Release_Store(&value, kVal1);
+ EXPECT_EQ(kVal1, value);
+ base::subtle::Release_Store(&value, kVal2);
+ EXPECT_EQ(kVal2, value);
+}
+
+// This is a simple sanity check that values are correct. Not testing
+// atomicity
+template <class AtomicType>
+static void TestLoad() {
+ const AtomicType kVal1 = TestFillValue<AtomicType>();
+ const AtomicType kVal2 = static_cast<AtomicType>(-1);
+
+ AtomicType value;
+
+ value = kVal1;
+ EXPECT_EQ(kVal1, base::subtle::NoBarrier_Load(&value));
+ value = kVal2;
+ EXPECT_EQ(kVal2, base::subtle::NoBarrier_Load(&value));
+
+ value = kVal1;
+ EXPECT_EQ(kVal1, base::subtle::Acquire_Load(&value));
+ value = kVal2;
+ EXPECT_EQ(kVal2, base::subtle::Acquire_Load(&value));
+
+ value = kVal1;
+ EXPECT_EQ(kVal1, base::subtle::Release_Load(&value));
+ value = kVal2;
+ EXPECT_EQ(kVal2, base::subtle::Release_Load(&value));
+}
+
+TEST(AtomicOpsTest, Inc) {
+ TestAtomicIncrement<base::subtle::Atomic32>();
+ TestAtomicIncrement<base::subtle::AtomicWord>();
+}
+
+TEST(AtomicOpsTest, CompareAndSwap) {
+ TestCompareAndSwap<base::subtle::Atomic32>();
+ TestCompareAndSwap<base::subtle::AtomicWord>();
+}
+
+TEST(AtomicOpsTest, Exchange) {
+ TestAtomicExchange<base::subtle::Atomic32>();
+ TestAtomicExchange<base::subtle::AtomicWord>();
+}
+
+TEST(AtomicOpsTest, IncrementBounds) {
+ TestAtomicIncrementBounds<base::subtle::Atomic32>();
+ TestAtomicIncrementBounds<base::subtle::AtomicWord>();
+}
+
+TEST(AtomicOpsTest, Store) {
+ TestStore<base::subtle::Atomic32>();
+ TestStore<base::subtle::AtomicWord>();
+}
+
+TEST(AtomicOpsTest, Load) {
+ TestLoad<base::subtle::Atomic32>();
+ TestLoad<base::subtle::AtomicWord>();
+}
diff --git a/libchrome/base/auto_reset.h b/libchrome/base/auto_reset.h
new file mode 100644
index 0000000..9116537
--- /dev/null
+++ b/libchrome/base/auto_reset.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_AUTO_RESET_H_
+#define BASE_AUTO_RESET_H_
+
+#include "base/macros.h"
+
+// base::AutoReset<> is useful for setting a variable to a new value only within
+// a particular scope. A base::AutoReset<> object resets a variable to its
+// original value upon destruction, making it an alternative to writing
+// "var = false;" or "var = old_val;" at all of a block's exit points.
+//
+// This should be obvious, but note that a base::AutoReset<> instance should
+// have a shorter lifetime than its scoped_variable, to prevent invalid memory
+// writes when the base::AutoReset<> object is destroyed.
+
+namespace base {
+
+template<typename T>
+class AutoReset {
+ public:
+ AutoReset(T* scoped_variable, T new_value)
+ : scoped_variable_(scoped_variable),
+ original_value_(*scoped_variable) {
+ *scoped_variable_ = new_value;
+ }
+
+ ~AutoReset() { *scoped_variable_ = original_value_; }
+
+ private:
+ T* scoped_variable_;
+ T original_value_;
+
+ DISALLOW_COPY_AND_ASSIGN(AutoReset);
+};
+
+} // namespace base
+
+#endif // BASE_AUTO_RESET_H_
diff --git a/libchrome/base/base.gyp b/libchrome/base/base.gyp
new file mode 100644
index 0000000..a534d5c
--- /dev/null
+++ b/libchrome/base/base.gyp
@@ -0,0 +1,1801 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ },
+ 'includes': [
+ '../build/win_precompile.gypi',
+ 'base.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'base',
+ 'type': '<(component)',
+ 'toolsets': ['host', 'target'],
+ 'variables': {
+ 'base_target': 1,
+ 'enable_wexit_time_destructors': 1,
+ 'optimize': 'max',
+ },
+ 'dependencies': [
+ 'allocator/allocator.gyp:allocator',
+ 'allocator/allocator.gyp:allocator_features#target',
+ 'base_debugging_flags#target',
+ 'base_win_features#target',
+ 'base_static',
+ 'base_build_date#target',
+ '../testing/gtest.gyp:gtest_prod',
+ '../third_party/modp_b64/modp_b64.gyp:modp_b64',
+ 'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+ ],
+ # TODO(gregoryd): direct_dependent_settings should be shared with the
+ # 64-bit target, but it doesn't work due to a bug in gyp
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '..',
+ ],
+ },
+ 'conditions': [
+ ['desktop_linux == 1 or chromeos == 1', {
+ 'conditions': [
+ ['chromeos==1', {
+ 'sources/': [ ['include', '_chromeos\\.cc$'] ]
+ }],
+ ],
+ 'dependencies': [
+ 'symbolize',
+ 'xdg_mime',
+ ],
+ 'defines': [
+ 'USE_SYMBOLIZE',
+ ],
+ }, { # desktop_linux == 0 and chromeos == 0
+ 'sources/': [
+ ['exclude', '/xdg_user_dirs/'],
+ ['exclude', '_nss\\.cc$'],
+ ],
+ }],
+ ['use_glib==1', {
+ 'dependencies': [
+ '../build/linux/system.gyp:glib',
+ ],
+ 'export_dependent_settings': [
+ '../build/linux/system.gyp:glib',
+ ],
+ }],
+ ['OS == "android" and _toolset == "host"', {
+ # Always build base as a static_library for host toolset, even if
+ # we're doing a component build. Specifically, we only care about the
+ # target toolset using components since that's what developers are
+ # focusing on. In theory we should do this more generally for all
+ # targets when building for host, but getting the gyp magic
+ # per-toolset for the "component" variable is hard, and we really only
+ # need base on host.
+ 'type': 'static_library',
+ # Base for host support is the minimum required to run the
+ # ssl false start blacklist tool. It requires further changes
+ # to generically support host builds (and tests).
+ # Note: when building for host, gyp has OS == "android",
+ # hence the *_android.cc files are included but the actual code
+ # doesn't have OS_ANDROID / ANDROID defined.
+ 'conditions': [
+ ['host_os == "mac"', {
+ 'sources/': [
+ ['exclude', '^native_library_linux\\.cc$'],
+ ['exclude', '^process_util_linux\\.cc$'],
+ ['exclude', '^sys_info_linux\\.cc$'],
+ ['exclude', '^sys_string_conversions_linux\\.cc$'],
+ ['exclude', '^worker_pool_linux\\.cc$'],
+ ],
+ }],
+ ],
+ }],
+ ['OS == "android" and _toolset == "target"', {
+ 'dependencies': [
+ 'base_java',
+ 'base_jni_headers',
+ '../build/android/ndk.gyp:cpu_features',
+ '../third_party/ashmem/ashmem.gyp:ashmem',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '-llog',
+ ],
+ },
+ 'sources!': [
+ 'debug/stack_trace_posix.cc',
+ ],
+ }],
+ ['os_bsd==1', {
+ 'include_dirs': [
+ '/usr/local/include',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '-L/usr/local/lib -lexecinfo',
+ ],
+ },
+ }],
+ ['OS == "linux"', {
+ 'link_settings': {
+ 'libraries': [
+ # We need rt for clock_gettime().
+ '-lrt',
+ # For 'native_library_linux.cc'
+ '-ldl',
+ ],
+ },
+ 'conditions': [
+ ['use_allocator!="tcmalloc"', {
+ 'defines': [
+ 'NO_TCMALLOC',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'NO_TCMALLOC',
+ ],
+ },
+ }],
+ ],
+ }],
+ ['use_sysroot==0 and (OS == "android" or OS == "linux")', {
+ 'link_settings': {
+ 'libraries': [
+ # Needed for <atomic> when building with newer C++ library.
+ '-latomic',
+ ],
+ },
+ }],
+ ['OS == "win"', {
+ # Specify delayload for base.dll.
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'DelayLoadDLLs': [
+ 'cfgmgr32.dll',
+ 'powrprof.dll',
+ 'setupapi.dll',
+ ],
+ 'AdditionalDependencies': [
+ 'cfgmgr32.lib',
+ 'powrprof.lib',
+ 'setupapi.lib',
+ 'userenv.lib',
+ 'winmm.lib',
+ ],
+ },
+ },
+ # Specify delayload for components that link with base.lib.
+ 'all_dependent_settings': {
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'DelayLoadDLLs': [
+ 'cfgmgr32.dll',
+ 'powrprof.dll',
+ 'setupapi.dll',
+ ],
+ 'AdditionalDependencies': [
+ 'cfgmgr32.lib',
+ 'powrprof.lib',
+ 'setupapi.lib',
+ 'userenv.lib',
+ 'winmm.lib',
+ ],
+ },
+ },
+ },
+ 'dependencies': [
+ 'trace_event/etw_manifest/etw_manifest.gyp:etw_manifest',
+ ],
+ }],
+ ['OS == "mac" or (OS == "ios" and _toolset == "host")', {
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/AppKit.framework',
+ '$(SDKROOT)/System/Library/Frameworks/ApplicationServices.framework',
+ '$(SDKROOT)/System/Library/Frameworks/Carbon.framework',
+ '$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/IOKit.framework',
+ '$(SDKROOT)/System/Library/Frameworks/Security.framework',
+ '$(SDKROOT)/usr/lib/libbsm.dylib',
+ ],
+ },
+ }],
+ ['OS == "ios" and _toolset != "host"', {
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/CoreGraphics.framework',
+ '$(SDKROOT)/System/Library/Frameworks/CoreText.framework',
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
+ ],
+ },
+ }],
+ ['OS != "win" and (OS != "ios" or _toolset == "host")', {
+ 'dependencies': ['third_party/libevent/libevent.gyp:libevent'],
+ },],
+ ['component=="shared_library"', {
+ 'conditions': [
+ ['OS=="win"', {
+ 'sources!': [
+ 'debug/debug_on_start_win.cc',
+ ],
+ }],
+ ],
+ }],
+ ['OS=="ios"', {
+ 'sources!': [
+ 'sync_socket.h',
+ 'sync_socket_posix.cc',
+ ]
+ }],
+ ['use_experimental_allocator_shim==1', {
+ 'dependencies': [ 'allocator/allocator.gyp:unified_allocator_shim']
+ }],
+ ],
+ 'sources': [
+ 'auto_reset.h',
+ 'linux_util.cc',
+ 'linux_util.h',
+ 'message_loop/message_pump_android.cc',
+ 'message_loop/message_pump_android.h',
+ 'message_loop/message_pump_glib.cc',
+ 'message_loop/message_pump_glib.h',
+ 'message_loop/message_pump_io_ios.cc',
+ 'message_loop/message_pump_io_ios.h',
+ 'message_loop/message_pump_libevent.cc',
+ 'message_loop/message_pump_libevent.h',
+ 'message_loop/message_pump_mac.h',
+ 'message_loop/message_pump_mac.mm',
+ 'metrics/field_trial.cc',
+ 'metrics/field_trial.h',
+ 'posix/file_descriptor_shuffle.cc',
+ 'posix/file_descriptor_shuffle.h',
+ 'sync_socket.h',
+ 'sync_socket_posix.cc',
+ 'sync_socket_win.cc',
+ 'third_party/xdg_user_dirs/xdg_user_dir_lookup.cc',
+ 'third_party/xdg_user_dirs/xdg_user_dir_lookup.h',
+ ],
+ 'includes': [
+ '../build/android/increase_size_for_speed.gypi',
+ ],
+ },
+ {
+ 'target_name': 'base_i18n',
+ 'type': '<(component)',
+ 'variables': {
+ 'enable_wexit_time_destructors': 1,
+ 'optimize': 'max',
+ 'base_i18n_target': 1,
+ },
+ 'dependencies': [
+ 'base',
+ 'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+ '../third_party/icu/icu.gyp:icui18n',
+ '../third_party/icu/icu.gyp:icuuc',
+ ],
+ 'conditions': [
+ ['OS == "win"', {
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ 'msvs_disabled_warnings': [
+ 4267,
+ ],
+ }],
+ ['icu_use_data_file_flag==1', {
+ 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'],
+ }, { # else icu_use_data_file_flag !=1
+ 'conditions': [
+ ['OS=="win"', {
+ 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'],
+ }, {
+ 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'],
+ }],
+ ],
+ }],
+ ['OS == "ios"', {
+ 'toolsets': ['host', 'target'],
+ }],
+ ],
+ 'export_dependent_settings': [
+ 'base',
+ '../third_party/icu/icu.gyp:icuuc',
+ '../third_party/icu/icu.gyp:icui18n',
+ ],
+ 'includes': [
+ '../build/android/increase_size_for_speed.gypi',
+ ],
+ },
+ {
+ 'target_name': 'base_message_loop_tests',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'base',
+ '../testing/gtest.gyp:gtest',
+ ],
+ 'sources': [
+ 'message_loop/message_loop_test.cc',
+ 'message_loop/message_loop_test.h',
+ ],
+ },
+ {
+ # This is the subset of files from base that should not be used with a
+ # dynamic library. Note that this library cannot depend on base because
+ # base depends on base_static.
+ 'target_name': 'base_static',
+ 'type': 'static_library',
+ 'variables': {
+ 'enable_wexit_time_destructors': 1,
+ 'optimize': 'max',
+ },
+ 'toolsets': ['host', 'target'],
+ 'sources': [
+ 'base_switches.cc',
+ 'base_switches.h',
+ 'win/pe_image.cc',
+ 'win/pe_image.h',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'includes': [
+ '../build/android/increase_size_for_speed.gypi',
+ ],
+ },
+ # Include this target for a main() function that simply instantiates
+ # and runs a base::TestSuite.
+ {
+ 'target_name': 'run_all_unittests',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'test_support_base',
+ ],
+ 'sources': [
+ 'test/run_all_unittests.cc',
+ ],
+ },
+ {
+ 'target_name': 'base_unittests',
+ 'type': '<(gtest_target_type)',
+ 'sources': [
+ 'allocator/tcmalloc_unittest.cc',
+ 'android/application_status_listener_unittest.cc',
+ 'android/content_uri_utils_unittest.cc',
+ 'android/jni_android_unittest.cc',
+ 'android/jni_array_unittest.cc',
+ 'android/jni_string_unittest.cc',
+ 'android/library_loader/library_prefetcher_unittest.cc',
+ 'android/path_utils_unittest.cc',
+ 'android/scoped_java_ref_unittest.cc',
+ 'android/sys_utils_unittest.cc',
+ 'at_exit_unittest.cc',
+ 'atomicops_unittest.cc',
+ 'barrier_closure_unittest.cc',
+ 'base64_unittest.cc',
+ 'base64url_unittest.cc',
+ 'big_endian_unittest.cc',
+ 'bind_unittest.cc',
+ 'bind_unittest.nc',
+ 'bit_cast_unittest.cc',
+ 'bits_unittest.cc',
+ 'build_time_unittest.cc',
+ 'callback_helpers_unittest.cc',
+ 'callback_list_unittest.cc',
+ 'callback_list_unittest.nc',
+ 'callback_unittest.cc',
+ 'callback_unittest.nc',
+ 'cancelable_callback_unittest.cc',
+ 'command_line_unittest.cc',
+ 'containers/adapters_unittest.cc',
+ 'containers/hash_tables_unittest.cc',
+ 'containers/linked_list_unittest.cc',
+ 'containers/mru_cache_unittest.cc',
+ 'containers/scoped_ptr_hash_map_unittest.cc',
+ 'containers/small_map_unittest.cc',
+ 'containers/stack_container_unittest.cc',
+ 'cpu_unittest.cc',
+ 'debug/crash_logging_unittest.cc',
+ 'debug/debugger_unittest.cc',
+ 'debug/leak_tracker_unittest.cc',
+ 'debug/proc_maps_linux_unittest.cc',
+ 'debug/stack_trace_unittest.cc',
+ 'debug/task_annotator_unittest.cc',
+ 'deferred_sequenced_task_runner_unittest.cc',
+ 'environment_unittest.cc',
+ 'feature_list_unittest.cc',
+ 'file_version_info_win_unittest.cc',
+ 'files/dir_reader_posix_unittest.cc',
+ 'files/file_locking_unittest.cc',
+ 'files/file_path_unittest.cc',
+ 'files/file_path_watcher_unittest.cc',
+ 'files/file_proxy_unittest.cc',
+ 'files/file_unittest.cc',
+ 'files/file_util_proxy_unittest.cc',
+ 'files/file_util_unittest.cc',
+ 'files/important_file_writer_unittest.cc',
+ 'files/memory_mapped_file_unittest.cc',
+ 'files/scoped_temp_dir_unittest.cc',
+ 'gmock_unittest.cc',
+ 'guid_unittest.cc',
+ 'hash_unittest.cc',
+ 'i18n/break_iterator_unittest.cc',
+ 'i18n/case_conversion_unittest.cc',
+ 'i18n/char_iterator_unittest.cc',
+ 'i18n/file_util_icu_unittest.cc',
+ 'i18n/icu_string_conversions_unittest.cc',
+ 'i18n/message_formatter_unittest.cc',
+ 'i18n/number_formatting_unittest.cc',
+ 'i18n/rtl_unittest.cc',
+ 'i18n/streaming_utf8_validator_unittest.cc',
+ 'i18n/string_search_unittest.cc',
+ 'i18n/time_formatting_unittest.cc',
+ 'i18n/timezone_unittest.cc',
+ 'id_map_unittest.cc',
+ 'ios/crb_protocol_observers_unittest.mm',
+ 'ios/device_util_unittest.mm',
+ 'ios/weak_nsobject_unittest.mm',
+ 'json/json_parser_unittest.cc',
+ 'json/json_reader_unittest.cc',
+ 'json/json_value_converter_unittest.cc',
+ 'json/json_value_serializer_unittest.cc',
+ 'json/json_writer_unittest.cc',
+ 'json/string_escape_unittest.cc',
+ 'lazy_instance_unittest.cc',
+ 'logging_unittest.cc',
+ 'mac/bind_objc_block_unittest.mm',
+ 'mac/call_with_eh_frame_unittest.mm',
+ 'mac/dispatch_source_mach_unittest.cc',
+ 'mac/foundation_util_unittest.mm',
+ 'mac/mac_util_unittest.mm',
+ 'mac/mach_port_broker_unittest.cc',
+ 'mac/objc_property_releaser_unittest.mm',
+ 'mac/scoped_nsobject_unittest.mm',
+ 'mac/scoped_objc_class_swizzler_unittest.mm',
+ 'mac/scoped_sending_event_unittest.mm',
+ 'md5_unittest.cc',
+ 'memory/aligned_memory_unittest.cc',
+ 'memory/discardable_shared_memory_unittest.cc',
+ 'memory/linked_ptr_unittest.cc',
+ 'memory/memory_pressure_listener_unittest.cc',
+ 'memory/memory_pressure_monitor_chromeos_unittest.cc',
+ 'memory/memory_pressure_monitor_mac_unittest.cc',
+ 'memory/memory_pressure_monitor_win_unittest.cc',
+ 'memory/ptr_util_unittest.cc',
+ 'memory/ref_counted_memory_unittest.cc',
+ 'memory/ref_counted_unittest.cc',
+ 'memory/scoped_vector_unittest.cc',
+ 'memory/shared_memory_mac_unittest.cc',
+ 'memory/shared_memory_unittest.cc',
+ 'memory/shared_memory_win_unittest.cc',
+ 'memory/singleton_unittest.cc',
+ 'memory/weak_ptr_unittest.cc',
+ 'memory/weak_ptr_unittest.nc',
+ 'message_loop/message_loop_task_runner_unittest.cc',
+ 'message_loop/message_loop_unittest.cc',
+ 'message_loop/message_pump_glib_unittest.cc',
+ 'message_loop/message_pump_io_ios_unittest.cc',
+ 'message_loop/message_pump_libevent_unittest.cc',
+ 'metrics/bucket_ranges_unittest.cc',
+ 'metrics/field_trial_unittest.cc',
+ 'metrics/histogram_base_unittest.cc',
+ 'metrics/histogram_delta_serialization_unittest.cc',
+ 'metrics/histogram_macros_unittest.cc',
+ 'metrics/histogram_snapshot_manager_unittest.cc',
+ 'metrics/histogram_unittest.cc',
+ 'metrics/metrics_hashes_unittest.cc',
+ 'metrics/persistent_histogram_allocator_unittest.cc',
+ 'metrics/persistent_memory_allocator_unittest.cc',
+ 'metrics/persistent_sample_map_unittest.cc',
+ 'metrics/sample_map_unittest.cc',
+ 'metrics/sample_vector_unittest.cc',
+ 'metrics/sparse_histogram_unittest.cc',
+ 'metrics/statistics_recorder_unittest.cc',
+ 'native_library_unittest.cc',
+ 'numerics/safe_numerics_unittest.cc',
+ 'observer_list_unittest.cc',
+ 'optional_unittest.cc',
+ 'os_compat_android_unittest.cc',
+ 'path_service_unittest.cc',
+ 'pickle_unittest.cc',
+ 'posix/file_descriptor_shuffle_unittest.cc',
+ 'posix/unix_domain_socket_linux_unittest.cc',
+ 'power_monitor/power_monitor_unittest.cc',
+ 'process/memory_unittest.cc',
+ 'process/memory_unittest_mac.h',
+ 'process/memory_unittest_mac.mm',
+ 'process/process_metrics_unittest.cc',
+ 'process/process_metrics_unittest_ios.cc',
+ 'process/process_unittest.cc',
+ 'process/process_util_unittest.cc',
+ 'profiler/stack_sampling_profiler_unittest.cc',
+ 'profiler/tracked_time_unittest.cc',
+ 'rand_util_unittest.cc',
+ 'run_loop_unittest.cc',
+ 'scoped_clear_errno_unittest.cc',
+ 'scoped_generic_unittest.cc',
+ 'scoped_native_library_unittest.cc',
+ 'security_unittest.cc',
+ 'sequence_checker_unittest.cc',
+ 'sha1_unittest.cc',
+ 'stl_util_unittest.cc',
+ 'strings/nullable_string16_unittest.cc',
+ 'strings/pattern_unittest.cc',
+ 'strings/safe_sprintf_unittest.cc',
+ 'strings/string16_unittest.cc',
+ 'strings/string_number_conversions_unittest.cc',
+ 'strings/string_piece_unittest.cc',
+ 'strings/string_split_unittest.cc',
+ 'strings/string_tokenizer_unittest.cc',
+ 'strings/string_util_unittest.cc',
+ 'strings/stringize_macros_unittest.cc',
+ 'strings/stringprintf_unittest.cc',
+ 'strings/sys_string_conversions_mac_unittest.mm',
+ 'strings/sys_string_conversions_unittest.cc',
+ 'strings/utf_offset_string_conversions_unittest.cc',
+ 'strings/utf_string_conversions_unittest.cc',
+ 'supports_user_data_unittest.cc',
+ 'sync_socket_unittest.cc',
+ 'synchronization/cancellation_flag_unittest.cc',
+ 'synchronization/condition_variable_unittest.cc',
+ 'synchronization/lock_unittest.cc',
+ 'synchronization/read_write_lock_unittest.cc',
+ 'synchronization/waitable_event_unittest.cc',
+ 'synchronization/waitable_event_watcher_unittest.cc',
+ 'sys_byteorder_unittest.cc',
+ 'sys_info_unittest.cc',
+ 'system_monitor/system_monitor_unittest.cc',
+ 'task/cancelable_task_tracker_unittest.cc',
+ 'task_runner_util_unittest.cc',
+ 'task_scheduler/delayed_task_manager_unittest.cc',
+ 'task_scheduler/priority_queue_unittest.cc',
+ 'task_scheduler/scheduler_lock_unittest.cc',
+ 'task_scheduler/scheduler_service_thread_unittest.cc',
+ 'task_scheduler/scheduler_worker_unittest.cc',
+ 'task_scheduler/scheduler_worker_pool_impl_unittest.cc',
+ 'task_scheduler/scheduler_worker_stack_unittest.cc',
+ 'task_scheduler/sequence_sort_key_unittest.cc',
+ 'task_scheduler/sequence_unittest.cc',
+ 'task_scheduler/task_scheduler_impl_unittest.cc',
+ 'task_scheduler/task_tracker_unittest.cc',
+ 'task_scheduler/test_task_factory.cc',
+ 'task_scheduler/test_task_factory.h',
+ 'task_scheduler/test_utils.h',
+ 'template_util_unittest.cc',
+ 'test/histogram_tester_unittest.cc',
+ 'test/test_pending_task_unittest.cc',
+ 'test/test_reg_util_win_unittest.cc',
+ 'test/trace_event_analyzer_unittest.cc',
+ 'test/user_action_tester_unittest.cc',
+ 'threading/non_thread_safe_unittest.cc',
+ 'threading/platform_thread_unittest.cc',
+ 'threading/sequenced_worker_pool_unittest.cc',
+ 'threading/sequenced_task_runner_handle_unittest.cc',
+ 'threading/simple_thread_unittest.cc',
+ 'threading/thread_checker_unittest.cc',
+ 'threading/thread_collision_warner_unittest.cc',
+ 'threading/thread_id_name_manager_unittest.cc',
+ 'threading/thread_local_storage_unittest.cc',
+ 'threading/thread_local_unittest.cc',
+ 'threading/thread_unittest.cc',
+ 'threading/watchdog_unittest.cc',
+ 'threading/worker_pool_posix_unittest.cc',
+ 'threading/worker_pool_unittest.cc',
+ 'time/pr_time_unittest.cc',
+ 'time/time_unittest.cc',
+ 'time/time_win_unittest.cc',
+ 'timer/hi_res_timer_manager_unittest.cc',
+ 'timer/mock_timer_unittest.cc',
+ 'timer/timer_unittest.cc',
+ 'tools_sanity_unittest.cc',
+ 'tracked_objects_unittest.cc',
+ 'tuple_unittest.cc',
+ 'values_unittest.cc',
+ 'version_unittest.cc',
+ 'vlog_unittest.cc',
+ 'win/dllmain.cc',
+ 'win/enum_variant_unittest.cc',
+ 'win/event_trace_consumer_unittest.cc',
+ 'win/event_trace_controller_unittest.cc',
+ 'win/event_trace_provider_unittest.cc',
+ 'win/i18n_unittest.cc',
+ 'win/iunknown_impl_unittest.cc',
+ 'win/message_window_unittest.cc',
+ 'win/object_watcher_unittest.cc',
+ 'win/pe_image_unittest.cc',
+ 'win/registry_unittest.cc',
+ 'win/scoped_bstr_unittest.cc',
+ 'win/scoped_comptr_unittest.cc',
+ 'win/scoped_handle_unittest.cc',
+ 'win/scoped_process_information_unittest.cc',
+ 'win/scoped_variant_unittest.cc',
+ 'win/shortcut_unittest.cc',
+ 'win/startup_information_unittest.cc',
+ 'win/wait_chain_unittest.cc',
+ 'win/win_util_unittest.cc',
+ 'win/windows_version_unittest.cc',
+ 'win/wrapped_window_proc_unittest.cc',
+ '<@(trace_event_test_sources)',
+ ],
+ 'dependencies': [
+ 'base',
+ 'base_i18n',
+ 'base_message_loop_tests',
+ 'base_static',
+ 'run_all_unittests',
+ 'test_support_base',
+ 'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+ '../testing/gmock.gyp:gmock',
+ '../testing/gtest.gyp:gtest',
+ '../third_party/icu/icu.gyp:icui18n',
+ '../third_party/icu/icu.gyp:icuuc',
+ ],
+ 'includes': ['../build/nocompile.gypi'],
+ 'variables': {
+ # TODO(ajwong): Is there a way to autodetect this?
+ 'module_dir': 'base'
+ },
+ 'conditions': [
+ ['cfi_vptr==1 and cfi_cast==1', {
+ 'defines': [
+ # TODO(krasin): remove CFI_CAST_CHECK, see https://crbug.com/626794.
+ 'CFI_CAST_CHECK',
+ ],
+ }],
+ ['OS == "ios" or OS == "mac"', {
+ 'dependencies': [
+ 'base_unittests_arc',
+ ],
+ }],
+ ['OS == "android"', {
+ 'dependencies': [
+ 'android/jni_generator/jni_generator.gyp:jni_generator_tests',
+ '../testing/android/native_test.gyp:native_test_native_code',
+ ],
+ }],
+ ['OS == "ios" and _toolset != "host"', {
+ 'sources/': [
+ # This test needs multiple processes.
+ ['exclude', '^files/file_locking_unittest\\.cc$'],
+ # iOS does not support FilePathWatcher.
+ ['exclude', '^files/file_path_watcher_unittest\\.cc$'],
+ # Only test the iOS-meaningful portion of memory and process_utils.
+ ['exclude', '^memory/discardable_shared_memory_unittest\\.cc$'],
+ ['exclude', '^memory/shared_memory_unittest\\.cc$'],
+ ['exclude', '^process/memory_unittest'],
+ ['exclude', '^process/process_unittest\\.cc$'],
+ ['exclude', '^process/process_util_unittest\\.cc$'],
+ ['include', '^process/process_util_unittest_ios\\.cc$'],
+ # iOS does not use message_pump_libevent.
+ ['exclude', '^message_loop/message_pump_libevent_unittest\\.cc$'],
+ ],
+ 'actions': [
+ {
+ 'action_name': 'copy_test_data',
+ 'variables': {
+ 'test_data_files': [
+ 'test/data',
+ ],
+ 'test_data_prefix': 'base',
+ },
+ 'includes': [ '../build/copy_test_data_ios.gypi' ],
+ },
+ ],
+ }],
+ ['desktop_linux == 1 or chromeos == 1', {
+ 'defines': [
+ 'USE_SYMBOLIZE',
+ ],
+ 'conditions': [
+ [ 'desktop_linux==1', {
+ 'sources': [
+ 'nix/xdg_util_unittest.cc',
+ ],
+ }],
+ ],
+ }],
+ ['use_glib == 1', {
+ 'dependencies': [
+ '../build/linux/system.gyp:glib',
+ ],
+ }, { # use_glib == 0
+ 'sources!': [
+ 'message_loop/message_pump_glib_unittest.cc',
+ ]
+ }],
+ ['use_ozone == 1', {
+ 'sources!': [
+ 'message_loop/message_pump_glib_unittest.cc',
+ ]
+ }],
+ ['OS == "linux"', {
+ 'dependencies': [
+ 'malloc_wrapper',
+ ],
+ }],
+ [ 'OS == "win" and target_arch == "x64"', {
+ 'sources': [
+ 'profiler/win32_stack_frame_unwinder_unittest.cc',
+ ],
+ 'dependencies': [
+ 'base_profiler_test_support_library',
+ ],
+ }],
+ ['OS == "win"', {
+ 'dependencies': [
+ 'scoped_handle_test_dll'
+ ],
+ 'sources!': [
+ 'file_descriptor_shuffle_unittest.cc',
+ 'files/dir_reader_posix_unittest.cc',
+ 'message_loop/message_pump_libevent_unittest.cc',
+ 'threading/worker_pool_posix_unittest.cc',
+ ],
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ 'msvs_disabled_warnings': [
+ 4267,
+ ],
+ 'conditions': [
+ ['icu_use_data_file_flag==0', {
+ # This is needed to trigger the dll copy step on windows.
+ # TODO(mark): This should not be necessary.
+ 'dependencies': [
+ '../third_party/icu/icu.gyp:icudata',
+ ],
+ }],
+ ],
+ }, { # OS != "win"
+ 'dependencies': [
+ 'third_party/libevent/libevent.gyp:libevent'
+ ],
+ }],
+ ['use_experimental_allocator_shim==1', {
+ 'sources': [ 'allocator/allocator_shim_unittest.cc']
+ }],
+ ], # conditions
+ 'target_conditions': [
+ ['OS == "ios" and _toolset != "host"', {
+ 'sources/': [
+ # Pull in specific Mac files for iOS (which have been filtered out
+ # by file name rules).
+ ['include', '^mac/bind_objc_block_unittest\\.mm$'],
+ ['include', '^mac/foundation_util_unittest\\.mm$',],
+ ['include', '^mac/objc_property_releaser_unittest\\.mm$'],
+ ['include', '^mac/scoped_nsobject_unittest\\.mm$'],
+ ['include', '^sys_string_conversions_mac_unittest\\.mm$'],
+ ],
+ }],
+ ['OS == "android"', {
+ 'sources/': [
+ ['include', '^debug/proc_maps_linux_unittest\\.cc$'],
+ ],
+ }],
+ # Enable more direct string conversions on platforms with native utf8
+ # strings
+ ['OS=="mac" or OS=="ios" or <(chromeos)==1 or <(chromecast)==1', {
+ 'defines': ['SYSTEM_NATIVE_UTF8'],
+ }],
+ # SyncSocket isn't used on iOS
+ ['OS=="ios"', {
+ 'sources!': [
+ 'sync_socket_unittest.cc',
+ ],
+ }],
+ ], # target_conditions
+ },
+ {
+ # GN: //base:base_perftests
+ 'target_name': 'base_perftests',
+ 'type': '<(gtest_target_type)',
+ 'dependencies': [
+ 'base',
+ 'test_support_base',
+ '../testing/gtest.gyp:gtest',
+ ],
+ 'sources': [
+ 'message_loop/message_pump_perftest.cc',
+ 'test/run_all_unittests.cc',
+ 'threading/thread_perftest.cc',
+ '../testing/perf/perf_test.cc'
+ ],
+ 'conditions': [
+ ['OS == "android"', {
+ 'dependencies': [
+ '../testing/android/native_test.gyp:native_test_native_code',
+ ],
+ }],
+ ],
+ },
+ {
+ # GN: //base:base_i18n_perftests
+ 'target_name': 'base_i18n_perftests',
+ 'type': '<(gtest_target_type)',
+ 'dependencies': [
+ 'test_support_base',
+ 'test_support_perf',
+ '../testing/gtest.gyp:gtest',
+ 'base_i18n',
+ 'base',
+ ],
+ 'sources': [
+ 'i18n/streaming_utf8_validator_perftest.cc',
+ ],
+ },
+ {
+ # GN: //base/test:test_support
+ 'target_name': 'test_support_base',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'base',
+ 'base_static',
+ 'base_i18n',
+ '../testing/gmock.gyp:gmock',
+ '../testing/gtest.gyp:gtest',
+ '../third_party/icu/icu.gyp:icuuc',
+ '../third_party/libxml/libxml.gyp:libxml',
+ 'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+ ],
+ 'export_dependent_settings': [
+ 'base',
+ ],
+ 'conditions': [
+ ['os_posix==0', {
+ 'sources!': [
+ 'test/scoped_locale.cc',
+ 'test/scoped_locale.h',
+ ],
+ }],
+ ['os_bsd==1', {
+ 'sources!': [
+ 'test/test_file_util_linux.cc',
+ ],
+ }],
+ ['OS == "android"', {
+ 'dependencies': [
+ 'base_unittests_jni_headers',
+ 'base_java_unittest_support',
+ ],
+ }],
+ ['OS == "ios"', {
+ 'toolsets': ['host', 'target'],
+ }],
+ ],
+ 'sources': [
+ 'test/gtest_util.cc',
+ 'test/gtest_util.h',
+ 'test/gtest_xml_unittest_result_printer.cc',
+ 'test/gtest_xml_unittest_result_printer.h',
+ 'test/gtest_xml_util.cc',
+ 'test/gtest_xml_util.h',
+ 'test/histogram_tester.cc',
+ 'test/histogram_tester.h',
+ 'test/icu_test_util.cc',
+ 'test/icu_test_util.h',
+ 'test/ios/wait_util.h',
+ 'test/ios/wait_util.mm',
+ 'test/launcher/test_launcher.cc',
+ 'test/launcher/test_launcher.h',
+ 'test/launcher/test_launcher_tracer.cc',
+ 'test/launcher/test_launcher_tracer.h',
+ 'test/launcher/test_result.cc',
+ 'test/launcher/test_result.h',
+ 'test/launcher/test_results_tracker.cc',
+ 'test/launcher/test_results_tracker.h',
+ 'test/launcher/unit_test_launcher.cc',
+ 'test/launcher/unit_test_launcher.h',
+ 'test/launcher/unit_test_launcher_ios.cc',
+ 'test/mock_chrome_application_mac.h',
+ 'test/mock_chrome_application_mac.mm',
+ 'test/mock_devices_changed_observer.cc',
+ 'test/mock_devices_changed_observer.h',
+ 'test/mock_entropy_provider.cc',
+ 'test/mock_entropy_provider.h',
+ 'test/mock_log.cc',
+ 'test/mock_log.h',
+ 'test/multiprocess_test.cc',
+ 'test/multiprocess_test.h',
+ 'test/multiprocess_test_android.cc',
+ 'test/null_task_runner.cc',
+ 'test/null_task_runner.h',
+ 'test/opaque_ref_counted.cc',
+ 'test/opaque_ref_counted.h',
+ 'test/perf_log.cc',
+ 'test/perf_log.h',
+ 'test/perf_test_suite.cc',
+ 'test/perf_test_suite.h',
+ 'test/perf_time_logger.cc',
+ 'test/perf_time_logger.h',
+ 'test/power_monitor_test_base.cc',
+ 'test/power_monitor_test_base.h',
+ 'test/scoped_command_line.cc',
+ 'test/scoped_command_line.h',
+ 'test/scoped_locale.cc',
+ 'test/scoped_locale.h',
+ 'test/scoped_path_override.cc',
+ 'test/scoped_path_override.h',
+ 'test/sequenced_task_runner_test_template.cc',
+ 'test/sequenced_task_runner_test_template.h',
+ 'test/sequenced_worker_pool_owner.cc',
+ 'test/sequenced_worker_pool_owner.h',
+ 'test/simple_test_clock.cc',
+ 'test/simple_test_clock.h',
+ 'test/simple_test_tick_clock.cc',
+ 'test/simple_test_tick_clock.h',
+ 'test/task_runner_test_template.cc',
+ 'test/task_runner_test_template.h',
+ 'test/test_discardable_memory_allocator.cc',
+ 'test/test_discardable_memory_allocator.h',
+ 'test/test_file_util.cc',
+ 'test/test_file_util.h',
+ 'test/test_file_util_android.cc',
+ 'test/test_file_util_linux.cc',
+ 'test/test_file_util_mac.cc',
+ 'test/test_file_util_posix.cc',
+ 'test/test_file_util_win.cc',
+ 'test/test_io_thread.cc',
+ 'test/test_io_thread.h',
+ 'test/test_listener_ios.h',
+ 'test/test_listener_ios.mm',
+ 'test/test_message_loop.cc',
+ 'test/test_message_loop.h',
+ 'test/test_mock_time_task_runner.cc',
+ 'test/test_mock_time_task_runner.h',
+ 'test/test_pending_task.cc',
+ 'test/test_pending_task.h',
+ 'test/test_reg_util_win.cc',
+ 'test/test_reg_util_win.h',
+ 'test/test_shortcut_win.cc',
+ 'test/test_shortcut_win.h',
+ 'test/test_simple_task_runner.cc',
+ 'test/test_simple_task_runner.h',
+ 'test/test_suite.cc',
+ 'test/test_suite.h',
+ 'test/test_support_android.cc',
+ 'test/test_support_android.h',
+ 'test/test_support_ios.h',
+ 'test/test_support_ios.mm',
+ 'test/test_switches.cc',
+ 'test/test_switches.h',
+ 'test/test_timeouts.cc',
+ 'test/test_timeouts.h',
+ 'test/test_ui_thread_android.cc',
+ 'test/test_ui_thread_android.h',
+ 'test/thread_test_helper.cc',
+ 'test/thread_test_helper.h',
+ 'test/trace_event_analyzer.cc',
+ 'test/trace_event_analyzer.h',
+ 'test/trace_to_file.cc',
+ 'test/trace_to_file.h',
+ 'test/user_action_tester.cc',
+ 'test/user_action_tester.h',
+ 'test/values_test_util.cc',
+ 'test/values_test_util.h',
+ ],
+ 'target_conditions': [
+ ['OS == "ios"', {
+ 'sources/': [
+ # Pull in specific Mac files for iOS (which have been filtered out
+ # by file name rules).
+ ['include', '^test/test_file_util_mac\\.cc$'],
+ ],
+ }],
+ ['OS == "ios" and _toolset == "target"', {
+ 'sources!': [
+ # iOS uses its own unit test launcher.
+ 'test/launcher/unit_test_launcher.cc',
+ ],
+ }],
+ ['OS == "ios" and _toolset == "host"', {
+ 'sources!': [
+ 'test/launcher/unit_test_launcher_ios.cc',
+ 'test/test_support_ios.h',
+ 'test/test_support_ios.mm',
+ ],
+ }],
+ ], # target_conditions
+ },
+ {
+ 'target_name': 'test_support_perf',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'base',
+ 'test_support_base',
+ '../testing/gtest.gyp:gtest',
+ ],
+ 'sources': [
+ 'test/run_all_perftests.cc',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'PERF_TEST',
+ ],
+ },
+ },
+ {
+ 'target_name': 'test_launcher_nacl_nonsfi',
+ 'conditions': [
+ ['disable_nacl==0 and disable_nacl_untrusted==0 and enable_nacl_nonsfi_test==1', {
+ 'type': 'static_library',
+ 'sources': [
+ 'test/launcher/test_launcher_nacl_nonsfi.cc',
+ ],
+ 'dependencies': [
+ 'test_support_base',
+ ],
+ }, {
+ 'type': 'none',
+ }],
+ ],
+ },
+ {
+ # GN version: //base/debug:debugging_flags
+ # Since this generates a file, it must only be referenced in the target
+ # toolchain or there will be multiple rules that generate the header.
+ # When referenced from a target that might be compiled in the host
+ # toolchain, always refer to 'base_debugging_flags#target'.
+ 'target_name': 'base_debugging_flags',
+ 'includes': [ '../build/buildflag_header.gypi' ],
+ 'variables': {
+ 'buildflag_header_path': 'base/debug/debugging_flags.h',
+ 'buildflag_flags': [
+ 'ENABLE_PROFILING=<(profiling)',
+ ],
+ },
+ },
+ {
+ # GN version: //base/win:base_win_features
+ # Since this generates a file, it must only be referenced in the target
+ # toolchain or there will be multiple rules that generate the header.
+ # When referenced from a target that might be compiled in the host
+ # toolchain, always refer to 'base_win_features#target'.
+ 'target_name': 'base_win_features',
+ 'conditions': [
+ ['OS=="win"', {
+ 'includes': [ '../build/buildflag_header.gypi' ],
+ 'variables': {
+ 'buildflag_header_path': 'base/win/base_features.h',
+ 'buildflag_flags': [
+ 'SINGLE_MODULE_MODE_HANDLE_VERIFIER=<(single_module_mode_handle_verifier)',
+ ],
+ },
+ }, {
+ 'type': 'none',
+ }],
+ ],
+ },
+ {
+ 'type': 'none',
+ 'target_name': 'base_build_date',
+ 'hard_dependency': 1,
+ 'actions': [{
+ 'action_name': 'generate_build_date_headers',
+ 'inputs': [
+ '<(DEPTH)/build/write_build_date_header.py',
+ '<(DEPTH)/build/util/LASTCHANGE'
+ ],
+ 'outputs': [ '<(SHARED_INTERMEDIATE_DIR)/base/generated_build_date.h' ],
+ 'action': [
+ 'python', '<(DEPTH)/build/write_build_date_header.py',
+ '<(SHARED_INTERMEDIATE_DIR)/base/generated_build_date.h',
+ '<(build_type)'
+ ]
+ }],
+ 'conditions': [
+ [ 'buildtype == "Official"', {
+ 'variables': {
+ 'build_type': 'official'
+ }
+ }, {
+ 'variables': {
+ 'build_type': 'default'
+ }
+ }],
+ ]
+ },
+ ],
+ 'conditions': [
+ ['OS=="ios" and "<(GENERATOR)"=="ninja"', {
+ 'targets': [
+ {
+ 'target_name': 'test_launcher',
+ 'toolsets': ['host'],
+ 'type': 'executable',
+ 'dependencies': [
+ 'test_support_base',
+ ],
+ 'sources': [
+ 'test/launcher/test_launcher_ios.cc',
+ ],
+ },
+ ],
+ }],
+ ['OS!="ios"', {
+ 'targets': [
+ {
+ # GN: //base:check_example
+ 'target_name': 'check_example',
+ 'type': 'executable',
+ 'sources': [
+ 'check_example.cc',
+ ],
+ 'dependencies': [
+ 'base',
+ ],
+ },
+ {
+ 'target_name': 'build_utf8_validator_tables',
+ 'type': 'executable',
+ 'toolsets': ['host'],
+ 'dependencies': [
+ 'base',
+ '../third_party/icu/icu.gyp:icuuc',
+ ],
+ 'sources': [
+ 'i18n/build_utf8_validator_tables.cc'
+ ],
+ },
+ ],
+ }],
+ ['OS == "win" and target_arch=="ia32"', {
+ 'targets': [
+ # The base_win64 target here allows us to use base for Win64 targets
+ # (the normal build is 32 bits).
+ {
+ 'target_name': 'base_win64',
+ 'type': '<(component)',
+ 'variables': {
+ 'base_target': 1,
+ },
+ 'dependencies': [
+ 'base_build_date',
+ 'base_debugging_flags#target',
+ 'base_static_win64',
+ '../third_party/modp_b64/modp_b64.gyp:modp_b64_win64',
+ 'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations_win64',
+ 'trace_event/etw_manifest/etw_manifest.gyp:etw_manifest',
+ ],
+ # TODO(gregoryd): direct_dependent_settings should be shared with the
+ # 32-bit target, but it doesn't work due to a bug in gyp
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '..',
+ ],
+ },
+ 'defines': [
+ 'BASE_WIN64',
+ '<@(nacl_win64_defines)',
+ ],
+ 'configurations': {
+ 'Common_Base': {
+ 'msvs_target_platform': 'x64',
+ },
+ },
+ 'conditions': [
+ ['component == "shared_library"', {
+ 'sources!': [
+ 'debug/debug_on_start_win.cc',
+ ],
+ }],
+ ],
+ # Specify delayload for base_win64.dll.
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'DelayLoadDLLs': [
+ 'cfgmgr32.dll',
+ 'powrprof.dll',
+ 'setupapi.dll',
+ ],
+ 'AdditionalDependencies': [
+ 'cfgmgr32.lib',
+ 'powrprof.lib',
+ 'setupapi.lib',
+ 'userenv.lib',
+ 'winmm.lib',
+ ],
+ },
+ },
+ # Specify delayload for components that link with base_win64.lib.
+ 'all_dependent_settings': {
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'DelayLoadDLLs': [
+ 'cfgmgr32.dll',
+ 'powrprof.dll',
+ 'setupapi.dll',
+ ],
+ 'AdditionalDependencies': [
+ 'cfgmgr32.lib',
+ 'powrprof.lib',
+ 'setupapi.lib',
+ 'userenv.lib',
+ 'winmm.lib',
+ ],
+ },
+ },
+ },
+ # TODO(rvargas): Bug 78117. Remove this.
+ 'msvs_disabled_warnings': [
+ 4244,
+ 4996,
+ 4267,
+ ],
+ 'sources': [
+ 'auto_reset.h',
+ 'linux_util.cc',
+ 'linux_util.h',
+ 'md5.cc',
+ 'md5.h',
+ 'message_loop/message_pump_libevent.cc',
+ 'message_loop/message_pump_libevent.h',
+ 'metrics/field_trial.cc',
+ 'metrics/field_trial.h',
+ 'posix/file_descriptor_shuffle.cc',
+ 'posix/file_descriptor_shuffle.h',
+ 'sync_socket.h',
+ 'sync_socket_posix.cc',
+ 'sync_socket_win.cc',
+ 'third_party/xdg_user_dirs/xdg_user_dir_lookup.cc',
+ 'third_party/xdg_user_dirs/xdg_user_dir_lookup.h',
+ ],
+ },
+ {
+ 'target_name': 'base_i18n_nacl_win64',
+ 'type': '<(component)',
+ # TODO(gregoryd): direct_dependent_settings should be shared with the
+ # 32-bit target, but it doesn't work due to a bug in gyp
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '..',
+ ],
+ },
+ 'defines': [
+ '<@(nacl_win64_defines)',
+ 'BASE_I18N_IMPLEMENTATION',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'sources': [
+ 'i18n/icu_util_nacl_win64.cc',
+ ],
+ 'configurations': {
+ 'Common_Base': {
+ 'msvs_target_platform': 'x64',
+ },
+ },
+ },
+ {
+ # TODO(rvargas): Remove this when gyp finally supports a clean model.
+ # See bug 36232.
+ 'target_name': 'base_static_win64',
+ 'type': 'static_library',
+ 'sources': [
+ 'base_switches.cc',
+ 'base_switches.h',
+ 'win/pe_image.cc',
+ 'win/pe_image.h',
+ ],
+ 'sources!': [
+ # base64.cc depends on modp_b64.
+ 'base64.cc',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'configurations': {
+ 'Common_Base': {
+ 'msvs_target_platform': 'x64',
+ },
+ },
+ 'defines': [
+ '<@(nacl_win64_defines)',
+ ],
+ # TODO(rvargas): Bug 78117. Remove this.
+ 'msvs_disabled_warnings': [
+ 4244,
+ ],
+ },
+ ],
+ }],
+ ['OS == "win" and target_arch=="x64"', {
+ 'targets': [
+ {
+ 'target_name': 'base_profiler_test_support_library',
+ # Must be a shared library so that it can be unloaded during testing.
+ 'type': 'shared_library',
+ 'include_dirs': [
+ '..',
+ ],
+ 'sources': [
+ 'profiler/test_support_library.cc',
+ ],
+ },
+ ]
+ }],
+ ['os_posix==1 and OS!="mac" and OS!="ios"', {
+ 'targets': [
+ {
+ 'target_name': 'symbolize',
+ 'type': 'static_library',
+ 'toolsets': ['host', 'target'],
+ 'variables': {
+ 'chromium_code': 0,
+ },
+ 'conditions': [
+ ['OS == "solaris"', {
+ 'include_dirs': [
+ '/usr/gnu/include',
+ '/usr/gnu/include/libelf',
+ ],
+ },],
+ ],
+ 'cflags': [
+ '-Wno-sign-compare',
+ ],
+ 'cflags!': [
+ '-Wextra',
+ ],
+ 'defines': [
+ 'GLOG_BUILD_CONFIG_INCLUDE="build/build_config.h"',
+ ],
+ 'sources': [
+ 'third_party/symbolize/config.h',
+ 'third_party/symbolize/demangle.cc',
+ 'third_party/symbolize/demangle.h',
+ 'third_party/symbolize/glog/logging.h',
+ 'third_party/symbolize/glog/raw_logging.h',
+ 'third_party/symbolize/symbolize.cc',
+ 'third_party/symbolize/symbolize.h',
+ 'third_party/symbolize/utilities.h',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'includes': [
+ '../build/android/increase_size_for_speed.gypi',
+ ],
+ },
+ {
+ 'target_name': 'xdg_mime',
+ 'type': 'static_library',
+ 'toolsets': ['host', 'target'],
+ 'variables': {
+ 'chromium_code': 0,
+ },
+ 'cflags!': [
+ '-Wextra',
+ ],
+ 'sources': [
+ 'third_party/xdg_mime/xdgmime.c',
+ 'third_party/xdg_mime/xdgmime.h',
+ 'third_party/xdg_mime/xdgmimealias.c',
+ 'third_party/xdg_mime/xdgmimealias.h',
+ 'third_party/xdg_mime/xdgmimecache.c',
+ 'third_party/xdg_mime/xdgmimecache.h',
+ 'third_party/xdg_mime/xdgmimeglob.c',
+ 'third_party/xdg_mime/xdgmimeglob.h',
+ 'third_party/xdg_mime/xdgmimeicon.c',
+ 'third_party/xdg_mime/xdgmimeicon.h',
+ 'third_party/xdg_mime/xdgmimeint.c',
+ 'third_party/xdg_mime/xdgmimeint.h',
+ 'third_party/xdg_mime/xdgmimemagic.c',
+ 'third_party/xdg_mime/xdgmimemagic.h',
+ 'third_party/xdg_mime/xdgmimeparent.c',
+ 'third_party/xdg_mime/xdgmimeparent.h',
+ ],
+ 'includes': [
+ '../build/android/increase_size_for_speed.gypi',
+ ],
+ },
+ ],
+ }],
+ ['OS == "linux"', {
+ 'targets': [
+ {
+ 'target_name': 'malloc_wrapper',
+ 'type': 'shared_library',
+ 'dependencies': [
+ 'base',
+ ],
+ 'sources': [
+ 'test/malloc_wrapper.cc',
+ ],
+ }
+ ],
+ }],
+ ['OS == "android"', {
+ 'targets': [
+ {
+ # GN: //base:base_jni_headers
+ 'target_name': 'base_jni_headers',
+ 'type': 'none',
+ 'sources': [
+ 'android/java/src/org/chromium/base/ApkAssets.java',
+ 'android/java/src/org/chromium/base/ApplicationStatus.java',
+ 'android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java',
+ 'android/java/src/org/chromium/base/BuildInfo.java',
+ 'android/java/src/org/chromium/base/Callback.java',
+ 'android/java/src/org/chromium/base/CommandLine.java',
+ 'android/java/src/org/chromium/base/ContentUriUtils.java',
+ 'android/java/src/org/chromium/base/ContextUtils.java',
+ 'android/java/src/org/chromium/base/CpuFeatures.java',
+ 'android/java/src/org/chromium/base/EventLog.java',
+ 'android/java/src/org/chromium/base/FieldTrialList.java',
+ 'android/java/src/org/chromium/base/ImportantFileWriterAndroid.java',
+ 'android/java/src/org/chromium/base/JNIUtils.java',
+ 'android/java/src/org/chromium/base/JavaHandlerThread.java',
+ 'android/java/src/org/chromium/base/LocaleUtils.java',
+ 'android/java/src/org/chromium/base/MemoryPressureListener.java',
+ 'android/java/src/org/chromium/base/PathService.java',
+ 'android/java/src/org/chromium/base/PathUtils.java',
+ 'android/java/src/org/chromium/base/PowerMonitor.java',
+ 'android/java/src/org/chromium/base/SysUtils.java',
+ 'android/java/src/org/chromium/base/SystemMessageHandler.java',
+ 'android/java/src/org/chromium/base/ThreadUtils.java',
+ 'android/java/src/org/chromium/base/TraceEvent.java',
+ 'android/java/src/org/chromium/base/library_loader/LibraryLoader.java',
+ 'android/java/src/org/chromium/base/metrics/RecordHistogram.java',
+ 'android/java/src/org/chromium/base/metrics/RecordUserAction.java',
+ ],
+ 'variables': {
+ 'jni_gen_package': 'base',
+ },
+ 'dependencies': [
+ 'android_runtime_jni_headers',
+ ],
+ 'includes': [ '../build/jni_generator.gypi' ],
+ },
+ {
+ # GN: //base:android_runtime_jni_headers
+ 'target_name': 'android_runtime_jni_headers',
+ 'type': 'none',
+ 'variables': {
+ 'jni_gen_package': 'base',
+ 'input_java_class': 'java/lang/Runtime.class',
+ },
+ 'includes': [ '../build/jar_file_jni_generator.gypi' ],
+ },
+ {
+ # GN: //base:base_unittests_jni_headers
+ 'target_name': 'base_unittests_jni_headers',
+ 'type': 'none',
+ 'sources': [
+ 'test/android/java/src/org/chromium/base/ContentUriTestUtils.java',
+ 'test/android/java/src/org/chromium/base/TestUiThread.java',
+ ],
+ 'variables': {
+ 'jni_gen_package': 'base',
+ },
+ 'includes': [ '../build/jni_generator.gypi' ],
+ },
+ {
+ # GN: //base:base_native_libraries_gen
+ 'target_name': 'base_native_libraries_gen',
+ 'type': 'none',
+ 'sources': [
+ 'android/java/templates/NativeLibraries.template',
+ ],
+ 'variables': {
+ 'package_name': 'org/chromium/base/library_loader',
+ 'template_deps': [],
+ },
+ 'includes': [ '../build/android/java_cpp_template.gypi' ],
+ },
+ {
+ # GN: //base:base_build_config_gen
+ 'target_name': 'base_build_config_gen',
+ 'type': 'none',
+ 'sources': [
+ 'android/java/templates/BuildConfig.template',
+ ],
+ 'variables': {
+ 'package_name': 'org/chromium/base',
+ 'template_deps': [],
+ },
+ 'includes': ['../build/android/java_cpp_template.gypi'],
+ },
+ {
+ # GN: //base:base_android_java_enums_srcjar
+ 'target_name': 'base_java_library_process_type',
+ 'type': 'none',
+ 'variables': {
+ 'source_file': 'android/library_loader/library_loader_hooks.h',
+ },
+ 'includes': [ '../build/android/java_cpp_enum.gypi' ],
+ },
+ {
+ # GN: //base:base_java
+ 'target_name': 'base_java',
+ 'type': 'none',
+ 'variables': {
+ 'java_in_dir': 'android/java',
+ 'jar_excluded_classes': [
+ '*/BuildConfig.class',
+ '*/NativeLibraries.class',
+ ],
+ },
+ 'dependencies': [
+ 'base_java_application_state',
+ 'base_java_library_load_from_apk_status_codes',
+ 'base_java_library_process_type',
+ 'base_java_memory_pressure_level',
+ 'base_build_config_gen',
+ 'base_native_libraries_gen',
+ '../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
+ '../third_party/jsr-305/jsr-305.gyp:jsr_305_javalib',
+ ],
+ 'all_dependent_settings': {
+ 'variables': {
+ 'generate_build_config': 1,
+ },
+ },
+ 'includes': [ '../build/java.gypi' ],
+ },
+ {
+ # GN: //base:base_java_unittest_support
+ 'target_name': 'base_java_unittest_support',
+ 'type': 'none',
+ 'dependencies': [
+ 'base_java',
+ ],
+ 'variables': {
+ 'java_in_dir': '../base/test/android/java',
+ },
+ 'includes': [ '../build/java.gypi' ],
+ },
+ {
+ # GN: //base:base_android_java_enums_srcjar
+ 'target_name': 'base_java_application_state',
+ 'type': 'none',
+ 'variables': {
+ 'source_file': 'android/application_status_listener.h',
+ },
+ 'includes': [ '../build/android/java_cpp_enum.gypi' ],
+ },
+ {
+ # GN: //base:base_android_java_enums_srcjar
+ 'target_name': 'base_java_library_load_from_apk_status_codes',
+ 'type': 'none',
+ 'variables': {
+ 'source_file': 'android/library_loader/library_load_from_apk_status_codes.h'
+ },
+ 'includes': [ '../build/android/java_cpp_enum.gypi' ],
+ },
+ {
+ # GN: //base:base_android_java_enums_srcjar
+ 'target_name': 'base_java_memory_pressure_level',
+ 'type': 'none',
+ 'variables': {
+ 'source_file': 'memory/memory_pressure_listener.h',
+ },
+ 'includes': [ '../build/android/java_cpp_enum.gypi' ],
+ },
+ {
+ # GN: //base:base_java_test_support
+ 'target_name': 'base_java_test_support',
+ 'type': 'none',
+ 'dependencies': [
+ 'base_java',
+ '../testing/android/on_device_instrumentation.gyp:reporter_java',
+ ],
+ 'variables': {
+ 'java_in_dir': '../base/test/android/javatests',
+ },
+ 'includes': [ '../build/java.gypi' ],
+ },
+ {
+ # TODO(jbudorick): Remove this once we roll to robolectric 3.0 and pull
+ # in the multidex shadow library. crbug.com/522043
+ # GN: //base:base_junit_test_support
+ 'target_name': 'base_junit_test_support',
+ 'type': 'none',
+ 'dependencies': [
+ 'base_build_config_gen',
+ '../testing/android/junit/junit_test.gyp:junit_test_support',
+ '../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
+ ],
+ 'variables': {
+ 'src_paths': [
+ '../base/test/android/junit/src/org/chromium/base/test/shadows/ShadowMultiDex.java',
+ ],
+ },
+ 'includes': [ '../build/host_jar.gypi' ]
+ },
+ {
+ # GN: //base:base_junit_tests
+ 'target_name': 'base_junit_tests',
+ 'type': 'none',
+ 'dependencies': [
+ 'base_java',
+ 'base_java_test_support',
+ 'base_junit_test_support',
+ '../testing/android/junit/junit_test.gyp:junit_test_support',
+ ],
+ 'variables': {
+ 'main_class': 'org.chromium.testing.local.JunitTestMain',
+ 'src_paths': [
+ '../base/android/junit/',
+ '../base/test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java',
+ '../base/test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java',
+ '../base/test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java',
+ '../base/test/android/junit/src/org/chromium/base/test/util/SkipCheckTest.java',
+ ],
+ 'test_type': 'junit',
+ 'wrapper_script_name': 'helper/<(_target_name)',
+ },
+ 'includes': [
+ '../build/android/test_runner.gypi',
+ '../build/host_jar.gypi',
+ ],
+ },
+ {
+ # GN: //base:base_javatests
+ 'target_name': 'base_javatests',
+ 'type': 'none',
+ 'dependencies': [
+ 'base_java',
+ 'base_java_test_support',
+ ],
+ 'variables': {
+ 'java_in_dir': '../base/android/javatests',
+ },
+ 'includes': [ '../build/java.gypi' ],
+ },
+ {
+ # GN: //base/android/linker:chromium_android_linker
+ 'target_name': 'chromium_android_linker',
+ 'type': 'shared_library',
+ 'sources': [
+ 'android/linker/android_dlext.h',
+ 'android/linker/legacy_linker_jni.cc',
+ 'android/linker/legacy_linker_jni.h',
+ 'android/linker/linker_jni.cc',
+ 'android/linker/linker_jni.h',
+ 'android/linker/modern_linker_jni.cc',
+ 'android/linker/modern_linker_jni.h',
+ ],
+ # The crazy linker is never instrumented.
+ 'cflags!': [
+ '-finstrument-functions',
+ ],
+ 'dependencies': [
+ # The NDK contains the crazy_linker here:
+ # '<(android_ndk_root)/crazy_linker.gyp:crazy_linker'
+ # However, we use our own fork. See bug 384700.
+ '../third_party/android_crazy_linker/crazy_linker.gyp:crazy_linker',
+ ],
+ },
+ {
+ # GN: //base:base_perftests_apk
+ 'target_name': 'base_perftests_apk',
+ 'type': 'none',
+ 'dependencies': [
+ 'base_perftests',
+ ],
+ 'variables': {
+ 'test_suite_name': 'base_perftests',
+ },
+ 'includes': [ '../build/apk_test.gypi' ],
+ },
+ {
+ # GN: //base:base_unittests_apk
+ 'target_name': 'base_unittests_apk',
+ 'type': 'none',
+ 'dependencies': [
+ 'base_java',
+ 'base_unittests',
+ ],
+ 'variables': {
+ 'test_suite_name': 'base_unittests',
+ 'isolate_file': 'base_unittests.isolate',
+ },
+ 'includes': [ '../build/apk_test.gypi' ],
+ },
+ ],
+ 'conditions': [
+ ['test_isolation_mode != "noop"',
+ {
+ 'targets': [
+ {
+ 'target_name': 'base_unittests_apk_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'base_unittests_apk',
+ ],
+ 'includes': [
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'base_unittests_apk.isolate',
+ ],
+ },
+ ]
+ }
+ ],
+ ],
+ }],
+ ['OS == "win"', {
+ 'targets': [
+ {
+ # Target to manually rebuild pe_image_test.dll which is checked into
+ # base/test/data/pe_image.
+ 'target_name': 'pe_image_test',
+ 'type': 'shared_library',
+ 'sources': [
+ 'win/pe_image_test.cc',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'SubSystem': '2', # Set /SUBSYSTEM:WINDOWS
+ 'DelayLoadDLLs': [
+ 'cfgmgr32.dll',
+ 'shell32.dll',
+ ],
+ 'AdditionalDependencies': [
+ 'cfgmgr32.lib',
+ 'shell32.lib',
+ ],
+ },
+ },
+ },
+ {
+ 'target_name': 'scoped_handle_test_dll',
+ 'type': 'loadable_module',
+ 'dependencies': [
+ 'base',
+ ],
+ 'sources': [
+ 'win/scoped_handle_test_dll.cc',
+ ],
+ },
+ ],
+ }],
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'base_unittests_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'base_unittests',
+ ],
+ 'includes': [
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'base_unittests.isolate',
+ ],
+ },
+ ],
+ }],
+ ['OS == "ios" or OS == "mac"', {
+ 'targets': [
+ {
+ 'target_name': 'base_unittests_arc',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'base',
+ '../testing/gtest.gyp:gtest',
+ ],
+ 'sources': [
+ 'mac/bind_objc_block_unittest_arc.mm',
+ 'mac/scoped_nsobject_unittest_arc.mm'
+ ],
+ 'xcode_settings': {
+ 'CLANG_ENABLE_OBJC_ARC': 'YES',
+ },
+ 'target_conditions': [
+ ['OS == "ios" and _toolset != "host"', {
+ 'sources/': [
+ ['include', 'mac/bind_objc_block_unittest_arc\\.mm$'],
+ ['include', 'mac/scoped_nsobject_unittest_arc\\.mm$'],
+ ],
+ }]
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/libchrome/base/base.gypi b/libchrome/base/base.gypi
new file mode 100644
index 0000000..cb41e79
--- /dev/null
+++ b/libchrome/base/base.gypi
@@ -0,0 +1,1106 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ 'trace_event/trace_event.gypi',
+ ],
+ 'target_defaults': {
+ 'variables': {
+ 'base_target': 0,
+ 'base_i18n_target': 0,
+ },
+ 'target_conditions': [
+ # This part is shared between the targets defined below.
+ ['base_target==1', {
+ 'sources': [
+ '../build/build_config.h',
+ 'allocator/allocator_check.cc',
+ 'allocator/allocator_check.h',
+ 'allocator/allocator_extension.cc',
+ 'allocator/allocator_extension.h',
+ 'android/animation_frame_time_histogram.cc',
+ 'android/animation_frame_time_histogram.h',
+ 'android/apk_assets.cc',
+ 'android/apk_assets.h',
+ 'android/application_status_listener.cc',
+ 'android/application_status_listener.h',
+ 'android/base_jni_onload.cc',
+ 'android/base_jni_onload.h',
+ 'android/base_jni_registrar.cc',
+ 'android/base_jni_registrar.h',
+ 'android/build_info.cc',
+ 'android/build_info.h',
+ 'android/callback_android.cc',
+ 'android/callback_android.h',
+ 'android/command_line_android.cc',
+ 'android/command_line_android.h',
+ 'android/content_uri_utils.cc',
+ 'android/content_uri_utils.h',
+ 'android/context_utils.cc',
+ 'android/context_utils.h',
+ 'android/cpu_features.cc',
+ 'android/cxa_demangle_stub.cc',
+ 'android/event_log.cc',
+ 'android/event_log.h',
+ 'android/field_trial_list.cc',
+ 'android/field_trial_list.h',
+ 'android/fifo_utils.cc',
+ 'android/fifo_utils.h',
+ 'android/important_file_writer_android.cc',
+ 'android/important_file_writer_android.h',
+ 'android/java_handler_thread.cc',
+ 'android/java_handler_thread.h',
+ 'android/java_runtime.cc',
+ 'android/java_runtime.h',
+ 'android/jni_android.cc',
+ 'android/jni_android.h',
+ 'android/jni_array.cc',
+ 'android/jni_array.h',
+ 'android/jni_registrar.cc',
+ 'android/jni_registrar.h',
+ 'android/jni_string.cc',
+ 'android/jni_string.h',
+ 'android/jni_utils.cc',
+ 'android/jni_utils.h',
+ 'android/jni_weak_ref.cc',
+ 'android/jni_weak_ref.h',
+ 'android/library_loader/library_load_from_apk_status_codes.h',
+ 'android/library_loader/library_loader_hooks.cc',
+ 'android/library_loader/library_loader_hooks.h',
+ 'android/library_loader/library_prefetcher.cc',
+ 'android/library_loader/library_prefetcher.h',
+ 'android/locale_utils.cc',
+ 'android/locale_utils.h',
+ 'android/memory_pressure_listener_android.cc',
+ 'android/memory_pressure_listener_android.h',
+ 'android/path_service_android.cc',
+ 'android/path_service_android.h',
+ 'android/path_utils.cc',
+ 'android/path_utils.h',
+ 'android/record_histogram.cc',
+ 'android/record_histogram.h',
+ 'android/record_user_action.cc',
+ 'android/record_user_action.h',
+ 'android/scoped_java_ref.cc',
+ 'android/scoped_java_ref.h',
+ 'android/sys_utils.cc',
+ 'android/sys_utils.h',
+ 'android/thread_utils.h',
+ 'android/trace_event_binding.cc',
+ 'android/trace_event_binding.h',
+ 'at_exit.cc',
+ 'at_exit.h',
+ 'atomic_ref_count.h',
+ 'atomic_sequence_num.h',
+ 'atomicops.h',
+ 'atomicops_internals_portable.h',
+ 'atomicops_internals_x86_msvc.h',
+ 'barrier_closure.cc',
+ 'barrier_closure.h',
+ 'base64.cc',
+ 'base64.h',
+ 'base64url.cc',
+ 'base64url.h',
+ 'base_export.h',
+ 'base_paths.cc',
+ 'base_paths.h',
+ 'base_paths_android.cc',
+ 'base_paths_android.h',
+ 'base_paths_mac.h',
+ 'base_paths_mac.mm',
+ 'base_paths_posix.cc',
+ 'base_paths_posix.h',
+ 'base_paths_win.cc',
+ 'base_paths_win.h',
+ 'base_switches.h',
+ 'big_endian.cc',
+ 'big_endian.h',
+ 'bind.h',
+ 'bind_helpers.cc',
+ 'bind_helpers.h',
+ 'bind_internal.h',
+ 'bit_cast.h',
+ 'bits.h',
+ 'build_time.cc',
+ 'build_time.h',
+ 'callback.h',
+ 'callback_helpers.cc',
+ 'callback_helpers.h',
+ 'callback_internal.cc',
+ 'callback_internal.h',
+ 'callback_list.h',
+ 'cancelable_callback.h',
+ 'command_line.cc',
+ 'command_line.h',
+ 'compiler_specific.h',
+ 'containers/adapters.h',
+ 'containers/hash_tables.h',
+ 'containers/linked_list.h',
+ 'containers/mru_cache.h',
+ 'containers/scoped_ptr_hash_map.h',
+ 'containers/small_map.h',
+ 'containers/stack_container.h',
+ 'cpu.cc',
+ 'cpu.h',
+ 'critical_closure.h',
+ 'critical_closure_internal_ios.mm',
+ 'debug/alias.cc',
+ 'debug/alias.h',
+ 'debug/asan_invalid_access.cc',
+ 'debug/asan_invalid_access.h',
+ 'debug/close_handle_hook_win.cc',
+ 'debug/close_handle_hook_win.h',
+ 'debug/crash_logging.cc',
+ 'debug/crash_logging.h',
+ 'debug/debugger.cc',
+ 'debug/debugger.h',
+ 'debug/debugger_posix.cc',
+ 'debug/debugger_win.cc',
+ 'debug/dump_without_crashing.cc',
+ 'debug/dump_without_crashing.h',
+ 'debug/gdi_debug_util_win.cc',
+ 'debug/gdi_debug_util_win.h',
+ # This file depends on files from the 'allocator' target,
+ # but this target does not depend on 'allocator' (see
+ # allocator.gyp for details).
+ 'debug/leak_annotations.h',
+ 'debug/leak_tracker.h',
+ 'debug/proc_maps_linux.cc',
+ 'debug/proc_maps_linux.h',
+ 'debug/profiler.cc',
+ 'debug/profiler.h',
+ 'debug/stack_trace.cc',
+ 'debug/stack_trace.h',
+ 'debug/stack_trace_android.cc',
+ 'debug/stack_trace_posix.cc',
+ 'debug/stack_trace_win.cc',
+ 'debug/task_annotator.cc',
+ 'debug/task_annotator.h',
+ 'deferred_sequenced_task_runner.cc',
+ 'deferred_sequenced_task_runner.h',
+ 'environment.cc',
+ 'environment.h',
+ 'feature_list.cc',
+ 'feature_list.h',
+ 'file_descriptor_posix.h',
+ 'file_version_info.h',
+ 'file_version_info_mac.h',
+ 'file_version_info_mac.mm',
+ 'file_version_info_win.cc',
+ 'file_version_info_win.h',
+ 'files/dir_reader_fallback.h',
+ 'files/dir_reader_linux.h',
+ 'files/dir_reader_posix.h',
+ 'files/file.cc',
+ 'files/file.h',
+ 'files/file_enumerator.cc',
+ 'files/file_enumerator.h',
+ 'files/file_enumerator_posix.cc',
+ 'files/file_enumerator_win.cc',
+ 'files/file_path.cc',
+ 'files/file_path.h',
+ 'files/file_path_constants.cc',
+ 'files/file_path_watcher.cc',
+ 'files/file_path_watcher.h',
+ 'files/file_path_watcher_fsevents.cc',
+ 'files/file_path_watcher_fsevents.h',
+ 'files/file_path_watcher_kqueue.cc',
+ 'files/file_path_watcher_kqueue.h',
+ 'files/file_path_watcher_linux.cc',
+ 'files/file_path_watcher_mac.cc',
+ 'files/file_path_watcher_stub.cc',
+ 'files/file_path_watcher_win.cc',
+ 'files/file_posix.cc',
+ 'files/file_proxy.cc',
+ 'files/file_proxy.h',
+ 'files/file_tracing.cc',
+ 'files/file_tracing.h',
+ 'files/file_util.cc',
+ 'files/file_util.h',
+ 'files/file_util_android.cc',
+ 'files/file_util_linux.cc',
+ 'files/file_util_mac.mm',
+ 'files/file_util_posix.cc',
+ 'files/file_util_proxy.cc',
+ 'files/file_util_proxy.h',
+ 'files/file_util_win.cc',
+ 'files/file_win.cc',
+ 'files/important_file_writer.cc',
+ 'files/important_file_writer.h',
+ 'files/memory_mapped_file.cc',
+ 'files/memory_mapped_file.h',
+ 'files/memory_mapped_file_posix.cc',
+ 'files/memory_mapped_file_win.cc',
+ 'files/scoped_file.cc',
+ 'files/scoped_file.h',
+ 'files/scoped_temp_dir.cc',
+ 'files/scoped_temp_dir.h',
+ 'format_macros.h',
+ 'gtest_prod_util.h',
+ 'guid.cc',
+ 'guid.h',
+ 'hash.cc',
+ 'hash.h',
+ 'id_map.h',
+ 'ios/block_types.h',
+ 'ios/crb_protocol_observers.h',
+ 'ios/crb_protocol_observers.mm',
+ 'ios/device_util.h',
+ 'ios/device_util.mm',
+ 'ios/ios_util.h',
+ 'ios/ios_util.mm',
+ 'ios/ns_error_util.h',
+ 'ios/ns_error_util.mm',
+ 'ios/scoped_critical_action.h',
+ 'ios/scoped_critical_action.mm',
+ 'ios/weak_nsobject.h',
+ 'ios/weak_nsobject.mm',
+ 'json/json_file_value_serializer.cc',
+ 'json/json_file_value_serializer.h',
+ 'json/json_parser.cc',
+ 'json/json_parser.h',
+ 'json/json_reader.cc',
+ 'json/json_reader.h',
+ 'json/json_string_value_serializer.cc',
+ 'json/json_string_value_serializer.h',
+ 'json/json_value_converter.cc',
+ 'json/json_value_converter.h',
+ 'json/json_writer.cc',
+ 'json/json_writer.h',
+ 'json/string_escape.cc',
+ 'json/string_escape.h',
+ 'lazy_instance.cc',
+ 'lazy_instance.h',
+ 'location.cc',
+ 'location.h',
+ 'logging.cc',
+ 'logging.h',
+ 'logging_win.cc',
+ 'logging_win.h',
+ 'mac/authorization_util.h',
+ 'mac/authorization_util.mm',
+ 'mac/bind_objc_block.h',
+ 'mac/bundle_locations.h',
+ 'mac/bundle_locations.mm',
+ 'mac/call_with_eh_frame.cc',
+ 'mac/call_with_eh_frame.h',
+ 'mac/call_with_eh_frame_asm.S',
+ 'mac/close_nocancel.cc',
+ 'mac/cocoa_protocols.h',
+ 'mac/dispatch_source_mach.cc',
+ 'mac/dispatch_source_mach.h',
+ 'mac/foundation_util.h',
+ 'mac/foundation_util.mm',
+ 'mac/launch_services_util.cc',
+ 'mac/launch_services_util.h',
+ 'mac/launchd.cc',
+ 'mac/launchd.h',
+ 'mac/mac_logging.h',
+ 'mac/mac_logging.mm',
+ 'mac/mac_util.h',
+ 'mac/mac_util.mm',
+ 'mac/mach_logging.cc',
+ 'mac/mach_logging.h',
+ 'mac/mach_port_broker.h',
+ 'mac/mach_port_broker.mm',
+ 'mac/mach_port_util.cc',
+ 'mac/mach_port_util.h',
+ 'mac/objc_property_releaser.h',
+ 'mac/objc_property_releaser.mm',
+ 'mac/os_crash_dumps.cc',
+ 'mac/os_crash_dumps.h',
+ 'mac/scoped_aedesc.h',
+ 'mac/scoped_authorizationref.h',
+ 'mac/scoped_block.h',
+ 'mac/scoped_cftyperef.h',
+ 'mac/scoped_dispatch_object.h',
+ 'mac/scoped_ioobject.h',
+ 'mac/scoped_ioplugininterface.h',
+ 'mac/scoped_launch_data.h',
+ 'mac/scoped_mach_port.cc',
+ 'mac/scoped_mach_port.h',
+ 'mac/scoped_mach_vm.cc',
+ 'mac/scoped_mach_vm.h',
+ 'mac/scoped_nsautorelease_pool.h',
+ 'mac/scoped_nsautorelease_pool.mm',
+ 'mac/scoped_nsobject.h',
+ 'mac/scoped_nsobject.mm',
+ 'mac/scoped_objc_class_swizzler.h',
+ 'mac/scoped_objc_class_swizzler.mm',
+ 'mac/scoped_sending_event.h',
+ 'mac/scoped_sending_event.mm',
+ 'mac/scoped_typeref.h',
+ 'mac/sdk_forward_declarations.h',
+ 'mac/sdk_forward_declarations.mm',
+ 'macros.h',
+ 'md5.cc',
+ 'md5.h',
+ 'memory/aligned_memory.cc',
+ 'memory/aligned_memory.h',
+ 'memory/discardable_memory.cc',
+ 'memory/discardable_memory.h',
+ 'memory/discardable_memory_allocator.cc',
+ 'memory/discardable_memory_allocator.h',
+ 'memory/discardable_shared_memory.cc',
+ 'memory/discardable_shared_memory.h',
+ 'memory/free_deleter.h',
+ 'memory/linked_ptr.h',
+ 'memory/manual_constructor.h',
+ 'memory/memory_pressure_listener.cc',
+ 'memory/memory_pressure_listener.h',
+ 'memory/memory_pressure_monitor.cc',
+ 'memory/memory_pressure_monitor.h',
+ 'memory/memory_pressure_monitor_chromeos.cc',
+ 'memory/memory_pressure_monitor_chromeos.h',
+ 'memory/memory_pressure_monitor_mac.cc',
+ 'memory/memory_pressure_monitor_mac.h',
+ 'memory/memory_pressure_monitor_win.cc',
+ 'memory/memory_pressure_monitor_win.h',
+ 'memory/ptr_util.h',
+ 'memory/raw_scoped_refptr_mismatch_checker.h',
+ 'memory/ref_counted.cc',
+ 'memory/ref_counted.h',
+ 'memory/ref_counted_delete_on_message_loop.h',
+ 'memory/ref_counted_memory.cc',
+ 'memory/ref_counted_memory.h',
+ 'memory/scoped_policy.h',
+ 'memory/scoped_vector.h',
+ 'memory/shared_memory.h',
+ 'memory/shared_memory_android.cc',
+ 'memory/shared_memory_handle.h',
+ 'memory/shared_memory_handle_mac.cc',
+ 'memory/shared_memory_handle_win.cc',
+ 'memory/shared_memory_mac.cc',
+ 'memory/shared_memory_nacl.cc',
+ 'memory/shared_memory_posix.cc',
+ 'memory/shared_memory_win.cc',
+ 'memory/singleton.cc',
+ 'memory/singleton.h',
+ 'memory/weak_ptr.cc',
+ 'memory/weak_ptr.h',
+ 'message_loop/incoming_task_queue.cc',
+ 'message_loop/incoming_task_queue.h',
+ 'message_loop/message_loop.cc',
+ 'message_loop/message_loop.h',
+ 'message_loop/message_loop_task_runner.cc',
+ 'message_loop/message_loop_task_runner.h',
+ 'message_loop/message_pump.cc',
+ 'message_loop/message_pump.h',
+ 'message_loop/message_pump_android.cc',
+ 'message_loop/message_pump_android.h',
+ 'message_loop/message_pump_default.cc',
+ 'message_loop/message_pump_default.h',
+ 'message_loop/message_pump_win.cc',
+ 'message_loop/message_pump_win.h',
+ 'message_loop/timer_slack.h',
+ 'metrics/bucket_ranges.cc',
+ 'metrics/bucket_ranges.h',
+ 'metrics/histogram.cc',
+ 'metrics/histogram.h',
+ 'metrics/histogram_base.cc',
+ 'metrics/histogram_base.h',
+ 'metrics/histogram_delta_serialization.cc',
+ 'metrics/histogram_delta_serialization.h',
+ 'metrics/histogram_flattener.h',
+ 'metrics/histogram_macros.h',
+ 'metrics/histogram_samples.cc',
+ 'metrics/histogram_samples.h',
+ 'metrics/histogram_snapshot_manager.cc',
+ 'metrics/histogram_snapshot_manager.h',
+ 'metrics/metrics_hashes.cc',
+ 'metrics/metrics_hashes.h',
+ 'metrics/persistent_histogram_allocator.cc',
+ 'metrics/persistent_histogram_allocator.h',
+ 'metrics/persistent_memory_allocator.cc',
+ 'metrics/persistent_memory_allocator.h',
+ 'metrics/persistent_sample_map.cc',
+ 'metrics/persistent_sample_map.h',
+ 'metrics/sample_map.cc',
+ 'metrics/sample_map.h',
+ 'metrics/sample_vector.cc',
+ 'metrics/sample_vector.h',
+ 'metrics/sparse_histogram.cc',
+ 'metrics/sparse_histogram.h',
+ 'metrics/statistics_recorder.cc',
+ 'metrics/statistics_recorder.h',
+ 'metrics/user_metrics.cc',
+ 'metrics/user_metrics.h',
+ 'metrics/user_metrics_action.h',
+ 'native_library.h',
+ 'native_library_ios.mm',
+ 'native_library_mac.mm',
+ 'native_library_posix.cc',
+ 'native_library_win.cc',
+ 'nix/mime_util_xdg.cc',
+ 'nix/mime_util_xdg.h',
+ 'nix/xdg_util.cc',
+ 'nix/xdg_util.h',
+ 'numerics/safe_conversions.h',
+ 'numerics/safe_conversions_impl.h',
+ 'numerics/safe_math.h',
+ 'numerics/safe_math_impl.h',
+ 'observer_list.h',
+ 'observer_list_threadsafe.h',
+ 'optional.h',
+ 'os_compat_android.cc',
+ 'os_compat_android.h',
+ 'os_compat_nacl.cc',
+ 'os_compat_nacl.h',
+ 'path_service.cc',
+ 'path_service.h',
+ 'pending_task.cc',
+ 'pending_task.h',
+ 'pickle.cc',
+ 'pickle.h',
+ 'posix/eintr_wrapper.h',
+ 'posix/global_descriptors.cc',
+ 'posix/global_descriptors.h',
+ 'posix/safe_strerror.cc',
+ 'posix/safe_strerror.h',
+ 'posix/unix_domain_socket_linux.cc',
+ 'posix/unix_domain_socket_linux.h',
+ 'power_monitor/power_monitor.cc',
+ 'power_monitor/power_monitor.h',
+ 'power_monitor/power_monitor_device_source.cc',
+ 'power_monitor/power_monitor_device_source.h',
+ 'power_monitor/power_monitor_device_source_android.cc',
+ 'power_monitor/power_monitor_device_source_android.h',
+ 'power_monitor/power_monitor_device_source_chromeos.cc',
+ 'power_monitor/power_monitor_device_source_ios.mm',
+ 'power_monitor/power_monitor_device_source_mac.mm',
+ 'power_monitor/power_monitor_device_source_posix.cc',
+ 'power_monitor/power_monitor_device_source_win.cc',
+ 'power_monitor/power_monitor_source.cc',
+ 'power_monitor/power_monitor_source.h',
+ 'power_monitor/power_observer.h',
+ 'process/internal_linux.cc',
+ 'process/internal_linux.h',
+ 'process/kill.cc',
+ 'process/kill.h',
+ 'process/kill_mac.cc',
+ 'process/kill_posix.cc',
+ 'process/kill_win.cc',
+ 'process/launch.cc',
+ 'process/launch.h',
+ 'process/launch_ios.cc',
+ 'process/launch_mac.cc',
+ 'process/launch_posix.cc',
+ 'process/launch_win.cc',
+ 'process/memory.cc',
+ 'process/memory.h',
+ 'process/memory_linux.cc',
+ 'process/memory_mac.mm',
+ 'process/memory_win.cc',
+ 'process/port_provider_mac.cc',
+ 'process/port_provider_mac.h',
+ 'process/process.h',
+ 'process/process_handle.cc',
+ 'process/process_handle_freebsd.cc',
+ 'process/process_handle_linux.cc',
+ 'process/process_handle_mac.cc',
+ 'process/process_handle_openbsd.cc',
+ 'process/process_handle_posix.cc',
+ 'process/process_handle_win.cc',
+ 'process/process_info.h',
+ 'process/process_info_linux.cc',
+ 'process/process_info_mac.cc',
+ 'process/process_info_win.cc',
+ 'process/process_iterator.cc',
+ 'process/process_iterator.h',
+ 'process/process_iterator_freebsd.cc',
+ 'process/process_iterator_linux.cc',
+ 'process/process_iterator_mac.cc',
+ 'process/process_iterator_openbsd.cc',
+ 'process/process_iterator_win.cc',
+ 'process/process_linux.cc',
+ 'process/process_metrics.cc',
+ 'process/process_metrics.h',
+ 'process/process_metrics_freebsd.cc',
+ 'process/process_metrics_ios.cc',
+ 'process/process_metrics_linux.cc',
+ 'process/process_metrics_mac.cc',
+ 'process/process_metrics_nacl.cc',
+ 'process/process_metrics_openbsd.cc',
+ 'process/process_metrics_posix.cc',
+ 'process/process_metrics_win.cc',
+ 'process/process_posix.cc',
+ 'process/process_win.cc',
+ 'profiler/native_stack_sampler.cc',
+ 'profiler/native_stack_sampler.h',
+ 'profiler/native_stack_sampler_posix.cc',
+ 'profiler/native_stack_sampler_win.cc',
+ 'profiler/scoped_profile.cc',
+ 'profiler/scoped_profile.h',
+ 'profiler/scoped_tracker.cc',
+ 'profiler/scoped_tracker.h',
+ 'profiler/stack_sampling_profiler.cc',
+ 'profiler/stack_sampling_profiler.h',
+ 'profiler/tracked_time.cc',
+ 'profiler/tracked_time.h',
+ 'rand_util.cc',
+ 'rand_util.h',
+ 'rand_util_nacl.cc',
+ 'rand_util_posix.cc',
+ 'rand_util_win.cc',
+ 'run_loop.cc',
+ 'run_loop.h',
+ 'scoped_generic.h',
+ 'scoped_native_library.cc',
+ 'scoped_native_library.h',
+ 'scoped_observer.h',
+ 'sequence_checker.h',
+ 'sequence_checker_impl.cc',
+ 'sequence_checker_impl.h',
+ 'sequenced_task_runner.cc',
+ 'sequenced_task_runner.h',
+ 'sequenced_task_runner_helpers.h',
+ 'sha1.cc',
+ 'sha1.h',
+ 'single_thread_task_runner.h',
+ 'stl_util.h',
+ 'strings/latin1_string_conversions.cc',
+ 'strings/latin1_string_conversions.h',
+ 'strings/nullable_string16.cc',
+ 'strings/nullable_string16.h',
+ 'strings/pattern.cc',
+ 'strings/pattern.h',
+ 'strings/safe_sprintf.cc',
+ 'strings/safe_sprintf.h',
+ 'strings/string16.cc',
+ 'strings/string16.h',
+ 'strings/string_number_conversions.cc',
+ 'strings/string_number_conversions.h',
+ 'strings/string_piece.cc',
+ 'strings/string_piece.h',
+ 'strings/string_split.cc',
+ 'strings/string_split.h',
+ 'strings/string_tokenizer.h',
+ 'strings/string_util.cc',
+ 'strings/string_util.h',
+ 'strings/string_util_constants.cc',
+ 'strings/string_util_posix.h',
+ 'strings/string_util_win.h',
+ 'strings/stringize_macros.h',
+ 'strings/stringprintf.cc',
+ 'strings/stringprintf.h',
+ 'strings/sys_string_conversions.h',
+ 'strings/sys_string_conversions_mac.mm',
+ 'strings/sys_string_conversions_posix.cc',
+ 'strings/sys_string_conversions_win.cc',
+ 'strings/utf_offset_string_conversions.cc',
+ 'strings/utf_offset_string_conversions.h',
+ 'strings/utf_string_conversion_utils.cc',
+ 'strings/utf_string_conversion_utils.h',
+ 'strings/utf_string_conversions.cc',
+ 'strings/utf_string_conversions.h',
+ 'supports_user_data.cc',
+ 'supports_user_data.h',
+ 'synchronization/cancellation_flag.cc',
+ 'synchronization/cancellation_flag.h',
+ 'synchronization/condition_variable.h',
+ 'synchronization/condition_variable_posix.cc',
+ 'synchronization/condition_variable_win.cc',
+ 'synchronization/lock.cc',
+ 'synchronization/lock.h',
+ 'synchronization/lock_impl.h',
+ 'synchronization/lock_impl_posix.cc',
+ 'synchronization/lock_impl_win.cc',
+ 'synchronization/read_write_lock.h',
+ 'synchronization/read_write_lock_nacl.cc',
+ 'synchronization/read_write_lock_posix.cc',
+ 'synchronization/read_write_lock_win.cc',
+ 'synchronization/spin_wait.h',
+ 'synchronization/waitable_event.h',
+ 'synchronization/waitable_event_posix.cc',
+ 'synchronization/waitable_event_watcher.h',
+ 'synchronization/waitable_event_watcher_posix.cc',
+ 'synchronization/waitable_event_watcher_win.cc',
+ 'synchronization/waitable_event_win.cc',
+ 'sys_byteorder.h',
+ 'sys_info.cc',
+ 'sys_info.h',
+ 'sys_info_android.cc',
+ 'sys_info_chromeos.cc',
+ 'sys_info_freebsd.cc',
+ 'sys_info_internal.h',
+ 'sys_info_ios.mm',
+ 'sys_info_linux.cc',
+ 'sys_info_mac.mm',
+ 'sys_info_openbsd.cc',
+ 'sys_info_posix.cc',
+ 'sys_info_win.cc',
+ 'system_monitor/system_monitor.cc',
+ 'system_monitor/system_monitor.h',
+ 'task/cancelable_task_tracker.cc',
+ 'task/cancelable_task_tracker.h',
+ 'task_runner.cc',
+ 'task_runner.h',
+ 'task_runner_util.h',
+ 'task_scheduler/delayed_task_manager.cc',
+ 'task_scheduler/delayed_task_manager.h',
+ 'task_scheduler/priority_queue.cc',
+ 'task_scheduler/priority_queue.h',
+ 'task_scheduler/scheduler_lock.h',
+ 'task_scheduler/scheduler_lock_impl.cc',
+ 'task_scheduler/scheduler_lock_impl.h',
+ 'task_scheduler/scheduler_service_thread.cc',
+ 'task_scheduler/scheduler_service_thread.h',
+ 'task_scheduler/scheduler_worker.cc',
+ 'task_scheduler/scheduler_worker.h',
+ 'task_scheduler/scheduler_worker_pool.h',
+ 'task_scheduler/scheduler_worker_pool_impl.cc',
+ 'task_scheduler/scheduler_worker_pool_impl.h',
+ 'task_scheduler/scheduler_worker_stack.cc',
+ 'task_scheduler/scheduler_worker_stack.h',
+ 'task_scheduler/sequence.cc',
+ 'task_scheduler/sequence.h',
+ 'task_scheduler/sequence_sort_key.cc',
+ 'task_scheduler/sequence_sort_key.h',
+ 'task_scheduler/task.cc',
+ 'task_scheduler/task.h',
+ 'task_scheduler/task_scheduler.cc',
+ 'task_scheduler/task_scheduler.h',
+ 'task_scheduler/task_scheduler_impl.cc',
+ 'task_scheduler/task_scheduler_impl.h',
+ 'task_scheduler/task_tracker.cc',
+ 'task_scheduler/task_tracker.h',
+ 'task_scheduler/task_traits.cc',
+ 'task_scheduler/task_traits.h',
+ 'template_util.h',
+ 'third_party/dmg_fp/dmg_fp.h',
+ 'third_party/dmg_fp/dtoa_wrapper.cc',
+ 'third_party/dmg_fp/g_fmt.cc',
+ 'third_party/icu/icu_utf.cc',
+ 'third_party/icu/icu_utf.h',
+ 'third_party/nspr/prtime.cc',
+ 'third_party/nspr/prtime.h',
+ 'third_party/superfasthash/superfasthash.c',
+ 'third_party/xdg_mime/xdgmime.h',
+ 'threading/non_thread_safe.h',
+ 'threading/non_thread_safe_impl.cc',
+ 'threading/non_thread_safe_impl.h',
+ 'threading/platform_thread.h',
+ 'threading/platform_thread_android.cc',
+ 'threading/platform_thread_internal_posix.cc',
+ 'threading/platform_thread_internal_posix.h',
+ 'threading/platform_thread_linux.cc',
+ 'threading/platform_thread_mac.mm',
+ 'threading/platform_thread_posix.cc',
+ 'threading/platform_thread_win.cc',
+ 'threading/post_task_and_reply_impl.cc',
+ 'threading/post_task_and_reply_impl.h',
+ 'threading/sequenced_task_runner_handle.cc',
+ 'threading/sequenced_task_runner_handle.h',
+ 'threading/sequenced_worker_pool.cc',
+ 'threading/sequenced_worker_pool.h',
+ 'threading/simple_thread.cc',
+ 'threading/simple_thread.h',
+ 'threading/thread.cc',
+ 'threading/thread.h',
+ 'threading/thread_checker.h',
+ 'threading/thread_checker_impl.cc',
+ 'threading/thread_checker_impl.h',
+ 'threading/thread_collision_warner.cc',
+ 'threading/thread_collision_warner.h',
+ 'threading/thread_id_name_manager.cc',
+ 'threading/thread_id_name_manager.h',
+ 'threading/thread_local.h',
+ 'threading/thread_local_android.cc',
+ 'threading/thread_local_posix.cc',
+ 'threading/thread_local_storage.cc',
+ 'threading/thread_local_storage.h',
+ 'threading/thread_local_storage_posix.cc',
+ 'threading/thread_local_storage_win.cc',
+ 'threading/thread_local_win.cc',
+ 'threading/thread_restrictions.cc',
+ 'threading/thread_restrictions.h',
+ 'threading/thread_task_runner_handle.cc',
+ 'threading/thread_task_runner_handle.h',
+ 'threading/watchdog.cc',
+ 'threading/watchdog.h',
+ 'threading/worker_pool.cc',
+ 'threading/worker_pool.h',
+ 'threading/worker_pool_posix.cc',
+ 'threading/worker_pool_posix.h',
+ 'threading/worker_pool_win.cc',
+ 'time/clock.cc',
+ 'time/clock.h',
+ 'time/default_clock.cc',
+ 'time/default_clock.h',
+ 'time/default_tick_clock.cc',
+ 'time/default_tick_clock.h',
+ 'time/tick_clock.cc',
+ 'time/tick_clock.h',
+ 'time/time.cc',
+ 'time/time.h',
+ 'time/time_mac.cc',
+ 'time/time_posix.cc',
+ 'time/time_win.cc',
+ 'timer/elapsed_timer.cc',
+ 'timer/elapsed_timer.h',
+ 'timer/hi_res_timer_manager.h',
+ 'timer/hi_res_timer_manager_posix.cc',
+ 'timer/hi_res_timer_manager_win.cc',
+ 'timer/mock_timer.cc',
+ 'timer/mock_timer.h',
+ 'timer/timer.cc',
+ 'timer/timer.h',
+ 'tracked_objects.cc',
+ 'tracked_objects.h',
+ 'tracking_info.cc',
+ 'tracking_info.h',
+ 'tuple.h',
+ 'value_conversions.cc',
+ 'value_conversions.h',
+ 'values.cc',
+ 'values.h',
+ 'version.cc',
+ 'version.h',
+ 'vlog.cc',
+ 'vlog.h',
+ 'win/enum_variant.cc',
+ 'win/enum_variant.h',
+ 'win/event_trace_consumer.h',
+ 'win/event_trace_controller.cc',
+ 'win/event_trace_controller.h',
+ 'win/event_trace_provider.cc',
+ 'win/event_trace_provider.h',
+ 'win/i18n.cc',
+ 'win/i18n.h',
+ 'win/iat_patch_function.cc',
+ 'win/iat_patch_function.h',
+ 'win/iunknown_impl.cc',
+ 'win/iunknown_impl.h',
+ 'win/message_window.cc',
+ 'win/message_window.h',
+ 'win/object_watcher.cc',
+ 'win/object_watcher.h',
+ 'win/process_startup_helper.cc',
+ 'win/process_startup_helper.h',
+ 'win/registry.cc',
+ 'win/registry.h',
+ 'win/resource_util.cc',
+ 'win/resource_util.h',
+ 'win/scoped_bstr.cc',
+ 'win/scoped_bstr.h',
+ 'win/scoped_co_mem.h',
+ 'win/scoped_com_initializer.h',
+ 'win/scoped_comptr.h',
+ 'win/scoped_gdi_object.h',
+ 'win/scoped_handle.cc',
+ 'win/scoped_handle.h',
+ 'win/scoped_hdc.h',
+ 'win/scoped_hglobal.h',
+ 'win/scoped_process_information.cc',
+ 'win/scoped_process_information.h',
+ 'win/scoped_propvariant.h',
+ 'win/scoped_select_object.h',
+ 'win/scoped_variant.cc',
+ 'win/scoped_variant.h',
+ 'win/shortcut.cc',
+ 'win/shortcut.h',
+ 'win/startup_information.cc',
+ 'win/startup_information.h',
+ 'win/wait_chain.cc',
+ 'win/wait_chain.h',
+ 'win/win_util.cc',
+ 'win/win_util.h',
+ 'win/windows_version.cc',
+ 'win/windows_version.h',
+ 'win/wrapped_window_proc.cc',
+ 'win/wrapped_window_proc.h',
+ '<@(trace_event_sources)',
+ ],
+ 'defines': [
+ 'BASE_IMPLEMENTATION',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'target_conditions': [
+ ['OS == "mac" or OS == "ios"', {
+ 'sources!': [
+ 'memory/shared_memory_posix.cc',
+ ],
+ }],
+ ['OS == "ios"', {
+ 'sources!': [
+ 'memory/discardable_shared_memory.cc',
+ 'memory/discardable_shared_memory.h',
+ ],
+ }],
+ ['(<(desktop_linux) == 0 and <(chromeos) == 0) or >(nacl_untrusted_build)==1', {
+ 'sources/': [
+ ['exclude', '^nix/'],
+ ],
+ }],
+ ['<(use_glib)==0 or >(nacl_untrusted_build)==1', {
+ 'sources!': [
+ 'message_loop/message_pump_glib.cc',
+ ],
+ }],
+ ['(OS != "linux" and <(os_bsd) != 1 and OS != "android") or >(nacl_untrusted_build)==1', {
+ 'sources!': [
+ # Not automatically excluded by the *linux.cc rules.
+ 'linux_util.cc',
+ ],
+ },
+ ],
+ ['>(nacl_untrusted_build)==1', {
+ 'sources!': [
+ 'base_paths.cc',
+ 'cpu.cc',
+ 'debug/stack_trace.cc',
+ 'debug/stack_trace_posix.cc',
+ 'files/file_enumerator_posix.cc',
+ 'files/file_path_watcher_fsevents.cc',
+ 'files/file_path_watcher_fsevents.h',
+ 'files/file_path_watcher_kqueue.cc',
+ 'files/file_path_watcher_kqueue.h',
+ 'files/file_proxy.cc',
+ 'files/file_util.cc',
+ 'files/file_util_posix.cc',
+ 'files/file_util_proxy.cc',
+ 'files/important_file_writer.cc',
+ 'files/scoped_temp_dir.cc',
+ 'memory/shared_memory_posix.cc',
+ 'native_library_posix.cc',
+ 'path_service.cc',
+ 'posix/unix_domain_socket_linux.cc',
+ 'process/kill.cc',
+ 'process/kill_posix.cc',
+ 'process/launch.cc',
+ 'process/launch_posix.cc',
+ 'process/process_metrics.cc',
+ 'process/process_metrics_posix.cc',
+ 'process/process_posix.cc',
+ 'rand_util_posix.cc',
+ 'scoped_native_library.cc',
+ 'synchronization/read_write_lock_posix.cc',
+ 'sys_info.cc',
+ 'sys_info_posix.cc',
+ 'third_party/dynamic_annotations/dynamic_annotations.c',
+ ],
+ 'sources/': [
+ ['include', '^threading/platform_thread_linux\\.cc$'],
+ ],
+ }],
+ ['OS == "android" and >(nacl_untrusted_build)==0', {
+ 'sources!': [
+ 'base_paths_posix.cc',
+ 'files/file_path_watcher_fsevents.cc',
+ 'files/file_path_watcher_fsevents.h',
+ 'files/file_path_watcher_kqueue.cc',
+ 'files/file_path_watcher_kqueue.h',
+ 'files/file_path_watcher_stub.cc',
+ 'power_monitor/power_monitor_device_source_posix.cc',
+ ],
+ 'sources/': [
+ ['include', '^debug/proc_maps_linux\\.cc$'],
+ ['include', '^files/file_path_watcher_linux\\.cc$'],
+ ['include', '^process/memory_linux\\.cc$'],
+ ['include', '^process/internal_linux\\.cc$'],
+ ['include', '^process/process_handle_linux\\.cc$'],
+ ['include', '^process/process_iterator\\.cc$'],
+ ['include', '^process/process_iterator_linux\\.cc$'],
+ ['include', '^process/process_metrics_linux\\.cc$'],
+ ['include', '^posix/unix_domain_socket_linux\\.cc$'],
+ ['include', '^strings/sys_string_conversions_posix\\.cc$'],
+ ['include', '^sys_info_linux\\.cc$'],
+ ['include', '^worker_pool_linux\\.cc$'],
+ ],
+ }],
+ ['OS == "android" and _toolset == "host" and host_os == "linux"', {
+ 'sources/': [
+ # Pull in specific files for host builds.
+ ['include', '^threading/platform_thread_linux\\.cc$'],
+ ],
+ }],
+ ['<(chromeos) == 1', {
+ 'sources!': [
+ 'power_monitor/power_monitor_device_source_posix.cc',
+ ],
+ }],
+ ['OS == "ios" and _toolset != "host"', {
+ 'sources/': [
+ # Pull in specific Mac files for iOS (which have been filtered out
+ # by file name rules).
+ ['include', '^base_paths_mac\\.'],
+ ['include', '^files/file_util_mac\\.'],
+ ['include', '^file_version_info_mac\\.'],
+ ['include', '^mac/bundle_locations\\.'],
+ ['include', '^mac/call_with_eh_frame\\.'],
+ ['include', '^mac/foundation_util\\.'],
+ ['include', '^mac/mac_logging\\.'],
+ ['include', '^mac/mach_logging\\.'],
+ ['include', '^mac/objc_property_releaser\\.'],
+ ['include', '^mac/scoped_block\\.'],
+ ['include', '^mac/scoped_mach_port\\.'],
+ ['include', '^mac/scoped_mach_vm\\.'],
+ ['include', '^mac/scoped_nsautorelease_pool\\.'],
+ ['include', '^mac/scoped_nsobject\\.'],
+ ['include', '^mac/scoped_objc_class_swizzler\\.'],
+ ['include', '^memory/shared_memory_posix\\.'],
+ ['include', '^message_loop/message_pump_mac\\.'],
+ ['include', '^strings/sys_string_conversions_mac\\.'],
+ ['include', '^threading/platform_thread_mac\\.'],
+ ['include', '^time/time_mac\\.'],
+ ['include', '^worker_pool_mac\\.'],
+ # Exclude all process/ except the minimal implementation
+ # needed on iOS (mostly for unit tests).
+ ['exclude', '^process/.*'],
+ ['include', '^process/.*_ios\.(cc|mm)$'],
+ ['include', '^process/memory_stubs\.cc$'],
+ ['include', '^process/process_handle_posix\.cc$'],
+ ['include', '^process/process_metrics\\.cc$'],
+ # Exclude unsupported features on iOS.
+ ['exclude', '^files/file_path_watcher.*'],
+ ['exclude', '^threading/platform_thread_internal_posix\\.(h|cc)'],
+ ['exclude', '^trace_event/malloc_dump_provider\\.(h|cc)$'],
+ ],
+ 'sources': [
+ 'process/memory_stubs.cc',
+ ],
+ 'sources!': [
+ 'message_loop/message_pump_libevent.cc'
+ ],
+ }],
+ ['OS == "ios" and _toolset == "host"', {
+ 'sources/': [
+ # Copied filename_rules to switch from iOS to Mac inclusions.
+ ['include', '_(cocoa|mac)(_unittest)?\\.(h|cc|mm?)$'],
+ ['include', '(^|/)(cocoa|mac)/'],
+ ['exclude', '_ios(_unittest)?\\.(h|cc|mm?)$'],
+ ['exclude', '(^|/)ios/'],
+ ['exclude', 'files/file_path_watcher_fsevents.cc'],
+ ['exclude', 'files/file_path_watcher_fsevents.h'],
+ ['include', 'files/file_path_watcher_mac.cc'],
+ ]
+ }],
+ # For now, just test the *BSD platforms enough to exclude them.
+ # Subsequent changes will include them further.
+ ['OS != "freebsd" or >(nacl_untrusted_build)==1', {
+ 'sources/': [ ['exclude', '_freebsd\\.cc$'] ],
+ },
+ ],
+ ['OS != "openbsd" or >(nacl_untrusted_build)==1', {
+ 'sources/': [ ['exclude', '_openbsd\\.cc$'] ],
+ },
+ ],
+ ['OS == "win" and >(nacl_untrusted_build)==0', {
+ 'include_dirs': [
+ '<(DEPTH)/third_party/wtl/include',
+ ],
+ 'sources': [
+ 'profiler/win32_stack_frame_unwinder.cc',
+ 'profiler/win32_stack_frame_unwinder.h',
+ ],
+ 'sources!': [
+ 'files/file_path_watcher_fsevents.cc',
+ 'files/file_path_watcher_fsevents.h',
+ 'files/file_path_watcher_kqueue.cc',
+ 'files/file_path_watcher_kqueue.h',
+ 'files/file_path_watcher_stub.cc',
+ 'message_loop/message_pump_libevent.cc',
+ 'posix/file_descriptor_shuffle.cc',
+ 'strings/string16.cc',
+ ],
+ },],
+ ['<(use_ozone) == 1', {
+ 'sources!': [
+ 'message_loop/message_pump_glib.cc',
+ ]
+ }],
+ ['OS == "linux" and >(nacl_untrusted_build)==0', {
+ 'sources!': [
+ 'files/file_path_watcher_fsevents.cc',
+ 'files/file_path_watcher_fsevents.h',
+ 'files/file_path_watcher_kqueue.cc',
+ 'files/file_path_watcher_kqueue.h',
+ 'files/file_path_watcher_stub.cc',
+ ],
+ }],
+ ['(OS == "mac" or OS == "ios") and >(nacl_untrusted_build)==0', {
+ 'sources/': [
+ ['exclude', '^base_paths_posix\\.cc$'],
+ ['exclude', '^files/file_path_watcher_stub\\.cc$'],
+ ['exclude', '^native_library_posix\\.cc$'],
+ ['exclude', '^strings/sys_string_conversions_posix\\.cc$'],
+ ['exclude', '^threading/platform_thread_internal_posix\\.cc$'],
+ ],
+ }],
+ ['<(os_bsd)==1 and >(nacl_untrusted_build)==0', {
+ 'sources': [
+ 'process/memory_stubs.cc',
+ ],
+ 'sources/': [
+ ['exclude', '^files/file_path_watcher_linux\\.cc$'],
+ ['exclude', '^files/file_path_watcher_stub\\.cc$'],
+ ['exclude', '^files/file_util_linux\\.cc$'],
+ ['exclude', '^process/process_linux\\.cc$'],
+ ['exclude', '^sys_info_linux\\.cc$'],
+ ],
+ }],
+ # Remove all unnecessary files for build_nexe.py to avoid exceeding
+ # command-line-string limitation when building NaCl on Windows.
+ ['OS == "win" and >(nacl_untrusted_build)==1', {
+ 'sources/': [ ['exclude', '\\.h$'] ],
+ }],
+ # Enable more direct string conversions on platforms with native utf8
+ # strings
+ ['OS=="mac" or OS=="ios" or <(chromeos)==1 or <(chromecast)==1', {
+ 'defines': ['SYSTEM_NATIVE_UTF8'],
+ }],
+ ],
+ }],
+ ['base_i18n_target==1', {
+ 'defines': [
+ 'BASE_I18N_IMPLEMENTATION',
+ ],
+ 'sources': [
+ 'i18n/base_i18n_export.h',
+ 'i18n/base_i18n_switches.cc',
+ 'i18n/base_i18n_switches.h',
+ 'i18n/bidi_line_iterator.cc',
+ 'i18n/bidi_line_iterator.h',
+ 'i18n/break_iterator.cc',
+ 'i18n/break_iterator.h',
+ 'i18n/case_conversion.cc',
+ 'i18n/case_conversion.h',
+ 'i18n/char_iterator.cc',
+ 'i18n/char_iterator.h',
+ 'i18n/file_util_icu.cc',
+ 'i18n/file_util_icu.h',
+ 'i18n/i18n_constants.cc',
+ 'i18n/i18n_constants.h',
+ 'i18n/icu_encoding_detection.cc',
+ 'i18n/icu_encoding_detection.h',
+ 'i18n/icu_string_conversions.cc',
+ 'i18n/icu_string_conversions.h',
+ 'i18n/icu_util.cc',
+ 'i18n/icu_util.h',
+ 'i18n/message_formatter.cc',
+ 'i18n/message_formatter.h',
+ 'i18n/number_formatting.cc',
+ 'i18n/number_formatting.h',
+ 'i18n/rtl.cc',
+ 'i18n/rtl.h',
+ 'i18n/streaming_utf8_validator.cc',
+ 'i18n/streaming_utf8_validator.h',
+ 'i18n/string_compare.cc',
+ 'i18n/string_compare.h',
+ 'i18n/string_search.cc',
+ 'i18n/string_search.h',
+ 'i18n/time_formatting.cc',
+ 'i18n/time_formatting.h',
+ 'i18n/timezone.cc',
+ 'i18n/timezone.h',
+ 'i18n/utf8_validator_tables.cc',
+ 'i18n/utf8_validator_tables.h',
+ ],
+ }]
+ ],
+ },
+}
diff --git a/libchrome/base/base.isolate b/libchrome/base/base.isolate
new file mode 100644
index 0000000..079d07d
--- /dev/null
+++ b/libchrome/base/base.isolate
@@ -0,0 +1,60 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'includes': [
+ # While the target 'base' doesn't depend on ../third_party/icu/icu.gyp
+  # itself, virtually all targets using it have to include icu. The only
+ # exception is the Windows sandbox (?).
+ '../third_party/icu/icu.isolate',
+ # Sanitizer-instrumented third-party libraries (if enabled).
+ '../third_party/instrumented_libraries/instrumented_libraries.isolate',
+ # MSVS runtime libraries.
+ '../build/config/win/msvs_dependencies.isolate',
+ ],
+ 'conditions': [
+ ['use_custom_libcxx==1', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/lib/libc++.so',
+ ],
+ },
+ }],
+ ['OS=="mac" and asan==1', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/libclang_rt.asan_osx_dynamic.dylib',
+ ],
+ },
+ }],
+ ['OS=="win" and asan==1 and component=="shared_library"', {
+ 'variables': {
+ 'files': [
+ # We only need x.y.z/lib/windows/clang_rt.asan_dynamic-i386.dll,
+ # but since the version (x.y.z) changes, just grab the whole dir.
+ '../third_party/llvm-build/Release+Asserts/lib/clang/',
+ ],
+ },
+ }],
+ ['OS=="linux" and (asan==1 or lsan==1 or msan==1 or tsan==1)', {
+ 'variables': {
+ 'files': [
+ # For llvm-symbolizer.
+ '../third_party/llvm-build/Release+Asserts/lib/libstdc++.so.6',
+ ],
+ },
+ }],
+ ['asan==1 or lsan==1 or msan==1 or tsan==1', {
+ 'variables': {
+ 'files': [
+ '../tools/valgrind/asan/',
+ '../third_party/llvm-build/Release+Asserts/bin/llvm-symbolizer<(EXECUTABLE_SUFFIX)',
+ ],
+ },
+ }],
+ # Workaround for https://code.google.com/p/swarming/issues/detail?id=211
+ ['asan==0 or lsan==0 or msan==0 or tsan==0', {
+ 'variables': {},
+ }],
+ ],
+}
diff --git a/libchrome/base/base64.cc b/libchrome/base/base64.cc
new file mode 100644
index 0000000..61d8538
--- /dev/null
+++ b/libchrome/base/base64.cc
@@ -0,0 +1,39 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base64.h"
+
+#include <stddef.h>
+
+#include <modp_b64/modp_b64.h>
+
+namespace base {
+
+// Encodes |input| as standard base64 into |*output| via the modp_b64 library.
+// Safe for in-place use (|input| may alias |*output|): the result is built in
+// a local buffer and only swapped into |*output| at the end.
+void Base64Encode(const StringPiece& input, std::string* output) {
+  std::string temp;
+  temp.resize(modp_b64_encode_len(input.size()));  // makes room for null byte
+
+  // modp_b64_encode_len() returns at least 1, so temp[0] is safe to use.
+  size_t output_size = modp_b64_encode(&(temp[0]), input.data(), input.size());
+
+  temp.resize(output_size);  // strips off null byte
+  output->swap(temp);
+}
+
+// Decodes base64 |input| into |*output|. Returns false when modp_b64_decode()
+// reports MODP_B64_ERROR (malformed input), in which case |*output| is left
+// untouched. Safe for in-place use: the result is built in a local buffer and
+// only swapped into |*output| on success.
+bool Base64Decode(const StringPiece& input, std::string* output) {
+  std::string temp;
+  temp.resize(modp_b64_decode_len(input.size()));
+
+  // does not null terminate result since result is binary data!
+  size_t input_size = input.size();
+  size_t output_size = modp_b64_decode(&(temp[0]), input.data(), input_size);
+  if (output_size == MODP_B64_ERROR)
+    return false;
+
+  temp.resize(output_size);
+  output->swap(temp);
+  return true;
+}
+
+} // namespace base
diff --git a/libchrome/base/base64.h b/libchrome/base/base64.h
new file mode 100644
index 0000000..dd72c39
--- /dev/null
+++ b/libchrome/base/base64.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE64_H_
+#define BASE_BASE64_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// Encodes the input string in base64. The encoding can be done in-place.
+BASE_EXPORT void Base64Encode(const StringPiece& input, std::string* output);
+
+// Decodes the base64 input string. Returns true if successful and false
+// otherwise. The output string is only modified if successful. The decoding can
+// be done in-place.
+BASE_EXPORT bool Base64Decode(const StringPiece& input, std::string* output);
+
+} // namespace base
+
+#endif // BASE_BASE64_H_
diff --git a/libchrome/base/base64_unittest.cc b/libchrome/base/base64_unittest.cc
new file mode 100644
index 0000000..91651f4
--- /dev/null
+++ b/libchrome/base/base64_unittest.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base64.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Round-trips a known vector through Base64Encode()/Base64Decode(), checking
+// the encoded form against the expected RFC 4648 base64 string.
+TEST(Base64Test, Basic) {
+  const std::string kText = "hello world";
+  const std::string kBase64Text = "aGVsbG8gd29ybGQ=";
+
+  std::string encoded;
+  std::string decoded;
+  bool ok;
+
+  Base64Encode(kText, &encoded);
+  EXPECT_EQ(kBase64Text, encoded);
+
+  ok = Base64Decode(encoded, &decoded);
+  EXPECT_TRUE(ok);
+  EXPECT_EQ(kText, decoded);
+}
+
+// Exercises the documented in-place contract: the same std::string is used as
+// both the input and the output of encode and then of decode.
+TEST(Base64Test, InPlace) {
+  const std::string kText = "hello world";
+  const std::string kBase64Text = "aGVsbG8gd29ybGQ=";
+  std::string text(kText);
+
+  Base64Encode(text, &text);
+  EXPECT_EQ(kBase64Text, text);
+
+  bool ok = Base64Decode(text, &text);
+  EXPECT_TRUE(ok);
+  EXPECT_EQ(text, kText);
+}
+
+} // namespace base
diff --git a/libchrome/base/base64url.cc b/libchrome/base/base64url.cc
new file mode 100644
index 0000000..942229e
--- /dev/null
+++ b/libchrome/base/base64url.cc
@@ -0,0 +1,102 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base64url.h"
+
+#include <stddef.h>
+
+#include "base/base64.h"
+#include "base/macros.h"
+#include "base/numerics/safe_math.h"
+#include "base/strings/string_util.h"
+
+#include <modp_b64/modp_b64.h>
+
+namespace base {
+
+const char kPaddingChar = '=';
+
+// Base64url maps {+, /} to {-, _} in order for the encoded content to be safe
+// to use in a URL. These characters will be translated by this implementation.
+const char kBase64Chars[] = "+/";
+const char kBase64UrlSafeChars[] = "-_";
+
+// Encodes |input| as base64url: standard base64 with '+' and '/' replaced by
+// the URL-safe '-' and '_'. Trailing '=' padding is kept or stripped
+// according to |policy|.
+void Base64UrlEncode(const StringPiece& input,
+                     Base64UrlEncodePolicy policy,
+                     std::string* output) {
+  Base64Encode(input, output);
+
+  // Translate the two characters that differ between the base64 and the
+  // base64url alphabets.
+  ReplaceChars(*output, "+", "-", output);
+  ReplaceChars(*output, "/", "_", output);
+
+  switch (policy) {
+    case Base64UrlEncodePolicy::INCLUDE_PADDING:
+      // The padding included in |*output| will not be amended.
+      break;
+    case Base64UrlEncodePolicy::OMIT_PADDING:
+      // The padding included in |*output| will be removed.
+      const size_t last_non_padding_pos =
+          output->find_last_not_of(kPaddingChar);
+      if (last_non_padding_pos != std::string::npos)
+        output->resize(last_non_padding_pos + 1);
+
+      break;
+  }
+}
+
+// Decodes base64url |input| into |*output|. Rejects input containing the
+// conventional base64 characters '+' or '/', enforces the padding |policy|,
+// then normalizes the input (translating URL-safe characters back and
+// appending any missing '=' padding) before delegating to Base64Decode().
+// Returns false on rejection or if the underlying decode fails.
+bool Base64UrlDecode(const StringPiece& input,
+                     Base64UrlDecodePolicy policy,
+                     std::string* output) {
+  // Characters outside of the base64url alphabet are disallowed, which includes
+  // the {+, /} characters found in the conventional base64 alphabet.
+  if (input.find_first_of(kBase64Chars) != std::string::npos)
+    return false;
+
+  // Number of '=' characters needed to pad |input| to a multiple of four.
+  const size_t required_padding_characters = input.size() % 4;
+  const bool needs_replacement =
+      input.find_first_of(kBase64UrlSafeChars) != std::string::npos;
+
+  switch (policy) {
+    case Base64UrlDecodePolicy::REQUIRE_PADDING:
+      // Fail if the required padding is not included in |input|.
+      if (required_padding_characters > 0)
+        return false;
+      break;
+    case Base64UrlDecodePolicy::IGNORE_PADDING:
+      // Missing padding will be silently appended.
+      break;
+    case Base64UrlDecodePolicy::DISALLOW_PADDING:
+      // Fail if padding characters are included in |input|.
+      if (input.find_first_of(kPaddingChar) != std::string::npos)
+        return false;
+      break;
+  }
+
+  // If the string either needs replacement of URL-safe characters to normal
+  // base64 ones, or additional padding, a copy of |input| needs to be made in
+  // order to make these adjustments without side effects.
+  if (required_padding_characters > 0 || needs_replacement) {
+    std::string base64_input;
+
+    // Checked arithmetic guards against overflow when sizing the copy.
+    CheckedNumeric<size_t> base64_input_size = input.size();
+    if (required_padding_characters > 0)
+      base64_input_size += 4 - required_padding_characters;
+
+    base64_input.reserve(base64_input_size.ValueOrDie());
+    input.AppendToString(&base64_input);
+
+    // Substitute the base64url URL-safe characters to their base64 equivalents.
+    ReplaceChars(base64_input, "-", "+", &base64_input);
+    ReplaceChars(base64_input, "_", "/", &base64_input);
+
+    // Append the necessary padding characters.
+    base64_input.resize(base64_input_size.ValueOrDie(), '=');
+
+    return Base64Decode(base64_input, output);
+  }
+
+  return Base64Decode(input, output);
+}
+
+} // namespace base
diff --git a/libchrome/base/base64url.h b/libchrome/base/base64url.h
new file mode 100644
index 0000000..66a4824
--- /dev/null
+++ b/libchrome/base/base64url.h
@@ -0,0 +1,56 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE64URL_H_
+#define BASE_BASE64URL_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+enum class Base64UrlEncodePolicy {
+ // Include the trailing padding in the output, when necessary.
+ INCLUDE_PADDING,
+
+ // Remove the trailing padding from the output.
+ OMIT_PADDING
+};
+
+// Encodes the |input| string in base64url, defined in RFC 4648:
+// https://tools.ietf.org/html/rfc4648#section-5
+//
+// The |policy| defines whether padding should be included or omitted from the
+// encoded |*output|. |input| and |*output| may reference the same storage.
+BASE_EXPORT void Base64UrlEncode(const StringPiece& input,
+ Base64UrlEncodePolicy policy,
+ std::string* output);
+
+enum class Base64UrlDecodePolicy {
+ // Require inputs contain trailing padding if non-aligned.
+ REQUIRE_PADDING,
+
+ // Accept inputs regardless of whether or not they have the correct padding.
+ IGNORE_PADDING,
+
+ // Reject inputs if they contain any trailing padding.
+ DISALLOW_PADDING
+};
+
+// Decodes the |input| string in base64url, defined in RFC 4648:
+// https://tools.ietf.org/html/rfc4648#section-5
+//
+// The |policy| defines whether padding will be required, ignored or disallowed
+// altogether. |input| and |*output| may reference the same storage.
+BASE_EXPORT bool Base64UrlDecode(const StringPiece& input,
+ Base64UrlDecodePolicy policy,
+ std::string* output) WARN_UNUSED_RESULT;
+
+} // namespace base
+
+#endif // BASE_BASE64URL_H_
diff --git a/libchrome/base/base64url_unittest.cc b/libchrome/base/base64url_unittest.cc
new file mode 100644
index 0000000..45aa4a8
--- /dev/null
+++ b/libchrome/base/base64url_unittest.cc
@@ -0,0 +1,115 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base64url.h"
+
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// INCLUDE_PADDING must keep the trailing '=' and use '_' where plain base64
+// would use '/'. Also covers short and empty inputs.
+TEST(Base64UrlTest, EncodeIncludePaddingPolicy) {
+  std::string output;
+  Base64UrlEncode("hello?world", Base64UrlEncodePolicy::INCLUDE_PADDING,
+                  &output);
+
+  // Base64 version: aGVsbG8/d29ybGQ=
+  EXPECT_EQ("aGVsbG8_d29ybGQ=", output);
+
+  // Test for behavior for very short and empty strings.
+  Base64UrlEncode("??", Base64UrlEncodePolicy::INCLUDE_PADDING, &output);
+  EXPECT_EQ("Pz8=", output);
+
+  Base64UrlEncode("", Base64UrlEncodePolicy::INCLUDE_PADDING, &output);
+  EXPECT_EQ("", output);
+}
+
+// OMIT_PADDING must strip the trailing '='. Also covers short and empty
+// inputs.
+TEST(Base64UrlTest, EncodeOmitPaddingPolicy) {
+  std::string output;
+  Base64UrlEncode("hello?world", Base64UrlEncodePolicy::OMIT_PADDING, &output);
+
+  // base64 version: aGVsbG8/d29ybGQ=
+  EXPECT_EQ("aGVsbG8_d29ybGQ", output);
+
+  // Test for behavior for very short and empty strings.
+  Base64UrlEncode("??", Base64UrlEncodePolicy::OMIT_PADDING, &output);
+  EXPECT_EQ("Pz8", output);
+
+  Base64UrlEncode("", Base64UrlEncodePolicy::OMIT_PADDING, &output);
+  EXPECT_EQ("", output);
+}
+
+// REQUIRE_PADDING: padded input decodes, unpadded input is rejected, and the
+// empty string (which needs no padding) is accepted.
+TEST(Base64UrlTest, DecodeRequirePaddingPolicy) {
+  std::string output;
+  ASSERT_TRUE(Base64UrlDecode("aGVsbG8_d29ybGQ=",
+                              Base64UrlDecodePolicy::REQUIRE_PADDING, &output));
+
+  EXPECT_EQ("hello?world", output);
+
+  ASSERT_FALSE(Base64UrlDecode(
+      "aGVsbG8_d29ybGQ", Base64UrlDecodePolicy::REQUIRE_PADDING, &output));
+
+  // Test for behavior for very short and empty strings.
+  ASSERT_TRUE(
+      Base64UrlDecode("Pz8=", Base64UrlDecodePolicy::REQUIRE_PADDING, &output));
+  EXPECT_EQ("??", output);
+
+  ASSERT_TRUE(
+      Base64UrlDecode("", Base64UrlDecodePolicy::REQUIRE_PADDING, &output));
+  EXPECT_EQ("", output);
+}
+
+// IGNORE_PADDING: both the padded and the unpadded spellings of the same
+// value must decode to the same result.
+TEST(Base64UrlTest, DecodeIgnorePaddingPolicy) {
+  std::string output;
+  ASSERT_TRUE(Base64UrlDecode("aGVsbG8_d29ybGQ",
+                              Base64UrlDecodePolicy::IGNORE_PADDING, &output));
+
+  EXPECT_EQ("hello?world", output);
+
+  // Including the padding is accepted as well.
+  ASSERT_TRUE(Base64UrlDecode("aGVsbG8_d29ybGQ=",
+                              Base64UrlDecodePolicy::IGNORE_PADDING, &output));
+
+  EXPECT_EQ("hello?world", output);
+}
+
+// DISALLOW_PADDING: input carrying a trailing '=' is rejected; the unpadded
+// form of the same value decodes normally.
+TEST(Base64UrlTest, DecodeDisallowPaddingPolicy) {
+  std::string output;
+  ASSERT_FALSE(Base64UrlDecode(
+      "aGVsbG8_d29ybGQ=", Base64UrlDecodePolicy::DISALLOW_PADDING, &output));
+
+  // The policy will allow the input when padding has been omitted.
+  ASSERT_TRUE(Base64UrlDecode(
+      "aGVsbG8_d29ybGQ", Base64UrlDecodePolicy::DISALLOW_PADDING, &output));
+
+  EXPECT_EQ("hello?world", output);
+}
+
+// Characters from the conventional base64 alphabet that were substituted in
+// base64url ('/' here) must be rejected by the decoder.
+TEST(Base64UrlTest, DecodeDisallowsBase64Alphabet) {
+  std::string output;
+
+  // The "/" character is part of the conventional base64 alphabet, but has been
+  // substituted with "_" in the base64url alphabet.
+  ASSERT_FALSE(Base64UrlDecode(
+      "aGVsbG8/d29ybGQ=", Base64UrlDecodePolicy::REQUIRE_PADDING, &output));
+}
+
+// Strings consisting solely of padding characters carry no data and must be
+// rejected, even under the lenient IGNORE_PADDING policy.
+TEST(Base64UrlTest, DecodeDisallowsPaddingOnly) {
+  std::string output;
+
+  ASSERT_FALSE(Base64UrlDecode(
+      "=", Base64UrlDecodePolicy::IGNORE_PADDING, &output));
+  ASSERT_FALSE(Base64UrlDecode(
+      "==", Base64UrlDecodePolicy::IGNORE_PADDING, &output));
+  ASSERT_FALSE(Base64UrlDecode(
+      "===", Base64UrlDecodePolicy::IGNORE_PADDING, &output));
+  ASSERT_FALSE(Base64UrlDecode(
+      "====", Base64UrlDecodePolicy::IGNORE_PADDING, &output));
+}
+
+} // namespace
+
+} // namespace base
diff --git a/libchrome/base/base_export.h b/libchrome/base/base_export.h
new file mode 100644
index 0000000..cf7ebd7
--- /dev/null
+++ b/libchrome/base/base_export.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASE_EXPORT_H_
+#define BASE_BASE_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(BASE_IMPLEMENTATION)
+#define BASE_EXPORT __declspec(dllexport)
+#else
+#define BASE_EXPORT __declspec(dllimport)
+#endif // defined(BASE_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#if defined(BASE_IMPLEMENTATION)
+#define BASE_EXPORT __attribute__((visibility("default")))
+#else
+#define BASE_EXPORT
+#endif // defined(BASE_IMPLEMENTATION)
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define BASE_EXPORT
+#endif
+
+#endif // BASE_BASE_EXPORT_H_
diff --git a/libchrome/base/base_nacl.gyp b/libchrome/base/base_nacl.gyp
new file mode 100644
index 0000000..30763d4
--- /dev/null
+++ b/libchrome/base/base_nacl.gyp
@@ -0,0 +1,158 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ },
+ 'includes': [
+ # base.gypi must be included before common_untrusted.gypi.
+ #
+ # TODO(sergeyu): Replace the target_defaults magic in base.gypi with a
+ # sources variables lists. That way order of includes will not matter.
+ 'base.gypi',
+ '../build/common_untrusted.gypi',
+ ],
+ 'conditions': [
+ ['disable_nacl==0 and disable_nacl_untrusted==0', {
+ 'targets': [
+ {
+ 'target_name': 'base_nacl',
+ 'type': 'none',
+ 'variables': {
+ 'base_target': 1,
+ 'nacl_untrusted_build': 1,
+ 'nlib_target': 'libbase_nacl.a',
+ 'build_glibc': 0,
+ 'build_newlib': 0,
+ 'build_irt': 1,
+ 'build_pnacl_newlib': 1,
+ 'sources': [
+ 'base_switches.cc',
+ 'base_switches.h',
+ 'strings/string16.cc',
+ 'sync_socket_nacl.cc',
+ 'time/time_posix.cc',
+ ],
+ 'compile_flags': [
+ '-fno-strict-aliasing',
+ ],
+ },
+ 'dependencies': [
+ 'allocator/allocator.gyp:allocator_features#target',
+ 'base.gyp:base_debugging_flags',
+ 'base.gyp:base_build_date',
+ ],
+ },
+ {
+ 'target_name': 'base_i18n_nacl',
+ 'type': 'none',
+ 'variables': {
+ 'base_i18n_target': 1,
+ 'nacl_untrusted_build': 1,
+ 'nlib_target': 'libbase_i18n_nacl.a',
+ 'build_glibc': 0,
+ 'build_newlib': 0,
+ 'build_irt': 0,
+ 'build_pnacl_newlib': 1,
+ 'sources': [
+ 'base_switches.cc',
+ 'base_switches.h',
+ 'strings/string16.cc',
+ 'sync_socket_nacl.cc',
+ 'time/time_posix.cc',
+ ],
+ },
+ 'dependencies': [
+ 'allocator/allocator.gyp:allocator_features#target',
+ 'base.gyp:base_build_date',
+ '../third_party/icu/icu_nacl.gyp:icudata_nacl',
+ '../third_party/icu/icu_nacl.gyp:icui18n_nacl',
+ '../third_party/icu/icu_nacl.gyp:icuuc_nacl',
+ ],
+ },
+ {
+ 'target_name': 'base_nacl_nonsfi',
+ 'type': 'none',
+ 'variables': {
+ 'base_target': 1,
+ 'nacl_untrusted_build': 1,
+ 'nlib_target': 'libbase_nacl_nonsfi.a',
+ 'build_glibc': 0,
+ 'build_newlib': 0,
+ 'build_irt': 0,
+ 'build_pnacl_newlib': 0,
+ 'build_nonsfi_helper': 1,
+
+ 'sources': [
+ 'base_switches.cc',
+ 'base_switches.h',
+
+ # For PathExists and ReadFromFD.
+ 'files/file_util.cc',
+ 'files/file_util_posix.cc',
+
+ # For MessageLoopForIO based on libevent.
+ 'message_loop/message_pump_libevent.cc',
+ 'message_loop/message_pump_libevent.h',
+
+ # For UnixDomainSocket::SendMsg and RecvMsg.
+ 'posix/unix_domain_socket_linux.cc',
+
+ # For GetKnownDeadTerminationStatus and GetTerminationStatus.
+ 'process/kill_posix.cc',
+
+ # For ForkWithFlags.
+ 'process/launch.h',
+ 'process/launch_posix.cc',
+
+ # Unlike libbase_nacl, for Non-SFI build, we need to use
+ # rand_util_posix for random implementation, instead of
+ # rand_util_nacl.cc, which is based on IRT. rand_util_nacl.cc is
+ # excluded below.
+ 'rand_util_posix.cc',
+
+ # For CancelableSyncSocket.
+ 'sync_socket_nacl.cc',
+ ],
+ },
+ 'sources!': [
+ 'rand_util_nacl.cc',
+ ],
+ 'dependencies': [
+ 'allocator/allocator.gyp:allocator_features#target',
+ 'base.gyp:base_debugging_flags',
+ 'base.gyp:base_build_date',
+ 'third_party/libevent/libevent_nacl_nonsfi.gyp:event_nacl_nonsfi',
+ ],
+ },
+ {
+ 'target_name': 'test_support_base_nacl_nonsfi',
+ 'type': 'none',
+ 'variables': {
+ 'nacl_untrusted_build': 1,
+ 'nlib_target': 'libtest_support_base_nacl_nonsfi.a',
+ 'build_glibc': 0,
+ 'build_newlib': 0,
+ 'build_irt': 0,
+ 'build_pnacl_newlib': 0,
+ 'build_nonsfi_helper': 1,
+
+ 'sources': [
+ 'test/gtest_util.cc',
+ 'test/launcher/unit_test_launcher_nacl_nonsfi.cc',
+ 'test/gtest_xml_unittest_result_printer.cc',
+ 'test/test_switches.cc',
+ ],
+ },
+ 'dependencies': [
+ 'base.gyp:base_build_date',
+ 'base_nacl_nonsfi',
+ '../testing/gtest_nacl.gyp:gtest_nacl',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/libchrome/base/base_switches.cc b/libchrome/base/base_switches.cc
new file mode 100644
index 0000000..f5c6eb3
--- /dev/null
+++ b/libchrome/base/base_switches.cc
@@ -0,0 +1,114 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/base_switches.h"
+#include "build/build_config.h"
+
+namespace switches {
+
+// Disables the crash reporting.
+const char kDisableBreakpad[] = "disable-breakpad";
+
+// Indicates that crash reporting should be enabled. On platforms where helper
+// processes cannot access files needed to make this decision, this flag is
+// generated internally.
+const char kEnableCrashReporter[] = "enable-crash-reporter";
+
+// Makes memory allocators keep track of their allocations and context, so a
+// detailed breakdown of memory usage can be presented in chrome://tracing when
+// the memory-infra category is enabled.
+const char kEnableHeapProfiling[] = "enable-heap-profiling";
+
+// Report native (walk the stack) allocation traces. By default pseudo stacks
+// derived from trace events are reported.
+const char kEnableHeapProfilingModeNative[] = "native";
+
+// Generates full memory crash dump.
+const char kFullMemoryCrashReport[] = "full-memory-crash-report";
+
+// Force low-end device mode when set.
+const char kEnableLowEndDeviceMode[] = "enable-low-end-device-mode";
+
+// Force disabling of low-end device mode when set.
+const char kDisableLowEndDeviceMode[] = "disable-low-end-device-mode";
+
+// This option can be used to force field trials when testing changes locally.
+// The argument is a list of name and value pairs, separated by slashes. If a
+// trial name is prefixed with an asterisk, that trial will start activated.
+// For example, the following argument defines two trials, with the second one
+// activated: "GoogleNow/Enable/*MaterialDesignNTP/Default/" This option can
+// also be used by the browser process to send the list of trials to a
+// non-browser process, using the same format. See
+// FieldTrialList::CreateTrialsFromString() in field_trial.h for details.
+const char kForceFieldTrials[] = "force-fieldtrials";
+
+// Suppresses all error dialogs when present.
+const char kNoErrorDialogs[] = "noerrdialogs";
+
+// When running certain tests that spawn child processes, this switch indicates
+// to the test framework that the current process is a child process.
+const char kTestChildProcess[] = "test-child-process";
+
+// When running certain tests that spawn child processes, this switch indicates
+// to the test framework that the current process should not initialize ICU to
+// avoid creating any scoped handles too early in startup.
+const char kTestDoNotInitializeIcu[] = "test-do-not-initialize-icu";
+
+// Gives the default maximal active V-logging level; 0 is the default.
+// Normally positive values are used for V-logging levels.
+const char kV[] = "v";
+
+// Gives the per-module maximal V-logging levels to override the value
+// given by --v. E.g. "my_module=2,foo*=3" would change the logging
+// level for all code in source files "my_module.*" and "foo*.*"
+// ("-inl" suffixes are also disregarded for this matching).
+//
+// Any pattern containing a forward or backward slash will be tested
+// against the whole pathname and not just the module. E.g.,
+// "*/foo/bar/*=2" would change the logging level for all code in
+// source files under a "foo/bar" directory.
+const char kVModule[] = "vmodule";
+
+// Will wait for 60 seconds for a debugger to attach to the process.
+const char kWaitForDebugger[] = "wait-for-debugger";
+
+// Sends trace events from these categories to a file.
+// --trace-to-file on its own sends to default categories.
+const char kTraceToFile[] = "trace-to-file";
+
+// Specifies the file name for --trace-to-file. If unspecified, it will
+// go to a default file name.
+const char kTraceToFileName[] = "trace-to-file-name";
+
+// Configure whether chrome://profiler will contain timing information. This
+// option is enabled by default. A value of "0" will disable profiler timing,
+// while all other values will enable it.
+const char kProfilerTiming[] = "profiler-timing";
+// Value of the --profiler-timing flag that will disable timing information for
+// chrome://profiler.
+const char kProfilerTimingDisabledValue[] = "0";
+
+// Specifies a location for profiling output. This will only work if chrome has
+// been built with the gyp variable profiling=1 or gn arg enable_profiling=true.
+//
+// {pid} if present will be replaced by the pid of the process.
+// {count} if present will be incremented each time a profile is generated
+// for this process.
+// The default is chrome-profile-{pid} for the browser and test-profile-{pid}
+// for tests.
+const char kProfilingFile[] = "profiling-file";
+
+#if defined(OS_WIN)
+// Disables the USB keyboard detection for blocking the OSK on Win8+.
+const char kDisableUsbKeyboardDetect[] = "disable-usb-keyboard-detect";
+#endif
+
+#if defined(OS_POSIX)
+// Used for turning on Breakpad crash reporting in a debug environment where
+// crash reporting is typically compiled but disabled.
+const char kEnableCrashReporterForTesting[] =
+ "enable-crash-reporter-for-testing";
+#endif
+
+} // namespace switches
diff --git a/libchrome/base/base_switches.h b/libchrome/base/base_switches.h
new file mode 100644
index 0000000..0585186
--- /dev/null
+++ b/libchrome/base/base_switches.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines all the "base" command-line switches.
+
+#ifndef BASE_BASE_SWITCHES_H_
+#define BASE_BASE_SWITCHES_H_
+
+#include "build/build_config.h"
+
+namespace switches {
+
+extern const char kDisableBreakpad[];
+extern const char kDisableLowEndDeviceMode[];
+extern const char kEnableCrashReporter[];
+extern const char kEnableHeapProfiling[];
+extern const char kEnableHeapProfilingModeNative[];
+extern const char kEnableLowEndDeviceMode[];
+extern const char kForceFieldTrials[];
+extern const char kFullMemoryCrashReport[];
+extern const char kNoErrorDialogs[];
+extern const char kProfilerTiming[];
+extern const char kProfilerTimingDisabledValue[];
+extern const char kProfilingFile[];
+extern const char kTestChildProcess[];
+extern const char kTestDoNotInitializeIcu[];
+extern const char kTraceToFile[];
+extern const char kTraceToFileName[];
+extern const char kV[];
+extern const char kVModule[];
+extern const char kWaitForDebugger[];
+
+#if defined(OS_WIN)
+extern const char kDisableUsbKeyboardDetect[];
+#endif
+
+#if defined(OS_POSIX)
+extern const char kEnableCrashReporterForTesting[];
+#endif
+
+} // namespace switches
+
+#endif // BASE_BASE_SWITCHES_H_
diff --git a/libchrome/base/base_unittests.isolate b/libchrome/base/base_unittests.isolate
new file mode 100644
index 0000000..208501f
--- /dev/null
+++ b/libchrome/base/base_unittests.isolate
@@ -0,0 +1,56 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'command': [
+ '../testing/test_env.py',
+ '<(PRODUCT_DIR)/base_unittests<(EXECUTABLE_SUFFIX)',
+ '--brave-new-test-launcher',
+ '--test-launcher-bot-mode',
+ '--asan=<(asan)',
+ '--msan=<(msan)',
+ '--tsan=<(tsan)',
+ ],
+ },
+ 'conditions': [
+ ['OS=="android" or OS=="linux" or OS=="mac" or OS=="win"', {
+ 'variables': {
+ 'files': [
+ 'test/data/',
+ ],
+ },
+ }],
+ ['OS=="linux" or OS=="mac" or OS=="win"', {
+ 'variables': {
+ 'files': [
+ '../testing/test_env.py',
+ ],
+ },
+ }],
+ ['OS=="linux"', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/lib/libmalloc_wrapper.so',
+ ],
+ },
+ }],
+ ['OS=="mac" and asan==1 and fastbuild==0', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/base_unittests.dSYM/',
+ ],
+ },
+ }],
+ ['OS=="win" and (fastbuild==0 or fastbuild==1)', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/base_unittests.exe.pdb',
+ ],
+ },
+ }],
+ ],
+ 'includes': [
+ 'base.isolate',
+ ],
+}
diff --git a/libchrome/base/bind.h b/libchrome/base/bind.h
new file mode 100644
index 0000000..9cf65b6
--- /dev/null
+++ b/libchrome/base/bind.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BIND_H_
+#define BASE_BIND_H_
+
+#include "base/bind_internal.h"
+
+// -----------------------------------------------------------------------------
+// Usage documentation
+// -----------------------------------------------------------------------------
+//
+// See base/callback.h for documentation.
+//
+//
+// -----------------------------------------------------------------------------
+// Implementation notes
+// -----------------------------------------------------------------------------
+//
+// If you're reading the implementation, before proceeding further, you should
+// read the top comment of base/bind_internal.h for a definition of common
+// terms and concepts.
+
+namespace base {
+
+// Packages |functor| together with the already-bound |args| into a Callback.
+// The Callback's run signature (MakeUnboundRunType) consists of the
+// parameters that remain unbound; the bound state is stored in a BindState
+// and &Invoker::Run is recorded as the function that later re-joins that
+// state with the run-time arguments.
+template <typename Functor, typename... Args>
+inline base::Callback<MakeUnboundRunType<Functor, Args...>> Bind(
+    Functor&& functor,
+    Args&&... args) {
+  using BindState = internal::MakeBindStateType<Functor, Args...>;
+  using UnboundRunType = MakeUnboundRunType<Functor, Args...>;
+  using Invoker = internal::Invoker<BindState, UnboundRunType>;
+
+  // Perfect-forward the functor and arguments into the heap-allocated state.
+  using CallbackType = Callback<UnboundRunType>;
+  return CallbackType(new BindState(std::forward<Functor>(functor),
+                                    std::forward<Args>(args)...),
+                      &Invoker::Run);
+}
+
+} // namespace base
+
+#endif // BASE_BIND_H_
diff --git a/libchrome/base/bind_helpers.cc b/libchrome/base/bind_helpers.cc
new file mode 100644
index 0000000..f1fe46d
--- /dev/null
+++ b/libchrome/base/bind_helpers.cc
@@ -0,0 +1,14 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind_helpers.h"
+
+#include "base/callback.h"
+
+namespace base {
+
+// Intentionally a no-op: a stand-in target for callers that must supply a
+// callback but have no work to perform.
+void DoNothing() {
+}
+
+} // namespace base
diff --git a/libchrome/base/bind_helpers.h b/libchrome/base/bind_helpers.h
new file mode 100644
index 0000000..93d02e3
--- /dev/null
+++ b/libchrome/base/bind_helpers.h
@@ -0,0 +1,502 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This defines a set of argument wrappers and related factory methods that
+// can be used to specify the refcounting and reference semantics of arguments
+// that are bound by the Bind() function in base/bind.h.
+//
+// It also defines a set of simple functions and utilities that people want
+// when using Callback<> and Bind().
+//
+//
+// ARGUMENT BINDING WRAPPERS
+//
+// The wrapper functions are base::Unretained(), base::Owned(), base::Passed(),
+// base::ConstRef(), and base::IgnoreResult().
+//
+// Unretained() allows Bind() to bind a non-refcounted class, and to disable
+// refcounting on arguments that are refcounted objects.
+//
+// Owned() transfers ownership of an object to the Callback resulting from
+// bind; the object will be deleted when the Callback is deleted.
+//
+// Passed() is for transferring movable-but-not-copyable types (eg. scoped_ptr)
+// through a Callback. Logically, this signifies a destructive transfer of
+// the state of the argument into the target function. Invoking
+// Callback::Run() twice on a Callback that was created with a Passed()
+// argument will CHECK() because the first invocation would have already
+// transferred ownership to the target function.
+//
+// RetainedRef() accepts a ref counted object and retains a reference to it.
+// When the callback is called, the object is passed as a raw pointer.
+//
+// ConstRef() allows binding a constant reference to an argument rather
+// than a copy.
+//
+// IgnoreResult() is used to adapt a function or Callback with a return type to
+// one with a void return. This is most useful if you have a function with,
+// say, a pesky ignorable bool return that you want to use with PostTask or
+// something else that expects a Callback with a void return.
+//
+// EXAMPLE OF Unretained():
+//
+// class Foo {
+// public:
+// void func() { cout << "Foo:f" << endl; }
+// };
+//
+// // In some function somewhere.
+// Foo foo;
+// Closure foo_callback =
+// Bind(&Foo::func, Unretained(&foo));
+// foo_callback.Run(); // Prints "Foo:f".
+//
+// Without the Unretained() wrapper on |&foo|, the above call would fail
+// to compile because Foo does not support the AddRef() and Release() methods.
+//
+//
+// EXAMPLE OF Owned():
+//
+// void foo(int* arg) { cout << *arg << endl }
+//
+// int* pn = new int(1);
+// Closure foo_callback = Bind(&foo, Owned(pn));
+//
+// foo_callback.Run(); // Prints "1"
+// foo_callback.Run(); // Prints "1"
+// *pn = 2;
+// foo_callback.Run(); // Prints "2"
+//
+// foo_callback.Reset(); // |pn| is deleted. Also will happen when
+// // |foo_callback| goes out of scope.
+//
+// Without Owned(), someone would have to know to delete |pn| when the last
+// reference to the Callback is deleted.
+//
+// EXAMPLE OF RetainedRef():
+//
+// void foo(RefCountedBytes* bytes) {}
+//
+// scoped_refptr<RefCountedBytes> bytes = ...;
+// Closure callback = Bind(&foo, base::RetainedRef(bytes));
+// callback.Run();
+//
+// Without RetainedRef, the scoped_refptr would try to implicitly convert to
+// a raw pointer and fail compilation:
+//
+// Closure callback = Bind(&foo, bytes); // ERROR!
+//
+//
+// EXAMPLE OF ConstRef():
+//
+// void foo(int arg) { cout << arg << endl }
+//
+// int n = 1;
+// Closure no_ref = Bind(&foo, n);
+// Closure has_ref = Bind(&foo, ConstRef(n));
+//
+// no_ref.Run(); // Prints "1"
+// has_ref.Run(); // Prints "1"
+//
+// n = 2;
+// no_ref.Run(); // Prints "1"
+// has_ref.Run(); // Prints "2"
+//
+// Note that because ConstRef() takes a reference on |n|, |n| must outlive all
+// its bound callbacks.
+//
+//
+// EXAMPLE OF IgnoreResult():
+//
+// int DoSomething(int arg) { cout << arg << endl; }
+//
+// // Assign to a Callback with a void return type.
+// Callback<void(int)> cb = Bind(IgnoreResult(&DoSomething));
+// cb->Run(1); // Prints "1".
+//
+// // Prints "1" on |ml|.
+// ml->PostTask(FROM_HERE, Bind(IgnoreResult(&DoSomething), 1));
+//
+//
+// EXAMPLE OF Passed():
+//
+// void TakesOwnership(std::unique_ptr<Foo> arg) { }
+// std::unique_ptr<Foo> CreateFoo() { return std::unique_ptr<Foo>(new Foo());
+// }
+//
+// std::unique_ptr<Foo> f(new Foo());
+//
+// // |cb| is given ownership of Foo(). |f| is now NULL.
+// // You can use std::move(f) in place of &f, but it's more verbose.
+// Closure cb = Bind(&TakesOwnership, Passed(&f));
+//
+// // Run was never called so |cb| still owns Foo() and deletes
+// // it on Reset().
+// cb.Reset();
+//
+// // |cb| is given a new Foo created by CreateFoo().
+// cb = Bind(&TakesOwnership, Passed(CreateFoo()));
+//
+// // |arg| in TakesOwnership() is given ownership of Foo(). |cb|
+// // no longer owns Foo() and, if reset, would not delete Foo().
+// cb.Run(); // Foo() is now transferred to |arg| and deleted.
+// cb.Run(); // This CHECK()s since Foo() has already been used once.
+//
+// Passed() is particularly useful with PostTask() when you are transferring
+// ownership of an argument into a task, but don't necessarily know if the
+// task will always be executed. This can happen if the task is cancellable
+// or if it is posted to a TaskRunner.
+//
+//
+// SIMPLE FUNCTIONS AND UTILITIES.
+//
+// DoNothing() - Useful for creating a Closure that does nothing when called.
+// DeletePointer<T>() - Useful for creating a Closure that will delete a
+// pointer when invoked. Only use this when necessary.
+// In most cases MessageLoop::DeleteSoon() is a better
+// fit.
+
+#ifndef BASE_BIND_HELPERS_H_
+#define BASE_BIND_HELPERS_H_
+
+#include <stddef.h>
+
+#include <type_traits>
+#include <utility>
+
+#include "base/callback.h"
+#include "base/memory/weak_ptr.h"
+#include "build/build_config.h"
+
+namespace base {
+
+template <typename T>
+struct IsWeakReceiver;
+
+namespace internal {
+
+template <typename T>
+class UnretainedWrapper {
+ public:
+ explicit UnretainedWrapper(T* o) : ptr_(o) {}
+ T* get() const { return ptr_; }
+ private:
+ T* ptr_;
+};
+
+template <typename T>
+class ConstRefWrapper {
+ public:
+ explicit ConstRefWrapper(const T& o) : ptr_(&o) {}
+ const T& get() const { return *ptr_; }
+ private:
+ const T* ptr_;
+};
+
+template <typename T>
+class RetainedRefWrapper {
+ public:
+ explicit RetainedRefWrapper(T* o) : ptr_(o) {}
+ explicit RetainedRefWrapper(scoped_refptr<T> o) : ptr_(std::move(o)) {}
+ T* get() const { return ptr_.get(); }
+ private:
+ scoped_refptr<T> ptr_;
+};
+
+template <typename T>
+struct IgnoreResultHelper {
+ explicit IgnoreResultHelper(T functor) : functor_(std::move(functor)) {}
+ explicit operator bool() const { return !!functor_; }
+
+ T functor_;
+};
+
+// An alternate implementation is to avoid the destructive copy, and instead
+// specialize ParamTraits<> for OwnedWrapper<> to change the StorageType to
+// a class that is essentially a std::unique_ptr<>.
+//
+// The current implementation has the benefit though of leaving ParamTraits<>
+// fully in callback_internal.h as well as avoiding type conversions during
+// storage.
+template <typename T>
+class OwnedWrapper {
+ public:
+ explicit OwnedWrapper(T* o) : ptr_(o) {}
+ ~OwnedWrapper() { delete ptr_; }
+ T* get() const { return ptr_; }
+ OwnedWrapper(OwnedWrapper&& other) {
+ ptr_ = other.ptr_;
+ other.ptr_ = NULL;
+ }
+
+ private:
+ mutable T* ptr_;
+};
+
+// PassedWrapper is a copyable adapter for a scoper that ignores const.
+//
+// It is needed to get around the fact that Bind() takes a const reference to
+// all its arguments. Because Bind() takes a const reference to avoid
+// unnecessary copies, it is incompatible with movable-but-not-copyable
+// types; doing a destructive "move" of the type into Bind() would violate
+// the const correctness.
+//
+// This conundrum cannot be solved without either C++11 rvalue references or
+// a O(2^n) blowup of Bind() templates to handle each combination of regular
+// types and movable-but-not-copyable types. Thus we introduce a wrapper type
+// that is copyable to transmit the correct type information down into
+// BindState<>. Ignoring const in this type makes sense because it is only
+// created when we are explicitly trying to do a destructive move.
+//
+// Two notes:
+// 1) PassedWrapper supports any type that has a move constructor, however
+// the type will need to be specifically whitelisted in order for it to be
+// bound to a Callback. We guard this explicitly at the call of Passed()
+// to make for clear errors. Things not given to Passed() will be forwarded
+// and stored by value which will not work for general move-only types.
+// 2) is_valid_ is distinct from NULL because it is valid to bind a "NULL"
+// scoper to a Callback and allow the Callback to execute once.
+template <typename T>
+class PassedWrapper {
+ public:
+ explicit PassedWrapper(T&& scoper)
+ : is_valid_(true), scoper_(std::move(scoper)) {}
+ PassedWrapper(PassedWrapper&& other)
+ : is_valid_(other.is_valid_), scoper_(std::move(other.scoper_)) {}
+ T Take() const {
+ CHECK(is_valid_);
+ is_valid_ = false;
+ return std::move(scoper_);
+ }
+
+ private:
+ mutable bool is_valid_;
+ mutable T scoper_;
+};
+
+// Unwrap the stored parameters for the wrappers above.
+template <typename T>
+T&& Unwrap(T&& o) {
+ return std::forward<T>(o);
+}
+
+template <typename T>
+T* Unwrap(const UnretainedWrapper<T>& unretained) {
+ return unretained.get();
+}
+
+template <typename T>
+const T& Unwrap(const ConstRefWrapper<T>& const_ref) {
+ return const_ref.get();
+}
+
+template <typename T>
+T* Unwrap(const RetainedRefWrapper<T>& o) {
+ return o.get();
+}
+
+template <typename T>
+T* Unwrap(const OwnedWrapper<T>& o) {
+ return o.get();
+}
+
+template <typename T>
+T Unwrap(const PassedWrapper<T>& o) {
+ return o.Take();
+}
+
+// IsWeakMethod is a helper that determines if we are binding a WeakPtr<> to a
+// method. It is used internally by Bind() to select the correct
+// InvokeHelper that will no-op itself in the event the WeakPtr<> for
+// the target object is invalidated.
+//
+// The first argument should be the type of the object that will be received by
+// the method.
+template <bool is_method, typename... Args>
+struct IsWeakMethod : std::false_type {};
+
+template <typename T, typename... Args>
+struct IsWeakMethod<true, T, Args...> : IsWeakReceiver<T> {};
+
+// Packs a list of types to hold them in a single type.
+template <typename... Types>
+struct TypeList {};
+
+// Used for DropTypeListItem implementation.
+template <size_t n, typename List>
+struct DropTypeListItemImpl;
+
+// Do not use enable_if and SFINAE here to avoid MSVC2013 compile failure.
+template <size_t n, typename T, typename... List>
+struct DropTypeListItemImpl<n, TypeList<T, List...>>
+ : DropTypeListItemImpl<n - 1, TypeList<List...>> {};
+
+template <typename T, typename... List>
+struct DropTypeListItemImpl<0, TypeList<T, List...>> {
+ using Type = TypeList<T, List...>;
+};
+
+template <>
+struct DropTypeListItemImpl<0, TypeList<>> {
+ using Type = TypeList<>;
+};
+
+// A type-level function that drops the first |n| items from a given TypeList.
+template <size_t n, typename List>
+using DropTypeListItem = typename DropTypeListItemImpl<n, List>::Type;
+
+// Used for TakeTypeListItem implementation.
+template <size_t n, typename List, typename... Accum>
+struct TakeTypeListItemImpl;
+
+// Do not use enable_if and SFINAE here to avoid MSVC2013 compile failure.
+template <size_t n, typename T, typename... List, typename... Accum>
+struct TakeTypeListItemImpl<n, TypeList<T, List...>, Accum...>
+ : TakeTypeListItemImpl<n - 1, TypeList<List...>, Accum..., T> {};
+
+template <typename T, typename... List, typename... Accum>
+struct TakeTypeListItemImpl<0, TypeList<T, List...>, Accum...> {
+ using Type = TypeList<Accum...>;
+};
+
+template <typename... Accum>
+struct TakeTypeListItemImpl<0, TypeList<>, Accum...> {
+ using Type = TypeList<Accum...>;
+};
+
+// A type-level function that takes first |n| list item from given TypeList.
+// E.g. TakeTypeListItem<3, TypeList<A, B, C, D>> is evaluated to
+// TypeList<A, B, C>.
+template <size_t n, typename List>
+using TakeTypeListItem = typename TakeTypeListItemImpl<n, List>::Type;
+
+// Used for ConcatTypeLists implementation.
+template <typename List1, typename List2>
+struct ConcatTypeListsImpl;
+
+template <typename... Types1, typename... Types2>
+struct ConcatTypeListsImpl<TypeList<Types1...>, TypeList<Types2...>> {
+ using Type = TypeList<Types1..., Types2...>;
+};
+
+// A type-level function that concats two TypeLists.
+template <typename List1, typename List2>
+using ConcatTypeLists = typename ConcatTypeListsImpl<List1, List2>::Type;
+
+// Used for MakeFunctionType implementation.
+template <typename R, typename ArgList>
+struct MakeFunctionTypeImpl;
+
+template <typename R, typename... Args>
+struct MakeFunctionTypeImpl<R, TypeList<Args...>> {
+ // MSVC 2013 doesn't support Type Alias of function types.
+ // Revisit this after we update it to newer version.
+ typedef R Type(Args...);
+};
+
+// A type-level function that constructs a function type that has |R| as its
+// return type and has TypeLists items as its arguments.
+template <typename R, typename ArgList>
+using MakeFunctionType = typename MakeFunctionTypeImpl<R, ArgList>::Type;
+
+// Used for ExtractArgs and ExtractReturnType.
+template <typename Signature>
+struct ExtractArgsImpl;
+
+template <typename R, typename... Args>
+struct ExtractArgsImpl<R(Args...)> {
+ using ReturnType = R;
+ using ArgsList = TypeList<Args...>;
+};
+
+// A type-level function that extracts function arguments into a TypeList.
+// E.g. ExtractArgs<R(A, B, C)> is evaluated to TypeList<A, B, C>.
+template <typename Signature>
+using ExtractArgs = typename ExtractArgsImpl<Signature>::ArgsList;
+
+// A type-level function that extracts the return type of a function.
+// E.g. ExtractReturnType<R(A, B, C)> is evaluated to R.
+template <typename Signature>
+using ExtractReturnType = typename ExtractArgsImpl<Signature>::ReturnType;
+
+} // namespace internal
+
+template <typename T>
+static inline internal::UnretainedWrapper<T> Unretained(T* o) {
+ return internal::UnretainedWrapper<T>(o);
+}
+
+template <typename T>
+static inline internal::RetainedRefWrapper<T> RetainedRef(T* o) {
+ return internal::RetainedRefWrapper<T>(o);
+}
+
+template <typename T>
+static inline internal::RetainedRefWrapper<T> RetainedRef(scoped_refptr<T> o) {
+ return internal::RetainedRefWrapper<T>(std::move(o));
+}
+
+template <typename T>
+static inline internal::ConstRefWrapper<T> ConstRef(const T& o) {
+ return internal::ConstRefWrapper<T>(o);
+}
+
+template <typename T>
+static inline internal::OwnedWrapper<T> Owned(T* o) {
+ return internal::OwnedWrapper<T>(o);
+}
+
+// We offer 2 syntaxes for calling Passed(). The first takes an rvalue and
+// is best suited for use with the return value of a function or other temporary
+// rvalues. The second takes a pointer to the scoper and is just syntactic sugar
+// to avoid having to write Passed(std::move(scoper)).
+//
+// Both versions of Passed() prevent T from being an lvalue reference. The first
+// via use of enable_if, and the second takes a T* which will not bind to T&.
+template <typename T,
+ typename std::enable_if<!std::is_lvalue_reference<T>::value>::type* =
+ nullptr>
+static inline internal::PassedWrapper<T> Passed(T&& scoper) {
+ return internal::PassedWrapper<T>(std::move(scoper));
+}
+template <typename T>
+static inline internal::PassedWrapper<T> Passed(T* scoper) {
+ return internal::PassedWrapper<T>(std::move(*scoper));
+}
+
+template <typename T>
+static inline internal::IgnoreResultHelper<T> IgnoreResult(T data) {
+ return internal::IgnoreResultHelper<T>(std::move(data));
+}
+
+BASE_EXPORT void DoNothing();
+
+template<typename T>
+void DeletePointer(T* obj) {
+ delete obj;
+}
+
+// An injection point to control |this| pointer behavior on a method invocation.
+// If IsWeakReceiver<> is true_type for |T| and |T| is used for a receiver of a
+// method, base::Bind cancels the method invocation if the receiver is tested as
+// false.
+// E.g. Foo::bar() is not called:
+// struct Foo : base::SupportsWeakPtr<Foo> {
+// void bar() {}
+// };
+//
+// WeakPtr<Foo> oo = nullptr;
+// base::Bind(&Foo::bar, oo).Run();
+template <typename T>
+struct IsWeakReceiver : std::false_type {};
+
+template <typename T>
+struct IsWeakReceiver<internal::ConstRefWrapper<T>> : IsWeakReceiver<T> {};
+
+template <typename T>
+struct IsWeakReceiver<WeakPtr<T>> : std::true_type {};
+
+} // namespace base
+
+#endif // BASE_BIND_HELPERS_H_
diff --git a/libchrome/base/bind_internal.h b/libchrome/base/bind_internal.h
new file mode 100644
index 0000000..3ce4417
--- /dev/null
+++ b/libchrome/base/bind_internal.h
@@ -0,0 +1,452 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BIND_INTERNAL_H_
+#define BASE_BIND_INTERNAL_H_
+
+#include <stddef.h>
+
+#include <tuple>
+#include <type_traits>
+
+#include "base/bind_helpers.h"
+#include "base/callback_internal.h"
+#include "base/memory/raw_scoped_refptr_mismatch_checker.h"
+#include "base/memory/weak_ptr.h"
+#include "base/template_util.h"
+#include "base/tuple.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace internal {
+
+// See base/callback.h for user documentation.
+//
+//
+// CONCEPTS:
+// Functor -- A movable type representing something that should be called.
+// All function pointers and Callback<> are functors even if the
+// invocation syntax differs.
+// RunType -- A function type (as opposed to function _pointer_ type) for
+// a Callback<>::Run(). Usually just a convenience typedef.
+// (Bound)Args -- A set of types that stores the arguments.
+//
+// Types:
+// ForceVoidReturn<> -- Helper class for translating function signatures to
+// equivalent forms with a "void" return type.
+// FunctorTraits<> -- Type traits used to determine the correct RunType and
+// invocation manner for a Functor. This is where function
+// signature adapters are applied.
+// InvokeHelper<> -- Takes a Functor + arguments and actually invokes it.
+// Handle the differing syntaxes needed for WeakPtr<>
+// support. This is separate from Invoker to avoid creating
+// multiple versions of Invoker<>.
+// Invoker<> -- Unwraps the curried parameters and executes the Functor.
+// BindState<> -- Stores the curried parameters, and is the main entry point
+// into the Bind() system.
+
+template <typename...>
+struct make_void {
+ using type = void;
+};
+
+// A clone of C++17 std::void_t.
+// Unlike the original version, we need |make_void| as a helper struct to avoid
+// a C++14 defect.
+// ref: http://en.cppreference.com/w/cpp/types/void_t
+// ref: http://open-std.org/JTC1/SC22/WG21/docs/cwg_defects.html#1558
+template <typename... Ts>
+using void_t = typename make_void<Ts...>::type;
+
+template <typename Callable,
+ typename Signature = decltype(&Callable::operator())>
+struct ExtractCallableRunTypeImpl;
+
+template <typename Callable, typename R, typename... Args>
+struct ExtractCallableRunTypeImpl<Callable, R(Callable::*)(Args...) const> {
+ using Type = R(Args...);
+};
+
+// Evaluated to RunType of the given callable type.
+// Example:
+// auto f = [](int, char*) { return 0.1; };
+// ExtractCallableRunType<decltype(f)>
+// is evaluated to
+// double(int, char*);
+template <typename Callable>
+using ExtractCallableRunType =
+ typename ExtractCallableRunTypeImpl<Callable>::Type;
+
+// IsConvertibleToRunType<Functor> is std::true_type if |Functor| has operator()
+// and convertible to the corresponding function pointer. Otherwise, it's
+// std::false_type.
+// Example:
+// IsConvertibleToRunType<void(*)()>::value is false.
+//
+// struct Foo {};
+// IsConvertibleToRunType<void(Foo::*)()>::value is false.
+//
+// auto f = []() {};
+// IsConvertibleToRunType<decltype(f)>::value is true.
+//
+// int i = 0;
+// auto g = [i]() {};
+// IsConvertibleToRunType<decltype(g)>::value is false.
+template <typename Functor, typename SFINAE = void>
+struct IsConvertibleToRunType : std::false_type {};
+
+template <typename Callable>
+struct IsConvertibleToRunType<Callable, void_t<decltype(&Callable::operator())>>
+ : std::is_convertible<Callable, ExtractCallableRunType<Callable>*> {};
+
+// HasRefCountedTypeAsRawPtr selects true_type when any of the |Args| is a raw
+// pointer to a RefCounted type.
+// Implementation note: This non-specialized case handles zero-arity case only.
+// Non-zero-arity cases should be handled by the specialization below.
+template <typename... Args>
+struct HasRefCountedTypeAsRawPtr : std::false_type {};
+
+// Implementation note: Select true_type if the first parameter is a raw pointer
+// to a RefCounted type. Otherwise, skip the first parameter and check rest of
+// parameters recursively.
+template <typename T, typename... Args>
+struct HasRefCountedTypeAsRawPtr<T, Args...>
+ : std::conditional<NeedsScopedRefptrButGetsRawPtr<T>::value,
+ std::true_type,
+ HasRefCountedTypeAsRawPtr<Args...>>::type {};
+
+// ForceVoidReturn<>
+//
+// Set of templates that support forcing the function return type to void.
+template <typename Sig>
+struct ForceVoidReturn;
+
+template <typename R, typename... Args>
+struct ForceVoidReturn<R(Args...)> {
+ using RunType = void(Args...);
+};
+
+// FunctorTraits<>
+//
+// See description at top of file.
+template <typename Functor, typename SFINAE = void>
+struct FunctorTraits;
+
+// For a callable type that is convertible to the corresponding function type.
+// This specialization is intended to allow binding captureless lambdas by
+// base::Bind(), based on the fact that captureless lambdas can be convertible
+// to the function type while capturing lambdas can't.
+template <typename Functor>
+struct FunctorTraits<
+ Functor> {
+ using RunType = ExtractCallableRunType<Functor>;
+ static constexpr bool is_method = false;
+ static constexpr bool is_nullable = false;
+
+ template <typename... RunArgs>
+ static ExtractReturnType<RunType>
+ Invoke(const Functor& functor, RunArgs&&... args) {
+ return functor(std::forward<RunArgs>(args)...);
+ }
+};
+
+// For functions.
+template <typename R, typename... Args>
+struct FunctorTraits<R (*)(Args...)> {
+ using RunType = R(Args...);
+ static constexpr bool is_method = false;
+ static constexpr bool is_nullable = true;
+
+ template <typename... RunArgs>
+ static R Invoke(R (*function)(Args...), RunArgs&&... args) {
+ return function(std::forward<RunArgs>(args)...);
+ }
+};
+
+#if defined(OS_WIN) && !defined(ARCH_CPU_X86_64)
+
+// For functions.
+template <typename R, typename... Args>
+struct FunctorTraits<R(__stdcall*)(Args...)> {
+ using RunType = R(Args...);
+ static constexpr bool is_method = false;
+ static constexpr bool is_nullable = true;
+
+ template <typename... RunArgs>
+ static R Invoke(R(__stdcall* function)(Args...), RunArgs&&... args) {
+ return function(std::forward<RunArgs>(args)...);
+ }
+};
+
+// For functions.
+template <typename R, typename... Args>
+struct FunctorTraits<R(__fastcall*)(Args...)> {
+ using RunType = R(Args...);
+ static constexpr bool is_method = false;
+ static constexpr bool is_nullable = true;
+
+ template <typename... RunArgs>
+ static R Invoke(R(__fastcall* function)(Args...), RunArgs&&... args) {
+ return function(std::forward<RunArgs>(args)...);
+ }
+};
+
+#endif // defined(OS_WIN) && !defined(ARCH_CPU_X86_64)
+
+// For methods.
+template <typename R, typename Receiver, typename... Args>
+struct FunctorTraits<R (Receiver::*)(Args...)> {
+ using RunType = R(Receiver*, Args...);
+ static constexpr bool is_method = true;
+ static constexpr bool is_nullable = true;
+
+ template <typename ReceiverPtr, typename... RunArgs>
+ static R Invoke(R (Receiver::*method)(Args...),
+ ReceiverPtr&& receiver_ptr,
+ RunArgs&&... args) {
+ // Clang skips CV qualifier check on a method pointer invocation when the
+ // receiver is a subclass. Store the receiver into a const reference to
+ // T to ensure the CV check works.
+ // https://llvm.org/bugs/show_bug.cgi?id=27037
+ Receiver& receiver = *receiver_ptr;
+ return (receiver.*method)(std::forward<RunArgs>(args)...);
+ }
+};
+
+// For const methods.
+template <typename R, typename Receiver, typename... Args>
+struct FunctorTraits<R (Receiver::*)(Args...) const> {
+ using RunType = R(const Receiver*, Args...);
+ static constexpr bool is_method = true;
+ static constexpr bool is_nullable = true;
+
+ template <typename ReceiverPtr, typename... RunArgs>
+ static R Invoke(R (Receiver::*method)(Args...) const,
+ ReceiverPtr&& receiver_ptr,
+ RunArgs&&... args) {
+ // Clang skips CV qualifier check on a method pointer invocation when the
+ // receiver is a subclass. Store the receiver into a const reference to
+ // T to ensure the CV check works.
+ // https://llvm.org/bugs/show_bug.cgi?id=27037
+ const Receiver& receiver = *receiver_ptr;
+ return (receiver.*method)(std::forward<RunArgs>(args)...);
+ }
+};
+
+// For IgnoreResults.
+template <typename T>
+struct FunctorTraits<IgnoreResultHelper<T>> : FunctorTraits<T> {
+ using RunType =
+ typename ForceVoidReturn<typename FunctorTraits<T>::RunType>::RunType;
+
+ template <typename IgnoreResultType, typename... RunArgs>
+ static void Invoke(IgnoreResultType&& ignore_result_helper,
+ RunArgs&&... args) {
+ FunctorTraits<T>::Invoke(ignore_result_helper.functor_,
+ std::forward<RunArgs>(args)...);
+ }
+};
+
+// For Callbacks.
+template <typename R, typename... Args, CopyMode copy_mode>
+struct FunctorTraits<Callback<R(Args...), copy_mode>> {
+ using RunType = R(Args...);
+ static constexpr bool is_method = false;
+ static constexpr bool is_nullable = true;
+
+ template <typename CallbackType, typename... RunArgs>
+ static R Invoke(CallbackType&& callback, RunArgs&&... args) {
+ DCHECK(!callback.is_null());
+ return std::forward<CallbackType>(callback).Run(
+ std::forward<RunArgs>(args)...);
+ }
+};
+
+// InvokeHelper<>
+//
+// There are 2 logical InvokeHelper<> specializations: normal, WeakCalls.
+//
+// The normal type just calls the underlying runnable.
+//
+// WeakCalls need special syntax that is applied to the first argument to check
+// if they should no-op themselves.
+template <bool is_weak_call, typename ReturnType>
+struct InvokeHelper;
+
+template <typename ReturnType>
+struct InvokeHelper<false, ReturnType> {
+ template <typename Functor, typename... RunArgs>
+ static inline ReturnType MakeItSo(Functor&& functor, RunArgs&&... args) {
+ using Traits = FunctorTraits<typename std::decay<Functor>::type>;
+ return Traits::Invoke(std::forward<Functor>(functor),
+ std::forward<RunArgs>(args)...);
+ }
+};
+
+template <typename ReturnType>
+struct InvokeHelper<true, ReturnType> {
+ // WeakCalls are only supported for functions with a void return type.
+// Otherwise, the function result would be undefined if the WeakPtr<>
+ // is invalidated.
+ static_assert(std::is_void<ReturnType>::value,
+ "weak_ptrs can only bind to methods without return values");
+
+ template <typename Functor, typename BoundWeakPtr, typename... RunArgs>
+ static inline void MakeItSo(Functor&& functor,
+ BoundWeakPtr&& weak_ptr,
+ RunArgs&&... args) {
+ if (!weak_ptr)
+ return;
+ using Traits = FunctorTraits<typename std::decay<Functor>::type>;
+ Traits::Invoke(std::forward<Functor>(functor),
+ std::forward<BoundWeakPtr>(weak_ptr),
+ std::forward<RunArgs>(args)...);
+ }
+};
+
+// Invoker<>
+//
+// See description at the top of the file.
+template <typename StorageType, typename UnboundRunType>
+struct Invoker;
+
+template <typename StorageType, typename R, typename... UnboundArgs>
+struct Invoker<StorageType, R(UnboundArgs...)> {
+ static R Run(BindStateBase* base, UnboundArgs&&... unbound_args) {
+ // Local references to make debugger stepping easier. If in a debugger,
+ // you really want to warp ahead and step through the
+ // InvokeHelper<>::MakeItSo() call below.
+ const StorageType* storage = static_cast<StorageType*>(base);
+ static constexpr size_t num_bound_args =
+ std::tuple_size<decltype(storage->bound_args_)>::value;
+ return RunImpl(storage->functor_,
+ storage->bound_args_,
+ MakeIndexSequence<num_bound_args>(),
+ std::forward<UnboundArgs>(unbound_args)...);
+ }
+
+ private:
+ template <typename Functor, typename BoundArgsTuple, size_t... indices>
+ static inline R RunImpl(Functor&& functor,
+ BoundArgsTuple&& bound,
+ IndexSequence<indices...>,
+ UnboundArgs&&... unbound_args) {
+ static constexpr bool is_method =
+ FunctorTraits<typename std::decay<Functor>::type>::is_method;
+
+ using DecayedArgsTuple = typename std::decay<BoundArgsTuple>::type;
+ static constexpr bool is_weak_call =
+ IsWeakMethod<is_method,
+ typename std::tuple_element<
+ indices,
+ DecayedArgsTuple>::type...>::value;
+
+ return InvokeHelper<is_weak_call, R>::MakeItSo(
+ std::forward<Functor>(functor),
+ Unwrap(base::get<indices>(std::forward<BoundArgsTuple>(bound)))...,
+ std::forward<UnboundArgs>(unbound_args)...);
+ }
+};
+
+// Used to implement MakeUnboundRunType.
+template <typename Functor, typename... BoundArgs>
+struct MakeUnboundRunTypeImpl {
+ using RunType =
+ typename FunctorTraits<typename std::decay<Functor>::type>::RunType;
+ using ReturnType = ExtractReturnType<RunType>;
+ using Args = ExtractArgs<RunType>;
+ using UnboundArgs = DropTypeListItem<sizeof...(BoundArgs), Args>;
+ using Type = MakeFunctionType<ReturnType, UnboundArgs>;
+};
+template <typename Functor>
+typename std::enable_if<FunctorTraits<Functor>::is_nullable, bool>::type
+IsNull(const Functor& functor) {
+ return !functor;
+}
+
+template <typename Functor>
+typename std::enable_if<!FunctorTraits<Functor>::is_nullable, bool>::type
+IsNull(const Functor&) {
+ return false;
+}
+
+// BindState<>
+//
+// This stores all the state passed into Bind().
+template <typename Functor, typename... BoundArgs>
+struct BindState final : BindStateBase {
+ template <typename ForwardFunctor, typename... ForwardBoundArgs>
+ explicit BindState(ForwardFunctor&& functor, ForwardBoundArgs&&... bound_args)
+ : BindStateBase(&Destroy),
+ functor_(std::forward<ForwardFunctor>(functor)),
+ bound_args_(std::forward<ForwardBoundArgs>(bound_args)...) {
+ DCHECK(!IsNull(functor_));
+ }
+
+ Functor functor_;
+ std::tuple<BoundArgs...> bound_args_;
+
+ private:
+ ~BindState() {}
+
+ static void Destroy(BindStateBase* self) {
+ delete static_cast<BindState*>(self);
+ }
+};
+
+// Used to implement MakeBindStateType.
+template <bool is_method, typename Functor, typename... BoundArgs>
+struct MakeBindStateTypeImpl;
+
+template <typename Functor, typename... BoundArgs>
+struct MakeBindStateTypeImpl<false, Functor, BoundArgs...> {
+ static_assert(!HasRefCountedTypeAsRawPtr<BoundArgs...>::value,
+ "A parameter is a refcounted type and needs scoped_refptr.");
+ using Type = BindState<typename std::decay<Functor>::type,
+ typename std::decay<BoundArgs>::type...>;
+};
+
+template <typename Functor>
+struct MakeBindStateTypeImpl<true, Functor> {
+ using Type = BindState<typename std::decay<Functor>::type>;
+};
+
+template <typename Functor, typename Receiver, typename... BoundArgs>
+struct MakeBindStateTypeImpl<true, Functor, Receiver, BoundArgs...> {
+ static_assert(
+ !std::is_array<typename std::remove_reference<Receiver>::type>::value,
+ "First bound argument to a method cannot be an array.");
+ static_assert(!HasRefCountedTypeAsRawPtr<BoundArgs...>::value,
+ "A parameter is a refcounted type and needs scoped_refptr.");
+
+ private:
+ using DecayedReceiver = typename std::decay<Receiver>::type;
+
+ public:
+ using Type = BindState<
+ typename std::decay<Functor>::type,
+ typename std::conditional<
+ std::is_pointer<DecayedReceiver>::value,
+ scoped_refptr<typename std::remove_pointer<DecayedReceiver>::type>,
+ DecayedReceiver>::type,
+ typename std::decay<BoundArgs>::type...>;
+};
+
+template <typename Functor, typename... BoundArgs>
+using MakeBindStateType = typename MakeBindStateTypeImpl<
+ FunctorTraits<typename std::decay<Functor>::type>::is_method,
+ Functor,
+ BoundArgs...>::Type;
+
+} // namespace internal
+
+// Returns a RunType of bound functor.
+// E.g. MakeUnboundRunType<R(A, B, C), A, B> is evaluated to R(C).
+template <typename Functor, typename... BoundArgs>
+using MakeUnboundRunType =
+ typename internal::MakeUnboundRunTypeImpl<Functor, BoundArgs...>::Type;
+
+} // namespace base
+
+#endif // BASE_BIND_INTERNAL_H_
diff --git a/libchrome/base/bind_unittest.cc b/libchrome/base/bind_unittest.cc
new file mode 100644
index 0000000..ba5113b
--- /dev/null
+++ b/libchrome/base/bind_unittest.cc
@@ -0,0 +1,1117 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "build/build_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::Mock;
+using ::testing::Return;
+using ::testing::StrictMock;
+
+namespace base {
+namespace {
+
+class IncompleteType;
+
+class NoRef {
+ public:
+ NoRef() {}
+
+ MOCK_METHOD0(VoidMethod0, void());
+ MOCK_CONST_METHOD0(VoidConstMethod0, void());
+
+ MOCK_METHOD0(IntMethod0, int());
+ MOCK_CONST_METHOD0(IntConstMethod0, int());
+
+ private:
+ // Particularly important in this test to ensure no copies are made.
+ DISALLOW_COPY_AND_ASSIGN(NoRef);
+};
+
+class HasRef : public NoRef {
+ public:
+ HasRef() {}
+
+ MOCK_CONST_METHOD0(AddRef, void());
+ MOCK_CONST_METHOD0(Release, bool());
+
+ private:
+ // Particularly important in this test to ensure no copies are made.
+ DISALLOW_COPY_AND_ASSIGN(HasRef);
+};
+
+class HasRefPrivateDtor : public HasRef {
+ private:
+ ~HasRefPrivateDtor() {}
+};
+
+static const int kParentValue = 1;
+static const int kChildValue = 2;
+
+class Parent {
+ public:
+ virtual ~Parent() {}
+ void AddRef() const {}
+ void Release() const {}
+ virtual void VirtualSet() { value = kParentValue; }
+ void NonVirtualSet() { value = kParentValue; }
+ int value;
+};
+
+class Child : public Parent {
+ public:
+ ~Child() override {}
+ void VirtualSet() override { value = kChildValue; }
+ void NonVirtualSet() { value = kChildValue; }
+};
+
+class NoRefParent {
+ public:
+ virtual ~NoRefParent() {}
+ virtual void VirtualSet() { value = kParentValue; }
+ void NonVirtualSet() { value = kParentValue; }
+ int value;
+};
+
+class NoRefChild : public NoRefParent {
+ public:
+ ~NoRefChild() override {}
+ private:
+ void VirtualSet() override { value = kChildValue; }
+ void NonVirtualSet() { value = kChildValue; }
+};
+
+// Used for probing the number of copies and moves that occur if a type must be
+// coerced during argument forwarding in the Run() methods.
+struct DerivedCopyMoveCounter {
+ DerivedCopyMoveCounter(int* copies,
+ int* assigns,
+ int* move_constructs,
+ int* move_assigns)
+ : copies_(copies),
+ assigns_(assigns),
+ move_constructs_(move_constructs),
+ move_assigns_(move_assigns) {}
+ int* copies_;
+ int* assigns_;
+ int* move_constructs_;
+ int* move_assigns_;
+};
+
+// Used for probing the number of copies and moves in an argument.
+class CopyMoveCounter {
+ public:
+ CopyMoveCounter(int* copies,
+ int* assigns,
+ int* move_constructs,
+ int* move_assigns)
+ : copies_(copies),
+ assigns_(assigns),
+ move_constructs_(move_constructs),
+ move_assigns_(move_assigns) {}
+
+ CopyMoveCounter(const CopyMoveCounter& other)
+ : copies_(other.copies_),
+ assigns_(other.assigns_),
+ move_constructs_(other.move_constructs_),
+ move_assigns_(other.move_assigns_) {
+ (*copies_)++;
+ }
+
+ CopyMoveCounter(CopyMoveCounter&& other)
+ : copies_(other.copies_),
+ assigns_(other.assigns_),
+ move_constructs_(other.move_constructs_),
+ move_assigns_(other.move_assigns_) {
+ (*move_constructs_)++;
+ }
+
+ // Probing for copies from coercion.
+ explicit CopyMoveCounter(const DerivedCopyMoveCounter& other)
+ : copies_(other.copies_),
+ assigns_(other.assigns_),
+ move_constructs_(other.move_constructs_),
+ move_assigns_(other.move_assigns_) {
+ (*copies_)++;
+ }
+
+ // Probing for moves from coercion.
+ explicit CopyMoveCounter(DerivedCopyMoveCounter&& other)
+ : copies_(other.copies_),
+ assigns_(other.assigns_),
+ move_constructs_(other.move_constructs_),
+ move_assigns_(other.move_assigns_) {
+ (*move_constructs_)++;
+ }
+
+ const CopyMoveCounter& operator=(const CopyMoveCounter& rhs) {
+ copies_ = rhs.copies_;
+ assigns_ = rhs.assigns_;
+ move_constructs_ = rhs.move_constructs_;
+ move_assigns_ = rhs.move_assigns_;
+
+ (*assigns_)++;
+
+ return *this;
+ }
+
+ const CopyMoveCounter& operator=(CopyMoveCounter&& rhs) {
+ copies_ = rhs.copies_;
+ assigns_ = rhs.assigns_;
+ move_constructs_ = rhs.move_constructs_;
+ move_assigns_ = rhs.move_assigns_;
+
+ (*move_assigns_)++;
+
+ return *this;
+ }
+
+ int copies() const {
+ return *copies_;
+ }
+
+ private:
+ int* copies_;
+ int* assigns_;
+ int* move_constructs_;
+ int* move_assigns_;
+};
+
+// Used for probing the number of copies in an argument. The instance is a
+// copyable and non-movable type.
+class CopyCounter {
+ public:
+ CopyCounter(int* copies, int* assigns)
+ : counter_(copies, assigns, nullptr, nullptr) {}
+ CopyCounter(const CopyCounter& other) : counter_(other.counter_) {}
+ CopyCounter& operator=(const CopyCounter& other) {
+ counter_ = other.counter_;
+ return *this;
+ }
+
+ explicit CopyCounter(const DerivedCopyMoveCounter& other) : counter_(other) {}
+
+ int copies() const { return counter_.copies(); }
+
+ private:
+ CopyMoveCounter counter_;
+};
+
+// Used for probing the number of moves in an argument. The instance is a
+// non-copyable and movable type.
+class MoveCounter {
+ public:
+ MoveCounter(int* move_constructs, int* move_assigns)
+ : counter_(nullptr, nullptr, move_constructs, move_assigns) {}
+ MoveCounter(MoveCounter&& other) : counter_(std::move(other.counter_)) {}
+ MoveCounter& operator=(MoveCounter&& other) {
+ counter_ = std::move(other.counter_);
+ return *this;
+ }
+
+ explicit MoveCounter(DerivedCopyMoveCounter&& other)
+ : counter_(std::move(other)) {}
+
+ private:
+ CopyMoveCounter counter_;
+};
+
+class DeleteCounter {
+ public:
+ explicit DeleteCounter(int* deletes)
+ : deletes_(deletes) {
+ }
+
+ ~DeleteCounter() {
+ (*deletes_)++;
+ }
+
+ void VoidMethod0() {}
+
+ private:
+ int* deletes_;
+};
+
+template <typename T>
+T PassThru(T scoper) {
+ return scoper;
+}
+
+// Some test functions that we can Bind to.
+template <typename T>
+T PolymorphicIdentity(T t) {
+ return t;
+}
+
+template <typename... Ts>
+struct VoidPolymorphic {
+ static void Run(Ts... t) {}
+};
+
+int Identity(int n) {
+ return n;
+}
+
+int ArrayGet(const int array[], int n) {
+ return array[n];
+}
+
+int Sum(int a, int b, int c, int d, int e, int f) {
+ return a + b + c + d + e + f;
+}
+
+const char* CStringIdentity(const char* s) {
+ return s;
+}
+
+int GetCopies(const CopyMoveCounter& counter) {
+ return counter.copies();
+}
+
+int UnwrapNoRefParent(NoRefParent p) {
+ return p.value;
+}
+
+int UnwrapNoRefParentPtr(NoRefParent* p) {
+ return p->value;
+}
+
+int UnwrapNoRefParentConstRef(const NoRefParent& p) {
+ return p.value;
+}
+
+void RefArgSet(int &n) {
+ n = 2;
+}
+
+void PtrArgSet(int *n) {
+ *n = 2;
+}
+
+int FunctionWithWeakFirstParam(WeakPtr<NoRef> o, int n) {
+ return n;
+}
+
+int FunctionWithScopedRefptrFirstParam(const scoped_refptr<HasRef>& o, int n) {
+ return n;
+}
+
+void TakesACallback(const Closure& callback) {
+ callback.Run();
+}
+
+class BindTest : public ::testing::Test {
+ public:
+ BindTest() {
+ const_has_ref_ptr_ = &has_ref_;
+ const_no_ref_ptr_ = &no_ref_;
+ static_func_mock_ptr = &static_func_mock_;
+ }
+
+ virtual ~BindTest() {
+ }
+
+ static void VoidFunc0() {
+ static_func_mock_ptr->VoidMethod0();
+ }
+
+ static int IntFunc0() { return static_func_mock_ptr->IntMethod0(); }
+
+ protected:
+ StrictMock<NoRef> no_ref_;
+ StrictMock<HasRef> has_ref_;
+ const HasRef* const_has_ref_ptr_;
+ const NoRef* const_no_ref_ptr_;
+ StrictMock<NoRef> static_func_mock_;
+
+ // Used by the static functions to perform expectations.
+ static StrictMock<NoRef>* static_func_mock_ptr;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BindTest);
+};
+
+StrictMock<NoRef>* BindTest::static_func_mock_ptr;
+
+// Sanity check that we can instantiate a callback for each arity.
+TEST_F(BindTest, ArityTest) {
+ Callback<int()> c0 = Bind(&Sum, 32, 16, 8, 4, 2, 1);
+ EXPECT_EQ(63, c0.Run());
+
+ Callback<int(int)> c1 = Bind(&Sum, 32, 16, 8, 4, 2);
+ EXPECT_EQ(75, c1.Run(13));
+
+ Callback<int(int,int)> c2 = Bind(&Sum, 32, 16, 8, 4);
+ EXPECT_EQ(85, c2.Run(13, 12));
+
+ Callback<int(int,int,int)> c3 = Bind(&Sum, 32, 16, 8);
+ EXPECT_EQ(92, c3.Run(13, 12, 11));
+
+ Callback<int(int,int,int,int)> c4 = Bind(&Sum, 32, 16);
+ EXPECT_EQ(94, c4.Run(13, 12, 11, 10));
+
+ Callback<int(int,int,int,int,int)> c5 = Bind(&Sum, 32);
+ EXPECT_EQ(87, c5.Run(13, 12, 11, 10, 9));
+
+ Callback<int(int,int,int,int,int,int)> c6 = Bind(&Sum);
+ EXPECT_EQ(69, c6.Run(13, 12, 11, 10, 9, 14));
+}
+
+// Test the Currying ability of the Callback system.
+TEST_F(BindTest, CurryingTest) {
+ Callback<int(int,int,int,int,int,int)> c6 = Bind(&Sum);
+ EXPECT_EQ(69, c6.Run(13, 12, 11, 10, 9, 14));
+
+ Callback<int(int,int,int,int,int)> c5 = Bind(c6, 32);
+ EXPECT_EQ(87, c5.Run(13, 12, 11, 10, 9));
+
+ Callback<int(int,int,int,int)> c4 = Bind(c5, 16);
+ EXPECT_EQ(94, c4.Run(13, 12, 11, 10));
+
+ Callback<int(int,int,int)> c3 = Bind(c4, 8);
+ EXPECT_EQ(92, c3.Run(13, 12, 11));
+
+ Callback<int(int,int)> c2 = Bind(c3, 4);
+ EXPECT_EQ(85, c2.Run(13, 12));
+
+ Callback<int(int)> c1 = Bind(c2, 2);
+ EXPECT_EQ(75, c1.Run(13));
+
+ Callback<int()> c0 = Bind(c1, 1);
+ EXPECT_EQ(63, c0.Run());
+}
+
+// Test that currying the rvalue result of another Bind() works correctly.
+// - rvalue should be usable as argument to Bind().
+// - multiple runs of resulting Callback remain valid.
+TEST_F(BindTest, CurryingRvalueResultOfBind) {
+ int n = 0;
+ Closure cb = base::Bind(&TakesACallback, base::Bind(&PtrArgSet, &n));
+
+ // If we implement Bind() such that the return value has auto_ptr-like
+ // semantics, the second call here will fail because ownership of
+// the internal BindState<> would have been transferred to a *temporary*
+// construction of a Callback object on the first call.
+ cb.Run();
+ EXPECT_EQ(2, n);
+
+ n = 0;
+ cb.Run();
+ EXPECT_EQ(2, n);
+}
+
+// Function type support.
+// - Normal function.
+// - Normal function bound with non-refcounted first argument.
+// - Method bound to non-const object.
+// - Method bound to scoped_refptr.
+// - Const method bound to non-const object.
+// - Const method bound to const object.
+// - Derived classes can be used with pointers to non-virtual base functions.
+// - Derived classes can be used with pointers to virtual base functions (and
+// preserve virtual dispatch).
+TEST_F(BindTest, FunctionTypeSupport) {
+ EXPECT_CALL(static_func_mock_, VoidMethod0());
+ EXPECT_CALL(has_ref_, AddRef()).Times(4);
+ EXPECT_CALL(has_ref_, Release()).Times(4);
+ EXPECT_CALL(has_ref_, VoidMethod0()).Times(2);
+ EXPECT_CALL(has_ref_, VoidConstMethod0()).Times(2);
+
+ Closure normal_cb = Bind(&VoidFunc0);
+ Callback<NoRef*()> normal_non_refcounted_cb =
+ Bind(&PolymorphicIdentity<NoRef*>, &no_ref_);
+ normal_cb.Run();
+ EXPECT_EQ(&no_ref_, normal_non_refcounted_cb.Run());
+
+ Closure method_cb = Bind(&HasRef::VoidMethod0, &has_ref_);
+ Closure method_refptr_cb = Bind(&HasRef::VoidMethod0,
+ make_scoped_refptr(&has_ref_));
+ Closure const_method_nonconst_obj_cb = Bind(&HasRef::VoidConstMethod0,
+ &has_ref_);
+ Closure const_method_const_obj_cb = Bind(&HasRef::VoidConstMethod0,
+ const_has_ref_ptr_);
+ method_cb.Run();
+ method_refptr_cb.Run();
+ const_method_nonconst_obj_cb.Run();
+ const_method_const_obj_cb.Run();
+
+ Child child;
+ child.value = 0;
+ Closure virtual_set_cb = Bind(&Parent::VirtualSet, &child);
+ virtual_set_cb.Run();
+ EXPECT_EQ(kChildValue, child.value);
+
+ child.value = 0;
+ Closure non_virtual_set_cb = Bind(&Parent::NonVirtualSet, &child);
+ non_virtual_set_cb.Run();
+ EXPECT_EQ(kParentValue, child.value);
+}
+
+// Return value support.
+// - Function with return value.
+// - Method with return value.
+// - Const method with return value.
+TEST_F(BindTest, ReturnValues) {
+ EXPECT_CALL(static_func_mock_, IntMethod0()).WillOnce(Return(1337));
+ EXPECT_CALL(has_ref_, AddRef()).Times(3);
+ EXPECT_CALL(has_ref_, Release()).Times(3);
+ EXPECT_CALL(has_ref_, IntMethod0()).WillOnce(Return(31337));
+ EXPECT_CALL(has_ref_, IntConstMethod0())
+ .WillOnce(Return(41337))
+ .WillOnce(Return(51337));
+
+ Callback<int()> normal_cb = Bind(&IntFunc0);
+ Callback<int()> method_cb = Bind(&HasRef::IntMethod0, &has_ref_);
+ Callback<int()> const_method_nonconst_obj_cb =
+ Bind(&HasRef::IntConstMethod0, &has_ref_);
+ Callback<int()> const_method_const_obj_cb =
+ Bind(&HasRef::IntConstMethod0, const_has_ref_ptr_);
+ EXPECT_EQ(1337, normal_cb.Run());
+ EXPECT_EQ(31337, method_cb.Run());
+ EXPECT_EQ(41337, const_method_nonconst_obj_cb.Run());
+ EXPECT_EQ(51337, const_method_const_obj_cb.Run());
+}
+
+// IgnoreResult adapter test.
+// - Function with return value.
+// - Method with return value.
+// - Const Method with return.
+// - Method with return value bound to WeakPtr<>.
+// - Const Method with return bound to WeakPtr<>.
+TEST_F(BindTest, IgnoreResult) {
+ EXPECT_CALL(static_func_mock_, IntMethod0()).WillOnce(Return(1337));
+ EXPECT_CALL(has_ref_, AddRef()).Times(2);
+ EXPECT_CALL(has_ref_, Release()).Times(2);
+ EXPECT_CALL(has_ref_, IntMethod0()).WillOnce(Return(10));
+ EXPECT_CALL(has_ref_, IntConstMethod0()).WillOnce(Return(11));
+ EXPECT_CALL(no_ref_, IntMethod0()).WillOnce(Return(12));
+ EXPECT_CALL(no_ref_, IntConstMethod0()).WillOnce(Return(13));
+
+ Closure normal_func_cb = Bind(IgnoreResult(&IntFunc0));
+ normal_func_cb.Run();
+
+ Closure non_void_method_cb =
+ Bind(IgnoreResult(&HasRef::IntMethod0), &has_ref_);
+ non_void_method_cb.Run();
+
+ Closure non_void_const_method_cb =
+ Bind(IgnoreResult(&HasRef::IntConstMethod0), &has_ref_);
+ non_void_const_method_cb.Run();
+
+ WeakPtrFactory<NoRef> weak_factory(&no_ref_);
+ WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
+
+ Closure non_void_weak_method_cb =
+ Bind(IgnoreResult(&NoRef::IntMethod0), weak_factory.GetWeakPtr());
+ non_void_weak_method_cb.Run();
+
+ Closure non_void_weak_const_method_cb =
+ Bind(IgnoreResult(&NoRef::IntConstMethod0), weak_factory.GetWeakPtr());
+ non_void_weak_const_method_cb.Run();
+
+ weak_factory.InvalidateWeakPtrs();
+ non_void_weak_const_method_cb.Run();
+ non_void_weak_method_cb.Run();
+}
+
+// Argument binding tests.
+// - Argument binding to primitive.
+// - Argument binding to primitive pointer.
+// - Argument binding to a literal integer.
+// - Argument binding to a literal string.
+// - Argument binding with template function.
+// - Argument binding to an object.
+// - Argument binding to pointer to incomplete type.
+// - Argument gets type converted.
+// - Pointer argument gets converted.
+// - Const Reference forces conversion.
+TEST_F(BindTest, ArgumentBinding) {
+ int n = 2;
+
+ Callback<int()> bind_primitive_cb = Bind(&Identity, n);
+ EXPECT_EQ(n, bind_primitive_cb.Run());
+
+ Callback<int*()> bind_primitive_pointer_cb =
+ Bind(&PolymorphicIdentity<int*>, &n);
+ EXPECT_EQ(&n, bind_primitive_pointer_cb.Run());
+
+ Callback<int()> bind_int_literal_cb = Bind(&Identity, 3);
+ EXPECT_EQ(3, bind_int_literal_cb.Run());
+
+ Callback<const char*()> bind_string_literal_cb =
+ Bind(&CStringIdentity, "hi");
+ EXPECT_STREQ("hi", bind_string_literal_cb.Run());
+
+ Callback<int()> bind_template_function_cb =
+ Bind(&PolymorphicIdentity<int>, 4);
+ EXPECT_EQ(4, bind_template_function_cb.Run());
+
+ NoRefParent p;
+ p.value = 5;
+ Callback<int()> bind_object_cb = Bind(&UnwrapNoRefParent, p);
+ EXPECT_EQ(5, bind_object_cb.Run());
+
+ IncompleteType* incomplete_ptr = reinterpret_cast<IncompleteType*>(123);
+ Callback<IncompleteType*()> bind_incomplete_ptr_cb =
+ Bind(&PolymorphicIdentity<IncompleteType*>, incomplete_ptr);
+ EXPECT_EQ(incomplete_ptr, bind_incomplete_ptr_cb.Run());
+
+ NoRefChild c;
+ c.value = 6;
+ Callback<int()> bind_promotes_cb = Bind(&UnwrapNoRefParent, c);
+ EXPECT_EQ(6, bind_promotes_cb.Run());
+
+ c.value = 7;
+ Callback<int()> bind_pointer_promotes_cb =
+ Bind(&UnwrapNoRefParentPtr, &c);
+ EXPECT_EQ(7, bind_pointer_promotes_cb.Run());
+
+ c.value = 8;
+ Callback<int()> bind_const_reference_promotes_cb =
+ Bind(&UnwrapNoRefParentConstRef, c);
+ EXPECT_EQ(8, bind_const_reference_promotes_cb.Run());
+}
+
+// Unbound argument type support tests.
+// - Unbound value.
+// - Unbound pointer.
+// - Unbound reference.
+// - Unbound const reference.
+// - Unbound unsized array.
+// - Unbound sized array.
+// - Unbound array-of-arrays.
+TEST_F(BindTest, UnboundArgumentTypeSupport) {
+ Callback<void(int)> unbound_value_cb = Bind(&VoidPolymorphic<int>::Run);
+ Callback<void(int*)> unbound_pointer_cb = Bind(&VoidPolymorphic<int*>::Run);
+ Callback<void(int&)> unbound_ref_cb = Bind(&VoidPolymorphic<int&>::Run);
+ Callback<void(const int&)> unbound_const_ref_cb =
+ Bind(&VoidPolymorphic<const int&>::Run);
+ Callback<void(int[])> unbound_unsized_array_cb =
+ Bind(&VoidPolymorphic<int[]>::Run);
+ Callback<void(int[2])> unbound_sized_array_cb =
+ Bind(&VoidPolymorphic<int[2]>::Run);
+ Callback<void(int[][2])> unbound_array_of_arrays_cb =
+ Bind(&VoidPolymorphic<int[][2]>::Run);
+
+ Callback<void(int&)> unbound_ref_with_bound_arg =
+ Bind(&VoidPolymorphic<int, int&>::Run, 1);
+}
+
+// Function with unbound reference parameter.
+// - Original parameter is modified by callback.
+TEST_F(BindTest, UnboundReferenceSupport) {
+ int n = 0;
+ Callback<void(int&)> unbound_ref_cb = Bind(&RefArgSet);
+ unbound_ref_cb.Run(n);
+ EXPECT_EQ(2, n);
+}
+
+// Functions that take reference parameters.
+// - Forced reference parameter type still stores a copy.
+// - Forced const reference parameter type still stores a copy.
+TEST_F(BindTest, ReferenceArgumentBinding) {
+ int n = 1;
+ int& ref_n = n;
+ const int& const_ref_n = n;
+
+ Callback<int()> ref_copies_cb = Bind(&Identity, ref_n);
+ EXPECT_EQ(n, ref_copies_cb.Run());
+ n++;
+ EXPECT_EQ(n - 1, ref_copies_cb.Run());
+
+ Callback<int()> const_ref_copies_cb = Bind(&Identity, const_ref_n);
+ EXPECT_EQ(n, const_ref_copies_cb.Run());
+ n++;
+ EXPECT_EQ(n - 1, const_ref_copies_cb.Run());
+}
+
+// Check that we can pass in arrays and have them be stored as a pointer.
+// - Array of values stores a pointer.
+// - Array of const values stores a pointer.
+TEST_F(BindTest, ArrayArgumentBinding) {
+ int array[4] = {1, 1, 1, 1};
+ const int (*const_array_ptr)[4] = &array;
+
+ Callback<int()> array_cb = Bind(&ArrayGet, array, 1);
+ EXPECT_EQ(1, array_cb.Run());
+
+ Callback<int()> const_array_cb = Bind(&ArrayGet, *const_array_ptr, 1);
+ EXPECT_EQ(1, const_array_cb.Run());
+
+ array[1] = 3;
+ EXPECT_EQ(3, array_cb.Run());
+ EXPECT_EQ(3, const_array_cb.Run());
+}
+
+// Unretained() wrapper support.
+// - Method bound to Unretained() non-const object.
+// - Const method bound to Unretained() non-const object.
+// - Const method bound to Unretained() const object.
+TEST_F(BindTest, Unretained) {
+ EXPECT_CALL(no_ref_, VoidMethod0());
+ EXPECT_CALL(no_ref_, VoidConstMethod0()).Times(2);
+
+ Callback<void()> method_cb =
+ Bind(&NoRef::VoidMethod0, Unretained(&no_ref_));
+ method_cb.Run();
+
+ Callback<void()> const_method_cb =
+ Bind(&NoRef::VoidConstMethod0, Unretained(&no_ref_));
+ const_method_cb.Run();
+
+ Callback<void()> const_method_const_ptr_cb =
+ Bind(&NoRef::VoidConstMethod0, Unretained(const_no_ref_ptr_));
+ const_method_const_ptr_cb.Run();
+}
+
+// WeakPtr() support.
+// - Method bound to WeakPtr<> to non-const object.
+// - Const method bound to WeakPtr<> to non-const object.
+// - Const method bound to WeakPtr<> to const object.
+// - Normal Function with WeakPtr<> as P1 can have return type and is
+// not canceled.
+TEST_F(BindTest, WeakPtr) {
+ EXPECT_CALL(no_ref_, VoidMethod0());
+ EXPECT_CALL(no_ref_, VoidConstMethod0()).Times(2);
+
+ WeakPtrFactory<NoRef> weak_factory(&no_ref_);
+ WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
+
+ Closure method_cb =
+ Bind(&NoRef::VoidMethod0, weak_factory.GetWeakPtr());
+ method_cb.Run();
+
+ Closure const_method_cb =
+ Bind(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
+ const_method_cb.Run();
+
+ Closure const_method_const_ptr_cb =
+ Bind(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
+ const_method_const_ptr_cb.Run();
+
+ Callback<int(int)> normal_func_cb =
+ Bind(&FunctionWithWeakFirstParam, weak_factory.GetWeakPtr());
+ EXPECT_EQ(1, normal_func_cb.Run(1));
+
+ weak_factory.InvalidateWeakPtrs();
+ const_weak_factory.InvalidateWeakPtrs();
+
+ method_cb.Run();
+ const_method_cb.Run();
+ const_method_const_ptr_cb.Run();
+
+ // Still runs even after the pointers are invalidated.
+ EXPECT_EQ(2, normal_func_cb.Run(2));
+}
+
+// ConstRef() wrapper support.
+// - Binding w/o ConstRef takes a copy.
+// - Binding a ConstRef takes a reference.
+// - Binding ConstRef to a function ConstRef does not copy on invoke.
+TEST_F(BindTest, ConstRef) {
+ int n = 1;
+
+ Callback<int()> copy_cb = Bind(&Identity, n);
+ Callback<int()> const_ref_cb = Bind(&Identity, ConstRef(n));
+ EXPECT_EQ(n, copy_cb.Run());
+ EXPECT_EQ(n, const_ref_cb.Run());
+ n++;
+ EXPECT_EQ(n - 1, copy_cb.Run());
+ EXPECT_EQ(n, const_ref_cb.Run());
+
+ int copies = 0;
+ int assigns = 0;
+ int move_constructs = 0;
+ int move_assigns = 0;
+ CopyMoveCounter counter(&copies, &assigns, &move_constructs, &move_assigns);
+ Callback<int()> all_const_ref_cb =
+ Bind(&GetCopies, ConstRef(counter));
+ EXPECT_EQ(0, all_const_ref_cb.Run());
+ EXPECT_EQ(0, copies);
+ EXPECT_EQ(0, assigns);
+ EXPECT_EQ(0, move_constructs);
+ EXPECT_EQ(0, move_assigns);
+}
+
+TEST_F(BindTest, ScopedRefptr) {
+ EXPECT_CALL(has_ref_, AddRef()).Times(1);
+ EXPECT_CALL(has_ref_, Release()).Times(1);
+
+ const scoped_refptr<HasRef> refptr(&has_ref_);
+ Callback<int()> scoped_refptr_const_ref_cb =
+ Bind(&FunctionWithScopedRefptrFirstParam, base::ConstRef(refptr), 1);
+ EXPECT_EQ(1, scoped_refptr_const_ref_cb.Run());
+}
+
+// Test Owned() support.
+TEST_F(BindTest, Owned) {
+ int deletes = 0;
+ DeleteCounter* counter = new DeleteCounter(&deletes);
+
+  // If we don't capture, delete happens on Callback destruction/reset, and
+  // repeated Run() calls return the same value.
+ Callback<DeleteCounter*()> no_capture_cb =
+ Bind(&PolymorphicIdentity<DeleteCounter*>, Owned(counter));
+ ASSERT_EQ(counter, no_capture_cb.Run());
+ ASSERT_EQ(counter, no_capture_cb.Run());
+ EXPECT_EQ(0, deletes);
+ no_capture_cb.Reset(); // This should trigger a delete.
+ EXPECT_EQ(1, deletes);
+
+ deletes = 0;
+ counter = new DeleteCounter(&deletes);
+ base::Closure own_object_cb =
+ Bind(&DeleteCounter::VoidMethod0, Owned(counter));
+ own_object_cb.Run();
+ EXPECT_EQ(0, deletes);
+ own_object_cb.Reset();
+ EXPECT_EQ(1, deletes);
+}
+
+TEST_F(BindTest, UniquePtrReceiver) {
+ std::unique_ptr<StrictMock<NoRef>> no_ref(new StrictMock<NoRef>);
+ EXPECT_CALL(*no_ref, VoidMethod0()).Times(1);
+ Bind(&NoRef::VoidMethod0, std::move(no_ref)).Run();
+}
+
+// Tests for Passed() wrapper support:
+// - Passed() can be constructed from a pointer to scoper.
+// - Passed() can be constructed from a scoper rvalue.
+// - Using Passed() gives Callback Ownership.
+// - Ownership is transferred from Callback to callee on the first Run().
+// - Callback supports unbound arguments.
+template <typename T>
+class BindMoveOnlyTypeTest : public ::testing::Test {
+};
+
+struct CustomDeleter {
+ void operator()(DeleteCounter* c) { delete c; }
+};
+
+using MoveOnlyTypesToTest =
+ ::testing::Types<std::unique_ptr<DeleteCounter>,
+ std::unique_ptr<DeleteCounter>,
+ std::unique_ptr<DeleteCounter, CustomDeleter>>;
+TYPED_TEST_CASE(BindMoveOnlyTypeTest, MoveOnlyTypesToTest);
+
+TYPED_TEST(BindMoveOnlyTypeTest, PassedToBoundCallback) {
+ int deletes = 0;
+
+ TypeParam ptr(new DeleteCounter(&deletes));
+ Callback<TypeParam()> callback = Bind(&PassThru<TypeParam>, Passed(&ptr));
+ EXPECT_FALSE(ptr.get());
+ EXPECT_EQ(0, deletes);
+
+ // If we never invoke the Callback, it retains ownership and deletes.
+ callback.Reset();
+ EXPECT_EQ(1, deletes);
+}
+
+TYPED_TEST(BindMoveOnlyTypeTest, PassedWithRvalue) {
+ int deletes = 0;
+ Callback<TypeParam()> callback = Bind(
+ &PassThru<TypeParam>, Passed(TypeParam(new DeleteCounter(&deletes))));
+ EXPECT_EQ(0, deletes);
+
+ // If we never invoke the Callback, it retains ownership and deletes.
+ callback.Reset();
+ EXPECT_EQ(1, deletes);
+}
+
+// Check that ownership can be transferred back out.
+TYPED_TEST(BindMoveOnlyTypeTest, ReturnMoveOnlyType) {
+ int deletes = 0;
+ DeleteCounter* counter = new DeleteCounter(&deletes);
+ Callback<TypeParam()> callback =
+ Bind(&PassThru<TypeParam>, Passed(TypeParam(counter)));
+ TypeParam result = callback.Run();
+ ASSERT_EQ(counter, result.get());
+ EXPECT_EQ(0, deletes);
+
+ // Resetting does not delete since ownership was transferred.
+ callback.Reset();
+ EXPECT_EQ(0, deletes);
+
+ // Ensure that we actually did get ownership.
+ result.reset();
+ EXPECT_EQ(1, deletes);
+}
+
+TYPED_TEST(BindMoveOnlyTypeTest, UnboundForwarding) {
+ int deletes = 0;
+ TypeParam ptr(new DeleteCounter(&deletes));
+ // Test unbound argument forwarding.
+ Callback<TypeParam(TypeParam)> cb_unbound = Bind(&PassThru<TypeParam>);
+ cb_unbound.Run(std::move(ptr));
+ EXPECT_EQ(1, deletes);
+}
+
+void VerifyVector(const std::vector<std::unique_ptr<int>>& v) {
+ ASSERT_EQ(1u, v.size());
+ EXPECT_EQ(12345, *v[0]);
+}
+
+std::vector<std::unique_ptr<int>> AcceptAndReturnMoveOnlyVector(
+ std::vector<std::unique_ptr<int>> v) {
+ VerifyVector(v);
+ return v;
+}
+
+// Test that a vector containing move-only types can be used with Callback.
+TEST_F(BindTest, BindMoveOnlyVector) {
+ using MoveOnlyVector = std::vector<std::unique_ptr<int>>;
+
+ MoveOnlyVector v;
+ v.push_back(WrapUnique(new int(12345)));
+
+ // Early binding should work:
+ base::Callback<MoveOnlyVector()> bound_cb =
+ base::Bind(&AcceptAndReturnMoveOnlyVector, Passed(&v));
+ MoveOnlyVector intermediate_result = bound_cb.Run();
+ VerifyVector(intermediate_result);
+
+ // As should passing it as an argument to Run():
+ base::Callback<MoveOnlyVector(MoveOnlyVector)> unbound_cb =
+ base::Bind(&AcceptAndReturnMoveOnlyVector);
+ MoveOnlyVector final_result = unbound_cb.Run(std::move(intermediate_result));
+ VerifyVector(final_result);
+}
+
+// Argument copy-constructor usage for non-reference copy-only parameters.
+// - Bound arguments are only copied once.
+// - Forwarded arguments are only copied once.
+// - Forwarded arguments with coercions are only copied twice (once for the
+//   coercion, and once for the final dispatch).
+TEST_F(BindTest, ArgumentCopies) {
+ int copies = 0;
+ int assigns = 0;
+
+ CopyCounter counter(&copies, &assigns);
+ Bind(&VoidPolymorphic<CopyCounter>::Run, counter);
+ EXPECT_EQ(1, copies);
+ EXPECT_EQ(0, assigns);
+
+ copies = 0;
+ assigns = 0;
+ Bind(&VoidPolymorphic<CopyCounter>::Run, CopyCounter(&copies, &assigns));
+ EXPECT_EQ(1, copies);
+ EXPECT_EQ(0, assigns);
+
+ copies = 0;
+ assigns = 0;
+ Bind(&VoidPolymorphic<CopyCounter>::Run).Run(counter);
+ EXPECT_EQ(2, copies);
+ EXPECT_EQ(0, assigns);
+
+ copies = 0;
+ assigns = 0;
+ Bind(&VoidPolymorphic<CopyCounter>::Run).Run(CopyCounter(&copies, &assigns));
+ EXPECT_EQ(1, copies);
+ EXPECT_EQ(0, assigns);
+
+ copies = 0;
+ assigns = 0;
+ DerivedCopyMoveCounter derived(&copies, &assigns, nullptr, nullptr);
+ Bind(&VoidPolymorphic<CopyCounter>::Run).Run(CopyCounter(derived));
+ EXPECT_EQ(2, copies);
+ EXPECT_EQ(0, assigns);
+
+ copies = 0;
+ assigns = 0;
+ Bind(&VoidPolymorphic<CopyCounter>::Run)
+ .Run(CopyCounter(
+ DerivedCopyMoveCounter(&copies, &assigns, nullptr, nullptr)));
+ EXPECT_EQ(2, copies);
+ EXPECT_EQ(0, assigns);
+}
+
+// Argument move-constructor usage for move-only parameters.
+// - Bound arguments passed by move are not copied.
+TEST_F(BindTest, ArgumentMoves) {
+ int move_constructs = 0;
+ int move_assigns = 0;
+
+ Bind(&VoidPolymorphic<const MoveCounter&>::Run,
+ MoveCounter(&move_constructs, &move_assigns));
+ EXPECT_EQ(1, move_constructs);
+ EXPECT_EQ(0, move_assigns);
+
+ // TODO(tzik): Support binding move-only type into a non-reference parameter
+ // of a variant of Callback.
+
+ move_constructs = 0;
+ move_assigns = 0;
+ Bind(&VoidPolymorphic<MoveCounter>::Run)
+ .Run(MoveCounter(&move_constructs, &move_assigns));
+ EXPECT_EQ(1, move_constructs);
+ EXPECT_EQ(0, move_assigns);
+
+ move_constructs = 0;
+ move_assigns = 0;
+ Bind(&VoidPolymorphic<MoveCounter>::Run)
+ .Run(MoveCounter(DerivedCopyMoveCounter(
+ nullptr, nullptr, &move_constructs, &move_assigns)));
+ EXPECT_EQ(2, move_constructs);
+ EXPECT_EQ(0, move_assigns);
+}
+
+// Argument constructor usage for non-reference movable-copyable
+// parameters.
+// - Bound arguments passed by move are not copied.
+// - Forwarded arguments are only copied once.
+// - Forwarded arguments with coercions are only copied once and moved once.
+TEST_F(BindTest, ArgumentCopiesAndMoves) {
+ int copies = 0;
+ int assigns = 0;
+ int move_constructs = 0;
+ int move_assigns = 0;
+
+ CopyMoveCounter counter(&copies, &assigns, &move_constructs, &move_assigns);
+ Bind(&VoidPolymorphic<CopyMoveCounter>::Run, counter);
+ EXPECT_EQ(1, copies);
+ EXPECT_EQ(0, assigns);
+ EXPECT_EQ(0, move_constructs);
+ EXPECT_EQ(0, move_assigns);
+
+ copies = 0;
+ assigns = 0;
+ move_constructs = 0;
+ move_assigns = 0;
+ Bind(&VoidPolymorphic<CopyMoveCounter>::Run,
+ CopyMoveCounter(&copies, &assigns, &move_constructs, &move_assigns));
+ EXPECT_EQ(0, copies);
+ EXPECT_EQ(0, assigns);
+ EXPECT_EQ(1, move_constructs);
+ EXPECT_EQ(0, move_assigns);
+
+ copies = 0;
+ assigns = 0;
+ move_constructs = 0;
+ move_assigns = 0;
+ Bind(&VoidPolymorphic<CopyMoveCounter>::Run).Run(counter);
+ EXPECT_EQ(1, copies);
+ EXPECT_EQ(0, assigns);
+ EXPECT_EQ(1, move_constructs);
+ EXPECT_EQ(0, move_assigns);
+
+ copies = 0;
+ assigns = 0;
+ move_constructs = 0;
+ move_assigns = 0;
+ Bind(&VoidPolymorphic<CopyMoveCounter>::Run)
+ .Run(CopyMoveCounter(&copies, &assigns, &move_constructs, &move_assigns));
+ EXPECT_EQ(0, copies);
+ EXPECT_EQ(0, assigns);
+ EXPECT_EQ(1, move_constructs);
+ EXPECT_EQ(0, move_assigns);
+
+ DerivedCopyMoveCounter derived_counter(&copies, &assigns, &move_constructs,
+ &move_assigns);
+ copies = 0;
+ assigns = 0;
+ move_constructs = 0;
+ move_assigns = 0;
+ Bind(&VoidPolymorphic<CopyMoveCounter>::Run)
+ .Run(CopyMoveCounter(derived_counter));
+ EXPECT_EQ(1, copies);
+ EXPECT_EQ(0, assigns);
+ EXPECT_EQ(1, move_constructs);
+ EXPECT_EQ(0, move_assigns);
+
+ copies = 0;
+ assigns = 0;
+ move_constructs = 0;
+ move_assigns = 0;
+ Bind(&VoidPolymorphic<CopyMoveCounter>::Run)
+ .Run(CopyMoveCounter(DerivedCopyMoveCounter(
+ &copies, &assigns, &move_constructs, &move_assigns)));
+ EXPECT_EQ(0, copies);
+ EXPECT_EQ(0, assigns);
+ EXPECT_EQ(2, move_constructs);
+ EXPECT_EQ(0, move_assigns);
+}
+
+TEST_F(BindTest, CapturelessLambda) {
+ EXPECT_FALSE(internal::IsConvertibleToRunType<void>::value);
+ EXPECT_FALSE(internal::IsConvertibleToRunType<int>::value);
+ EXPECT_FALSE(internal::IsConvertibleToRunType<void(*)()>::value);
+ EXPECT_FALSE(internal::IsConvertibleToRunType<void(NoRef::*)()>::value);
+
+ auto f = []() {};
+ EXPECT_TRUE(internal::IsConvertibleToRunType<decltype(f)>::value);
+
+ int i = 0;
+ auto g = [i]() {};
+ EXPECT_FALSE(internal::IsConvertibleToRunType<decltype(g)>::value);
+
+ auto h = [](int, double) { return 'k'; };
+ EXPECT_TRUE((std::is_same<
+ char(int, double),
+ internal::ExtractCallableRunType<decltype(h)>>::value));
+
+ EXPECT_EQ(42, Bind([] { return 42; }).Run());
+ EXPECT_EQ(42, Bind([](int i) { return i * 7; }, 6).Run());
+
+ int x = 1;
+ base::Callback<void(int)> cb =
+ Bind([](int* x, int i) { *x *= i; }, Unretained(&x));
+ cb.Run(6);
+ EXPECT_EQ(6, x);
+ cb.Run(7);
+ EXPECT_EQ(42, x);
+}
+
+// Callback construction and assignment tests.
+// - Construction from an InvokerStorageHolder should not cause ref/deref.
+// - Assignment from other callback should only cause one ref
+//
+// TODO(ajwong): Is there actually a way to test this?
+
+#if defined(OS_WIN)
+int __fastcall FastCallFunc(int n) {
+ return n;
+}
+
+int __stdcall StdCallFunc(int n) {
+ return n;
+}
+
+// Windows specific calling convention support.
+// - Can bind a __fastcall function.
+// - Can bind a __stdcall function.
+TEST_F(BindTest, WindowsCallingConventions) {
+ Callback<int()> fastcall_cb = Bind(&FastCallFunc, 1);
+ EXPECT_EQ(1, fastcall_cb.Run());
+
+ Callback<int()> stdcall_cb = Bind(&StdCallFunc, 2);
+ EXPECT_EQ(2, stdcall_cb.Run());
+}
+#endif
+
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+
+// Test null callbacks cause a DCHECK.
+TEST(BindDeathTest, NullCallback) {
+ base::Callback<void(int)> null_cb;
+ ASSERT_TRUE(null_cb.is_null());
+ EXPECT_DEATH(base::Bind(null_cb, 42), "");
+}
+
+#endif // (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) &&
+ // GTEST_HAS_DEATH_TEST
+
+} // namespace
+} // namespace base
diff --git a/libchrome/base/bit_cast.h b/libchrome/base/bit_cast.h
new file mode 100644
index 0000000..c9514bc
--- /dev/null
+++ b/libchrome/base/bit_cast.h
@@ -0,0 +1,100 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BIT_CAST_H_
+#define BASE_BIT_CAST_H_
+
+#include <string.h>
+#include <type_traits>
+
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+// bit_cast<Dest,Source> is a template function that implements the equivalent
+// of "*reinterpret_cast<Dest*>(&source)". We need this in very low-level
+// functions like the protobuf library and fast math support.
+//
+// float f = 3.14159265358979;
+// int i = bit_cast<int32_t>(f);
+// // i = 0x40490fdb
+//
+// The classical address-casting method is:
+//
+// // WRONG
+// float f = 3.14159265358979; // WRONG
+// int i = * reinterpret_cast<int*>(&f); // WRONG
+//
+// The address-casting method actually produces undefined behavior according to
+// the ISO C++98 specification, section 3.10 ("basic.lval"), paragraph 15.
+// (This did not substantially change in C++11.) Roughly, this section says: if
+// an object in memory has one type, and a program accesses it with a different
+// type, then the result is undefined behavior for most values of "different
+// type".
+//
+// This is true for any cast syntax, either *(int*)&f or
+// *reinterpret_cast<int*>(&f). And it is particularly true for conversions
+// between integral lvalues and floating-point lvalues.
+//
+// The purpose of this paragraph is to allow optimizing compilers to assume that
+// expressions with different types refer to different memory. Compilers are
+// known to take advantage of this. So a non-conforming program quietly
+// produces wildly incorrect output.
+//
+// The problem is not the use of reinterpret_cast. The problem is type punning:
+// holding an object in memory of one type and reading its bits back using a
+// different type.
+//
+// The C++ standard is more subtle and complex than this, but that is the basic
+// idea.
+//
+// Anyways ...
+//
+// bit_cast<> calls memcpy() which is blessed by the standard, especially by the
+// example in section 3.9 . Also, of course, bit_cast<> wraps up the nasty
+// logic in one place.
+//
+// Fortunately memcpy() is very fast. In optimized mode, compilers replace
+// calls to memcpy() with inline object code when the size argument is a
+// compile-time constant. On a 32-bit system, memcpy(d,s,4) compiles to one
+// load and one store, and memcpy(d,s,8) compiles to two loads and two stores.
+
+template <class Dest, class Source>
+inline Dest bit_cast(const Source& source) {
+  // Sizes must match exactly so every bit of |source| maps onto |dest|.
+  static_assert(sizeof(Dest) == sizeof(Source),
+                "bit_cast requires source and destination to be the same size");
+
+#if (__GNUC__ > 5 || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) || \
+     (defined(__clang__) && defined(_LIBCPP_VERSION)))
+  // GCC 5.1 contains the first libstdc++ with is_trivially_copyable.
+  // Assume libc++ Just Works: is_trivially_copyable added on May 13th 2011.
+  // However, with libc++ when GCC is the compiler the trait is buggy, see
+  // crbug.com/607158, so fall back to the less strict variant for non-clang.
+  static_assert(std::is_trivially_copyable<Dest>::value,
+                "non-trivially-copyable bit_cast is undefined");
+  static_assert(std::is_trivially_copyable<Source>::value,
+                "non-trivially-copyable bit_cast is undefined");
+#elif HAS_FEATURE(is_trivially_copyable)
+  // The compiler supports an equivalent intrinsic.
+  static_assert(__is_trivially_copyable(Dest),
+                "non-trivially-copyable bit_cast is undefined");
+  static_assert(__is_trivially_copyable(Source),
+                "non-trivially-copyable bit_cast is undefined");
+#elif COMPILER_GCC
+  // Fallback to compiler intrinsic on GCC and clang (which pretends to be
+  // GCC). This isn't quite the same as is_trivially_copyable but it'll do for
+  // our purpose.
+  static_assert(__has_trivial_copy(Dest),
+                "non-trivially-copyable bit_cast is undefined");
+  static_assert(__has_trivial_copy(Source),
+                "non-trivially-copyable bit_cast is undefined");
+#else
+  // Do nothing, let the bots handle it.
+#endif
+
+  Dest dest;
+  // memcpy is the standard-blessed way to reinterpret an object
+  // representation; compilers inline it for compile-time-constant sizes.
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
+
+#endif // BASE_BIT_CAST_H_
diff --git a/libchrome/base/bits.h b/libchrome/base/bits.h
new file mode 100644
index 0000000..a3a59d1
--- /dev/null
+++ b/libchrome/base/bits.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines some bit utilities.
+
+#ifndef BASE_BITS_H_
+#define BASE_BITS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/logging.h"
+
+namespace base {
+namespace bits {
+
+// Returns the integer i such that 2^i <= n < 2^(i+1), or -1 when n == 0.
+inline int Log2Floor(uint32_t n) {
+  // No power of two divides 0; signal that with -1.
+  if (n == 0)
+    return -1;
+  // Binary-search for the highest set bit: try shifting the remaining value
+  // right by 16, 8, 4, 2 and finally 1 bit. Whenever the shifted value is
+  // non-zero the top bit lies at least that far up, so keep the shifted
+  // value and accumulate the shift amount.
+  int result = 0;
+  uint32_t remaining = n;
+  for (int step = 4; step >= 0; --step) {
+    const int shift = 1 << step;
+    const uint32_t shifted = remaining >> shift;
+    if (shifted != 0) {
+      remaining = shifted;
+      result += shift;
+    }
+  }
+  // Only the leading one bit may remain after stripping the lower bits.
+  DCHECK_EQ(remaining, 1u);
+  return result;
+}
+
+// Returns the integer i such that 2^(i-1) < n <= 2^i, or -1 when n == 0.
+inline int Log2Ceiling(uint32_t n) {
+  // By convention the log of 0 is reported as -1.
+  if (n == 0)
+    return -1;
+  // For n >= 1, ceil(log2(n)) == floor(log2(n - 1)) + 1. Log2Floor(0)
+  // returns -1, so this also yields the correct 0 for n == 1.
+  return Log2Floor(n - 1) + 1;
+}
+
+// Round up |size| to a multiple of alignment, which must be a power of two.
+inline size_t Align(size_t size, size_t alignment) {
+  // A power of two has exactly one bit set, so this mask must come out zero.
+  DCHECK_EQ(alignment & (alignment - 1), 0u);
+  // Adding (alignment - 1) bumps any unaligned value past the next boundary;
+  // clearing the low bits then snaps it down exactly onto that boundary.
+  const size_t mask = alignment - 1;
+  return (size + mask) & ~mask;
+}
+
+} // namespace bits
+} // namespace base
+
+#endif // BASE_BITS_H_
diff --git a/libchrome/base/bits_unittest.cc b/libchrome/base/bits_unittest.cc
new file mode 100644
index 0000000..4f5b6ea
--- /dev/null
+++ b/libchrome/base/bits_unittest.cc
@@ -0,0 +1,65 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the unit tests for the bit utilities.
+
+#include "base/bits.h"
+
+#include <stddef.h>
+
+#include <limits>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace bits {
+
+TEST(BitsTest, Log2Floor) {
+  // Fixed small cases, including the 0 -> -1 sentinel.
+  EXPECT_EQ(-1, Log2Floor(0));
+  EXPECT_EQ(0, Log2Floor(1));
+  EXPECT_EQ(1, Log2Floor(2));
+  EXPECT_EQ(1, Log2Floor(3));
+  EXPECT_EQ(2, Log2Floor(4));
+  // Probe around each power of two: values in [2^i, 2^(i+1)) floor to i.
+  for (int i = 3; i < 31; ++i) {
+    unsigned int value = 1U << i;
+    EXPECT_EQ(i, Log2Floor(value));
+    EXPECT_EQ(i, Log2Floor(value + 1));
+    EXPECT_EQ(i, Log2Floor(value + 2));
+    EXPECT_EQ(i - 1, Log2Floor(value - 1));
+    EXPECT_EQ(i - 1, Log2Floor(value - 2));
+  }
+  // All-ones exercises the topmost bit.
+  EXPECT_EQ(31, Log2Floor(0xffffffffU));
+}
+
+TEST(BitsTest, Log2Ceiling) {
+  // Fixed small cases, including the 0 -> -1 sentinel.
+  EXPECT_EQ(-1, Log2Ceiling(0));
+  EXPECT_EQ(0, Log2Ceiling(1));
+  EXPECT_EQ(1, Log2Ceiling(2));
+  EXPECT_EQ(2, Log2Ceiling(3));
+  EXPECT_EQ(2, Log2Ceiling(4));
+  // Probe around each power of two: exact powers give i, anything above
+  // rounds up to i + 1, anything just below rounds up to i.
+  for (int i = 3; i < 31; ++i) {
+    unsigned int value = 1U << i;
+    EXPECT_EQ(i, Log2Ceiling(value));
+    EXPECT_EQ(i + 1, Log2Ceiling(value + 1));
+    EXPECT_EQ(i + 1, Log2Ceiling(value + 2));
+    EXPECT_EQ(i, Log2Ceiling(value - 1));
+    EXPECT_EQ(i, Log2Ceiling(value - 2));
+  }
+  EXPECT_EQ(32, Log2Ceiling(0xffffffffU));
+}
+
+TEST(BitsTest, Align) {
+  const size_t kSizeTMax = std::numeric_limits<size_t>::max();
+  // Already-aligned values (including 0) are returned unchanged; anything
+  // else is rounded up to the next multiple of the alignment.
+  EXPECT_EQ(0ul, Align(0, 4));
+  EXPECT_EQ(4ul, Align(1, 4));
+  EXPECT_EQ(4096ul, Align(1, 4096));
+  EXPECT_EQ(4096ul, Align(4096, 4096));
+  EXPECT_EQ(4096ul, Align(4095, 4096));
+  EXPECT_EQ(8192ul, Align(4097, 4096));
+  // Near the top of the size_t range: must not overflow for these inputs.
+  EXPECT_EQ(kSizeTMax - 31, Align(kSizeTMax - 62, 32));
+  EXPECT_EQ(kSizeTMax / 2 + 1, Align(1, kSizeTMax / 2 + 1));
+}
+
+} // namespace bits
+} // namespace base
diff --git a/libchrome/base/build_time.cc b/libchrome/base/build_time.cc
new file mode 100644
index 0000000..866840d
--- /dev/null
+++ b/libchrome/base/build_time.cc
@@ -0,0 +1,36 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/build_time.h"
+
+#include "base/logging.h"
+#include "base/time/time.h"
+
+#ifdef __ANDROID__
+#include <cutils/properties.h>
+#endif
+
+namespace base {
+
+Time GetBuildTime() {
+  Time integral_build_time;
+  // The format of __DATE__ and __TIME__ is specified by the ANSI C Standard,
+  // section 6.8.8.
+  //
+  // __DATE__ is exactly "Mmm DD YYYY".
+  // __TIME__ is exactly "hh:mm:ss".
+#if defined(__ANDROID__)
+  // On Android, read the build date from the system property at runtime.
+  // The third argument of property_get() is the fallback value used when
+  // the property is unset.
+  char kDateTime[PROPERTY_VALUE_MAX];
+  property_get("ro.build.date", kDateTime, "Sep 02 2008 08:00:00 PST");
+#elif defined(DONT_EMBED_BUILD_METADATA) && !defined(OFFICIAL_BUILD)
+  // Fixed placeholder date so the binary does not embed build metadata
+  // (keeps unofficial builds deterministic).
+  const char kDateTime[] = "Sep 02 2008 08:00:00 PST";
+#else
+  // Embed the compile timestamp directly.
+  const char kDateTime[] = __DATE__ " " __TIME__ " PST";
+#endif
+  bool result = Time::FromString(kDateTime, &integral_build_time);
+  // All three sources above are expected to parse; failure is a bug.
+  DCHECK(result);
+  return integral_build_time;
+}
+
+} // namespace base
diff --git a/libchrome/base/build_time.h b/libchrome/base/build_time.h
new file mode 100644
index 0000000..83c9875
--- /dev/null
+++ b/libchrome/base/build_time.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BUILD_TIME_H_
+#define BASE_BUILD_TIME_H_
+
+#include "base/base_export.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// GetBuildTime returns the time at which the current binary was built,
+// rounded down to 5:00:00am at the start of the day in UTC.
+//
+// This uses a generated file, which doesn't trigger a rebuild when the time
+// changes. It will, however, be updated whenever //build/util/LASTCHANGE
+// changes.
+//
+// This value should only be considered accurate to within a day.
+// It will always be in the past.
+//
+// Note: If the build is not official (i.e. is_official_build = false)
+// this time will be set to 5:00:00am on the most recent first Sunday
+// of a month.
+Time BASE_EXPORT GetBuildTime();
+
+} // namespace base
+
+#endif // BASE_BUILD_TIME_H_
diff --git a/libchrome/base/build_time_unittest.cc b/libchrome/base/build_time_unittest.cc
new file mode 100644
index 0000000..64886b4
--- /dev/null
+++ b/libchrome/base/build_time_unittest.cc
@@ -0,0 +1,45 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/build_time.h"
+#if !defined(DONT_EMBED_BUILD_METADATA)
+#include "base/generated_build_date.h"
+#endif
+#include "base/time/time.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST(BuildTime, DateLooksValid) {
+#if !defined(DONT_EMBED_BUILD_METADATA)
+  char build_date[] = BUILD_DATE;
+#else
+  char build_date[] = "Sep 02 2008 05:00:00";
+#endif
+
+  // The date string must look like "Mmm DD YYYY 05:00:00": 20 characters,
+  // spaces after the month, the day and the year, and a fixed 05:00:00
+  // time component.
+  EXPECT_EQ(20u, strlen(build_date));
+  EXPECT_EQ(' ', build_date[3]);
+  EXPECT_EQ(' ', build_date[6]);
+  EXPECT_EQ(' ', build_date[11]);
+  EXPECT_EQ('0', build_date[12]);
+  EXPECT_EQ('5', build_date[13]);
+  EXPECT_EQ(':', build_date[14]);
+  EXPECT_EQ('0', build_date[15]);
+  EXPECT_EQ('0', build_date[16]);
+  EXPECT_EQ(':', build_date[17]);
+  EXPECT_EQ('0', build_date[18]);
+  EXPECT_EQ('0', build_date[19]);
+}
+
+TEST(BuildTime, InThePast) {
+  // A build time in the future would indicate a clock or parsing problem;
+  // check against both the incrementing and the system wall clock.
+  EXPECT_LT(base::GetBuildTime(), base::Time::Now());
+  EXPECT_LT(base::GetBuildTime(), base::Time::NowFromSystemTime());
+}
+
+#if !defined(DONT_EMBED_BUILD_METADATA)
+// Skipped when build metadata is not embedded, since the placeholder date
+// is intentionally ancient.
+TEST(BuildTime, NotTooFar) {
+  // BuildTime must be less than 45 days old.
+  base::Time cutoff(base::Time::Now() - base::TimeDelta::FromDays(45));
+  EXPECT_GT(base::GetBuildTime(), cutoff);
+}
+#endif
diff --git a/libchrome/base/callback.h b/libchrome/base/callback.h
new file mode 100644
index 0000000..e087c73
--- /dev/null
+++ b/libchrome/base/callback.h
@@ -0,0 +1,395 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CALLBACK_H_
+#define BASE_CALLBACK_H_
+
+#include "base/callback_forward.h"
+#include "base/callback_internal.h"
+
+// NOTE: Header files that do not require the full definition of Callback or
+// Closure should #include "base/callback_forward.h" instead of this file.
+
+// -----------------------------------------------------------------------------
+// Introduction
+// -----------------------------------------------------------------------------
+//
+// The templated Callback class is a generalized function object. Together
+// with the Bind() function in bind.h, they provide a type-safe method for
+// performing partial application of functions.
+//
+// Partial application (or "currying") is the process of binding a subset of
+// a function's arguments to produce another function that takes fewer
+// arguments. This can be used to pass around a unit of delayed execution,
+// much like lexical closures are used in other languages. For example, it
+// is used in Chromium code to schedule tasks on different MessageLoops.
+//
+// A callback with no unbound input parameters (base::Callback<void()>)
+// is called a base::Closure. Note that this is NOT the same as what other
+// languages refer to as a closure -- it does not retain a reference to its
+// enclosing environment.
+//
+// MEMORY MANAGEMENT AND PASSING
+//
+// The Callback objects themselves should be passed by const-reference, and
+// stored by copy. They internally store their state via a refcounted class
+// and thus do not need to be deleted.
+//
+// The reason to pass via a const-reference is to avoid unnecessary
+// AddRef/Release pairs to the internal state.
+//
+//
+// -----------------------------------------------------------------------------
+// Quick reference for basic stuff
+// -----------------------------------------------------------------------------
+//
+// BINDING A BARE FUNCTION
+//
+// int Return5() { return 5; }
+// base::Callback<int()> func_cb = base::Bind(&Return5);
+// LOG(INFO) << func_cb.Run(); // Prints 5.
+//
+// BINDING A CLASS METHOD
+//
+// The first argument to bind is the member function to call, the second is
+// the object on which to call it.
+//
+// class Ref : public base::RefCountedThreadSafe<Ref> {
+// public:
+// int Foo() { return 3; }
+// void PrintBye() { LOG(INFO) << "bye."; }
+// };
+// scoped_refptr<Ref> ref = new Ref();
+// base::Callback<void()> ref_cb = base::Bind(&Ref::Foo, ref);
+// LOG(INFO) << ref_cb.Run(); // Prints out 3.
+//
+// By default the object must support RefCounted or you will get a compiler
+// error. If you're passing between threads, be sure it's
+// RefCountedThreadSafe! See "Advanced binding of member functions" below if
+// you don't want to use reference counting.
+//
+// RUNNING A CALLBACK
+//
+// Callbacks can be run with their "Run" method, which has the same
+// signature as the template argument to the callback.
+//
+// void DoSomething(const base::Callback<void(int, std::string)>& callback) {
+// callback.Run(5, "hello");
+// }
+//
+// Callbacks can be run more than once (they don't get deleted or marked when
+// run). However, this precludes using base::Passed (see below).
+//
+// void DoSomething(const base::Callback<double(double)>& callback) {
+// double myresult = callback.Run(3.14159);
+// myresult += callback.Run(2.71828);
+// }
+//
+// PASSING UNBOUND INPUT PARAMETERS
+//
+// Unbound parameters are specified at the time a callback is Run(). They are
+// specified in the Callback template type:
+//
+// void MyFunc(int i, const std::string& str) {}
+// base::Callback<void(int, const std::string&)> cb = base::Bind(&MyFunc);
+// cb.Run(23, "hello, world");
+//
+// PASSING BOUND INPUT PARAMETERS
+//
+// Bound parameters are specified when you create the callback as arguments
+// to Bind(). They will be passed to the function, and the Run()ner of the
+// callback doesn't see those values or even know the function it's
+// calling.
+//
+// void MyFunc(int i, const std::string& str) {}
+// base::Callback<void()> cb = base::Bind(&MyFunc, 23, "hello world");
+// cb.Run();
+//
+// A callback with no unbound input parameters (base::Callback<void()>)
+// is called a base::Closure. So we could have also written:
+//
+// base::Closure cb = base::Bind(&MyFunc, 23, "hello world");
+//
+// When calling member functions, bound parameters just go after the object
+// pointer.
+//
+// base::Closure cb = base::Bind(&MyClass::MyFunc, this, 23, "hello world");
+//
+// PARTIAL BINDING OF PARAMETERS
+//
+// You can specify some parameters when you create the callback, and specify
+// the rest when you execute the callback.
+//
+// void MyFunc(int i, const std::string& str) {}
+// base::Callback<void(const std::string&)> cb = base::Bind(&MyFunc, 23);
+// cb.Run("hello world");
+//
+// When calling a function bound parameters are first, followed by unbound
+// parameters.
+//
+//
+// -----------------------------------------------------------------------------
+// Quick reference for advanced binding
+// -----------------------------------------------------------------------------
+//
+// BINDING A CLASS METHOD WITH WEAK POINTERS
+//
+// base::Bind(&MyClass::Foo, GetWeakPtr());
+//
+// The callback will not be run if the object has already been destroyed.
+// DANGER: weak pointers are not threadsafe, so don't use this
+// when passing between threads!
+//
+// BINDING A CLASS METHOD WITH MANUAL LIFETIME MANAGEMENT
+//
+// base::Bind(&MyClass::Foo, base::Unretained(this));
+//
+// This disables all lifetime management on the object. You're responsible
+// for making sure the object is alive at the time of the call. You break it,
+// you own it!
+//
+// BINDING A CLASS METHOD AND HAVING THE CALLBACK OWN THE CLASS
+//
+// MyClass* myclass = new MyClass;
+// base::Bind(&MyClass::Foo, base::Owned(myclass));
+//
+// The object will be deleted when the callback is destroyed, even if it's
+// not run (like if you post a task during shutdown). Potentially useful for
+// "fire and forget" cases.
+//
+// IGNORING RETURN VALUES
+//
+// Sometimes you want to call a function that returns a value in a callback
+// that doesn't expect a return value.
+//
+// int DoSomething(int arg) { cout << arg << endl; }
+// base::Callback<void(int)> cb =
+// base::Bind(base::IgnoreResult(&DoSomething));
+//
+//
+// -----------------------------------------------------------------------------
+// Quick reference for binding parameters to Bind()
+// -----------------------------------------------------------------------------
+//
+// Bound parameters are specified as arguments to Bind() and are passed to the
+// function. A callback with no parameters or no unbound parameters is called a
+// Closure (base::Callback<void()> and base::Closure are the same thing).
+//
+// PASSING PARAMETERS OWNED BY THE CALLBACK
+//
+// void Foo(int* arg) { cout << *arg << endl; }
+// int* pn = new int(1);
+// base::Closure foo_callback = base::Bind(&foo, base::Owned(pn));
+//
+// The parameter will be deleted when the callback is destroyed, even if it's
+// not run (like if you post a task during shutdown).
+//
+// PASSING PARAMETERS AS A scoped_ptr
+//
+// void TakesOwnership(std::unique_ptr<Foo> arg) {}
+// std::unique_ptr<Foo> f(new Foo);
+// // f becomes null during the following call.
+// base::Closure cb = base::Bind(&TakesOwnership, base::Passed(&f));
+//
+// Ownership of the parameter will be with the callback until it is run,
+// when ownership is passed to the callback function. This means the callback
+// can only be run once. If the callback is never run, it will delete the
+// object when it's destroyed.
+//
+// PASSING PARAMETERS AS A scoped_refptr
+//
+// void TakesOneRef(scoped_refptr<Foo> arg) {}
+// scoped_refptr<Foo> f(new Foo)
+// base::Closure cb = base::Bind(&TakesOneRef, f);
+//
+// This should "just work." The closure will take a reference as long as it
+// is alive, and another reference will be taken for the called function.
+//
+// PASSING PARAMETERS BY REFERENCE
+//
+// Const references are *copied* unless ConstRef is used. Example:
+//
+// void foo(const int& arg) { printf("%d %p\n", arg, &arg); }
+// int n = 1;
+// base::Closure has_copy = base::Bind(&foo, n);
+// base::Closure has_ref = base::Bind(&foo, base::ConstRef(n));
+// n = 2;
+// foo(n); // Prints "2 0xaaaaaaaaaaaa"
+// has_copy.Run(); // Prints "1 0xbbbbbbbbbbbb"
+// has_ref.Run(); // Prints "2 0xaaaaaaaaaaaa"
+//
+// Normally parameters are copied in the closure. DANGER: ConstRef stores a
+// const reference instead, referencing the original parameter. This means
+// that you must ensure the object outlives the callback!
+//
+//
+// -----------------------------------------------------------------------------
+// Implementation notes
+// -----------------------------------------------------------------------------
+//
+// WHERE IS THIS DESIGN FROM:
+//
+// The design Callback and Bind is heavily influenced by C++'s
+// tr1::function/tr1::bind, and by the "Google Callback" system used inside
+// Google.
+//
+//
+// HOW THE IMPLEMENTATION WORKS:
+//
+// There are three main components to the system:
+// 1) The Callback classes.
+// 2) The Bind() functions.
+// 3) The arguments wrappers (e.g., Unretained() and ConstRef()).
+//
+// The Callback classes represent a generic function pointer. Internally,
+// it stores a refcounted piece of state that represents the target function
+// and all its bound parameters. Each Callback specialization has a templated
+// constructor that takes a BindState<>*. In the context of the constructor,
+// the static type of this BindState<> pointer uniquely identifies the
+// function it is representing, all its bound parameters, and a Run() method
+// that is capable of invoking the target.
+//
+// Callback's constructor takes the BindState<>* that has the full static type
+// and erases the target function type as well as the types of the bound
+// parameters. It does this by storing a pointer to the specific Run()
+// function, and upcasting the state of BindState<>* to a
+// BindStateBase*. This is safe as long as this BindStateBase pointer
+// is only used with the stored Run() pointer.
+//
+// The BindState<> objects are created inside the Bind() functions.
+// These functions, along with a set of internal templates, are responsible for
+//
+// - Unwrapping the function signature into return type, and parameters
+// - Determining the number of parameters that are bound
+// - Creating the BindState storing the bound parameters
+// - Performing compile-time asserts to avoid error-prone behavior
+// - Returning an Callback<> with an arity matching the number of unbound
+// parameters and that knows the correct refcounting semantics for the
+// target object if we are binding a method.
+//
+// The Bind functions do the above using type-inference, and template
+// specializations.
+//
+// By default Bind() will store copies of all bound parameters, and attempt
+// to refcount a target object if the function being bound is a class method.
+// These copies are created even if the function takes parameters as const
+// references. (Binding to non-const references is forbidden, see bind.h.)
+//
+// To change this behavior, we introduce a set of argument wrappers
+// (e.g., Unretained(), and ConstRef()). These are simple container templates
+// that are passed by value, and wrap a pointer to argument. See the
+// file-level comment in base/bind_helpers.h for more info.
+//
+// These types are passed to the Unwrap() functions, and the MaybeRefcount()
+// functions respectively to modify the behavior of Bind(). The Unwrap()
+// and MaybeRefcount() functions change behavior by doing partial
+// specialization based on whether or not a parameter is a wrapper type.
+//
+// ConstRef() is similar to tr1::cref. Unretained() is specific to Chromium.
+//
+//
+// WHY NOT TR1 FUNCTION/BIND?
+//
+// Direct use of tr1::function and tr1::bind was considered, but ultimately
+// rejected because of the number of copy constructors invocations involved
+// in the binding of arguments during construction, and the forwarding of
+// arguments during invocation. These copies will no longer be an issue in
+// C++0x because C++0x will support rvalue reference allowing for the compiler
+// to avoid these copies. However, waiting for C++0x is not an option.
+//
+// Measured with valgrind on gcc version 4.4.3 (Ubuntu 4.4.3-4ubuntu5), the
+// tr1::bind call itself will invoke a non-trivial copy constructor three times
+// for each bound parameter. Also, when passing a tr1::function, each
+// bound argument will be copied again.
+//
+// In addition to the copies taken at binding and invocation, copying a
+// tr1::function causes a copy to be made of all the bound parameters and
+// state.
+//
+// Furthermore, in Chromium, it is desirable for the Callback to take a
+// reference on a target object when representing a class method call. This
+// is not supported by tr1.
+//
+// Lastly, tr1::function and tr1::bind has a more general and flexible API.
+// This includes things like argument reordering by use of
+// tr1::bind::placeholder, support for non-const reference parameters, and some
+// limited amount of subtyping of the tr1::function object (e.g.,
+// tr1::function<int(int)> is convertible to tr1::function<void(int)>).
+//
+// These are not features that are required in Chromium. Some of them, such as
+// allowing for reference parameters, and subtyping of functions, may actually
+// become a source of errors. Removing support for these features actually
+// allows for a simpler implementation, and a terser Currying API.
+//
+//
+// WHY NOT GOOGLE CALLBACKS?
+//
+// The Google callback system also does not support refcounting. Furthermore,
+// its implementation has a number of strange edge cases with respect to type
+// conversion of its arguments. In particular, the argument's constness must
+// at times match exactly the function signature, or the type-inference might
+// break. Given the above, writing a custom solution was easier.
+//
+//
+// MISSING FUNCTIONALITY
+// - Invoking the return of Bind. Bind(&foo).Run() does not work;
+// - Binding arrays to functions that take a non-const pointer.
+// Example:
+// void Foo(const char* ptr);
+// void Bar(char* ptr);
+// Bind(&Foo, "test");
+// Bind(&Bar, "test"); // This fails because ptr is not const.
+//
+// If you are thinking of forward declaring Callback in your own header file,
+// please include "base/callback_forward.h" instead.
+
+namespace base {
+
+template <typename R, typename... Args, internal::CopyMode copy_mode>
+class Callback<R(Args...), copy_mode>
+    : public internal::CallbackBase<copy_mode> {
+ private:
+  // Signature of the type-erased trampoline that knows how to unpack the
+  // BindState and invoke the bound target.
+  using PolymorphicInvoke = R (*)(internal::BindStateBase*, Args&&...);
+
+ public:
+  // MSVC 2013 doesn't support Type Alias of function types.
+  // Revisit this after we update it to newer version.
+  typedef R RunType(Args...);
+
+  // Constructs a null callback (is_null() is true; must not be Run()).
+  Callback() : internal::CallbackBase<copy_mode>(nullptr) {}
+
+  // Wraps |bind_state| and its matching |invoke_func| as produced by Bind().
+  // The invoke function is stored type-erased; the reinterpret_cast is safe
+  // because Run() casts it back to exactly this PolymorphicInvoke type.
+  Callback(internal::BindStateBase* bind_state,
+           PolymorphicInvoke invoke_func)
+      : internal::CallbackBase<copy_mode>(bind_state) {
+    using InvokeFuncStorage =
+        typename internal::CallbackBase<copy_mode>::InvokeFuncStorage;
+    this->polymorphic_invoke_ =
+        reinterpret_cast<InvokeFuncStorage>(invoke_func);
+  }
+
+  // Equality is delegated to CallbackBase::EqualsInternal().
+  bool Equals(const Callback& other) const {
+    return this->EqualsInternal(other);
+  }
+
+  // Run() makes an extra copy compared to directly calling the bound function
+  // if an argument is passed-by-value and is copyable-but-not-movable:
+  // i.e. below copies CopyableNonMovableType twice.
+  //   void F(CopyableNonMovableType) {}
+  //   Bind(&F).Run(CopyableNonMovableType());
+  //
+  // We can not fully apply Perfect Forwarding idiom to the callchain from
+  // Callback::Run() to the target function. Perfect Forwarding requires
+  // knowing how the caller will pass the arguments. However, the signature of
+  // InvokerType::Run() needs to be fixed in the callback constructor, so Run()
+  // cannot template its arguments based on how it's called.
+  R Run(Args... args) const {
+    // Restore the concrete invoke-function type erased by the constructor.
+    PolymorphicInvoke f =
+        reinterpret_cast<PolymorphicInvoke>(this->polymorphic_invoke_);
+    return f(this->bind_state_.get(), std::forward<Args>(args)...);
+  }
+};
+
+} // namespace base
+
+#endif // BASE_CALLBACK_H_
diff --git a/libchrome/base/callback_forward.h b/libchrome/base/callback_forward.h
new file mode 100644
index 0000000..8b9b89c
--- /dev/null
+++ b/libchrome/base/callback_forward.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CALLBACK_FORWARD_H_
+#define BASE_CALLBACK_FORWARD_H_
+
+namespace base {
+namespace internal {
+
+// CopyMode is used to control the copyability of a Callback.
+// MoveOnly indicates the Callback is not copyable but movable, and Copyable
+// indicates it is copyable and movable.
+enum class CopyMode {
+ MoveOnly, Copyable,
+};
+
+} // namespace internal
+
+template <typename Signature,
+ internal::CopyMode copy_mode = internal::CopyMode::Copyable>
+class Callback;
+
+// Syntactic sugar to make Callback<void()> easier to declare since it
+// will be used in a lot of APIs with delayed execution.
+using Closure = Callback<void()>;
+
+} // namespace base
+
+#endif // BASE_CALLBACK_FORWARD_H_
diff --git a/libchrome/base/callback_helpers.cc b/libchrome/base/callback_helpers.cc
new file mode 100644
index 0000000..838e6c8
--- /dev/null
+++ b/libchrome/base/callback_helpers.cc
@@ -0,0 +1,46 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/callback_helpers.h"
+
+#include "base/callback.h"
+
+namespace base {
+
+// Default-constructed runner holds no closure; the destructor is a no-op.
+ScopedClosureRunner::ScopedClosureRunner() {}
+
+ScopedClosureRunner::ScopedClosureRunner(const Closure& closure)
+    : closure_(closure) {}
+
+// Runs the held closure (if any) when the runner goes out of scope.
+ScopedClosureRunner::~ScopedClosureRunner() {
+  if (!closure_.is_null())
+    closure_.Run();
+}
+
+// Move construction steals |other|'s closure, leaving |other| empty so the
+// closure still runs exactly once.
+ScopedClosureRunner::ScopedClosureRunner(ScopedClosureRunner&& other)
+    : closure_(other.Release()) {}
+
+// Move assignment takes over |other|'s closure. Note that any closure this
+// runner previously held is dropped WITHOUT being run (ReplaceClosure just
+// overwrites it).
+ScopedClosureRunner& ScopedClosureRunner::operator=(
+    ScopedClosureRunner&& other) {
+  ReplaceClosure(other.Release());
+  return *this;
+}
+
+// Runs the held closure now and clears it, so the destructor won't run it
+// again. The closure is released first so it can safely re-enter this
+// object.
+void ScopedClosureRunner::RunAndReset() {
+  Closure old_closure = Release();
+  if (!old_closure.is_null())
+    old_closure.Run();
+}
+
+// Overwrites the held closure without running the old one.
+void ScopedClosureRunner::ReplaceClosure(const Closure& closure) {
+  closure_ = closure;
+}
+
+// Hands the closure back to the caller without running it; this runner
+// becomes empty.
+Closure ScopedClosureRunner::Release() {
+  Closure result = closure_;
+  closure_.Reset();
+  return result;
+}
+
+} // namespace base
diff --git a/libchrome/base/callback_helpers.h b/libchrome/base/callback_helpers.h
new file mode 100644
index 0000000..782371f
--- /dev/null
+++ b/libchrome/base/callback_helpers.h
@@ -0,0 +1,61 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This defines helpful methods for dealing with Callbacks. Because Callbacks
+// are implemented using templates, with a class per callback signature, adding
+// methods to Callback<> itself is unattractive (lots of extra code gets
+// generated). Instead, consider adding methods here.
+//
+// ResetAndReturn(&cb) is like cb.Reset() but allows executing a callback (via a
+// copy) after the original callback is Reset(). This can be handy if Run()
+// reads/writes the variable holding the Callback.
+
+#ifndef BASE_CALLBACK_HELPERS_H_
+#define BASE_CALLBACK_HELPERS_H_
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+
+namespace base {
+
+template <typename Sig>
+base::Callback<Sig> ResetAndReturn(base::Callback<Sig>* cb) {
+ base::Callback<Sig> ret(*cb);
+ cb->Reset();
+ return ret;
+}
+
+// ScopedClosureRunner is akin to std::unique_ptr<> for Closures. It ensures
+// that the Closure is executed no matter how the current scope exits.
+class BASE_EXPORT ScopedClosureRunner {
+ public:
+ ScopedClosureRunner();
+ explicit ScopedClosureRunner(const Closure& closure);
+ ~ScopedClosureRunner();
+
+ ScopedClosureRunner(ScopedClosureRunner&& other);
+
+ // Releases the current closure if it's set and replaces it with the closure
+ // from |other|.
+ ScopedClosureRunner& operator=(ScopedClosureRunner&& other);
+
+ // Calls the current closure and resets it, so it won't be called again.
+ void RunAndReset();
+
+ // Replaces closure with the new one releasing the old one without calling it.
+ void ReplaceClosure(const Closure& closure);
+
+ // Releases the Closure without calling.
+ Closure Release() WARN_UNUSED_RESULT;
+
+ private:
+ Closure closure_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedClosureRunner);
+};
+
+} // namespace base
+
+#endif // BASE_CALLBACK_HELPERS_H_
diff --git a/libchrome/base/callback_helpers_unittest.cc b/libchrome/base/callback_helpers_unittest.cc
new file mode 100644
index 0000000..8283996
--- /dev/null
+++ b/libchrome/base/callback_helpers_unittest.cc
@@ -0,0 +1,94 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/callback_helpers.h"
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+void Increment(int* value) {
+ (*value)++;
+}
+
+TEST(CallbackHelpersTest, TestScopedClosureRunnerExitScope) {
+ int run_count = 0;
+ {
+ base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count));
+ EXPECT_EQ(0, run_count);
+ }
+ EXPECT_EQ(1, run_count);
+}
+
+TEST(CallbackHelpersTest, TestScopedClosureRunnerRelease) {
+ int run_count = 0;
+ base::Closure c;
+ {
+ base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count));
+ c = runner.Release();
+ EXPECT_EQ(0, run_count);
+ }
+ EXPECT_EQ(0, run_count);
+ c.Run();
+ EXPECT_EQ(1, run_count);
+}
+
+TEST(CallbackHelpersTest, TestScopedClosureRunnerReplaceClosure) {
+ int run_count_1 = 0;
+ int run_count_2 = 0;
+ {
+ base::ScopedClosureRunner runner;
+ runner.ReplaceClosure(base::Bind(&Increment, &run_count_1));
+ runner.ReplaceClosure(base::Bind(&Increment, &run_count_2));
+ EXPECT_EQ(0, run_count_1);
+ EXPECT_EQ(0, run_count_2);
+ }
+ EXPECT_EQ(0, run_count_1);
+ EXPECT_EQ(1, run_count_2);
+}
+
+TEST(CallbackHelpersTest, TestScopedClosureRunnerRunAndReset) {
+ int run_count_3 = 0;
+ {
+ base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count_3));
+ EXPECT_EQ(0, run_count_3);
+ runner.RunAndReset();
+ EXPECT_EQ(1, run_count_3);
+ }
+ EXPECT_EQ(1, run_count_3);
+}
+
+TEST(CallbackHelpersTest, TestScopedClosureRunnerMoveConstructor) {
+ int run_count = 0;
+ {
+ std::unique_ptr<base::ScopedClosureRunner> runner(
+ new base::ScopedClosureRunner(base::Bind(&Increment, &run_count)));
+ base::ScopedClosureRunner runner2(std::move(*runner));
+ runner.reset();
+ EXPECT_EQ(0, run_count);
+ }
+ EXPECT_EQ(1, run_count);
+}
+
+TEST(CallbackHelpersTest, TestScopedClosureRunnerMoveAssignment) {
+ int run_count_1 = 0;
+ int run_count_2 = 0;
+ {
+ base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count_1));
+ {
+ base::ScopedClosureRunner runner2(base::Bind(&Increment, &run_count_2));
+ runner = std::move(runner2);
+ EXPECT_EQ(0, run_count_1);
+ EXPECT_EQ(0, run_count_2);
+ }
+ EXPECT_EQ(0, run_count_1);
+ EXPECT_EQ(0, run_count_2);
+ }
+ EXPECT_EQ(0, run_count_1);
+ EXPECT_EQ(1, run_count_2);
+}
+
+} // namespace
diff --git a/libchrome/base/callback_internal.cc b/libchrome/base/callback_internal.cc
new file mode 100644
index 0000000..4c8ccae
--- /dev/null
+++ b/libchrome/base/callback_internal.cc
@@ -0,0 +1,83 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/callback_internal.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace internal {
+
+void BindStateBase::AddRef() {
+ AtomicRefCountInc(&ref_count_);
+}
+
+void BindStateBase::Release() {
+ if (!AtomicRefCountDec(&ref_count_))
+ destructor_(this);
+}
+
+CallbackBase<CopyMode::MoveOnly>::CallbackBase(CallbackBase&& c)
+ : bind_state_(std::move(c.bind_state_)),
+ polymorphic_invoke_(c.polymorphic_invoke_) {
+ c.polymorphic_invoke_ = nullptr;
+}
+
+CallbackBase<CopyMode::MoveOnly>&
+CallbackBase<CopyMode::MoveOnly>::operator=(CallbackBase&& c) {
+ bind_state_ = std::move(c.bind_state_);
+ polymorphic_invoke_ = c.polymorphic_invoke_;
+ c.polymorphic_invoke_ = nullptr;
+ return *this;
+}
+
+void CallbackBase<CopyMode::MoveOnly>::Reset() {
+ polymorphic_invoke_ = nullptr;
+ // NULL the bind_state_ last, since it may be holding the last ref to whatever
+ // object owns us, and we may be deleted after that.
+ bind_state_ = nullptr;
+}
+
+bool CallbackBase<CopyMode::MoveOnly>::EqualsInternal(
+ const CallbackBase& other) const {
+ return bind_state_.get() == other.bind_state_.get() &&
+ polymorphic_invoke_ == other.polymorphic_invoke_;
+}
+
+CallbackBase<CopyMode::MoveOnly>::CallbackBase(
+ BindStateBase* bind_state)
+ : bind_state_(bind_state) {
+ DCHECK(!bind_state_.get() || bind_state_->ref_count_ == 1);
+}
+
+CallbackBase<CopyMode::MoveOnly>::~CallbackBase() {}
+
+CallbackBase<CopyMode::Copyable>::CallbackBase(
+ const CallbackBase& c)
+ : CallbackBase<CopyMode::MoveOnly>(nullptr) {
+ bind_state_ = c.bind_state_;
+ polymorphic_invoke_ = c.polymorphic_invoke_;
+}
+
+CallbackBase<CopyMode::Copyable>::CallbackBase(CallbackBase&& c)
+ : CallbackBase<CopyMode::MoveOnly>(std::move(c)) {}
+
+CallbackBase<CopyMode::Copyable>&
+CallbackBase<CopyMode::Copyable>::operator=(const CallbackBase& c) {
+ bind_state_ = c.bind_state_;
+ polymorphic_invoke_ = c.polymorphic_invoke_;
+ return *this;
+}
+
+CallbackBase<CopyMode::Copyable>&
+CallbackBase<CopyMode::Copyable>::operator=(CallbackBase&& c) {
+ *static_cast<CallbackBase<CopyMode::MoveOnly>*>(this) = std::move(c);
+ return *this;
+}
+
+template class CallbackBase<CopyMode::MoveOnly>;
+template class CallbackBase<CopyMode::Copyable>;
+
+} // namespace internal
+} // namespace base
diff --git a/libchrome/base/callback_internal.h b/libchrome/base/callback_internal.h
new file mode 100644
index 0000000..0fe0b2d
--- /dev/null
+++ b/libchrome/base/callback_internal.h
@@ -0,0 +1,118 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains utility functions and classes that help the
+// implementation, and management of the Callback objects.
+
+#ifndef BASE_CALLBACK_INTERNAL_H_
+#define BASE_CALLBACK_INTERNAL_H_
+
+#include "base/atomic_ref_count.h"
+#include "base/base_export.h"
+#include "base/callback_forward.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+
+namespace base {
+namespace internal {
+template <CopyMode copy_mode>
+class CallbackBase;
+
+// BindStateBase is used to provide an opaque handle that the Callback
+// class can use to represent a function object with bound arguments. It
+// behaves as an existential type that is used by a corresponding
+// DoInvoke function to perform the function execution. This allows
+// us to shield the Callback class from the types of the bound argument via
+// "type erasure."
+// At the base level, the only task is to add reference counting data. Don't use
+// RefCountedThreadSafe since it requires the destructor to be a virtual method.
+// Creating a vtable for every BindState template instantiation results in a lot
+// of bloat. Its only task is to call the destructor which can be done with a
+// function pointer.
+class BindStateBase {
+ protected:
+ explicit BindStateBase(void (*destructor)(BindStateBase*))
+ : ref_count_(0), destructor_(destructor) {}
+ ~BindStateBase() = default;
+
+ private:
+ friend class scoped_refptr<BindStateBase>;
+ template <CopyMode copy_mode>
+ friend class CallbackBase;
+
+ void AddRef();
+ void Release();
+
+ AtomicRefCount ref_count_;
+
+ // Pointer to a function that will properly destroy |this|.
+ void (*destructor_)(BindStateBase*);
+
+ DISALLOW_COPY_AND_ASSIGN(BindStateBase);
+};
+
+// Holds the Callback methods that don't require specialization to reduce
+// template bloat.
+// CallbackBase<MoveOnly> is a direct base class of MoveOnly callbacks, and
+// CallbackBase<Copyable> uses CallbackBase<MoveOnly> for its implementation.
+template <>
+class BASE_EXPORT CallbackBase<CopyMode::MoveOnly> {
+ public:
+ CallbackBase(CallbackBase&& c);
+ CallbackBase& operator=(CallbackBase&& c);
+
+ // Returns true if Callback is null (doesn't refer to anything).
+ bool is_null() const { return bind_state_.get() == NULL; }
+ explicit operator bool() const { return !is_null(); }
+
+ // Returns the Callback into an uninitialized state.
+ void Reset();
+
+ protected:
+ // In C++, it is safe to cast function pointers to function pointers of
+ // another type. It is not okay to use void*. We create an InvokeFuncStorage
+ // that can store our function pointer, and then cast it back to
+ // the original type on usage.
+ using InvokeFuncStorage = void(*)();
+
+ // Returns true if this callback equals |other|. |other| may be null.
+ bool EqualsInternal(const CallbackBase& other) const;
+
+ // Allow initializing of |bind_state_| via the constructor to avoid default
+ // initialization of the scoped_refptr. We do not also initialize
+ // |polymorphic_invoke_| here because doing a normal assignment in the
+ // derived Callback templates makes for much nicer compiler errors.
+ explicit CallbackBase(BindStateBase* bind_state);
+
+ // Force the destructor to be instantiated inside this translation unit so
+ // that our subclasses will not get inlined versions. Avoids more template
+ // bloat.
+ ~CallbackBase();
+
+ scoped_refptr<BindStateBase> bind_state_;
+ InvokeFuncStorage polymorphic_invoke_ = nullptr;
+};
+
+// CallbackBase<Copyable> is a direct base class of Copyable Callbacks.
+template <>
+class BASE_EXPORT CallbackBase<CopyMode::Copyable>
+ : public CallbackBase<CopyMode::MoveOnly> {
+ public:
+ CallbackBase(const CallbackBase& c);
+ CallbackBase(CallbackBase&& c);
+ CallbackBase& operator=(const CallbackBase& c);
+ CallbackBase& operator=(CallbackBase&& c);
+ protected:
+ explicit CallbackBase(BindStateBase* bind_state)
+ : CallbackBase<CopyMode::MoveOnly>(bind_state) {}
+ ~CallbackBase() {}
+};
+
+extern template class CallbackBase<CopyMode::MoveOnly>;
+extern template class CallbackBase<CopyMode::Copyable>;
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_CALLBACK_INTERNAL_H_
diff --git a/libchrome/base/callback_list.h b/libchrome/base/callback_list.h
new file mode 100644
index 0000000..7ab79dd
--- /dev/null
+++ b/libchrome/base/callback_list.h
@@ -0,0 +1,229 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CALLBACK_LIST_H_
+#define BASE_CALLBACK_LIST_H_
+
+#include <list>
+#include <memory>
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+
+// OVERVIEW:
+//
+// A container for a list of callbacks. Unlike a normal STL vector or list,
+// this container can be modified during iteration without invalidating the
+// iterator. It safely handles the case of a callback removing itself
+// or another callback from the list while callbacks are being run.
+//
+// TYPICAL USAGE:
+//
+// class MyWidget {
+// public:
+// ...
+//
+// typedef base::Callback<void(const Foo&)> OnFooCallback;
+//
+// std::unique_ptr<base::CallbackList<void(const Foo&)>::Subscription>
+// RegisterCallback(const OnFooCallback& cb) {
+// return callback_list_.Add(cb);
+// }
+//
+// private:
+// void NotifyFoo(const Foo& foo) {
+// callback_list_.Notify(foo);
+// }
+//
+// base::CallbackList<void(const Foo&)> callback_list_;
+//
+// DISALLOW_COPY_AND_ASSIGN(MyWidget);
+// };
+//
+//
+// class MyWidgetListener {
+// public:
+// MyWidgetListener::MyWidgetListener() {
+// foo_subscription_ = MyWidget::GetCurrent()->RegisterCallback(
+// base::Bind(&MyWidgetListener::OnFoo, this)));
+// }
+//
+// MyWidgetListener::~MyWidgetListener() {
+// // Subscription gets deleted automatically and will deregister
+// // the callback in the process.
+// }
+//
+// private:
+// void OnFoo(const Foo& foo) {
+// // Do something.
+// }
+//
+// std::unique_ptr<base::CallbackList<void(const Foo&)>::Subscription>
+// foo_subscription_;
+//
+// DISALLOW_COPY_AND_ASSIGN(MyWidgetListener);
+// };
+
+namespace base {
+
+namespace internal {
+
+template <typename CallbackType>
+class CallbackListBase {
+ public:
+ class Subscription {
+ public:
+ Subscription(CallbackListBase<CallbackType>* list,
+ typename std::list<CallbackType>::iterator iter)
+ : list_(list),
+ iter_(iter) {
+ }
+
+ ~Subscription() {
+ if (list_->active_iterator_count_) {
+ iter_->Reset();
+ } else {
+ list_->callbacks_.erase(iter_);
+ if (!list_->removal_callback_.is_null())
+ list_->removal_callback_.Run();
+ }
+ }
+
+ private:
+ CallbackListBase<CallbackType>* list_;
+ typename std::list<CallbackType>::iterator iter_;
+
+ DISALLOW_COPY_AND_ASSIGN(Subscription);
+ };
+
+ // Add a callback to the list. The callback will remain registered until the
+ // returned Subscription is destroyed, which must occur before the
+ // CallbackList is destroyed.
+ std::unique_ptr<Subscription> Add(const CallbackType& cb) WARN_UNUSED_RESULT {
+ DCHECK(!cb.is_null());
+ return std::unique_ptr<Subscription>(
+ new Subscription(this, callbacks_.insert(callbacks_.end(), cb)));
+ }
+
+ // Sets a callback which will be run when a subscription list is changed.
+ void set_removal_callback(const Closure& callback) {
+ removal_callback_ = callback;
+ }
+
+ // Returns true if there are no subscriptions. This is only valid to call when
+ // not looping through the list.
+ bool empty() {
+ DCHECK_EQ(0, active_iterator_count_);
+ return callbacks_.empty();
+ }
+
+ protected:
+ // An iterator class that can be used to access the list of callbacks.
+ class Iterator {
+ public:
+ explicit Iterator(CallbackListBase<CallbackType>* list)
+ : list_(list),
+ list_iter_(list_->callbacks_.begin()) {
+ ++list_->active_iterator_count_;
+ }
+
+ Iterator(const Iterator& iter)
+ : list_(iter.list_),
+ list_iter_(iter.list_iter_) {
+ ++list_->active_iterator_count_;
+ }
+
+ ~Iterator() {
+ if (list_ && --list_->active_iterator_count_ == 0) {
+ list_->Compact();
+ }
+ }
+
+ CallbackType* GetNext() {
+ while ((list_iter_ != list_->callbacks_.end()) && list_iter_->is_null())
+ ++list_iter_;
+
+ CallbackType* cb = NULL;
+ if (list_iter_ != list_->callbacks_.end()) {
+ cb = &(*list_iter_);
+ ++list_iter_;
+ }
+ return cb;
+ }
+
+ private:
+ CallbackListBase<CallbackType>* list_;
+ typename std::list<CallbackType>::iterator list_iter_;
+ };
+
+ CallbackListBase() : active_iterator_count_(0) {}
+
+ ~CallbackListBase() {
+ DCHECK_EQ(0, active_iterator_count_);
+ DCHECK_EQ(0U, callbacks_.size());
+ }
+
+ // Returns an instance of a CallbackListBase::Iterator which can be used
+ // to run callbacks.
+ Iterator GetIterator() {
+ return Iterator(this);
+ }
+
+ // Compact the list: remove any entries which were NULLed out during
+ // iteration.
+ void Compact() {
+ typename std::list<CallbackType>::iterator it = callbacks_.begin();
+ bool updated = false;
+ while (it != callbacks_.end()) {
+ if ((*it).is_null()) {
+ updated = true;
+ it = callbacks_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+
+ if (updated && !removal_callback_.is_null())
+ removal_callback_.Run();
+ }
+
+ private:
+ std::list<CallbackType> callbacks_;
+ int active_iterator_count_;
+ Closure removal_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallbackListBase);
+};
+
+} // namespace internal
+
+template <typename Sig> class CallbackList;
+
+template <typename... Args>
+class CallbackList<void(Args...)>
+ : public internal::CallbackListBase<Callback<void(Args...)> > {
+ public:
+ typedef Callback<void(Args...)> CallbackType;
+
+ CallbackList() {}
+
+ template <typename... RunArgs>
+ void Notify(RunArgs&&... args) {
+ typename internal::CallbackListBase<CallbackType>::Iterator it =
+ this->GetIterator();
+ CallbackType* cb;
+ while ((cb = it.GetNext()) != NULL) {
+ cb->Run(args...);
+ }
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CallbackList);
+};
+
+} // namespace base
+
+#endif // BASE_CALLBACK_LIST_H_
diff --git a/libchrome/base/callback_list_unittest.cc b/libchrome/base/callback_list_unittest.cc
new file mode 100644
index 0000000..62081e9
--- /dev/null
+++ b/libchrome/base/callback_list_unittest.cc
@@ -0,0 +1,339 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/callback_list.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+class Listener {
+ public:
+ Listener() : total_(0), scaler_(1) {}
+ explicit Listener(int scaler) : total_(0), scaler_(scaler) {}
+ void IncrementTotal() { total_++; }
+ void IncrementByMultipleOfScaler(int x) { total_ += x * scaler_; }
+
+ int total() const { return total_; }
+
+ private:
+ int total_;
+ int scaler_;
+ DISALLOW_COPY_AND_ASSIGN(Listener);
+};
+
+class Remover {
+ public:
+ Remover() : total_(0) {}
+ void IncrementTotalAndRemove() {
+ total_++;
+ removal_subscription_.reset();
+ }
+ void SetSubscriptionToRemove(
+ std::unique_ptr<CallbackList<void(void)>::Subscription> sub) {
+ removal_subscription_ = std::move(sub);
+ }
+
+ int total() const { return total_; }
+
+ private:
+ int total_;
+ std::unique_ptr<CallbackList<void(void)>::Subscription> removal_subscription_;
+ DISALLOW_COPY_AND_ASSIGN(Remover);
+};
+
+class Adder {
+ public:
+ explicit Adder(CallbackList<void(void)>* cb_reg)
+ : added_(false),
+ total_(0),
+ cb_reg_(cb_reg) {
+ }
+ void AddCallback() {
+ if (!added_) {
+ added_ = true;
+ subscription_ =
+ cb_reg_->Add(Bind(&Adder::IncrementTotal, Unretained(this)));
+ }
+ }
+ void IncrementTotal() { total_++; }
+
+ bool added() const { return added_; }
+
+ int total() const { return total_; }
+
+ private:
+ bool added_;
+ int total_;
+ CallbackList<void(void)>* cb_reg_;
+ std::unique_ptr<CallbackList<void(void)>::Subscription> subscription_;
+ DISALLOW_COPY_AND_ASSIGN(Adder);
+};
+
+class Summer {
+ public:
+ Summer() : value_(0) {}
+
+ void AddOneParam(int a) { value_ = a; }
+ void AddTwoParam(int a, int b) { value_ = a + b; }
+ void AddThreeParam(int a, int b, int c) { value_ = a + b + c; }
+ void AddFourParam(int a, int b, int c, int d) { value_ = a + b + c + d; }
+ void AddFiveParam(int a, int b, int c, int d, int e) {
+ value_ = a + b + c + d + e;
+ }
+ void AddSixParam(int a, int b, int c, int d, int e , int f) {
+ value_ = a + b + c + d + e + f;
+ }
+
+ int value() const { return value_; }
+
+ private:
+ int value_;
+ DISALLOW_COPY_AND_ASSIGN(Summer);
+};
+
+class Counter {
+ public:
+ Counter() : value_(0) {}
+
+ void Increment() { value_++; }
+
+ int value() const { return value_; }
+
+ private:
+ int value_;
+ DISALLOW_COPY_AND_ASSIGN(Counter);
+};
+
+// Sanity check that we can instantiate a CallbackList for each arity.
+TEST(CallbackListTest, ArityTest) {
+ Summer s;
+
+ CallbackList<void(int)> c1;
+ std::unique_ptr<CallbackList<void(int)>::Subscription> subscription1 =
+ c1.Add(Bind(&Summer::AddOneParam, Unretained(&s)));
+
+ c1.Notify(1);
+ EXPECT_EQ(1, s.value());
+
+ CallbackList<void(int, int)> c2;
+ std::unique_ptr<CallbackList<void(int, int)>::Subscription> subscription2 =
+ c2.Add(Bind(&Summer::AddTwoParam, Unretained(&s)));
+
+ c2.Notify(1, 2);
+ EXPECT_EQ(3, s.value());
+
+ CallbackList<void(int, int, int)> c3;
+ std::unique_ptr<CallbackList<void(int, int, int)>::Subscription>
+ subscription3 = c3.Add(Bind(&Summer::AddThreeParam, Unretained(&s)));
+
+ c3.Notify(1, 2, 3);
+ EXPECT_EQ(6, s.value());
+
+ CallbackList<void(int, int, int, int)> c4;
+ std::unique_ptr<CallbackList<void(int, int, int, int)>::Subscription>
+ subscription4 = c4.Add(Bind(&Summer::AddFourParam, Unretained(&s)));
+
+ c4.Notify(1, 2, 3, 4);
+ EXPECT_EQ(10, s.value());
+
+ CallbackList<void(int, int, int, int, int)> c5;
+ std::unique_ptr<CallbackList<void(int, int, int, int, int)>::Subscription>
+ subscription5 = c5.Add(Bind(&Summer::AddFiveParam, Unretained(&s)));
+
+ c5.Notify(1, 2, 3, 4, 5);
+ EXPECT_EQ(15, s.value());
+
+ CallbackList<void(int, int, int, int, int, int)> c6;
+ std::unique_ptr<
+ CallbackList<void(int, int, int, int, int, int)>::Subscription>
+ subscription6 = c6.Add(Bind(&Summer::AddSixParam, Unretained(&s)));
+
+ c6.Notify(1, 2, 3, 4, 5, 6);
+ EXPECT_EQ(21, s.value());
+}
+
+// Sanity check that closures added to the list will be run, and those removed
+// from the list will not be run.
+TEST(CallbackListTest, BasicTest) {
+ CallbackList<void(void)> cb_reg;
+ Listener a, b, c;
+
+ std::unique_ptr<CallbackList<void(void)>::Subscription> a_subscription =
+ cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&a)));
+ std::unique_ptr<CallbackList<void(void)>::Subscription> b_subscription =
+ cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&b)));
+
+ EXPECT_TRUE(a_subscription.get());
+ EXPECT_TRUE(b_subscription.get());
+
+ cb_reg.Notify();
+
+ EXPECT_EQ(1, a.total());
+ EXPECT_EQ(1, b.total());
+
+ b_subscription.reset();
+
+ std::unique_ptr<CallbackList<void(void)>::Subscription> c_subscription =
+ cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&c)));
+
+ cb_reg.Notify();
+
+ EXPECT_EQ(2, a.total());
+ EXPECT_EQ(1, b.total());
+ EXPECT_EQ(1, c.total());
+
+ a_subscription.reset();
+ b_subscription.reset();
+ c_subscription.reset();
+}
+
+// Sanity check that callbacks with details added to the list will be run, with
+// the correct details, and those removed from the list will not be run.
+TEST(CallbackListTest, BasicTestWithParams) {
+ CallbackList<void(int)> cb_reg;
+ Listener a(1), b(-1), c(1);
+
+ std::unique_ptr<CallbackList<void(int)>::Subscription> a_subscription =
+ cb_reg.Add(Bind(&Listener::IncrementByMultipleOfScaler, Unretained(&a)));
+ std::unique_ptr<CallbackList<void(int)>::Subscription> b_subscription =
+ cb_reg.Add(Bind(&Listener::IncrementByMultipleOfScaler, Unretained(&b)));
+
+ EXPECT_TRUE(a_subscription.get());
+ EXPECT_TRUE(b_subscription.get());
+
+ cb_reg.Notify(10);
+
+ EXPECT_EQ(10, a.total());
+ EXPECT_EQ(-10, b.total());
+
+ b_subscription.reset();
+
+ std::unique_ptr<CallbackList<void(int)>::Subscription> c_subscription =
+ cb_reg.Add(Bind(&Listener::IncrementByMultipleOfScaler, Unretained(&c)));
+
+ cb_reg.Notify(10);
+
+ EXPECT_EQ(20, a.total());
+ EXPECT_EQ(-10, b.total());
+ EXPECT_EQ(10, c.total());
+
+ a_subscription.reset();
+ b_subscription.reset();
+ c_subscription.reset();
+}
+
+// Test that a callback can remove itself or a different callback from the list
+// during iteration without invalidating the iterator.
+TEST(CallbackListTest, RemoveCallbacksDuringIteration) {
+ CallbackList<void(void)> cb_reg;
+ Listener a, b;
+ Remover remover_1, remover_2;
+
+ std::unique_ptr<CallbackList<void(void)>::Subscription> remover_1_sub =
+ cb_reg.Add(
+ Bind(&Remover::IncrementTotalAndRemove, Unretained(&remover_1)));
+ std::unique_ptr<CallbackList<void(void)>::Subscription> remover_2_sub =
+ cb_reg.Add(
+ Bind(&Remover::IncrementTotalAndRemove, Unretained(&remover_2)));
+ std::unique_ptr<CallbackList<void(void)>::Subscription> a_subscription =
+ cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&a)));
+ std::unique_ptr<CallbackList<void(void)>::Subscription> b_subscription =
+ cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&b)));
+
+ // |remover_1| will remove itself.
+ remover_1.SetSubscriptionToRemove(std::move(remover_1_sub));
+ // |remover_2| will remove a.
+ remover_2.SetSubscriptionToRemove(std::move(a_subscription));
+
+ cb_reg.Notify();
+
+ // |remover_1| runs once (and removes itself), |remover_2| runs once (and
+ // removes a), |a| never runs, and |b| runs once.
+ EXPECT_EQ(1, remover_1.total());
+ EXPECT_EQ(1, remover_2.total());
+ EXPECT_EQ(0, a.total());
+ EXPECT_EQ(1, b.total());
+
+ cb_reg.Notify();
+
+ // Only |remover_2| and |b| run this time.
+ EXPECT_EQ(1, remover_1.total());
+ EXPECT_EQ(2, remover_2.total());
+ EXPECT_EQ(0, a.total());
+ EXPECT_EQ(2, b.total());
+}
+
+// Test that a callback can add another callback to the list during iteration
+// without invalidating the iterator. The newly added callback should be run on
+// the current iteration as will all other callbacks in the list.
+TEST(CallbackListTest, AddCallbacksDuringIteration) {
+ CallbackList<void(void)> cb_reg;
+ Adder a(&cb_reg);
+ Listener b;
+ std::unique_ptr<CallbackList<void(void)>::Subscription> a_subscription =
+ cb_reg.Add(Bind(&Adder::AddCallback, Unretained(&a)));
+ std::unique_ptr<CallbackList<void(void)>::Subscription> b_subscription =
+ cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&b)));
+
+ cb_reg.Notify();
+
+ EXPECT_EQ(1, a.total());
+ EXPECT_EQ(1, b.total());
+ EXPECT_TRUE(a.added());
+
+ cb_reg.Notify();
+
+ EXPECT_EQ(2, a.total());
+ EXPECT_EQ(2, b.total());
+}
+
+// Sanity check: notifying an empty list is a no-op.
+TEST(CallbackListTest, EmptyList) {
+ CallbackList<void(void)> cb_reg;
+
+ cb_reg.Notify();
+}
+
+TEST(CallbackList, RemovalCallback) {
+ Counter remove_count;
+ CallbackList<void(void)> cb_reg;
+ cb_reg.set_removal_callback(
+ Bind(&Counter::Increment, Unretained(&remove_count)));
+
+ std::unique_ptr<CallbackList<void(void)>::Subscription> subscription =
+ cb_reg.Add(Bind(&DoNothing));
+
+ // Removing a subscription outside of iteration signals the callback.
+ EXPECT_EQ(0, remove_count.value());
+ subscription.reset();
+ EXPECT_EQ(1, remove_count.value());
+
+ // Configure two subscriptions to remove themselves.
+ Remover remover_1, remover_2;
+ std::unique_ptr<CallbackList<void(void)>::Subscription> remover_1_sub =
+ cb_reg.Add(
+ Bind(&Remover::IncrementTotalAndRemove, Unretained(&remover_1)));
+ std::unique_ptr<CallbackList<void(void)>::Subscription> remover_2_sub =
+ cb_reg.Add(
+ Bind(&Remover::IncrementTotalAndRemove, Unretained(&remover_2)));
+ remover_1.SetSubscriptionToRemove(std::move(remover_1_sub));
+ remover_2.SetSubscriptionToRemove(std::move(remover_2_sub));
+
+ // The callback should be signaled exactly once.
+ EXPECT_EQ(1, remove_count.value());
+ cb_reg.Notify();
+ EXPECT_EQ(2, remove_count.value());
+ EXPECT_TRUE(cb_reg.empty());
+}
+
+} // namespace
+} // namespace base
diff --git a/libchrome/base/callback_unittest.cc b/libchrome/base/callback_unittest.cc
new file mode 100644
index 0000000..ce453a1
--- /dev/null
+++ b/libchrome/base/callback_unittest.cc
@@ -0,0 +1,166 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/callback.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/callback_internal.h"
+#include "base/memory/ref_counted.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+void NopInvokeFunc(internal::BindStateBase*) {}
+
+// White-box testpoints to inject into a Callback<> object for checking
+// comparators and emptiness APIs. Use a BindState that is specialized
+// based on a type we declared in the anonymous namespace above to remove any
+// chance of colliding with another instantiation and breaking the
+// one-definition-rule.
+struct FakeBindState1 : internal::BindStateBase {
+ FakeBindState1() : BindStateBase(&Destroy) {}
+ private:
+ ~FakeBindState1() {}
+ static void Destroy(internal::BindStateBase* self) {
+ delete static_cast<FakeBindState1*>(self);
+ }
+};
+
+struct FakeBindState2 : internal::BindStateBase {
+ FakeBindState2() : BindStateBase(&Destroy) {}
+ private:
+ ~FakeBindState2() {}
+ static void Destroy(internal::BindStateBase* self) {
+ delete static_cast<FakeBindState2*>(self);
+ }
+};
+
+namespace {
+
+class CallbackTest : public ::testing::Test {
+ public:
+ CallbackTest()
+ : callback_a_(new FakeBindState1(), &NopInvokeFunc),
+ callback_b_(new FakeBindState2(), &NopInvokeFunc) {
+ }
+
+ ~CallbackTest() override {}
+
+ protected:
+ Callback<void()> callback_a_;
+ const Callback<void()> callback_b_; // Ensure APIs work with const.
+ Callback<void()> null_callback_;
+};
+
+// Ensure we can create unbound callbacks. We need this to be able to store
+// them in class members that can be initialized later.
+TEST_F(CallbackTest, DefaultConstruction) {
+ Callback<void()> c0;
+ Callback<void(int)> c1;
+ Callback<void(int,int)> c2;
+ Callback<void(int,int,int)> c3;
+ Callback<void(int,int,int,int)> c4;
+ Callback<void(int,int,int,int,int)> c5;
+ Callback<void(int,int,int,int,int,int)> c6;
+
+ EXPECT_TRUE(c0.is_null());
+ EXPECT_TRUE(c1.is_null());
+ EXPECT_TRUE(c2.is_null());
+ EXPECT_TRUE(c3.is_null());
+ EXPECT_TRUE(c4.is_null());
+ EXPECT_TRUE(c5.is_null());
+ EXPECT_TRUE(c6.is_null());
+}
+
+TEST_F(CallbackTest, IsNull) {
+ EXPECT_TRUE(null_callback_.is_null());
+ EXPECT_FALSE(callback_a_.is_null());
+ EXPECT_FALSE(callback_b_.is_null());
+}
+
+TEST_F(CallbackTest, Equals) {
+ EXPECT_TRUE(callback_a_.Equals(callback_a_));
+ EXPECT_FALSE(callback_a_.Equals(callback_b_));
+ EXPECT_FALSE(callback_b_.Equals(callback_a_));
+
+ // We should compare based on instance, not type.
+ Callback<void()> callback_c(new FakeBindState1(), &NopInvokeFunc);
+ Callback<void()> callback_a2 = callback_a_;
+ EXPECT_TRUE(callback_a_.Equals(callback_a2));
+ EXPECT_FALSE(callback_a_.Equals(callback_c));
+
+ // Empty, however, is always equal to empty.
+ Callback<void()> empty2;
+ EXPECT_TRUE(null_callback_.Equals(empty2));
+}
+
+TEST_F(CallbackTest, Reset) {
+ // Resetting should bring us back to empty.
+ ASSERT_FALSE(callback_a_.is_null());
+ ASSERT_FALSE(callback_a_.Equals(null_callback_));
+
+ callback_a_.Reset();
+
+ EXPECT_TRUE(callback_a_.is_null());
+ EXPECT_TRUE(callback_a_.Equals(null_callback_));
+}
+
+struct TestForReentrancy {
+ TestForReentrancy()
+ : cb_already_run(false),
+ cb(Bind(&TestForReentrancy::AssertCBIsNull, Unretained(this))) {
+ }
+ void AssertCBIsNull() {
+ ASSERT_TRUE(cb.is_null());
+ cb_already_run = true;
+ }
+ bool cb_already_run;
+ Closure cb;
+};
+
+TEST_F(CallbackTest, ResetAndReturn) {
+ TestForReentrancy tfr;
+ ASSERT_FALSE(tfr.cb.is_null());
+ ASSERT_FALSE(tfr.cb_already_run);
+ ResetAndReturn(&tfr.cb).Run();
+ ASSERT_TRUE(tfr.cb.is_null());
+ ASSERT_TRUE(tfr.cb_already_run);
+}
+
+class CallbackOwner : public base::RefCounted<CallbackOwner> {
+ public:
+ explicit CallbackOwner(bool* deleted) {
+ callback_ = Bind(&CallbackOwner::Unused, this);
+ deleted_ = deleted;
+ }
+ void Reset() {
+ callback_.Reset();
+ // We are deleted here if no-one else had a ref to us.
+ }
+
+ private:
+ friend class base::RefCounted<CallbackOwner>;
+ virtual ~CallbackOwner() {
+ *deleted_ = true;
+ }
+ void Unused() {
+ FAIL() << "Should never be called";
+ }
+
+ Closure callback_;
+ bool* deleted_;
+};
+
+TEST_F(CallbackTest, CallbackHasLastRefOnContainingObject) {
+ bool deleted = false;
+ CallbackOwner* owner = new CallbackOwner(&deleted);
+ owner->Reset();
+ ASSERT_TRUE(deleted);
+}
+
+} // namespace
+} // namespace base
diff --git a/libchrome/base/cancelable_callback.h b/libchrome/base/cancelable_callback.h
new file mode 100644
index 0000000..0034fdd
--- /dev/null
+++ b/libchrome/base/cancelable_callback.h
@@ -0,0 +1,134 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// CancelableCallback is a wrapper around base::Callback that allows
+// cancellation of a callback. CancelableCallback takes a reference on the
+// wrapped callback until this object is destroyed or Reset()/Cancel() are
+// called.
+//
+// NOTE:
+//
+// Calling CancelableCallback::Cancel() brings the object back to its natural,
+// default-constructed state, i.e., CancelableCallback::callback() will return
+// a null callback.
+//
+// THREAD-SAFETY:
+//
+// CancelableCallback objects must be created on, posted to, cancelled on, and
+// destroyed on the same thread.
+//
+//
+// EXAMPLE USAGE:
+//
+// In the following example, the test is verifying that RunIntensiveTest()
+// Quit()s the message loop within 4 seconds. The cancelable callback is posted
+// to the message loop, the intensive test runs, the message loop is run,
+// then the callback is cancelled.
+//
+// void TimeoutCallback(const std::string& timeout_message) {
+// FAIL() << timeout_message;
+// MessageLoop::current()->QuitWhenIdle();
+// }
+//
+// CancelableClosure timeout(base::Bind(&TimeoutCallback, "Test timed out."));
+// MessageLoop::current()->PostDelayedTask(FROM_HERE, timeout.callback(),
+//                                         4000);  // 4 seconds to run.
+// RunIntensiveTest();
+// MessageLoop::current()->Run();
+// timeout.Cancel(); // Hopefully this is hit before the timeout callback runs.
+//
+
+#ifndef BASE_CANCELABLE_CALLBACK_H_
+#define BASE_CANCELABLE_CALLBACK_H_
+
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/callback_internal.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+
+namespace base {
+
+template <typename Sig>
+class CancelableCallback;
+
+template <typename... A>
+class CancelableCallback<void(A...)> {
+ public:
+  CancelableCallback() : weak_factory_(this) {}
+
+  // |callback| must not be null.
+  explicit CancelableCallback(const base::Callback<void(A...)>& callback)
+      : callback_(callback), weak_factory_(this) {
+    DCHECK(!callback.is_null());
+    InitializeForwarder();
+  }
+
+  ~CancelableCallback() {}
+
+  // Cancels and drops the reference to the wrapped callback.
+  // Invalidating the weak pointers turns every outstanding copy of
+  // |forwarder_| into a no-op.
+  void Cancel() {
+    weak_factory_.InvalidateWeakPtrs();
+    forwarder_.Reset();
+    callback_.Reset();
+  }
+
+  // Returns true if the wrapped callback has been cancelled.
+  bool IsCancelled() const {
+    return callback_.is_null();
+  }
+
+  // Sets |callback| as the closure that may be cancelled. |callback| may not
+  // be null. Outstanding and any previously wrapped callbacks are cancelled.
+  void Reset(const base::Callback<void(A...)>& callback) {
+    DCHECK(!callback.is_null());
+
+    // Outstanding tasks (e.g., posted to a message loop) must not be called.
+    Cancel();
+
+    // |forwarder_| is no longer valid after Cancel(), so re-bind.
+    InitializeForwarder();
+
+    callback_ = callback;
+  }
+
+  // Returns a callback that can be disabled by calling Cancel().
+  const base::Callback<void(A...)>& callback() const {
+    return forwarder_;
+  }
+
+ private:
+  // Trampoline bound into |forwarder_|. std::forward<A> casts each argument
+  // to A&&, so move-only arguments are passed through to |callback_|.
+  void Forward(A... args) const {
+    callback_.Run(std::forward<A>(args)...);
+  }
+
+  // Helper method to bind |forwarder_| using a weak pointer from
+  // |weak_factory_|.
+  void InitializeForwarder() {
+    forwarder_ = base::Bind(&CancelableCallback<void(A...)>::Forward,
+                            weak_factory_.GetWeakPtr());
+  }
+
+  // The wrapper closure.
+  base::Callback<void(A...)> forwarder_;
+
+  // The stored closure that may be cancelled.
+  base::Callback<void(A...)> callback_;
+
+  // Used to ensure Forward() is not run when this object is destroyed.
+  base::WeakPtrFactory<CancelableCallback<void(A...)>> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(CancelableCallback);
+};
+
+typedef CancelableCallback<void(void)> CancelableClosure;
+
+} // namespace base
+
+#endif // BASE_CANCELABLE_CALLBACK_H_
diff --git a/libchrome/base/cancelable_callback_unittest.cc b/libchrome/base/cancelable_callback_unittest.cc
new file mode 100644
index 0000000..23b6c1c
--- /dev/null
+++ b/libchrome/base/cancelable_callback_unittest.cc
@@ -0,0 +1,205 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/cancelable_callback.h"
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/location.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+// Minimal ref-counted type used to observe the reference count held by a
+// bound callback argument.
+class TestRefCounted : public RefCountedThreadSafe<TestRefCounted> {
+ private:
+  friend class RefCountedThreadSafe<TestRefCounted>;
+  ~TestRefCounted() {};
+};
+
+// Simple bind targets used by the tests below.
+void Increment(int* count) { (*count)++; }
+void IncrementBy(int* count, int n) { (*count) += n; }
+void RefCountedParam(const scoped_refptr<TestRefCounted>& ref_counted) {}
+
+// Receives a move-only result and records its value through |value|.
+void OnMoveOnlyReceived(int* value, std::unique_ptr<int> result) {
+  *value = *result;
+}
+
+// Cancel().
+// - Callback can be run multiple times.
+// - After Cancel(), Run() completes but has no effect.
+TEST(CancelableCallbackTest, Cancel) {
+  int count = 0;
+  CancelableClosure cancelable(
+      base::Bind(&Increment, base::Unretained(&count)));
+
+  base::Closure callback = cancelable.callback();
+  callback.Run();
+  EXPECT_EQ(1, count);
+
+  callback.Run();
+  EXPECT_EQ(2, count);
+
+  // After Cancel(), running the outstanding copy is a no-op.
+  cancelable.Cancel();
+  callback.Run();
+  EXPECT_EQ(2, count);
+}
+
+// Cancel() called multiple times.
+// - Cancel() cancels all copies of the wrapped callback.
+// - Calling Cancel() more than once has no effect.
+// - After Cancel(), callback() returns a null callback.
+TEST(CancelableCallbackTest, MultipleCancel) {
+  int count = 0;
+  CancelableClosure cancelable(
+      base::Bind(&Increment, base::Unretained(&count)));
+
+  // Two independent copies; Cancel() must disable both.
+  base::Closure callback1 = cancelable.callback();
+  base::Closure callback2 = cancelable.callback();
+  cancelable.Cancel();
+
+  callback1.Run();
+  EXPECT_EQ(0, count);
+
+  callback2.Run();
+  EXPECT_EQ(0, count);
+
+  // Calling Cancel() again has no effect.
+  cancelable.Cancel();
+
+  // callback() of a cancelled callback is null.
+  base::Closure callback3 = cancelable.callback();
+  EXPECT_TRUE(callback3.is_null());
+}
+
+// CancelableCallback destroyed before callback is run.
+// - Destruction of CancelableCallback cancels outstanding callbacks.
+TEST(CancelableCallbackTest, CallbackCanceledOnDestruction) {
+  int count = 0;
+  base::Closure callback;
+
+  {
+    CancelableClosure cancelable(
+        base::Bind(&Increment, base::Unretained(&count)));
+
+    callback = cancelable.callback();
+    callback.Run();
+    EXPECT_EQ(1, count);
+  }  // |cancelable| destroyed here; |callback| becomes a no-op.
+
+  callback.Run();
+  EXPECT_EQ(1, count);
+}
+
+// Cancel() called on bound closure with a RefCounted parameter.
+// - Cancel drops wrapped callback (and, implicitly, its bound arguments).
+TEST(CancelableCallbackTest, CancelDropsCallback) {
+  scoped_refptr<TestRefCounted> ref_counted = new TestRefCounted;
+  EXPECT_TRUE(ref_counted->HasOneRef());
+
+  // Binding takes a second reference on |ref_counted|.
+  CancelableClosure cancelable(base::Bind(RefCountedParam, ref_counted));
+  EXPECT_FALSE(cancelable.IsCancelled());
+  EXPECT_TRUE(ref_counted.get());
+  EXPECT_FALSE(ref_counted->HasOneRef());
+
+  // There is only one reference to |ref_counted| after the Cancel().
+  cancelable.Cancel();
+  EXPECT_TRUE(cancelable.IsCancelled());
+  EXPECT_TRUE(ref_counted.get());
+  EXPECT_TRUE(ref_counted->HasOneRef());
+}
+
+// Reset().
+// - Reset() replaces the existing wrapped callback with a new callback.
+// - Reset() deactivates outstanding callbacks.
+TEST(CancelableCallbackTest, Reset) {
+  int count = 0;
+  CancelableClosure cancelable(
+      base::Bind(&Increment, base::Unretained(&count)));
+
+  base::Closure callback = cancelable.callback();
+  callback.Run();
+  EXPECT_EQ(1, count);
+
+  callback.Run();
+  EXPECT_EQ(2, count);
+
+  cancelable.Reset(
+      base::Bind(&IncrementBy, base::Unretained(&count), 3));
+  EXPECT_FALSE(cancelable.IsCancelled());
+
+  // The stale copy of the cancelable callback is non-null.
+  ASSERT_FALSE(callback.is_null());
+
+  // The stale copy of the cancelable callback is no longer active.
+  callback.Run();
+  EXPECT_EQ(2, count);
+
+  // A fresh copy taken after Reset() runs the new callback.
+  base::Closure callback2 = cancelable.callback();
+  ASSERT_FALSE(callback2.is_null());
+
+  callback2.Run();
+  EXPECT_EQ(5, count);
+}
+
+// IsCancelled().
+// - Cancel() transforms the CancelableCallback into a cancelled state.
+// - A default-constructed CancelableCallback starts out cancelled.
+TEST(CancelableCallbackTest, IsNull) {
+  CancelableClosure cancelable;
+  EXPECT_TRUE(cancelable.IsCancelled());
+
+  int count = 0;
+  cancelable.Reset(base::Bind(&Increment,
+                              base::Unretained(&count)));
+  EXPECT_FALSE(cancelable.IsCancelled());
+
+  cancelable.Cancel();
+  EXPECT_TRUE(cancelable.IsCancelled());
+}
+
+// CancelableCallback posted to a MessageLoop with PostTask.
+// - Callbacks posted to a MessageLoop can be cancelled.
+TEST(CancelableCallbackTest, PostTask) {
+  MessageLoop loop;
+
+  int count = 0;
+  CancelableClosure cancelable(base::Bind(&Increment,
+                                          base::Unretained(&count)));
+
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, cancelable.callback());
+  RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(1, count);
+
+  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, cancelable.callback());
+
+  // Cancel before running the message loop.
+  cancelable.Cancel();
+  RunLoop().RunUntilIdle();
+
+  // Callback never ran due to cancellation; count is the same.
+  EXPECT_EQ(1, count);
+}
+
+// CancelableCallback can be used with move-only types: the forwarder must
+// pass the std::unique_ptr argument through to the wrapped callback.
+TEST(CancelableCallbackTest, MoveOnlyType) {
+  const int kExpectedResult = 42;
+
+  int result = 0;
+  CancelableCallback<void(std::unique_ptr<int>)> cb(
+      base::Bind(&OnMoveOnlyReceived, base::Unretained(&result)));
+  cb.callback().Run(base::WrapUnique(new int(kExpectedResult)));
+
+  EXPECT_EQ(kExpectedResult, result);
+}
+
+} // namespace
+} // namespace base
diff --git a/libchrome/base/command_line.cc b/libchrome/base/command_line.cc
new file mode 100644
index 0000000..099bb18
--- /dev/null
+++ b/libchrome/base/command_line.cc
@@ -0,0 +1,493 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/command_line.h"
+
+#include <algorithm>
+#include <ostream>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include <shellapi.h>
+#endif
+
+namespace base {
+
+// Singleton representing the current process's command line; set by Init().
+CommandLine* CommandLine::current_process_commandline_ = NULL;
+
+namespace {
+
+const CommandLine::CharType kSwitchTerminator[] = FILE_PATH_LITERAL("--");
+const CommandLine::CharType kSwitchValueSeparator[] = FILE_PATH_LITERAL("=");
+
+// Since we use a lazy match, make sure that longer versions (like "--") are
+// listed before shorter versions (like "-") of similar prefixes.
+#if defined(OS_WIN)
+// By putting slash last, we can control whether it is treated as a switch
+// value by changing the value of switch_prefix_count to be one less than
+// the array size.
+const CommandLine::CharType* const kSwitchPrefixes[] = {L"--", L"-", L"/"};
+#elif defined(OS_POSIX)
+// Unixes don't use slash as a switch.
+const CommandLine::CharType* const kSwitchPrefixes[] = {"--", "-"};
+#endif
+// Number of entries of kSwitchPrefixes that are active; see
+// set_slash_is_not_a_switch() on Windows.
+size_t switch_prefix_count = arraysize(kSwitchPrefixes);
+
+// Returns the length of the switch prefix (e.g. "--") that |string| starts
+// with, or 0 if it starts with none of the active prefixes.
+size_t GetSwitchPrefixLength(const CommandLine::StringType& string) {
+  for (size_t i = 0; i < switch_prefix_count; ++i) {
+    CommandLine::StringType prefix(kSwitchPrefixes[i]);
+    if (string.compare(0, prefix.length(), prefix) == 0)
+      return prefix.length();
+  }
+  return 0;
+}
+
+// Fills in |switch_string| and |switch_value| if |string| is a switch.
+// This will preserve the input switch prefix in the output |switch_string|.
+// Returns false for non-switches, including a bare prefix such as "--".
+bool IsSwitch(const CommandLine::StringType& string,
+              CommandLine::StringType* switch_string,
+              CommandLine::StringType* switch_value) {
+  switch_string->clear();
+  switch_value->clear();
+  size_t prefix_length = GetSwitchPrefixLength(string);
+  if (prefix_length == 0 || prefix_length == string.length())
+    return false;
+
+  // Split on the first '='; anything after it is the switch's value.
+  const size_t equals_position = string.find(kSwitchValueSeparator);
+  *switch_string = string.substr(0, equals_position);
+  if (equals_position != CommandLine::StringType::npos)
+    *switch_value = string.substr(equals_position + 1);
+  return true;
+}
+
+// Append switches and arguments, keeping switches before arguments.
+// argv[0] (the program) is skipped; once "--" is seen, switch parsing is
+// permanently disabled for the remaining tokens.
+void AppendSwitchesAndArguments(CommandLine* command_line,
+                                const CommandLine::StringVector& argv) {
+  bool parse_switches = true;
+  for (size_t i = 1; i < argv.size(); ++i) {
+    CommandLine::StringType arg = argv[i];
+#if defined(OS_WIN)
+    TrimWhitespace(arg, TRIM_ALL, &arg);
+#else
+    TrimWhitespaceASCII(arg, TRIM_ALL, &arg);
+#endif
+
+    CommandLine::StringType switch_string;
+    CommandLine::StringType switch_value;
+    parse_switches &= (arg != kSwitchTerminator);
+    if (parse_switches && IsSwitch(arg, &switch_string, &switch_value)) {
+#if defined(OS_WIN)
+      command_line->AppendSwitchNative(UTF16ToASCII(switch_string),
+                                       switch_value);
+#elif defined(OS_POSIX)
+      command_line->AppendSwitchNative(switch_string, switch_value);
+#endif
+    } else {
+      command_line->AppendArgNative(arg);
+    }
+  }
+}
+
+#if defined(OS_WIN)
+// Quote a string as necessary for CommandLineToArgvW compatibility *on
+// Windows*. Returns |arg| unchanged when it contains no characters that
+// require quoting.
+string16 QuoteForCommandLineToArgvW(const string16& arg,
+                                    bool quote_placeholders) {
+  // We follow the quoting rules of CommandLineToArgvW.
+  // http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
+  string16 quotable_chars(L" \\\"");
+  // We may also be required to quote '%', which is commonly used in a command
+  // line as a placeholder. (It may be substituted for a string with spaces.)
+  if (quote_placeholders)
+    quotable_chars.push_back(L'%');
+  if (arg.find_first_of(quotable_chars) == string16::npos) {
+    // No quoting necessary.
+    return arg;
+  }
+
+  string16 out;
+  out.push_back(L'"');
+  for (size_t i = 0; i < arg.size(); ++i) {
+    if (arg[i] == '\\') {
+      // Find the extent of this run of backslashes.
+      size_t start = i, end = start + 1;
+      for (; end < arg.size() && arg[end] == '\\'; ++end) {}
+      size_t backslash_count = end - start;
+
+      // Backslashes are escapes only if the run is followed by a double quote.
+      // Since we also will end the string with a double quote, we escape for
+      // either a double quote or the end of the string.
+      if (end == arg.size() || arg[end] == '"') {
+        // To quote, we need to output 2x as many backslashes.
+        backslash_count *= 2;
+      }
+      for (size_t j = 0; j < backslash_count; ++j)
+        out.push_back('\\');
+
+      // Advance i to one before the end to balance i++ in loop.
+      i = end - 1;
+    } else if (arg[i] == '"') {
+      out.push_back('\\');
+      out.push_back('"');
+    } else {
+      out.push_back(arg[i]);
+    }
+  }
+  out.push_back('"');
+
+  return out;
+}
+#endif
+
+} // namespace
+
+// All constructors start with argv_(1): slot 0 is reserved for the program
+// name (possibly empty), and begin_args_ marks where arguments start.
+CommandLine::CommandLine(NoProgram) : argv_(1), begin_args_(1) {}
+
+CommandLine::CommandLine(const FilePath& program)
+    : argv_(1),
+      begin_args_(1) {
+  SetProgram(program);
+}
+
+CommandLine::CommandLine(int argc, const CommandLine::CharType* const* argv)
+    : argv_(1),
+      begin_args_(1) {
+  InitFromArgv(argc, argv);
+}
+
+CommandLine::CommandLine(const StringVector& argv)
+    : argv_(1),
+      begin_args_(1) {
+  InitFromArgv(argv);
+}
+
+// Copy and assignment must call ResetStringPieces(): the StringPiece keys
+// in |switches_by_stringpiece_| would otherwise point into |other|'s
+// |switches_| storage rather than our own.
+CommandLine::CommandLine(const CommandLine& other)
+    : argv_(other.argv_),
+      switches_(other.switches_),
+      begin_args_(other.begin_args_) {
+  ResetStringPieces();
+}
+
+CommandLine& CommandLine::operator=(const CommandLine& other) {
+  argv_ = other.argv_;
+  switches_ = other.switches_;
+  begin_args_ = other.begin_args_;
+  ResetStringPieces();
+  return *this;
+}
+
+CommandLine::~CommandLine() {
+}
+
+#if defined(OS_WIN)
+// static
+// Windows-only: stop treating a leading '/' as a switch prefix.
+void CommandLine::set_slash_is_not_a_switch() {
+  // The last switch prefix should be slash, so adjust the size to skip it.
+  DCHECK_EQ(wcscmp(kSwitchPrefixes[arraysize(kSwitchPrefixes) - 1], L"/"), 0);
+  switch_prefix_count = arraysize(kSwitchPrefixes) - 1;
+}
+
+// static
+void CommandLine::InitUsingArgvForTesting(int argc, const char* const* argv) {
+  DCHECK(!current_process_commandline_);
+  current_process_commandline_ = new CommandLine(NO_PROGRAM);
+  // On Windows we need to convert the command line arguments to string16.
+  base::CommandLine::StringVector argv_vector;
+  for (int i = 0; i < argc; ++i)
+    argv_vector.push_back(UTF8ToUTF16(argv[i]));
+  current_process_commandline_->InitFromArgv(argv_vector);
+}
+#endif
+
+// static
+// Initializes the process-wide singleton. Returns false (and does nothing)
+// if it was already initialized; only a 'true' return obligates the caller
+// to eventually call Reset().
+bool CommandLine::Init(int argc, const char* const* argv) {
+  if (current_process_commandline_) {
+    // If this is intentional, Reset() must be called first. If we are using
+    // the shared build mode, we have to share a single object across multiple
+    // shared libraries.
+    return false;
+  }
+
+  current_process_commandline_ = new CommandLine(NO_PROGRAM);
+#if defined(OS_WIN)
+  current_process_commandline_->ParseFromString(::GetCommandLineW());
+#elif defined(OS_POSIX)
+  current_process_commandline_->InitFromArgv(argc, argv);
+#endif
+
+  return true;
+}
+
+// static
+// Destroys the singleton; Init() may be called again afterwards.
+void CommandLine::Reset() {
+  DCHECK(current_process_commandline_);
+  delete current_process_commandline_;
+  current_process_commandline_ = NULL;
+}
+
+// static
+CommandLine* CommandLine::ForCurrentProcess() {
+  DCHECK(current_process_commandline_);
+  return current_process_commandline_;
+}
+
+// static
+bool CommandLine::InitializedForCurrentProcess() {
+  return !!current_process_commandline_;
+}
+
+#if defined(OS_WIN)
+// static
+// Builds a CommandLine by parsing a full Windows command-line string.
+CommandLine CommandLine::FromString(const string16& command_line) {
+  CommandLine cmd(NO_PROGRAM);
+  cmd.ParseFromString(command_line);
+  return cmd;
+}
+#endif
+
+void CommandLine::InitFromArgv(int argc,
+                               const CommandLine::CharType* const* argv) {
+  StringVector new_argv;
+  for (int i = 0; i < argc; ++i)
+    new_argv.push_back(argv[i]);
+  InitFromArgv(new_argv);
+}
+
+// Re-initializes all state from |argv|; any previously stored program,
+// switches, and arguments are discarded.
+void CommandLine::InitFromArgv(const StringVector& argv) {
+  argv_ = StringVector(1);
+  switches_.clear();
+  switches_by_stringpiece_.clear();
+  begin_args_ = 1;
+  SetProgram(argv.empty() ? FilePath() : FilePath(argv[0]));
+  AppendSwitchesAndArguments(this, argv);
+}
+
+FilePath CommandLine::GetProgram() const {
+  return FilePath(argv_[0]);
+}
+
+// Stores the program name in argv_[0], trimming surrounding whitespace.
+void CommandLine::SetProgram(const FilePath& program) {
+#if defined(OS_WIN)
+  TrimWhitespace(program.value(), TRIM_ALL, &argv_[0]);
+#else
+  TrimWhitespaceASCII(program.value(), TRIM_ALL, &argv_[0]);
+#endif
+}
+
+// Switch names must already be lowercase (enforced by the DCHECK).
+bool CommandLine::HasSwitch(const base::StringPiece& switch_string) const {
+  DCHECK_EQ(ToLowerASCII(switch_string), switch_string);
+  return switches_by_stringpiece_.find(switch_string) !=
+         switches_by_stringpiece_.end();
+}
+
+bool CommandLine::HasSwitch(const char switch_constant[]) const {
+  return HasSwitch(base::StringPiece(switch_constant));
+}
+
+// Returns the switch's value as ASCII, or the empty string (with a warning
+// logged) when the stored value contains non-ASCII characters.
+std::string CommandLine::GetSwitchValueASCII(
+    const base::StringPiece& switch_string) const {
+  StringType value = GetSwitchValueNative(switch_string);
+  if (!IsStringASCII(value)) {
+    DLOG(WARNING) << "Value of switch (" << switch_string << ") must be ASCII.";
+    return std::string();
+  }
+#if defined(OS_WIN)
+  return UTF16ToASCII(value);
+#else
+  return value;
+#endif
+}
+
+FilePath CommandLine::GetSwitchValuePath(
+    const base::StringPiece& switch_string) const {
+  return FilePath(GetSwitchValueNative(switch_string));
+}
+
+// Returns the raw stored value, or an empty StringType for missing switches.
+CommandLine::StringType CommandLine::GetSwitchValueNative(
+    const base::StringPiece& switch_string) const {
+  DCHECK_EQ(ToLowerASCII(switch_string), switch_string);
+  auto result = switches_by_stringpiece_.find(switch_string);
+  return result == switches_by_stringpiece_.end() ? StringType()
+                                                  : *(result->second);
+}
+
+void CommandLine::AppendSwitch(const std::string& switch_string) {
+  AppendSwitchNative(switch_string, StringType());
+}
+
+void CommandLine::AppendSwitchPath(const std::string& switch_string,
+                                   const FilePath& path) {
+  AppendSwitchNative(switch_string, path.value());
+}
+
+// Records the switch in |switches_| (overwriting any previous value), keeps
+// the StringPiece index pointing at the map's own storage, and inserts the
+// textual form into |argv_| just before the first argument.
+void CommandLine::AppendSwitchNative(const std::string& switch_string,
+                                     const CommandLine::StringType& value) {
+#if defined(OS_WIN)
+  const std::string switch_key = ToLowerASCII(switch_string);
+  StringType combined_switch_string(ASCIIToUTF16(switch_key));
+#elif defined(OS_POSIX)
+  const std::string& switch_key = switch_string;
+  StringType combined_switch_string(switch_key);
+#endif
+  size_t prefix_length = GetSwitchPrefixLength(combined_switch_string);
+  auto insertion =
+      switches_.insert(make_pair(switch_key.substr(prefix_length), value));
+  if (!insertion.second)
+    insertion.first->second = value;
+  switches_by_stringpiece_[insertion.first->first] = &(insertion.first->second);
+  // Preserve existing switch prefixes in |argv_|; only append one if necessary.
+  if (prefix_length == 0)
+    combined_switch_string = kSwitchPrefixes[0] + combined_switch_string;
+  if (!value.empty())
+    combined_switch_string += kSwitchValueSeparator + value;
+  // Append the switch and update the switches/arguments divider |begin_args_|.
+  argv_.insert(argv_.begin() + begin_args_++, combined_switch_string);
+}
+
+void CommandLine::AppendSwitchASCII(const std::string& switch_string,
+                                    const std::string& value_string) {
+#if defined(OS_WIN)
+  AppendSwitchNative(switch_string, ASCIIToUTF16(value_string));
+#elif defined(OS_POSIX)
+  AppendSwitchNative(switch_string, value_string);
+#endif
+}
+
+// Copies each switch in |switches| that is present in |source|, along with
+// its value; switches absent from |source| are skipped.
+void CommandLine::CopySwitchesFrom(const CommandLine& source,
+                                   const char* const switches[],
+                                   size_t count) {
+  for (size_t i = 0; i < count; ++i) {
+    if (source.HasSwitch(switches[i]))
+      AppendSwitchNative(switches[i], source.GetSwitchValueNative(switches[i]));
+  }
+}
+
+// Returns the non-switch arguments, with the first "--" terminator (if any)
+// removed.
+CommandLine::StringVector CommandLine::GetArgs() const {
+  // Gather all arguments after the last switch (may include kSwitchTerminator).
+  StringVector args(argv_.begin() + begin_args_, argv_.end());
+  // Erase only the first kSwitchTerminator (maybe "--" is a legitimate page?)
+  StringVector::iterator switch_terminator =
+      std::find(args.begin(), args.end(), kSwitchTerminator);
+  if (switch_terminator != args.end())
+    args.erase(switch_terminator);
+  return args;
+}
+
+void CommandLine::AppendArg(const std::string& value) {
+#if defined(OS_WIN)
+  DCHECK(IsStringUTF8(value));
+  AppendArgNative(UTF8ToWide(value));
+#elif defined(OS_POSIX)
+  AppendArgNative(value);
+#endif
+}
+
+void CommandLine::AppendArgPath(const FilePath& path) {
+  AppendArgNative(path.value());
+}
+
+// Arguments go at the end of |argv_|, after all switches.
+void CommandLine::AppendArgNative(const CommandLine::StringType& value) {
+  argv_.push_back(value);
+}
+
+// Appends |other|'s switches and arguments to this command line; optionally
+// also adopts its program name.
+void CommandLine::AppendArguments(const CommandLine& other,
+                                  bool include_program) {
+  if (include_program)
+    SetProgram(other.GetProgram());
+  AppendSwitchesAndArguments(this, other.argv());
+}
+
+// Prepends a wrapper command (e.g. "gdb --args") before the program.
+void CommandLine::PrependWrapper(const CommandLine::StringType& wrapper) {
+  if (wrapper.empty())
+    return;
+  // The wrapper may have embedded arguments (like "gdb --args"). In this case,
+  // we don't pretend to do anything fancy, we just split on spaces.
+  StringVector wrapper_argv = SplitString(
+      wrapper, FilePath::StringType(1, ' '), base::TRIM_WHITESPACE,
+      base::SPLIT_WANT_ALL);
+  // Prepend the wrapper and update the switches/arguments |begin_args_|.
+  argv_.insert(argv_.begin(), wrapper_argv.begin(), wrapper_argv.end());
+  begin_args_ += wrapper_argv.size();
+}
+
+#if defined(OS_WIN)
+// Parses a whole Windows command-line string via the shell32
+// CommandLineToArgvW API and re-initializes from the resulting argv.
+void CommandLine::ParseFromString(const string16& command_line) {
+  string16 command_line_string;
+  TrimWhitespace(command_line, TRIM_ALL, &command_line_string);
+  if (command_line_string.empty())
+    return;
+
+  int num_args = 0;
+  wchar_t** args = NULL;
+  args = ::CommandLineToArgvW(command_line_string.c_str(), &num_args);
+
+  DPLOG_IF(FATAL, !args) << "CommandLineToArgvW failed on command line: "
+                         << UTF16ToUTF8(command_line);
+  InitFromArgv(num_args, args);
+  // The argv array returned by CommandLineToArgvW must be LocalFree'd.
+  LocalFree(args);
+}
+#endif
+
+// Builds "program [switches...] [args...]" as a single string; on Windows
+// each piece is quoted for CommandLineToArgvW round-tripping.
+CommandLine::StringType CommandLine::GetCommandLineStringInternal(
+    bool quote_placeholders) const {
+  StringType string(argv_[0]);
+#if defined(OS_WIN)
+  string = QuoteForCommandLineToArgvW(string, quote_placeholders);
+#endif
+  StringType params(GetArgumentsStringInternal(quote_placeholders));
+  if (!params.empty()) {
+    string.append(StringType(FILE_PATH_LITERAL(" ")));
+    string.append(params);
+  }
+  return string;
+}
+
+// Builds the space-separated switch/argument portion (everything after the
+// program name).
+CommandLine::StringType CommandLine::GetArgumentsStringInternal(
+    bool quote_placeholders) const {
+#if !defined(OS_WIN)
+  (void)quote_placeholders;  // Avoid an unused warning.
+#endif
+  StringType params;
+  // Append switches and arguments.
+  bool parse_switches = true;
+  for (size_t i = 1; i < argv_.size(); ++i) {
+    StringType arg = argv_[i];
+    StringType switch_string;
+    StringType switch_value;
+    parse_switches &= arg != kSwitchTerminator;
+    if (i > 1)
+      params.append(StringType(FILE_PATH_LITERAL(" ")));
+    if (parse_switches && IsSwitch(arg, &switch_string, &switch_value)) {
+      params.append(switch_string);
+      if (!switch_value.empty()) {
+#if defined(OS_WIN)
+        switch_value =
+            QuoteForCommandLineToArgvW(switch_value, quote_placeholders);
+#endif
+        params.append(kSwitchValueSeparator + switch_value);
+      }
+    } else {
+#if defined(OS_WIN)
+      arg = QuoteForCommandLineToArgvW(arg, quote_placeholders);
+#endif
+      params.append(arg);
+    }
+  }
+  return params;
+}
+
+// Rebuilds the non-owning lookup map so every StringPiece key and value
+// pointer refers into this object's own |switches_| storage.
+void CommandLine::ResetStringPieces() {
+  switches_by_stringpiece_.clear();
+  for (const auto& entry : switches_)
+    switches_by_stringpiece_[entry.first] = &(entry.second);
+}
+
+} // namespace base
diff --git a/libchrome/base/command_line.h b/libchrome/base/command_line.h
new file mode 100644
index 0000000..3d29f8f
--- /dev/null
+++ b/libchrome/base/command_line.h
@@ -0,0 +1,259 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This class works with command lines: building and parsing.
+// Arguments with prefixes ('--', '-', and on Windows, '/') are switches.
+// Switches will precede all other arguments without switch prefixes.
+// Switches can optionally have values, delimited by '=', e.g., "-switch=value".
+// An argument of "--" will terminate switch parsing during initialization,
+// interpreting subsequent tokens as non-switch arguments, regardless of prefix.
+
+// There is a singleton read-only CommandLine that represents the command line
+// that the current process was started with. It must be initialized in main().
+
+#ifndef BASE_COMMAND_LINE_H_
+#define BASE_COMMAND_LINE_H_
+
+#include <stddef.h>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class FilePath;
+
+class BASE_EXPORT CommandLine {
+ public:
+#if defined(OS_WIN)
+ // The native command line string type.
+ using StringType = string16;
+#elif defined(OS_POSIX)
+ using StringType = std::string;
+#endif
+
+ using CharType = StringType::value_type;
+ using StringVector = std::vector<StringType>;
+ using SwitchMap = std::map<std::string, StringType>;
+ using StringPieceSwitchMap = std::map<StringPiece, const StringType*>;
+
+ // A constructor for CommandLines that only carry switches and arguments.
+ enum NoProgram { NO_PROGRAM };
+ explicit CommandLine(NoProgram no_program);
+
+ // Construct a new command line with |program| as argv[0].
+ explicit CommandLine(const FilePath& program);
+
+ // Construct a new command line from an argument list.
+ CommandLine(int argc, const CharType* const* argv);
+ explicit CommandLine(const StringVector& argv);
+
+ // Override copy and assign to ensure |switches_by_stringpiece_| is valid.
+ CommandLine(const CommandLine& other);
+ CommandLine& operator=(const CommandLine& other);
+
+ ~CommandLine();
+
+#if defined(OS_WIN)
+ // By default this class will treat command-line arguments beginning with
+ // slashes as switches on Windows, but not other platforms.
+ //
+ // If this behavior is inappropriate for your application, you can call this
+ // function BEFORE initializing the current process' global command line
+ // object and the behavior will be the same as Posix systems (only hyphens
+ // begin switches, everything else will be an arg).
+ static void set_slash_is_not_a_switch();
+
+ // Normally when the CommandLine singleton is initialized it gets the command
+ // line via the GetCommandLineW API and then uses the shell32 API
+ // CommandLineToArgvW to parse the command line and convert it back to
+ // argc and argv. Tests who don't want this dependency on shell32 and need
+ // to honor the arguments passed in should use this function.
+ static void InitUsingArgvForTesting(int argc, const char* const* argv);
+#endif
+
+ // Initialize the current process CommandLine singleton. On Windows, ignores
+ // its arguments (we instead parse GetCommandLineW() directly) because we
+ // don't trust the CRT's parsing of the command line, but it still must be
+ // called to set up the command line. Returns false if initialization has
+ // already occurred, and true otherwise. Only the caller receiving a 'true'
+ // return value should take responsibility for calling Reset.
+ static bool Init(int argc, const char* const* argv);
+
+ // Destroys the current process CommandLine singleton. This is necessary if
+ // you want to reset the base library to its initial state (for example, in an
+ // outer library that needs to be able to terminate, and be re-initialized).
+ // If Init is called only once, as in main(), Reset() is not necessary.
+ // Do not call this in tests. Use base::test::ScopedCommandLine instead.
+ static void Reset();
+
+ // Get the singleton CommandLine representing the current process's
+ // command line. Note: returned value is mutable, but not thread safe;
+ // only mutate if you know what you're doing!
+ static CommandLine* ForCurrentProcess();
+
+ // Returns true if the CommandLine has been initialized for the given process.
+ static bool InitializedForCurrentProcess();
+
+#if defined(OS_WIN)
+ static CommandLine FromString(const string16& command_line);
+#endif
+
+ // Initialize from an argv vector.
+ void InitFromArgv(int argc, const CharType* const* argv);
+ void InitFromArgv(const StringVector& argv);
+
+ // Constructs and returns the represented command line string.
+ // CAUTION! This should be avoided on POSIX because quoting behavior is
+ // unclear.
+ StringType GetCommandLineString() const {
+ return GetCommandLineStringInternal(false);
+ }
+
+#if defined(OS_WIN)
+ // Constructs and returns the represented command line string. Assumes the
+ // command line contains placeholders (eg, %1) and quotes any program or
+ // argument with a '%' in it. This should be avoided unless the placeholder is
+ // required by an external interface (eg, the Windows registry), because it is
+ // not generally safe to replace it with an arbitrary string. If possible,
+ // placeholders should be replaced *before* converting the command line to a
+ // string.
+ StringType GetCommandLineStringWithPlaceholders() const {
+ return GetCommandLineStringInternal(true);
+ }
+#endif
+
+ // Constructs and returns the represented arguments string.
+ // CAUTION! This should be avoided on POSIX because quoting behavior is
+ // unclear.
+ StringType GetArgumentsString() const {
+ return GetArgumentsStringInternal(false);
+ }
+
+#if defined(OS_WIN)
+ // Constructs and returns the represented arguments string. Assumes the
+ // command line contains placeholders (eg, %1) and quotes any argument with a
+ // '%' in it. This should be avoided unless the placeholder is required by an
+ // external interface (eg, the Windows registry), because it is not generally
+ // safe to replace it with an arbitrary string. If possible, placeholders
+ // should be replaced *before* converting the arguments to a string.
+ StringType GetArgumentsStringWithPlaceholders() const {
+ return GetArgumentsStringInternal(true);
+ }
+#endif
+
+ // Returns the original command line string as a vector of strings.
+ const StringVector& argv() const { return argv_; }
+
+ // Get and Set the program part of the command line string (the first item).
+ FilePath GetProgram() const;
+ void SetProgram(const FilePath& program);
+
+ // Returns true if this command line contains the given switch.
+ // Switch names must be lowercase.
+  // The second overload provides an optimized version to avoid inlining codegen
+ // at every callsite to find the length of the constant and construct a
+ // StringPiece.
+ bool HasSwitch(const StringPiece& switch_string) const;
+ bool HasSwitch(const char switch_constant[]) const;
+
+ // Returns the value associated with the given switch. If the switch has no
+ // value or isn't present, this method returns the empty string.
+ // Switch names must be lowercase.
+ std::string GetSwitchValueASCII(const StringPiece& switch_string) const;
+ FilePath GetSwitchValuePath(const StringPiece& switch_string) const;
+ StringType GetSwitchValueNative(const StringPiece& switch_string) const;
+
+ // Get a copy of all switches, along with their values.
+ const SwitchMap& GetSwitches() const { return switches_; }
+
+ // Append a switch [with optional value] to the command line.
+ // Note: Switches will precede arguments regardless of appending order.
+ void AppendSwitch(const std::string& switch_string);
+ void AppendSwitchPath(const std::string& switch_string,
+ const FilePath& path);
+ void AppendSwitchNative(const std::string& switch_string,
+ const StringType& value);
+ void AppendSwitchASCII(const std::string& switch_string,
+ const std::string& value);
+
+ // Copy a set of switches (and any values) from another command line.
+ // Commonly used when launching a subprocess.
+ void CopySwitchesFrom(const CommandLine& source,
+ const char* const switches[],
+ size_t count);
+
+ // Get the remaining arguments to the command.
+ StringVector GetArgs() const;
+
+ // Append an argument to the command line. Note that the argument is quoted
+ // properly such that it is interpreted as one argument to the target command.
+ // AppendArg is primarily for ASCII; non-ASCII input is interpreted as UTF-8.
+ // Note: Switches will precede arguments regardless of appending order.
+ void AppendArg(const std::string& value);
+ void AppendArgPath(const FilePath& value);
+ void AppendArgNative(const StringType& value);
+
+ // Append the switches and arguments from another command line to this one.
+ // If |include_program| is true, include |other|'s program as well.
+ void AppendArguments(const CommandLine& other, bool include_program);
+
+ // Insert a command before the current command.
+ // Common for debuggers, like "valgrind" or "gdb --args".
+ void PrependWrapper(const StringType& wrapper);
+
+#if defined(OS_WIN)
+ // Initialize by parsing the given command line string.
+ // The program name is assumed to be the first item in the string.
+ void ParseFromString(const string16& command_line);
+#endif
+
+ private:
+ // Disallow default constructor; a program name must be explicitly specified.
+ CommandLine();
+  // Allow the copy constructor. A common pattern is to copy the current
+ // process's command line and then add some flags to it. For example:
+ // CommandLine cl(*CommandLine::ForCurrentProcess());
+ // cl.AppendSwitch(...);
+
+ // Internal version of GetCommandLineString. If |quote_placeholders| is true,
+ // also quotes parts with '%' in them.
+ StringType GetCommandLineStringInternal(bool quote_placeholders) const;
+
+ // Internal version of GetArgumentsString. If |quote_placeholders| is true,
+ // also quotes parts with '%' in them.
+ StringType GetArgumentsStringInternal(bool quote_placeholders) const;
+
+  // Reconstruct |switches_by_stringpiece_| to be a mirror of |switches_|.
+  // |switches_by_stringpiece_| only contains pointers to objects owned by
+  // |switches_|.
+ void ResetStringPieces();
+
+ // The singleton CommandLine representing the current process's command line.
+ static CommandLine* current_process_commandline_;
+
+ // The argv array: { program, [(--|-|/)switch[=value]]*, [--], [argument]* }
+ StringVector argv_;
+
+ // Parsed-out switch keys and values.
+ SwitchMap switches_;
+
+ // A mirror of |switches_| with only references to the actual strings.
+ // The StringPiece internally holds a pointer to a key in |switches_| while
+ // the mapped_type points to a value in |switches_|.
+ // Used for allocation-free lookups.
+ StringPieceSwitchMap switches_by_stringpiece_;
+
+ // The index after the program and switches, any arguments start here.
+ size_t begin_args_;
+};
+
+} // namespace base
+
+#endif // BASE_COMMAND_LINE_H_
diff --git a/libchrome/base/command_line_unittest.cc b/libchrome/base/command_line_unittest.cc
new file mode 100644
index 0000000..bcfc6c5
--- /dev/null
+++ b/libchrome/base/command_line_unittest.cc
@@ -0,0 +1,409 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/command_line.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// To test Windows quoting behavior, we use a string that has some backslashes
+// and quotes.
+// Consider the command-line argument: q\"bs1\bs2\\bs3q\\\"
+// Here it is with C-style escapes.
+static const CommandLine::StringType kTrickyQuoted =
+ FILE_PATH_LITERAL("q\\\"bs1\\bs2\\\\bs3q\\\\\\\"");
+// It should be parsed by Windows as: q"bs1\bs2\\bs3q\"
+// Here that is with C-style escapes.
+static const CommandLine::StringType kTricky =
+ FILE_PATH_LITERAL("q\"bs1\\bs2\\\\bs3q\\\"");
+
+TEST(CommandLineTest, CommandLineConstructor) {
+ const CommandLine::CharType* argv[] = {
+ FILE_PATH_LITERAL("program"),
+ FILE_PATH_LITERAL("--foo="),
+ FILE_PATH_LITERAL("-bAr"),
+ FILE_PATH_LITERAL("-spaetzel=pierogi"),
+ FILE_PATH_LITERAL("-baz"),
+ FILE_PATH_LITERAL("flim"),
+ FILE_PATH_LITERAL("--other-switches=--dog=canine --cat=feline"),
+ FILE_PATH_LITERAL("-spaetzle=Crepe"),
+ FILE_PATH_LITERAL("-=loosevalue"),
+ FILE_PATH_LITERAL("-"),
+ FILE_PATH_LITERAL("FLAN"),
+ FILE_PATH_LITERAL("a"),
+ FILE_PATH_LITERAL("--input-translation=45--output-rotation"),
+ FILE_PATH_LITERAL("--"),
+ FILE_PATH_LITERAL("--"),
+ FILE_PATH_LITERAL("--not-a-switch"),
+ FILE_PATH_LITERAL("\"in the time of submarines...\""),
+ FILE_PATH_LITERAL("unquoted arg-with-space")};
+ CommandLine cl(arraysize(argv), argv);
+
+ EXPECT_FALSE(cl.GetCommandLineString().empty());
+ EXPECT_FALSE(cl.HasSwitch("cruller"));
+ EXPECT_FALSE(cl.HasSwitch("flim"));
+ EXPECT_FALSE(cl.HasSwitch("program"));
+ EXPECT_FALSE(cl.HasSwitch("dog"));
+ EXPECT_FALSE(cl.HasSwitch("cat"));
+ EXPECT_FALSE(cl.HasSwitch("output-rotation"));
+ EXPECT_FALSE(cl.HasSwitch("not-a-switch"));
+ EXPECT_FALSE(cl.HasSwitch("--"));
+
+ EXPECT_EQ(FilePath(FILE_PATH_LITERAL("program")).value(),
+ cl.GetProgram().value());
+
+ EXPECT_TRUE(cl.HasSwitch("foo"));
+#if defined(OS_WIN)
+ EXPECT_TRUE(cl.HasSwitch("bar"));
+#else
+ EXPECT_FALSE(cl.HasSwitch("bar"));
+#endif
+ EXPECT_TRUE(cl.HasSwitch("baz"));
+ EXPECT_TRUE(cl.HasSwitch("spaetzle"));
+ EXPECT_TRUE(cl.HasSwitch("other-switches"));
+ EXPECT_TRUE(cl.HasSwitch("input-translation"));
+
+ EXPECT_EQ("Crepe", cl.GetSwitchValueASCII("spaetzle"));
+ EXPECT_EQ("", cl.GetSwitchValueASCII("foo"));
+ EXPECT_EQ("", cl.GetSwitchValueASCII("bar"));
+ EXPECT_EQ("", cl.GetSwitchValueASCII("cruller"));
+ EXPECT_EQ("--dog=canine --cat=feline", cl.GetSwitchValueASCII(
+ "other-switches"));
+ EXPECT_EQ("45--output-rotation", cl.GetSwitchValueASCII("input-translation"));
+
+ const CommandLine::StringVector& args = cl.GetArgs();
+ ASSERT_EQ(8U, args.size());
+
+ std::vector<CommandLine::StringType>::const_iterator iter = args.begin();
+ EXPECT_EQ(FILE_PATH_LITERAL("flim"), *iter);
+ ++iter;
+ EXPECT_EQ(FILE_PATH_LITERAL("-"), *iter);
+ ++iter;
+ EXPECT_EQ(FILE_PATH_LITERAL("FLAN"), *iter);
+ ++iter;
+ EXPECT_EQ(FILE_PATH_LITERAL("a"), *iter);
+ ++iter;
+ EXPECT_EQ(FILE_PATH_LITERAL("--"), *iter);
+ ++iter;
+ EXPECT_EQ(FILE_PATH_LITERAL("--not-a-switch"), *iter);
+ ++iter;
+ EXPECT_EQ(FILE_PATH_LITERAL("\"in the time of submarines...\""), *iter);
+ ++iter;
+ EXPECT_EQ(FILE_PATH_LITERAL("unquoted arg-with-space"), *iter);
+ ++iter;
+ EXPECT_TRUE(iter == args.end());
+}
+
+TEST(CommandLineTest, CommandLineFromString) {
+#if defined(OS_WIN)
+ CommandLine cl = CommandLine::FromString(
+ L"program --foo= -bAr /Spaetzel=pierogi /Baz flim "
+ L"--other-switches=\"--dog=canine --cat=feline\" "
+ L"-spaetzle=Crepe -=loosevalue FLAN "
+ L"--input-translation=\"45\"--output-rotation "
+ L"--quotes=" + kTrickyQuoted + L" "
+ L"-- -- --not-a-switch "
+ L"\"in the time of submarines...\"");
+
+ EXPECT_FALSE(cl.GetCommandLineString().empty());
+ EXPECT_FALSE(cl.HasSwitch("cruller"));
+ EXPECT_FALSE(cl.HasSwitch("flim"));
+ EXPECT_FALSE(cl.HasSwitch("program"));
+ EXPECT_FALSE(cl.HasSwitch("dog"));
+ EXPECT_FALSE(cl.HasSwitch("cat"));
+ EXPECT_FALSE(cl.HasSwitch("output-rotation"));
+ EXPECT_FALSE(cl.HasSwitch("not-a-switch"));
+ EXPECT_FALSE(cl.HasSwitch("--"));
+
+ EXPECT_EQ(FilePath(FILE_PATH_LITERAL("program")).value(),
+ cl.GetProgram().value());
+
+ EXPECT_TRUE(cl.HasSwitch("foo"));
+ EXPECT_TRUE(cl.HasSwitch("bar"));
+ EXPECT_TRUE(cl.HasSwitch("baz"));
+ EXPECT_TRUE(cl.HasSwitch("spaetzle"));
+ EXPECT_TRUE(cl.HasSwitch("other-switches"));
+ EXPECT_TRUE(cl.HasSwitch("input-translation"));
+ EXPECT_TRUE(cl.HasSwitch("quotes"));
+
+ EXPECT_EQ("Crepe", cl.GetSwitchValueASCII("spaetzle"));
+ EXPECT_EQ("", cl.GetSwitchValueASCII("foo"));
+ EXPECT_EQ("", cl.GetSwitchValueASCII("bar"));
+ EXPECT_EQ("", cl.GetSwitchValueASCII("cruller"));
+ EXPECT_EQ("--dog=canine --cat=feline", cl.GetSwitchValueASCII(
+ "other-switches"));
+ EXPECT_EQ("45--output-rotation", cl.GetSwitchValueASCII("input-translation"));
+ EXPECT_EQ(kTricky, cl.GetSwitchValueNative("quotes"));
+
+ const CommandLine::StringVector& args = cl.GetArgs();
+ ASSERT_EQ(5U, args.size());
+
+ std::vector<CommandLine::StringType>::const_iterator iter = args.begin();
+ EXPECT_EQ(FILE_PATH_LITERAL("flim"), *iter);
+ ++iter;
+ EXPECT_EQ(FILE_PATH_LITERAL("FLAN"), *iter);
+ ++iter;
+ EXPECT_EQ(FILE_PATH_LITERAL("--"), *iter);
+ ++iter;
+ EXPECT_EQ(FILE_PATH_LITERAL("--not-a-switch"), *iter);
+ ++iter;
+ EXPECT_EQ(FILE_PATH_LITERAL("in the time of submarines..."), *iter);
+ ++iter;
+ EXPECT_TRUE(iter == args.end());
+
+ // Check that a generated string produces an equivalent command line.
+ CommandLine cl_duplicate = CommandLine::FromString(cl.GetCommandLineString());
+ EXPECT_EQ(cl.GetCommandLineString(), cl_duplicate.GetCommandLineString());
+#endif
+}
+
+// Tests behavior with an empty input string.
+TEST(CommandLineTest, EmptyString) {
+#if defined(OS_WIN)
+ CommandLine cl_from_string = CommandLine::FromString(L"");
+ EXPECT_TRUE(cl_from_string.GetCommandLineString().empty());
+ EXPECT_TRUE(cl_from_string.GetProgram().empty());
+ EXPECT_EQ(1U, cl_from_string.argv().size());
+ EXPECT_TRUE(cl_from_string.GetArgs().empty());
+#endif
+ CommandLine cl_from_argv(0, NULL);
+ EXPECT_TRUE(cl_from_argv.GetCommandLineString().empty());
+ EXPECT_TRUE(cl_from_argv.GetProgram().empty());
+ EXPECT_EQ(1U, cl_from_argv.argv().size());
+ EXPECT_TRUE(cl_from_argv.GetArgs().empty());
+}
+
+TEST(CommandLineTest, GetArgumentsString) {
+ static const FilePath::CharType kPath1[] =
+ FILE_PATH_LITERAL("C:\\Some File\\With Spaces.ggg");
+ static const FilePath::CharType kPath2[] =
+ FILE_PATH_LITERAL("C:\\no\\spaces.ggg");
+
+ static const char kFirstArgName[] = "first-arg";
+ static const char kSecondArgName[] = "arg2";
+ static const char kThirdArgName[] = "arg with space";
+ static const char kFourthArgName[] = "nospace";
+ static const char kFifthArgName[] = "%1";
+
+ CommandLine cl(CommandLine::NO_PROGRAM);
+ cl.AppendSwitchPath(kFirstArgName, FilePath(kPath1));
+ cl.AppendSwitchPath(kSecondArgName, FilePath(kPath2));
+ cl.AppendArg(kThirdArgName);
+ cl.AppendArg(kFourthArgName);
+ cl.AppendArg(kFifthArgName);
+
+#if defined(OS_WIN)
+ CommandLine::StringType expected_first_arg(UTF8ToUTF16(kFirstArgName));
+ CommandLine::StringType expected_second_arg(UTF8ToUTF16(kSecondArgName));
+ CommandLine::StringType expected_third_arg(UTF8ToUTF16(kThirdArgName));
+ CommandLine::StringType expected_fourth_arg(UTF8ToUTF16(kFourthArgName));
+ CommandLine::StringType expected_fifth_arg(UTF8ToUTF16(kFifthArgName));
+#elif defined(OS_POSIX)
+ CommandLine::StringType expected_first_arg(kFirstArgName);
+ CommandLine::StringType expected_second_arg(kSecondArgName);
+ CommandLine::StringType expected_third_arg(kThirdArgName);
+ CommandLine::StringType expected_fourth_arg(kFourthArgName);
+ CommandLine::StringType expected_fifth_arg(kFifthArgName);
+#endif
+
+#if defined(OS_WIN)
+#define QUOTE_ON_WIN FILE_PATH_LITERAL("\"")
+#else
+#define QUOTE_ON_WIN FILE_PATH_LITERAL("")
+#endif // OS_WIN
+
+ CommandLine::StringType expected_str;
+ expected_str.append(FILE_PATH_LITERAL("--"))
+ .append(expected_first_arg)
+ .append(FILE_PATH_LITERAL("="))
+ .append(QUOTE_ON_WIN)
+ .append(kPath1)
+ .append(QUOTE_ON_WIN)
+ .append(FILE_PATH_LITERAL(" "))
+ .append(FILE_PATH_LITERAL("--"))
+ .append(expected_second_arg)
+ .append(FILE_PATH_LITERAL("="))
+ .append(QUOTE_ON_WIN)
+ .append(kPath2)
+ .append(QUOTE_ON_WIN)
+ .append(FILE_PATH_LITERAL(" "))
+ .append(QUOTE_ON_WIN)
+ .append(expected_third_arg)
+ .append(QUOTE_ON_WIN)
+ .append(FILE_PATH_LITERAL(" "))
+ .append(expected_fourth_arg)
+ .append(FILE_PATH_LITERAL(" "));
+
+ CommandLine::StringType expected_str_no_quote_placeholders(expected_str);
+ expected_str_no_quote_placeholders.append(expected_fifth_arg);
+ EXPECT_EQ(expected_str_no_quote_placeholders, cl.GetArgumentsString());
+
+#if defined(OS_WIN)
+ CommandLine::StringType expected_str_quote_placeholders(expected_str);
+ expected_str_quote_placeholders.append(QUOTE_ON_WIN)
+ .append(expected_fifth_arg)
+ .append(QUOTE_ON_WIN);
+ EXPECT_EQ(expected_str_quote_placeholders,
+ cl.GetArgumentsStringWithPlaceholders());
+#endif
+}
+
+// Test methods for appending switches to a command line.
+TEST(CommandLineTest, AppendSwitches) {
+ std::string switch1 = "switch1";
+ std::string switch2 = "switch2";
+ std::string value2 = "value";
+ std::string switch3 = "switch3";
+ std::string value3 = "a value with spaces";
+ std::string switch4 = "switch4";
+ std::string value4 = "\"a value with quotes\"";
+ std::string switch5 = "quotes";
+ CommandLine::StringType value5 = kTricky;
+
+ CommandLine cl(FilePath(FILE_PATH_LITERAL("Program")));
+
+ cl.AppendSwitch(switch1);
+ cl.AppendSwitchASCII(switch2, value2);
+ cl.AppendSwitchASCII(switch3, value3);
+ cl.AppendSwitchASCII(switch4, value4);
+ cl.AppendSwitchASCII(switch5, value4);
+ cl.AppendSwitchNative(switch5, value5);
+
+ EXPECT_TRUE(cl.HasSwitch(switch1));
+ EXPECT_TRUE(cl.HasSwitch(switch2));
+ EXPECT_EQ(value2, cl.GetSwitchValueASCII(switch2));
+ EXPECT_TRUE(cl.HasSwitch(switch3));
+ EXPECT_EQ(value3, cl.GetSwitchValueASCII(switch3));
+ EXPECT_TRUE(cl.HasSwitch(switch4));
+ EXPECT_EQ(value4, cl.GetSwitchValueASCII(switch4));
+ EXPECT_TRUE(cl.HasSwitch(switch5));
+ EXPECT_EQ(value5, cl.GetSwitchValueNative(switch5));
+
+#if defined(OS_WIN)
+ EXPECT_EQ(L"Program "
+ L"--switch1 "
+ L"--switch2=value "
+ L"--switch3=\"a value with spaces\" "
+ L"--switch4=\"\\\"a value with quotes\\\"\" "
+ // Even though the switches are unique, appending can add repeat
+ // switches to argv.
+ L"--quotes=\"\\\"a value with quotes\\\"\" "
+ L"--quotes=\"" + kTrickyQuoted + L"\"",
+ cl.GetCommandLineString());
+#endif
+}
+
+TEST(CommandLineTest, AppendSwitchesDashDash) {
+ const CommandLine::CharType* raw_argv[] = { FILE_PATH_LITERAL("prog"),
+ FILE_PATH_LITERAL("--"),
+ FILE_PATH_LITERAL("--arg1") };
+ CommandLine cl(arraysize(raw_argv), raw_argv);
+
+ cl.AppendSwitch("switch1");
+ cl.AppendSwitchASCII("switch2", "foo");
+
+ cl.AppendArg("--arg2");
+
+ EXPECT_EQ(FILE_PATH_LITERAL("prog --switch1 --switch2=foo -- --arg1 --arg2"),
+ cl.GetCommandLineString());
+ CommandLine::StringVector cl_argv = cl.argv();
+ EXPECT_EQ(FILE_PATH_LITERAL("prog"), cl_argv[0]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--switch1"), cl_argv[1]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--switch2=foo"), cl_argv[2]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--"), cl_argv[3]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--arg1"), cl_argv[4]);
+ EXPECT_EQ(FILE_PATH_LITERAL("--arg2"), cl_argv[5]);
+}
+
+// Tests that when AppendArguments is called that the program is set correctly
+// on the target CommandLine object and the switches from the source
+// CommandLine are added to the target.
+TEST(CommandLineTest, AppendArguments) {
+ CommandLine cl1(FilePath(FILE_PATH_LITERAL("Program")));
+ cl1.AppendSwitch("switch1");
+ cl1.AppendSwitchASCII("switch2", "foo");
+
+ CommandLine cl2(CommandLine::NO_PROGRAM);
+ cl2.AppendArguments(cl1, true);
+ EXPECT_EQ(cl1.GetProgram().value(), cl2.GetProgram().value());
+ EXPECT_EQ(cl1.GetCommandLineString(), cl2.GetCommandLineString());
+
+ CommandLine c1(FilePath(FILE_PATH_LITERAL("Program1")));
+ c1.AppendSwitch("switch1");
+ CommandLine c2(FilePath(FILE_PATH_LITERAL("Program2")));
+ c2.AppendSwitch("switch2");
+
+ c1.AppendArguments(c2, true);
+ EXPECT_EQ(c1.GetProgram().value(), c2.GetProgram().value());
+ EXPECT_TRUE(c1.HasSwitch("switch1"));
+ EXPECT_TRUE(c1.HasSwitch("switch2"));
+}
+
+#if defined(OS_WIN)
+// Make sure that the command line string program paths are quoted as necessary.
+// This only makes sense on Windows and the test is basically here to guard
+// against regressions.
+TEST(CommandLineTest, ProgramQuotes) {
+ // Check that quotes are not added for paths without spaces.
+ const FilePath kProgram(L"Program");
+ CommandLine cl_program(kProgram);
+ EXPECT_EQ(kProgram.value(), cl_program.GetProgram().value());
+ EXPECT_EQ(kProgram.value(), cl_program.GetCommandLineString());
+
+ const FilePath kProgramPath(L"Program Path");
+
+ // Check that quotes are not returned from GetProgram().
+ CommandLine cl_program_path(kProgramPath);
+ EXPECT_EQ(kProgramPath.value(), cl_program_path.GetProgram().value());
+
+ // Check that quotes are added to command line string paths containing spaces.
+ CommandLine::StringType cmd_string(cl_program_path.GetCommandLineString());
+ EXPECT_EQ(L"\"Program Path\"", cmd_string);
+
+ // Check the optional quoting of placeholders in programs.
+ CommandLine cl_quote_placeholder(FilePath(L"%1"));
+ EXPECT_EQ(L"%1", cl_quote_placeholder.GetCommandLineString());
+ EXPECT_EQ(L"\"%1\"",
+ cl_quote_placeholder.GetCommandLineStringWithPlaceholders());
+}
+#endif
+
+// Calling Init multiple times should not modify the previous CommandLine.
+TEST(CommandLineTest, Init) {
+ // Call Init without checking output once so we know it's been called
+ // whether or not the test runner does so.
+ CommandLine::Init(0, NULL);
+ CommandLine* initial = CommandLine::ForCurrentProcess();
+ EXPECT_FALSE(CommandLine::Init(0, NULL));
+ CommandLine* current = CommandLine::ForCurrentProcess();
+ EXPECT_EQ(initial, current);
+}
+
+// Test that copies of CommandLine have a valid StringPiece map.
+TEST(CommandLineTest, Copy) {
+ std::unique_ptr<CommandLine> initial(
+ new CommandLine(CommandLine::NO_PROGRAM));
+ initial->AppendSwitch("a");
+ initial->AppendSwitch("bbbbbbbbbbbbbbb");
+ initial->AppendSwitch("c");
+ CommandLine copy_constructed(*initial);
+ CommandLine assigned = *initial;
+ CommandLine::SwitchMap switch_map = initial->GetSwitches();
+ initial.reset();
+ for (const auto& pair : switch_map)
+ EXPECT_TRUE(copy_constructed.HasSwitch(pair.first));
+ for (const auto& pair : switch_map)
+ EXPECT_TRUE(assigned.HasSwitch(pair.first));
+}
+
+} // namespace base
diff --git a/libchrome/base/compiler_specific.h b/libchrome/base/compiler_specific.h
new file mode 100644
index 0000000..c2a02de
--- /dev/null
+++ b/libchrome/base/compiler_specific.h
@@ -0,0 +1,203 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_COMPILER_SPECIFIC_H_
+#define BASE_COMPILER_SPECIFIC_H_
+
+#include "build/build_config.h"
+
+#if defined(ANDROID)
+// Prefer Android's libbase definitions to our own.
+#include <android-base/macros.h>
+#endif // defined(ANDROID)
+
+#if defined(COMPILER_MSVC)
+
+// For _Printf_format_string_.
+#include <sal.h>
+
+// Macros for suppressing and disabling warnings on MSVC.
+//
+// Warning numbers are enumerated at:
+// http://msdn.microsoft.com/en-us/library/8x5x43k7(VS.80).aspx
+//
+// The warning pragma:
+// http://msdn.microsoft.com/en-us/library/2c8f766e(VS.80).aspx
+//
+// Using __pragma instead of #pragma inside macros:
+// http://msdn.microsoft.com/en-us/library/d9x1s805.aspx
+
+// MSVC_SUPPRESS_WARNING disables warning |n| for the remainder of the line and
+// for the next line of the source file.
+#define MSVC_SUPPRESS_WARNING(n) __pragma(warning(suppress:n))
+
+// MSVC_PUSH_DISABLE_WARNING pushes |n| onto a stack of warnings to be disabled.
+// The warning remains disabled until popped by MSVC_POP_WARNING.
+#define MSVC_PUSH_DISABLE_WARNING(n) __pragma(warning(push)) \
+ __pragma(warning(disable:n))
+
+// MSVC_PUSH_WARNING_LEVEL pushes |n| as the global warning level. The level
+// remains in effect until popped by MSVC_POP_WARNING(). Use 0 to disable all
+// warnings.
+#define MSVC_PUSH_WARNING_LEVEL(n) __pragma(warning(push, n))
+
+// Pop effects of innermost MSVC_PUSH_* macro.
+#define MSVC_POP_WARNING() __pragma(warning(pop))
+
+#define MSVC_DISABLE_OPTIMIZE() __pragma(optimize("", off))
+#define MSVC_ENABLE_OPTIMIZE() __pragma(optimize("", on))
+
+// Allows exporting a class that inherits from a non-exported base class.
+// This uses suppress instead of push/pop because the delimiter after the
+// declaration (either "," or "{") has to be placed before the pop macro.
+//
+// Example usage:
+// class EXPORT_API Foo : NON_EXPORTED_BASE(public Bar) {
+//
+// MSVC Compiler warning C4275:
+// non dll-interface class 'Bar' used as base for dll-interface class 'Foo'.
+// Note that this is intended to be used only when no access to the base class'
+// static data is done through derived classes or inline methods. For more info,
+// see http://msdn.microsoft.com/en-us/library/3tdb471s(VS.80).aspx
+#define NON_EXPORTED_BASE(code) MSVC_SUPPRESS_WARNING(4275) \
+ code
+
+#else // Not MSVC
+
+#define _Printf_format_string_
+#define MSVC_SUPPRESS_WARNING(n)
+#define MSVC_PUSH_DISABLE_WARNING(n)
+#define MSVC_PUSH_WARNING_LEVEL(n)
+#define MSVC_POP_WARNING()
+#define MSVC_DISABLE_OPTIMIZE()
+#define MSVC_ENABLE_OPTIMIZE()
+#define NON_EXPORTED_BASE(code) code
+
+#endif // COMPILER_MSVC
+
+
+// Annotate a variable indicating it's ok if the variable is not used.
+// (Typically used to silence a compiler warning when the assignment
+// is important for some other reason.)
+// Use like:
+// int x = ...;
+// ALLOW_UNUSED_LOCAL(x);
+#define ALLOW_UNUSED_LOCAL(x) false ? (void)x : (void)0
+
+// Annotate a typedef or function indicating it's ok if it's not used.
+// Use like:
+// typedef Foo Bar ALLOW_UNUSED_TYPE;
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define ALLOW_UNUSED_TYPE __attribute__((unused))
+#else
+#define ALLOW_UNUSED_TYPE
+#endif
+
+// Annotate a function indicating it should not be inlined.
+// Use like:
+// NOINLINE void DoStuff() { ... }
+#if defined(COMPILER_GCC)
+#define NOINLINE __attribute__((noinline))
+#elif defined(COMPILER_MSVC)
+#define NOINLINE __declspec(noinline)
+#else
+#define NOINLINE
+#endif
+
+// Specify memory alignment for structs, classes, etc.
+// Use like:
+// class ALIGNAS(16) MyClass { ... }
+// ALIGNAS(16) int array[4];
+#if defined(COMPILER_MSVC)
+#define ALIGNAS(byte_alignment) __declspec(align(byte_alignment))
+#elif defined(COMPILER_GCC)
+#define ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
+#endif
+
+// Return the byte alignment of the given type (available at compile time).
+// Use like:
+// ALIGNOF(int32_t) // this would be 4
+#if defined(COMPILER_MSVC)
+#define ALIGNOF(type) __alignof(type)
+#elif defined(COMPILER_GCC)
+#define ALIGNOF(type) __alignof__(type)
+#endif
+
+// Annotate a function indicating the caller must examine the return value.
+// Use like:
+// int foo() WARN_UNUSED_RESULT;
+// To explicitly ignore a result, see |ignore_result()| in base/macros.h.
+#undef WARN_UNUSED_RESULT
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#else
+#define WARN_UNUSED_RESULT
+#endif
+
+// Tell the compiler a function is using a printf-style format string.
+// |format_param| is the one-based index of the format string parameter;
+// |dots_param| is the one-based index of the "..." parameter.
+// For v*printf functions (which take a va_list), pass 0 for dots_param.
+// (This is undocumented but matches what the system C headers do.)
+#if defined(COMPILER_GCC)
+#define PRINTF_FORMAT(format_param, dots_param) \
+ __attribute__((format(printf, format_param, dots_param)))
+#else
+#define PRINTF_FORMAT(format_param, dots_param)
+#endif
+
+// WPRINTF_FORMAT is the same, but for wide format strings.
+// This doesn't appear to yet be implemented in any compiler.
+// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=38308 .
+#define WPRINTF_FORMAT(format_param, dots_param)
+// If available, it would look like:
+// __attribute__((format(wprintf, format_param, dots_param)))
+
+// MemorySanitizer annotations.
+#if defined(MEMORY_SANITIZER) && !defined(OS_NACL)
+#include <sanitizer/msan_interface.h>
+
+// Mark a memory region fully initialized.
+// Use this to annotate code that deliberately reads uninitialized data, for
+// example a GC scavenging root set pointers from the stack.
+#define MSAN_UNPOISON(p, size) __msan_unpoison(p, size)
+
+// Check a memory region for initializedness, as if it was being used here.
+// If any bits are uninitialized, crash with an MSan report.
+// Use this to sanitize data which MSan won't be able to track, e.g. before
+// passing data to another process via shared memory.
+#define MSAN_CHECK_MEM_IS_INITIALIZED(p, size) \
+ __msan_check_mem_is_initialized(p, size)
+#else // MEMORY_SANITIZER
+#define MSAN_UNPOISON(p, size)
+#define MSAN_CHECK_MEM_IS_INITIALIZED(p, size)
+#endif // MEMORY_SANITIZER
+
+// Macro useful for writing cross-platform function pointers.
+#if !defined(CDECL)
+#if defined(OS_WIN)
+#define CDECL __cdecl
+#else // defined(OS_WIN)
+#define CDECL
+#endif // defined(OS_WIN)
+#endif // !defined(CDECL)
+
+// Macro for hinting that an expression is likely to be false.
+#if !defined(UNLIKELY)
+#if defined(COMPILER_GCC)
+#define UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define UNLIKELY(x) (x)
+#endif // defined(COMPILER_GCC)
+#endif // !defined(UNLIKELY)
+
+// Compiler feature-detection.
+// clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension
+#if defined(__has_feature)
+#define HAS_FEATURE(FEATURE) __has_feature(FEATURE)
+#else
+#define HAS_FEATURE(FEATURE) 0
+#endif
+
+#endif // BASE_COMPILER_SPECIFIC_H_
diff --git a/libchrome/base/containers/adapters.h b/libchrome/base/containers/adapters.h
new file mode 100644
index 0000000..fa671b4
--- /dev/null
+++ b/libchrome/base/containers/adapters.h
@@ -0,0 +1,73 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_ADAPTERS_H_
+#define BASE_CONTAINERS_ADAPTERS_H_
+
+#include <stddef.h>
+
+#include <iterator>
+
+#include "base/macros.h"
+
+namespace base {
+
+namespace internal {
+
+// Internal adapter class for implementing base::Reversed.
+template <typename T>
+class ReversedAdapter {
+ public:
+ using Iterator = decltype(static_cast<T*>(nullptr)->rbegin());
+
+ explicit ReversedAdapter(T& t) : t_(t) {}
+ ReversedAdapter(const ReversedAdapter& ra) : t_(ra.t_) {}
+
+ // TODO(mdempsky): Once we can use C++14 library features, use std::rbegin
+ // and std::rend instead, so we can remove the specialization below.
+ Iterator begin() const { return t_.rbegin(); }
+ Iterator end() const { return t_.rend(); }
+
+ private:
+ T& t_;
+
+ DISALLOW_ASSIGN(ReversedAdapter);
+};
+
+template <typename T, size_t N>
+class ReversedAdapter<T[N]> {
+ public:
+ using Iterator = std::reverse_iterator<T*>;
+
+ explicit ReversedAdapter(T (&t)[N]) : t_(t) {}
+ ReversedAdapter(const ReversedAdapter& ra) : t_(ra.t_) {}
+
+ Iterator begin() const { return Iterator(&t_[N]); }
+ Iterator end() const { return Iterator(&t_[0]); }
+
+ private:
+ T (&t_)[N];
+
+ DISALLOW_ASSIGN(ReversedAdapter);
+};
+
+} // namespace internal
+
+// Reversed returns a container adapter usable in a range-based "for" statement
+// for iterating a reversible container in reverse order.
+//
+// Example:
+//
+// std::vector<int> v = ...;
+// for (int i : base::Reversed(v)) {
+// // iterates through v from back to front
+// }
+template <typename T>
+internal::ReversedAdapter<T> Reversed(T& t) {
+ return internal::ReversedAdapter<T>(t);
+}
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_ADAPTERS_H_
diff --git a/libchrome/base/containers/hash_tables.h b/libchrome/base/containers/hash_tables.h
new file mode 100644
index 0000000..8da7b67
--- /dev/null
+++ b/libchrome/base/containers/hash_tables.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_HASH_TABLES_H_
+#define BASE_CONTAINERS_HASH_TABLES_H_
+
+#include <cstddef>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+
+#include "base/hash.h"
+
+// This header file is deprecated. Use the corresponding C++11 type
+// instead. https://crbug.com/576864
+
+// Use a custom hasher instead.
+#define BASE_HASH_NAMESPACE base_hash
+
+namespace BASE_HASH_NAMESPACE {
+
+// A separate hasher which, by default, forwards to std::hash. This is so legacy
+// uses of BASE_HASH_NAMESPACE with base::hash_map do not interfere with
+// std::hash mid-transition.
+template<typename T>
+struct hash {
+ std::size_t operator()(const T& value) const { return std::hash<T>()(value); }
+};
+
+// Use base::IntPairHash from base/hash.h as a custom hasher instead.
+template <typename Type1, typename Type2>
+struct hash<std::pair<Type1, Type2>> {
+ std::size_t operator()(std::pair<Type1, Type2> value) const {
+ return base::HashInts(value.first, value.second);
+ }
+};
+
+} // namespace BASE_HASH_NAMESPACE
+
+namespace base {
+
+// Use std::unordered_map instead.
+template <class Key,
+ class T,
+ class Hash = BASE_HASH_NAMESPACE::hash<Key>,
+ class Pred = std::equal_to<Key>,
+ class Alloc = std::allocator<std::pair<const Key, T>>>
+using hash_map = std::unordered_map<Key, T, Hash, Pred, Alloc>;
+
+// Use std::unordered_multimap instead.
+template <class Key,
+ class T,
+ class Hash = BASE_HASH_NAMESPACE::hash<Key>,
+ class Pred = std::equal_to<Key>,
+ class Alloc = std::allocator<std::pair<const Key, T>>>
+using hash_multimap = std::unordered_multimap<Key, T, Hash, Pred, Alloc>;
+
+// Use std::unordered_multiset instead.
+template <class Key,
+ class Hash = BASE_HASH_NAMESPACE::hash<Key>,
+ class Pred = std::equal_to<Key>,
+ class Alloc = std::allocator<Key>>
+using hash_multiset = std::unordered_multiset<Key, Hash, Pred, Alloc>;
+
+// Use std::unordered_set instead.
+template <class Key,
+ class Hash = BASE_HASH_NAMESPACE::hash<Key>,
+ class Pred = std::equal_to<Key>,
+ class Alloc = std::allocator<Key>>
+using hash_set = std::unordered_set<Key, Hash, Pred, Alloc>;
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_HASH_TABLES_H_
diff --git a/libchrome/base/containers/linked_list.h b/libchrome/base/containers/linked_list.h
new file mode 100644
index 0000000..41461ff
--- /dev/null
+++ b/libchrome/base/containers/linked_list.h
@@ -0,0 +1,176 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_LINKED_LIST_H_
+#define BASE_CONTAINERS_LINKED_LIST_H_
+
+#include "base/macros.h"
+
+// Simple LinkedList type. (See the Q&A section to understand how this
+// differs from std::list).
+//
+// To use, start by declaring the class which will be contained in the linked
+// list, as extending LinkNode (this gives it next/previous pointers).
+//
+// class MyNodeType : public LinkNode<MyNodeType> {
+// ...
+// };
+//
+// Next, to keep track of the list's head/tail, use a LinkedList instance:
+//
+// LinkedList<MyNodeType> list;
+//
+// To add elements to the list, use any of LinkedList::Append,
+// LinkNode::InsertBefore, or LinkNode::InsertAfter:
+//
+// LinkNode<MyNodeType>* n1 = ...;
+// LinkNode<MyNodeType>* n2 = ...;
+// LinkNode<MyNodeType>* n3 = ...;
+//
+// list.Append(n1);
+// list.Append(n3);
+//   n2->InsertBefore(n3);
+//
+// Lastly, to iterate through the linked list forwards:
+//
+// for (LinkNode<MyNodeType>* node = list.head();
+// node != list.end();
+// node = node->next()) {
+// MyNodeType* value = node->value();
+// ...
+// }
+//
+// Or to iterate the linked list backwards:
+//
+// for (LinkNode<MyNodeType>* node = list.tail();
+// node != list.end();
+// node = node->previous()) {
+// MyNodeType* value = node->value();
+// ...
+// }
+//
+// Questions and Answers:
+//
+// Q. Should I use std::list or base::LinkedList?
+//
+// A. The main reason to use base::LinkedList over std::list is
+// performance. If you don't care about the performance differences
+// then use an STL container, as it makes for better code readability.
+//
+// Comparing the performance of base::LinkedList<T> to std::list<T*>:
+//
+// * Erasing an element of type T* from base::LinkedList<T> is
+// an O(1) operation. Whereas for std::list<T*> it is O(n).
+// That is because with std::list<T*> you must obtain an
+// iterator to the T* element before you can call erase(iterator).
+//
+// * Insertion operations with base::LinkedList<T> never require
+// heap allocations.
+//
+// Q. How does base::LinkedList implementation differ from std::list?
+//
+// A. Doubly-linked lists are made up of nodes that contain "next" and
+// "previous" pointers that reference other nodes in the list.
+//
+// With base::LinkedList<T>, the type being inserted already reserves
+// space for the "next" and "previous" pointers (base::LinkNode<T>*).
+// Whereas with std::list<T> the type can be anything, so the implementation
+// needs to glue on the "next" and "previous" pointers using
+// some internal node type.
+
+namespace base {
+
+// A node in the circular doubly-linked list. T is the concrete element type
+// and must derive from LinkNode<T> (CRTP); that inheritance is what makes the
+// static_casts in value() valid.
+template <typename T>
+class LinkNode {
+ public:
+  // Constructs a detached node (not a member of any list).
+  LinkNode() : previous_(NULL), next_(NULL) {}
+  LinkNode(LinkNode<T>* previous, LinkNode<T>* next)
+      : previous_(previous), next_(next) {}
+
+  // Insert |this| into the linked list, before |e|.
+  // Note: does not unlink |this| first; callers insert detached nodes.
+  void InsertBefore(LinkNode<T>* e) {
+    this->next_ = e;
+    this->previous_ = e->previous_;
+    e->previous_->next_ = this;
+    e->previous_ = this;
+  }
+
+  // Insert |this| into the linked list, after |e|.
+  void InsertAfter(LinkNode<T>* e) {
+    this->next_ = e->next_;
+    this->previous_ = e;
+    e->next_->previous_ = this;
+    e->next_ = this;
+  }
+
+  // Remove |this| from the linked list.
+  void RemoveFromList() {
+    this->previous_->next_ = this->next_;
+    this->next_->previous_ = this->previous_;
+    // Reset the pointers so a detached node is recognizable: next() and
+    // previous() are non-NULL if and only if this node is in a list.
+    this->next_ = NULL;
+    this->previous_ = NULL;
+  }
+
+  LinkNode<T>* previous() const {
+    return previous_;
+  }
+
+  LinkNode<T>* next() const {
+    return next_;
+  }
+
+  // Cast from the node-type to the value type.
+  const T* value() const {
+    return static_cast<const T*>(this);
+  }
+
+  T* value() {
+    return static_cast<T*>(this);
+  }
+
+ private:
+  LinkNode<T>* previous_;
+  LinkNode<T>* next_;
+
+  DISALLOW_COPY_AND_ASSIGN(LinkNode);
+};
+
+// Intrusive doubly-linked list of LinkNode<T>. The list does not own its
+// nodes; callers manage node lifetimes.
+template <typename T>
+class LinkedList {
+ public:
+  // The "root" node is self-referential, and forms the basis of a circular
+  // list (root_.next() will point back to the start of the list,
+  // and root_->previous() wraps around to the end of the list).
+  LinkedList() : root_(&root_, &root_) {}
+
+  // Appends |e| to the end of the linked list.
+  void Append(LinkNode<T>* e) {
+    e->InsertBefore(&root_);
+  }
+
+  LinkNode<T>* head() const {
+    return root_.next();
+  }
+
+  LinkNode<T>* tail() const {
+    return root_.previous();
+  }
+
+  // Sentinel marking one-past-the-end; head() == end() when the list is
+  // empty, since the root node is self-referential.
+  const LinkNode<T>* end() const {
+    return &root_;
+  }
+
+  bool empty() const { return head() == end(); }
+
+ private:
+  // Sentinel node; its neighbors are the real head and tail.
+  LinkNode<T> root_;
+
+  DISALLOW_COPY_AND_ASSIGN(LinkedList);
+};
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_LINKED_LIST_H_
diff --git a/libchrome/base/containers/mru_cache.h b/libchrome/base/containers/mru_cache.h
new file mode 100644
index 0000000..6c1d626
--- /dev/null
+++ b/libchrome/base/containers/mru_cache.h
@@ -0,0 +1,256 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains a template for a Most Recently Used cache that allows
+// constant-time access to items using a key, but easy identification of the
+// least-recently-used items for removal. Each key can only be associated with
+// one payload item at a time.
+//
+// The key object will be stored twice, so it should support efficient copying.
+//
+// NOTE: While all operations are O(1), this code is written for
+// legibility rather than optimality. If future profiling identifies this as
+// a bottleneck, there is room for smaller values of 1 in the O(1). :]
+
+#ifndef BASE_CONTAINERS_MRU_CACHE_H_
+#define BASE_CONTAINERS_MRU_CACHE_H_
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <functional>
+#include <list>
+#include <map>
+#include <unordered_map>
+#include <utility>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace base {
+
+// MRUCacheBase ----------------------------------------------------------------
+
+// This template is used to standardize map type containers that can be used
+// by MRUCacheBase. This level of indirection is necessary because of the way
+// that template template params and default template params interact.
+template <class KeyType, class ValueType, class CompareType>
+struct MRUCacheStandardMap {
+  // Adapter giving std::map a uniform three-parameter shape so MRUCacheBase
+  // can accept it as a template template parameter (see MRUCacheHashMap for
+  // the unordered_map analogue).
+  typedef std::map<KeyType, ValueType, CompareType> Type;
+};
+
+// Base class for the MRU cache specializations defined below.
+template <class KeyType,
+          class PayloadType,
+          class HashOrCompareType,
+          template <typename, typename, typename> class MapType =
+              MRUCacheStandardMap>
+class MRUCacheBase {
+ public:
+  // The payload of the list. This maintains a copy of the key so we can
+  // efficiently delete things given an element of the list.
+  typedef std::pair<KeyType, PayloadType> value_type;
+
+ private:
+  // Recency-ordered storage: front is most recently used, back is oldest.
+  typedef std::list<value_type> PayloadList;
+  // Maps each key to its node in |ordering_| so lookups are O(1)/O(log n)
+  // (depending on MapType) and splicing to the front is O(1).
+  typedef typename MapType<KeyType,
+                           typename PayloadList::iterator,
+                           HashOrCompareType>::Type KeyIndex;
+
+ public:
+  typedef typename PayloadList::size_type size_type;
+
+  typedef typename PayloadList::iterator iterator;
+  typedef typename PayloadList::const_iterator const_iterator;
+  typedef typename PayloadList::reverse_iterator reverse_iterator;
+  typedef typename PayloadList::const_reverse_iterator const_reverse_iterator;
+
+  enum { NO_AUTO_EVICT = 0 };
+
+  // The max_size is the size at which the cache will prune its members to when
+  // a new item is inserted. If the caller wants to manage this itself (for
+  // example, maybe it has special work to do when something is evicted), it
+  // can pass NO_AUTO_EVICT to not restrict the cache size.
+  explicit MRUCacheBase(size_type max_size) : max_size_(max_size) {}
+
+  virtual ~MRUCacheBase() {}
+
+  size_type max_size() const { return max_size_; }
+
+  // Inserts a payload item with the given key. If an existing item has
+  // the same key, it is removed prior to insertion. An iterator indicating the
+  // inserted item will be returned (this will always be the front of the list).
+  //
+  // The payload will be forwarded.
+  template <typename Payload>
+  iterator Put(const KeyType& key, Payload&& payload) {
+    // Remove any existing payload with that key.
+    typename KeyIndex::iterator index_iter = index_.find(key);
+    if (index_iter != index_.end()) {
+      // Erase the reference to it. The index reference will be replaced in the
+      // code below.
+      Erase(index_iter->second);
+    } else if (max_size_ != NO_AUTO_EVICT) {
+      // New item is being inserted which might make it larger than the maximum
+      // size: kick the oldest thing out if necessary.
+      ShrinkToSize(max_size_ - 1);
+    }
+
+    ordering_.push_front(value_type(key, std::forward<Payload>(payload)));
+    index_.insert(std::make_pair(key, ordering_.begin()));
+    return ordering_.begin();
+  }
+
+  // Retrieves the contents of the given key, or end() if not found. This method
+  // has the side effect of moving the requested item to the front of the
+  // recency list.
+  //
+  // TODO(brettw) We may want a const version of this function in the future.
+  iterator Get(const KeyType& key) {
+    typename KeyIndex::iterator index_iter = index_.find(key);
+    if (index_iter == index_.end())
+      return end();
+    typename PayloadList::iterator iter = index_iter->second;
+
+    // Move the touched item to the front of the recency ordering.
+    ordering_.splice(ordering_.begin(), ordering_, iter);
+    return ordering_.begin();
+  }
+
+  // Retrieves the payload associated with a given key and returns it via
+  // result without affecting the ordering (unlike Get).
+  iterator Peek(const KeyType& key) {
+    typename KeyIndex::const_iterator index_iter = index_.find(key);
+    if (index_iter == index_.end())
+      return end();
+    return index_iter->second;
+  }
+
+  const_iterator Peek(const KeyType& key) const {
+    typename KeyIndex::const_iterator index_iter = index_.find(key);
+    if (index_iter == index_.end())
+      return end();
+    return index_iter->second;
+  }
+
+  // Exchanges the contents of |this| by the contents of the |other|.
+  void Swap(MRUCacheBase& other) {
+    ordering_.swap(other.ordering_);
+    index_.swap(other.index_);
+    std::swap(max_size_, other.max_size_);
+  }
+
+  // Erases the item referenced by the given iterator. An iterator to the item
+  // following it will be returned. The iterator must be valid.
+  iterator Erase(iterator pos) {
+    index_.erase(pos->first);
+    return ordering_.erase(pos);
+  }
+
+  // MRUCache entries are often processed in reverse order, so we add this
+  // convenience function (not typically defined by STL containers).
+  reverse_iterator Erase(reverse_iterator pos) {
+    // We have to actually give it the incremented iterator to delete, since
+    // the forward iterator that base() returns is actually one past the item
+    // being iterated over.
+    return reverse_iterator(Erase((++pos).base()));
+  }
+
+  // Shrinks the cache so it only holds |new_size| items. If |new_size| is
+  // bigger or equal to the current number of items, this will do nothing.
+  // Eviction proceeds from the back of |ordering_|, i.e. oldest first.
+  void ShrinkToSize(size_type new_size) {
+    for (size_type i = size(); i > new_size; i--)
+      Erase(rbegin());
+  }
+
+  // Deletes everything from the cache.
+  void Clear() {
+    index_.clear();
+    ordering_.clear();
+  }
+
+  // Returns the number of elements in the cache.
+  size_type size() const {
+    // We don't use ordering_.size() for the return value because
+    // (as a linked list) it can be O(n).
+    DCHECK(index_.size() == ordering_.size());
+    return index_.size();
+  }
+
+  // Allows iteration over the list. Forward iteration starts with the most
+  // recent item and works backwards.
+  //
+  // Note that since these iterators are actually iterators over a list, you
+  // can keep them as you insert or delete things (as long as you don't delete
+  // the one you are pointing to) and they will still be valid.
+  iterator begin() { return ordering_.begin(); }
+  const_iterator begin() const { return ordering_.begin(); }
+  iterator end() { return ordering_.end(); }
+  const_iterator end() const { return ordering_.end(); }
+
+  reverse_iterator rbegin() { return ordering_.rbegin(); }
+  const_reverse_iterator rbegin() const { return ordering_.rbegin(); }
+  reverse_iterator rend() { return ordering_.rend(); }
+  const_reverse_iterator rend() const { return ordering_.rend(); }
+
+  bool empty() const { return ordering_.empty(); }
+
+ private:
+  PayloadList ordering_;
+  KeyIndex index_;
+
+  size_type max_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(MRUCacheBase);
+};
+
+// MRUCache --------------------------------------------------------------------
+
+// A container that does not do anything to free its data. Use this when storing
+// value types (as opposed to pointers) in the list.
+// Ordered (std::map / std::less) flavor of the MRU cache. Values are stored
+// by value; nothing is freed beyond normal destruction.
+template <class KeyType, class PayloadType>
+class MRUCache : public MRUCacheBase<KeyType, PayloadType, std::less<KeyType>> {
+ private:
+  using ParentType = MRUCacheBase<KeyType, PayloadType, std::less<KeyType>>;
+
+ public:
+  // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
+  explicit MRUCache(typename ParentType::size_type max_size)
+      : ParentType(max_size) {}
+  virtual ~MRUCache() {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MRUCache);
+};
+
+// HashingMRUCache ------------------------------------------------------------
+
+template <class KeyType, class ValueType, class HashType>
+struct MRUCacheHashMap {
+  // std::unordered_map adapter matching MRUCacheStandardMap's shape, for use
+  // as MRUCacheBase's MapType template template parameter.
+  typedef std::unordered_map<KeyType, ValueType, HashType> Type;
+};
+
+// This class is similar to MRUCache, except that it uses std::unordered_map as
+// the map type instead of std::map. Note that your KeyType must be hashable to
+// use this cache or you need to provide a hashing class.
+// Hash-indexed flavor of the MRU cache (see file comment above). KeyType must
+// be hashable by HashType, which defaults to std::hash<KeyType>.
+template <class KeyType, class PayloadType, class HashType = std::hash<KeyType>>
+class HashingMRUCache
+    : public MRUCacheBase<KeyType, PayloadType, HashType, MRUCacheHashMap> {
+ private:
+  using ParentType =
+      MRUCacheBase<KeyType, PayloadType, HashType, MRUCacheHashMap>;
+
+ public:
+  // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
+  explicit HashingMRUCache(typename ParentType::size_type max_size)
+      : ParentType(max_size) {}
+  virtual ~HashingMRUCache() {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HashingMRUCache);
+};
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_MRU_CACHE_H_
diff --git a/libchrome/base/containers/scoped_ptr_hash_map.h b/libchrome/base/containers/scoped_ptr_hash_map.h
new file mode 100644
index 0000000..f513f06
--- /dev/null
+++ b/libchrome/base/containers/scoped_ptr_hash_map.h
@@ -0,0 +1,176 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_SCOPED_PTR_HASH_MAP_H_
+#define BASE_CONTAINERS_SCOPED_PTR_HASH_MAP_H_
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/stl_util.h"
+
+namespace base {
+
+// Deprecated. Use std::unordered_map instead. https://crbug.com/579229
+//
+// This type acts like a hash_map<K, std::unique_ptr<V, D> >, based on top of
+// base::hash_map. The ScopedPtrHashMap has ownership of all values in the data
+// structure.
+template <typename Key, typename ScopedPtr>
+class ScopedPtrHashMap {
+  // Values are stored as raw pointers; ownership is handled manually by
+  // re-wrapping them in ScopedPtr wherever deletion happens (set/erase/clear).
+  typedef base::hash_map<Key, typename ScopedPtr::element_type*> Container;
+
+ public:
+  typedef typename Container::key_type key_type;
+  typedef typename Container::mapped_type mapped_type;
+  typedef typename Container::value_type value_type;
+  typedef typename Container::iterator iterator;
+  typedef typename Container::const_iterator const_iterator;
+
+  ScopedPtrHashMap() {}
+
+  // Deletes all owned values.
+  ~ScopedPtrHashMap() { clear(); }
+
+  void swap(ScopedPtrHashMap<Key, ScopedPtr>& other) {
+    data_.swap(other.data_);
+  }
+
+  // Replaces value but not key if key is already present.
+  iterator set(const Key& key, ScopedPtr data) {
+    iterator it = find(key);
+    if (it != end()) {
+      // Let ScopedPtr decide how to delete. For example, it may use custom
+      // deleter.
+      ScopedPtr(it->second).reset();
+      it->second = data.release();
+      return it;
+    }
+
+    return data_.insert(std::make_pair(key, data.release())).first;
+  }
+
+  // Does nothing if key is already present.
+  std::pair<iterator, bool> add(const Key& key, ScopedPtr data) {
+    std::pair<iterator, bool> result =
+        data_.insert(std::make_pair(key, data.get()));
+    if (result.second)
+      ::ignore_result(data.release());
+    // On insert failure |data| keeps ownership and frees the value when it
+    // goes out of scope, matching the "does nothing" contract above.
+    return result;
+  }
+
+  // Deletes the owned value and removes the entry.
+  void erase(iterator it) {
+    // Let ScopedPtr decide how to delete.
+    ScopedPtr(it->second).reset();
+    data_.erase(it);
+  }
+
+  // Returns the number of entries removed (0 or 1).
+  size_t erase(const Key& k) {
+    iterator it = data_.find(k);
+    if (it == data_.end())
+      return 0;
+    erase(it);
+    return 1;
+  }
+
+  // Transfers ownership of the value out of the map. Note that the entry is
+  // NOT removed: it stays behind with a NULL value under the same key.
+  ScopedPtr take(iterator it) {
+    DCHECK(it != data_.end());
+    if (it == data_.end())
+      return ScopedPtr();
+
+    ScopedPtr ret(it->second);
+    it->second = NULL;
+    return ret;
+  }
+
+  ScopedPtr take(const Key& k) {
+    iterator it = find(k);
+    if (it == data_.end())
+      return ScopedPtr();
+
+    return take(it);
+  }
+
+  // Transfers ownership of the value out of the map and removes the entry.
+  ScopedPtr take_and_erase(iterator it) {
+    DCHECK(it != data_.end());
+    if (it == data_.end())
+      return ScopedPtr();
+
+    ScopedPtr ret(it->second);
+    data_.erase(it);
+    return ret;
+  }
+
+  ScopedPtr take_and_erase(const Key& k) {
+    iterator it = find(k);
+    if (it == data_.end())
+      return ScopedPtr();
+
+    return take_and_erase(it);
+  }
+
+  // Returns the element in the hash_map that matches the given key.
+  // If no such element exists it returns NULL. Ownership is NOT transferred.
+  typename ScopedPtr::element_type* get(const Key& k) const {
+    const_iterator it = find(k);
+    if (it == end())
+      return NULL;
+    return it->second;
+  }
+
+  inline bool contains(const Key& k) const { return data_.count(k) > 0; }
+
+  // Deletes every owned value, then empties the map.
+  inline void clear() {
+    auto it = data_.begin();
+    while (it != data_.end()) {
+      // NOTE: Like STLDeleteContainerPointers, deleting behind the iterator.
+      // Deleting the value does not always invalidate the iterator, but it may
+      // do so if the key is a pointer into the value object.
+      auto temp = it;
+      ++it;
+      // Let ScopedPtr decide how to delete.
+      ScopedPtr(temp->second).reset();
+    }
+    data_.clear();
+  }
+
+  inline const_iterator find(const Key& k) const { return data_.find(k); }
+  inline iterator find(const Key& k) { return data_.find(k); }
+
+  inline size_t count(const Key& k) const { return data_.count(k); }
+  inline std::pair<const_iterator, const_iterator> equal_range(
+      const Key& k) const {
+    return data_.equal_range(k);
+  }
+  inline std::pair<iterator, iterator> equal_range(const Key& k) {
+    return data_.equal_range(k);
+  }
+
+  inline size_t size() const { return data_.size(); }
+  inline size_t max_size() const { return data_.max_size(); }
+
+  inline bool empty() const { return data_.empty(); }
+
+  inline size_t bucket_count() const { return data_.bucket_count(); }
+  // NOTE(review): std::unordered_map (backing base::hash_map) has no
+  // resize(); this member only compiles while it is never instantiated.
+  // rehash() is presumably the intended call -- confirm before using.
+  inline void resize(size_t size) { return data_.resize(size); }
+
+  inline iterator begin() { return data_.begin(); }
+  inline const_iterator begin() const { return data_.begin(); }
+  inline iterator end() { return data_.end(); }
+  inline const_iterator end() const { return data_.end(); }
+
+ private:
+  Container data_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedPtrHashMap);
+};
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_SCOPED_PTR_HASH_MAP_H_
diff --git a/libchrome/base/containers/small_map.h b/libchrome/base/containers/small_map.h
new file mode 100644
index 0000000..82ed6c5
--- /dev/null
+++ b/libchrome/base/containers/small_map.h
@@ -0,0 +1,653 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_SMALL_MAP_H_
+#define BASE_CONTAINERS_SMALL_MAP_H_
+
+#include <stddef.h>
+
+#include <map>
+#include <string>
+#include <utility>
+
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/memory/manual_constructor.h"
+
+namespace base {
+
+// An STL-like associative container which starts out backed by a simple
+// array but switches to some other container type if it grows beyond a
+// fixed size.
+//
+// WHAT TYPE OF MAP SHOULD YOU USE?
+// --------------------------------
+//
+// - std::map should be the default if you're not sure, since it's the most
+// difficult to mess up. Generally this is backed by a red-black tree. It
+// will generate a lot of code (if you use a common key type like int or
+//   string the linker will probably eliminate the duplicates). It will
+// do heap allocations for each element.
+//
+// - If you only ever keep a couple of items and have very simple usage,
+//   consider whether using a vector and brute-force searching it will be
+//   the most efficient. It's not a lot of generated code (less than a
+// red-black tree if your key is "weird" and not eliminated as duplicate of
+// something else) and will probably be faster and do fewer heap allocations
+// than std::map if you have just a couple of items.
+//
+// - base::hash_map should be used if you need O(1) lookups. It may waste
+// space in the hash table, and it can be easy to write correct-looking
+// code with the default hash function being wrong or poorly-behaving.
+//
+// - SmallMap combines the performance benefits of the brute-force-searched
+// vector for small cases (no extra heap allocations), but can efficiently
+// fall back if you end up adding many items. It will generate more code
+// than std::map (at least 160 bytes for operator[]) which is bad if you
+// have a "weird" key where map functions can't be
+// duplicate-code-eliminated. If you have a one-off key and aren't in
+// performance-critical code, this bloat may negate some of the benefits and
+//   you should consider one of the other options.
+//
+// SmallMap will pick up the comparator from the underlying map type. In
+// std::map (and in MSVC additionally hash_map) only a "less" operator is
+// defined, which requires us to do two comparisons per element when doing the
+// brute-force search in the simple array.
+//
+// We define default overrides for the common map types to avoid this
+// double-compare, but you should be aware of this if you use your own
+// operator< for your map and supply your own version of == to the SmallMap.
+// You can use regular operator== by just doing:
+//
+//   base::SmallMap<std::map<MyKey, MyValue>, 4, std::equal_to<MyKey> >
+//
+//
+// USAGE
+// -----
+//
+// NormalMap: The map type to fall back to. This also defines the key
+// and value types for the SmallMap.
+// kArraySize: The size of the initial array of results. This will be
+// allocated with the SmallMap object rather than separately on
+// the heap. Once the map grows beyond this size, the map type
+// will be used instead.
+// EqualKey: A functor which tests two keys for equality. If the wrapped
+// map type has a "key_equal" member (hash_map does), then that will
+// be used by default. If the wrapped map type has a strict weak
+// ordering "key_compare" (std::map does), that will be used to
+// implement equality by default.
+// MapInit: A functor that takes a ManualConstructor<NormalMap>* and uses it to
+// initialize the map. This functor will be called at most once per
+// SmallMap, when the map exceeds the threshold of kArraySize and we
+// are about to copy values from the array to the map. The functor
+// *must* call one of the Init() methods provided by
+// ManualConstructor, since after it runs we assume that the NormalMap
+// has been initialized.
+//
+// example:
+// base::SmallMap< std::map<string, int> > days;
+// days["sunday" ] = 0;
+// days["monday" ] = 1;
+// days["tuesday" ] = 2;
+// days["wednesday"] = 3;
+// days["thursday" ] = 4;
+// days["friday" ] = 5;
+// days["saturday" ] = 6;
+//
+// You should assume that SmallMap might invalidate all the iterators
+// on any call to erase(), insert() and operator[].
+
+namespace internal {
+
+// Default MapInit functor: default-constructs the fallback map in its
+// preallocated storage via ManualConstructor::Init().
+template <typename NormalMap>
+class SmallMapDefaultInit {
+ public:
+  void operator()(ManualConstructor<NormalMap>* map) const {
+    map->Init();
+  }
+};
+
+// has_key_equal<M>::value is true iff there exists a type M::key_equal. This is
+// used to dispatch to one of the select_equal_key<> metafunctions below.
+template <typename M>
+struct has_key_equal {
+  typedef char sml;  // "small" is sometimes #defined so we use an abbreviation.
+  typedef struct { char dummy[2]; } big;
+  // Two functions, one accepts types that have a key_equal member, and one that
+  // accepts anything. They each return a value of a different size, so we can
+  // determine at compile-time which function would have been called.
+  template <typename U> static big test(typename U::key_equal*);
+  template <typename> static sml test(...);
+  // Determines if M::key_equal exists by looking at the size of the return
+  // type of the compiler-chosen test() function.
+  static const bool value = (sizeof(test<M>(0)) == sizeof(big));
+};
+// Out-of-class definition so |value| can be ODR-used (e.g. bound to a
+// reference) under pre-C++17 rules.
+template <typename M> const bool has_key_equal<M>::value;
+
+// Base template used for map types that do NOT have an M::key_equal member,
+// e.g., std::map<>. These maps have a strict weak ordering comparator rather
+// than an equality functor, so equality will be implemented in terms of that
+// comparator.
+//
+// There's a partial specialization of this template below for map types that do
+// have an M::key_equal member.
+// Primary template: M has no key_equal, so synthesize equality from the map's
+// strict weak ordering (two keys are equal iff neither orders before the
+// other). This costs two comparator calls per equality test.
+template <typename M, bool has_key_equal_value>
+struct select_equal_key {
+  struct equal_key {
+    bool operator()(const typename M::key_type& left,
+                    const typename M::key_type& right) {
+      // Implements equality in terms of a strict weak ordering comparator.
+      typename M::key_compare comp;
+      return !comp(left, right) && !comp(right, left);
+    }
+  };
+};
+
+// Provide overrides to use operator== for key compare for the "normal" map and
+// hash map types. If you override the default comparator or allocator for a
+// map or hash_map, or use another type of map, this won't get used.
+//
+// If we switch to using std::unordered_map for base::hash_map, then the
+// hash_map specialization can be removed.
+// Default-comparator std::map: use operator== directly, avoiding the
+// double-comparison of the primary template above.
+template <typename KeyType, typename ValueType>
+struct select_equal_key< std::map<KeyType, ValueType>, false> {
+  struct equal_key {
+    bool operator()(const KeyType& left, const KeyType& right) {
+      return left == right;
+    }
+  };
+};
+// Same override for the default-configured base::hash_map.
+template <typename KeyType, typename ValueType>
+struct select_equal_key< base::hash_map<KeyType, ValueType>, false> {
+  struct equal_key {
+    bool operator()(const KeyType& left, const KeyType& right) {
+      return left == right;
+    }
+  };
+};
+
+// Partial template specialization handles case where M::key_equal exists, e.g.,
+// hash_map<>.
+// M provides its own equality functor (e.g. hash maps): just forward it.
+template <typename M>
+struct select_equal_key<M, true> {
+  typedef typename M::key_equal equal_key;
+};
+
+} // namespace internal
+
+template <typename NormalMap,
+ int kArraySize = 4,
+ typename EqualKey =
+ typename internal::select_equal_key<
+ NormalMap,
+ internal::has_key_equal<NormalMap>::value>::equal_key,
+ typename MapInit = internal::SmallMapDefaultInit<NormalMap> >
+class SmallMap {
+ // We cannot rely on the compiler to reject array of size 0. In
+ // particular, gcc 2.95.3 does it but later versions allow 0-length
+ // arrays. Therefore, we explicitly reject non-positive kArraySize
+ // here.
+ static_assert(kArraySize > 0, "default initial size should be positive");
+
+ public:
+ typedef typename NormalMap::key_type key_type;
+ typedef typename NormalMap::mapped_type data_type;
+ typedef typename NormalMap::mapped_type mapped_type;
+ typedef typename NormalMap::value_type value_type;
+ typedef EqualKey key_equal;
+
+ SmallMap() : size_(0), functor_(MapInit()) {}
+
+ explicit SmallMap(const MapInit& functor) : size_(0), functor_(functor) {}
+
+ // Allow copy-constructor and assignment, since STL allows them too.
+ SmallMap(const SmallMap& src) {
+ // size_ and functor_ are initted in InitFrom()
+ InitFrom(src);
+ }
+ void operator=(const SmallMap& src) {
+ if (&src == this) return;
+
+ // This is not optimal. If src and dest are both using the small
+ // array, we could skip the teardown and reconstruct. One problem
+ // to be resolved is that the value_type itself is pair<const K,
+ // V>, and const K is not assignable.
+ Destroy();
+ InitFrom(src);
+ }
+ ~SmallMap() {
+ Destroy();
+ }
+
+ class const_iterator;
+
+ class iterator {
+ public:
+ typedef typename NormalMap::iterator::iterator_category iterator_category;
+ typedef typename NormalMap::iterator::value_type value_type;
+ typedef typename NormalMap::iterator::difference_type difference_type;
+ typedef typename NormalMap::iterator::pointer pointer;
+ typedef typename NormalMap::iterator::reference reference;
+
+ inline iterator(): array_iter_(NULL) {}
+
+ inline iterator& operator++() {
+ if (array_iter_ != NULL) {
+ ++array_iter_;
+ } else {
+ ++hash_iter_;
+ }
+ return *this;
+ }
+ inline iterator operator++(int /*unused*/) {
+ iterator result(*this);
+ ++(*this);
+ return result;
+ }
+ inline iterator& operator--() {
+ if (array_iter_ != NULL) {
+ --array_iter_;
+ } else {
+ --hash_iter_;
+ }
+ return *this;
+ }
+ inline iterator operator--(int /*unused*/) {
+ iterator result(*this);
+ --(*this);
+ return result;
+ }
+ inline value_type* operator->() const {
+ if (array_iter_ != NULL) {
+ return array_iter_->get();
+ } else {
+ return hash_iter_.operator->();
+ }
+ }
+
+ inline value_type& operator*() const {
+ if (array_iter_ != NULL) {
+ return *array_iter_->get();
+ } else {
+ return *hash_iter_;
+ }
+ }
+
+ inline bool operator==(const iterator& other) const {
+ if (array_iter_ != NULL) {
+ return array_iter_ == other.array_iter_;
+ } else {
+ return other.array_iter_ == NULL && hash_iter_ == other.hash_iter_;
+ }
+ }
+
+ inline bool operator!=(const iterator& other) const {
+ return !(*this == other);
+ }
+
+ bool operator==(const const_iterator& other) const;
+ bool operator!=(const const_iterator& other) const;
+
+ private:
+ friend class SmallMap;
+ friend class const_iterator;
+ inline explicit iterator(ManualConstructor<value_type>* init)
+ : array_iter_(init) {}
+ inline explicit iterator(const typename NormalMap::iterator& init)
+ : array_iter_(NULL), hash_iter_(init) {}
+
+ ManualConstructor<value_type>* array_iter_;
+ typename NormalMap::iterator hash_iter_;
+ };
+
+ class const_iterator {
+ public:
+ typedef typename NormalMap::const_iterator::iterator_category
+ iterator_category;
+ typedef typename NormalMap::const_iterator::value_type value_type;
+ typedef typename NormalMap::const_iterator::difference_type difference_type;
+ typedef typename NormalMap::const_iterator::pointer pointer;
+ typedef typename NormalMap::const_iterator::reference reference;
+
+ inline const_iterator(): array_iter_(NULL) {}
+ // Non-explicit ctor lets us convert regular iterators to const iterators
+ inline const_iterator(const iterator& other)
+ : array_iter_(other.array_iter_), hash_iter_(other.hash_iter_) {}
+
+ inline const_iterator& operator++() {
+ if (array_iter_ != NULL) {
+ ++array_iter_;
+ } else {
+ ++hash_iter_;
+ }
+ return *this;
+ }
+ inline const_iterator operator++(int /*unused*/) {
+ const_iterator result(*this);
+ ++(*this);
+ return result;
+ }
+
+ inline const_iterator& operator--() {
+ if (array_iter_ != NULL) {
+ --array_iter_;
+ } else {
+ --hash_iter_;
+ }
+ return *this;
+ }
+ inline const_iterator operator--(int /*unused*/) {
+ const_iterator result(*this);
+ --(*this);
+ return result;
+ }
+
+ inline const value_type* operator->() const {
+ if (array_iter_ != NULL) {
+ return array_iter_->get();
+ } else {
+ return hash_iter_.operator->();
+ }
+ }
+
+ inline const value_type& operator*() const {
+ if (array_iter_ != NULL) {
+ return *array_iter_->get();
+ } else {
+ return *hash_iter_;
+ }
+ }
+
+ inline bool operator==(const const_iterator& other) const {
+ if (array_iter_ != NULL) {
+ return array_iter_ == other.array_iter_;
+ } else {
+ return other.array_iter_ == NULL && hash_iter_ == other.hash_iter_;
+ }
+ }
+
+ inline bool operator!=(const const_iterator& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ friend class SmallMap;
+ inline explicit const_iterator(
+ const ManualConstructor<value_type>* init)
+ : array_iter_(init) {}
+ inline explicit const_iterator(
+ const typename NormalMap::const_iterator& init)
+ : array_iter_(NULL), hash_iter_(init) {}
+
+ const ManualConstructor<value_type>* array_iter_;
+ typename NormalMap::const_iterator hash_iter_;
+ };
+
+ iterator find(const key_type& key) {
+ key_equal compare;
+ if (size_ >= 0) {
+ for (int i = 0; i < size_; i++) {
+ if (compare(array_[i]->first, key)) {
+ return iterator(array_ + i);
+ }
+ }
+ return iterator(array_ + size_);
+ } else {
+ return iterator(map()->find(key));
+ }
+ }
+
+ const_iterator find(const key_type& key) const {
+ key_equal compare;
+ if (size_ >= 0) {
+ for (int i = 0; i < size_; i++) {
+ if (compare(array_[i]->first, key)) {
+ return const_iterator(array_ + i);
+ }
+ }
+ return const_iterator(array_ + size_);
+ } else {
+ return const_iterator(map()->find(key));
+ }
+ }
+
+ // Invalidates iterators.
+ data_type& operator[](const key_type& key) {
+ key_equal compare;
+
+ if (size_ >= 0) {
+ // operator[] searches backwards, favoring recently-added
+ // elements.
+ for (int i = size_-1; i >= 0; --i) {
+ if (compare(array_[i]->first, key)) {
+ return array_[i]->second;
+ }
+ }
+ if (size_ == kArraySize) {
+ ConvertToRealMap();
+ return (*map_)[key];
+ } else {
+ array_[size_].Init(key, data_type());
+ return array_[size_++]->second;
+ }
+ } else {
+ return (*map_)[key];
+ }
+ }
+
+ // Invalidates iterators.
+ std::pair<iterator, bool> insert(const value_type& x) {
+ key_equal compare;
+
+ if (size_ >= 0) {
+ for (int i = 0; i < size_; i++) {
+ if (compare(array_[i]->first, x.first)) {
+ return std::make_pair(iterator(array_ + i), false);
+ }
+ }
+ if (size_ == kArraySize) {
+ ConvertToRealMap(); // Invalidates all iterators!
+ std::pair<typename NormalMap::iterator, bool> ret = map_->insert(x);
+ return std::make_pair(iterator(ret.first), ret.second);
+ } else {
+ array_[size_].Init(x);
+ return std::make_pair(iterator(array_ + size_++), true);
+ }
+ } else {
+ std::pair<typename NormalMap::iterator, bool> ret = map_->insert(x);
+ return std::make_pair(iterator(ret.first), ret.second);
+ }
+ }
+
+ // Invalidates iterators.
+ template <class InputIterator>
+ void insert(InputIterator f, InputIterator l) {
+ while (f != l) {
+ insert(*f);
+ ++f;
+ }
+ }
+
+ iterator begin() {
+ if (size_ >= 0) {
+ return iterator(array_);
+ } else {
+ return iterator(map_->begin());
+ }
+ }
+ const_iterator begin() const {
+ if (size_ >= 0) {
+ return const_iterator(array_);
+ } else {
+ return const_iterator(map_->begin());
+ }
+ }
+
+ iterator end() {
+ if (size_ >= 0) {
+ return iterator(array_ + size_);
+ } else {
+ return iterator(map_->end());
+ }
+ }
+ const_iterator end() const {
+ if (size_ >= 0) {
+ return const_iterator(array_ + size_);
+ } else {
+ return const_iterator(map_->end());
+ }
+ }
+
+ void clear() {
+ if (size_ >= 0) {
+ for (int i = 0; i < size_; i++) {
+ array_[i].Destroy();
+ }
+ } else {
+ map_.Destroy();
+ }
+ size_ = 0;
+ }
+
+ // Invalidates iterators.
+ void erase(const iterator& position) {
+ if (size_ >= 0) {
+ int i = position.array_iter_ - array_;
+ array_[i].Destroy();
+ --size_;
+ if (i != size_) {
+ array_[i].InitFromMove(std::move(array_[size_]));
+ array_[size_].Destroy();
+ }
+ } else {
+ map_->erase(position.hash_iter_);
+ }
+ }
+
+ size_t erase(const key_type& key) {
+ iterator iter = find(key);
+ if (iter == end()) return 0u;
+ erase(iter);
+ return 1u;
+ }
+
+ size_t count(const key_type& key) const {
+ return (find(key) == end()) ? 0 : 1;
+ }
+
+ size_t size() const {
+ if (size_ >= 0) {
+ return static_cast<size_t>(size_);
+ } else {
+ return map_->size();
+ }
+ }
+
+ bool empty() const {
+ if (size_ >= 0) {
+ return (size_ == 0);
+ } else {
+ return map_->empty();
+ }
+ }
+
+ // Returns true if we have fallen back to using the underlying map
+ // representation.
+ bool UsingFullMap() const {
+ return size_ < 0;
+ }
+
+ inline NormalMap* map() {
+ CHECK(UsingFullMap());
+ return map_.get();
+ }
+ inline const NormalMap* map() const {
+ CHECK(UsingFullMap());
+ return map_.get();
+ }
+
+ private:
+ int size_; // negative = using hash_map
+
+ MapInit functor_;
+
+ // We want to call constructors and destructors manually, but we don't
+ // want to allocate and deallocate the memory used for them separately.
+ // So, we use this crazy ManualConstructor class.
+ //
+ // Since array_ and map_ are mutually exclusive, we'll put them in a
+ // union, too. We add in a dummy_ value which quiets MSVC from otherwise
+ // giving an erroneous "union member has copy constructor" error message
+ // (C2621). This dummy member has to come before array_ to quiet the
+ // compiler.
+ //
+ // TODO(brettw) remove this and use C++11 unions when we require C++11.
+ union {
+ ManualConstructor<value_type> dummy_;
+ ManualConstructor<value_type> array_[kArraySize];
+ ManualConstructor<NormalMap> map_;
+ };
+
+ void ConvertToRealMap() {
+ // Move the current elements into a temporary array.
+ ManualConstructor<value_type> temp_array[kArraySize];
+
+ for (int i = 0; i < kArraySize; i++) {
+ temp_array[i].InitFromMove(std::move(array_[i]));
+ array_[i].Destroy();
+ }
+
+ // Initialize the map.
+ size_ = -1;
+ functor_(&map_);
+
+ // Insert elements into it.
+ for (int i = 0; i < kArraySize; i++) {
+ map_->insert(std::move(*temp_array[i]));
+ temp_array[i].Destroy();
+ }
+ }
+
+ // Helpers for constructors and destructors.
+ void InitFrom(const SmallMap& src) {
+ functor_ = src.functor_;
+ size_ = src.size_;
+ if (src.size_ >= 0) {
+ for (int i = 0; i < size_; i++) {
+ array_[i].Init(*src.array_[i]);
+ }
+ } else {
+ functor_(&map_);
+ (*map_.get()) = (*src.map_.get());
+ }
+ }
+ void Destroy() {
+ if (size_ >= 0) {
+ for (int i = 0; i < size_; i++) {
+ array_[i].Destroy();
+ }
+ } else {
+ map_.Destroy();
+ }
+ }
+};
+
+template <typename NormalMap, int kArraySize, typename EqualKey,
+ typename Functor>
+inline bool SmallMap<NormalMap, kArraySize, EqualKey,
+ Functor>::iterator::operator==(
+ const const_iterator& other) const {
+ return other == *this;
+}
+template <typename NormalMap, int kArraySize, typename EqualKey,
+ typename Functor>
+inline bool SmallMap<NormalMap, kArraySize, EqualKey,
+ Functor>::iterator::operator!=(
+ const const_iterator& other) const {
+ return other != *this;
+}
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_SMALL_MAP_H_
diff --git a/libchrome/base/containers/stack_container.h b/libchrome/base/containers/stack_container.h
new file mode 100644
index 0000000..9e0efc1
--- /dev/null
+++ b/libchrome/base/containers/stack_container.h
@@ -0,0 +1,268 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_STACK_CONTAINER_H_
+#define BASE_CONTAINERS_STACK_CONTAINER_H_
+
+#include <stddef.h>
+
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/aligned_memory.h"
+#include "base/strings/string16.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// This allocator can be used with STL containers to provide a stack buffer
+// from which to allocate memory and overflows onto the heap. This stack buffer
+// would be allocated on the stack and allows us to avoid heap operations in
+// some situations.
+//
+// STL likes to make copies of allocators, so the allocator itself can't hold
+// the data. Instead, we make the creator responsible for creating a
+// StackAllocator::Source which contains the data. Copying the allocator
+// merely copies the pointer to this shared source, so all allocators created
+// based on our allocator will share the same stack buffer.
+//
+// This stack buffer implementation is very simple. The first allocation that
+// fits in the stack buffer will use the stack buffer. Any subsequent
+// allocations will not use the stack buffer, even if there is unused room.
+// This makes it appropriate for array-like containers, but the caller should
+// be sure to reserve() in the container up to the stack buffer size. Otherwise
+// the container will allocate a small array which will "use up" the stack
+// buffer.
+template<typename T, size_t stack_capacity>
+class StackAllocator : public std::allocator<T> {
+ public:
+ typedef typename std::allocator<T>::pointer pointer;
+ typedef typename std::allocator<T>::size_type size_type;
+
+ // Backing store for the allocator. The container owner is responsible for
+ // maintaining this for as long as any containers using this allocator are
+ // live.
+ struct Source {
+ Source() : used_stack_buffer_(false) {
+ }
+
+ // Casts the buffer in its right type.
+ T* stack_buffer() { return stack_buffer_.template data_as<T>(); }
+ const T* stack_buffer() const {
+ return stack_buffer_.template data_as<T>();
+ }
+
+ // The buffer itself. It is not of type T because we don't want the
+ // constructors and destructors to be automatically called. Define a POD
+ // buffer of the right size instead.
+ base::AlignedMemory<sizeof(T[stack_capacity]), ALIGNOF(T)> stack_buffer_;
+#if defined(__GNUC__) && !defined(ARCH_CPU_X86_FAMILY)
+ static_assert(ALIGNOF(T) <= 16, "http://crbug.com/115612");
+#endif
+
+ // Set when the stack buffer is used for an allocation. We do not track
+ // how much of the buffer is used, only that somebody is using it.
+ bool used_stack_buffer_;
+ };
+
+ // Used by containers when they want to refer to an allocator of type U.
+ template<typename U>
+ struct rebind {
+ typedef StackAllocator<U, stack_capacity> other;
+ };
+
+ // For the straight up copy c-tor, we can share storage.
+ StackAllocator(const StackAllocator<T, stack_capacity>& rhs)
+ : std::allocator<T>(), source_(rhs.source_) {
+ }
+
+ // ISO C++ requires the following constructor to be defined,
+ // and std::vector in VC++2008SP1 Release fails with an error
+ // in the class _Container_base_aux_alloc_real (from <xutility>)
+ // if the constructor does not exist.
+ // For this constructor, we cannot share storage; there's
+ // no guarantee that the Source buffer of Ts is large enough
+ // for Us.
+ // TODO: If we were fancy pants, perhaps we could share storage
+ // iff sizeof(T) == sizeof(U).
+ template<typename U, size_t other_capacity>
+ StackAllocator(const StackAllocator<U, other_capacity>& other)
+ : source_(NULL) {
+ }
+
+ // This constructor must exist. It creates a default allocator that doesn't
+ // actually have a stack buffer. glibc's std::string() will compare the
+ // current allocator against the default-constructed allocator, so this
+ // should be fast.
+ StackAllocator() : source_(NULL) {
+ }
+
+ explicit StackAllocator(Source* source) : source_(source) {
+ }
+
+ // Actually do the allocation. Use the stack buffer if nobody has used it yet
+ // and the size requested fits. Otherwise, fall through to the standard
+ // allocator.
+ pointer allocate(size_type n, void* hint = 0) {
+ if (source_ != NULL && !source_->used_stack_buffer_
+ && n <= stack_capacity) {
+ source_->used_stack_buffer_ = true;
+ return source_->stack_buffer();
+ } else {
+ return std::allocator<T>::allocate(n, hint);
+ }
+ }
+
+ // Free: when trying to free the stack buffer, just mark it as free. For
+ // non-stack-buffer pointers, just fall though to the standard allocator.
+ void deallocate(pointer p, size_type n) {
+ if (source_ != NULL && p == source_->stack_buffer())
+ source_->used_stack_buffer_ = false;
+ else
+ std::allocator<T>::deallocate(p, n);
+ }
+
+ private:
+ Source* source_;
+};
+
+// A wrapper around STL containers that maintains a stack-sized buffer that the
+// initial capacity of the vector is based on. Growing the container beyond the
+// stack capacity will transparently overflow onto the heap. The container must
+// support reserve().
+//
+// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
+// type. This object is really intended to be used only internally. You'll want
+// to use the wrappers below for different types.
+template<typename TContainerType, int stack_capacity>
+class StackContainer {
+ public:
+ typedef TContainerType ContainerType;
+ typedef typename ContainerType::value_type ContainedType;
+ typedef StackAllocator<ContainedType, stack_capacity> Allocator;
+
+ // Allocator must be constructed before the container!
+ StackContainer() : allocator_(&stack_data_), container_(allocator_) {
+ // Make the container use the stack allocation by reserving our buffer size
+ // before doing anything else.
+ container_.reserve(stack_capacity);
+ }
+
+ // Getters for the actual container.
+ //
+ // Danger: any copies of this made using the copy constructor must have
+ // shorter lifetimes than the source. The copy will share the same allocator
+ // and therefore the same stack buffer as the original. Use std::copy to
+ // copy into a "real" container for longer-lived objects.
+ ContainerType& container() { return container_; }
+ const ContainerType& container() const { return container_; }
+
+ // Support operator-> to get to the container. This allows nicer syntax like:
+ // StackContainer<...> foo;
+ // std::sort(foo->begin(), foo->end());
+ ContainerType* operator->() { return &container_; }
+ const ContainerType* operator->() const { return &container_; }
+
+#ifdef UNIT_TEST
+ // Retrieves the stack source so that unit tests can verify that the
+ // buffer is being used properly.
+ const typename Allocator::Source& stack_data() const {
+ return stack_data_;
+ }
+#endif
+
+ protected:
+ typename Allocator::Source stack_data_;
+ Allocator allocator_;
+ ContainerType container_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StackContainer);
+};
+
+// StackString -----------------------------------------------------------------
+
+template<size_t stack_capacity>
+class StackString : public StackContainer<
+ std::basic_string<char,
+ std::char_traits<char>,
+ StackAllocator<char, stack_capacity> >,
+ stack_capacity> {
+ public:
+ StackString() : StackContainer<
+ std::basic_string<char,
+ std::char_traits<char>,
+ StackAllocator<char, stack_capacity> >,
+ stack_capacity>() {
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StackString);
+};
+
+// StackString16 ---------------------------------------------------------------
+
+template<size_t stack_capacity>
+class StackString16 : public StackContainer<
+ std::basic_string<char16,
+ base::string16_char_traits,
+ StackAllocator<char16, stack_capacity> >,
+ stack_capacity> {
+ public:
+ StackString16() : StackContainer<
+ std::basic_string<char16,
+ base::string16_char_traits,
+ StackAllocator<char16, stack_capacity> >,
+ stack_capacity>() {
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StackString16);
+};
+
+// StackVector -----------------------------------------------------------------
+
+// Example:
+// StackVector<int, 16> foo;
+// foo->push_back(22); // we have overloaded operator->
+// foo[0] = 10; // as well as operator[]
+template<typename T, size_t stack_capacity>
+class StackVector : public StackContainer<
+ std::vector<T, StackAllocator<T, stack_capacity> >,
+ stack_capacity> {
+ public:
+ StackVector() : StackContainer<
+ std::vector<T, StackAllocator<T, stack_capacity> >,
+ stack_capacity>() {
+ }
+
+ // We need to put this in STL containers sometimes, which requires a copy
+ // constructor. We can't call the regular copy constructor because that will
+ // take the stack buffer from the original. Here, we create an empty object
+ // and make a stack buffer of its own.
+ StackVector(const StackVector<T, stack_capacity>& other)
+ : StackContainer<
+ std::vector<T, StackAllocator<T, stack_capacity> >,
+ stack_capacity>() {
+ this->container().assign(other->begin(), other->end());
+ }
+
+ StackVector<T, stack_capacity>& operator=(
+ const StackVector<T, stack_capacity>& other) {
+ this->container().assign(other->begin(), other->end());
+ return *this;
+ }
+
+ // Vectors are commonly indexed, which isn't very convenient even with
+ // operator-> (using "->at()" does exception stuff we don't want).
+ T& operator[](size_t i) { return this->container().operator[](i); }
+ const T& operator[](size_t i) const {
+ return this->container().operator[](i);
+ }
+};
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_STACK_CONTAINER_H_
diff --git a/libchrome/base/cpu.cc b/libchrome/base/cpu.cc
new file mode 100644
index 0000000..de4a001
--- /dev/null
+++ b/libchrome/base/cpu.cc
@@ -0,0 +1,238 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/cpu.h"
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <algorithm>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
+#include "base/files/file_util.h"
+#include "base/lazy_instance.h"
+#endif
+
+#if defined(ARCH_CPU_X86_FAMILY)
+#if defined(_MSC_VER)
+#include <intrin.h>
+#include <immintrin.h> // For _xgetbv()
+#endif
+#endif
+
+namespace base {
+
+CPU::CPU()
+ : signature_(0),
+ type_(0),
+ family_(0),
+ model_(0),
+ stepping_(0),
+ ext_model_(0),
+ ext_family_(0),
+ has_mmx_(false),
+ has_sse_(false),
+ has_sse2_(false),
+ has_sse3_(false),
+ has_ssse3_(false),
+ has_sse41_(false),
+ has_sse42_(false),
+ has_avx_(false),
+ has_avx2_(false),
+ has_aesni_(false),
+ has_non_stop_time_stamp_counter_(false),
+ cpu_vendor_("unknown") {
+ Initialize();
+}
+
+namespace {
+
+#if defined(ARCH_CPU_X86_FAMILY)
+#ifndef _MSC_VER
+
+#if defined(__pic__) && defined(__i386__)
+
+void __cpuid(int cpu_info[4], int info_type) {
+ __asm__ volatile (
+ "mov %%ebx, %%edi\n"
+ "cpuid\n"
+ "xchg %%edi, %%ebx\n"
+ : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+ : "a"(info_type)
+ );
+}
+
+#else
+
+void __cpuid(int cpu_info[4], int info_type) {
+ __asm__ volatile (
+ "cpuid\n"
+ : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+ : "a"(info_type)
+ );
+}
+
+#endif
+
+// _xgetbv returns the value of an Intel Extended Control Register (XCR).
+// Currently only XCR0 is defined by Intel so |xcr| should always be zero.
+uint64_t _xgetbv(uint32_t xcr) {
+ uint32_t eax, edx;
+
+ __asm__ volatile (
+ "xgetbv" : "=a"(eax), "=d"(edx) : "c"(xcr));
+ return (static_cast<uint64_t>(edx) << 32) | eax;
+}
+
+#endif // !_MSC_VER
+#endif // ARCH_CPU_X86_FAMILY
+
+#if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
+class LazyCpuInfoValue {
+ public:
+ LazyCpuInfoValue() {
+ // This function finds the value from /proc/cpuinfo under the key "model
+ // name" or "Processor". "model name" is used in Linux 3.8 and later (3.7
+ // and later for arm64) and is shown once per CPU. "Processor" is used in
+ // earlier versions and is shown only once at the top of /proc/cpuinfo
+ // regardless of the number of CPUs.
+ const char kModelNamePrefix[] = "model name\t: ";
+ const char kProcessorPrefix[] = "Processor\t: ";
+
+ std::string contents;
+ ReadFileToString(FilePath("/proc/cpuinfo"), &contents);
+ DCHECK(!contents.empty());
+ if (contents.empty()) {
+ return;
+ }
+
+ std::istringstream iss(contents);
+ std::string line;
+ while (std::getline(iss, line)) {
+ if (brand_.empty() &&
+ (line.compare(0, strlen(kModelNamePrefix), kModelNamePrefix) == 0 ||
+ line.compare(0, strlen(kProcessorPrefix), kProcessorPrefix) == 0)) {
+ brand_.assign(line.substr(strlen(kModelNamePrefix)));
+ }
+ }
+ }
+
+ const std::string& brand() const { return brand_; }
+
+ private:
+ std::string brand_;
+ DISALLOW_COPY_AND_ASSIGN(LazyCpuInfoValue);
+};
+
+base::LazyInstance<LazyCpuInfoValue>::Leaky g_lazy_cpuinfo =
+ LAZY_INSTANCE_INITIALIZER;
+
+#endif // defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) ||
+ // defined(OS_LINUX))
+
+} // anonymous namespace
+
+void CPU::Initialize() {
+#if defined(ARCH_CPU_X86_FAMILY)
+ int cpu_info[4] = {-1};
+ char cpu_string[48];
+
+ // __cpuid with an InfoType argument of 0 returns the number of
+ // valid Ids in CPUInfo[0] and the CPU identification string in
+ // the other three array elements. The CPU identification string is
+ // not in linear order. The code below arranges the information
+ // in a human readable form. The human readable order is CPUInfo[1] |
+ // CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped
+ // before using memcpy to copy these three array elements to cpu_string.
+ __cpuid(cpu_info, 0);
+ int num_ids = cpu_info[0];
+ std::swap(cpu_info[2], cpu_info[3]);
+ memcpy(cpu_string, &cpu_info[1], 3 * sizeof(cpu_info[1]));
+ cpu_vendor_.assign(cpu_string, 3 * sizeof(cpu_info[1]));
+
+ // Interpret CPU feature information.
+ if (num_ids > 0) {
+ int cpu_info7[4] = {0};
+ __cpuid(cpu_info, 1);
+ if (num_ids >= 7) {
+ __cpuid(cpu_info7, 7);
+ }
+ signature_ = cpu_info[0];
+ stepping_ = cpu_info[0] & 0xf;
+ model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0);
+ family_ = (cpu_info[0] >> 8) & 0xf;
+ type_ = (cpu_info[0] >> 12) & 0x3;
+ ext_model_ = (cpu_info[0] >> 16) & 0xf;
+ ext_family_ = (cpu_info[0] >> 20) & 0xff;
+ has_mmx_ = (cpu_info[3] & 0x00800000) != 0;
+ has_sse_ = (cpu_info[3] & 0x02000000) != 0;
+ has_sse2_ = (cpu_info[3] & 0x04000000) != 0;
+ has_sse3_ = (cpu_info[2] & 0x00000001) != 0;
+ has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
+ has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
+ has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
+ // AVX instructions will generate an illegal instruction exception unless
+ // a) they are supported by the CPU,
+ // b) XSAVE is supported by the CPU and
+ // c) XSAVE is enabled by the kernel.
+ // See http://software.intel.com/en-us/blogs/2011/04/14/is-avx-enabled
+ //
+ // In addition, we have observed some crashes with the xgetbv instruction
+ // even after following Intel's example code. (See crbug.com/375968.)
+ // Because of that, we also test the XSAVE bit because its description in
+ // the CPUID documentation suggests that it signals xgetbv support.
+ has_avx_ =
+ (cpu_info[2] & 0x10000000) != 0 &&
+ (cpu_info[2] & 0x04000000) != 0 /* XSAVE */ &&
+ (cpu_info[2] & 0x08000000) != 0 /* OSXSAVE */ &&
+ (_xgetbv(0) & 6) == 6 /* XSAVE enabled by kernel */;
+ has_aesni_ = (cpu_info[2] & 0x02000000) != 0;
+ has_avx2_ = has_avx_ && (cpu_info7[1] & 0x00000020) != 0;
+ }
+
+ // Get the brand string of the cpu.
+ __cpuid(cpu_info, 0x80000000);
+ const int parameter_end = 0x80000004;
+ int max_parameter = cpu_info[0];
+
+ if (cpu_info[0] >= parameter_end) {
+ char* cpu_string_ptr = cpu_string;
+
+ for (int parameter = 0x80000002; parameter <= parameter_end &&
+ cpu_string_ptr < &cpu_string[sizeof(cpu_string)]; parameter++) {
+ __cpuid(cpu_info, parameter);
+ memcpy(cpu_string_ptr, cpu_info, sizeof(cpu_info));
+ cpu_string_ptr += sizeof(cpu_info);
+ }
+ cpu_brand_.assign(cpu_string, cpu_string_ptr - cpu_string);
+ }
+
+ const int parameter_containing_non_stop_time_stamp_counter = 0x80000007;
+ if (max_parameter >= parameter_containing_non_stop_time_stamp_counter) {
+ __cpuid(cpu_info, parameter_containing_non_stop_time_stamp_counter);
+ has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
+ }
+#elif defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
+ cpu_brand_.assign(g_lazy_cpuinfo.Get().brand());
+#endif
+}
+
+CPU::IntelMicroArchitecture CPU::GetIntelMicroArchitecture() const {
+ if (has_avx2()) return AVX2;
+ if (has_avx()) return AVX;
+ if (has_sse42()) return SSE42;
+ if (has_sse41()) return SSE41;
+ if (has_ssse3()) return SSSE3;
+ if (has_sse3()) return SSE3;
+ if (has_sse2()) return SSE2;
+ if (has_sse()) return SSE;
+ return PENTIUM;
+}
+
+} // namespace base
diff --git a/libchrome/base/cpu.h b/libchrome/base/cpu.h
new file mode 100644
index 0000000..0e4303b
--- /dev/null
+++ b/libchrome/base/cpu.h
@@ -0,0 +1,87 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CPU_H_
+#define BASE_CPU_H_
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+
+// Query information about the processor.
+class BASE_EXPORT CPU {
+ public:
+ // Constructor
+ CPU();
+
+ enum IntelMicroArchitecture {
+ PENTIUM,
+ SSE,
+ SSE2,
+ SSE3,
+ SSSE3,
+ SSE41,
+ SSE42,
+ AVX,
+ AVX2,
+ MAX_INTEL_MICRO_ARCHITECTURE
+ };
+
+ // Accessors for CPU information.
+ const std::string& vendor_name() const { return cpu_vendor_; }
+ int signature() const { return signature_; }
+ int stepping() const { return stepping_; }
+ int model() const { return model_; }
+ int family() const { return family_; }
+ int type() const { return type_; }
+ int extended_model() const { return ext_model_; }
+ int extended_family() const { return ext_family_; }
+ bool has_mmx() const { return has_mmx_; }
+ bool has_sse() const { return has_sse_; }
+ bool has_sse2() const { return has_sse2_; }
+ bool has_sse3() const { return has_sse3_; }
+ bool has_ssse3() const { return has_ssse3_; }
+ bool has_sse41() const { return has_sse41_; }
+ bool has_sse42() const { return has_sse42_; }
+ bool has_avx() const { return has_avx_; }
+ bool has_avx2() const { return has_avx2_; }
+ bool has_aesni() const { return has_aesni_; }
+ bool has_non_stop_time_stamp_counter() const {
+ return has_non_stop_time_stamp_counter_;
+ }
+
+ IntelMicroArchitecture GetIntelMicroArchitecture() const;
+ const std::string& cpu_brand() const { return cpu_brand_; }
+
+ private:
+ // Query the processor for CPUID information.
+ void Initialize();
+
+ int signature_; // raw form of type, family, model, and stepping
+ int type_; // process type
+ int family_; // family of the processor
+ int model_; // model of processor
+ int stepping_; // processor revision number
+ int ext_model_;
+ int ext_family_;
+ bool has_mmx_;
+ bool has_sse_;
+ bool has_sse2_;
+ bool has_sse3_;
+ bool has_ssse3_;
+ bool has_sse41_;
+ bool has_sse42_;
+ bool has_avx_;
+ bool has_avx2_;
+ bool has_aesni_;
+ bool has_non_stop_time_stamp_counter_;
+ std::string cpu_vendor_;
+ std::string cpu_brand_;
+};
+
+} // namespace base
+
+#endif // BASE_CPU_H_
diff --git a/libchrome/base/cpu_unittest.cc b/libchrome/base/cpu_unittest.cc
new file mode 100644
index 0000000..ec14620
--- /dev/null
+++ b/libchrome/base/cpu_unittest.cc
@@ -0,0 +1,117 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/cpu.h"
+#include "build/build_config.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if _MSC_VER >= 1700
+// C4752: found Intel(R) Advanced Vector Extensions; consider using /arch:AVX.
+#pragma warning(disable: 4752)
+#endif
+
+// Tests whether we can run extended instructions represented by the CPU
+// information. This test actually executes some extended instructions (such as
+// MMX, SSE, etc.) supported by the CPU and sees we can run them without
+// "undefined instruction" exceptions. That is, this test succeeds when this
+// test finishes without a crash.
+TEST(CPU, RunExtendedInstructions) {
+#if defined(ARCH_CPU_X86_FAMILY)
+ // Retrieve the CPU information.
+ base::CPU cpu;
+
+ ASSERT_TRUE(cpu.has_mmx());
+ ASSERT_TRUE(cpu.has_sse());
+ ASSERT_TRUE(cpu.has_sse2());
+
+// GCC and clang instruction test.
+#if defined(COMPILER_GCC)
+ // Execute an MMX instruction.
+ __asm__ __volatile__("emms\n" : : : "mm0");
+
+ // Execute an SSE instruction.
+ __asm__ __volatile__("xorps %%xmm0, %%xmm0\n" : : : "xmm0");
+
+ // Execute an SSE 2 instruction.
+ __asm__ __volatile__("psrldq $0, %%xmm0\n" : : : "xmm0");
+
+ if (cpu.has_sse3()) {
+ // Execute an SSE 3 instruction.
+ __asm__ __volatile__("addsubpd %%xmm0, %%xmm0\n" : : : "xmm0");
+ }
+
+ if (cpu.has_ssse3()) {
+ // Execute a Supplemental SSE 3 instruction.
+ __asm__ __volatile__("psignb %%xmm0, %%xmm0\n" : : : "xmm0");
+ }
+
+ if (cpu.has_sse41()) {
+ // Execute an SSE 4.1 instruction.
+ __asm__ __volatile__("pmuldq %%xmm0, %%xmm0\n" : : : "xmm0");
+ }
+
+ if (cpu.has_sse42()) {
+ // Execute an SSE 4.2 instruction.
+ __asm__ __volatile__("crc32 %%eax, %%eax\n" : : : "eax");
+ }
+
+ if (cpu.has_avx()) {
+ // Execute an AVX instruction.
+ __asm__ __volatile__("vzeroupper\n" : : : "xmm0");
+ }
+
+ if (cpu.has_avx2()) {
+ // Execute an AVX 2 instruction.
+ __asm__ __volatile__("vpunpcklbw %%ymm0, %%ymm0, %%ymm0\n" : : : "xmm0");
+ }
+
+// Visual C 32 bit and ClangCL 32/64 bit test.
+#elif defined(COMPILER_MSVC) && (defined(ARCH_CPU_32_BITS) || \
+ (defined(ARCH_CPU_64_BITS) && defined(__clang__)))
+
+ // Execute an MMX instruction.
+ __asm emms;
+
+ // Execute an SSE instruction.
+ __asm xorps xmm0, xmm0;
+
+ // Execute an SSE 2 instruction.
+ __asm psrldq xmm0, 0;
+
+ if (cpu.has_sse3()) {
+ // Execute an SSE 3 instruction.
+ __asm addsubpd xmm0, xmm0;
+ }
+
+ if (cpu.has_ssse3()) {
+ // Execute a Supplemental SSE 3 instruction.
+ __asm psignb xmm0, xmm0;
+ }
+
+ if (cpu.has_sse41()) {
+ // Execute an SSE 4.1 instruction.
+ __asm pmuldq xmm0, xmm0;
+ }
+
+ if (cpu.has_sse42()) {
+ // Execute an SSE 4.2 instruction.
+ __asm crc32 eax, eax;
+ }
+
+// Visual C 2012 required for AVX.
+#if _MSC_VER >= 1700
+ if (cpu.has_avx()) {
+ // Execute an AVX instruction.
+ __asm vzeroupper;
+ }
+
+ if (cpu.has_avx2()) {
+ // Execute an AVX 2 instruction.
+ __asm vpunpcklbw ymm0, ymm0, ymm0
+ }
+#endif // _MSC_VER >= 1700
+#endif // defined(COMPILER_GCC)
+#endif // defined(ARCH_CPU_X86_FAMILY)
+}
diff --git a/libchrome/base/critical_closure.h b/libchrome/base/critical_closure.h
new file mode 100644
index 0000000..6ebd7af
--- /dev/null
+++ b/libchrome/base/critical_closure.h
@@ -0,0 +1,82 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CRITICAL_CLOSURE_H_
+#define BASE_CRITICAL_CLOSURE_H_
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_IOS)
+#include "base/bind.h"
+#include "base/ios/scoped_critical_action.h"
+#endif
+
+namespace base {
+
+namespace internal {
+
+#if defined(OS_IOS)
+// Returns true if multi-tasking is supported on this iOS device.
+bool IsMultiTaskingSupported();
+
+// This class wraps a closure so it can continue to run for a period of time
+// when the application goes to the background by using
+// |ios::ScopedCriticalAction|.
+template <typename R>
+class CriticalClosure {
+ public:
+ explicit CriticalClosure(const Callback<R(void)>& closure)
+ : closure_(closure) {}
+
+ ~CriticalClosure() {}
+
+ R Run() {
+ return closure_.Run();
+ }
+
+ private:
+ ios::ScopedCriticalAction critical_action_;
+ Callback<R(void)> closure_;
+
+ DISALLOW_COPY_AND_ASSIGN(CriticalClosure);
+};
+#endif // defined(OS_IOS)
+
+} // namespace internal
+
+// Returns a closure (which may return a result, but must not require any extra
+// arguments) that will continue to run for a period of time when the
+// application goes to the background if possible on platforms where
+// applications don't execute while backgrounded, otherwise the original task is
+// returned.
+//
+// Example:
+// file_task_runner_->PostTask(
+// FROM_HERE,
+// MakeCriticalClosure(base::Bind(&WriteToDiskTask, path_, data)));
+//
+// Note new closures might be posted in this closure. If the new closures need
+// background running time, |MakeCriticalClosure| should be applied on them
+// before posting.
+#if defined(OS_IOS)
+template <typename R>
+Callback<R(void)> MakeCriticalClosure(const Callback<R(void)>& closure) {
+ DCHECK(internal::IsMultiTaskingSupported());
+ return base::Bind(&internal::CriticalClosure<R>::Run,
+ Owned(new internal::CriticalClosure<R>(closure)));
+}
+#else // defined(OS_IOS)
+template <typename R>
+inline Callback<R(void)> MakeCriticalClosure(const Callback<R(void)>& closure) {
+ // No-op for platforms where the application does not need to acquire
+ // background time for closures to finish when it goes into the background.
+ return closure;
+}
+#endif // defined(OS_IOS)
+
+} // namespace base
+
+#endif // BASE_CRITICAL_CLOSURE_H_
diff --git a/libchrome/base/debug/alias.cc b/libchrome/base/debug/alias.cc
new file mode 100644
index 0000000..ff35574
--- /dev/null
+++ b/libchrome/base/debug/alias.cc
@@ -0,0 +1,22 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/alias.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace debug {
+
+#if defined(COMPILER_MSVC)
+#pragma optimize("", off)
+#endif
+
+void Alias(const void*) {}
+
+#if defined(COMPILER_MSVC)
+#pragma optimize("", on)
+#endif
+
+} // namespace debug
+} // namespace base
diff --git a/libchrome/base/debug/alias.h b/libchrome/base/debug/alias.h
new file mode 100644
index 0000000..3b2ab64
--- /dev/null
+++ b/libchrome/base/debug/alias.h
@@ -0,0 +1,21 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_ALIAS_H_
+#define BASE_DEBUG_ALIAS_H_
+
+#include "base/base_export.h"
+
+namespace base {
+namespace debug {
+
+// Make the optimizer think that var is aliased. This is to prevent it from
+// optimizing out variables that would not otherwise be live at the point
+// of a potential crash.
+void BASE_EXPORT Alias(const void* var);
+
+} // namespace debug
+} // namespace base
+
+#endif // BASE_DEBUG_ALIAS_H_
diff --git a/libchrome/base/debug/debugger.cc b/libchrome/base/debug/debugger.cc
new file mode 100644
index 0000000..1ccee1c
--- /dev/null
+++ b/libchrome/base/debug/debugger.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/debugger.h"
+#include "base/logging.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace debug {
+
+static bool is_debug_ui_suppressed = false;
+
+bool WaitForDebugger(int wait_seconds, bool silent) {
+#if defined(OS_ANDROID)
+ // The pid from which we know which process to attach to is not output by
+ // android ddms, so we have to print it out explicitly.
+ DLOG(INFO) << "DebugUtil::WaitForDebugger(pid=" << static_cast<int>(getpid())
+ << ")";
+#endif
+ for (int i = 0; i < wait_seconds * 10; ++i) {
+ if (BeingDebugged()) {
+ if (!silent)
+ BreakDebugger();
+ return true;
+ }
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+ }
+ return false;
+}
+
+void SetSuppressDebugUI(bool suppress) {
+ is_debug_ui_suppressed = suppress;
+}
+
+bool IsDebugUISuppressed() {
+ return is_debug_ui_suppressed;
+}
+
+} // namespace debug
+} // namespace base
diff --git a/libchrome/base/debug/debugger.h b/libchrome/base/debug/debugger.h
new file mode 100644
index 0000000..8680e28
--- /dev/null
+++ b/libchrome/base/debug/debugger.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a cross platform interface for helper functions related to
+// debuggers. You should use this to test if you're running under a debugger,
+// and if you would like to yield (breakpoint) into the debugger.
+
+#ifndef BASE_DEBUG_DEBUGGER_H_
+#define BASE_DEBUG_DEBUGGER_H_
+
+#include "base/base_export.h"
+
+namespace base {
+namespace debug {
+
+// Waits wait_seconds seconds for a debugger to attach to the current process.
+// When silent is false, an exception is thrown when a debugger is detected.
+BASE_EXPORT bool WaitForDebugger(int wait_seconds, bool silent);
+
+// Returns true if the given process is being run under a debugger.
+//
+// On OS X, the underlying mechanism doesn't work when the sandbox is enabled.
+// To get around this, this function caches its value.
+//
+// WARNING: Because of this, on OS X, a call MUST be made to this function
+// BEFORE the sandbox is enabled.
+BASE_EXPORT bool BeingDebugged();
+
+// Break into the debugger, assumes a debugger is present.
+BASE_EXPORT void BreakDebugger();
+
+// Used in test code, this controls whether showing dialogs and breaking into
+// the debugger is suppressed for debug errors, even in debug mode (normally
+// release mode doesn't do this stuff -- this is controlled separately).
+// Normally UI is not suppressed. This is normally used when running automated
+// tests where we want a crash rather than a dialog or a debugger.
+BASE_EXPORT void SetSuppressDebugUI(bool suppress);
+BASE_EXPORT bool IsDebugUISuppressed();
+
+} // namespace debug
+} // namespace base
+
+#endif // BASE_DEBUG_DEBUGGER_H_
diff --git a/libchrome/base/debug/debugger_posix.cc b/libchrome/base/debug/debugger_posix.cc
new file mode 100644
index 0000000..a157d9a
--- /dev/null
+++ b/libchrome/base/debug/debugger_posix.cc
@@ -0,0 +1,263 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/debugger.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(__GLIBCXX__)
+#include <cxxabi.h>
+#endif
+
+#if defined(OS_MACOSX)
+#include <AvailabilityMacros.h>
+#endif
+
+#if defined(OS_MACOSX) || defined(OS_BSD)
+#include <sys/sysctl.h>
+#endif
+
+#if defined(OS_FREEBSD)
+#include <sys/user.h>
+#endif
+
+#include <ostream>
+
+#include "base/debug/alias.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/string_piece.h"
+
+#if defined(USE_SYMBOLIZE)
+#error "symbolize support was removed from libchrome"
+#endif
+
+#if defined(OS_ANDROID)
+#include "base/threading/platform_thread.h"
+#endif
+
+namespace base {
+namespace debug {
+
+#if defined(OS_MACOSX) || defined(OS_BSD)
+
+// Based on Apple's recommended method as described in
+// http://developer.apple.com/qa/qa2004/qa1361.html
+bool BeingDebugged() {
+ // NOTE: This code MUST be async-signal safe (it's used by in-process
+ // stack dumping signal handler). NO malloc or stdio is allowed here.
+ //
+ // While some code used below may be async-signal unsafe, note how
+ // the result is cached (see |is_set| and |being_debugged| static variables
+ // right below). If this code is properly warmed-up early
+ // in the start-up process, it should be safe to use later.
+
+ // If the process is sandboxed then we can't use the sysctl, so cache the
+ // value.
+ static bool is_set = false;
+ static bool being_debugged = false;
+
+ if (is_set)
+ return being_debugged;
+
+ // Initialize mib, which tells sysctl what info we want. In this case,
+ // we're looking for information about a specific process ID.
+ int mib[] = {
+ CTL_KERN,
+ KERN_PROC,
+ KERN_PROC_PID,
+ getpid()
+#if defined(OS_OPENBSD)
+ , sizeof(struct kinfo_proc),
+ 0
+#endif
+ };
+
+ // Caution: struct kinfo_proc is marked __APPLE_API_UNSTABLE. The source and
+ // binary interfaces may change.
+ struct kinfo_proc info;
+ size_t info_size = sizeof(info);
+
+#if defined(OS_OPENBSD)
+ if (sysctl(mib, arraysize(mib), NULL, &info_size, NULL, 0) < 0)
+ return -1;
+
+ mib[5] = (info_size / sizeof(struct kinfo_proc));
+#endif
+
+ int sysctl_result = sysctl(mib, arraysize(mib), &info, &info_size, NULL, 0);
+ DCHECK_EQ(sysctl_result, 0);
+ if (sysctl_result != 0) {
+ is_set = true;
+ being_debugged = false;
+ return being_debugged;
+ }
+
+ // This process is being debugged if the P_TRACED flag is set.
+ is_set = true;
+#if defined(OS_FREEBSD)
+ being_debugged = (info.ki_flag & P_TRACED) != 0;
+#elif defined(OS_BSD)
+ being_debugged = (info.p_flag & P_TRACED) != 0;
+#else
+ being_debugged = (info.kp_proc.p_flag & P_TRACED) != 0;
+#endif
+ return being_debugged;
+}
+
+#elif defined(OS_LINUX) || defined(OS_ANDROID)
+
+// We can look in /proc/self/status for TracerPid. We are likely used in crash
+// handling, so we are careful not to use the heap or have side effects.
+// Another option that is common is to try to ptrace yourself, but then we
+// can't detach without forking(), and that's not so great.
+// static
+bool BeingDebugged() {
+ // NOTE: This code MUST be async-signal safe (it's used by in-process
+ // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+ int status_fd = open("/proc/self/status", O_RDONLY);
+ if (status_fd == -1)
+ return false;
+
+ // We assume our line will be in the first 1024 characters and that we can
+ // read this much all at once. In practice this will generally be true.
+ // This simplifies and speeds up things considerably.
+ char buf[1024];
+
+ ssize_t num_read = HANDLE_EINTR(read(status_fd, buf, sizeof(buf)));
+ if (IGNORE_EINTR(close(status_fd)) < 0)
+ return false;
+
+ if (num_read <= 0)
+ return false;
+
+ StringPiece status(buf, num_read);
+ StringPiece tracer("TracerPid:\t");
+
+ StringPiece::size_type pid_index = status.find(tracer);
+ if (pid_index == StringPiece::npos)
+ return false;
+
+ // The tracer pid is 0 without a debugger; assume this for any pid starting with 0.
+ pid_index += tracer.size();
+ return pid_index < status.size() && status[pid_index] != '0';
+}
+
+#else
+
+bool BeingDebugged() {
+ NOTIMPLEMENTED();
+ return false;
+}
+
+#endif
+
+// We want to break into the debugger in Debug mode, and cause a crash dump in
+// Release mode. Breakpad behaves as follows:
+//
+// +-------+-----------------+-----------------+
+// | OS | Dump on SIGTRAP | Dump on SIGABRT |
+// +-------+-----------------+-----------------+
+// | Linux | N | Y |
+// | Mac | Y | N |
+// +-------+-----------------+-----------------+
+//
+// Thus we do the following:
+// Linux: Debug mode if a debugger is attached, send SIGTRAP; otherwise send
+// SIGABRT
+// Mac: Always send SIGTRAP.
+
+#if defined(ARCH_CPU_ARMEL)
+#define DEBUG_BREAK_ASM() asm("bkpt 0")
+#elif defined(ARCH_CPU_ARM64)
+#define DEBUG_BREAK_ASM() asm("brk 0")
+#elif defined(ARCH_CPU_MIPS_FAMILY)
+#define DEBUG_BREAK_ASM() asm("break 2")
+#elif defined(ARCH_CPU_X86_FAMILY)
+#define DEBUG_BREAK_ASM() asm("int3")
+#endif
+
+#if defined(NDEBUG) && !defined(OS_MACOSX) && !defined(OS_ANDROID)
+#define DEBUG_BREAK() abort()
+#elif defined(OS_NACL)
+// The NaCl verifier doesn't let us use int3. For now, we call abort(). We
+// should ask for advice from some NaCl experts about the optimum thing here.
+// http://code.google.com/p/nativeclient/issues/detail?id=645
+#define DEBUG_BREAK() abort()
+#elif !defined(OS_MACOSX)
+// Though Android has a "helpful" process called debuggerd to catch native
+// signals on the general assumption that they are fatal errors. If no debugger
+// is attached, we call abort since Breakpad needs SIGABRT to create a dump.
+// When debugger is attached, for ARM platform the bkpt instruction appears
+// to cause SIGBUS which is trapped by debuggerd, and we've had great
+// difficulty continuing in a debugger once we stop from SIG triggered by native
+// code, use GDB to set |go| to 1 to resume execution; for X86 platform, use
+// "int3" to set up a breakpoint and raise SIGTRAP.
+//
+// On other POSIX architectures, except Mac OS X, we use the same logic to
+// ensure that breakpad creates a dump on crashes while it is still possible to
+// use a debugger.
+namespace {
+void DebugBreak() {
+ if (!BeingDebugged()) {
+ abort();
+ } else {
+#if defined(DEBUG_BREAK_ASM)
+ DEBUG_BREAK_ASM();
+#else
+ volatile int go = 0;
+ while (!go) {
+ base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(100));
+ }
+#endif
+ }
+}
+} // namespace
+#define DEBUG_BREAK() DebugBreak()
+#elif defined(DEBUG_BREAK_ASM)
+#define DEBUG_BREAK() DEBUG_BREAK_ASM()
+#else
+#error "Don't know how to debug break on this architecture/OS"
+#endif
+
+void BreakDebugger() {
+ // NOTE: This code MUST be async-signal safe (it's used by in-process
+ // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+ // Linker's ICF feature may merge this function with other functions with the
+ // same definition (e.g. any function whose sole job is to call abort()) and
+ // it may confuse the crash report processing system. http://crbug.com/508489
+ static int static_variable_to_make_this_function_unique = 0;
+ base::debug::Alias(&static_variable_to_make_this_function_unique);
+
+ DEBUG_BREAK();
+#if defined(OS_ANDROID) && !defined(OFFICIAL_BUILD)
+ // For Android development we always build release (debug builds are
+ // unmanageably large), so the unofficial build is used for debugging. It is
+ // helpful to be able to insert BreakDebugger() statements in the source,
+ // attach the debugger, inspect the state of the program and then resume it by
+ // setting the 'go' variable above.
+#elif defined(NDEBUG)
+ // Terminate the program after signaling the debug break.
+ _exit(1);
+#endif
+}
+
+} // namespace debug
+} // namespace base
diff --git a/libchrome/base/debug/debugger_unittest.cc b/libchrome/base/debug/debugger_unittest.cc
new file mode 100644
index 0000000..0a5a039
--- /dev/null
+++ b/libchrome/base/debug/debugger_unittest.cc
@@ -0,0 +1,43 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/debugger.h"
+
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+void CrashWithBreakDebugger() {
+ base::debug::SetSuppressDebugUI(false);
+ base::debug::BreakDebugger();
+
+#if defined(OS_WIN)
+ // This should not be executed.
+ _exit(125);
+#endif
+}
+#endif // defined(GTEST_HAS_DEATH_TEST)
+
+} // namespace
+
+// Death tests misbehave on Android.
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+TEST(Debugger, CrashAtBreakpoint) {
+ EXPECT_DEATH(CrashWithBreakDebugger(), "");
+}
+
+#if defined(OS_WIN)
+TEST(Debugger, DoesntExecuteBeyondBreakpoint) {
+ EXPECT_EXIT(CrashWithBreakDebugger(),
+ ::testing::ExitedWithCode(0x80000003), "");
+}
+#endif // defined(OS_WIN)
+
+#else // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+TEST(Debugger, NoTest) {
+}
+#endif // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
diff --git a/libchrome/base/debug/debugging_flags.h b/libchrome/base/debug/debugging_flags.h
new file mode 100644
index 0000000..1ea435f
--- /dev/null
+++ b/libchrome/base/debug/debugging_flags.h
@@ -0,0 +1,11 @@
+// Generated by build/write_buildflag_header.py
+// From "base_debugging_flags"
+
+#ifndef BASE_DEBUG_DEBUGGING_FLAGS_H_
+#define BASE_DEBUG_DEBUGGING_FLAGS_H_
+
+#include "build/buildflag.h"
+
+#define BUILDFLAG_INTERNAL_ENABLE_PROFILING() (0)
+
+#endif // BASE_DEBUG_DEBUGGING_FLAGS_H_
diff --git a/libchrome/base/debug/leak_annotations.h b/libchrome/base/debug/leak_annotations.h
new file mode 100644
index 0000000..dc50246
--- /dev/null
+++ b/libchrome/base/debug/leak_annotations.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_LEAK_ANNOTATIONS_H_
+#define BASE_DEBUG_LEAK_ANNOTATIONS_H_
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+// This file defines macros which can be used to annotate intentional memory
+// leaks. Support for annotations is implemented in LeakSanitizer. Annotated
+// objects will be treated as a source of live pointers, i.e. any heap objects
+// reachable by following pointers from an annotated object will not be
+// reported as leaks.
+//
+// ANNOTATE_SCOPED_MEMORY_LEAK: all allocations made in the current scope
+// will be annotated as leaks.
+// ANNOTATE_LEAKING_OBJECT_PTR(X): the heap object referenced by pointer X will
+// be annotated as a leak.
+
+#if defined(LEAK_SANITIZER) && !defined(OS_NACL)
+
+#include <sanitizer/lsan_interface.h>
+
+class ScopedLeakSanitizerDisabler {
+ public:
+ ScopedLeakSanitizerDisabler() { __lsan_disable(); }
+ ~ScopedLeakSanitizerDisabler() { __lsan_enable(); }
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ScopedLeakSanitizerDisabler);
+};
+
+#define ANNOTATE_SCOPED_MEMORY_LEAK \
+ ScopedLeakSanitizerDisabler leak_sanitizer_disabler; static_cast<void>(0)
+
+#define ANNOTATE_LEAKING_OBJECT_PTR(X) __lsan_ignore_object(X);
+
+#else
+
+#define ANNOTATE_SCOPED_MEMORY_LEAK ((void)0)
+#define ANNOTATE_LEAKING_OBJECT_PTR(X) ((void)0)
+
+#endif
+
+#endif // BASE_DEBUG_LEAK_ANNOTATIONS_H_
diff --git a/libchrome/base/debug/leak_tracker.h b/libchrome/base/debug/leak_tracker.h
new file mode 100644
index 0000000..9dd1622
--- /dev/null
+++ b/libchrome/base/debug/leak_tracker.h
@@ -0,0 +1,140 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_LEAK_TRACKER_H_
+#define BASE_DEBUG_LEAK_TRACKER_H_
+
+#include <stddef.h>
+
+#include "build/build_config.h"
+
+// Only enable leak tracking in non-uClibc debug builds.
+#if !defined(NDEBUG) && !defined(__UCLIBC__)
+#define ENABLE_LEAK_TRACKER
+#endif
+
+#ifdef ENABLE_LEAK_TRACKER
+#include "base/containers/linked_list.h"
+#include "base/debug/stack_trace.h"
+#include "base/logging.h"
+#endif // ENABLE_LEAK_TRACKER
+
+// LeakTracker is a helper to verify that all instances of a class
+// have been destroyed.
+//
+// It is particularly useful for classes that are bound to a single thread --
+// before destroying that thread, one can check that there are no remaining
+// instances of that class.
+//
+// For example, to enable leak tracking for class net::URLRequest, start by
+// adding a member variable of type LeakTracker<net::URLRequest>.
+//
+// class URLRequest {
+// ...
+// private:
+// base::LeakTracker<URLRequest> leak_tracker_;
+// };
+//
+//
+// Next, when we believe all instances of net::URLRequest have been deleted:
+//
+// LeakTracker<net::URLRequest>::CheckForLeaks();
+//
+// Should the check fail (because there are live instances of net::URLRequest),
+// then the allocation callstack for each leaked instance is dumped to
+// the error log.
+//
+// If ENABLE_LEAK_TRACKER is not defined, then the check has no effect.
+
+namespace base {
+namespace debug {
+
+#ifndef ENABLE_LEAK_TRACKER
+
+// If leak tracking is disabled, do nothing.
+template<typename T>
+class LeakTracker {
+ public:
+ ~LeakTracker() {}
+ static void CheckForLeaks() {}
+ static int NumLiveInstances() { return -1; }
+};
+
+#else
+
+// If leak tracking is enabled we track where the object was allocated from.
+
+template<typename T>
+class LeakTracker : public LinkNode<LeakTracker<T> > {
+ public:
+ LeakTracker() {
+ instances()->Append(this);
+ }
+
+ ~LeakTracker() {
+ this->RemoveFromList();
+ }
+
+ static void CheckForLeaks() {
+ // Walk the allocation list and print each entry it contains.
+ size_t count = 0;
+
+ // Copy the first 3 leak allocation callstacks onto the stack.
+ // This way if we hit the CHECK() in a release build, the leak
+ // information will be available in mini-dump.
+ const size_t kMaxStackTracesToCopyOntoStack = 3;
+ StackTrace stacktraces[kMaxStackTracesToCopyOntoStack];
+
+ for (LinkNode<LeakTracker<T> >* node = instances()->head();
+ node != instances()->end();
+ node = node->next()) {
+ StackTrace& allocation_stack = node->value()->allocation_stack_;
+
+ if (count < kMaxStackTracesToCopyOntoStack)
+ stacktraces[count] = allocation_stack;
+
+ ++count;
+ if (LOG_IS_ON(ERROR)) {
+ LOG_STREAM(ERROR) << "Leaked " << node << " which was allocated by:";
+ allocation_stack.OutputToStream(&LOG_STREAM(ERROR));
+ }
+ }
+
+ CHECK_EQ(0u, count);
+
+ // Hack to keep |stacktraces| and |count| alive (so compiler
+ // doesn't optimize it out, and it will appear in mini-dumps).
+ if (count == 0x1234) {
+ for (size_t i = 0; i < kMaxStackTracesToCopyOntoStack; ++i)
+ stacktraces[i].Print();
+ }
+ }
+
+ static int NumLiveInstances() {
+ // Walk the allocation list and count how many entries it has.
+ int count = 0;
+ for (LinkNode<LeakTracker<T> >* node = instances()->head();
+ node != instances()->end();
+ node = node->next()) {
+ ++count;
+ }
+ return count;
+ }
+
+ private:
+ // Each specialization of LeakTracker gets its own static storage.
+ static LinkedList<LeakTracker<T> >* instances() {
+ static LinkedList<LeakTracker<T> > list;
+ return &list;
+ }
+
+ StackTrace allocation_stack_;
+};
+
+#endif // ENABLE_LEAK_TRACKER
+
+} // namespace debug
+} // namespace base
+
+#endif // BASE_DEBUG_LEAK_TRACKER_H_
diff --git a/libchrome/base/debug/leak_tracker_unittest.cc b/libchrome/base/debug/leak_tracker_unittest.cc
new file mode 100644
index 0000000..8b4c568
--- /dev/null
+++ b/libchrome/base/debug/leak_tracker_unittest.cc
@@ -0,0 +1,115 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/leak_tracker.h"
+
+#include <memory>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace debug {
+
+namespace {
+
+class ClassA {
+ private:
+ LeakTracker<ClassA> leak_tracker_;
+};
+
+class ClassB {
+ private:
+ LeakTracker<ClassB> leak_tracker_;
+};
+
+#ifndef ENABLE_LEAK_TRACKER
+
+// If leak tracking is disabled, we should do nothing.
+TEST(LeakTrackerTest, NotEnabled) {
+ EXPECT_EQ(-1, LeakTracker<ClassA>::NumLiveInstances());
+ EXPECT_EQ(-1, LeakTracker<ClassB>::NumLiveInstances());
+
+ // Use std::unique_ptr so the compiler doesn't complain about unused variables.
+ std::unique_ptr<ClassA> a1(new ClassA);
+ std::unique_ptr<ClassB> b1(new ClassB);
+ std::unique_ptr<ClassB> b2(new ClassB);
+
+ EXPECT_EQ(-1, LeakTracker<ClassA>::NumLiveInstances());
+ EXPECT_EQ(-1, LeakTracker<ClassB>::NumLiveInstances());
+}
+
+#else
+
+TEST(LeakTrackerTest, Basic) {
+ {
+ ClassA a1;
+
+ EXPECT_EQ(1, LeakTracker<ClassA>::NumLiveInstances());
+ EXPECT_EQ(0, LeakTracker<ClassB>::NumLiveInstances());
+
+ ClassB b1;
+ ClassB b2;
+
+ EXPECT_EQ(1, LeakTracker<ClassA>::NumLiveInstances());
+ EXPECT_EQ(2, LeakTracker<ClassB>::NumLiveInstances());
+
+ std::unique_ptr<ClassA> a2(new ClassA);
+
+ EXPECT_EQ(2, LeakTracker<ClassA>::NumLiveInstances());
+ EXPECT_EQ(2, LeakTracker<ClassB>::NumLiveInstances());
+
+ a2.reset();
+
+ EXPECT_EQ(1, LeakTracker<ClassA>::NumLiveInstances());
+ EXPECT_EQ(2, LeakTracker<ClassB>::NumLiveInstances());
+ }
+
+ EXPECT_EQ(0, LeakTracker<ClassA>::NumLiveInstances());
+ EXPECT_EQ(0, LeakTracker<ClassB>::NumLiveInstances());
+}
+
+// Try some orderings of create/remove to hit different cases in the linked-list
+// assembly.
+TEST(LeakTrackerTest, LinkedList) {
+ EXPECT_EQ(0, LeakTracker<ClassB>::NumLiveInstances());
+
+ std::unique_ptr<ClassA> a1(new ClassA);
+ std::unique_ptr<ClassA> a2(new ClassA);
+ std::unique_ptr<ClassA> a3(new ClassA);
+ std::unique_ptr<ClassA> a4(new ClassA);
+
+ EXPECT_EQ(4, LeakTracker<ClassA>::NumLiveInstances());
+
+ // Remove the head of the list (a1).
+ a1.reset();
+ EXPECT_EQ(3, LeakTracker<ClassA>::NumLiveInstances());
+
+ // Remove the tail of the list (a4).
+ a4.reset();
+ EXPECT_EQ(2, LeakTracker<ClassA>::NumLiveInstances());
+
+ // Append to the new tail of the list (a3).
+ std::unique_ptr<ClassA> a5(new ClassA);
+ EXPECT_EQ(3, LeakTracker<ClassA>::NumLiveInstances());
+
+ a2.reset();
+ a3.reset();
+
+ EXPECT_EQ(1, LeakTracker<ClassA>::NumLiveInstances());
+
+ a5.reset();
+ EXPECT_EQ(0, LeakTracker<ClassA>::NumLiveInstances());
+}
+
+TEST(LeakTrackerTest, NoOpCheckForLeaks) {
+ // There are no live instances of ClassA, so this should do nothing.
+ LeakTracker<ClassA>::CheckForLeaks();
+}
+
+#endif // ENABLE_LEAK_TRACKER
+
+} // namespace
+
+} // namespace debug
+} // namespace base
diff --git a/libchrome/base/debug/proc_maps_linux.h b/libchrome/base/debug/proc_maps_linux.h
new file mode 100644
index 0000000..38e9231
--- /dev/null
+++ b/libchrome/base/debug/proc_maps_linux.h
@@ -0,0 +1,91 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_PROC_MAPS_LINUX_H_
+#define BASE_DEBUG_PROC_MAPS_LINUX_H_
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace debug {
+
+// Describes a region of mapped memory and the path of the file mapped.
+struct MappedMemoryRegion {
+ enum Permission {
+ READ = 1 << 0,
+ WRITE = 1 << 1,
+ EXECUTE = 1 << 2,
+ PRIVATE = 1 << 3, // If set, region is private, otherwise it is shared.
+ };
+
+ // The address range [start,end) of mapped memory.
+ uintptr_t start;
+ uintptr_t end;
+
+ // Byte offset into |path| of the range mapped into memory.
+ unsigned long long offset;
+
+ // Bitmask of read/write/execute/private/shared permissions.
+ uint8_t permissions;
+
+ // Name of the file mapped into memory.
+ //
+ // NOTE: path names aren't guaranteed to point at valid files. For example,
+ // "[heap]" and "[stack]" are used to represent the location of the process'
+ // heap and stack, respectively.
+ std::string path;
+};
+
+// Reads the data from /proc/self/maps and stores the result in |proc_maps|.
+// Returns true if successful, false otherwise.
+//
+// There is *NO* guarantee that the resulting contents will be free of
+// duplicates or even contain valid entries by time the method returns.
+//
+//
+// THE GORY DETAILS
+//
+// Did you know it's next-to-impossible to atomically read the whole contents
+// of /proc/<pid>/maps? You would think that if we passed in a large-enough
+// buffer to read() that It Should Just Work(tm), but sadly that's not the case.
+//
+// Linux's procfs uses seq_file [1] for handling iteration, text formatting,
+// and dealing with resulting data that is larger than the size of a page. That
+// last bit is especially important because it means that seq_file will never
+// return more than the size of a page in a single call to read().
+//
+// Unfortunately for a program like Chrome the size of /proc/self/maps is
+// larger than the size of page so we're forced to call read() multiple times.
+// If the virtual memory table changed in any way between calls to read() (e.g.,
+// a different thread calling mprotect()), it can make seq_file generate
+// duplicate entries or skip entries.
+//
+// Even if seq_file was changed to keep flushing the contents of its page-sized
+// buffer to the usermode buffer inside a single call to read(), it has to
+// release its lock on the virtual memory table to handle page faults while
+// copying data to usermode. This puts us in the same situation where the table
+// can change while we're copying data.
+//
+// Alternatives such as fork()-and-suspend-the-parent-while-child-reads were
+// attempted, but they present more subtle problems than it's worth. Depending
+// on your use case your best bet may be to read /proc/<pid>/maps prior to
+// starting other threads.
+//
+// [1] http://kernelnewbies.org/Documents/SeqFileHowTo
+BASE_EXPORT bool ReadProcMaps(std::string* proc_maps);
+
+// Parses /proc/<pid>/maps input data and stores in |regions|. Returns true
+// and updates |regions| if and only if all of |input| was successfully parsed.
+BASE_EXPORT bool ParseProcMaps(const std::string& input,
+ std::vector<MappedMemoryRegion>* regions);
+
+} // namespace debug
+} // namespace base
+
+#endif // BASE_DEBUG_PROC_MAPS_LINUX_H_
diff --git a/libchrome/base/debug/stack_trace.cc b/libchrome/base/debug/stack_trace.cc
new file mode 100644
index 0000000..ac0ead7
--- /dev/null
+++ b/libchrome/base/debug/stack_trace.cc
@@ -0,0 +1,147 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/stack_trace.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <sstream>
+
+#include "base/macros.h"
+
+#if HAVE_TRACE_STACK_FRAME_POINTERS && defined(OS_ANDROID)
+#include <pthread.h>
+#include "base/process/process_handle.h"
+#include "base/threading/platform_thread.h"
+#endif
+
+namespace base {
+namespace debug {
+
+StackTrace::StackTrace(const void* const* trace, size_t count) {
+ count = std::min(count, arraysize(trace_));
+ if (count)
+ memcpy(trace_, trace, count * sizeof(trace_[0]));
+ count_ = count;
+}
+
+StackTrace::~StackTrace() {
+}
+
+const void *const *StackTrace::Addresses(size_t* count) const {
+ *count = count_;
+ if (count_)
+ return trace_;
+ return NULL;
+}
+
+std::string StackTrace::ToString() const {
+ std::stringstream stream;
+#if !defined(__UCLIBC__)
+ OutputToStream(&stream);
+#endif
+ return stream.str();
+}
+
+#if HAVE_TRACE_STACK_FRAME_POINTERS
+
+#if defined(OS_ANDROID)
+
+static uintptr_t GetStackEnd() {
+ // Bionic reads proc/maps on every call to pthread_getattr_np() when called
+ // from the main thread. So we need to cache end of stack in that case to get
+ // acceptable performance.
+ // For all other threads pthread_getattr_np() is fast enough as it just reads
+ // values from its pthread_t argument.
+ static uintptr_t main_stack_end = 0;
+
+ bool is_main_thread = GetCurrentProcId() == PlatformThread::CurrentId();
+
+ if (is_main_thread && main_stack_end) {
+ return main_stack_end;
+ }
+
+ uintptr_t stack_begin = 0;
+ size_t stack_size = 0;
+ pthread_attr_t attributes;
+ int error = pthread_getattr_np(pthread_self(), &attributes);
+ if (!error) {
+ error = pthread_attr_getstack(
+ &attributes,
+ reinterpret_cast<void**>(&stack_begin),
+ &stack_size);
+ pthread_attr_destroy(&attributes);
+ }
+ DCHECK(!error);
+
+ uintptr_t stack_end = stack_begin + stack_size;
+ if (is_main_thread) {
+ main_stack_end = stack_end;
+ }
+ return stack_end;
+}
+
+#endif // defined(OS_ANDROID)
+
+size_t TraceStackFramePointers(const void** out_trace,
+ size_t max_depth,
+ size_t skip_initial) {
+ // Usage of __builtin_frame_address() enables frame pointers in this
+ // function even if they are not enabled globally. So 'sp' will always
+ // be valid.
+ uintptr_t sp = reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
+
+#if defined(OS_ANDROID)
+ uintptr_t stack_end = GetStackEnd();
+#endif
+
+ size_t depth = 0;
+ while (depth < max_depth) {
+#if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
+ // GCC and LLVM generate slightly different frames on ARM, see
+ // https://llvm.org/bugs/show_bug.cgi?id=18505 - LLVM generates
+ // x86-compatible frame, while GCC needs adjustment.
+ sp -= sizeof(uintptr_t);
+#endif
+
+#if defined(OS_ANDROID)
+ // Both sp[0] and sp[1] must be valid.
+ if (sp + 2 * sizeof(uintptr_t) > stack_end) {
+ break;
+ }
+#endif
+
+ if (skip_initial != 0) {
+ skip_initial--;
+ } else {
+ out_trace[depth++] = reinterpret_cast<const void**>(sp)[1];
+ }
+
+ // Find out next frame pointer
+ // (heuristics are from TCMalloc's stacktrace functions)
+ {
+ uintptr_t next_sp = reinterpret_cast<const uintptr_t*>(sp)[0];
+
+ // With the stack growing downwards, older stack frame must be
+ // at a greater address than the current one.
+ if (next_sp <= sp) break;
+
+ // Assume stack frames larger than 100,000 bytes are bogus.
+ if (next_sp - sp > 100000) break;
+
+ // Check alignment.
+ if (sp & (sizeof(void*) - 1)) break;
+
+ sp = next_sp;
+ }
+ }
+
+ return depth;
+}
+
+#endif // HAVE_TRACE_STACK_FRAME_POINTERS
+
+} // namespace debug
+} // namespace base
diff --git a/libchrome/base/debug/stack_trace.h b/libchrome/base/debug/stack_trace.h
new file mode 100644
index 0000000..23e7b51
--- /dev/null
+++ b/libchrome/base/debug/stack_trace.h
@@ -0,0 +1,139 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_STACK_TRACE_H_
+#define BASE_DEBUG_STACK_TRACE_H_
+
+#include <stddef.h>
+
+#include <iosfwd>
+#include <string>
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+#include <unistd.h>
+#endif
+
+#if defined(OS_WIN)
+struct _EXCEPTION_POINTERS;
+struct _CONTEXT;
+#endif
+
+#if defined(OS_POSIX) && ( \
+ defined(__i386__) || defined(__x86_64__) || \
+ (defined(__arm__) && !defined(__thumb__)))
+#define HAVE_TRACE_STACK_FRAME_POINTERS 1
+#else
+#define HAVE_TRACE_STACK_FRAME_POINTERS 0
+#endif
+
+namespace base {
+namespace debug {
+
+// Enables stack dump to console output on exception and signals.
+// When enabled, the process will quit immediately. This is meant to be used in
+// unit_tests only! This is not thread-safe: only call from main thread.
+// In sandboxed processes, this has to be called before the sandbox is turned
+// on.
+// Calling this function on Linux opens /proc/self/maps and caches its
+// contents. In non-official builds, this function also opens the object files
+// that are loaded in memory and caches their file descriptors (this cannot be
+// done in official builds because it has security implications).
+BASE_EXPORT bool EnableInProcessStackDumping();
+
+// A stacktrace can be helpful in debugging. For example, you can include a
+// stacktrace member in an object (probably around #ifndef NDEBUG) so that you
+// can later see where the given object was created from.
+class BASE_EXPORT StackTrace {
+ public:
+  // Creates a stacktrace from the current location.
+  StackTrace();
+
+  // Creates a stacktrace from an existing array of instruction
+  // pointers (such as returned by Addresses()). |count| will be
+  // trimmed to |kMaxTraces|.
+  StackTrace(const void* const* trace, size_t count);
+
+#if defined(OS_WIN)
+  // Creates a stacktrace for an exception.
+  // Note: this function will throw an import not found (StackWalk64) exception
+  // on system without dbghelp 5.1.
+  StackTrace(_EXCEPTION_POINTERS* exception_pointers);
+  StackTrace(const _CONTEXT* context);
+#endif
+
+  // Copying and assignment are allowed with the default functions.
+
+  ~StackTrace();
+
+  // Gets an array of instruction pointer values. |*count| will be set to the
+  // number of elements in the returned array.
+  const void* const* Addresses(size_t* count) const;
+
+  // Prints the stack trace to stderr.
+  void Print() const;
+
+#if !defined(__UCLIBC__)
+  // Resolves backtrace to symbols and writes them to the stream.
+  void OutputToStream(std::ostream* os) const;
+#endif
+
+  // Resolves backtrace to symbols and returns as string.
+  std::string ToString() const;
+
+ private:
+#if defined(OS_WIN)
+  void InitTrace(const _CONTEXT* context_record);
+#endif
+
+  // From http://msdn.microsoft.com/en-us/library/bb204633.aspx,
+  // the sum of FramesToSkip and FramesToCapture must be less than 63,
+  // so set it to 62. Even if on POSIX it could be a larger value, it usually
+  // doesn't give much more information.
+  static const int kMaxTraces = 62;
+
+  // The captured instruction pointers; only the first |count_| are valid.
+  void* trace_[kMaxTraces];
+
+  // The number of valid frames in |trace_|.
+  size_t count_;
+};
+
+#if HAVE_TRACE_STACK_FRAME_POINTERS
+// Traces the stack by using frame pointers. This function is faster but less
+// reliable than StackTrace. It should work for debug and profiling builds,
+// but not for release builds (although there are some exceptions).
+//
+// Writes at most |max_depth| frames (instruction pointers) into |out_trace|
+// after skipping |skip_initial| frames. Note that the function itself is not
+// added to the trace so |skip_initial| should be 0 in most cases.
+// Returns number of frames written.
+BASE_EXPORT size_t TraceStackFramePointers(const void** out_trace,
+ size_t max_depth,
+ size_t skip_initial);
+#endif // HAVE_TRACE_STACK_FRAME_POINTERS
+
+namespace internal {
+
+#if defined(OS_POSIX) && !defined(OS_ANDROID)
+// POSIX doesn't define any async-signal safe function for converting
+// an integer to ASCII. We'll have to define our own version.
+// itoa_r() converts a (signed) integer to ASCII. It returns "buf", if the
+// conversion was successful or NULL otherwise. It never writes more than "sz"
+// bytes. Output will be truncated as needed, and a NUL character is always
+// appended.
+BASE_EXPORT char *itoa_r(intptr_t i,
+ char *buf,
+ size_t sz,
+ int base,
+ size_t padding);
+#endif // defined(OS_POSIX) && !defined(OS_ANDROID)
+
+} // namespace internal
+
+} // namespace debug
+} // namespace base
+
+#endif // BASE_DEBUG_STACK_TRACE_H_
diff --git a/libchrome/base/debug/stack_trace_posix.cc b/libchrome/base/debug/stack_trace_posix.cc
new file mode 100644
index 0000000..3c0299c
--- /dev/null
+++ b/libchrome/base/debug/stack_trace_posix.cc
@@ -0,0 +1,819 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/stack_trace.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <map>
+#include <memory>
+#include <ostream>
+#include <string>
+#include <vector>
+
+#if defined(__GLIBCXX__)
+#include <cxxabi.h>
+#endif
+#if !defined(__UCLIBC__)
+#include <execinfo.h>
+#endif
+
+#if defined(OS_MACOSX)
+#include <AvailabilityMacros.h>
+#endif
+
+#include "base/debug/debugger.h"
+#include "base/debug/proc_maps_linux.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/free_deleter.h"
+#include "base/memory/singleton.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/string_number_conversions.h"
+#include "build/build_config.h"
+
+#if defined(USE_SYMBOLIZE)
+#error "symbolize support was removed from libchrome"
+#endif
+
+namespace base {
+namespace debug {
+
+namespace {
+
+volatile sig_atomic_t in_signal_handler = 0;
+
+#if !defined(USE_SYMBOLIZE) && defined(__GLIBCXX__)
+// The prefix used for mangled symbols, per the Itanium C++ ABI:
+// http://www.codesourcery.com/cxx-abi/abi.html#mangling
+const char kMangledSymbolPrefix[] = "_Z";
+
+// Characters that can be used for symbols, generated by Ruby:
+// (('a'..'z').to_a+('A'..'Z').to_a+('0'..'9').to_a + ['_']).join
+const char kSymbolCharacters[] =
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
+#endif // !defined(USE_SYMBOLIZE) && defined(__GLIBCXX__)
+
+#if !defined(USE_SYMBOLIZE)
+// Demangles C++ symbols in the given text. Example:
+//
+// "out/Debug/base_unittests(_ZN10StackTraceC1Ev+0x20) [0x817778c]"
+// =>
+// "out/Debug/base_unittests(StackTrace::StackTrace()+0x20) [0x817778c]"
+#if defined(__GLIBCXX__) && !defined(__UCLIBC__)
+// Best-effort, in-place demangling: every "_Z..." candidate found in |text|
+// is fed to __cxa_demangle; candidates that fail to demangle are left as-is.
+void DemangleSymbols(std::string* text) {
+  // Note: code in this function is NOT async-signal safe (std::string uses
+  // malloc internally).
+  std::string::size_type search_from = 0;
+  while (search_from < text->size()) {
+    // Look for the start of a mangled symbol, from search_from.
+    std::string::size_type mangled_start =
+        text->find(kMangledSymbolPrefix, search_from);
+    if (mangled_start == std::string::npos) {
+      break;  // Mangled symbol not found.
+    }
+
+    // Look for the end of the mangled symbol.
+    std::string::size_type mangled_end =
+        text->find_first_not_of(kSymbolCharacters, mangled_start);
+    if (mangled_end == std::string::npos) {
+      mangled_end = text->size();
+    }
+    std::string mangled_symbol =
+        text->substr(mangled_start, mangled_end - mangled_start);
+
+    // Try to demangle the mangled symbol candidate.
+    int status = 0;
+    std::unique_ptr<char, base::FreeDeleter> demangled_symbol(
+        abi::__cxa_demangle(mangled_symbol.c_str(), NULL, 0, &status));
+    if (status == 0) {  // Demangling is successful.
+      // Remove the mangled symbol.
+      text->erase(mangled_start, mangled_end - mangled_start);
+      // Insert the demangled symbol.
+      text->insert(mangled_start, demangled_symbol.get());
+      // Next time, we'll start right after the demangled symbol we inserted.
+      search_from = mangled_start + strlen(demangled_symbol.get());
+    } else {
+      // Failed to demangle.  Retry after the "_Z" we just found.
+      search_from = mangled_start + 2;
+    }
+  }
+}
+#elif !defined(__UCLIBC__)
+void DemangleSymbols(std::string* /* text */) {}
+#endif // defined(__GLIBCXX__) && !defined(__UCLIBC__)
+
+#endif // !defined(USE_SYMBOLIZE)
+
+// Sink interface for chunks of backtrace text. Implementations used from the
+// signal handler (see PrintBacktraceOutputHandler) must be async-signal safe.
+class BacktraceOutputHandler {
+ public:
+  // Consumes one NUL-terminated chunk of output.
+  virtual void HandleOutput(const char* output) = 0;
+
+ protected:
+  virtual ~BacktraceOutputHandler() {}
+};
+
+#if defined(USE_SYMBOLIZE) || !defined(__UCLIBC__)
+// Writes |pointer| as "0x"-prefixed hex to |handler|. Async-signal safe
+// (uses itoa_r on a stack buffer; no allocation).
+void OutputPointer(void* pointer, BacktraceOutputHandler* handler) {
+  // This should be more than enough to store a 64-bit number in hex:
+  // 16 hex digits + 1 for null-terminator.
+  char buf[17] = { '\0' };
+  handler->HandleOutput("0x");
+  internal::itoa_r(reinterpret_cast<intptr_t>(pointer),
+                   buf, sizeof(buf), 16, 12);
+  handler->HandleOutput(buf);
+}
+#endif // defined(USE_SYMBOLIZE) || !defined(__UCLIBC__)
+
+#if defined(USE_SYMBOLIZE)
+// Writes |frame_id| as a "#"-prefixed decimal index to |handler|.
+void OutputFrameId(intptr_t frame_id, BacktraceOutputHandler* handler) {
+  // Max unsigned 64-bit number in decimal has 20 digits (18446744073709551615).
+  // Hence, 30 digits should be more than enough to represent it in decimal
+  // (including the null-terminator).
+  char buf[30] = { '\0' };
+  handler->HandleOutput("#");
+  internal::itoa_r(frame_id, buf, sizeof(buf), 10, 1);
+  handler->HandleOutput(buf);
+}
+#endif // defined(USE_SYMBOLIZE)
+
+#if !defined(__UCLIBC__)
+// Sends |size| frames of |trace| to |handler|, symbolized when possible.
+// When called outside a signal handler, frames are resolved with
+// backtrace_symbols() and demangled; inside a signal handler only raw
+// addresses are printed.
+void ProcessBacktrace(void *const * trace,
+                      size_t size,
+                      BacktraceOutputHandler* handler) {
+  (void)trace;  // unused based on build context below.
+  (void)size;  // unused based on build context below.
+  (void)handler;  // unused based on build context below.
+  // NOTE: This code MUST be async-signal safe (it's used by in-process
+  // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+#if defined(USE_SYMBOLIZE)
+  for (size_t i = 0; i < size; ++i) {
+    OutputFrameId(i, handler);
+    handler->HandleOutput(" ");
+    OutputPointer(trace[i], handler);
+    handler->HandleOutput(" ");
+
+    char buf[1024] = { '\0' };
+
+    // Subtract by one as return address of function may be in the next
+    // function when a function is annotated as noreturn.
+    void* address = static_cast<char*>(trace[i]) - 1;
+    if (google::Symbolize(address, buf, sizeof(buf)))
+      handler->HandleOutput(buf);
+    else
+      handler->HandleOutput("<unknown>");
+
+    handler->HandleOutput("\n");
+  }
+#else
+  bool printed = false;
+
+  // Below part is async-signal unsafe (uses malloc), so execute it only
+  // when we are not executing the signal handler.
+  if (in_signal_handler == 0) {
+    std::unique_ptr<char*, FreeDeleter> trace_symbols(
+        backtrace_symbols(trace, size));
+    if (trace_symbols.get()) {
+      for (size_t i = 0; i < size; ++i) {
+        std::string trace_symbol = trace_symbols.get()[i];
+        DemangleSymbols(&trace_symbol);
+        handler->HandleOutput(trace_symbol.c_str());
+        handler->HandleOutput("\n");
+      }
+
+      printed = true;
+    }
+  }
+
+  if (!printed) {
+    // Fallback: print raw addresses only (always async-signal safe).
+    for (size_t i = 0; i < size; ++i) {
+      handler->HandleOutput(" [");
+      OutputPointer(trace[i], handler);
+      handler->HandleOutput("]\n");
+    }
+  }
+#endif  // defined(USE_SYMBOLIZE)
+}
+#endif // !defined(__UCLIBC__)
+
+// Writes |output| to stderr with a raw write(2), retrying on EINTR.
+// Short writes and errors are deliberately ignored (best-effort logging).
+void PrintToStderr(const char* output) {
+  // NOTE: This code MUST be async-signal safe (it's used by in-process
+  // stack dumping signal handler). NO malloc or stdio is allowed here.
+  ignore_result(HANDLE_EINTR(write(STDERR_FILENO, output, strlen(output))));
+}
+
+// Signal handler installed by EnableInProcessStackDumping(). Prints the
+// signal number, a decoded si_code for SIGBUS/SIGFPE/SIGILL/SIGSEGV, the
+// faulting address, a stack trace and (on Linux x86) the CPU registers to
+// stderr, then terminates the process.
+void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
+  (void)void_context;  // unused depending on build context
+  // NOTE: This code MUST be async-signal safe.
+  // NO malloc or stdio is allowed here.
+
+  // Record the fact that we are in the signal handler now, so that the rest
+  // of StackTrace can behave in an async-signal-safe manner.
+  in_signal_handler = 1;
+
+  if (BeingDebugged())
+    BreakDebugger();
+
+  PrintToStderr("Received signal ");
+  char buf[1024] = { 0 };
+  internal::itoa_r(signal, buf, sizeof(buf), 10, 0);
+  PrintToStderr(buf);
+  if (signal == SIGBUS) {
+    if (info->si_code == BUS_ADRALN)
+      PrintToStderr(" BUS_ADRALN ");
+    else if (info->si_code == BUS_ADRERR)
+      PrintToStderr(" BUS_ADRERR ");
+    else if (info->si_code == BUS_OBJERR)
+      PrintToStderr(" BUS_OBJERR ");
+    else
+      PrintToStderr(" <unknown> ");
+  } else if (signal == SIGFPE) {
+    if (info->si_code == FPE_FLTDIV)
+      PrintToStderr(" FPE_FLTDIV ");
+    else if (info->si_code == FPE_FLTINV)
+      PrintToStderr(" FPE_FLTINV ");
+    else if (info->si_code == FPE_FLTOVF)
+      PrintToStderr(" FPE_FLTOVF ");
+    else if (info->si_code == FPE_FLTRES)
+      PrintToStderr(" FPE_FLTRES ");
+    else if (info->si_code == FPE_FLTSUB)
+      PrintToStderr(" FPE_FLTSUB ");
+    else if (info->si_code == FPE_FLTUND)
+      PrintToStderr(" FPE_FLTUND ");
+    else if (info->si_code == FPE_INTDIV)
+      PrintToStderr(" FPE_INTDIV ");
+    else if (info->si_code == FPE_INTOVF)
+      PrintToStderr(" FPE_INTOVF ");
+    else
+      PrintToStderr(" <unknown> ");
+  } else if (signal == SIGILL) {
+    if (info->si_code == ILL_BADSTK)
+      PrintToStderr(" ILL_BADSTK ");
+    else if (info->si_code == ILL_COPROC)
+      PrintToStderr(" ILL_COPROC ");
+    else if (info->si_code == ILL_ILLOPN)
+      PrintToStderr(" ILL_ILLOPN ");
+    else if (info->si_code == ILL_ILLADR)
+      PrintToStderr(" ILL_ILLADR ");
+    else if (info->si_code == ILL_ILLTRP)
+      PrintToStderr(" ILL_ILLTRP ");
+    else if (info->si_code == ILL_PRVOPC)
+      PrintToStderr(" ILL_PRVOPC ");
+    else if (info->si_code == ILL_PRVREG)
+      PrintToStderr(" ILL_PRVREG ");
+    else
+      PrintToStderr(" <unknown> ");
+  } else if (signal == SIGSEGV) {
+    if (info->si_code == SEGV_MAPERR)
+      PrintToStderr(" SEGV_MAPERR ");
+    else if (info->si_code == SEGV_ACCERR)
+      PrintToStderr(" SEGV_ACCERR ");
+    else
+      PrintToStderr(" <unknown> ");
+  }
+  // For faulting signals, also print the faulting address in hex.
+  if (signal == SIGBUS || signal == SIGFPE ||
+      signal == SIGILL || signal == SIGSEGV) {
+    internal::itoa_r(reinterpret_cast<intptr_t>(info->si_addr),
+                     buf, sizeof(buf), 16, 12);
+    PrintToStderr(buf);
+  }
+  PrintToStderr("\n");
+
+#if defined(CFI_ENFORCEMENT)
+  if (signal == SIGILL && info->si_code == ILL_ILLOPN) {
+    PrintToStderr(
+        "CFI: Most likely a control flow integrity violation; for more "
+        "information see:\n");
+    PrintToStderr(
+        "https://www.chromium.org/developers/testing/control-flow-integrity\n");
+  }
+#endif
+
+  debug::StackTrace().Print();
+
+  // Dump the CPU registers from the signal's ucontext (Linux x86 only).
+#if defined(OS_LINUX)
+#if ARCH_CPU_X86_FAMILY
+  ucontext_t* context = reinterpret_cast<ucontext_t*>(void_context);
+  const struct {
+    const char* label;
+    greg_t value;
+  } registers[] = {
+#if ARCH_CPU_32_BITS
+      { "  gs: ", context->uc_mcontext.gregs[REG_GS] },
+      { "  fs: ", context->uc_mcontext.gregs[REG_FS] },
+      { "  es: ", context->uc_mcontext.gregs[REG_ES] },
+      { "  ds: ", context->uc_mcontext.gregs[REG_DS] },
+      { " edi: ", context->uc_mcontext.gregs[REG_EDI] },
+      { " esi: ", context->uc_mcontext.gregs[REG_ESI] },
+      { " ebp: ", context->uc_mcontext.gregs[REG_EBP] },
+      { " esp: ", context->uc_mcontext.gregs[REG_ESP] },
+      { " ebx: ", context->uc_mcontext.gregs[REG_EBX] },
+      { " edx: ", context->uc_mcontext.gregs[REG_EDX] },
+      { " ecx: ", context->uc_mcontext.gregs[REG_ECX] },
+      { " eax: ", context->uc_mcontext.gregs[REG_EAX] },
+      { " trp: ", context->uc_mcontext.gregs[REG_TRAPNO] },
+      { " err: ", context->uc_mcontext.gregs[REG_ERR] },
+      { "  ip: ", context->uc_mcontext.gregs[REG_EIP] },
+      { "  cs: ", context->uc_mcontext.gregs[REG_CS] },
+      { " efl: ", context->uc_mcontext.gregs[REG_EFL] },
+      { " usp: ", context->uc_mcontext.gregs[REG_UESP] },
+      { "  ss: ", context->uc_mcontext.gregs[REG_SS] },
+#elif ARCH_CPU_64_BITS
+      { "  r8: ", context->uc_mcontext.gregs[REG_R8] },
+      { "  r9: ", context->uc_mcontext.gregs[REG_R9] },
+      { " r10: ", context->uc_mcontext.gregs[REG_R10] },
+      { " r11: ", context->uc_mcontext.gregs[REG_R11] },
+      { " r12: ", context->uc_mcontext.gregs[REG_R12] },
+      { " r13: ", context->uc_mcontext.gregs[REG_R13] },
+      { " r14: ", context->uc_mcontext.gregs[REG_R14] },
+      { " r15: ", context->uc_mcontext.gregs[REG_R15] },
+      { "  di: ", context->uc_mcontext.gregs[REG_RDI] },
+      { "  si: ", context->uc_mcontext.gregs[REG_RSI] },
+      { "  bp: ", context->uc_mcontext.gregs[REG_RBP] },
+      { "  bx: ", context->uc_mcontext.gregs[REG_RBX] },
+      { "  dx: ", context->uc_mcontext.gregs[REG_RDX] },
+      { "  ax: ", context->uc_mcontext.gregs[REG_RAX] },
+      { "  cx: ", context->uc_mcontext.gregs[REG_RCX] },
+      { "  sp: ", context->uc_mcontext.gregs[REG_RSP] },
+      { "  ip: ", context->uc_mcontext.gregs[REG_RIP] },
+      { " efl: ", context->uc_mcontext.gregs[REG_EFL] },
+      { " cgf: ", context->uc_mcontext.gregs[REG_CSGSFS] },
+      { " erf: ", context->uc_mcontext.gregs[REG_ERR] },
+      { " trp: ", context->uc_mcontext.gregs[REG_TRAPNO] },
+      { " msk: ", context->uc_mcontext.gregs[REG_OLDMASK] },
+      { " cr2: ", context->uc_mcontext.gregs[REG_CR2] },
+#endif  // ARCH_CPU_32_BITS
+  };
+
+#if ARCH_CPU_32_BITS
+  const int kRegisterPadding = 8;
+#elif ARCH_CPU_64_BITS
+  const int kRegisterPadding = 16;
+#endif
+
+  for (size_t i = 0; i < arraysize(registers); i++) {
+    PrintToStderr(registers[i].label);
+    internal::itoa_r(registers[i].value, buf, sizeof(buf),
+                     16, kRegisterPadding);
+    PrintToStderr(buf);
+
+    // Four registers per output line.
+    if ((i + 1) % 4 == 0)
+      PrintToStderr("\n");
+  }
+  PrintToStderr("\n");
+#endif  // ARCH_CPU_X86_FAMILY
+#endif  // defined(OS_LINUX)
+
+  PrintToStderr("[end of stack trace]\n");
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  if (::signal(signal, SIG_DFL) == SIG_ERR)
+    _exit(1);
+#else
+  // Non-Mac OSes should probably reraise the signal as well, but the Linux
+  // sandbox tests break on CrOS devices.
+  // https://code.google.com/p/chromium/issues/detail?id=551681
+  _exit(1);
+#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+}
+
+// BacktraceOutputHandler that forwards output chunks straight to stderr.
+// Safe for use inside the signal handler.
+class PrintBacktraceOutputHandler : public BacktraceOutputHandler {
+ public:
+  PrintBacktraceOutputHandler() {}
+
+  void HandleOutput(const char* output) override {
+    // NOTE: This code MUST be async-signal safe (it's used by in-process
+    // stack dumping signal handler). NO malloc or stdio is allowed here.
+    PrintToStderr(output);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(PrintBacktraceOutputHandler);
+};
+
+// BacktraceOutputHandler that appends output chunks to a caller-owned
+// std::ostream. NOT async-signal safe; for OutputToStream()/ToString().
+class StreamBacktraceOutputHandler : public BacktraceOutputHandler {
+ public:
+  explicit StreamBacktraceOutputHandler(std::ostream* os) : os_(os) {
+  }
+
+  void HandleOutput(const char* output) override { (*os_) << output; }
+
+ private:
+  // Not owned; must outlive this handler.
+  std::ostream* os_;
+
+  DISALLOW_COPY_AND_ASSIGN(StreamBacktraceOutputHandler);
+};
+
+// Called once from EnableInProcessStackDumping(), before the signal handlers
+// are installed, so that backtrace()'s one-time initialization (which may
+// allocate) does not happen inside a signal handler.
+void WarmUpBacktrace() {
+  // Warm up stack trace infrastructure. It turns out that on the first
+  // call glibc initializes some internal data structures using pthread_once,
+  // and even backtrace() can call malloc(), leading to hangs.
+  //
+  // Example stack trace snippet (with tcmalloc):
+  //
+  // #8  0x0000000000a173b5 in tc_malloc
+  //             at ./third_party/tcmalloc/chromium/src/debugallocation.cc:1161
+  // #9  0x00007ffff7de7900 in _dl_map_object_deps at dl-deps.c:517
+  // #10 0x00007ffff7ded8a9 in dl_open_worker at dl-open.c:262
+  // #11 0x00007ffff7de9176 in _dl_catch_error at dl-error.c:178
+  // #12 0x00007ffff7ded31a in _dl_open (file=0x7ffff625e298 "libgcc_s.so.1")
+  //             at dl-open.c:639
+  // #13 0x00007ffff6215602 in do_dlopen at dl-libc.c:89
+  // #14 0x00007ffff7de9176 in _dl_catch_error at dl-error.c:178
+  // #15 0x00007ffff62156c4 in dlerror_run at dl-libc.c:48
+  // #16 __GI___libc_dlopen_mode at dl-libc.c:165
+  // #17 0x00007ffff61ef8f5 in init
+  //             at ../sysdeps/x86_64/../ia64/backtrace.c:53
+  // #18 0x00007ffff6aad400 in pthread_once
+  //             at ../nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S:104
+  // #19 0x00007ffff61efa14 in __GI___backtrace
+  //             at ../sysdeps/x86_64/../ia64/backtrace.c:104
+  // #20 0x0000000000752a54 in base::debug::StackTrace::StackTrace
+  //             at base/debug/stack_trace_posix.cc:175
+  // #21 0x00000000007a4ae5 in
+  //             base::(anonymous namespace)::StackDumpSignalHandler
+  //             at base/process_util_posix.cc:172
+  // #22 <signal handler called>
+  StackTrace stack_trace;
+}
+
+} // namespace
+
+#if defined(USE_SYMBOLIZE)
+
+// class SandboxSymbolizeHelper.
+//
+// The purpose of this class is to prepare and install a "file open" callback
+// needed by the stack trace symbolization code
+// (base/third_party/symbolize/symbolize.h) so that it can function properly
+// in a sandboxed process. The caveat is that this class must be instantiated
+// before the sandboxing is enabled so that it can get the chance to open all
+// the object files that are loaded in the virtual address space of the current
+// process.
+// NOTE(review): this class is guarded by USE_SYMBOLIZE, which this import
+// rejects with an #error near the top of the file, so it is compiled out;
+// kept to minimize divergence from upstream Chromium.
+class SandboxSymbolizeHelper {
+ public:
+  // Returns the singleton instance.
+  static SandboxSymbolizeHelper* GetInstance() {
+    return Singleton<SandboxSymbolizeHelper>::get();
+  }
+
+ private:
+  friend struct DefaultSingletonTraits<SandboxSymbolizeHelper>;
+
+  SandboxSymbolizeHelper()
+      : is_initialized_(false) {
+    Init();
+  }
+
+  ~SandboxSymbolizeHelper() {
+    UnregisterCallback();
+    CloseObjectFiles();
+  }
+
+  // Returns a O_RDONLY file descriptor for |file_path| if it was opened
+  // successfully during the initialization. The file is repositioned at
+  // offset 0.
+  // IMPORTANT: This function must be async-signal-safe because it can be
+  // called from a signal handler (symbolizing stack frames for a crash).
+  int GetFileDescriptor(const char* file_path) {
+    int fd = -1;
+
+#if !defined(OFFICIAL_BUILD)
+    if (file_path) {
+      // The assumption here is that iterating over std::map<std::string, int>
+      // using a const_iterator does not allocate dynamic memory, hence it is
+      // async-signal-safe.
+      std::map<std::string, int>::const_iterator it;
+      for (it = modules_.begin(); it != modules_.end(); ++it) {
+        if (strcmp((it->first).c_str(), file_path) == 0) {
+          // POSIX.1-2004 requires an implementation to guarantee that dup()
+          // is async-signal-safe.
+          fd = dup(it->second);
+          break;
+        }
+      }
+      // POSIX.1-2004 requires an implementation to guarantee that lseek()
+      // is async-signal-safe.
+      if (fd >= 0 && lseek(fd, 0, SEEK_SET) < 0) {
+        // Failed to seek.
+        fd = -1;
+      }
+    }
+#endif  // !defined(OFFICIAL_BUILD)
+
+    return fd;
+  }
+
+  // Searches for the object file (from /proc/self/maps) that contains
+  // the specified pc. If found, sets |start_address| to the start address
+  // of where this object file is mapped in memory, sets the module base
+  // address into |base_address|, copies the object file name into
+  // |out_file_name|, and attempts to open the object file. If the object
+  // file is opened successfully, returns the file descriptor. Otherwise,
+  // returns -1. |out_file_name_size| is the size of the file name buffer
+  // (including the null terminator).
+  // IMPORTANT: This function must be async-signal-safe because it can be
+  // called from a signal handler (symbolizing stack frames for a crash).
+  static int OpenObjectFileContainingPc(uint64_t pc, uint64_t& start_address,
+                                        uint64_t& base_address, char* file_path,
+                                        int file_path_size) {
+    // This method can only be called after the singleton is instantiated.
+    // This is ensured by the following facts:
+    // * This is the only static method in this class, it is private, and
+    //   the class has no friends (except for the DefaultSingletonTraits).
+    //   The compiler guarantees that it can only be called after the
+    //   singleton is instantiated.
+    // * This method is used as a callback for the stack tracing code and
+    //   the callback registration is done in the constructor, so logically
+    //   it cannot be called before the singleton is created.
+    SandboxSymbolizeHelper* instance = GetInstance();
+
+    // The assumption here is that iterating over
+    // std::vector<MappedMemoryRegion> using a const_iterator does not allocate
+    // dynamic memory, hence it is async-signal-safe.
+    std::vector<MappedMemoryRegion>::const_iterator it;
+    bool is_first = true;
+    for (it = instance->regions_.begin(); it != instance->regions_.end();
+         ++it, is_first = false) {
+      const MappedMemoryRegion& region = *it;
+      if (region.start <= pc && pc < region.end) {
+        start_address = region.start;
+        // Don't subtract 'start_address' from the first entry:
+        // * If a binary is compiled w/o -pie, then the first entry in
+        //   process maps is likely the binary itself (all dynamic libs
+        //   are mapped higher in address space). For such a binary,
+        //   instruction offset in binary coincides with the actual
+        //   instruction address in virtual memory (as code section
+        //   is mapped to a fixed memory range).
+        // * If a binary is compiled with -pie, all the modules are
+        //   mapped high at address space (in particular, higher than
+        //   shadow memory of the tool), so the module can't be the
+        //   first entry.
+        base_address = (is_first ? 0U : start_address) - region.offset;
+        if (file_path && file_path_size > 0) {
+          strncpy(file_path, region.path.c_str(), file_path_size);
+          // Ensure null termination.
+          file_path[file_path_size - 1] = '\0';
+        }
+        return instance->GetFileDescriptor(region.path.c_str());
+      }
+    }
+    return -1;
+  }
+
+  // Parses /proc/self/maps in order to compile a list of all object file names
+  // for the modules that are loaded in the current process.
+  // Returns true on success.
+  bool CacheMemoryRegions() {
+    // Reads /proc/self/maps.
+    std::string contents;
+    if (!ReadProcMaps(&contents)) {
+      LOG(ERROR) << "Failed to read /proc/self/maps";
+      return false;
+    }
+
+    // Parses /proc/self/maps.
+    if (!ParseProcMaps(contents, &regions_)) {
+      LOG(ERROR) << "Failed to parse the contents of /proc/self/maps";
+      return false;
+    }
+
+    is_initialized_ = true;
+    return true;
+  }
+
+  // Opens all object files and caches their file descriptors.
+  void OpenSymbolFiles() {
+    // Pre-opening and caching the file descriptors of all loaded modules is
+    // not safe for production builds.  Hence it is only done in non-official
+    // builds.  For more details, take a look at: http://crbug.com/341966.
+#if !defined(OFFICIAL_BUILD)
+    // Open the object files for all read-only executable regions and cache
+    // their file descriptors.
+    std::vector<MappedMemoryRegion>::const_iterator it;
+    for (it = regions_.begin(); it != regions_.end(); ++it) {
+      const MappedMemoryRegion& region = *it;
+      // Only interested in read-only executable regions.
+      if ((region.permissions & MappedMemoryRegion::READ) ==
+              MappedMemoryRegion::READ &&
+          (region.permissions & MappedMemoryRegion::WRITE) == 0 &&
+          (region.permissions & MappedMemoryRegion::EXECUTE) ==
+              MappedMemoryRegion::EXECUTE) {
+        if (region.path.empty()) {
+          // Skip regions with empty file names.
+          continue;
+        }
+        if (region.path[0] == '[') {
+          // Skip pseudo-paths, like [stack], [vdso], [heap], etc ...
+          continue;
+        }
+        // Avoid duplicates.
+        if (modules_.find(region.path) == modules_.end()) {
+          int fd = open(region.path.c_str(), O_RDONLY | O_CLOEXEC);
+          if (fd >= 0) {
+            modules_.insert(std::make_pair(region.path, fd));
+          } else {
+            LOG(WARNING) << "Failed to open file: " << region.path
+                         << "\n  Error: " << strerror(errno);
+          }
+        }
+      }
+    }
+#endif  // !defined(OFFICIAL_BUILD)
+  }
+
+  // Initializes and installs the symbolization callback.
+  void Init() {
+    if (CacheMemoryRegions()) {
+      OpenSymbolFiles();
+      google::InstallSymbolizeOpenObjectFileCallback(
+          &OpenObjectFileContainingPc);
+    }
+  }
+
+  // Unregister symbolization callback.
+  void UnregisterCallback() {
+    if (is_initialized_) {
+      google::InstallSymbolizeOpenObjectFileCallback(NULL);
+      is_initialized_ = false;
+    }
+  }
+
+  // Closes all file descriptors owned by this instance.
+  void CloseObjectFiles() {
+#if !defined(OFFICIAL_BUILD)
+    std::map<std::string, int>::iterator it;
+    for (it = modules_.begin(); it != modules_.end(); ++it) {
+      int ret = IGNORE_EINTR(close(it->second));
+      DCHECK(!ret);
+      it->second = -1;
+    }
+    modules_.clear();
+#endif  // !defined(OFFICIAL_BUILD)
+  }
+
+  // Set to true upon successful initialization.
+  bool is_initialized_;
+
+#if !defined(OFFICIAL_BUILD)
+  // Mapping from file name to file descriptor.  Includes file descriptors
+  // for all successfully opened object files and the file descriptor for
+  // /proc/self/maps.  This code is not safe for production builds.
+  std::map<std::string, int> modules_;
+#endif  // !defined(OFFICIAL_BUILD)
+
+  // Cache for the process memory regions.  Produced by parsing the contents
+  // of /proc/self/maps cache.
+  std::vector<MappedMemoryRegion> regions_;
+
+  DISALLOW_COPY_AND_ASSIGN(SandboxSymbolizeHelper);
+};
+#endif // USE_SYMBOLIZE
+
+// Installs StackDumpSignalHandler for the fatal signals and sets SIGPIPE to
+// ignored. Returns true only if every sigaction() call succeeded.
+bool EnableInProcessStackDumping() {
+#if defined(USE_SYMBOLIZE)
+  SandboxSymbolizeHelper::GetInstance();
+#endif  // USE_SYMBOLIZE
+
+  // When running in an application, our code typically expects SIGPIPE
+  // to be ignored.  Therefore, when testing that same code, it should run
+  // with SIGPIPE ignored as well.
+  struct sigaction sigpipe_action;
+  memset(&sigpipe_action, 0, sizeof(sigpipe_action));
+  sigpipe_action.sa_handler = SIG_IGN;
+  sigemptyset(&sigpipe_action.sa_mask);
+  bool success = (sigaction(SIGPIPE, &sigpipe_action, NULL) == 0);
+
+  // Avoid hangs during backtrace initialization, see above.
+  WarmUpBacktrace();
+
+  // SA_RESETHAND restores the default disposition after the first delivery,
+  // so a recursive crash inside the handler terminates the process.
+  struct sigaction action;
+  memset(&action, 0, sizeof(action));
+  action.sa_flags = SA_RESETHAND | SA_SIGINFO;
+  action.sa_sigaction = &StackDumpSignalHandler;
+  sigemptyset(&action.sa_mask);
+
+  success &= (sigaction(SIGILL, &action, NULL) == 0);
+  success &= (sigaction(SIGABRT, &action, NULL) == 0);
+  success &= (sigaction(SIGFPE, &action, NULL) == 0);
+  success &= (sigaction(SIGBUS, &action, NULL) == 0);
+  success &= (sigaction(SIGSEGV, &action, NULL) == 0);
+// On Linux, SIGSYS is reserved by the kernel for seccomp-bpf sandboxing.
+#if !defined(OS_LINUX)
+  success &= (sigaction(SIGSYS, &action, NULL) == 0);
+#endif  // !defined(OS_LINUX)
+
+  return success;
+}
+
+// Captures the current call stack into |trace_| using backtrace(3); on
+// uClibc (no <execinfo.h>) the trace is left empty.
+StackTrace::StackTrace() {
+  // NOTE: This code MUST be async-signal safe (it's used by in-process
+  // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+#if !defined(__UCLIBC__)
+  // Though the backtrace API man page does not list any possible negative
+  // return values, we take no chance.
+  count_ = base::saturated_cast<size_t>(backtrace(trace_, arraysize(trace_)));
+#else
+  count_ = 0;
+#endif
+}
+
+// Writes the captured trace to stderr; a no-op on uClibc.
+void StackTrace::Print() const {
+  // NOTE: This code MUST be async-signal safe (it's used by in-process
+  // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+#if !defined(__UCLIBC__)
+  PrintBacktraceOutputHandler handler;
+  ProcessBacktrace(trace_, count_, &handler);
+#endif
+}
+
+#if !defined(__UCLIBC__)
+// Writes the (symbolized when possible) trace to |os|. Not async-signal
+// safe: may allocate while symbolizing.
+void StackTrace::OutputToStream(std::ostream* os) const {
+  StreamBacktraceOutputHandler handler(os);
+  ProcessBacktrace(trace_, count_, &handler);
+}
+
+namespace internal {
+
+// NOTE: code from sandbox/linux/seccomp-bpf/demo.cc.
+char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
+ // Make sure we can write at least one NUL byte.
+ size_t n = 1;
+ if (n > sz)
+ return NULL;
+
+ if (base < 2 || base > 16) {
+ buf[0] = '\000';
+ return NULL;
+ }
+
+ char* start = buf;
+
+ uintptr_t j = i;
+
+ // Handle negative numbers (only for base 10).
+ if (i < 0 && base == 10) {
+ // This does "j = -i" while avoiding integer overflow.
+ j = static_cast<uintptr_t>(-(i + 1)) + 1;
+
+ // Make sure we can write the '-' character.
+ if (++n > sz) {
+ buf[0] = '\000';
+ return NULL;
+ }
+ *start++ = '-';
+ }
+
+ // Loop until we have converted the entire number. Output at least one
+ // character (i.e. '0').
+ char* ptr = start;
+ do {
+ // Make sure there is still enough space left in our output buffer.
+ if (++n > sz) {
+ buf[0] = '\000';
+ return NULL;
+ }
+
+ // Output the next digit.
+ *ptr++ = "0123456789abcdef"[j % base];
+ j /= base;
+
+ if (padding > 0)
+ padding--;
+ } while (j > 0 || padding > 0);
+
+ // Terminate the output with a NUL character.
+ *ptr = '\000';
+
+ // Conversion to ASCII actually resulted in the digits being in reverse
+ // order. We can't easily generate them in forward order, as we can't tell
+ // the number of characters needed until we are done converting.
+ // So, now, we reverse the string (except for the possible "-" sign).
+ while (--ptr > start) {
+ char ch = *ptr;
+ *ptr = *start;
+ *start++ = ch;
+ }
+ return buf;
+}
+
+} // namespace internal
+
+} // namespace debug
+} // namespace base
diff --git a/libchrome/base/debug/task_annotator.cc b/libchrome/base/debug/task_annotator.cc
new file mode 100644
index 0000000..4ba4d91
--- /dev/null
+++ b/libchrome/base/debug/task_annotator.cc
@@ -0,0 +1,65 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/task_annotator.h"
+
+#include "base/debug/alias.h"
+#include "base/pending_task.h"
+#include "base/trace_event/trace_event.h"
+#include "base/tracked_objects.h"
+
+namespace base {
+namespace debug {
+
+TaskAnnotator::TaskAnnotator() {
+}
+
+TaskAnnotator::~TaskAnnotator() {
+}
+
+void TaskAnnotator::DidQueueTask(const char* queue_function,
+ const PendingTask& pending_task) {
+ TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
+ queue_function,
+ TRACE_ID_MANGLE(GetTaskTraceID(pending_task)),
+ TRACE_EVENT_FLAG_FLOW_OUT);
+}
+
+void TaskAnnotator::RunTask(const char* queue_function,
+ const PendingTask& pending_task) {
+ tracked_objects::TaskStopwatch stopwatch;
+ stopwatch.Start();
+ tracked_objects::Duration queue_duration =
+ stopwatch.StartTime() - pending_task.EffectiveTimePosted();
+
+ TRACE_EVENT_WITH_FLOW1(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
+ queue_function,
+ TRACE_ID_MANGLE(GetTaskTraceID(pending_task)),
+ TRACE_EVENT_FLAG_FLOW_IN,
+ "queue_duration",
+ queue_duration.InMilliseconds());
+
+ // Before running the task, store the program counter where it was posted
+ // and deliberately alias it to ensure it is on the stack if the task
+ // crashes. Be careful not to assume that the variable itself will have the
+ // expected value when displayed by the optimizer in an optimized build.
+ // Look at a memory dump of the stack.
+ const void* program_counter = pending_task.posted_from.program_counter();
+ debug::Alias(&program_counter);
+
+ pending_task.task.Run();
+
+ stopwatch.Stop();
+ tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(
+ pending_task, stopwatch);
+}
+
+uint64_t TaskAnnotator::GetTaskTraceID(const PendingTask& task) const {
+ return (static_cast<uint64_t>(task.sequence_num) << 32) |
+ ((static_cast<uint64_t>(reinterpret_cast<intptr_t>(this)) << 32) >>
+ 32);
+}
+
+} // namespace debug
+} // namespace base
diff --git a/libchrome/base/debug/task_annotator.h b/libchrome/base/debug/task_annotator.h
new file mode 100644
index 0000000..2687c5c
--- /dev/null
+++ b/libchrome/base/debug/task_annotator.h
@@ -0,0 +1,45 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_TASK_ANNOTATOR_H_
+#define BASE_DEBUG_TASK_ANNOTATOR_H_
+
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+struct PendingTask;
+namespace debug {
+
+// Implements common debug annotations for posted tasks. This includes data
+// such as task origins, queueing durations and memory usage.
+class BASE_EXPORT TaskAnnotator {
+ public:
+ TaskAnnotator();
+ ~TaskAnnotator();
+
+ // Called to indicate that a task has been queued to run in the future.
+ // |queue_function| is used as the trace flow event name.
+ void DidQueueTask(const char* queue_function,
+ const PendingTask& pending_task);
+
+ // Run a previously queued task. |queue_function| should match what was
+ // passed into |DidQueueTask| for this task.
+ void RunTask(const char* queue_function, const PendingTask& pending_task);
+
+ private:
+ // Creates a process-wide unique ID to represent this task in trace events.
+  // This will be mangled with a Process ID hash to reduce the likelihood of
+ // colliding with TaskAnnotator pointers on other processes.
+ uint64_t GetTaskTraceID(const PendingTask& task) const;
+
+ DISALLOW_COPY_AND_ASSIGN(TaskAnnotator);
+};
+
+} // namespace debug
+} // namespace base
+
+#endif // BASE_DEBUG_TASK_ANNOTATOR_H_
diff --git a/libchrome/base/debug/task_annotator_unittest.cc b/libchrome/base/debug/task_annotator_unittest.cc
new file mode 100644
index 0000000..9f5c442
--- /dev/null
+++ b/libchrome/base/debug/task_annotator_unittest.cc
@@ -0,0 +1,32 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/task_annotator.h"
+#include "base/bind.h"
+#include "base/pending_task.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace debug {
+namespace {
+
+void TestTask(int* result) {
+ *result = 123;
+}
+
+} // namespace
+
+TEST(TaskAnnotatorTest, QueueAndRunTask) {
+ int result = 0;
+ PendingTask pending_task(FROM_HERE, Bind(&TestTask, &result));
+
+ TaskAnnotator annotator;
+ annotator.DidQueueTask("TaskAnnotatorTest::Queue", pending_task);
+ EXPECT_EQ(0, result);
+ annotator.RunTask("TaskAnnotatorTest::Queue", pending_task);
+ EXPECT_EQ(123, result);
+}
+
+} // namespace debug
+} // namespace base
diff --git a/libchrome/base/environment.cc b/libchrome/base/environment.cc
new file mode 100644
index 0000000..534a7a8
--- /dev/null
+++ b/libchrome/base/environment.cc
@@ -0,0 +1,240 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/environment.h"
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/memory/ptr_util.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+#include <stdlib.h>
+#elif defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+
+namespace {
+
+class EnvironmentImpl : public Environment {
+ public:
+ bool GetVar(StringPiece variable_name, std::string* result) override {
+ if (GetVarImpl(variable_name, result))
+ return true;
+
+ // Some commonly used variable names are uppercase while others
+ // are lowercase, which is inconsistent. Let's try to be helpful
+ // and look for a variable name with the reverse case.
+ // I.e. HTTP_PROXY may be http_proxy for some users/systems.
+ char first_char = variable_name[0];
+ std::string alternate_case_var;
+ if (IsAsciiLower(first_char))
+ alternate_case_var = ToUpperASCII(variable_name);
+ else if (IsAsciiUpper(first_char))
+ alternate_case_var = ToLowerASCII(variable_name);
+ else
+ return false;
+ return GetVarImpl(alternate_case_var.c_str(), result);
+ }
+
+ bool SetVar(StringPiece variable_name,
+ const std::string& new_value) override {
+ return SetVarImpl(variable_name, new_value);
+ }
+
+ bool UnSetVar(StringPiece variable_name) override {
+ return UnSetVarImpl(variable_name);
+ }
+
+ private:
+ bool GetVarImpl(StringPiece variable_name, std::string* result) {
+#if defined(OS_POSIX)
+ const char* env_value = getenv(variable_name.data());
+ if (!env_value)
+ return false;
+ // Note that the variable may be defined but empty.
+ if (result)
+ *result = env_value;
+ return true;
+#elif defined(OS_WIN)
+ DWORD value_length =
+ ::GetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), nullptr, 0);
+ if (value_length == 0)
+ return false;
+ if (result) {
+ std::unique_ptr<wchar_t[]> value(new wchar_t[value_length]);
+ ::GetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), value.get(),
+ value_length);
+ *result = WideToUTF8(value.get());
+ }
+ return true;
+#else
+#error need to port
+#endif
+ }
+
+ bool SetVarImpl(StringPiece variable_name, const std::string& new_value) {
+#if defined(OS_POSIX)
+ // On success, zero is returned.
+ return !setenv(variable_name.data(), new_value.c_str(), 1);
+#elif defined(OS_WIN)
+ // On success, a nonzero value is returned.
+ return !!SetEnvironmentVariable(UTF8ToWide(variable_name).c_str(),
+ UTF8ToWide(new_value).c_str());
+#endif
+ }
+
+ bool UnSetVarImpl(StringPiece variable_name) {
+#if defined(OS_POSIX)
+ // On success, zero is returned.
+ return !unsetenv(variable_name.data());
+#elif defined(OS_WIN)
+ // On success, a nonzero value is returned.
+ return !!SetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), nullptr);
+#endif
+ }
+};
+
+// Parses a null-terminated input string of an environment block. The key is
+// placed into the given string, and the total length of the line, including
+// the terminating null, is returned.
+size_t ParseEnvLine(const NativeEnvironmentString::value_type* input,
+ NativeEnvironmentString* key) {
+ // Skip to the equals or end of the string, this is the key.
+ size_t cur = 0;
+ while (input[cur] && input[cur] != '=')
+ cur++;
+ *key = NativeEnvironmentString(&input[0], cur);
+
+ // Now just skip to the end of the string.
+ while (input[cur])
+ cur++;
+ return cur + 1;
+}
+
+} // namespace
+
+namespace env_vars {
+
+#if defined(OS_POSIX)
+// On Posix systems, this variable contains the location of the user's home
+// directory. (e.g., /home/username/).
+const char kHome[] = "HOME";
+#endif
+
+} // namespace env_vars
+
+Environment::~Environment() {}
+
+// static
+std::unique_ptr<Environment> Environment::Create() {
+ return MakeUnique<EnvironmentImpl>();
+}
+
+bool Environment::HasVar(StringPiece variable_name) {
+ return GetVar(variable_name, nullptr);
+}
+
+#if defined(OS_WIN)
+
+string16 AlterEnvironment(const wchar_t* env,
+ const EnvironmentMap& changes) {
+ string16 result;
+
+ // First copy all unmodified values to the output.
+ size_t cur_env = 0;
+ string16 key;
+ while (env[cur_env]) {
+ const wchar_t* line = &env[cur_env];
+ size_t line_length = ParseEnvLine(line, &key);
+
+ // Keep only values not specified in the change vector.
+ EnvironmentMap::const_iterator found_change = changes.find(key);
+ if (found_change == changes.end())
+ result.append(line, line_length);
+
+ cur_env += line_length;
+ }
+
+ // Now append all modified and new values.
+ for (EnvironmentMap::const_iterator i = changes.begin();
+ i != changes.end(); ++i) {
+ if (!i->second.empty()) {
+ result.append(i->first);
+ result.push_back('=');
+ result.append(i->second);
+ result.push_back(0);
+ }
+ }
+
+ // An additional null marks the end of the list. We always need a double-null
+ // in case nothing was added above.
+ if (result.empty())
+ result.push_back(0);
+ result.push_back(0);
+ return result;
+}
+
+#elif defined(OS_POSIX)
+
+std::unique_ptr<char* []> AlterEnvironment(const char* const* const env,
+ const EnvironmentMap& changes) {
+ std::string value_storage; // Holds concatenated null-terminated strings.
+ std::vector<size_t> result_indices; // Line indices into value_storage.
+
+ // First build up all of the unchanged environment strings. These are
+ // null-terminated of the form "key=value".
+ std::string key;
+ for (size_t i = 0; env[i]; i++) {
+ size_t line_length = ParseEnvLine(env[i], &key);
+
+ // Keep only values not specified in the change vector.
+ EnvironmentMap::const_iterator found_change = changes.find(key);
+ if (found_change == changes.end()) {
+ result_indices.push_back(value_storage.size());
+ value_storage.append(env[i], line_length);
+ }
+ }
+
+ // Now append all modified and new values.
+ for (EnvironmentMap::const_iterator i = changes.begin();
+ i != changes.end(); ++i) {
+ if (!i->second.empty()) {
+ result_indices.push_back(value_storage.size());
+ value_storage.append(i->first);
+ value_storage.push_back('=');
+ value_storage.append(i->second);
+ value_storage.push_back(0);
+ }
+ }
+
+ size_t pointer_count_required =
+ result_indices.size() + 1 + // Null-terminated array of pointers.
+ (value_storage.size() + sizeof(char*) - 1) / sizeof(char*); // Buffer.
+ std::unique_ptr<char* []> result(new char*[pointer_count_required]);
+
+ // The string storage goes after the array of pointers.
+ char* storage_data = reinterpret_cast<char*>(
+ &result.get()[result_indices.size() + 1]);
+ if (!value_storage.empty())
+ memcpy(storage_data, value_storage.data(), value_storage.size());
+
+ // Fill array of pointers at the beginning of the result.
+ for (size_t i = 0; i < result_indices.size(); i++)
+ result[i] = &storage_data[result_indices[i]];
+ result[result_indices.size()] = 0; // Null terminator.
+
+ return result;
+}
+
+#endif // OS_POSIX
+
+} // namespace base
diff --git a/libchrome/base/environment.h b/libchrome/base/environment.h
new file mode 100644
index 0000000..3a4ed04
--- /dev/null
+++ b/libchrome/base/environment.h
@@ -0,0 +1,90 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ENVIRONMENT_H_
+#define BASE_ENVIRONMENT_H_
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace env_vars {
+
+#if defined(OS_POSIX)
+BASE_EXPORT extern const char kHome[];
+#endif
+
+} // namespace env_vars
+
+class BASE_EXPORT Environment {
+ public:
+ virtual ~Environment();
+
+ // Returns the appropriate platform-specific instance.
+ static std::unique_ptr<Environment> Create();
+
+ // Gets an environment variable's value and stores it in |result|.
+ // Returns false if the key is unset.
+ virtual bool GetVar(StringPiece variable_name, std::string* result) = 0;
+
+ // Syntactic sugar for GetVar(variable_name, nullptr);
+ virtual bool HasVar(StringPiece variable_name);
+
+ // Returns true on success, otherwise returns false.
+ virtual bool SetVar(StringPiece variable_name,
+ const std::string& new_value) = 0;
+
+ // Returns true on success, otherwise returns false.
+ virtual bool UnSetVar(StringPiece variable_name) = 0;
+};
+
+
+#if defined(OS_WIN)
+
+typedef string16 NativeEnvironmentString;
+typedef std::map<NativeEnvironmentString, NativeEnvironmentString>
+ EnvironmentMap;
+
+// Returns a modified environment vector constructed from the given environment
+// and the list of changes given in |changes|. Each key in the environment is
+// matched against the first element of the pairs. In the event of a match, the
+// value is replaced by the second of the pair, unless the second is empty, in
+// which case the key-value is removed.
+//
+// This Windows version takes and returns a Windows-style environment block
+// which is a concatenated list of null-terminated 16-bit strings. The end is
+// marked by a double-null terminator. The size of the returned string will
+// include the terminators.
+BASE_EXPORT string16 AlterEnvironment(const wchar_t* env,
+ const EnvironmentMap& changes);
+
+#elif defined(OS_POSIX)
+
+typedef std::string NativeEnvironmentString;
+typedef std::map<NativeEnvironmentString, NativeEnvironmentString>
+ EnvironmentMap;
+
+// See general comments for the Windows version above.
+//
+// This Posix version takes and returns a Posix-style environment block, which
+// is a null-terminated list of pointers to null-terminated strings. The
+// returned array will have appended to it the storage for the array itself so
+// there is only one pointer to manage, but this means that you can't copy the
+// array without keeping the original around.
+BASE_EXPORT std::unique_ptr<char* []> AlterEnvironment(
+ const char* const* env,
+ const EnvironmentMap& changes);
+
+#endif
+
+} // namespace base
+
+#endif // BASE_ENVIRONMENT_H_
diff --git a/libchrome/base/environment_unittest.cc b/libchrome/base/environment_unittest.cc
new file mode 100644
index 0000000..ef264cf
--- /dev/null
+++ b/libchrome/base/environment_unittest.cc
@@ -0,0 +1,167 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/environment.h"
+
+#include <memory>
+
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+typedef PlatformTest EnvironmentTest;
+
+namespace base {
+
+TEST_F(EnvironmentTest, GetVar) {
+ // Every setup should have non-empty PATH...
+ std::unique_ptr<Environment> env(Environment::Create());
+ std::string env_value;
+ EXPECT_TRUE(env->GetVar("PATH", &env_value));
+ EXPECT_NE(env_value, "");
+}
+
+TEST_F(EnvironmentTest, GetVarReverse) {
+ std::unique_ptr<Environment> env(Environment::Create());
+ const char kFooUpper[] = "FOO";
+ const char kFooLower[] = "foo";
+
+ // Set a variable in UPPER case.
+ EXPECT_TRUE(env->SetVar(kFooUpper, kFooLower));
+
+ // And then try to get this variable passing the lower case.
+ std::string env_value;
+ EXPECT_TRUE(env->GetVar(kFooLower, &env_value));
+
+ EXPECT_STREQ(env_value.c_str(), kFooLower);
+
+ EXPECT_TRUE(env->UnSetVar(kFooUpper));
+
+ const char kBar[] = "bar";
+ // Now do the opposite, set the variable in the lower case.
+ EXPECT_TRUE(env->SetVar(kFooLower, kBar));
+
+ // And then try to get this variable passing the UPPER case.
+ EXPECT_TRUE(env->GetVar(kFooUpper, &env_value));
+
+ EXPECT_STREQ(env_value.c_str(), kBar);
+
+ EXPECT_TRUE(env->UnSetVar(kFooLower));
+}
+
+TEST_F(EnvironmentTest, HasVar) {
+ // Every setup should have PATH...
+ std::unique_ptr<Environment> env(Environment::Create());
+ EXPECT_TRUE(env->HasVar("PATH"));
+}
+
+TEST_F(EnvironmentTest, SetVar) {
+ std::unique_ptr<Environment> env(Environment::Create());
+
+ const char kFooUpper[] = "FOO";
+ const char kFooLower[] = "foo";
+ EXPECT_TRUE(env->SetVar(kFooUpper, kFooLower));
+
+ // Now verify that the environment has the new variable.
+ EXPECT_TRUE(env->HasVar(kFooUpper));
+
+ std::string var_value;
+ EXPECT_TRUE(env->GetVar(kFooUpper, &var_value));
+ EXPECT_EQ(var_value, kFooLower);
+}
+
+TEST_F(EnvironmentTest, UnSetVar) {
+ std::unique_ptr<Environment> env(Environment::Create());
+
+ const char kFooUpper[] = "FOO";
+ const char kFooLower[] = "foo";
+ // First set some environment variable.
+ EXPECT_TRUE(env->SetVar(kFooUpper, kFooLower));
+
+ // Now verify that the environment has the new variable.
+ EXPECT_TRUE(env->HasVar(kFooUpper));
+
+ // Finally verify that the environment variable was erased.
+ EXPECT_TRUE(env->UnSetVar(kFooUpper));
+
+ // And check that the variable has been unset.
+ EXPECT_FALSE(env->HasVar(kFooUpper));
+}
+
+#if defined(OS_WIN)
+
+TEST_F(EnvironmentTest, AlterEnvironment) {
+ const wchar_t empty[] = L"\0";
+ const wchar_t a2[] = L"A=2\0";
+ EnvironmentMap changes;
+ string16 e;
+
+ e = AlterEnvironment(empty, changes);
+ EXPECT_EQ(0, e[0]);
+
+ changes[L"A"] = L"1";
+ e = AlterEnvironment(empty, changes);
+ EXPECT_EQ(string16(L"A=1\0\0", 5), e);
+
+ changes.clear();
+ changes[L"A"] = string16();
+ e = AlterEnvironment(empty, changes);
+ EXPECT_EQ(string16(L"\0\0", 2), e);
+
+ changes.clear();
+ e = AlterEnvironment(a2, changes);
+ EXPECT_EQ(string16(L"A=2\0\0", 5), e);
+
+ changes.clear();
+ changes[L"A"] = L"1";
+ e = AlterEnvironment(a2, changes);
+ EXPECT_EQ(string16(L"A=1\0\0", 5), e);
+
+ changes.clear();
+ changes[L"A"] = string16();
+ e = AlterEnvironment(a2, changes);
+ EXPECT_EQ(string16(L"\0\0", 2), e);
+}
+
+#else
+
+TEST_F(EnvironmentTest, AlterEnvironment) {
+ const char* const empty[] = { NULL };
+ const char* const a2[] = { "A=2", NULL };
+ EnvironmentMap changes;
+ std::unique_ptr<char* []> e;
+
+ e = AlterEnvironment(empty, changes);
+ EXPECT_TRUE(e[0] == NULL);
+
+ changes["A"] = "1";
+ e = AlterEnvironment(empty, changes);
+ EXPECT_EQ(std::string("A=1"), e[0]);
+ EXPECT_TRUE(e[1] == NULL);
+
+ changes.clear();
+ changes["A"] = std::string();
+ e = AlterEnvironment(empty, changes);
+ EXPECT_TRUE(e[0] == NULL);
+
+ changes.clear();
+ e = AlterEnvironment(a2, changes);
+ EXPECT_EQ(std::string("A=2"), e[0]);
+ EXPECT_TRUE(e[1] == NULL);
+
+ changes.clear();
+ changes["A"] = "1";
+ e = AlterEnvironment(a2, changes);
+ EXPECT_EQ(std::string("A=1"), e[0]);
+ EXPECT_TRUE(e[1] == NULL);
+
+ changes.clear();
+ changes["A"] = std::string();
+ e = AlterEnvironment(a2, changes);
+ EXPECT_TRUE(e[0] == NULL);
+}
+
+#endif
+
+} // namespace base
diff --git a/libchrome/base/event_types.h b/libchrome/base/event_types.h
new file mode 100644
index 0000000..9905800
--- /dev/null
+++ b/libchrome/base/event_types.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_EVENT_TYPES_H_
+#define BASE_EVENT_TYPES_H_
+
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(USE_X11)
+typedef union _XEvent XEvent;
+#elif defined(OS_MACOSX)
+#if defined(__OBJC__)
+@class NSEvent;
+#else // __OBJC__
+class NSEvent;
+#endif // __OBJC__
+#endif
+
+namespace base {
+
+// Cross platform typedefs for native event types.
+#if defined(OS_WIN)
+typedef MSG NativeEvent;
+#elif defined(USE_X11)
+typedef XEvent* NativeEvent;
+#elif defined(OS_MACOSX)
+typedef NSEvent* NativeEvent;
+#else
+typedef void* NativeEvent;
+#endif
+
+} // namespace base
+
+#endif // BASE_EVENT_TYPES_H_
diff --git a/libchrome/base/feature_list.cc b/libchrome/base/feature_list.cc
new file mode 100644
index 0000000..435165e
--- /dev/null
+++ b/libchrome/base/feature_list.cc
@@ -0,0 +1,306 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/feature_list.h"
+
+#include <stddef.h>
+
+#include <utility>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/metrics/field_trial.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+namespace {
+
+// Pointer to the FeatureList instance singleton that was set via
+// FeatureList::SetInstance(). Does not use base/memory/singleton.h in order to
+// have more control over initialization timing. Leaky.
+FeatureList* g_instance = nullptr;
+
+// Tracks whether the FeatureList instance was initialized via an accessor.
+bool g_initialized_from_accessor = false;
+
+// Some characters are not allowed to appear in feature names or the associated
+// field trial names, as they are used as special characters for command-line
+// serialization. This function checks that the strings are ASCII (since they
+// are used in command-line API functions that require ASCII) and whether there
+// are any reserved characters present, returning true if the string is valid.
+// Only called in DCHECKs.
+bool IsValidFeatureOrFieldTrialName(const std::string& name) {
+ return IsStringASCII(name) && name.find_first_of(",<*") == std::string::npos;
+}
+
+} // namespace
+
+FeatureList::FeatureList() {}
+
+FeatureList::~FeatureList() {}
+
+void FeatureList::InitializeFromCommandLine(
+ const std::string& enable_features,
+ const std::string& disable_features) {
+ DCHECK(!initialized_);
+
+ // Process disabled features first, so that disabled ones take precedence over
+ // enabled ones (since RegisterOverride() uses insert()).
+ RegisterOverridesFromCommandLine(disable_features, OVERRIDE_DISABLE_FEATURE);
+ RegisterOverridesFromCommandLine(enable_features, OVERRIDE_ENABLE_FEATURE);
+
+ initialized_from_command_line_ = true;
+}
+
+bool FeatureList::IsFeatureOverriddenFromCommandLine(
+ const std::string& feature_name,
+ OverrideState state) const {
+ auto it = overrides_.find(feature_name);
+ return it != overrides_.end() && it->second.overridden_state == state &&
+ !it->second.overridden_by_field_trial;
+}
+
+void FeatureList::AssociateReportingFieldTrial(
+ const std::string& feature_name,
+ OverrideState for_overridden_state,
+ FieldTrial* field_trial) {
+ DCHECK(
+ IsFeatureOverriddenFromCommandLine(feature_name, for_overridden_state));
+
+ // Only one associated field trial is supported per feature. This is generally
+ // enforced server-side.
+ OverrideEntry* entry = &overrides_.find(feature_name)->second;
+ if (entry->field_trial) {
+ NOTREACHED() << "Feature " << feature_name
+ << " already has trial: " << entry->field_trial->trial_name()
+ << ", associating trial: " << field_trial->trial_name();
+ return;
+ }
+
+ entry->field_trial = field_trial;
+}
+
+void FeatureList::RegisterFieldTrialOverride(const std::string& feature_name,
+ OverrideState override_state,
+ FieldTrial* field_trial) {
+ DCHECK(field_trial);
+ DCHECK(!ContainsKey(overrides_, feature_name) ||
+ !overrides_.find(feature_name)->second.field_trial)
+ << "Feature " << feature_name
+ << " has conflicting field trial overrides: "
+ << overrides_.find(feature_name)->second.field_trial->trial_name()
+ << " / " << field_trial->trial_name();
+
+ RegisterOverride(feature_name, override_state, field_trial);
+}
+
+void FeatureList::GetFeatureOverrides(std::string* enable_overrides,
+ std::string* disable_overrides) {
+ DCHECK(initialized_);
+
+ enable_overrides->clear();
+ disable_overrides->clear();
+
+ // Note: Since |overrides_| is a std::map, iteration will be in alphabetical
+  // order. This is not guaranteed to users of this function, but is useful for
+ // tests to assume the order.
+ for (const auto& entry : overrides_) {
+ std::string* target_list = nullptr;
+ switch (entry.second.overridden_state) {
+ case OVERRIDE_USE_DEFAULT:
+ case OVERRIDE_ENABLE_FEATURE:
+ target_list = enable_overrides;
+ break;
+ case OVERRIDE_DISABLE_FEATURE:
+ target_list = disable_overrides;
+ break;
+ }
+
+ if (!target_list->empty())
+ target_list->push_back(',');
+ if (entry.second.overridden_state == OVERRIDE_USE_DEFAULT)
+ target_list->push_back('*');
+ target_list->append(entry.first);
+ if (entry.second.field_trial) {
+ target_list->push_back('<');
+ target_list->append(entry.second.field_trial->trial_name());
+ }
+ }
+}
+
+// static
+bool FeatureList::IsEnabled(const Feature& feature) {
+ if (!g_instance) {
+ g_initialized_from_accessor = true;
+ return feature.default_state == FEATURE_ENABLED_BY_DEFAULT;
+ }
+ return g_instance->IsFeatureEnabled(feature);
+}
+
+// static
+FieldTrial* FeatureList::GetFieldTrial(const Feature& feature) {
+ return GetInstance()->GetAssociatedFieldTrial(feature);
+}
+
+// static
+std::vector<std::string> FeatureList::SplitFeatureListString(
+ const std::string& input) {
+ return SplitString(input, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+}
+
+// static
+bool FeatureList::InitializeInstance(const std::string& enable_features,
+ const std::string& disable_features) {
+ // We want to initialize a new instance here to support command-line features
+ // in testing better. For example, we initialize a dummy instance in
+ // base/test/test_suite.cc, and override it in content/browser/
+ // browser_main_loop.cc.
+ // On the other hand, we want to avoid re-initialization from command line.
+ // For example, we initialize an instance in chrome/browser/
+ // chrome_browser_main.cc and do not override it in content/browser/
+ // browser_main_loop.cc.
+ // If the singleton was previously initialized from within an accessor, we
+ // want to prevent callers from reinitializing the singleton and masking the
+ // accessor call(s) which likely returned incorrect information.
+ CHECK(!g_initialized_from_accessor);
+ bool instance_existed_before = false;
+ if (g_instance) {
+ if (g_instance->initialized_from_command_line_)
+ return false;
+
+ delete g_instance;
+ g_instance = nullptr;
+ instance_existed_before = true;
+ }
+
+ std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+ feature_list->InitializeFromCommandLine(enable_features, disable_features);
+ base::FeatureList::SetInstance(std::move(feature_list));
+ return !instance_existed_before;
+}
+
+// static
+FeatureList* FeatureList::GetInstance() {
+ return g_instance;
+}
+
+// static
+void FeatureList::SetInstance(std::unique_ptr<FeatureList> instance) {
+ DCHECK(!g_instance);
+ instance->FinalizeInitialization();
+
+ // Note: Intentional leak of global singleton.
+ g_instance = instance.release();
+}
+
+// static
+void FeatureList::ClearInstanceForTesting() {
+ delete g_instance;
+ g_instance = nullptr;
+ g_initialized_from_accessor = false;
+}
+
+void FeatureList::FinalizeInitialization() {
+ DCHECK(!initialized_);
+ initialized_ = true;
+}
+
+bool FeatureList::IsFeatureEnabled(const Feature& feature) {
+ DCHECK(initialized_);
+ DCHECK(IsValidFeatureOrFieldTrialName(feature.name)) << feature.name;
+ DCHECK(CheckFeatureIdentity(feature)) << feature.name;
+
+ auto it = overrides_.find(feature.name);
+ if (it != overrides_.end()) {
+ const OverrideEntry& entry = it->second;
+
+ // Activate the corresponding field trial, if necessary.
+ if (entry.field_trial)
+ entry.field_trial->group();
+
+    // TODO(asvitkine): Expand this section as more support is added.
+
+ // If marked as OVERRIDE_USE_DEFAULT, simply return the default state below.
+ if (entry.overridden_state != OVERRIDE_USE_DEFAULT)
+ return entry.overridden_state == OVERRIDE_ENABLE_FEATURE;
+ }
+ // Otherwise, return the default state.
+ return feature.default_state == FEATURE_ENABLED_BY_DEFAULT;
+}
+
+FieldTrial* FeatureList::GetAssociatedFieldTrial(const Feature& feature) {
+ DCHECK(initialized_);
+ DCHECK(IsValidFeatureOrFieldTrialName(feature.name)) << feature.name;
+ DCHECK(CheckFeatureIdentity(feature)) << feature.name;
+
+ auto it = overrides_.find(feature.name);
+ if (it != overrides_.end()) {
+ const OverrideEntry& entry = it->second;
+ return entry.field_trial;
+ }
+
+ return nullptr;
+}
+
+void FeatureList::RegisterOverridesFromCommandLine(
+ const std::string& feature_list,
+ OverrideState overridden_state) {
+ for (const auto& value : SplitFeatureListString(feature_list)) {
+ StringPiece feature_name(value);
+ base::FieldTrial* trial = nullptr;
+
+ // The entry may be of the form FeatureName<FieldTrialName - in which case,
+ // this splits off the field trial name and associates it with the override.
+ std::string::size_type pos = feature_name.find('<');
+ if (pos != std::string::npos) {
+ feature_name.set(value.data(), pos);
+ trial = base::FieldTrialList::Find(value.substr(pos + 1));
+ }
+
+ RegisterOverride(feature_name, overridden_state, trial);
+ }
+}
+
+void FeatureList::RegisterOverride(StringPiece feature_name,
+ OverrideState overridden_state,
+ FieldTrial* field_trial) {
+ DCHECK(!initialized_);
+ if (field_trial) {
+ DCHECK(IsValidFeatureOrFieldTrialName(field_trial->trial_name()))
+ << field_trial->trial_name();
+ }
+ if (feature_name.starts_with("*")) {
+ feature_name = feature_name.substr(1);
+ overridden_state = OVERRIDE_USE_DEFAULT;
+ }
+
+ // Note: The semantics of insert() is that it does not overwrite the entry if
+ // one already exists for the key. Thus, only the first override for a given
+ // feature name takes effect.
+ overrides_.insert(std::make_pair(
+ feature_name.as_string(), OverrideEntry(overridden_state, field_trial)));
+}
+
+bool FeatureList::CheckFeatureIdentity(const Feature& feature) {
+ AutoLock auto_lock(feature_identity_tracker_lock_);
+
+ auto it = feature_identity_tracker_.find(feature.name);
+ if (it == feature_identity_tracker_.end()) {
+ // If it's not tracked yet, register it.
+ feature_identity_tracker_[feature.name] = &feature;
+ return true;
+ }
+ // Compare address of |feature| to the existing tracked entry.
+ return it->second == &feature;
+}
+
+FeatureList::OverrideEntry::OverrideEntry(OverrideState overridden_state,
+ FieldTrial* field_trial)
+ : overridden_state(overridden_state),
+ field_trial(field_trial),
+ overridden_by_field_trial(field_trial != nullptr) {}
+
+} // namespace base
diff --git a/libchrome/base/feature_list.h b/libchrome/base/feature_list.h
new file mode 100644
index 0000000..e9ed00a
--- /dev/null
+++ b/libchrome/base/feature_list.h
@@ -0,0 +1,260 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FEATURE_LIST_H_
+#define BASE_FEATURE_LIST_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+class FieldTrial;
+
+// Specifies whether a given feature is enabled or disabled by default.
+enum FeatureState {
+  // The feature is off unless overridden (via command-line or a field trial).
+  FEATURE_DISABLED_BY_DEFAULT,
+  // The feature is on unless overridden (via command-line or a field trial).
+  FEATURE_ENABLED_BY_DEFAULT,
+};
+
+// The Feature struct is used to define the default state for a feature. See
+// comment below for more details. There must only ever be one struct instance
+// for a given feature name - generally defined as a constant global variable or
+// file static. (This single-instance rule is verified in builds with DCHECKs
+// enabled; see IsEnabled() below.)
+struct BASE_EXPORT Feature {
+  // The name of the feature. This should be unique to each feature and is used
+  // for enabling/disabling features via command line flags and experiments.
+  const char* const name;
+
+  // The default state (i.e. enabled or disabled) for this feature.
+  const FeatureState default_state;
+};
+
+// The FeatureList class is used to determine whether a given feature is on or
+// off. It provides an authoritative answer, taking into account command-line
+// overrides and experimental control.
+//
+// The basic use case is for any feature that can be toggled (e.g. through
+// command-line or an experiment) to have a defined Feature struct, e.g.:
+//
+//   const base::Feature kMyGreatFeature {
+//     "MyGreatFeature", base::FEATURE_ENABLED_BY_DEFAULT
+//   };
+//
+// Then, client code that wishes to query the state of the feature would check:
+//
+//   if (base::FeatureList::IsEnabled(kMyGreatFeature)) {
+//     // Feature code goes here.
+//   }
+//
+// Behind the scenes, the above call would take into account any command-line
+// flags to enable or disable the feature, any experiments that may control it
+// and finally its default state (in that order of priority), to determine
+// whether the feature is on.
+//
+// Features can be explicitly forced on or off by specifying a list of comma-
+// separated feature names via the following command-line flags:
+//
+//   --enable-features=Feature5,Feature7
+//   --disable-features=Feature1,Feature2,Feature3
+//
+// After initialization (which should be done single-threaded), the FeatureList
+// API is thread safe.
+//
+// Note: This class is a singleton, but does not use base/memory/singleton.h in
+// order to have control over its initialization sequence. Specifically, the
+// intended use is to create an instance of this class and fully initialize it,
+// before setting it as the singleton for a process, via SetInstance().
+class BASE_EXPORT FeatureList {
+ public:
+  FeatureList();
+  ~FeatureList();
+
+  // Initializes feature overrides via command-line flags |enable_features| and
+  // |disable_features|, each of which is a comma-separated list of features to
+  // enable or disable, respectively. If a feature appears on both lists, then
+  // it will be disabled. If a list entry has the format "FeatureName<TrialName"
+  // then this initialization will also associate the feature state override
+  // with the named field trial, if it exists. If a feature name is prefixed
+  // with the '*' character, it will be created with OVERRIDE_USE_DEFAULT -
+  // which is useful for associating with a trial while using the default state.
+  // Must only be invoked during the initialization phase (before
+  // FinalizeInitialization() has been called).
+  void InitializeFromCommandLine(const std::string& enable_features,
+                                 const std::string& disable_features);
+
+  // Specifies whether a feature override enables or disables the feature.
+  enum OverrideState {
+    // Keep the feature's default state; used to associate a field trial with
+    // a feature without changing whether it is enabled.
+    OVERRIDE_USE_DEFAULT,
+    OVERRIDE_DISABLE_FEATURE,
+    OVERRIDE_ENABLE_FEATURE,
+  };
+
+  // Returns true if the state of |feature_name| has been overridden via
+  // |InitializeFromCommandLine()|.
+  bool IsFeatureOverriddenFromCommandLine(const std::string& feature_name,
+                                          OverrideState state) const;
+
+  // Associates a field trial for reporting purposes corresponding to the
+  // command-line setting the feature state to |for_overridden_state|. The trial
+  // will be activated when the state of the feature is first queried. This
+  // should be called during registration, after InitializeFromCommandLine() has
+  // been called but before the instance is registered via SetInstance().
+  void AssociateReportingFieldTrial(const std::string& feature_name,
+                                    OverrideState for_overridden_state,
+                                    FieldTrial* field_trial);
+
+  // Registers a field trial to override the enabled state of the specified
+  // feature to |override_state|. Command-line overrides still take precedence
+  // over field trials, so this will have no effect if the feature is being
+  // overridden from the command-line. The associated field trial will be
+  // activated when the feature state for this feature is queried. This should
+  // be called during registration, after InitializeFromCommandLine() has been
+  // called but before the instance is registered via SetInstance().
+  void RegisterFieldTrialOverride(const std::string& feature_name,
+                                  OverrideState override_state,
+                                  FieldTrial* field_trial);
+
+  // Returns comma-separated lists of feature names (in the same format that is
+  // accepted by InitializeFromCommandLine()) corresponding to features that
+  // have been overridden - either through command-line or via FieldTrials. For
+  // those features that have an associated FieldTrial, the output entry will be
+  // of the format "FeatureName<TrialName", where "TrialName" is the name of the
+  // FieldTrial. Features that have overrides with OVERRIDE_USE_DEFAULT will be
+  // added to |enable_overrides| with a '*' character prefix. Must be called
+  // only after the instance has been initialized and registered.
+  void GetFeatureOverrides(std::string* enable_overrides,
+                           std::string* disable_overrides);
+
+  // Returns whether the given |feature| is enabled. Must only be called after
+  // the singleton instance has been registered via SetInstance(). Additionally,
+  // a feature with a given name must only have a single corresponding Feature
+  // struct, which is checked in builds with DCHECKs enabled.
+  static bool IsEnabled(const Feature& feature);
+
+  // Returns the field trial associated with the given |feature|. Must only be
+  // called after the singleton instance has been registered via SetInstance().
+  static FieldTrial* GetFieldTrial(const Feature& feature);
+
+  // Splits a comma-separated string containing feature names into a vector.
+  static std::vector<std::string> SplitFeatureListString(
+      const std::string& input);
+
+  // Initializes and sets an instance of FeatureList with feature overrides via
+  // command-line flags |enable_features| and |disable_features| if one has not
+  // already been set from command-line flags. Returns true if an instance did
+  // not previously exist. See InitializeFromCommandLine() for more details
+  // about |enable_features| and |disable_features| parameters.
+  static bool InitializeInstance(const std::string& enable_features,
+                                 const std::string& disable_features);
+
+  // Returns the singleton instance of FeatureList. Will return null until an
+  // instance is registered via SetInstance().
+  static FeatureList* GetInstance();
+
+  // Registers the given |instance| to be the singleton feature list for this
+  // process. This should only be called once and |instance| must not be null.
+  static void SetInstance(std::unique_ptr<FeatureList> instance);
+
+  // Clears the previously-registered singleton instance for tests.
+  static void ClearInstanceForTesting();
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(FeatureListTest, CheckFeatureIdentity);
+
+  struct OverrideEntry {
+    // The overridden enable (on/off) state of the feature.
+    const OverrideState overridden_state;
+
+    // An optional associated field trial, which will be activated when the
+    // state of the feature is queried for the first time. Weak pointer to the
+    // FieldTrial object that is owned by the FieldTrialList singleton.
+    base::FieldTrial* field_trial;
+
+    // Specifies whether the feature's state is overridden by |field_trial|.
+    // If it's not, and |field_trial| is not null, it means it is simply an
+    // associated field trial for reporting purposes (and |overridden_state|
+    // came from the command-line).
+    const bool overridden_by_field_trial;
+
+    // TODO(asvitkine): Expand this as more support is added.
+
+    // Constructs an OverrideEntry for the given |overridden_state|. If
+    // |field_trial| is not null, it implies that |overridden_state| comes from
+    // the trial, so |overridden_by_field_trial| will be set to true.
+    OverrideEntry(OverrideState overridden_state, FieldTrial* field_trial);
+  };
+
+  // Finalizes the initialization state of the FeatureList, so that no further
+  // overrides can be registered. This is called by SetInstance() on the
+  // singleton feature list that is being registered.
+  void FinalizeInitialization();
+
+  // Returns whether the given |feature| is enabled. This is invoked by the
+  // public FeatureList::IsEnabled() static function on the global singleton.
+  // Requires the FeatureList to have already been fully initialized.
+  bool IsFeatureEnabled(const Feature& feature);
+
+  // Returns the field trial associated with the given |feature|. This is
+  // invoked by the public FeatureList::GetFieldTrial() static function on the
+  // global singleton. Requires the FeatureList to have already been fully
+  // initialized.
+  base::FieldTrial* GetAssociatedFieldTrial(const Feature& feature);
+
+  // For each feature name in comma-separated list of strings |feature_list|,
+  // registers an override with the specified |overridden_state|. Also, will
+  // associate an optional named field trial if the entry is of the format
+  // "FeatureName<TrialName".
+  void RegisterOverridesFromCommandLine(const std::string& feature_list,
+                                        OverrideState overridden_state);
+
+  // Registers an override for feature |feature_name|. The override specifies
+  // whether the feature should be on or off (via |overridden_state|), which
+  // will take precedence over the feature's default state. If |field_trial| is
+  // not null, registers the specified field trial object to be associated with
+  // the feature, which will activate the field trial when the feature state is
+  // queried. If an override is already registered for the given feature, it
+  // will not be changed.
+  void RegisterOverride(StringPiece feature_name,
+                        OverrideState overridden_state,
+                        FieldTrial* field_trial);
+
+  // Verifies that there's only a single definition of a Feature struct for a
+  // given feature name. Keeps track of the first seen Feature struct for each
+  // feature. Returns false when called on a Feature struct with a different
+  // address than the first one it saw for that feature name. Used only from
+  // DCHECKs and tests.
+  bool CheckFeatureIdentity(const Feature& feature);
+
+  // Map from feature name to an OverrideEntry struct for the feature, if it
+  // exists. Not guarded by a lock: mutated only before FinalizeInitialization()
+  // (see the class comment regarding thread safety after initialization).
+  std::map<std::string, OverrideEntry> overrides_;
+
+  // Locked map that keeps track of seen features, to ensure a single feature is
+  // only defined once. This verification is only done in builds with DCHECKs
+  // enabled.
+  Lock feature_identity_tracker_lock_;
+  std::map<std::string, const Feature*> feature_identity_tracker_;
+
+  // Whether this object has been fully initialized. This gets set to true as a
+  // result of FinalizeInitialization().
+  bool initialized_ = false;
+
+  // Whether this object has been initialized from command line.
+  bool initialized_from_command_line_ = false;
+
+  DISALLOW_COPY_AND_ASSIGN(FeatureList);
+};
+
+}  // namespace base
+
+#endif // BASE_FEATURE_LIST_H_
diff --git a/libchrome/base/feature_list_unittest.cc b/libchrome/base/feature_list_unittest.cc
new file mode 100644
index 0000000..9d1dcb7
--- /dev/null
+++ b/libchrome/base/feature_list_unittest.cc
@@ -0,0 +1,471 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/feature_list.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <utility>
+
+#include "base/format_macros.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/field_trial.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// Feature constants shared by the tests below. These must be the only Feature
+// structs for these names (see the CheckFeatureIdentity test).
+const char kFeatureOnByDefaultName[] = "OnByDefault";
+struct Feature kFeatureOnByDefault {
+  kFeatureOnByDefaultName, FEATURE_ENABLED_BY_DEFAULT
+};
+
+const char kFeatureOffByDefaultName[] = "OffByDefault";
+struct Feature kFeatureOffByDefault {
+  kFeatureOffByDefaultName, FEATURE_DISABLED_BY_DEFAULT
+};
+
+// Sorts the comma-separated entries of |feature_list| so tests can compare
+// override lists without depending on the overrides map's iteration order.
+std::string SortFeatureListString(const std::string& feature_list) {
+  std::vector<std::string> features =
+      FeatureList::SplitFeatureListString(feature_list);
+  std::sort(features.begin(), features.end());
+  return JoinString(features, ",");
+}
+
+}  // namespace
+
+// Fixture that installs a fresh FeatureList singleton for each test and
+// clears it again in the destructor.
+class FeatureListTest : public testing::Test {
+ public:
+  FeatureListTest() : feature_list_(nullptr) {
+    RegisterFeatureListInstance(WrapUnique(new FeatureList));
+  }
+  ~FeatureListTest() override { ClearFeatureListInstance(); }
+
+  // Replaces the process-wide FeatureList singleton with |feature_list|.
+  void RegisterFeatureListInstance(std::unique_ptr<FeatureList> feature_list) {
+    FeatureList::ClearInstanceForTesting();
+    feature_list_ = feature_list.get();
+    FeatureList::SetInstance(std::move(feature_list));
+  }
+  // Removes the singleton so the next test (or block) can install its own.
+  void ClearFeatureListInstance() {
+    FeatureList::ClearInstanceForTesting();
+    feature_list_ = nullptr;
+  }
+
+  FeatureList* feature_list() { return feature_list_; }
+
+ private:
+  // Weak. Owned by the FeatureList::SetInstance().
+  FeatureList* feature_list_;
+
+  DISALLOW_COPY_AND_ASSIGN(FeatureListTest);
+};
+
+// With no overrides registered, features report their declared default states.
+TEST_F(FeatureListTest, DefaultStates) {
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+}
+
+// Exercises enable/disable command-line combinations, including the rule that
+// disable takes precedence when a feature appears on both lists.
+TEST_F(FeatureListTest, InitializeFromCommandLine) {
+  struct {
+    const char* enable_features;
+    const char* disable_features;
+    bool expected_feature_on_state;
+    bool expected_feature_off_state;
+  } test_cases[] = {
+      {"", "", true, false},
+      {"OffByDefault", "", true, true},
+      {"OffByDefault", "OnByDefault", false, true},
+      {"OnByDefault,OffByDefault", "", true, true},
+      {"", "OnByDefault,OffByDefault", false, false},
+      // In the case an entry is both, disable takes precedence.
+      {"OnByDefault", "OnByDefault,OffByDefault", false, false},
+  };
+
+  for (size_t i = 0; i < arraysize(test_cases); ++i) {
+    const auto& test_case = test_cases[i];
+    SCOPED_TRACE(base::StringPrintf("Test[%" PRIuS "]: [%s] [%s]", i,
+                                    test_case.enable_features,
+                                    test_case.disable_features));
+
+    ClearFeatureListInstance();
+    std::unique_ptr<FeatureList> feature_list(new FeatureList);
+    feature_list->InitializeFromCommandLine(test_case.enable_features,
+                                            test_case.disable_features);
+    RegisterFeatureListInstance(std::move(feature_list));
+
+    EXPECT_EQ(test_case.expected_feature_on_state,
+              FeatureList::IsEnabled(kFeatureOnByDefault))
+        << i;
+    EXPECT_EQ(test_case.expected_feature_off_state,
+              FeatureList::IsEnabled(kFeatureOffByDefault))
+        << i;
+  }
+}
+
+TEST_F(FeatureListTest, CheckFeatureIdentity) {
+  // Tests that CheckFeatureIdentity() correctly detects when two different
+  // structs with the same feature name are passed to it.
+
+  // Call it twice for each feature at the top of the file, since the first call
+  // makes it remember the entry and the second call will verify it.
+  EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOnByDefault));
+  EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOnByDefault));
+  EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOffByDefault));
+  EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOffByDefault));
+
+  // Now, call it with a distinct struct for |kFeatureOnByDefaultName|, which
+  // should return false.
+  struct Feature kFeatureOnByDefault2 {
+    kFeatureOnByDefaultName, FEATURE_ENABLED_BY_DEFAULT
+  };
+  EXPECT_FALSE(feature_list()->CheckFeatureIdentity(kFeatureOnByDefault2));
+}
+
+// Field trial overrides control the feature state and each trial is activated
+// the first time its feature's state is queried.
+TEST_F(FeatureListTest, FieldTrialOverrides) {
+  struct {
+    FeatureList::OverrideState trial1_state;
+    FeatureList::OverrideState trial2_state;
+  } test_cases[] = {
+      {FeatureList::OVERRIDE_DISABLE_FEATURE,
+       FeatureList::OVERRIDE_DISABLE_FEATURE},
+      {FeatureList::OVERRIDE_DISABLE_FEATURE,
+       FeatureList::OVERRIDE_ENABLE_FEATURE},
+      {FeatureList::OVERRIDE_ENABLE_FEATURE,
+       FeatureList::OVERRIDE_DISABLE_FEATURE},
+      {FeatureList::OVERRIDE_ENABLE_FEATURE,
+       FeatureList::OVERRIDE_ENABLE_FEATURE},
+  };
+
+  FieldTrial::ActiveGroup active_group;
+  for (size_t i = 0; i < arraysize(test_cases); ++i) {
+    const auto& test_case = test_cases[i];
+    SCOPED_TRACE(base::StringPrintf("Test[%" PRIuS "]", i));
+
+    ClearFeatureListInstance();
+
+    FieldTrialList field_trial_list(nullptr);
+    std::unique_ptr<FeatureList> feature_list(new FeatureList);
+
+    FieldTrial* trial1 = FieldTrialList::CreateFieldTrial("TrialExample1", "A");
+    FieldTrial* trial2 = FieldTrialList::CreateFieldTrial("TrialExample2", "B");
+    feature_list->RegisterFieldTrialOverride(kFeatureOnByDefaultName,
+                                             test_case.trial1_state, trial1);
+    feature_list->RegisterFieldTrialOverride(kFeatureOffByDefaultName,
+                                             test_case.trial2_state, trial2);
+    RegisterFeatureListInstance(std::move(feature_list));
+
+    // Initially, neither trial should be active.
+    EXPECT_FALSE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+    EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+    const bool expected_enabled_1 =
+        (test_case.trial1_state == FeatureList::OVERRIDE_ENABLE_FEATURE);
+    EXPECT_EQ(expected_enabled_1, FeatureList::IsEnabled(kFeatureOnByDefault));
+    // The above should have activated |trial1|.
+    EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+    EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+    const bool expected_enabled_2 =
+        (test_case.trial2_state == FeatureList::OVERRIDE_ENABLE_FEATURE);
+    EXPECT_EQ(expected_enabled_2, FeatureList::IsEnabled(kFeatureOffByDefault));
+    // The above should have activated |trial2|.
+    EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+    EXPECT_TRUE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+  }
+}
+
+// OVERRIDE_USE_DEFAULT keeps each feature's default state while still
+// activating the associated trial when the feature is queried.
+TEST_F(FeatureListTest, FieldTrialAssociateUseDefault) {
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+
+  FieldTrial* trial1 = FieldTrialList::CreateFieldTrial("TrialExample1", "A");
+  FieldTrial* trial2 = FieldTrialList::CreateFieldTrial("TrialExample2", "B");
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial1);
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial2);
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  // Initially, neither trial should be active.
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+  // Check the feature enabled state is its default.
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  // The above should have activated |trial1|.
+  EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+  // Check the feature enabled state is its default.
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  // The above should have activated |trial2|.
+  EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+  EXPECT_TRUE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+}
+
+// A command-line override wins over a conflicting field trial override, and
+// the non-winning trial is never activated.
+TEST_F(FeatureListTest, CommandLineTakesPrecedenceOverFieldTrial) {
+  ClearFeatureListInstance();
+
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+
+  // The feature is explicitly enabled on the command-line.
+  feature_list->InitializeFromCommandLine(kFeatureOffByDefaultName, "");
+
+  // But the FieldTrial would set the feature to disabled.
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("TrialExample2", "A");
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE, trial);
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial->trial_name()));
+  // Command-line should take precedence.
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  // Since the feature is on due to the command-line, and not as a result of the
+  // field trial, the field trial should not be activated (since the Associate*
+  // API wasn't used.)
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial->trial_name()));
+}
+
+// IsFeatureOverriddenFromCommandLine() reports only command-line overrides,
+// not field trial overrides or reporting associations.
+TEST_F(FeatureListTest, IsFeatureOverriddenFromCommandLine) {
+  ClearFeatureListInstance();
+
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+
+  // No features are overridden from the command line yet
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+
+  // Now, enable |kFeatureOffByDefaultName| via the command-line.
+  feature_list->InitializeFromCommandLine(kFeatureOffByDefaultName, "");
+
+  // It should now be overridden for the enabled group.
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_TRUE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+
+  // Register a field trial to associate with the feature and ensure that the
+  // results are still the same.
+  feature_list->AssociateReportingFieldTrial(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE,
+      FieldTrialList::CreateFieldTrial("Trial1", "A"));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_TRUE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+
+  // Now, register a field trial to override |kFeatureOnByDefaultName| state
+  // and check that the function still returns false for that feature.
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE,
+      FieldTrialList::CreateFieldTrial("Trial2", "A"));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  // Check the expected feature states for good measure.
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOnByDefault));
+}
+
+// Reporting trials registered via AssociateReportingFieldTrial() are activated
+// when the feature's state is queried, with the matching group selected.
+TEST_F(FeatureListTest, AssociateReportingFieldTrial) {
+  struct {
+    const char* enable_features;
+    const char* disable_features;
+    bool expected_enable_trial_created;
+    bool expected_disable_trial_created;
+  } test_cases[] = {
+      // If no enable/disable flags are specified, no trials should be created.
+      {"", "", false, false},
+      // Enabling the feature should result in the enable trial created.
+      {kFeatureOffByDefaultName, "", true, false},
+      // Disabling the feature should result in the disable trial created.
+      {"", kFeatureOffByDefaultName, false, true},
+  };
+
+  const char kTrialName[] = "ForcingTrial";
+  const char kForcedOnGroupName[] = "ForcedOn";
+  const char kForcedOffGroupName[] = "ForcedOff";
+
+  for (size_t i = 0; i < arraysize(test_cases); ++i) {
+    const auto& test_case = test_cases[i];
+    SCOPED_TRACE(base::StringPrintf("Test[%" PRIuS "]: [%s] [%s]", i,
+                                    test_case.enable_features,
+                                    test_case.disable_features));
+
+    ClearFeatureListInstance();
+
+    FieldTrialList field_trial_list(nullptr);
+    std::unique_ptr<FeatureList> feature_list(new FeatureList);
+    feature_list->InitializeFromCommandLine(test_case.enable_features,
+                                            test_case.disable_features);
+
+    FieldTrial* enable_trial = nullptr;
+    if (feature_list->IsFeatureOverriddenFromCommandLine(
+            kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE)) {
+      enable_trial = base::FieldTrialList::CreateFieldTrial(kTrialName,
+                                                            kForcedOnGroupName);
+      feature_list->AssociateReportingFieldTrial(
+          kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE,
+          enable_trial);
+    }
+    FieldTrial* disable_trial = nullptr;
+    if (feature_list->IsFeatureOverriddenFromCommandLine(
+            kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE)) {
+      disable_trial = base::FieldTrialList::CreateFieldTrial(
+          kTrialName, kForcedOffGroupName);
+      feature_list->AssociateReportingFieldTrial(
+          kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE,
+          disable_trial);
+    }
+    EXPECT_EQ(test_case.expected_enable_trial_created, enable_trial != nullptr);
+    EXPECT_EQ(test_case.expected_disable_trial_created,
+              disable_trial != nullptr);
+    RegisterFeatureListInstance(std::move(feature_list));
+
+    EXPECT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+    if (disable_trial) {
+      EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+      EXPECT_TRUE(FieldTrialList::IsTrialActive(kTrialName));
+      EXPECT_EQ(kForcedOffGroupName, disable_trial->group_name());
+    } else if (enable_trial) {
+      EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+      EXPECT_TRUE(FieldTrialList::IsTrialActive(kTrialName));
+      EXPECT_EQ(kForcedOnGroupName, enable_trial->group_name());
+    }
+  }
+}
+
+// GetFeatureOverrides() serializes both command-line and field-trial-based
+// overrides, with "FeatureName<TrialName" entries for associated trials.
+TEST_F(FeatureListTest, GetFeatureOverrides) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine("A,X", "D");
+
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("Trial", "Group");
+  feature_list->RegisterFieldTrialOverride(kFeatureOffByDefaultName,
+                                           FeatureList::OVERRIDE_ENABLE_FEATURE,
+                                           trial);
+
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  std::string enable_features;
+  std::string disable_features;
+  FeatureList::GetInstance()->GetFeatureOverrides(&enable_features,
+                                                  &disable_features);
+  EXPECT_EQ("A,OffByDefault<Trial,X", SortFeatureListString(enable_features));
+  EXPECT_EQ("D", SortFeatureListString(disable_features));
+}
+
+// OVERRIDE_USE_DEFAULT entries are serialized into |enable_overrides| with a
+// '*' prefix.
+TEST_F(FeatureListTest, GetFeatureOverrides_UseDefault) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine("A,X", "D");
+
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("Trial", "Group");
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial);
+
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  std::string enable_features;
+  std::string disable_features;
+  FeatureList::GetInstance()->GetFeatureOverrides(&enable_features,
+                                                  &disable_features);
+  EXPECT_EQ("*OffByDefault<Trial,A,X", SortFeatureListString(enable_features));
+  EXPECT_EQ("D", SortFeatureListString(disable_features));
+}
+
+// GetFieldTrial() returns the trial associated with a feature, or null when
+// the feature has no associated trial.
+TEST_F(FeatureListTest, GetFieldTrial) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("Trial", "Group");
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial);
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  EXPECT_EQ(trial, FeatureList::GetFieldTrial(kFeatureOnByDefault));
+  EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kFeatureOffByDefault));
+}
+
+// "FeatureName<TrialName" command-line entries associate the named trial,
+// which is activated when the feature is first queried.
+TEST_F(FeatureListTest, InitializeFromCommandLine_WithFieldTrials) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrialList::CreateFieldTrial("Trial", "Group");
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine("A,OffByDefault<Trial,X", "D");
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  EXPECT_FALSE(FieldTrialList::IsTrialActive("Trial"));
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  EXPECT_TRUE(FieldTrialList::IsTrialActive("Trial"));
+}
+
+// A '*'-prefixed command-line entry keeps the feature's default state while
+// still activating the associated trial on first query.
+TEST_F(FeatureListTest, InitializeFromCommandLine_UseDefault) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrialList::CreateFieldTrial("T1", "Group");
+  FieldTrialList::CreateFieldTrial("T2", "Group");
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine(
+      "A,*OffByDefault<T1,*OnByDefault<T2,X", "D");
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  EXPECT_FALSE(FieldTrialList::IsTrialActive("T1"));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  EXPECT_TRUE(FieldTrialList::IsTrialActive("T1"));
+
+  EXPECT_FALSE(FieldTrialList::IsTrialActive("T2"));
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_TRUE(FieldTrialList::IsTrialActive("T2"));
+}
+
+// InitializeInstance() applies command-line overrides only the first time it
+// is given them; subsequent calls do not re-initialize.
+TEST_F(FeatureListTest, InitializeInstance) {
+  ClearFeatureListInstance();
+
+  std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+  FeatureList::SetInstance(std::move(feature_list));
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+
+  // Initialize from command line if we haven't yet.
+  FeatureList::InitializeInstance("", kFeatureOnByDefaultName);
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+
+  // Do not initialize from commandline if we have already.
+  FeatureList::InitializeInstance(kFeatureOffByDefaultName, "");
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+}
+
+TEST_F(FeatureListTest, UninitializedInstance_IsEnabledReturnsFalse) {
+  ClearFeatureListInstance();
+  // This test case simulates the calling pattern found in code which does not
+  // explicitly initialize the features list.
+  // All IsEnabled() calls should return the default value in this scenario.
+  EXPECT_EQ(nullptr, FeatureList::GetInstance());
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_EQ(nullptr, FeatureList::GetInstance());
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+}
+
+}  // namespace base
diff --git a/libchrome/base/file_descriptor_posix.h b/libchrome/base/file_descriptor_posix.h
new file mode 100644
index 0000000..2a36611
--- /dev/null
+++ b/libchrome/base/file_descriptor_posix.h
@@ -0,0 +1,59 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILE_DESCRIPTOR_POSIX_H_
+#define BASE_FILE_DESCRIPTOR_POSIX_H_
+
+#include "base/files/file.h"
+#include "base/files/scoped_file.h"
+
+namespace base {
+
+// -----------------------------------------------------------------------------
+// We introduce a special structure for file descriptors so that we can use
+// template specialisation to special-case their handling.
+//
+// IMPORTANT: This is primarily intended for use when sending file descriptors
+// over IPC. Even if |auto_close| is true, base::FileDescriptor does NOT close()
+// |fd| when going out of scope. Instead, a consumer of a base::FileDescriptor
+// must invoke close() on |fd| if |auto_close| is true.
+//
+// In the case of IPC, the IPC subsystem knows to close() |fd| after sending
+// a message that contains a base::FileDescriptor if auto_close == true. On the
+// other end, the receiver must make sure to close() |fd| after it has finished
+// processing the IPC message. See the IPC::ParamTraits<> specialization in
+// ipc/ipc_message_utils.h for all the details.
+// -----------------------------------------------------------------------------
+struct FileDescriptor {
+  FileDescriptor() : fd(-1), auto_close(false) {}
+
+  FileDescriptor(int ifd, bool iauto_close) : fd(ifd), auto_close(iauto_close) {
+  }
+
+  // Adopt the handle; NOTE(review): the File overload is not 'explicit'.
+  FileDescriptor(File file) : fd(file.TakePlatformFile()), auto_close(true) {}
+  explicit FileDescriptor(ScopedFD fd) : fd(fd.release()), auto_close(true) {}
+
+  bool operator==(const FileDescriptor& other) const {
+    return (fd == other.fd && auto_close == other.auto_close);
+  }
+
+  bool operator!=(const FileDescriptor& other) const {
+    return !operator==(other);
+  }
+
+  // std::map key ordering. NOTE(review): compares reversed (descending by fd).
+  bool operator<(const FileDescriptor& other) const {
+    return other.fd < fd;
+  }
+
+  int fd;
+  // If true, this file descriptor should be closed after it has been used. For
+  // example an IPC system might interpret this flag as indicating that the
+  // file descriptor it has been given should be closed after use.
+  bool auto_close;
+};
+
+} // namespace base
+
+#endif // BASE_FILE_DESCRIPTOR_POSIX_H_
diff --git a/libchrome/base/file_version_info.h b/libchrome/base/file_version_info.h
new file mode 100644
index 0000000..3b9457c
--- /dev/null
+++ b/libchrome/base/file_version_info.h
@@ -0,0 +1,73 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILE_VERSION_INFO_H_
+#define BASE_FILE_VERSION_INFO_H_
+
+#include <string>
+
+#include "build/build_config.h"
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+class FilePath;
+}
+
+// Provides an interface for accessing the version information for a file. This
+// is the information you access when you select a file in the Windows Explorer,
+// right-click select Properties, then click the Version tab, and on the Mac
+// when you select a file in the Finder and do a Get Info.
+//
+// This list of properties is straight out of Win32's VerQueryValue
+// <http://msdn.microsoft.com/en-us/library/ms647464.aspx> and the Mac
+// version returns values from the Info.plist as appropriate. TODO(avi): make
+// this a less-obvious Windows-ism.
+
+class BASE_EXPORT FileVersionInfo {
+ public:
+  virtual ~FileVersionInfo() {}
+#if defined(OS_WIN) || defined(OS_MACOSX)
+  // Creates a FileVersionInfo for the specified path. Returns NULL if something
+  // goes wrong (typically the file does not exist or cannot be opened). The
+  // returned object should be deleted when you are done with it.
+  static FileVersionInfo* CreateFileVersionInfo(
+      const base::FilePath& file_path);
+#endif  // OS_WIN || OS_MACOSX
+
+#if defined(OS_WIN)
+  // Creates a FileVersionInfo for the specified module. Returns NULL in case
+  // of error. The returned object should be deleted when you are done with it.
+  static FileVersionInfo* CreateFileVersionInfoForModule(HMODULE module);
+#else
+  // Creates a FileVersionInfo for the current module. Returns NULL in case
+  // of error. The returned object should be deleted when you are done with it.
+  static FileVersionInfo* CreateFileVersionInfoForCurrentModule();
+#endif  // OS_WIN
+
+  // Accessors to the different version properties.
+  // Returns an empty string if the property is not found.
+  virtual base::string16 company_name() = 0;
+  virtual base::string16 company_short_name() = 0;
+  virtual base::string16 product_name() = 0;
+  virtual base::string16 product_short_name() = 0;
+  virtual base::string16 internal_name() = 0;
+  virtual base::string16 product_version() = 0;
+  virtual base::string16 private_build() = 0;
+  virtual base::string16 special_build() = 0;
+  virtual base::string16 comments() = 0;
+  virtual base::string16 original_filename() = 0;
+  virtual base::string16 file_description() = 0;
+  virtual base::string16 file_version() = 0;
+  virtual base::string16 legal_copyright() = 0;
+  virtual base::string16 legal_trademarks() = 0;
+  virtual base::string16 last_change() = 0;
+  virtual bool is_official_build() = 0;
+};
+
+#endif // BASE_FILE_VERSION_INFO_H_
diff --git a/libchrome/base/file_version_info_unittest.cc b/libchrome/base/file_version_info_unittest.cc
new file mode 100644
index 0000000..67edc77
--- /dev/null
+++ b/libchrome/base/file_version_info_unittest.cc
@@ -0,0 +1,144 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_version_info.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/files/file_path.h"
+#include "base/macros.h"
+
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include "base/path_service.h"
+#include "base/file_version_info_win.h"
+#endif
+
+using base::FilePath;
+
+namespace {
+
+#if defined(OS_WIN)
+FilePath GetTestDataPath() {
+  FilePath path;
+  PathService::Get(base::DIR_SOURCE_ROOT, &path);  // Failure ignored; path "".
+  path = path.AppendASCII("base");
+  path = path.AppendASCII("test");
+  path = path.AppendASCII("data");
+  path = path.AppendASCII("file_version_info_unittest");
+  return path;
+}
+#endif
+
+} // namespace
+
+#if defined(OS_WIN)
+TEST(FileVersionInfoTest, HardCodedProperties) {
+  const wchar_t kDLLName[] = {L"FileVersionInfoTest1.dll"};
+
+  const wchar_t* const kExpectedValues[15] = {
+      // Hard-coded values compiled into FileVersionInfoTest1.dll.
+      L"Goooooogle",                                      // company_name
+      L"Google",                                          // company_short_name
+      L"This is the product name",                        // product_name
+      L"This is the product short name",                  // product_short_name
+      L"The Internal Name",                               // internal_name
+      L"4.3.2.1",                                         // product_version
+      L"Private build property",                          // private_build
+      L"Special build property",                          // special_build
+      L"This is a particularly interesting comment",      // comments
+      L"This is the original filename",                   // original_filename
+      L"This is my file description",                     // file_description
+      L"1.2.3.4",                                         // file_version
+      L"This is the legal copyright",                     // legal_copyright
+      L"This is the legal trademarks",                    // legal_trademarks
+      L"This is the last change",                         // last_change
+  };
+
+  FilePath dll_path = GetTestDataPath();
+  dll_path = dll_path.Append(kDLLName);
+
+  std::unique_ptr<FileVersionInfo> version_info(
+      FileVersionInfo::CreateFileVersionInfo(dll_path));
+  // The accessor order below must match kExpectedValues' declaration order.
+  int j = 0;
+  EXPECT_EQ(kExpectedValues[j++], version_info->company_name());
+  EXPECT_EQ(kExpectedValues[j++], version_info->company_short_name());
+  EXPECT_EQ(kExpectedValues[j++], version_info->product_name());
+  EXPECT_EQ(kExpectedValues[j++], version_info->product_short_name());
+  EXPECT_EQ(kExpectedValues[j++], version_info->internal_name());
+  EXPECT_EQ(kExpectedValues[j++], version_info->product_version());
+  EXPECT_EQ(kExpectedValues[j++], version_info->private_build());
+  EXPECT_EQ(kExpectedValues[j++], version_info->special_build());
+  EXPECT_EQ(kExpectedValues[j++], version_info->comments());
+  EXPECT_EQ(kExpectedValues[j++], version_info->original_filename());
+  EXPECT_EQ(kExpectedValues[j++], version_info->file_description());
+  EXPECT_EQ(kExpectedValues[j++], version_info->file_version());
+  EXPECT_EQ(kExpectedValues[j++], version_info->legal_copyright());
+  EXPECT_EQ(kExpectedValues[j++], version_info->legal_trademarks());
+  EXPECT_EQ(kExpectedValues[j++], version_info->last_change());
+}
+#endif
+
+#if defined(OS_WIN)
+TEST(FileVersionInfoTest, IsOfficialBuild) {
+  const wchar_t* kDLLNames[] = {
+    L"FileVersionInfoTest1.dll",
+    L"FileVersionInfoTest2.dll"
+  };
+
+  const bool kExpected[] = {
+    true,   // FileVersionInfoTest1.dll is marked as an official build.
+    false,  // FileVersionInfoTest2.dll is not.
+  };
+
+  // The two arrays above must describe the same set of DLLs, in order.
+  ASSERT_EQ(arraysize(kDLLNames), arraysize(kExpected));
+
+  for (size_t i = 0; i < arraysize(kDLLNames); ++i) {
+    FilePath dll_path = GetTestDataPath();
+    dll_path = dll_path.Append(kDLLNames[i]);
+
+    std::unique_ptr<FileVersionInfo> version_info(
+        FileVersionInfo::CreateFileVersionInfo(dll_path));
+
+    EXPECT_EQ(kExpected[i], version_info->is_official_build());
+  }
+}
+#endif
+
+#if defined(OS_WIN)
+TEST(FileVersionInfoTest, CustomProperties) {
+  FilePath dll_path = GetTestDataPath();
+  dll_path = dll_path.AppendASCII("FileVersionInfoTest1.dll");
+
+  std::unique_ptr<FileVersionInfo> version_info(
+      FileVersionInfo::CreateFileVersionInfo(dll_path));
+
+  // Look up a few custom properties that are known to exist in the DLL.
+  std::wstring str;
+  FileVersionInfoWin* version_info_win =
+      static_cast<FileVersionInfoWin*>(version_info.get());
+  EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 1", &str));
+  EXPECT_EQ(L"Un", str);
+  EXPECT_EQ(L"Un", version_info_win->GetStringValue(L"Custom prop 1"));
+
+  EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 2", &str));
+  EXPECT_EQ(L"Deux", str);
+  EXPECT_EQ(L"Deux", version_info_win->GetStringValue(L"Custom prop 2"));
+
+  EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 3", &str));
+  EXPECT_EQ(L"1600 Amphitheatre Parkway Mountain View, CA 94043", str);
+  EXPECT_EQ(L"1600 Amphitheatre Parkway Mountain View, CA 94043",
+            version_info_win->GetStringValue(L"Custom prop 3"));
+
+  // A property that does not exist: GetValue() fails, GetStringValue() is "".
+  EXPECT_FALSE(version_info_win->GetValue(L"Unknown property", &str));
+  EXPECT_EQ(L"", version_info_win->GetStringValue(L"Unknown property"));
+}
+#endif
diff --git a/libchrome/base/files/dir_reader_fallback.h b/libchrome/base/files/dir_reader_fallback.h
new file mode 100644
index 0000000..4bc199a
--- /dev/null
+++ b/libchrome/base/files/dir_reader_fallback.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_DIR_READER_FALLBACK_H_
+#define BASE_FILES_DIR_READER_FALLBACK_H_
+
+namespace base {
+
+class DirReaderFallback {
+ public:
+  // Open a directory. If |IsValid| is true, then |Next| can be called to start
+  // the iteration at the beginning of the directory.
+  explicit DirReaderFallback(const char* /* directory_path */) {}
+
+  // After construction, IsValid returns true iff the directory was
+  // successfully opened. The fallback never opens anything, so always false.
+  bool IsValid() const { return false; }
+
+  // Move to the next entry returning false if the iteration is complete.
+  bool Next() { return false; }
+
+  // Return the name of the current directory entry.
+  const char* name() { return nullptr;}
+
+  // Return the file descriptor which is being used.
+  int fd() const { return -1; }
+
+  // Returns true if this is a no-op fallback class (for testing).
+  static bool IsFallback() { return true; }
+};
+
+} // namespace base
+
+#endif // BASE_FILES_DIR_READER_FALLBACK_H_
diff --git a/libchrome/base/files/dir_reader_linux.h b/libchrome/base/files/dir_reader_linux.h
new file mode 100644
index 0000000..4ce0c34
--- /dev/null
+++ b/libchrome/base/files/dir_reader_linux.h
@@ -0,0 +1,101 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_DIR_READER_LINUX_H_
+#define BASE_FILES_DIR_READER_LINUX_H_
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/posix/eintr_wrapper.h"
+
+// See the comments in dir_reader_posix.h about this.
+
+namespace base {
+
+struct linux_dirent {  // Record layout used with getdents64(2); see man page.
+  uint64_t d_ino;           // Inode number.
+  int64_t d_off;            // Offset to the next record.
+  unsigned short d_reclen;  // Total size of this record in bytes.
+  unsigned char d_type;     // Entry type (DT_* constant).
+  char d_name[0];           // NUL-terminated name; flexible trailing array.
+};
+
+class DirReaderLinux {
+ public:
+  explicit DirReaderLinux(const char* directory_path)
+      : fd_(open(directory_path, O_RDONLY | O_DIRECTORY)),
+        offset_(0),
+        size_(0) {
+    memset(buf_, 0, sizeof(buf_));
+  }
+
+  ~DirReaderLinux() {
+    if (fd_ >= 0) {
+      if (IGNORE_EINTR(close(fd_)))
+        RAW_LOG(ERROR, "Failed to close directory handle");
+    }
+  }
+
+  bool IsValid() const {
+    return fd_ >= 0;
+  }
+
+  // Move to the next entry returning false if the iteration is complete.
+  bool Next() {
+    if (size_) {
+      linux_dirent* dirent = reinterpret_cast<linux_dirent*>(&buf_[offset_]);
+      offset_ += dirent->d_reclen;  // Step past the entry returned last time.
+    }
+
+    if (offset_ != size_)
+      return true;
+
+    // Buffer exhausted: ask the kernel for the next batch of entries.
+    const int r = syscall(__NR_getdents64, fd_, buf_, sizeof(buf_));
+    if (r == 0)
+      return false;
+    if (r == -1) {
+      DPLOG(FATAL) << "getdents64 returned an error: " << errno;
+      return false;
+    }
+    size_ = r;
+    offset_ = 0;
+    return true;
+  }
+
+  const char* name() const {
+    if (!size_)
+      return nullptr;
+
+    const linux_dirent* dirent =
+        reinterpret_cast<const linux_dirent*>(&buf_[offset_]);
+    return dirent->d_name;
+  }
+
+  int fd() const {
+    return fd_;
+  }
+
+  static bool IsFallback() {
+    return false;
+  }
+
+ private:
+  const int fd_;
+  unsigned char buf_[512];  // NOTE(review): cast to linux_dirent; confirm that
+  size_t offset_;           // 8-byte alignment of buf_ is guaranteed here.
+  size_t size_;
+
+  DISALLOW_COPY_AND_ASSIGN(DirReaderLinux);
+};
+
+} // namespace base
+
+#endif // BASE_FILES_DIR_READER_LINUX_H_
diff --git a/libchrome/base/files/dir_reader_posix.h b/libchrome/base/files/dir_reader_posix.h
new file mode 100644
index 0000000..6a32d9f
--- /dev/null
+++ b/libchrome/base/files/dir_reader_posix.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_DIR_READER_POSIX_H_
+#define BASE_FILES_DIR_READER_POSIX_H_
+
+#include "build/build_config.h"
+
+// This header provides a class, DirReaderPosix, which allows one to open and
+// read from directories without allocating memory. For the interface, see
+// the generic fallback in dir_reader_fallback.h.
+
+// Mac note: OS X has getdirentries, but it only works if we restrict Chrome to
+// 32-bit inodes. There is a getdirentries64 syscall in 10.6, but it's not
+// wrapped and the direct syscall interface is unstable. Using an unstable API
+// seems worse than falling back to enumerating all file descriptors so we will
+// probably never implement this on the Mac.
+
+#if defined(OS_LINUX)
+#include "base/files/dir_reader_linux.h"
+#else
+#include "base/files/dir_reader_fallback.h"
+#endif
+
+namespace base {
+
+#if defined(OS_LINUX)
+typedef DirReaderLinux DirReaderPosix;
+#else
+typedef DirReaderFallback DirReaderPosix;
+#endif
+
+} // namespace base
+
+#endif // BASE_FILES_DIR_READER_POSIX_H_
diff --git a/libchrome/base/files/dir_reader_posix_unittest.cc b/libchrome/base/files/dir_reader_posix_unittest.cc
new file mode 100644
index 0000000..a75858f
--- /dev/null
+++ b/libchrome/base/files/dir_reader_posix_unittest.cc
@@ -0,0 +1,95 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/dir_reader_posix.h"
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "base/files/scoped_temp_dir.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_ANDROID)
+#include "base/os_compat_android.h"
+#endif
+
+namespace base {
+
+TEST(DirReaderPosixUnittest, Read) {
+  static const unsigned kNumFiles = 100;
+
+  if (DirReaderPosix::IsFallback())
+    return;
+
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  const char* dir = temp_dir.path().value().c_str();  // Valid while temp_dir lives.
+  ASSERT_TRUE(dir);
+
+  const int prev_wd = open(".", O_RDONLY | O_DIRECTORY);
+  DCHECK_GE(prev_wd, 0);
+
+  PCHECK(chdir(dir) == 0);
+
+  for (unsigned i = 0; i < kNumFiles; i++) {
+    char buf[16];
+    snprintf(buf, sizeof(buf), "%d", i);
+    const int fd = open(buf, O_CREAT | O_RDONLY | O_EXCL, 0600);
+    PCHECK(fd >= 0);
+    PCHECK(close(fd) == 0);
+  }
+
+  std::set<unsigned> seen;  // NOTE(review): <set> is not #included in this file.
+
+  DirReaderPosix reader(dir);
+  EXPECT_TRUE(reader.IsValid());
+
+  if (!reader.IsValid())
+    return;
+
+  bool seen_dot = false, seen_dotdot = false;
+
+  for (; reader.Next(); ) {
+    if (strcmp(reader.name(), ".") == 0) {
+      seen_dot = true;
+      continue;
+    }
+    if (strcmp(reader.name(), "..") == 0) {
+      seen_dotdot = true;
+      continue;
+    }
+
+    SCOPED_TRACE(testing::Message() << "reader.name(): " << reader.name());
+
+    char *endptr;
+    const unsigned long value = strtoul(reader.name(), &endptr, 10);
+
+    EXPECT_FALSE(*endptr);  // The whole name parsed as a number.
+    EXPECT_LT(value, kNumFiles);
+    EXPECT_EQ(0u, seen.count(value));  // Each file is reported exactly once.
+    seen.insert(value);
+  }
+
+  for (unsigned i = 0; i < kNumFiles; i++) {
+    char buf[16];
+    snprintf(buf, sizeof(buf), "%d", i);
+    PCHECK(unlink(buf) == 0);
+  }
+
+  PCHECK(rmdir(dir) == 0);
+
+  PCHECK(fchdir(prev_wd) == 0);
+  PCHECK(close(prev_wd) == 0);
+
+  EXPECT_TRUE(seen_dot);
+  EXPECT_TRUE(seen_dotdot);
+  EXPECT_EQ(kNumFiles, seen.size());
+}
+
+} // namespace base
diff --git a/libchrome/base/files/file.cc b/libchrome/base/files/file.cc
new file mode 100644
index 0000000..ab05630
--- /dev/null
+++ b/libchrome/base/files/file.cc
@@ -0,0 +1,149 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/files/file_tracing.h"
+#include "base/metrics/histogram.h"
+#include "base/timer/elapsed_timer.h"
+#include "build/build_config.h"
+
+namespace base {
+
+File::Info::Info()
+    : size(0),
+      is_directory(false),
+      is_symbolic_link(false) {
+}
+
+File::Info::~Info() {
+}
+
+File::File()  // Default: an invalid file reporting FILE_ERROR_FAILED.
+    : error_details_(FILE_ERROR_FAILED),
+      created_(false),
+      async_(false) {
+}
+
+#if !defined(OS_NACL)
+File::File(const FilePath& path, uint32_t flags)
+    : error_details_(FILE_OK), created_(false), async_(false) {
+  Initialize(path, flags);
+}
+#endif
+
+File::File(PlatformFile platform_file)
+    : file_(platform_file),
+      error_details_(FILE_OK),
+      created_(false),
+      async_(false) {
+#if defined(OS_POSIX)
+  DCHECK_GE(platform_file, -1);  // -1 (kInvalidPlatformFile) is allowed.
+#endif
+}
+
+File::File(Error error_details)
+    : error_details_(error_details),
+      created_(false),
+      async_(false) {
+}
+
+File::File(File&& other)
+    : file_(other.TakePlatformFile()),
+      tracing_path_(other.tracing_path_),
+      error_details_(other.error_details()),
+      created_(other.created()),
+      async_(other.async_) {}
+
+File::~File() {
+  // Go through the AssertIOAllowed logic.
+  Close();
+}
+
+// static
+File File::CreateForAsyncHandle(PlatformFile platform_file) {
+  File file(platform_file);
+  // It would be nice if we could validate that |platform_file| was opened with
+  // FILE_FLAG_OVERLAPPED on Windows but this doesn't appear to be possible.
+  file.async_ = true;
+  return file;
+}
+
+File& File::operator=(File&& other) {
+  DCHECK_NE(this, &other);  // Self-move is a caller bug.
+  Close();
+  SetPlatformFile(other.TakePlatformFile());
+  tracing_path_ = other.tracing_path_;
+  error_details_ = other.error_details();
+  created_ = other.created();
+  async_ = other.async_;
+  return *this;
+}
+
+#if !defined(OS_NACL)
+void File::Initialize(const FilePath& path, uint32_t flags) {
+  if (path.ReferencesParent()) {
+    error_details_ = FILE_ERROR_ACCESS_DENIED;  // Reject '..' path traversal.
+    return;
+  }
+  if (FileTracing::IsCategoryEnabled())
+    tracing_path_ = path;  // Only pay for the copy when tracing is enabled.
+  SCOPED_FILE_TRACE("Initialize");
+  DoInitialize(path, flags);
+}
+#endif
+
+std::string File::ErrorToString(Error error) {
+  switch (error) {
+    case FILE_OK:
+      return "FILE_OK";
+    case FILE_ERROR_FAILED:
+      return "FILE_ERROR_FAILED";
+    case FILE_ERROR_IN_USE:
+      return "FILE_ERROR_IN_USE";
+    case FILE_ERROR_EXISTS:
+      return "FILE_ERROR_EXISTS";
+    case FILE_ERROR_NOT_FOUND:
+      return "FILE_ERROR_NOT_FOUND";
+    case FILE_ERROR_ACCESS_DENIED:
+      return "FILE_ERROR_ACCESS_DENIED";
+    case FILE_ERROR_TOO_MANY_OPENED:
+      return "FILE_ERROR_TOO_MANY_OPENED";
+    case FILE_ERROR_NO_MEMORY:
+      return "FILE_ERROR_NO_MEMORY";
+    case FILE_ERROR_NO_SPACE:
+      return "FILE_ERROR_NO_SPACE";
+    case FILE_ERROR_NOT_A_DIRECTORY:
+      return "FILE_ERROR_NOT_A_DIRECTORY";
+    case FILE_ERROR_INVALID_OPERATION:
+      return "FILE_ERROR_INVALID_OPERATION";
+    case FILE_ERROR_SECURITY:
+      return "FILE_ERROR_SECURITY";
+    case FILE_ERROR_ABORT:
+      return "FILE_ERROR_ABORT";
+    case FILE_ERROR_NOT_A_FILE:
+      return "FILE_ERROR_NOT_A_FILE";
+    case FILE_ERROR_NOT_EMPTY:
+      return "FILE_ERROR_NOT_EMPTY";
+    case FILE_ERROR_INVALID_URL:
+      return "FILE_ERROR_INVALID_URL";
+    case FILE_ERROR_IO:
+      return "FILE_ERROR_IO";
+    case FILE_ERROR_MAX:
+      break;  // Not a real error value; falls through to NOTREACHED().
+  }
+
+  NOTREACHED();  // Every valid Error value is handled above.
+  return "";
+}
+
+bool File::Flush() {
+  ElapsedTimer timer;  // Times the flush for the histogram below.
+  SCOPED_FILE_TRACE("Flush");
+  bool return_value = DoFlush();
+  UMA_HISTOGRAM_TIMES("PlatformFile.FlushTime", timer.Elapsed());
+  return return_value;
+}
+
+} // namespace base
diff --git a/libchrome/base/files/file.h b/libchrome/base/files/file.h
new file mode 100644
index 0000000..ae2bd1b
--- /dev/null
+++ b/libchrome/base/files/file.h
@@ -0,0 +1,342 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_FILE_H_
+#define BASE_FILES_FILE_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/files/file_tracing.h"
+#include "base/files/scoped_file.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include "base/win/scoped_handle.h"
+#endif
+
+#if defined(OS_POSIX)
+#include <sys/stat.h>
+#endif
+
+namespace base {
+
+#if defined(OS_WIN)
+using PlatformFile = HANDLE;
+
+const PlatformFile kInvalidPlatformFile = INVALID_HANDLE_VALUE;
+#elif defined(OS_POSIX)
+using PlatformFile = int;
+
+const PlatformFile kInvalidPlatformFile = -1;
+#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL)
+typedef struct stat stat_wrapper_t;
+#else
+typedef struct stat64 stat_wrapper_t;
+#endif
+#endif // defined(OS_POSIX)
+
+// Thin wrapper around an OS-level file.
+// Note that this class does not provide any support for asynchronous IO, other
+// than the ability to create asynchronous handles on Windows.
+//
+// Note about const: this class does not attempt to determine if the underlying
+// file system object is affected by a particular method in order to consider
+// that method const or not. Only methods that deal with member variables in an
+// obvious non-modifying way are marked as const. Any method that forward calls
+// to the OS is not considered const, even if there is no apparent change to
+// member variables.
+class BASE_EXPORT File {
+ public:
+  // FLAG_(OPEN|CREATE).* are mutually exclusive. You should specify exactly one
+  // of the five (possibly combining with other flags) when opening or creating
+  // a file.
+  // FLAG_(WRITE|APPEND) are mutually exclusive. This is so that APPEND behavior
+  // will be consistent with O_APPEND on POSIX.
+  // FLAG_EXCLUSIVE_(READ|WRITE) only grant exclusive access to the file on
+  // creation on POSIX; for existing files, consider using Lock().
+  enum Flags {
+    FLAG_OPEN = 1 << 0,             // Opens a file, only if it exists.
+    FLAG_CREATE = 1 << 1,           // Creates a new file, only if it does not
+                                    // already exist.
+    FLAG_OPEN_ALWAYS = 1 << 2,      // May create a new file.
+    FLAG_CREATE_ALWAYS = 1 << 3,    // May overwrite an old file.
+    FLAG_OPEN_TRUNCATED = 1 << 4,   // Opens a file and truncates it, only if it
+                                    // exists.
+    FLAG_READ = 1 << 5,
+    FLAG_WRITE = 1 << 6,
+    FLAG_APPEND = 1 << 7,
+    FLAG_EXCLUSIVE_READ = 1 << 8,   // EXCLUSIVE is opposite of Windows SHARE.
+    FLAG_EXCLUSIVE_WRITE = 1 << 9,
+    FLAG_ASYNC = 1 << 10,
+    FLAG_TEMPORARY = 1 << 11,       // Used on Windows only.
+    FLAG_HIDDEN = 1 << 12,          // Used on Windows only.
+    FLAG_DELETE_ON_CLOSE = 1 << 13,
+    FLAG_WRITE_ATTRIBUTES = 1 << 14,  // Used on Windows only.
+    FLAG_SHARE_DELETE = 1 << 15,      // Used on Windows only.
+    FLAG_TERMINAL_DEVICE = 1 << 16,   // Serial port flags.
+    FLAG_BACKUP_SEMANTICS = 1 << 17,  // Used on Windows only.
+    FLAG_EXECUTE = 1 << 18,           // Used on Windows only.
+    FLAG_SEQUENTIAL_SCAN = 1 << 19,   // Used on Windows only.
+  };
+
+  // This enum has been recorded in multiple histograms. If the order of the
+  // fields needs to change, please ensure that those histograms are obsolete or
+  // have been moved to a different enum.
+  //
+  // FILE_ERROR_ACCESS_DENIED is returned when a call fails because of a
+  // filesystem restriction. FILE_ERROR_SECURITY is returned when a browser
+  // policy doesn't allow the operation to be executed.
+  enum Error {
+    FILE_OK = 0,
+    FILE_ERROR_FAILED = -1,
+    FILE_ERROR_IN_USE = -2,
+    FILE_ERROR_EXISTS = -3,
+    FILE_ERROR_NOT_FOUND = -4,
+    FILE_ERROR_ACCESS_DENIED = -5,
+    FILE_ERROR_TOO_MANY_OPENED = -6,
+    FILE_ERROR_NO_MEMORY = -7,
+    FILE_ERROR_NO_SPACE = -8,
+    FILE_ERROR_NOT_A_DIRECTORY = -9,
+    FILE_ERROR_INVALID_OPERATION = -10,
+    FILE_ERROR_SECURITY = -11,
+    FILE_ERROR_ABORT = -12,
+    FILE_ERROR_NOT_A_FILE = -13,
+    FILE_ERROR_NOT_EMPTY = -14,
+    FILE_ERROR_INVALID_URL = -15,
+    FILE_ERROR_IO = -16,
+    // Put new entries here and increment FILE_ERROR_MAX.
+    FILE_ERROR_MAX = -17
+  };
+
+  // This explicit mapping matches both FILE_ on Windows and SEEK_ on Linux.
+  enum Whence {
+    FROM_BEGIN = 0,
+    FROM_CURRENT = 1,
+    FROM_END = 2
+  };
+
+  // Used to hold information about a given file.
+  // If you add more fields to this structure (platform-specific fields are OK),
+  // make sure to update all functions that use it in file_util_{win|posix}.cc,
+  // too, and the ParamTraits<base::File::Info> implementation in
+  // ipc/ipc_message_utils.cc.
+  struct BASE_EXPORT Info {
+    Info();
+    ~Info();
+#if defined(OS_POSIX)
+    // Fills this struct with values from |stat_info|.
+    void FromStat(const stat_wrapper_t& stat_info);
+#endif
+
+    // The size of the file in bytes. Undefined when is_directory is true.
+    int64_t size;
+
+    // True if the file corresponds to a directory.
+    bool is_directory;
+
+    // True if the file corresponds to a symbolic link. For Windows currently
+    // not supported and thus always false.
+    bool is_symbolic_link;
+
+    // The last modified time of a file.
+    Time last_modified;
+
+    // The last accessed time of a file.
+    Time last_accessed;
+
+    // The creation time of a file.
+    Time creation_time;
+  };
+
+  File();
+
+  // Creates or opens the given file. This will fail with 'access denied' if the
+  // |path| contains path traversal ('..') components.
+  File(const FilePath& path, uint32_t flags);
+
+  // Takes ownership of |platform_file|.
+  explicit File(PlatformFile platform_file);
+
+  // Creates an object with a specific error_details code.
+  explicit File(Error error_details);
+
+  File(File&& other);
+
+  ~File();
+
+  // Takes ownership of |platform_file|.
+  static File CreateForAsyncHandle(PlatformFile platform_file);
+
+  File& operator=(File&& other);
+
+  // Creates or opens the given file.
+  void Initialize(const FilePath& path, uint32_t flags);
+
+  // Returns |true| if the handle / fd wrapped by this object is valid. This
+  // method doesn't interact with the file system (and is safe to be called from
+  // ThreadRestrictions::SetIOAllowed(false) threads).
+  bool IsValid() const;
+
+  // Returns true if a new file was created (or an old one truncated to zero
+  // length to simulate a new file, which can happen with
+  // FLAG_CREATE_ALWAYS), and false otherwise.
+  bool created() const { return created_; }
+
+  // Returns the OS result of opening this file. Note that the way to verify
+  // the success of the operation is to use IsValid(), not this method:
+  //   File file(path, flags);
+  //   if (!file.IsValid())
+  //     return;
+  Error error_details() const { return error_details_; }
+
+  PlatformFile GetPlatformFile() const;
+  PlatformFile TakePlatformFile();
+
+  // Destroying this object closes the file automatically.
+  void Close();
+
+  // Changes current position in the file to an |offset| relative to an origin
+  // defined by |whence|. Returns the resultant current position in the file
+  // (relative to the start) or -1 in case of error.
+  int64_t Seek(Whence whence, int64_t offset);
+
+  // Reads the given number of bytes (or until EOF is reached) starting with the
+  // given offset. Returns the number of bytes read, or -1 on error. Note that
+  // this function makes a best effort to read all data on all platforms, so it
+  // is not intended for stream oriented files but instead for cases when the
+  // normal expectation is that actually |size| bytes are read unless there is
+  // an error.
+  int Read(int64_t offset, char* data, int size);
+
+  // Same as above but without seek.
+  int ReadAtCurrentPos(char* data, int size);
+
+  // Reads the given number of bytes (or until EOF is reached) starting with the
+  // given offset, but does not make any effort to read all data on all
+  // platforms. Returns the number of bytes read, or -1 on error.
+  int ReadNoBestEffort(int64_t offset, char* data, int size);
+
+  // Same as above but without seek.
+  int ReadAtCurrentPosNoBestEffort(char* data, int size);
+
+  // Writes the given buffer into the file at the given offset, overwriting any
+  // data that was previously there. Returns the number of bytes written, or -1
+  // on error. Note that this function makes a best effort to write all data on
+  // all platforms.
+  // Ignores the offset and writes to the end of the file if the file was opened
+  // with FLAG_APPEND.
+  int Write(int64_t offset, const char* data, int size);
+
+  // Same as above but without seek.
+  int WriteAtCurrentPos(const char* data, int size);
+
+  // Same as above but does not make any effort to write all data on all
+  // platforms. Returns the number of bytes written, or -1 on error.
+  int WriteAtCurrentPosNoBestEffort(const char* data, int size);
+
+  // Returns the current size of this file, or a negative number on failure.
+  int64_t GetLength();
+
+  // Truncates the file to the given length. If |length| is greater than the
+  // current size of the file, the file is extended with zeros. If the file
+  // doesn't exist, |false| is returned.
+  bool SetLength(int64_t length);
+
+  // Instructs the filesystem to flush the file to disk. (POSIX: fsync, Windows:
+  // FlushFileBuffers).
+  bool Flush();
+
+  // Updates the file times.
+  bool SetTimes(Time last_access_time, Time last_modified_time);
+
+  // Returns some basic information for the given file.
+  bool GetInfo(Info* info);
+
+  // Attempts to take an exclusive write lock on the file. Returns immediately
+  // (i.e. does not wait for another process to unlock the file). If the lock
+  // was obtained, the result will be FILE_OK. A lock only guarantees
+  // that other processes may not also take a lock on the same file with the
+  // same API - it may still be opened, renamed, unlinked, etc.
+  //
+  // Common semantics:
+  //  * Locks are held by processes, but not inherited by child processes.
+  //  * Locks are released by the OS on file close or process termination.
+  //  * Locks are reliable only on local filesystems.
+  //  * Duplicated file handles may also write to locked files.
+  // Windows-specific semantics:
+  //  * Locks are mandatory for read/write APIs, advisory for mapping APIs.
+  //  * Within a process, locking the same file (by the same or new handle)
+  //    will fail.
+  // POSIX-specific semantics:
+  //  * Locks are advisory only.
+  //  * Within a process, locking the same file (by the same or new handle)
+  //    will succeed.
+  //  * Closing any descriptor on a given file releases the lock.
+  Error Lock();
+
+  // Unlock a file previously locked.
+  Error Unlock();
+
+  // Returns a new object referencing this file for use within the current
+  // process. Handling of FLAG_DELETE_ON_CLOSE varies by OS. On POSIX, the File
+  // object that was created or initialized with this flag will have unlinked
+  // the underlying file when it was created or opened. On Windows, the
+  // underlying file is deleted when the last handle to it is closed.
+  File Duplicate();
+
+  bool async() const { return async_; }
+
+#if defined(OS_WIN)
+  static Error OSErrorToFileError(DWORD last_error);
+#elif defined(OS_POSIX)
+  static Error OSErrorToFileError(int saved_errno);
+#endif
+
+  // Converts an error value to a human-readable form. Used for logging.
+  static std::string ErrorToString(Error error);
+
+ private:
+  friend class FileTracing::ScopedTrace;
+
+  // Creates or opens the given file. Only called if |path| has no
+  // traversal ('..') components.
+  void DoInitialize(const FilePath& path, uint32_t flags);
+
+  // TODO(tnagel): Reintegrate into Flush() once histogram isn't needed anymore,
+  // cf. issue 473337.
+  bool DoFlush();
+
+  void SetPlatformFile(PlatformFile file);
+
+#if defined(OS_WIN)
+  win::ScopedHandle file_;
+#elif defined(OS_POSIX)
+  ScopedFD file_;
+#endif
+
+  // A path to use for tracing purposes. Set if file tracing is enabled during
+  // |Initialize()|.
+  FilePath tracing_path_;
+
+  // Object tied to the lifetime of |this| that enables/disables tracing.
+  FileTracing::ScopedEnabler trace_enabler_;
+
+  Error error_details_;
+  bool created_;
+  bool async_;
+
+  DISALLOW_COPY_AND_ASSIGN(File);
+};
+
+} // namespace base
+
+#endif // BASE_FILES_FILE_H_
+
diff --git a/libchrome/base/files/file_enumerator.cc b/libchrome/base/files/file_enumerator.cc
new file mode 100644
index 0000000..9749980
--- /dev/null
+++ b/libchrome/base/files/file_enumerator.cc
@@ -0,0 +1,21 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_enumerator.h"
+
+#include "base/files/file_util.h"
+
+namespace base {
+
+// Out-of-line destructor; FileInfo is otherwise freely copyable/assignable
+// (see "copy & assign supported" note in the header).
+FileEnumerator::FileInfo::~FileInfo() {
+}
+
+// Returns true for the "." entry always, and for ".." unless the caller
+// asked for it via INCLUDE_DOT_DOT.
+bool FileEnumerator::ShouldSkip(const FilePath& path) {
+  const FilePath::StringType basename = path.BaseName().value();
+  if (basename == FILE_PATH_LITERAL("."))
+    return true;
+  return basename == FILE_PATH_LITERAL("..") &&
+         !(file_type_ & INCLUDE_DOT_DOT);
+}
+
+} // namespace base
diff --git a/libchrome/base/files/file_enumerator.h b/libchrome/base/files/file_enumerator.h
new file mode 100644
index 0000000..7cac8dd
--- /dev/null
+++ b/libchrome/base/files/file_enumerator.h
@@ -0,0 +1,162 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_FILE_ENUMERATOR_H_
+#define BASE_FILES_FILE_ENUMERATOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <stack>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_POSIX)
+#include <sys/stat.h>
+#include <unistd.h>
+#endif
+
+namespace base {
+
+// A class for enumerating the files in a provided path. The order of the
+// results is not guaranteed.
+//
+// This is blocking. Do not use on critical threads.
+//
+// Example:
+//
+// base::FileEnumerator enum(my_dir, false, base::FileEnumerator::FILES,
+// FILE_PATH_LITERAL("*.txt"));
+// for (base::FilePath name = enum.Next(); !name.empty(); name = enum.Next())
+// ...
+class BASE_EXPORT FileEnumerator {
+ public:
+  // Note: copy & assign supported.
+  class BASE_EXPORT FileInfo {
+   public:
+    FileInfo();
+    ~FileInfo();
+
+    bool IsDirectory() const;
+
+    // The name of the file. This will not include any path information. This
+    // is in contrast to the value returned by FileEnumerator.Next() which
+    // includes the |root_path| passed into the FileEnumerator constructor.
+    FilePath GetName() const;
+
+    int64_t GetSize() const;
+    Time GetLastModifiedTime() const;
+
+#if defined(OS_WIN)
+    // Note that the cAlternateFileName (used to hold the "short" 8.3 name)
+    // of the WIN32_FIND_DATA will be empty. Since we don't use short file
+    // names, we tell Windows to omit it which speeds up the query slightly.
+    const WIN32_FIND_DATA& find_data() const { return find_data_; }
+#elif defined(OS_POSIX)
+    const struct stat& stat() const { return stat_; }
+#endif
+
+   private:
+    friend class FileEnumerator;
+
+#if defined(OS_WIN)
+    WIN32_FIND_DATA find_data_;
+#elif defined(OS_POSIX)
+    struct stat stat_;
+    FilePath filename_;
+#endif
+  };
+
+  // Bit flags for |file_type|: FILES/DIRECTORIES select which entry kinds
+  // Next() reports; INCLUDE_DOT_DOT additionally reports "..".
+  enum FileType {
+    FILES = 1 << 0,
+    DIRECTORIES = 1 << 1,
+    INCLUDE_DOT_DOT = 1 << 2,
+#if defined(OS_POSIX)
+    // NOTE(review): 1 << 3 is skipped — presumably reserved upstream;
+    // confirm before reusing that bit.
+    SHOW_SYM_LINKS = 1 << 4,
+#endif
+  };
+
+  // |root_path| is the starting directory to search for. It may or may not end
+  // in a slash.
+  //
+  // If |recursive| is true, this will enumerate all matches in any
+  // subdirectories matched as well. It does a breadth-first search, so all
+  // files in one directory will be returned before any files in a
+  // subdirectory.
+  //
+  // |file_type|, a bit mask of FileType, specifies whether the enumerator
+  // should match files, directories, or both.
+  //
+  // |pattern| is an optional pattern for which files to match. This
+  // works like shell globbing. For example, "*.txt" or "Foo???.doc".
+  // However, be careful in specifying patterns that aren't cross platform
+  // since the underlying code uses OS-specific matching routines. In general,
+  // Windows matching is less featureful than others, so test there first.
+  // If unspecified, this will match all files.
+  // NOTE: the pattern only matches the contents of root_path, not files in
+  // recursive subdirectories.
+  // TODO(erikkay): Fix the pattern matching to work at all levels.
+  FileEnumerator(const FilePath& root_path,
+                 bool recursive,
+                 int file_type);
+  FileEnumerator(const FilePath& root_path,
+                 bool recursive,
+                 int file_type,
+                 const FilePath::StringType& pattern);
+  ~FileEnumerator();
+
+  // Returns the next file or an empty string if there are no more results.
+  //
+  // The returned path will incorporate the |root_path| passed in the
+  // constructor: "<root_path>/file_name.txt". If the |root_path| is absolute,
+  // then so will be the result of Next().
+  FilePath Next();
+
+  // Returns info about the file last returned by Next(). Only meaningful
+  // after Next() has returned a non-empty path.
+  FileInfo GetInfo() const;
+
+ private:
+  // Returns true if the given path should be skipped in enumeration.
+  bool ShouldSkip(const FilePath& path);
+
+#if defined(OS_WIN)
+  // True when find_data_ is valid.
+  bool has_find_data_;
+  WIN32_FIND_DATA find_data_;
+  HANDLE find_handle_;
+#elif defined(OS_POSIX)
+
+  // Read the filenames in source into the vector of DirectoryEntryInfo's
+  static bool ReadDirectory(std::vector<FileInfo>* entries,
+                            const FilePath& source, bool show_links);
+
+  // The files in the current directory
+  std::vector<FileInfo> directory_entries_;
+
+  // The next entry to use from the directory_entries_ vector
+  size_t current_directory_entry_;
+#endif
+
+  FilePath root_path_;
+  bool recursive_;
+  int file_type_;
+  FilePath::StringType pattern_;  // Empty when we want to find everything.
+
+  // A stack that keeps track of which subdirectories we still need to
+  // enumerate in the breadth-first search.
+  std::stack<FilePath> pending_paths_;
+
+  DISALLOW_COPY_AND_ASSIGN(FileEnumerator);
+};
+
+} // namespace base
+
+#endif // BASE_FILES_FILE_ENUMERATOR_H_
diff --git a/libchrome/base/files/file_enumerator_posix.cc b/libchrome/base/files/file_enumerator_posix.cc
new file mode 100644
index 0000000..fb4010a
--- /dev/null
+++ b/libchrome/base/files/file_enumerator_posix.cc
@@ -0,0 +1,162 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_enumerator.h"
+
+#include <dirent.h>
+#include <errno.h>
+#include <fnmatch.h>
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// FileEnumerator::FileInfo ----------------------------------------------------
+
+// stat_ is zero-filled so accessors return benign defaults (size 0, epoch
+// mtime, not-a-directory) until ReadDirectory() fills it in.
+FileEnumerator::FileInfo::FileInfo() {
+  memset(&stat_, 0, sizeof(stat_));
+}
+
+bool FileEnumerator::FileInfo::IsDirectory() const {
+  return S_ISDIR(stat_.st_mode);
+}
+
+// Bare filename only; Next() assembles the full path.
+FilePath FileEnumerator::FileInfo::GetName() const {
+  return filename_;
+}
+
+int64_t FileEnumerator::FileInfo::GetSize() const {
+  return stat_.st_size;
+}
+
+// Second granularity only, since this is based on time_t st_mtime.
+base::Time FileEnumerator::FileInfo::GetLastModifiedTime() const {
+  return base::Time::FromTimeT(stat_.st_mtime);
+}
+
+// FileEnumerator --------------------------------------------------------------
+
+FileEnumerator::FileEnumerator(const FilePath& root_path,
+                               bool recursive,
+                               int file_type)
+    : current_directory_entry_(0),
+      root_path_(root_path),
+      recursive_(recursive),
+      file_type_(file_type) {
+  // INCLUDE_DOT_DOT must not be specified if recursive.
+  DCHECK(!(recursive && (INCLUDE_DOT_DOT & file_type_)));
+  pending_paths_.push(root_path);
+}
+
+FileEnumerator::FileEnumerator(const FilePath& root_path,
+                               bool recursive,
+                               int file_type,
+                               const FilePath::StringType& pattern)
+    : current_directory_entry_(0),
+      root_path_(root_path),
+      recursive_(recursive),
+      file_type_(file_type),
+      // |pattern_| holds the full "<root_path>/<pattern>" glob so it can be
+      // matched against full paths in Next().
+      pattern_(root_path.Append(pattern).value()) {
+  // INCLUDE_DOT_DOT must not be specified if recursive.
+  DCHECK(!(recursive && (INCLUDE_DOT_DOT & file_type_)));
+  // The Windows version of this code appends the pattern to the root_path,
+  // potentially only matching against items in the top-most directory.
+  // Do the same here.
+  if (pattern.empty())
+    pattern_ = FilePath::StringType();  // Empty pattern => match everything.
+  pending_paths_.push(root_path);
+}
+
+FileEnumerator::~FileEnumerator() {
+}
+
+FilePath FileEnumerator::Next() {
+  ++current_directory_entry_;
+
+  // While we've exhausted the entries in the current directory, do the next
+  while (current_directory_entry_ >= directory_entries_.size()) {
+    if (pending_paths_.empty())
+      return FilePath();
+
+    // |root_path_| is reused as "directory currently being listed"; the
+    // breadth-first frontier lives in |pending_paths_|.
+    root_path_ = pending_paths_.top();
+    root_path_ = root_path_.StripTrailingSeparators();
+    pending_paths_.pop();
+
+    std::vector<FileInfo> entries;
+    if (!ReadDirectory(&entries, root_path_, file_type_ & SHOW_SYM_LINKS))
+      continue;  // Unreadable directory: skip it silently.
+
+    directory_entries_.clear();
+    current_directory_entry_ = 0;
+    for (std::vector<FileInfo>::const_iterator i = entries.begin();
+         i != entries.end(); ++i) {
+      FilePath full_path = root_path_.Append(i->filename_);
+      if (ShouldSkip(full_path))
+        continue;
+
+      // fnmatch() returns 0 on match, so non-zero means "filtered out".
+      if (pattern_.size() &&
+          fnmatch(pattern_.c_str(), full_path.value().c_str(), FNM_NOESCAPE))
+        continue;
+
+      // Recurse into subdirectories even when only FILES was requested.
+      if (recursive_ && S_ISDIR(i->stat_.st_mode))
+        pending_paths_.push(full_path);
+
+      if ((S_ISDIR(i->stat_.st_mode) && (file_type_ & DIRECTORIES)) ||
+          (!S_ISDIR(i->stat_.st_mode) && (file_type_ & FILES)))
+        directory_entries_.push_back(*i);
+    }
+  }
+
+  return root_path_.Append(
+      directory_entries_[current_directory_entry_].filename_);
+}
+
+// Returns info about the entry last returned by Next(); only meaningful
+// after Next() returned a non-empty path.
+FileEnumerator::FileInfo FileEnumerator::GetInfo() const {
+  return directory_entries_[current_directory_entry_];
+}
+
+// Reads every entry of |source| into |entries|. Returns false only when the
+// directory cannot be opened; entries that fail to stat() are still reported
+// (with a zeroed stat).
+bool FileEnumerator::ReadDirectory(std::vector<FileInfo>* entries,
+                                   const FilePath& source, bool show_links) {
+  base::ThreadRestrictions::AssertIOAllowed();
+  DIR* dir = opendir(source.value().c_str());
+  if (!dir)
+    return false;
+
+  // readdir_r() is deprecated (glibc >= 2.24) because it requires the caller
+  // to size the dirent buffer for the longest possible name, which is not
+  // portable -- exactly the hazard the old #error guard here warned about.
+  // readdir() is safe in this usage: each enumerator owns its private DIR
+  // stream, and implementations only share state per-stream.
+  struct dirent* dent;
+  while ((dent = readdir(dir)) != nullptr) {
+    FileInfo info;
+    info.filename_ = FilePath(dent->d_name);
+
+    FilePath full_name = source.Append(dent->d_name);
+    // lstat() describes the link itself; stat() follows it.
+    int ret = show_links ? lstat(full_name.value().c_str(), &info.stat_)
+                         : stat(full_name.value().c_str(), &info.stat_);
+    if (ret < 0) {
+      // Print the stat() error message unless it was ENOENT and we're
+      // following symlinks (a dangling link is expected to ENOENT).
+      if (!(errno == ENOENT && !show_links)) {
+        DPLOG(ERROR) << "Couldn't stat " << full_name.value();
+      }
+      memset(&info.stat_, 0, sizeof(info.stat_));
+    }
+    entries->push_back(info);
+  }
+
+  closedir(dir);
+  return true;
+}
+
+} // namespace base
diff --git a/libchrome/base/files/file_path.cc b/libchrome/base/files/file_path.cc
new file mode 100644
index 0000000..29f12a8
--- /dev/null
+++ b/libchrome/base/files/file_path.cc
@@ -0,0 +1,1339 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path.h"
+
+#include <string.h>
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/pickle.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_cftyperef.h"
+#include "base/third_party/icu/icu_utf.h"
+#endif
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_MACOSX)
+#include <CoreFoundation/CoreFoundation.h>
+#endif
+
+namespace base {
+
+using StringType = FilePath::StringType;
+using StringPieceType = FilePath::StringPieceType;
+
+namespace {
+
+const char* const kCommonDoubleExtensionSuffixes[] = { "gz", "z", "bz2", "bz" };
+const char* const kCommonDoubleExtensions[] = { "user.js" };
+
+const FilePath::CharType kStringTerminator = FILE_PATH_LITERAL('\0');
+
+// If this FilePath contains a drive letter specification, returns the
+// position of the last character of the drive letter specification,
+// otherwise returns npos. This can only be true on Windows, when a pathname
+// begins with a letter followed by a colon. On other platforms, this always
+// returns npos.
+StringPieceType::size_type FindDriveLetter(StringPieceType path) {
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+  // This is dependent on an ASCII-based character set, but that's a
+  // reasonable assumption. iswalpha can be too inclusive here.
+  if (path.length() >= 2 && path[1] == L':' &&
+      ((path[0] >= L'A' && path[0] <= L'Z') ||
+       (path[0] >= L'a' && path[0] <= L'z'))) {
+    return 1;  // Index of the ':' -- last char of the drive spec.
+  }
+#else
+  (void)path;  // Avoid an unused warning.
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+  return StringType::npos;
+}
+
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+// Compares two paths, treating only the drive-letter prefix (if both have
+// one) case-insensitively; the remainder is compared exactly.
+bool EqualDriveLetterCaseInsensitive(StringPieceType a, StringPieceType b) {
+  size_t a_letter_pos = FindDriveLetter(a);
+  size_t b_letter_pos = FindDriveLetter(b);
+
+  // If one or both paths have no drive letter, fall back to exact equality.
+  if (a_letter_pos == StringType::npos || b_letter_pos == StringType::npos)
+    return a == b;
+
+  StringPieceType a_letter(a.substr(0, a_letter_pos + 1));
+  StringPieceType b_letter(b.substr(0, b_letter_pos + 1));
+  if (!StartsWith(a_letter, b_letter, CompareCase::INSENSITIVE_ASCII))
+    return false;
+
+  StringPieceType a_rest(a.substr(a_letter_pos + 1));
+  StringPieceType b_rest(b.substr(b_letter_pos + 1));
+  return a_rest == b_rest;
+}
+#endif  // defined(FILE_PATH_USES_DRIVE_LETTERS)
+
+bool IsPathAbsolute(StringPieceType path) {
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+  StringType::size_type letter = FindDriveLetter(path);
+  if (letter != StringType::npos) {
+    // Look for a separator right after the drive specification.
+    return path.length() > letter + 1 &&
+        FilePath::IsSeparator(path[letter + 1]);
+  }
+  // Look for a pair of leading separators.
+  return path.length() > 1 &&
+      FilePath::IsSeparator(path[0]) && FilePath::IsSeparator(path[1]);
+#else  // FILE_PATH_USES_DRIVE_LETTERS
+  // Look for a separator in the first position.
+  return path.length() > 0 && FilePath::IsSeparator(path[0]);
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+}
+
+// True when every character of |input| is a path separator; vacuously true
+// for the empty string, matching the behavior of the explicit loop this
+// replaces.
+bool AreAllSeparators(const StringType& input) {
+  return std::all_of(input.begin(), input.end(), &FilePath::IsSeparator);
+}
+
+// Find the position of the '.' that separates the extension from the rest
+// of the file name. The position is relative to BaseName(), not value().
+// Returns npos if it can't find an extension.
+StringType::size_type FinalExtensionSeparatorPosition(const StringType& path) {
+  // Special case "." and ".." -- neither has an extension.
+  if (path == FilePath::kCurrentDirectory || path == FilePath::kParentDirectory)
+    return StringType::npos;
+
+  // Rightmost '.'; npos when the name contains no dot at all.
+  return path.rfind(FilePath::kExtensionSeparator);
+}
+
+// Same as above, but allow a second extension component of up to 4
+// characters when the rightmost extension component is a common double
+// extension (gz, bz2, Z). For example, foo.tar.gz or foo.tar.Z would have
+// extension components of '.tar.gz' and '.tar.Z' respectively.
+// Same as FinalExtensionSeparatorPosition, but allows a second extension
+// component of up to 4 characters when the rightmost extension component is
+// a common double extension (gz, bz2, Z). For example, foo.tar.gz or
+// foo.tar.Z would have extension components of '.tar.gz' and '.tar.Z'
+// respectively.
+StringType::size_type ExtensionSeparatorPosition(const StringType& path) {
+  const StringType::size_type last_dot = FinalExtensionSeparatorPosition(path);
+
+  // No extension, or the extension is the whole filename.
+  if (last_dot == StringType::npos || last_dot == 0U)
+    return last_dot;
+
+  const StringType::size_type penultimate_dot =
+      path.rfind(FilePath::kExtensionSeparator, last_dot - 1);
+  const StringType::size_type last_separator =
+      path.find_last_of(FilePath::kSeparators, last_dot - 1,
+                        FilePath::kSeparatorsLength - 1);
+
+  // A double extension requires a second dot that belongs to the same path
+  // component (i.e. no separator between the two dots).
+  if (penultimate_dot == StringType::npos ||
+      (last_separator != StringType::npos &&
+       penultimate_dot < last_separator)) {
+    return last_dot;
+  }
+
+  // Hoisted out of the loop: the original rebuilt this substring on every
+  // iteration even though it never changes.
+  const StringType penultimate_extension(path, penultimate_dot + 1);
+  for (size_t i = 0; i < arraysize(kCommonDoubleExtensions); ++i) {
+    if (LowerCaseEqualsASCII(penultimate_extension,
+                             kCommonDoubleExtensions[i]))
+      return penultimate_dot;
+  }
+
+  const StringType extension(path, last_dot + 1);
+  for (size_t i = 0; i < arraysize(kCommonDoubleExtensionSuffixes); ++i) {
+    if (LowerCaseEqualsASCII(extension, kCommonDoubleExtensionSuffixes[i])) {
+      // Accept a middle component of 1-4 characters between the two dots.
+      if ((last_dot - penultimate_dot) <= 5U &&
+          (last_dot - penultimate_dot) > 1U) {
+        return penultimate_dot;
+      }
+    }
+  }
+
+  return last_dot;
+}
+
+// Returns true if path is "", ".", or "..".
+// Returns true if |path| is "", ".", or "..".
+bool IsEmptyOrSpecialCase(const StringType& path) {
+  return path.empty() || path == FilePath::kCurrentDirectory ||
+         path == FilePath::kParentDirectory;
+}
+
+} // namespace
+
+FilePath::FilePath() {
+}
+
+FilePath::FilePath(const FilePath& that) : path_(that.path_) {
+}
+
+// Everything from the first embedded NUL onward is silently dropped.
+FilePath::FilePath(StringPieceType path) {
+  path.CopyToString(&path_);
+  StringType::size_type nul_pos = path_.find(kStringTerminator);
+  if (nul_pos != StringType::npos)
+    path_.erase(nul_pos, StringType::npos);
+}
+
+FilePath::~FilePath() {
+}
+
+FilePath& FilePath::operator=(const FilePath& that) {
+  path_ = that.path_;
+  return *this;
+}
+
+// Equality is exact except that a drive-letter prefix (when the platform has
+// them) compares case-insensitively.
+bool FilePath::operator==(const FilePath& that) const {
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+  return EqualDriveLetterCaseInsensitive(this->path_, that.path_);
+#else  // defined(FILE_PATH_USES_DRIVE_LETTERS)
+  return path_ == that.path_;
+#endif  // defined(FILE_PATH_USES_DRIVE_LETTERS)
+}
+
+bool FilePath::operator!=(const FilePath& that) const {
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+  return !EqualDriveLetterCaseInsensitive(this->path_, that.path_);
+#else  // defined(FILE_PATH_USES_DRIVE_LETTERS)
+  return path_ != that.path_;
+#endif  // defined(FILE_PATH_USES_DRIVE_LETTERS)
+}
+
+// static
+bool FilePath::IsSeparator(CharType character) {
+  // kSeparators is NUL-terminated, hence the "- 1" on its length.
+  for (size_t i = 0; i < kSeparatorsLength - 1; ++i) {
+    if (character == kSeparators[i]) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+void FilePath::GetComponents(std::vector<StringType>* components) const {
+  DCHECK(components);
+  if (!components)
+    return;
+  components->clear();
+  if (value().empty())
+    return;
+
+  std::vector<StringType> ret_val;
+  FilePath current = *this;
+  FilePath base;
+
+  // Capture path components: walk up via DirName() until it reaches a fixed
+  // point, collecting leaf-first.
+  while (current != current.DirName()) {
+    base = current.BaseName();
+    if (!AreAllSeparators(base.value()))
+      ret_val.push_back(base.value());
+    current = current.DirName();
+  }
+
+  // Capture root, if any.
+  base = current.BaseName();
+  if (!base.value().empty() && base.value() != kCurrentDirectory)
+    ret_val.push_back(current.BaseName().value());
+
+  // Capture drive letter, if any.
+  FilePath dir = current.DirName();
+  StringType::size_type letter = FindDriveLetter(dir.value());
+  if (letter != StringType::npos) {
+    ret_val.push_back(StringType(dir.value(), 0, letter + 1));
+  }
+
+  // Reverse into root-first order.
+  *components = std::vector<StringType>(ret_val.rbegin(), ret_val.rend());
+}
+
+// A path is |child|'s parent iff it is a strict component-wise prefix.
+bool FilePath::IsParent(const FilePath& child) const {
+  return AppendRelativePath(child, NULL);
+}
+
+bool FilePath::AppendRelativePath(const FilePath& child,
+                                  FilePath* path) const {
+  std::vector<StringType> parent_components;
+  std::vector<StringType> child_components;
+  GetComponents(&parent_components);
+  child.GetComponents(&child_components);
+
+  // |this| must be non-empty and strictly shorter than |child|.
+  if (parent_components.empty() ||
+      parent_components.size() >= child_components.size())
+    return false;
+
+  std::vector<StringType>::const_iterator parent_comp =
+      parent_components.begin();
+  std::vector<StringType>::const_iterator child_comp =
+      child_components.begin();
+
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+  // Windows can access case sensitive filesystems, so component
+  // comparisons must be case sensitive, but drive letters are
+  // never case sensitive.
+  if ((FindDriveLetter(*parent_comp) != StringType::npos) &&
+      (FindDriveLetter(*child_comp) != StringType::npos)) {
+    if (!StartsWith(*parent_comp, *child_comp, CompareCase::INSENSITIVE_ASCII))
+      return false;
+    ++parent_comp;
+    ++child_comp;
+  }
+#endif  // defined(FILE_PATH_USES_DRIVE_LETTERS)
+
+  while (parent_comp != parent_components.end()) {
+    if (*parent_comp != *child_comp)
+      return false;
+    ++parent_comp;
+    ++child_comp;
+  }
+
+  // Append the components unique to |child| onto |path|, when requested.
+  if (path != NULL) {
+    for (; child_comp != child_components.end(); ++child_comp) {
+      *path = path->Append(*child_comp);
+    }
+  }
+  return true;
+}
+
+// libgen's dirname and basename aren't guaranteed to be thread-safe and aren't
+// guaranteed to not modify their input strings, and in fact are implemented
+// differently in this regard on different platforms. Don't use them, but
+// adhere to their behavior.
+FilePath FilePath::DirName() const {
+  FilePath new_path(path_);
+  new_path.StripTrailingSeparatorsInternal();
+
+  // The drive letter, if any, always needs to remain in the output. If there
+  // is no drive letter, as will always be the case on platforms which do not
+  // support drive letters, letter will be npos, or -1, so the comparisons and
+  // resizes below using letter will still be valid.
+  StringType::size_type letter = FindDriveLetter(new_path.path_);
+
+  StringType::size_type last_separator =
+      new_path.path_.find_last_of(kSeparators, StringType::npos,
+                                  kSeparatorsLength - 1);
+  // NOTE: the arithmetic below deliberately relies on npos == (size_type)-1,
+  // so e.g. "letter + 1" is 0 when there is no drive letter.
+  if (last_separator == StringType::npos) {
+    // path_ is in the current directory.
+    new_path.path_.resize(letter + 1);
+  } else if (last_separator == letter + 1) {
+    // path_ is in the root directory.
+    new_path.path_.resize(letter + 2);
+  } else if (last_separator == letter + 2 &&
+             IsSeparator(new_path.path_[letter + 1])) {
+    // path_ is in "//" (possibly with a drive letter); leave the double
+    // separator intact indicating alternate root.
+    new_path.path_.resize(letter + 3);
+  } else if (last_separator != 0) {
+    // path_ is somewhere else, trim the basename.
+    new_path.path_.resize(last_separator);
+  }
+
+  new_path.StripTrailingSeparatorsInternal();
+  if (!new_path.path_.length())
+    new_path.path_ = kCurrentDirectory;
+
+  return new_path;
+}
+
+FilePath FilePath::BaseName() const {
+  FilePath new_path(path_);
+  new_path.StripTrailingSeparatorsInternal();
+
+  // The drive letter, if any, is always stripped.
+  StringType::size_type letter = FindDriveLetter(new_path.path_);
+  if (letter != StringType::npos) {
+    new_path.path_.erase(0, letter + 1);
+  }
+
+  // Keep everything after the final separator, but if the pathname is only
+  // one character and it's a separator, leave it alone.
+  StringType::size_type last_separator =
+      new_path.path_.find_last_of(kSeparators, StringType::npos,
+                                  kSeparatorsLength - 1);
+  if (last_separator != StringType::npos &&
+      last_separator < new_path.path_.length() - 1) {
+    new_path.path_.erase(0, last_separator + 1);
+  }
+
+  return new_path;
+}
+
+// Returns the extension including the leading dot (possibly a double
+// extension such as ".tar.gz"); empty when there is none.
+StringType FilePath::Extension() const {
+  FilePath base(BaseName());
+  const StringType::size_type dot = ExtensionSeparatorPosition(base.path_);
+  if (dot == StringType::npos)
+    return StringType();
+
+  return base.path_.substr(dot, StringType::npos);
+}
+
+// Like Extension(), but never treats double extensions specially: returns
+// only the final ".xyz" component.
+StringType FilePath::FinalExtension() const {
+  FilePath base(BaseName());
+  const StringType::size_type dot = FinalExtensionSeparatorPosition(base.path_);
+  if (dot == StringType::npos)
+    return StringType();
+
+  return base.path_.substr(dot, StringType::npos);
+}
+
+FilePath FilePath::RemoveExtension() const {
+  if (Extension().empty())
+    return *this;
+
+  const StringType::size_type dot = ExtensionSeparatorPosition(path_);
+  if (dot == StringType::npos)
+    return *this;
+
+  return FilePath(path_.substr(0, dot));
+}
+
+FilePath FilePath::RemoveFinalExtension() const {
+  if (FinalExtension().empty())
+    return *this;
+
+  const StringType::size_type dot = FinalExtensionSeparatorPosition(path_);
+  if (dot == StringType::npos)
+    return *this;
+
+  return FilePath(path_.substr(0, dot));
+}
+
+// Returns "" (an empty FilePath) when the basename is "", ".", or "..".
+FilePath FilePath::InsertBeforeExtension(StringPieceType suffix) const {
+  if (suffix.empty())
+    return FilePath(path_);
+
+  if (IsEmptyOrSpecialCase(BaseName().value()))
+    return FilePath();
+
+  StringType ext = Extension();
+  StringType ret = RemoveExtension().value();
+  suffix.AppendToString(&ret);
+  ret.append(ext);
+  return FilePath(ret);
+}
+
+FilePath FilePath::InsertBeforeExtensionASCII(StringPiece suffix)
+    const {
+  DCHECK(IsStringASCII(suffix));
+#if defined(OS_WIN)
+  return InsertBeforeExtension(ASCIIToUTF16(suffix));
+#elif defined(OS_POSIX)
+  return InsertBeforeExtension(suffix);
+#endif
+}
+
+// Appends |extension| (inserting a '.' when neither side provides one).
+// Returns an empty FilePath when the basename is "", ".", or "..".
+FilePath FilePath::AddExtension(StringPieceType extension) const {
+  if (IsEmptyOrSpecialCase(BaseName().value()))
+    return FilePath();
+
+  // If the new extension is "" or ".", then just return the current FilePath.
+  if (extension.empty() ||
+      (extension.size() == 1 && extension[0] == kExtensionSeparator))
+    return *this;
+
+  StringType str = path_;
+  const bool needs_dot = extension[0] != kExtensionSeparator &&
+                         str.back() != kExtensionSeparator;
+  if (needs_dot)
+    str.push_back(kExtensionSeparator);
+  extension.AppendToString(&str);
+  return FilePath(str);
+}
+
+// Replaces the current extension with |extension|; "" or "." simply strip
+// it. Returns an empty FilePath when the basename is "", ".", or "..".
+FilePath FilePath::ReplaceExtension(StringPieceType extension) const {
+  if (IsEmptyOrSpecialCase(BaseName().value()))
+    return FilePath();
+
+  FilePath no_ext = RemoveExtension();
+  if (extension.empty() ||
+      (extension.size() == 1 && extension[0] == kExtensionSeparator))
+    return no_ext;
+
+  StringType str = no_ext.value();
+  if (extension[0] != kExtensionSeparator)
+    str.push_back(kExtensionSeparator);
+  extension.AppendToString(&str);
+  return FilePath(str);
+}
+
+// Case-insensitive comparison of this path's extension against |extension|
+// (which must be empty or start with '.').
+bool FilePath::MatchesExtension(StringPieceType extension) const {
+  DCHECK(extension.empty() || extension[0] == kExtensionSeparator);
+
+  StringType current_extension = Extension();
+
+  // Cheap length check before the character-wise comparison.
+  if (current_extension.length() != extension.length())
+    return false;
+
+  return FilePath::CompareEqualIgnoreCase(extension, current_extension);
+}
+
+FilePath FilePath::Append(StringPieceType component) const {
+  StringPieceType appended = component;
+  StringType without_nuls;
+
+  // Mirror the constructor's behavior: drop everything from the first
+  // embedded NUL onward.
+  StringType::size_type nul_pos = component.find(kStringTerminator);
+  if (nul_pos != StringPieceType::npos) {
+    component.substr(0, nul_pos).CopyToString(&without_nuls);
+    appended = StringPieceType(without_nuls);
+  }
+
+  // Appending an absolute component is a programming error.
+  DCHECK(!IsPathAbsolute(appended));
+
+  if (path_.compare(kCurrentDirectory) == 0) {
+    // Append normally doesn't do any normalization, but as a special case,
+    // when appending to kCurrentDirectory, just return a new path for the
+    // component argument. Appending component to kCurrentDirectory would
+    // serve no purpose other than needlessly lengthening the path, and
+    // it's likely in practice to wind up with FilePath objects containing
+    // only kCurrentDirectory when calling DirName on a single relative path
+    // component.
+    return FilePath(appended);
+  }
+
+  FilePath new_path(path_);
+  new_path.StripTrailingSeparatorsInternal();
+
+  // Don't append a separator if the path is empty (indicating the current
+  // directory) or if the path component is empty (indicating nothing to
+  // append).
+  if (!appended.empty() && !new_path.path_.empty()) {
+    // Don't append a separator if the path still ends with a trailing
+    // separator after stripping (indicating the root directory).
+    if (!IsSeparator(new_path.path_.back())) {
+      // Don't append a separator if the path is just a drive letter.
+      if (FindDriveLetter(new_path.path_) + 1 != new_path.path_.length()) {
+        new_path.path_.append(1, kSeparators[0]);
+      }
+    }
+  }
+
+  appended.AppendToString(&new_path.path_);
+  return new_path;
+}
+
+FilePath FilePath::Append(const FilePath& component) const {
+  return Append(component.value());
+}
+
+// ASCII-only convenience wrapper; converts to UTF-16 on Windows.
+FilePath FilePath::AppendASCII(StringPiece component) const {
+  DCHECK(base::IsStringASCII(component));
+#if defined(OS_WIN)
+  return Append(ASCIIToUTF16(component));
+#elif defined(OS_POSIX)
+  return Append(component);
+#endif
+}
+
+bool FilePath::IsAbsolute() const {
+  return IsPathAbsolute(path_);
+}
+
+bool FilePath::EndsWithSeparator() const {
+  if (empty())
+    return false;
+  return IsSeparator(path_.back());
+}
+
+// Returns *this unchanged when already separator-terminated or empty.
+FilePath FilePath::AsEndingWithSeparator() const {
+  if (EndsWithSeparator() || path_.empty())
+    return *this;
+
+  StringType path_str;
+  path_str.reserve(path_.length() + 1);  // Only allocate string once.
+
+  path_str = path_;
+  path_str.append(&kSeparators[0], 1);
+  return FilePath(path_str);
+}
+
+FilePath FilePath::StripTrailingSeparators() const {
+  FilePath new_path(path_);
+  new_path.StripTrailingSeparatorsInternal();
+
+  return new_path;
+}
+
+// Conservatively detects ".." traversal anywhere in the path.
+bool FilePath::ReferencesParent() const {
+  std::vector<StringType> components;
+  GetComponents(&components);
+
+  std::vector<StringType>::const_iterator it = components.begin();
+  for (; it != components.end(); ++it) {
+    const StringType& component = *it;
+    // Windows has odd, undocumented behavior with path components containing
+    // only whitespace and . characters. So, if all we see is . and
+    // whitespace, then we treat any .. sequence as referencing parent.
+    // For simplicity we enforce this on all platforms.
+    if (component.find_first_not_of(FILE_PATH_LITERAL(". \n\r\t")) ==
+            std::string::npos &&
+        component.find(kParentDirectory) != std::string::npos) {
+      return true;
+    }
+  }
+  return false;
+}
+
+#if defined(OS_POSIX)
+// See file_path.h for a discussion of the encoding of paths on POSIX
+// platforms. These encoding conversion functions are not quite correct.
+
+string16 FilePath::LossyDisplayName() const {
+  return WideToUTF16(SysNativeMBToWide(path_));
+}
+
+// Returns the path only when it is pure ASCII; otherwise the empty string.
+std::string FilePath::MaybeAsASCII() const {
+  if (base::IsStringASCII(path_))
+    return path_;
+  return std::string();
+}
+
+std::string FilePath::AsUTF8Unsafe() const {
+#if defined(SYSTEM_NATIVE_UTF8)
+  return value();
+#else
+  return WideToUTF8(SysNativeMBToWide(value()));
+#endif
+}
+
+string16 FilePath::AsUTF16Unsafe() const {
+#if defined(SYSTEM_NATIVE_UTF8)
+  return UTF8ToUTF16(value());
+#else
+  return WideToUTF16(SysNativeMBToWide(value()));
+#endif
+}
+
+// static
+FilePath FilePath::FromUTF8Unsafe(StringPiece utf8) {
+#if defined(SYSTEM_NATIVE_UTF8)
+  return FilePath(utf8);
+#else
+  return FilePath(SysWideToNativeMB(UTF8ToWide(utf8)));
+#endif
+}
+
+// static
+FilePath FilePath::FromUTF16Unsafe(StringPiece16 utf16) {
+#if defined(SYSTEM_NATIVE_UTF8)
+  return FilePath(UTF16ToUTF8(utf16));
+#else
+  return FilePath(SysWideToNativeMB(UTF16ToWide(utf16.as_string())));
+#endif
+}
+
+#elif defined(OS_WIN)
+// On Windows paths are already UTF-16, so these are mostly pass-throughs.
+string16 FilePath::LossyDisplayName() const {
+  return path_;
+}
+
+std::string FilePath::MaybeAsASCII() const {
+  if (base::IsStringASCII(path_))
+    return UTF16ToASCII(path_);
+  return std::string();
+}
+
+std::string FilePath::AsUTF8Unsafe() const {
+  return WideToUTF8(value());
+}
+
+string16 FilePath::AsUTF16Unsafe() const {
+  return value();
+}
+
+// static
+FilePath FilePath::FromUTF8Unsafe(StringPiece utf8) {
+  return FilePath(UTF8ToWide(utf8));
+}
+
+// static
+FilePath FilePath::FromUTF16Unsafe(StringPiece16 utf16) {
+  return FilePath(utf16);
+}
+#endif
+
+void FilePath::GetSizeForPickle(PickleSizer* sizer) const {
+#if defined(OS_WIN)
+  sizer->AddString16(path_);
+#else
+  sizer->AddString(path_);
+#endif
+}
+
+void FilePath::WriteToPickle(Pickle* pickle) const {
+#if defined(OS_WIN)
+  pickle->WriteString16(path_);
+#else
+  pickle->WriteString(path_);
+#endif
+}
+
+bool FilePath::ReadFromPickle(PickleIterator* iter) {
+#if defined(OS_WIN)
+  if (!iter->ReadString16(&path_))
+    return false;
+#else
+  if (!iter->ReadString(&path_))
+    return false;
+#endif
+
+  // Reject pickled paths containing embedded NULs: the FilePath constructor
+  // would silently truncate them, so they cannot round-trip faithfully.
+  if (path_.find(kStringTerminator) != StringType::npos)
+    return false;
+
+  return true;
+}
+
+#if defined(OS_WIN)
+// Windows specific implementation of file string comparisons.
+
+int FilePath::CompareIgnoreCase(StringPieceType string1,
+                                StringPieceType string2) {
+  // CharUpperW is resolved dynamically from user32.dll; the pointer is
+  // cached in a function-local static so the lookup happens once.
+  static decltype(::CharUpperW)* const char_upper_api =
+      reinterpret_cast<decltype(::CharUpperW)*>(
+          ::GetProcAddress(::GetModuleHandle(L"user32.dll"), "CharUpperW"));
+  CHECK(char_upper_api);
+  // Perform character-wise upper case comparison rather than using the
+  // fully Unicode-aware CompareString(). For details see:
+  // http://blogs.msdn.com/michkap/archive/2005/10/17/481600.aspx
+  StringPieceType::const_iterator i1 = string1.begin();
+  StringPieceType::const_iterator i2 = string2.begin();
+  StringPieceType::const_iterator string1end = string1.end();
+  StringPieceType::const_iterator string2end = string2.end();
+  for ( ; i1 != string1end && i2 != string2end; ++i1, ++i2) {
+    // Single-character CharUpperW call: the character is smuggled through
+    // the LPWSTR parameter's low word.
+    wchar_t c1 =
+        (wchar_t)LOWORD(char_upper_api((LPWSTR)(DWORD_PTR)MAKELONG(*i1, 0)));
+    wchar_t c2 =
+        (wchar_t)LOWORD(char_upper_api((LPWSTR)(DWORD_PTR)MAKELONG(*i2, 0)));
+    if (c1 < c2)
+      return -1;
+    if (c1 > c2)
+      return 1;
+  }
+  // A common prefix: the longer string orders after the shorter one.
+  if (i1 != string1end)
+    return 1;
+  if (i2 != string2end)
+    return -1;
+  return 0;
+}
+
+#elif defined(OS_MACOSX)
+// Mac OS X specific implementation of file string comparisons.
+
+// cf. http://developer.apple.com/mac/library/technotes/tn/tn1150.html#UnicodeSubtleties
+//
+// "When using CreateTextEncoding to create a text encoding, you should set
+// the TextEncodingBase to kTextEncodingUnicodeV2_0, set the
+// TextEncodingVariant to kUnicodeCanonicalDecompVariant, and set the
+// TextEncodingFormat to kUnicode16BitFormat. Using these values ensures that
+// the Unicode will be in the same form as on an HFS Plus volume, even as the
+// Unicode standard evolves."
+//
+// Another technical article for X 10.4 updates this: one should use
+// the new (unambiguous) kUnicodeHFSPlusDecompVariant.
+// cf. http://developer.apple.com/mac/library/releasenotes/TextFonts/RN-TEC/index.html
+//
+// This implementation uses CFStringGetFileSystemRepresentation() to get the
+// decomposed form, and an adapted version of the FastUnicodeCompare as
+// described in the tech note to compare the strings.
+
+// Character conversion table for FastUnicodeCompare()
+//
+// The lower case table consists of a 256-entry high-byte table followed by
+// some number of 256-entry subtables. The high-byte table contains either an
+// offset to the subtable for characters with that high byte or zero, which
+// means that there are no case mappings or ignored characters in that block.
+// Ignored characters are mapped to zero.
+//
+// cf. downloadable file linked in
+// http://developer.apple.com/mac/library/technotes/tn/tn1150.html#StringComparisonAlgorithm
+
+namespace {
+
+const UInt16 lower_case_table[] = {
+ // High-byte indices ( == 0 iff no case mapping and no ignorables )
+
+ /* 0 */ 0x0100, 0x0200, 0x0000, 0x0300, 0x0400, 0x0500, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ /* 1 */ 0x0600, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ /* 2 */ 0x0700, 0x0800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ /* 3 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ /* 4 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ /* 5 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ /* 6 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ /* 7 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ /* 8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ /* 9 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ /* A */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ /* B */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ /* C */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ /* D */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ /* E */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ /* F */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0900, 0x0A00,
+
+ // Table 1 (for high byte 0x00)
+
+ /* 0 */ 0xFFFF, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F,
+ /* 1 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
+ 0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F,
+ /* 2 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
+ 0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F,
+ /* 3 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
+ 0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,
+ /* 4 */ 0x0040, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,
+ /* 5 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F,
+ /* 6 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
+ 0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,
+ /* 7 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
+ 0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x007F,
+ /* 8 */ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
+ 0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F,
+ /* 9 */ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
+ 0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F,
+ /* A */ 0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7,
+ 0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF,
+ /* B */ 0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7,
+ 0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF,
+ /* C */ 0x00C0, 0x00C1, 0x00C2, 0x00C3, 0x00C4, 0x00C5, 0x00E6, 0x00C7,
+ 0x00C8, 0x00C9, 0x00CA, 0x00CB, 0x00CC, 0x00CD, 0x00CE, 0x00CF,
+ /* D */ 0x00F0, 0x00D1, 0x00D2, 0x00D3, 0x00D4, 0x00D5, 0x00D6, 0x00D7,
+ 0x00F8, 0x00D9, 0x00DA, 0x00DB, 0x00DC, 0x00DD, 0x00FE, 0x00DF,
+ /* E */ 0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7,
+ 0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF,
+ /* F */ 0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7,
+ 0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00FF,
+
+ // Table 2 (for high byte 0x01)
+
+ /* 0 */ 0x0100, 0x0101, 0x0102, 0x0103, 0x0104, 0x0105, 0x0106, 0x0107,
+ 0x0108, 0x0109, 0x010A, 0x010B, 0x010C, 0x010D, 0x010E, 0x010F,
+ /* 1 */ 0x0111, 0x0111, 0x0112, 0x0113, 0x0114, 0x0115, 0x0116, 0x0117,
+ 0x0118, 0x0119, 0x011A, 0x011B, 0x011C, 0x011D, 0x011E, 0x011F,
+ /* 2 */ 0x0120, 0x0121, 0x0122, 0x0123, 0x0124, 0x0125, 0x0127, 0x0127,
+ 0x0128, 0x0129, 0x012A, 0x012B, 0x012C, 0x012D, 0x012E, 0x012F,
+ /* 3 */ 0x0130, 0x0131, 0x0133, 0x0133, 0x0134, 0x0135, 0x0136, 0x0137,
+ 0x0138, 0x0139, 0x013A, 0x013B, 0x013C, 0x013D, 0x013E, 0x0140,
+ /* 4 */ 0x0140, 0x0142, 0x0142, 0x0143, 0x0144, 0x0145, 0x0146, 0x0147,
+ 0x0148, 0x0149, 0x014B, 0x014B, 0x014C, 0x014D, 0x014E, 0x014F,
+ /* 5 */ 0x0150, 0x0151, 0x0153, 0x0153, 0x0154, 0x0155, 0x0156, 0x0157,
+ 0x0158, 0x0159, 0x015A, 0x015B, 0x015C, 0x015D, 0x015E, 0x015F,
+ /* 6 */ 0x0160, 0x0161, 0x0162, 0x0163, 0x0164, 0x0165, 0x0167, 0x0167,
+ 0x0168, 0x0169, 0x016A, 0x016B, 0x016C, 0x016D, 0x016E, 0x016F,
+ /* 7 */ 0x0170, 0x0171, 0x0172, 0x0173, 0x0174, 0x0175, 0x0176, 0x0177,
+ 0x0178, 0x0179, 0x017A, 0x017B, 0x017C, 0x017D, 0x017E, 0x017F,
+ /* 8 */ 0x0180, 0x0253, 0x0183, 0x0183, 0x0185, 0x0185, 0x0254, 0x0188,
+ 0x0188, 0x0256, 0x0257, 0x018C, 0x018C, 0x018D, 0x01DD, 0x0259,
+ /* 9 */ 0x025B, 0x0192, 0x0192, 0x0260, 0x0263, 0x0195, 0x0269, 0x0268,
+ 0x0199, 0x0199, 0x019A, 0x019B, 0x026F, 0x0272, 0x019E, 0x0275,
+ /* A */ 0x01A0, 0x01A1, 0x01A3, 0x01A3, 0x01A5, 0x01A5, 0x01A6, 0x01A8,
+ 0x01A8, 0x0283, 0x01AA, 0x01AB, 0x01AD, 0x01AD, 0x0288, 0x01AF,
+ /* B */ 0x01B0, 0x028A, 0x028B, 0x01B4, 0x01B4, 0x01B6, 0x01B6, 0x0292,
+ 0x01B9, 0x01B9, 0x01BA, 0x01BB, 0x01BD, 0x01BD, 0x01BE, 0x01BF,
+ /* C */ 0x01C0, 0x01C1, 0x01C2, 0x01C3, 0x01C6, 0x01C6, 0x01C6, 0x01C9,
+ 0x01C9, 0x01C9, 0x01CC, 0x01CC, 0x01CC, 0x01CD, 0x01CE, 0x01CF,
+ /* D */ 0x01D0, 0x01D1, 0x01D2, 0x01D3, 0x01D4, 0x01D5, 0x01D6, 0x01D7,
+ 0x01D8, 0x01D9, 0x01DA, 0x01DB, 0x01DC, 0x01DD, 0x01DE, 0x01DF,
+ /* E */ 0x01E0, 0x01E1, 0x01E2, 0x01E3, 0x01E5, 0x01E5, 0x01E6, 0x01E7,
+ 0x01E8, 0x01E9, 0x01EA, 0x01EB, 0x01EC, 0x01ED, 0x01EE, 0x01EF,
+ /* F */ 0x01F0, 0x01F3, 0x01F3, 0x01F3, 0x01F4, 0x01F5, 0x01F6, 0x01F7,
+ 0x01F8, 0x01F9, 0x01FA, 0x01FB, 0x01FC, 0x01FD, 0x01FE, 0x01FF,
+
+ // Table 3 (for high byte 0x03)
+
+ /* 0 */ 0x0300, 0x0301, 0x0302, 0x0303, 0x0304, 0x0305, 0x0306, 0x0307,
+ 0x0308, 0x0309, 0x030A, 0x030B, 0x030C, 0x030D, 0x030E, 0x030F,
+ /* 1 */ 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317,
+ 0x0318, 0x0319, 0x031A, 0x031B, 0x031C, 0x031D, 0x031E, 0x031F,
+ /* 2 */ 0x0320, 0x0321, 0x0322, 0x0323, 0x0324, 0x0325, 0x0326, 0x0327,
+ 0x0328, 0x0329, 0x032A, 0x032B, 0x032C, 0x032D, 0x032E, 0x032F,
+ /* 3 */ 0x0330, 0x0331, 0x0332, 0x0333, 0x0334, 0x0335, 0x0336, 0x0337,
+ 0x0338, 0x0339, 0x033A, 0x033B, 0x033C, 0x033D, 0x033E, 0x033F,
+ /* 4 */ 0x0340, 0x0341, 0x0342, 0x0343, 0x0344, 0x0345, 0x0346, 0x0347,
+ 0x0348, 0x0349, 0x034A, 0x034B, 0x034C, 0x034D, 0x034E, 0x034F,
+ /* 5 */ 0x0350, 0x0351, 0x0352, 0x0353, 0x0354, 0x0355, 0x0356, 0x0357,
+ 0x0358, 0x0359, 0x035A, 0x035B, 0x035C, 0x035D, 0x035E, 0x035F,
+ /* 6 */ 0x0360, 0x0361, 0x0362, 0x0363, 0x0364, 0x0365, 0x0366, 0x0367,
+ 0x0368, 0x0369, 0x036A, 0x036B, 0x036C, 0x036D, 0x036E, 0x036F,
+ /* 7 */ 0x0370, 0x0371, 0x0372, 0x0373, 0x0374, 0x0375, 0x0376, 0x0377,
+ 0x0378, 0x0379, 0x037A, 0x037B, 0x037C, 0x037D, 0x037E, 0x037F,
+ /* 8 */ 0x0380, 0x0381, 0x0382, 0x0383, 0x0384, 0x0385, 0x0386, 0x0387,
+ 0x0388, 0x0389, 0x038A, 0x038B, 0x038C, 0x038D, 0x038E, 0x038F,
+ /* 9 */ 0x0390, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7,
+ 0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF,
+ /* A */ 0x03C0, 0x03C1, 0x03A2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7,
+ 0x03C8, 0x03C9, 0x03AA, 0x03AB, 0x03AC, 0x03AD, 0x03AE, 0x03AF,
+ /* B */ 0x03B0, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7,
+ 0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF,
+ /* C */ 0x03C0, 0x03C1, 0x03C2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7,
+ 0x03C8, 0x03C9, 0x03CA, 0x03CB, 0x03CC, 0x03CD, 0x03CE, 0x03CF,
+ /* D */ 0x03D0, 0x03D1, 0x03D2, 0x03D3, 0x03D4, 0x03D5, 0x03D6, 0x03D7,
+ 0x03D8, 0x03D9, 0x03DA, 0x03DB, 0x03DC, 0x03DD, 0x03DE, 0x03DF,
+ /* E */ 0x03E0, 0x03E1, 0x03E3, 0x03E3, 0x03E5, 0x03E5, 0x03E7, 0x03E7,
+ 0x03E9, 0x03E9, 0x03EB, 0x03EB, 0x03ED, 0x03ED, 0x03EF, 0x03EF,
+ /* F */ 0x03F0, 0x03F1, 0x03F2, 0x03F3, 0x03F4, 0x03F5, 0x03F6, 0x03F7,
+ 0x03F8, 0x03F9, 0x03FA, 0x03FB, 0x03FC, 0x03FD, 0x03FE, 0x03FF,
+
+ // Table 4 (for high byte 0x04)
+
+ /* 0 */ 0x0400, 0x0401, 0x0452, 0x0403, 0x0454, 0x0455, 0x0456, 0x0407,
+ 0x0458, 0x0459, 0x045A, 0x045B, 0x040C, 0x040D, 0x040E, 0x045F,
+ /* 1 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437,
+ 0x0438, 0x0419, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F,
+ /* 2 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447,
+ 0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F,
+ /* 3 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437,
+ 0x0438, 0x0439, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F,
+ /* 4 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447,
+ 0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F,
+ /* 5 */ 0x0450, 0x0451, 0x0452, 0x0453, 0x0454, 0x0455, 0x0456, 0x0457,
+ 0x0458, 0x0459, 0x045A, 0x045B, 0x045C, 0x045D, 0x045E, 0x045F,
+ /* 6 */ 0x0461, 0x0461, 0x0463, 0x0463, 0x0465, 0x0465, 0x0467, 0x0467,
+ 0x0469, 0x0469, 0x046B, 0x046B, 0x046D, 0x046D, 0x046F, 0x046F,
+ /* 7 */ 0x0471, 0x0471, 0x0473, 0x0473, 0x0475, 0x0475, 0x0476, 0x0477,
+ 0x0479, 0x0479, 0x047B, 0x047B, 0x047D, 0x047D, 0x047F, 0x047F,
+ /* 8 */ 0x0481, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487,
+ 0x0488, 0x0489, 0x048A, 0x048B, 0x048C, 0x048D, 0x048E, 0x048F,
+ /* 9 */ 0x0491, 0x0491, 0x0493, 0x0493, 0x0495, 0x0495, 0x0497, 0x0497,
+ 0x0499, 0x0499, 0x049B, 0x049B, 0x049D, 0x049D, 0x049F, 0x049F,
+ /* A */ 0x04A1, 0x04A1, 0x04A3, 0x04A3, 0x04A5, 0x04A5, 0x04A7, 0x04A7,
+ 0x04A9, 0x04A9, 0x04AB, 0x04AB, 0x04AD, 0x04AD, 0x04AF, 0x04AF,
+ /* B */ 0x04B1, 0x04B1, 0x04B3, 0x04B3, 0x04B5, 0x04B5, 0x04B7, 0x04B7,
+ 0x04B9, 0x04B9, 0x04BB, 0x04BB, 0x04BD, 0x04BD, 0x04BF, 0x04BF,
+ /* C */ 0x04C0, 0x04C1, 0x04C2, 0x04C4, 0x04C4, 0x04C5, 0x04C6, 0x04C8,
+ 0x04C8, 0x04C9, 0x04CA, 0x04CC, 0x04CC, 0x04CD, 0x04CE, 0x04CF,
+ /* D */ 0x04D0, 0x04D1, 0x04D2, 0x04D3, 0x04D4, 0x04D5, 0x04D6, 0x04D7,
+ 0x04D8, 0x04D9, 0x04DA, 0x04DB, 0x04DC, 0x04DD, 0x04DE, 0x04DF,
+ /* E */ 0x04E0, 0x04E1, 0x04E2, 0x04E3, 0x04E4, 0x04E5, 0x04E6, 0x04E7,
+ 0x04E8, 0x04E9, 0x04EA, 0x04EB, 0x04EC, 0x04ED, 0x04EE, 0x04EF,
+ /* F */ 0x04F0, 0x04F1, 0x04F2, 0x04F3, 0x04F4, 0x04F5, 0x04F6, 0x04F7,
+ 0x04F8, 0x04F9, 0x04FA, 0x04FB, 0x04FC, 0x04FD, 0x04FE, 0x04FF,
+
+ // Table 5 (for high byte 0x05)
+
+ /* 0 */ 0x0500, 0x0501, 0x0502, 0x0503, 0x0504, 0x0505, 0x0506, 0x0507,
+ 0x0508, 0x0509, 0x050A, 0x050B, 0x050C, 0x050D, 0x050E, 0x050F,
+ /* 1 */ 0x0510, 0x0511, 0x0512, 0x0513, 0x0514, 0x0515, 0x0516, 0x0517,
+ 0x0518, 0x0519, 0x051A, 0x051B, 0x051C, 0x051D, 0x051E, 0x051F,
+ /* 2 */ 0x0520, 0x0521, 0x0522, 0x0523, 0x0524, 0x0525, 0x0526, 0x0527,
+ 0x0528, 0x0529, 0x052A, 0x052B, 0x052C, 0x052D, 0x052E, 0x052F,
+ /* 3 */ 0x0530, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567,
+ 0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F,
+ /* 4 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577,
+ 0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F,
+ /* 5 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0557,
+ 0x0558, 0x0559, 0x055A, 0x055B, 0x055C, 0x055D, 0x055E, 0x055F,
+ /* 6 */ 0x0560, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567,
+ 0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F,
+ /* 7 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577,
+ 0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F,
+ /* 8 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0587,
+ 0x0588, 0x0589, 0x058A, 0x058B, 0x058C, 0x058D, 0x058E, 0x058F,
+ /* 9 */ 0x0590, 0x0591, 0x0592, 0x0593, 0x0594, 0x0595, 0x0596, 0x0597,
+ 0x0598, 0x0599, 0x059A, 0x059B, 0x059C, 0x059D, 0x059E, 0x059F,
+ /* A */ 0x05A0, 0x05A1, 0x05A2, 0x05A3, 0x05A4, 0x05A5, 0x05A6, 0x05A7,
+ 0x05A8, 0x05A9, 0x05AA, 0x05AB, 0x05AC, 0x05AD, 0x05AE, 0x05AF,
+ /* B */ 0x05B0, 0x05B1, 0x05B2, 0x05B3, 0x05B4, 0x05B5, 0x05B6, 0x05B7,
+ 0x05B8, 0x05B9, 0x05BA, 0x05BB, 0x05BC, 0x05BD, 0x05BE, 0x05BF,
+ /* C */ 0x05C0, 0x05C1, 0x05C2, 0x05C3, 0x05C4, 0x05C5, 0x05C6, 0x05C7,
+ 0x05C8, 0x05C9, 0x05CA, 0x05CB, 0x05CC, 0x05CD, 0x05CE, 0x05CF,
+ /* D */ 0x05D0, 0x05D1, 0x05D2, 0x05D3, 0x05D4, 0x05D5, 0x05D6, 0x05D7,
+ 0x05D8, 0x05D9, 0x05DA, 0x05DB, 0x05DC, 0x05DD, 0x05DE, 0x05DF,
+ /* E */ 0x05E0, 0x05E1, 0x05E2, 0x05E3, 0x05E4, 0x05E5, 0x05E6, 0x05E7,
+ 0x05E8, 0x05E9, 0x05EA, 0x05EB, 0x05EC, 0x05ED, 0x05EE, 0x05EF,
+ /* F */ 0x05F0, 0x05F1, 0x05F2, 0x05F3, 0x05F4, 0x05F5, 0x05F6, 0x05F7,
+ 0x05F8, 0x05F9, 0x05FA, 0x05FB, 0x05FC, 0x05FD, 0x05FE, 0x05FF,
+
+ // Table 6 (for high byte 0x10)
+
+ /* 0 */ 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007,
+ 0x1008, 0x1009, 0x100A, 0x100B, 0x100C, 0x100D, 0x100E, 0x100F,
+ /* 1 */ 0x1010, 0x1011, 0x1012, 0x1013, 0x1014, 0x1015, 0x1016, 0x1017,
+ 0x1018, 0x1019, 0x101A, 0x101B, 0x101C, 0x101D, 0x101E, 0x101F,
+ /* 2 */ 0x1020, 0x1021, 0x1022, 0x1023, 0x1024, 0x1025, 0x1026, 0x1027,
+ 0x1028, 0x1029, 0x102A, 0x102B, 0x102C, 0x102D, 0x102E, 0x102F,
+ /* 3 */ 0x1030, 0x1031, 0x1032, 0x1033, 0x1034, 0x1035, 0x1036, 0x1037,
+ 0x1038, 0x1039, 0x103A, 0x103B, 0x103C, 0x103D, 0x103E, 0x103F,
+ /* 4 */ 0x1040, 0x1041, 0x1042, 0x1043, 0x1044, 0x1045, 0x1046, 0x1047,
+ 0x1048, 0x1049, 0x104A, 0x104B, 0x104C, 0x104D, 0x104E, 0x104F,
+ /* 5 */ 0x1050, 0x1051, 0x1052, 0x1053, 0x1054, 0x1055, 0x1056, 0x1057,
+ 0x1058, 0x1059, 0x105A, 0x105B, 0x105C, 0x105D, 0x105E, 0x105F,
+ /* 6 */ 0x1060, 0x1061, 0x1062, 0x1063, 0x1064, 0x1065, 0x1066, 0x1067,
+ 0x1068, 0x1069, 0x106A, 0x106B, 0x106C, 0x106D, 0x106E, 0x106F,
+ /* 7 */ 0x1070, 0x1071, 0x1072, 0x1073, 0x1074, 0x1075, 0x1076, 0x1077,
+ 0x1078, 0x1079, 0x107A, 0x107B, 0x107C, 0x107D, 0x107E, 0x107F,
+ /* 8 */ 0x1080, 0x1081, 0x1082, 0x1083, 0x1084, 0x1085, 0x1086, 0x1087,
+ 0x1088, 0x1089, 0x108A, 0x108B, 0x108C, 0x108D, 0x108E, 0x108F,
+ /* 9 */ 0x1090, 0x1091, 0x1092, 0x1093, 0x1094, 0x1095, 0x1096, 0x1097,
+ 0x1098, 0x1099, 0x109A, 0x109B, 0x109C, 0x109D, 0x109E, 0x109F,
+ /* A */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7,
+ 0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF,
+ /* B */ 0x10E0, 0x10E1, 0x10E2, 0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7,
+ 0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF,
+ /* C */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10C6, 0x10C7,
+ 0x10C8, 0x10C9, 0x10CA, 0x10CB, 0x10CC, 0x10CD, 0x10CE, 0x10CF,
+ /* D */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7,
+ 0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF,
+ /* E */ 0x10E0, 0x10E1, 0x10E2, 0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7,
+ 0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF,
+ /* F */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10F6, 0x10F7,
+ 0x10F8, 0x10F9, 0x10FA, 0x10FB, 0x10FC, 0x10FD, 0x10FE, 0x10FF,
+
+ // Table 7 (for high byte 0x20)
+
+ /* 0 */ 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007,
+ 0x2008, 0x2009, 0x200A, 0x200B, 0x0000, 0x0000, 0x0000, 0x0000,
+ /* 1 */ 0x2010, 0x2011, 0x2012, 0x2013, 0x2014, 0x2015, 0x2016, 0x2017,
+ 0x2018, 0x2019, 0x201A, 0x201B, 0x201C, 0x201D, 0x201E, 0x201F,
+ /* 2 */ 0x2020, 0x2021, 0x2022, 0x2023, 0x2024, 0x2025, 0x2026, 0x2027,
+ 0x2028, 0x2029, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x202F,
+ /* 3 */ 0x2030, 0x2031, 0x2032, 0x2033, 0x2034, 0x2035, 0x2036, 0x2037,
+ 0x2038, 0x2039, 0x203A, 0x203B, 0x203C, 0x203D, 0x203E, 0x203F,
+ /* 4 */ 0x2040, 0x2041, 0x2042, 0x2043, 0x2044, 0x2045, 0x2046, 0x2047,
+ 0x2048, 0x2049, 0x204A, 0x204B, 0x204C, 0x204D, 0x204E, 0x204F,
+ /* 5 */ 0x2050, 0x2051, 0x2052, 0x2053, 0x2054, 0x2055, 0x2056, 0x2057,
+ 0x2058, 0x2059, 0x205A, 0x205B, 0x205C, 0x205D, 0x205E, 0x205F,
+ /* 6 */ 0x2060, 0x2061, 0x2062, 0x2063, 0x2064, 0x2065, 0x2066, 0x2067,
+ 0x2068, 0x2069, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ /* 7 */ 0x2070, 0x2071, 0x2072, 0x2073, 0x2074, 0x2075, 0x2076, 0x2077,
+ 0x2078, 0x2079, 0x207A, 0x207B, 0x207C, 0x207D, 0x207E, 0x207F,
+ /* 8 */ 0x2080, 0x2081, 0x2082, 0x2083, 0x2084, 0x2085, 0x2086, 0x2087,
+ 0x2088, 0x2089, 0x208A, 0x208B, 0x208C, 0x208D, 0x208E, 0x208F,
+ /* 9 */ 0x2090, 0x2091, 0x2092, 0x2093, 0x2094, 0x2095, 0x2096, 0x2097,
+ 0x2098, 0x2099, 0x209A, 0x209B, 0x209C, 0x209D, 0x209E, 0x209F,
+ /* A */ 0x20A0, 0x20A1, 0x20A2, 0x20A3, 0x20A4, 0x20A5, 0x20A6, 0x20A7,
+ 0x20A8, 0x20A9, 0x20AA, 0x20AB, 0x20AC, 0x20AD, 0x20AE, 0x20AF,
+ /* B */ 0x20B0, 0x20B1, 0x20B2, 0x20B3, 0x20B4, 0x20B5, 0x20B6, 0x20B7,
+ 0x20B8, 0x20B9, 0x20BA, 0x20BB, 0x20BC, 0x20BD, 0x20BE, 0x20BF,
+ /* C */ 0x20C0, 0x20C1, 0x20C2, 0x20C3, 0x20C4, 0x20C5, 0x20C6, 0x20C7,
+ 0x20C8, 0x20C9, 0x20CA, 0x20CB, 0x20CC, 0x20CD, 0x20CE, 0x20CF,
+ /* D */ 0x20D0, 0x20D1, 0x20D2, 0x20D3, 0x20D4, 0x20D5, 0x20D6, 0x20D7,
+ 0x20D8, 0x20D9, 0x20DA, 0x20DB, 0x20DC, 0x20DD, 0x20DE, 0x20DF,
+ /* E */ 0x20E0, 0x20E1, 0x20E2, 0x20E3, 0x20E4, 0x20E5, 0x20E6, 0x20E7,
+ 0x20E8, 0x20E9, 0x20EA, 0x20EB, 0x20EC, 0x20ED, 0x20EE, 0x20EF,
+ /* F */ 0x20F0, 0x20F1, 0x20F2, 0x20F3, 0x20F4, 0x20F5, 0x20F6, 0x20F7,
+ 0x20F8, 0x20F9, 0x20FA, 0x20FB, 0x20FC, 0x20FD, 0x20FE, 0x20FF,
+
+ // Table 8 (for high byte 0x21)
+
+ /* 0 */ 0x2100, 0x2101, 0x2102, 0x2103, 0x2104, 0x2105, 0x2106, 0x2107,
+ 0x2108, 0x2109, 0x210A, 0x210B, 0x210C, 0x210D, 0x210E, 0x210F,
+ /* 1 */ 0x2110, 0x2111, 0x2112, 0x2113, 0x2114, 0x2115, 0x2116, 0x2117,
+ 0x2118, 0x2119, 0x211A, 0x211B, 0x211C, 0x211D, 0x211E, 0x211F,
+ /* 2 */ 0x2120, 0x2121, 0x2122, 0x2123, 0x2124, 0x2125, 0x2126, 0x2127,
+ 0x2128, 0x2129, 0x212A, 0x212B, 0x212C, 0x212D, 0x212E, 0x212F,
+ /* 3 */ 0x2130, 0x2131, 0x2132, 0x2133, 0x2134, 0x2135, 0x2136, 0x2137,
+ 0x2138, 0x2139, 0x213A, 0x213B, 0x213C, 0x213D, 0x213E, 0x213F,
+ /* 4 */ 0x2140, 0x2141, 0x2142, 0x2143, 0x2144, 0x2145, 0x2146, 0x2147,
+ 0x2148, 0x2149, 0x214A, 0x214B, 0x214C, 0x214D, 0x214E, 0x214F,
+ /* 5 */ 0x2150, 0x2151, 0x2152, 0x2153, 0x2154, 0x2155, 0x2156, 0x2157,
+ 0x2158, 0x2159, 0x215A, 0x215B, 0x215C, 0x215D, 0x215E, 0x215F,
+ /* 6 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177,
+ 0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F,
+ /* 7 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177,
+ 0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F,
+ /* 8 */ 0x2180, 0x2181, 0x2182, 0x2183, 0x2184, 0x2185, 0x2186, 0x2187,
+ 0x2188, 0x2189, 0x218A, 0x218B, 0x218C, 0x218D, 0x218E, 0x218F,
+ /* 9 */ 0x2190, 0x2191, 0x2192, 0x2193, 0x2194, 0x2195, 0x2196, 0x2197,
+ 0x2198, 0x2199, 0x219A, 0x219B, 0x219C, 0x219D, 0x219E, 0x219F,
+ /* A */ 0x21A0, 0x21A1, 0x21A2, 0x21A3, 0x21A4, 0x21A5, 0x21A6, 0x21A7,
+ 0x21A8, 0x21A9, 0x21AA, 0x21AB, 0x21AC, 0x21AD, 0x21AE, 0x21AF,
+ /* B */ 0x21B0, 0x21B1, 0x21B2, 0x21B3, 0x21B4, 0x21B5, 0x21B6, 0x21B7,
+ 0x21B8, 0x21B9, 0x21BA, 0x21BB, 0x21BC, 0x21BD, 0x21BE, 0x21BF,
+ /* C */ 0x21C0, 0x21C1, 0x21C2, 0x21C3, 0x21C4, 0x21C5, 0x21C6, 0x21C7,
+ 0x21C8, 0x21C9, 0x21CA, 0x21CB, 0x21CC, 0x21CD, 0x21CE, 0x21CF,
+ /* D */ 0x21D0, 0x21D1, 0x21D2, 0x21D3, 0x21D4, 0x21D5, 0x21D6, 0x21D7,
+ 0x21D8, 0x21D9, 0x21DA, 0x21DB, 0x21DC, 0x21DD, 0x21DE, 0x21DF,
+ /* E */ 0x21E0, 0x21E1, 0x21E2, 0x21E3, 0x21E4, 0x21E5, 0x21E6, 0x21E7,
+ 0x21E8, 0x21E9, 0x21EA, 0x21EB, 0x21EC, 0x21ED, 0x21EE, 0x21EF,
+ /* F */ 0x21F0, 0x21F1, 0x21F2, 0x21F3, 0x21F4, 0x21F5, 0x21F6, 0x21F7,
+ 0x21F8, 0x21F9, 0x21FA, 0x21FB, 0x21FC, 0x21FD, 0x21FE, 0x21FF,
+
+ // Table 9 (for high byte 0xFE)
+
+ /* 0 */ 0xFE00, 0xFE01, 0xFE02, 0xFE03, 0xFE04, 0xFE05, 0xFE06, 0xFE07,
+ 0xFE08, 0xFE09, 0xFE0A, 0xFE0B, 0xFE0C, 0xFE0D, 0xFE0E, 0xFE0F,
+ /* 1 */ 0xFE10, 0xFE11, 0xFE12, 0xFE13, 0xFE14, 0xFE15, 0xFE16, 0xFE17,
+ 0xFE18, 0xFE19, 0xFE1A, 0xFE1B, 0xFE1C, 0xFE1D, 0xFE1E, 0xFE1F,
+ /* 2 */ 0xFE20, 0xFE21, 0xFE22, 0xFE23, 0xFE24, 0xFE25, 0xFE26, 0xFE27,
+ 0xFE28, 0xFE29, 0xFE2A, 0xFE2B, 0xFE2C, 0xFE2D, 0xFE2E, 0xFE2F,
+ /* 3 */ 0xFE30, 0xFE31, 0xFE32, 0xFE33, 0xFE34, 0xFE35, 0xFE36, 0xFE37,
+ 0xFE38, 0xFE39, 0xFE3A, 0xFE3B, 0xFE3C, 0xFE3D, 0xFE3E, 0xFE3F,
+ /* 4 */ 0xFE40, 0xFE41, 0xFE42, 0xFE43, 0xFE44, 0xFE45, 0xFE46, 0xFE47,
+ 0xFE48, 0xFE49, 0xFE4A, 0xFE4B, 0xFE4C, 0xFE4D, 0xFE4E, 0xFE4F,
+ /* 5 */ 0xFE50, 0xFE51, 0xFE52, 0xFE53, 0xFE54, 0xFE55, 0xFE56, 0xFE57,
+ 0xFE58, 0xFE59, 0xFE5A, 0xFE5B, 0xFE5C, 0xFE5D, 0xFE5E, 0xFE5F,
+ /* 6 */ 0xFE60, 0xFE61, 0xFE62, 0xFE63, 0xFE64, 0xFE65, 0xFE66, 0xFE67,
+ 0xFE68, 0xFE69, 0xFE6A, 0xFE6B, 0xFE6C, 0xFE6D, 0xFE6E, 0xFE6F,
+ /* 7 */ 0xFE70, 0xFE71, 0xFE72, 0xFE73, 0xFE74, 0xFE75, 0xFE76, 0xFE77,
+ 0xFE78, 0xFE79, 0xFE7A, 0xFE7B, 0xFE7C, 0xFE7D, 0xFE7E, 0xFE7F,
+ /* 8 */ 0xFE80, 0xFE81, 0xFE82, 0xFE83, 0xFE84, 0xFE85, 0xFE86, 0xFE87,
+ 0xFE88, 0xFE89, 0xFE8A, 0xFE8B, 0xFE8C, 0xFE8D, 0xFE8E, 0xFE8F,
+ /* 9 */ 0xFE90, 0xFE91, 0xFE92, 0xFE93, 0xFE94, 0xFE95, 0xFE96, 0xFE97,
+ 0xFE98, 0xFE99, 0xFE9A, 0xFE9B, 0xFE9C, 0xFE9D, 0xFE9E, 0xFE9F,
+ /* A */ 0xFEA0, 0xFEA1, 0xFEA2, 0xFEA3, 0xFEA4, 0xFEA5, 0xFEA6, 0xFEA7,
+ 0xFEA8, 0xFEA9, 0xFEAA, 0xFEAB, 0xFEAC, 0xFEAD, 0xFEAE, 0xFEAF,
+ /* B */ 0xFEB0, 0xFEB1, 0xFEB2, 0xFEB3, 0xFEB4, 0xFEB5, 0xFEB6, 0xFEB7,
+ 0xFEB8, 0xFEB9, 0xFEBA, 0xFEBB, 0xFEBC, 0xFEBD, 0xFEBE, 0xFEBF,
+ /* C */ 0xFEC0, 0xFEC1, 0xFEC2, 0xFEC3, 0xFEC4, 0xFEC5, 0xFEC6, 0xFEC7,
+ 0xFEC8, 0xFEC9, 0xFECA, 0xFECB, 0xFECC, 0xFECD, 0xFECE, 0xFECF,
+ /* D */ 0xFED0, 0xFED1, 0xFED2, 0xFED3, 0xFED4, 0xFED5, 0xFED6, 0xFED7,
+ 0xFED8, 0xFED9, 0xFEDA, 0xFEDB, 0xFEDC, 0xFEDD, 0xFEDE, 0xFEDF,
+ /* E */ 0xFEE0, 0xFEE1, 0xFEE2, 0xFEE3, 0xFEE4, 0xFEE5, 0xFEE6, 0xFEE7,
+ 0xFEE8, 0xFEE9, 0xFEEA, 0xFEEB, 0xFEEC, 0xFEED, 0xFEEE, 0xFEEF,
+ /* F */ 0xFEF0, 0xFEF1, 0xFEF2, 0xFEF3, 0xFEF4, 0xFEF5, 0xFEF6, 0xFEF7,
+ 0xFEF8, 0xFEF9, 0xFEFA, 0xFEFB, 0xFEFC, 0xFEFD, 0xFEFE, 0x0000,
+
+ // Table 10 (for high byte 0xFF)
+
+ /* 0 */ 0xFF00, 0xFF01, 0xFF02, 0xFF03, 0xFF04, 0xFF05, 0xFF06, 0xFF07,
+ 0xFF08, 0xFF09, 0xFF0A, 0xFF0B, 0xFF0C, 0xFF0D, 0xFF0E, 0xFF0F,
+ /* 1 */ 0xFF10, 0xFF11, 0xFF12, 0xFF13, 0xFF14, 0xFF15, 0xFF16, 0xFF17,
+ 0xFF18, 0xFF19, 0xFF1A, 0xFF1B, 0xFF1C, 0xFF1D, 0xFF1E, 0xFF1F,
+ /* 2 */ 0xFF20, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47,
+ 0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F,
+ /* 3 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57,
+ 0xFF58, 0xFF59, 0xFF5A, 0xFF3B, 0xFF3C, 0xFF3D, 0xFF3E, 0xFF3F,
+ /* 4 */ 0xFF40, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47,
+ 0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F,
+ /* 5 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57,
+ 0xFF58, 0xFF59, 0xFF5A, 0xFF5B, 0xFF5C, 0xFF5D, 0xFF5E, 0xFF5F,
+ /* 6 */ 0xFF60, 0xFF61, 0xFF62, 0xFF63, 0xFF64, 0xFF65, 0xFF66, 0xFF67,
+ 0xFF68, 0xFF69, 0xFF6A, 0xFF6B, 0xFF6C, 0xFF6D, 0xFF6E, 0xFF6F,
+ /* 7 */ 0xFF70, 0xFF71, 0xFF72, 0xFF73, 0xFF74, 0xFF75, 0xFF76, 0xFF77,
+ 0xFF78, 0xFF79, 0xFF7A, 0xFF7B, 0xFF7C, 0xFF7D, 0xFF7E, 0xFF7F,
+ /* 8 */ 0xFF80, 0xFF81, 0xFF82, 0xFF83, 0xFF84, 0xFF85, 0xFF86, 0xFF87,
+ 0xFF88, 0xFF89, 0xFF8A, 0xFF8B, 0xFF8C, 0xFF8D, 0xFF8E, 0xFF8F,
+ /* 9 */ 0xFF90, 0xFF91, 0xFF92, 0xFF93, 0xFF94, 0xFF95, 0xFF96, 0xFF97,
+ 0xFF98, 0xFF99, 0xFF9A, 0xFF9B, 0xFF9C, 0xFF9D, 0xFF9E, 0xFF9F,
+ /* A */ 0xFFA0, 0xFFA1, 0xFFA2, 0xFFA3, 0xFFA4, 0xFFA5, 0xFFA6, 0xFFA7,
+ 0xFFA8, 0xFFA9, 0xFFAA, 0xFFAB, 0xFFAC, 0xFFAD, 0xFFAE, 0xFFAF,
+ /* B */ 0xFFB0, 0xFFB1, 0xFFB2, 0xFFB3, 0xFFB4, 0xFFB5, 0xFFB6, 0xFFB7,
+ 0xFFB8, 0xFFB9, 0xFFBA, 0xFFBB, 0xFFBC, 0xFFBD, 0xFFBE, 0xFFBF,
+ /* C */ 0xFFC0, 0xFFC1, 0xFFC2, 0xFFC3, 0xFFC4, 0xFFC5, 0xFFC6, 0xFFC7,
+ 0xFFC8, 0xFFC9, 0xFFCA, 0xFFCB, 0xFFCC, 0xFFCD, 0xFFCE, 0xFFCF,
+ /* D */ 0xFFD0, 0xFFD1, 0xFFD2, 0xFFD3, 0xFFD4, 0xFFD5, 0xFFD6, 0xFFD7,
+ 0xFFD8, 0xFFD9, 0xFFDA, 0xFFDB, 0xFFDC, 0xFFDD, 0xFFDE, 0xFFDF,
+ /* E */ 0xFFE0, 0xFFE1, 0xFFE2, 0xFFE3, 0xFFE4, 0xFFE5, 0xFFE6, 0xFFE7,
+ 0xFFE8, 0xFFE9, 0xFFEA, 0xFFEB, 0xFFEC, 0xFFED, 0xFFEE, 0xFFEF,
+ /* F */ 0xFFF0, 0xFFF1, 0xFFF2, 0xFFF3, 0xFFF4, 0xFFF5, 0xFFF6, 0xFFF7,
+ 0xFFF8, 0xFFF9, 0xFFFA, 0xFFFB, 0xFFFC, 0xFFFD, 0xFFFE, 0xFFFF,
+};
+
+// Returns the next non-ignorable codepoint within string starting from the
+// position indicated by index, or zero if there are no more.
+// The passed-in index is automatically advanced as the characters in the input
+// HFS-decomposed UTF-8 strings are read.
+// Decodes UTF-8 starting at *index, case-folds via lower_case_table, and
+// returns the first codepoint the table does not map to 0 (ignorable).
+// Returns 0 when the string is exhausted; *index is advanced past every
+// character consumed.
+inline int HFSReadNextNonIgnorableCodepoint(const char* string,
+                                            int length,
+                                            int* index) {
+  int codepoint = 0;
+  while (*index < length && codepoint == 0) {
+    // CBU8_NEXT returns a value < 0 in error cases. For purposes of string
+    // comparison, we just use that value and flag it with DCHECK.
+    CBU8_NEXT(string, *index, length, codepoint);
+    DCHECK_GT(codepoint, 0);
+    if (codepoint > 0) {
+      // Check if there is a subtable for this upper byte.
+      int lookup_offset = lower_case_table[codepoint >> 8];
+      if (lookup_offset != 0)
+        codepoint = lower_case_table[lookup_offset + (codepoint & 0x00FF)];
+      // Note: codepoint may again be 0 at this point if the character was
+      // an ignorable (the table maps ignorables to zero); the loop then
+      // continues with the next character.
+    }
+  }
+  return codepoint;
+}
+
+} // namespace
+
+// Special UTF-8 version of FastUnicodeCompare. Cf:
+// http://developer.apple.com/mac/library/technotes/tn/tn1150.html#StringComparisonAlgorithm
+// The input strings must be in the special HFS decomposed form.
+// Compares two HFS-decomposed strings codepoint-by-codepoint after case
+// folding and ignorable removal; returns -1/0/1 like strcmp.
+int FilePath::HFSFastUnicodeCompare(StringPieceType string1,
+                                    StringPieceType string2) {
+  int length1 = string1.length();
+  int length2 = string2.length();
+  int index1 = 0;
+  int index2 = 0;
+
+  for (;;) {
+    // Fetch the next folded, non-ignorable codepoint from each string;
+    // 0 means that string is exhausted.
+    int codepoint1 = HFSReadNextNonIgnorableCodepoint(string1.data(),
+                                                      length1,
+                                                      &index1);
+    int codepoint2 = HFSReadNextNonIgnorableCodepoint(string2.data(),
+                                                      length2,
+                                                      &index2);
+    if (codepoint1 != codepoint2)
+      return (codepoint1 < codepoint2) ? -1 : 1;
+    if (codepoint1 == 0) {
+      // Both strings ended at the same time: they compare equal, and both
+      // indices must have consumed their entire input.
+      DCHECK_EQ(index1, length1);
+      DCHECK_EQ(index2, length2);
+      return 0;
+    }
+  }
+}
+
+// Converts a UTF-8 path into the canonically decomposed form HFS Plus
+// stores on disk. Returns an empty string on conversion failure.
+StringType FilePath::GetHFSDecomposedForm(StringPieceType string) {
+  StringType result;
+  // Wrap the input bytes in a CFString without copying; kCFAllocatorNull
+  // tells CF not to take ownership of (or free) the caller's buffer.
+  ScopedCFTypeRef<CFStringRef> cfstring(
+      CFStringCreateWithBytesNoCopy(
+          NULL,
+          reinterpret_cast<const UInt8*>(string.data()),
+          string.length(),
+          kCFStringEncodingUTF8,
+          false,
+          kCFAllocatorNull));
+  if (cfstring) {
+    // Query the maximum length needed to store the result. In most cases this
+    // will overestimate the required space. The return value also already
+    // includes the space needed for a terminating 0.
+    CFIndex length = CFStringGetMaximumSizeOfFileSystemRepresentation(cfstring);
+    DCHECK_GT(length, 0);  // should be at least 1 for the 0-terminator.
+    // Reserve enough space for CFStringGetFileSystemRepresentation to write
+    // into. Also set the length to the maximum so that we can shrink it later.
+    // (Increasing rather than decreasing it would clobber the string contents!)
+    result.reserve(length);
+    result.resize(length - 1);
+    Boolean success = CFStringGetFileSystemRepresentation(cfstring,
+                                                          &result[0],
+                                                          length);
+    if (success) {
+      // Reduce result.length() to actual string length.
+      result.resize(strlen(result.c_str()));
+    } else {
+      // An error occurred -> clear result.
+      result.clear();
+    }
+  }
+  return result;
+}
+
+// Case-insensitive comparison intended to match HFS Plus on-disk semantics:
+// both inputs are converted to the HFS-decomposed form and compared with
+// HFSFastUnicodeCompare().
+int FilePath::CompareIgnoreCase(StringPieceType string1,
+                                StringPieceType string2) {
+  // Quick checks for empty strings - these speed things up a bit and make the
+  // following code cleaner.
+  if (string1.empty())
+    return string2.empty() ? 0 : -1;
+  if (string2.empty())
+    return 1;
+
+  StringType hfs1 = GetHFSDecomposedForm(string1);
+  StringType hfs2 = GetHFSDecomposedForm(string2);
+
+  // GetHFSDecomposedForm() returns an empty string in an error case.
+  if (hfs1.empty() || hfs2.empty()) {
+    NOTREACHED();
+    // Fall back to CoreFoundation's case-insensitive compare, again wrapping
+    // the raw bytes without copying them.
+    ScopedCFTypeRef<CFStringRef> cfstring1(
+        CFStringCreateWithBytesNoCopy(
+            NULL,
+            reinterpret_cast<const UInt8*>(string1.data()),
+            string1.length(),
+            kCFStringEncodingUTF8,
+            false,
+            kCFAllocatorNull));
+    ScopedCFTypeRef<CFStringRef> cfstring2(
+        CFStringCreateWithBytesNoCopy(
+            NULL,
+            reinterpret_cast<const UInt8*>(string2.data()),
+            string2.length(),
+            kCFStringEncodingUTF8,
+            false,
+            kCFAllocatorNull));
+    return CFStringCompare(cfstring1,
+                           cfstring2,
+                           kCFCompareCaseInsensitive);
+  }
+
+  return HFSFastUnicodeCompare(hfs1, hfs2);
+}
+
+#else // << WIN. MACOSX | other (POSIX) >>
+
+// Generic Posix system comparisons.
+int FilePath::CompareIgnoreCase(StringPieceType string1,
+ StringPieceType string2) {
+ // Specifically need null termianted strings for this API call.
+ int comparison = strcasecmp(string1.as_string().c_str(),
+ string2.as_string().c_str());
+ if (comparison < 0)
+ return -1;
+ if (comparison > 0)
+ return 1;
+ return 0;
+}
+
+#endif // OS versions of CompareIgnoreCase()
+
+
+// Removes redundant trailing separators from path_, taking care not to
+// strip a lone root separator, a leading double separator, or the separator
+// immediately following a drive letter.
+void FilePath::StripTrailingSeparatorsInternal() {
+  // If there is no drive letter, start will be 1, which will prevent stripping
+  // the leading separator if there is only one separator. If there is a drive
+  // letter, start will be set appropriately to prevent stripping the first
+  // separator following the drive letter, if a separator immediately follows
+  // the drive letter.
+  StringType::size_type start = FindDriveLetter(path_) + 2;
+
+  StringType::size_type last_stripped = StringType::npos;
+  // Walk backwards over trailing separators, shrinking path_ as we go.
+  for (StringType::size_type pos = path_.length();
+       pos > start && IsSeparator(path_[pos - 1]);
+       --pos) {
+    // If the string only has two separators and they're at the beginning,
+    // don't strip them, unless the string began with more than two separators.
+    if (pos != start + 1 || last_stripped == start + 2 ||
+        !IsSeparator(path_[start - 1])) {
+      path_.resize(pos - 1);
+      last_stripped = pos;
+    }
+  }
+}
+
+// Normalizes every separator to the platform's primary one, kSeparators[0].
+FilePath FilePath::NormalizePathSeparators() const {
+  return NormalizePathSeparatorsTo(kSeparators[0]);
+}
+
+// Returns a copy of this path with every recognized separator replaced by
+// |separator|. A no-op on platforms with a single separator character.
+FilePath FilePath::NormalizePathSeparatorsTo(CharType separator) const {
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+  // The requested separator must itself be one of the platform's
+  // recognized separator characters.
+  DCHECK_NE(kSeparators + kSeparatorsLength,
+            std::find(kSeparators, kSeparators + kSeparatorsLength, separator));
+  StringType copy = path_;
+  // Replace each separator variant in turn with the requested one.
+  for (size_t i = 0; i < kSeparatorsLength; ++i) {
+    std::replace(copy.begin(), copy.end(), kSeparators[i], separator);
+  }
+  return FilePath(copy);
+#else
+  // Only one separator exists on this platform; nothing to normalize.
+  (void)separator;  // Avoid an unused warning.
+  return *this;
+#endif
+}
+
+#if defined(OS_ANDROID)
+// Android content:// URIs can appear where file paths are expected; reports
+// whether this path is one. The scheme match is ASCII case-insensitive.
+bool FilePath::IsContentUri() const {
+  return StartsWith(path_, "content://", base::CompareCase::INSENSITIVE_ASCII);
+}
+#endif
+
+} // namespace base
diff --git a/libchrome/base/files/file_path.h b/libchrome/base/files/file_path.h
new file mode 100644
index 0000000..3234df7
--- /dev/null
+++ b/libchrome/base/files/file_path.h
@@ -0,0 +1,479 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// FilePath is a container for pathnames stored in a platform's native string
+// type, providing containers for manipulation in accordance with the
+// platform's conventions for pathnames. It supports the following path
+// types:
+//
+// POSIX Windows
+// --------------- ----------------------------------
+// Fundamental type char[] wchar_t[]
+// Encoding unspecified* UTF-16
+// Separator / \, tolerant of /
+// Drive letters no case-insensitive A-Z followed by :
+// Alternate root // (surprise!) \\, for UNC paths
+//
+// * The encoding need not be specified on POSIX systems, although some
+// POSIX-compliant systems do specify an encoding. Mac OS X uses UTF-8.
+// Chrome OS also uses UTF-8.
+// Linux does not specify an encoding, but in practice, the locale's
+// character set may be used.
+//
+// For more arcane bits of path trivia, see below.
+//
+// FilePath objects are intended to be used anywhere paths are. An
+// application may pass FilePath objects around internally, masking the
+// underlying differences between systems, only differing in implementation
+// where interfacing directly with the system. For example, a single
+// OpenFile(const FilePath &) function may be made available, allowing all
+// callers to operate without regard to the underlying implementation. On
+// POSIX-like platforms, OpenFile might wrap fopen, and on Windows, it might
+// wrap _wfopen_s, perhaps both by calling file_path.value().c_str(). This
+// allows each platform to pass pathnames around without requiring conversions
+// between encodings, which has an impact on performance, but more importantly,
+// has an impact on correctness on platforms that do not have well-defined
+// encodings for pathnames.
+//
+// Several methods are available to perform common operations on a FilePath
+// object, such as determining the parent directory (DirName), isolating the
+// final path component (BaseName), and appending a relative pathname string
+// to an existing FilePath object (Append). These methods are highly
+// recommended over attempting to split and concatenate strings directly.
+// These methods are based purely on string manipulation and knowledge of
+// platform-specific pathname conventions, and do not consult the filesystem
+// at all, making them safe to use without fear of blocking on I/O operations.
+// These methods do not function as mutators but instead return distinct
+// instances of FilePath objects, and are therefore safe to use on const
+// objects. The objects themselves are safe to share between threads.
+//
+// To aid in initialization of FilePath objects from string literals, a
+// FILE_PATH_LITERAL macro is provided, which accounts for the difference
+// between char[]-based pathnames on POSIX systems and wchar_t[]-based
+// pathnames on Windows.
+//
+// As a precaution against premature truncation, paths can't contain NULs.
+//
+// Because a FilePath object should not be instantiated at the global scope,
+// instead, use a FilePath::CharType[] and initialize it with
+// FILE_PATH_LITERAL. At runtime, a FilePath object can be created from the
+// character array. Example:
+//
+// | const FilePath::CharType kLogFileName[] = FILE_PATH_LITERAL("log.txt");
+// |
+// | void Function() {
+// | FilePath log_file_path(kLogFileName);
+// | [...]
+// | }
+//
+// WARNING: FilePaths should ALWAYS be displayed with LTR directionality, even
+// when the UI language is RTL. This means you always need to pass filepaths
+// through base::i18n::WrapPathWithLTRFormatting() before displaying it in the
+// RTL UI.
+//
+// This is a very common source of bugs, please try to keep this in mind.
+//
+// ARCANE BITS OF PATH TRIVIA
+//
+// - A double leading slash is actually part of the POSIX standard. Systems
+// are allowed to treat // as an alternate root, as Windows does for UNC
+// (network share) paths. Most POSIX systems don't do anything special
+// with two leading slashes, but FilePath handles this case properly
+// in case it ever comes across such a system. FilePath needs this support
+// for Windows UNC paths, anyway.
+// References:
+// The Open Group Base Specifications Issue 7, sections 3.267 ("Pathname")
+// and 4.12 ("Pathname Resolution"), available at:
+// http://www.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_267
+// http://www.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_12
+//
+// - Windows treats c:\\ the same way it treats \\. This was intended to
+// allow older applications that require drive letters to support UNC paths
+// like \\server\share\path, by permitting c:\\server\share\path as an
+// equivalent. Since the OS treats these paths specially, FilePath needs
+// to do the same. Since Windows can use either / or \ as the separator,
+// FilePath treats c://, c:\\, //, and \\ all equivalently.
+// Reference:
+// The Old New Thing, "Why is a drive letter permitted in front of UNC
+// paths (sometimes)?", available at:
+// http://blogs.msdn.com/oldnewthing/archive/2005/11/22/495740.aspx
+
+#ifndef BASE_FILES_FILE_PATH_H_
+#define BASE_FILES_FILE_PATH_H_
+
+#include <stddef.h>
+
+#include <iosfwd>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+// Windows-style drive letter support and pathname separator characters can be
+// enabled and disabled independently, to aid testing. These #defines are
+// here so that the same setting can be used in both the implementation and
+// in the unit test.
+#if defined(OS_WIN)
+#define FILE_PATH_USES_DRIVE_LETTERS
+#define FILE_PATH_USES_WIN_SEPARATORS
+#endif // OS_WIN
+
+// To print path names portably use PRIsFP (based on PRIuS and friends from
+// C99 and format_macros.h) like this:
+// base::StringPrintf("Path is %" PRIsFP ".\n", path.value().c_str());
+#if defined(OS_POSIX)
+#define PRIsFP "s"
+#elif defined(OS_WIN)
+#define PRIsFP "ls"
+#endif // OS_WIN
+
+namespace base {
+
+class Pickle;
+class PickleIterator;
+class PickleSizer;
+
+// An abstraction to isolate users from the differences between native
+// pathnames on different platforms.
+class BASE_EXPORT FilePath {
+ public:
+#if defined(OS_POSIX)
+  // On most platforms, native pathnames are char arrays, and the encoding
+  // may or may not be specified. On Mac OS X, native pathnames are encoded
+  // in UTF-8.
+  typedef std::string StringType;
+#elif defined(OS_WIN)
+  // On Windows, for Unicode-aware applications, native pathnames are wchar_t
+  // arrays encoded in UTF-16.
+  typedef std::wstring StringType;
+#endif  // OS_WIN
+
+  typedef BasicStringPiece<StringType> StringPieceType;
+  typedef StringType::value_type CharType;
+
+  // Null-terminated array of separators used to separate components in
+  // hierarchical paths. Each character in this array is a valid separator,
+  // but kSeparators[0] is treated as the canonical separator and will be used
+  // when composing pathnames.
+  static const CharType kSeparators[];
+
+  // arraysize(kSeparators).
+  static const size_t kSeparatorsLength;
+
+  // A special path component meaning "this directory."
+  static const CharType kCurrentDirectory[];
+
+  // A special path component meaning "the parent directory."
+  static const CharType kParentDirectory[];
+
+  // The character used to identify a file extension.
+  static const CharType kExtensionSeparator;
+
+  FilePath();
+  FilePath(const FilePath& that);
+  explicit FilePath(StringPieceType path);
+  ~FilePath();
+  FilePath& operator=(const FilePath& that);
+
+  bool operator==(const FilePath& that) const;
+
+  bool operator!=(const FilePath& that) const;
+
+  // Required for some STL containers and operations
+  bool operator<(const FilePath& that) const {
+    return path_ < that.path_;
+  }
+
+  const StringType& value() const { return path_; }
+
+  bool empty() const { return path_.empty(); }
+
+  void clear() { path_.clear(); }
+
+  // Returns true if |character| is in kSeparators.
+  static bool IsSeparator(CharType character);
+
+  // Returns a vector of all of the components of the provided path. It is
+  // equivalent to calling DirName().value() on the path's root component,
+  // and BaseName().value() on each child component.
+  //
+  // To make sure this is lossless so we can differentiate absolute and
+  // relative paths, the root slash will be included even though no other
+  // slashes will be. The precise behavior is:
+  //
+  // Posix:  "/foo/bar"  ->  [ "/", "foo", "bar" ]
+  // Windows:  "C:\foo\bar"  ->  [ "C:", "\\", "foo", "bar" ]
+  void GetComponents(std::vector<FilePath::StringType>* components) const;
+
+  // Returns true if this FilePath is a strict parent of the |child|. Absolute
+  // and relative paths are accepted i.e. is /foo parent to /foo/bar and
+  // is foo parent to foo/bar. Does not convert paths to absolute, follow
+  // symlinks or directory navigation (e.g. ".."). A path is *NOT* its own
+  // parent.
+  bool IsParent(const FilePath& child) const;
+
+  // If IsParent(child) holds, appends to path (if non-NULL) the
+  // relative path to child and returns true. For example, if parent
+  // holds "/Users/johndoe/Library/Application Support", child holds
+  // "/Users/johndoe/Library/Application Support/Google/Chrome/Default", and
+  // *path holds "/Users/johndoe/Library/Caches", then after
+  // parent.AppendRelativePath(child, path) is called *path will hold
+  // "/Users/johndoe/Library/Caches/Google/Chrome/Default". Otherwise,
+  // returns false.
+  bool AppendRelativePath(const FilePath& child, FilePath* path) const;
+
+  // Returns a FilePath corresponding to the directory containing the path
+  // named by this object, stripping away the file component. If this object
+  // only contains one component, returns a FilePath identifying
+  // kCurrentDirectory. If this object already refers to the root directory,
+  // returns a FilePath identifying the root directory.
+  FilePath DirName() const WARN_UNUSED_RESULT;
+
+  // Returns a FilePath corresponding to the last path component of this
+  // object, either a file or a directory. If this object already refers to
+  // the root directory, returns a FilePath identifying the root directory;
+  // this is the only situation in which BaseName will return an absolute path.
+  FilePath BaseName() const WARN_UNUSED_RESULT;
+
+  // Returns ".jpg" for path "C:\pics\jojo.jpg", or an empty string if
+  // the file has no extension. If non-empty, Extension() will always start
+  // with precisely one ".". The following code should always work regardless
+  // of the value of path. For common double-extensions like .tar.gz and
+  // .user.js, this method returns the combined extension. For a single
+  // component, use FinalExtension().
+  // new_path = path.RemoveExtension().value().append(path.Extension());
+  // ASSERT(new_path == path.value());
+  // NOTE: this is different from the original file_util implementation which
+  // returned the extension without a leading "." ("jpg" instead of ".jpg")
+  StringType Extension() const WARN_UNUSED_RESULT;
+
+  // Returns the path's file extension, as in Extension(), but will
+  // never return a double extension.
+  //
+  // TODO(davidben): Check all our extension-sensitive code to see if
+  // we can rename this to Extension() and the other to something like
+  // LongExtension(), defaulting to short extensions and leaving the
+  // long "extensions" to logic like base::GetUniquePathNumber().
+  StringType FinalExtension() const WARN_UNUSED_RESULT;
+
+  // Returns "C:\pics\jojo" for path "C:\pics\jojo.jpg"
+  // NOTE: this is slightly different from the similar file_util implementation
+  // which returned simply 'jojo'.
+  FilePath RemoveExtension() const WARN_UNUSED_RESULT;
+
+  // Removes the path's file extension, as in RemoveExtension(), but
+  // ignores double extensions.
+  FilePath RemoveFinalExtension() const WARN_UNUSED_RESULT;
+
+  // Inserts |suffix| after the file name portion of |path| but before the
+  // extension. Returns "" if BaseName() == "." or "..".
+  // Examples:
+  // path == "C:\pics\jojo.jpg" suffix == " (1)", returns "C:\pics\jojo (1).jpg"
+  // path == "jojo.jpg" suffix == " (1)", returns "jojo (1).jpg"
+  // path == "C:\pics\jojo" suffix == " (1)", returns "C:\pics\jojo (1)"
+  // path == "C:\pics.old\jojo" suffix == " (1)", returns "C:\pics.old\jojo (1)"
+  FilePath InsertBeforeExtension(
+      StringPieceType suffix) const WARN_UNUSED_RESULT;
+  FilePath InsertBeforeExtensionASCII(
+      StringPiece suffix) const WARN_UNUSED_RESULT;
+
+  // Adds |extension| to |file_name|. Returns the current FilePath if
+  // |extension| is empty. Returns "" if BaseName() == "." or "..".
+  FilePath AddExtension(StringPieceType extension) const WARN_UNUSED_RESULT;
+
+  // Replaces the extension of |file_name| with |extension|. If |file_name|
+  // does not have an extension, then |extension| is added. If |extension| is
+  // empty, then the extension is removed from |file_name|.
+  // Returns "" if BaseName() == "." or "..".
+  FilePath ReplaceExtension(StringPieceType extension) const WARN_UNUSED_RESULT;
+
+  // Returns true if the file path matches the specified extension. The test is
+  // case insensitive. Don't forget the leading period if appropriate.
+  bool MatchesExtension(StringPieceType extension) const;
+
+  // Returns a FilePath by appending a separator and the supplied path
+  // component to this object's path. Append takes care to avoid adding
+  // excessive separators if this object's path already ends with a separator.
+  // If this object's path is kCurrentDirectory, a new FilePath corresponding
+  // only to |component| is returned. |component| must be a relative path;
+  // it is an error to pass an absolute path.
+  FilePath Append(StringPieceType component) const WARN_UNUSED_RESULT;
+  FilePath Append(const FilePath& component) const WARN_UNUSED_RESULT;
+
+  // Although Windows StringType is std::wstring, since the encoding it uses for
+  // paths is well defined, it can handle ASCII path components as well.
+  // Mac uses UTF8, and since ASCII is a subset of that, it works there as well.
+  // On Linux, although it can use any 8-bit encoding for paths, we assume that
+  // ASCII is a valid subset, regardless of the encoding, since many operating
+  // system paths will always be ASCII.
+  FilePath AppendASCII(StringPiece component) const WARN_UNUSED_RESULT;
+
+  // Returns true if this FilePath contains an absolute path. On Windows, an
+  // absolute path begins with either a drive letter specification followed by
+  // a separator character, or with two separator characters. On POSIX
+  // platforms, an absolute path begins with a separator character.
+  bool IsAbsolute() const;
+
+  // Returns true if the path ends with a path separator character.
+  bool EndsWithSeparator() const WARN_UNUSED_RESULT;
+
+  // Returns a copy of this FilePath that ends with a trailing separator. If
+  // the input path is empty, an empty FilePath will be returned.
+  FilePath AsEndingWithSeparator() const WARN_UNUSED_RESULT;
+
+  // Returns a copy of this FilePath that does not end with a trailing
+  // separator.
+  FilePath StripTrailingSeparators() const WARN_UNUSED_RESULT;
+
+  // Returns true if this FilePath contains an attempt to reference a parent
+  // directory (e.g. has a path component that is "..").
+  bool ReferencesParent() const;
+
+  // Return a Unicode human-readable version of this path.
+  // Warning: you can *not*, in general, go from a display name back to a real
+  // path. Only use this when displaying paths to users, not just when you
+  // want to stuff a string16 into some other API.
+  string16 LossyDisplayName() const;
+
+  // Return the path as ASCII, or the empty string if the path is not ASCII.
+  // This should only be used for cases where the FilePath is representing a
+  // known-ASCII filename.
+  std::string MaybeAsASCII() const;
+
+  // Return the path as UTF-8.
+  //
+  // This function is *unsafe* as there is no way to tell what encoding is
+  // used in file names on POSIX systems other than Mac and Chrome OS,
+  // although UTF-8 is practically used everywhere these days. To mitigate
+  // the encoding issue, this function internally calls
+  // SysNativeMBToWide() on POSIX systems other than Mac and Chrome OS,
+  // per assumption that the current locale's encoding is used in file
+  // names, but this isn't a perfect solution.
+  //
+  // Once it becomes safe to stop caring about non-UTF-8 file names,
+  // the SysNativeMBToWide() hack will be removed from the code, along
+  // with "Unsafe" in the function name.
+  std::string AsUTF8Unsafe() const;
+
+  // Similar to AsUTF8Unsafe, but returns UTF-16 instead.
+  string16 AsUTF16Unsafe() const;
+
+  // Returns a FilePath object from a path name in UTF-8. This function
+  // should only be used for cases where you are sure that the input
+  // string is UTF-8.
+  //
+  // Like AsUTF8Unsafe(), this function is unsafe. This function
+  // internally calls SysWideToNativeMB() on POSIX systems other than Mac
+  // and Chrome OS, to mitigate the encoding issue. See the comment at
+  // AsUTF8Unsafe() for details.
+  static FilePath FromUTF8Unsafe(StringPiece utf8);
+
+  // Similar to FromUTF8Unsafe, but accepts UTF-16 instead.
+  static FilePath FromUTF16Unsafe(StringPiece16 utf16);
+
+  void GetSizeForPickle(PickleSizer* sizer) const;
+  void WriteToPickle(Pickle* pickle) const;
+  bool ReadFromPickle(PickleIterator* iter);
+
+  // Normalize all path separators to backslash on Windows
+  // (if FILE_PATH_USES_WIN_SEPARATORS is true), or do nothing on POSIX systems.
+  FilePath NormalizePathSeparators() const;
+
+  // Normalize all path separators to given type on Windows
+  // (if FILE_PATH_USES_WIN_SEPARATORS is true), or do nothing on POSIX systems.
+  FilePath NormalizePathSeparatorsTo(CharType separator) const;
+
+  // Compare two strings in the same way the file system does.
+  // Note that these always ignore case, even on file systems that are case-
+  // sensitive. If case-sensitive comparison is ever needed, add corresponding
+  // methods here.
+  // The methods are written as a static method so that they can also be used
+  // on parts of a file path, e.g., just the extension.
+  // CompareIgnoreCase() returns -1, 0 or 1 for less-than, equal-to and
+  // greater-than respectively.
+  static int CompareIgnoreCase(StringPieceType string1,
+                               StringPieceType string2);
+  static bool CompareEqualIgnoreCase(StringPieceType string1,
+                                     StringPieceType string2) {
+    return CompareIgnoreCase(string1, string2) == 0;
+  }
+  static bool CompareLessIgnoreCase(StringPieceType string1,
+                                    StringPieceType string2) {
+    return CompareIgnoreCase(string1, string2) < 0;
+  }
+
+#if defined(OS_MACOSX)
+  // Returns the string in the special canonical decomposed form as defined for
+  // HFS, which is close to, but not quite, decomposition form D. See
+  // http://developer.apple.com/mac/library/technotes/tn/tn1150.html#UnicodeSubtleties
+  // for further comments.
+  // Returns the empty string if the conversion failed.
+  static StringType GetHFSDecomposedForm(StringPieceType string);
+
+  // Special UTF-8 version of FastUnicodeCompare. Cf:
+  // http://developer.apple.com/mac/library/technotes/tn/tn1150.html#StringComparisonAlgorithm
+  // IMPORTANT: The input strings must be in the special HFS decomposed form!
+  // (cf. above GetHFSDecomposedForm method)
+  static int HFSFastUnicodeCompare(StringPieceType string1,
+                                   StringPieceType string2);
+#endif
+
+#if defined(OS_ANDROID)
+  // On Android, the file selection dialog can return a file with a content
+  // URI scheme (starting with content://). A content URI needs to be opened
+  // with ContentResolver to guarantee that the app has appropriate
+  // permissions to access it.
+  // Returns true if the path is a content URI, or false otherwise.
+  bool IsContentUri() const;
+#endif
+
+ private:
+  // Remove trailing separators from this object. If the path is absolute, it
+  // will never be stripped any more than to refer to the absolute root
+  // directory, so "////" will become "/", not "". A leading pair of
+  // separators is never stripped, to support alternate roots. This is used to
+  // support UNC paths on Windows.
+  void StripTrailingSeparatorsInternal();
+
+  StringType path_;
+};
+
+// This is required by googletest to print a readable output on test failures.
+// This is declared here for use in gtest-based unit tests but is defined in
+// the test_support_base target. Depend on that to use this in your unit test.
+// This should not be used in production code - call ToString() instead.
+void PrintTo(const FilePath& path, std::ostream* out);
+
+} // namespace base
+
+// Macros for string literal initialization of FilePath::CharType[], and for
+// using a FilePath::CharType[] in a printf-style format string.
+#if defined(OS_POSIX)
+#define FILE_PATH_LITERAL(x) x
+#define PRFilePath "s"
+#elif defined(OS_WIN)
+#define FILE_PATH_LITERAL(x) L ## x
+#define PRFilePath "ls"
+#endif // OS_WIN
+
+// Provide a hash function so that hash_sets and maps can contain FilePath
+// objects.
+namespace BASE_HASH_NAMESPACE {
+
+template<>
+struct hash<base::FilePath> {
+  size_t operator()(const base::FilePath& f) const {
+    return hash<base::FilePath::StringType>()(f.value());  // Hash of the underlying path string.
+  }
+};
+
+} // namespace BASE_HASH_NAMESPACE
+
+#endif // BASE_FILES_FILE_PATH_H_
diff --git a/libchrome/base/files/file_path_constants.cc b/libchrome/base/files/file_path_constants.cc
new file mode 100644
index 0000000..0b74846
--- /dev/null
+++ b/libchrome/base/files/file_path_constants.cc
@@ -0,0 +1,25 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include "base/files/file_path.h"
+#include "base/macros.h"
+
+namespace base {
+
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+const FilePath::CharType FilePath::kSeparators[] = FILE_PATH_LITERAL("\\/");  // Backslash is canonical; '/' also accepted.
+#else  // FILE_PATH_USES_WIN_SEPARATORS
+const FilePath::CharType FilePath::kSeparators[] = FILE_PATH_LITERAL("/");  // POSIX: slash only.
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+
+const size_t FilePath::kSeparatorsLength = arraysize(kSeparators);  // Includes the terminating NUL.
+
+const FilePath::CharType FilePath::kCurrentDirectory[] = FILE_PATH_LITERAL(".");
+const FilePath::CharType FilePath::kParentDirectory[] = FILE_PATH_LITERAL("..");
+
+const FilePath::CharType FilePath::kExtensionSeparator = FILE_PATH_LITERAL('.');
+
+} // namespace base
diff --git a/libchrome/base/files/file_path_unittest.cc b/libchrome/base/files/file_path_unittest.cc
new file mode 100644
index 0000000..d8c5969
--- /dev/null
+++ b/libchrome/base/files/file_path_unittest.cc
@@ -0,0 +1,1318 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <sstream>
+
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/test/scoped_locale.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+#if defined(OS_POSIX)
+#include "base/test/scoped_locale.h"
+#endif
+
+// This macro helps avoid wrapped lines in the test structs.
+#define FPL(x) FILE_PATH_LITERAL(x)
+
+// This macro constructs strings which can contain NULs.
+#define FPS(x) FilePath::StringType(FPL(x), arraysize(FPL(x)) - 1)
+
+namespace base {
+
+struct UnaryTestData {
+  const FilePath::CharType* input;     // Path fed to the method under test.
+  const FilePath::CharType* expected;  // Expected result path.
+};
+
+struct UnaryBooleanTestData {
+  const FilePath::CharType* input;  // Path fed to the predicate under test.
+  bool expected;                    // Expected predicate result.
+};
+
+struct BinaryTestData {
+  const FilePath::CharType* inputs[2];  // Two paths fed to the method under test.
+  const FilePath::CharType* expected;   // Expected result path.
+};
+
+struct BinaryBooleanTestData {
+  const FilePath::CharType* inputs[2];  // Two paths fed to the predicate under test.
+  bool expected;                        // Expected predicate result.
+};
+
+struct BinaryIntTestData {
+  const FilePath::CharType* inputs[2];  // Two paths fed to the comparison under test.
+  int expected;                         // Expected integer result (e.g. -1/0/1).
+};
+
+struct UTF8TestData {
+  const FilePath::CharType* native;  // Path in the platform's native encoding.
+  const char* utf8;                  // The same path expressed as UTF-8.
+};
+
+// file_util winds up using autoreleased objects on the Mac, so this needs
+// to be a PlatformTest
+typedef PlatformTest FilePathTest;
+
+TEST_F(FilePathTest, DirName) {
+  const struct UnaryTestData cases[] = {  // {input, expected DirName()}.
+    { FPL(""),              FPL(".") },
+    { FPL("aa"),            FPL(".") },
+    { FPL("/aa/bb"),        FPL("/aa") },
+    { FPL("/aa/bb/"),       FPL("/aa") },
+    { FPL("/aa/bb//"),      FPL("/aa") },
+    { FPL("/aa/bb/ccc"),    FPL("/aa/bb") },
+    { FPL("/aa"),           FPL("/") },
+    { FPL("/aa/"),          FPL("/") },
+    { FPL("/"),             FPL("/") },
+    { FPL("//"),            FPL("//") },  // "//" is an alternate root; preserved.
+    { FPL("///"),           FPL("/") },
+    { FPL("aa/"),           FPL(".") },
+    { FPL("aa/bb"),         FPL("aa") },
+    { FPL("aa/bb/"),        FPL("aa") },
+    { FPL("aa/bb//"),       FPL("aa") },
+    { FPL("aa//bb//"),      FPL("aa") },
+    { FPL("aa//bb/"),       FPL("aa") },
+    { FPL("aa//bb"),        FPL("aa") },
+    { FPL("//aa/bb"),       FPL("//aa") },
+    { FPL("//aa/"),         FPL("//") },
+    { FPL("//aa"),          FPL("//") },
+    { FPL("0:"),            FPL(".") },  // Not valid drive letters: outside A-Z/a-z.
+    { FPL("@:"),            FPL(".") },
+    { FPL("[:"),            FPL(".") },
+    { FPL("`:"),            FPL(".") },
+    { FPL("{:"),            FPL(".") },
+    { FPL("\xB3:"),         FPL(".") },
+    { FPL("\xC5:"),         FPL(".") },
+#if defined(OS_WIN)
+    { FPL("\x0143:"),       FPL(".") },
+#endif  // OS_WIN
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("c:"),            FPL("c:") },
+    { FPL("C:"),            FPL("C:") },
+    { FPL("A:"),            FPL("A:") },
+    { FPL("Z:"),            FPL("Z:") },
+    { FPL("a:"),            FPL("a:") },
+    { FPL("z:"),            FPL("z:") },
+    { FPL("c:aa"),          FPL("c:") },
+    { FPL("c:/"),           FPL("c:/") },
+    { FPL("c://"),          FPL("c://") },
+    { FPL("c:///"),         FPL("c:/") },
+    { FPL("c:/aa"),         FPL("c:/") },
+    { FPL("c:/aa/"),        FPL("c:/") },
+    { FPL("c:/aa/bb"),      FPL("c:/aa") },
+    { FPL("c:aa/bb"),       FPL("c:aa") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { FPL("\\aa\\bb"),      FPL("\\aa") },
+    { FPL("\\aa\\bb\\"),    FPL("\\aa") },
+    { FPL("\\aa\\bb\\\\"),  FPL("\\aa") },
+    { FPL("\\aa\\bb\\ccc"), FPL("\\aa\\bb") },
+    { FPL("\\aa"),          FPL("\\") },
+    { FPL("\\aa\\"),        FPL("\\") },
+    { FPL("\\"),            FPL("\\") },
+    { FPL("\\\\"),          FPL("\\\\") },
+    { FPL("\\\\\\"),        FPL("\\") },
+    { FPL("aa\\"),          FPL(".") },
+    { FPL("aa\\bb"),        FPL("aa") },
+    { FPL("aa\\bb\\"),      FPL("aa") },
+    { FPL("aa\\bb\\\\"),    FPL("aa") },
+    { FPL("aa\\\\bb\\\\"),  FPL("aa") },
+    { FPL("aa\\\\bb\\"),    FPL("aa") },
+    { FPL("aa\\\\bb"),      FPL("aa") },
+    { FPL("\\\\aa\\bb"),    FPL("\\\\aa") },
+    { FPL("\\\\aa\\"),      FPL("\\\\") },
+    { FPL("\\\\aa"),        FPL("\\\\") },
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("c:\\"),          FPL("c:\\") },
+    { FPL("c:\\\\"),        FPL("c:\\\\") },
+    { FPL("c:\\\\\\"),      FPL("c:\\") },
+    { FPL("c:\\aa"),        FPL("c:\\") },
+    { FPL("c:\\aa\\"),      FPL("c:\\") },
+    { FPL("c:\\aa\\bb"),    FPL("c:\\aa") },
+    { FPL("c:aa\\bb"),      FPL("c:aa") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {  // Check every table row, reporting the index on failure.
+    FilePath input(cases[i].input);
+    FilePath observed = input.DirName();
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed.value()) <<
+              "i: " << i << ", input: " << input.value();
+  }
+}
+
+TEST_F(FilePathTest, BaseName) {
+  const struct UnaryTestData cases[] = {  // {input, expected BaseName()}.
+    { FPL(""),              FPL("") },
+    { FPL("aa"),            FPL("aa") },
+    { FPL("/aa/bb"),        FPL("bb") },
+    { FPL("/aa/bb/"),       FPL("bb") },
+    { FPL("/aa/bb//"),      FPL("bb") },
+    { FPL("/aa/bb/ccc"),    FPL("ccc") },
+    { FPL("/aa"),           FPL("aa") },
+    { FPL("/"),             FPL("/") },  // Root's base name is the root itself.
+    { FPL("//"),            FPL("//") },
+    { FPL("///"),           FPL("/") },
+    { FPL("aa/"),           FPL("aa") },
+    { FPL("aa/bb"),         FPL("bb") },
+    { FPL("aa/bb/"),        FPL("bb") },
+    { FPL("aa/bb//"),       FPL("bb") },
+    { FPL("aa//bb//"),      FPL("bb") },
+    { FPL("aa//bb/"),       FPL("bb") },
+    { FPL("aa//bb"),        FPL("bb") },
+    { FPL("//aa/bb"),       FPL("bb") },
+    { FPL("//aa/"),         FPL("aa") },
+    { FPL("//aa"),          FPL("aa") },
+    { FPL("0:"),            FPL("0:") },  // Not valid drive letters: outside A-Z/a-z.
+    { FPL("@:"),            FPL("@:") },
+    { FPL("[:"),            FPL("[:") },
+    { FPL("`:"),            FPL("`:") },
+    { FPL("{:"),            FPL("{:") },
+    { FPL("\xB3:"),         FPL("\xB3:") },
+    { FPL("\xC5:"),         FPL("\xC5:") },
+#if defined(OS_WIN)
+    { FPL("\x0143:"),       FPL("\x0143:") },
+#endif  // OS_WIN
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("c:"),            FPL("") },  // Drive letter alone has no base name.
+    { FPL("C:"),            FPL("") },
+    { FPL("A:"),            FPL("") },
+    { FPL("Z:"),            FPL("") },
+    { FPL("a:"),            FPL("") },
+    { FPL("z:"),            FPL("") },
+    { FPL("c:aa"),          FPL("aa") },
+    { FPL("c:/"),           FPL("/") },
+    { FPL("c://"),          FPL("//") },
+    { FPL("c:///"),         FPL("/") },
+    { FPL("c:/aa"),         FPL("aa") },
+    { FPL("c:/aa/"),        FPL("aa") },
+    { FPL("c:/aa/bb"),      FPL("bb") },
+    { FPL("c:aa/bb"),       FPL("bb") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { FPL("\\aa\\bb"),      FPL("bb") },
+    { FPL("\\aa\\bb\\"),    FPL("bb") },
+    { FPL("\\aa\\bb\\\\"),  FPL("bb") },
+    { FPL("\\aa\\bb\\ccc"), FPL("ccc") },
+    { FPL("\\aa"),          FPL("aa") },
+    { FPL("\\"),            FPL("\\") },
+    { FPL("\\\\"),          FPL("\\\\") },
+    { FPL("\\\\\\"),        FPL("\\") },
+    { FPL("aa\\"),          FPL("aa") },
+    { FPL("aa\\bb"),        FPL("bb") },
+    { FPL("aa\\bb\\"),      FPL("bb") },
+    { FPL("aa\\bb\\\\"),    FPL("bb") },
+    { FPL("aa\\\\bb\\\\"),  FPL("bb") },
+    { FPL("aa\\\\bb\\"),    FPL("bb") },
+    { FPL("aa\\\\bb"),      FPL("bb") },
+    { FPL("\\\\aa\\bb"),    FPL("bb") },
+    { FPL("\\\\aa\\"),      FPL("aa") },
+    { FPL("\\\\aa"),        FPL("aa") },
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("c:\\"),          FPL("\\") },
+    { FPL("c:\\\\"),        FPL("\\\\") },
+    { FPL("c:\\\\\\"),      FPL("\\") },
+    { FPL("c:\\aa"),        FPL("aa") },
+    { FPL("c:\\aa\\"),      FPL("aa") },
+    { FPL("c:\\aa\\bb"),    FPL("bb") },
+    { FPL("c:aa\\bb"),      FPL("bb") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {  // Check every table row, reporting the index on failure.
+    FilePath input(cases[i].input);
+    FilePath observed = input.BaseName();
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed.value()) <<
+              "i: " << i << ", input: " << input.value();
+  }
+}
+
+TEST_F(FilePathTest, Append) {
+  // Verifies that Append(StringType), Append(FilePath) and AppendASCII()
+  // all produce the same result for each (root, leaf) pair, and that the
+  // platform's default separator is inserted only where one is needed.
+  const struct BinaryTestData cases[] = {
+    { { FPL(""), FPL("cc") }, FPL("cc") },
+    { { FPL("."), FPL("ff") }, FPL("ff") },
+    { { FPL("/"), FPL("cc") }, FPL("/cc") },
+    { { FPL("/aa"), FPL("") }, FPL("/aa") },
+    { { FPL("/aa/"), FPL("") }, FPL("/aa") },
+    { { FPL("//aa"), FPL("") }, FPL("//aa") },
+    { { FPL("//aa/"), FPL("") }, FPL("//aa") },
+    { { FPL("//"), FPL("aa") }, FPL("//aa") },
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { { FPL("c:"), FPL("a") }, FPL("c:a") },
+    { { FPL("c:"), FPL("") }, FPL("c:") },
+    { { FPL("c:/"), FPL("a") }, FPL("c:/a") },
+    { { FPL("c://"), FPL("a") }, FPL("c://a") },
+    { { FPL("c:///"), FPL("a") }, FPL("c:/a") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    // Append introduces the default separator character, so these test cases
+    // need to be defined with different expected results on platforms that use
+    // different default separator characters.
+    { { FPL("\\"), FPL("cc") }, FPL("\\cc") },
+    { { FPL("\\aa"), FPL("") }, FPL("\\aa") },
+    { { FPL("\\aa\\"), FPL("") }, FPL("\\aa") },
+    { { FPL("\\\\aa"), FPL("") }, FPL("\\\\aa") },
+    { { FPL("\\\\aa\\"), FPL("") }, FPL("\\\\aa") },
+    { { FPL("\\\\"), FPL("aa") }, FPL("\\\\aa") },
+    { { FPL("/aa/bb"), FPL("cc") }, FPL("/aa/bb\\cc") },
+    { { FPL("/aa/bb/"), FPL("cc") }, FPL("/aa/bb\\cc") },
+    { { FPL("aa/bb/"), FPL("cc") }, FPL("aa/bb\\cc") },
+    { { FPL("aa/bb"), FPL("cc") }, FPL("aa/bb\\cc") },
+    { { FPL("a/b"), FPL("c") }, FPL("a/b\\c") },
+    { { FPL("a/b/"), FPL("c") }, FPL("a/b\\c") },
+    { { FPL("//aa"), FPL("bb") }, FPL("//aa\\bb") },
+    { { FPL("//aa/"), FPL("bb") }, FPL("//aa\\bb") },
+    { { FPL("\\aa\\bb"), FPL("cc") }, FPL("\\aa\\bb\\cc") },
+    { { FPL("\\aa\\bb\\"), FPL("cc") }, FPL("\\aa\\bb\\cc") },
+    { { FPL("aa\\bb\\"), FPL("cc") }, FPL("aa\\bb\\cc") },
+    { { FPL("aa\\bb"), FPL("cc") }, FPL("aa\\bb\\cc") },
+    { { FPL("a\\b"), FPL("c") }, FPL("a\\b\\c") },
+    { { FPL("a\\b\\"), FPL("c") }, FPL("a\\b\\c") },
+    { { FPL("\\\\aa"), FPL("bb") }, FPL("\\\\aa\\bb") },
+    { { FPL("\\\\aa\\"), FPL("bb") }, FPL("\\\\aa\\bb") },
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { { FPL("c:\\"), FPL("a") }, FPL("c:\\a") },
+    { { FPL("c:\\\\"), FPL("a") }, FPL("c:\\\\a") },
+    { { FPL("c:\\\\\\"), FPL("a") }, FPL("c:\\a") },
+    { { FPL("c:\\"), FPL("") }, FPL("c:\\") },
+    { { FPL("c:\\a"), FPL("b") }, FPL("c:\\a\\b") },
+    { { FPL("c:\\a\\"), FPL("b") }, FPL("c:\\a\\b") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#else  // FILE_PATH_USES_WIN_SEPARATORS
+    { { FPL("/aa/bb"), FPL("cc") }, FPL("/aa/bb/cc") },
+    { { FPL("/aa/bb/"), FPL("cc") }, FPL("/aa/bb/cc") },
+    { { FPL("aa/bb/"), FPL("cc") }, FPL("aa/bb/cc") },
+    { { FPL("aa/bb"), FPL("cc") }, FPL("aa/bb/cc") },
+    { { FPL("a/b"), FPL("c") }, FPL("a/b/c") },
+    { { FPL("a/b/"), FPL("c") }, FPL("a/b/c") },
+    { { FPL("//aa"), FPL("bb") }, FPL("//aa/bb") },
+    { { FPL("//aa/"), FPL("bb") }, FPL("//aa/bb") },
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { { FPL("c:/"), FPL("a") }, FPL("c:/a") },
+    { { FPL("c:/"), FPL("") }, FPL("c:/") },
+    { { FPL("c:/a"), FPL("b") }, FPL("c:/a/b") },
+    { { FPL("c:/a/"), FPL("b") }, FPL("c:/a/b") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath root(cases[i].inputs[0]);
+    FilePath::StringType leaf(cases[i].inputs[1]);
+    // Same leaf appended via the StringType overload and the FilePath
+    // overload must yield identical values.
+    FilePath observed_str = root.Append(leaf);
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed_str.value()) <<
+              "i: " << i << ", root: " << root.value() << ", leaf: " << leaf;
+    FilePath observed_path = root.Append(FilePath(leaf));
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed_path.value()) <<
+              "i: " << i << ", root: " << root.value() << ", leaf: " << leaf;
+
+    // TODO(erikkay): It would be nice to have a unicode test append value to
+    // handle the case when AppendASCII is passed UTF8
+#if defined(OS_WIN)
+    std::string ascii = WideToUTF8(leaf);
+#elif defined(OS_POSIX)
+    // On POSIX the leaf is assigned directly, so StringType is already a
+    // narrow string here and needs no conversion.
+    std::string ascii = leaf;
+#endif
+    observed_str = root.AppendASCII(ascii);
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed_str.value()) <<
+              "i: " << i << ", root: " << root.value() << ", leaf: " << leaf;
+  }
+}
+
+TEST_F(FilePathTest, StripTrailingSeparators) {
+  // Verifies StripTrailingSeparators(): redundant trailing separators are
+  // removed, but a root ("/", "//", "c:/", "\\") is never stripped down to
+  // an empty path -- note "/" -> "/" and "//" -> "//" below.
+  const struct UnaryTestData cases[] = {
+    { FPL(""), FPL("") },
+    { FPL("/"), FPL("/") },
+    { FPL("//"), FPL("//") },
+    { FPL("///"), FPL("/") },
+    { FPL("////"), FPL("/") },
+    { FPL("a/"), FPL("a") },
+    { FPL("a//"), FPL("a") },
+    { FPL("a///"), FPL("a") },
+    { FPL("a////"), FPL("a") },
+    { FPL("/a"), FPL("/a") },
+    { FPL("/a/"), FPL("/a") },
+    { FPL("/a//"), FPL("/a") },
+    { FPL("/a///"), FPL("/a") },
+    { FPL("/a////"), FPL("/a") },
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("c:"), FPL("c:") },
+    { FPL("c:/"), FPL("c:/") },
+    { FPL("c://"), FPL("c://") },
+    { FPL("c:///"), FPL("c:/") },
+    { FPL("c:////"), FPL("c:/") },
+    { FPL("c:/a"), FPL("c:/a") },
+    { FPL("c:/a/"), FPL("c:/a") },
+    { FPL("c:/a//"), FPL("c:/a") },
+    { FPL("c:/a///"), FPL("c:/a") },
+    { FPL("c:/a////"), FPL("c:/a") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { FPL("\\"), FPL("\\") },
+    { FPL("\\\\"), FPL("\\\\") },
+    { FPL("\\\\\\"), FPL("\\") },
+    { FPL("\\\\\\\\"), FPL("\\") },
+    { FPL("a\\"), FPL("a") },
+    { FPL("a\\\\"), FPL("a") },
+    { FPL("a\\\\\\"), FPL("a") },
+    { FPL("a\\\\\\\\"), FPL("a") },
+    { FPL("\\a"), FPL("\\a") },
+    { FPL("\\a\\"), FPL("\\a") },
+    { FPL("\\a\\\\"), FPL("\\a") },
+    { FPL("\\a\\\\\\"), FPL("\\a") },
+    { FPL("\\a\\\\\\\\"), FPL("\\a") },
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("c:\\"), FPL("c:\\") },
+    { FPL("c:\\\\"), FPL("c:\\\\") },
+    { FPL("c:\\\\\\"), FPL("c:\\") },
+    { FPL("c:\\\\\\\\"), FPL("c:\\") },
+    { FPL("c:\\a"), FPL("c:\\a") },
+    { FPL("c:\\a\\"), FPL("c:\\a") },
+    { FPL("c:\\a\\\\"), FPL("c:\\a") },
+    { FPL("c:\\a\\\\\\"), FPL("c:\\a") },
+    { FPL("c:\\a\\\\\\\\"), FPL("c:\\a") },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath input(cases[i].input);
+    FilePath observed = input.StripTrailingSeparators();
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed.value()) <<
+              "i: " << i << ", input: " << input.value();
+  }
+}
+
+TEST_F(FilePathTest, IsAbsolute) {
+  // Verifies IsAbsolute(). On drive-letter platforms a path is absolute only
+  // when it has a drive letter AND a separator ("c:/a"); a bare "/a" is
+  // considered drive-relative there. Elsewhere "/..." is absolute. "//..."
+  // (e.g. a UNC-style root) is absolute everywhere.
+  const struct UnaryBooleanTestData cases[] = {
+    { FPL(""), false },
+    { FPL("a"), false },
+    { FPL("c:"), false },
+    { FPL("c:a"), false },
+    { FPL("a/b"), false },
+    { FPL("//"), true },
+    { FPL("//a"), true },
+    { FPL("c:a/b"), false },
+    { FPL("?:/a"), false },
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("/"), false },
+    { FPL("/a"), false },
+    { FPL("/."), false },
+    { FPL("/.."), false },
+    { FPL("c:/"), true },
+    { FPL("c:/a"), true },
+    { FPL("c:/."), true },
+    { FPL("c:/.."), true },
+    { FPL("C:/a"), true },
+    { FPL("d:/a"), true },
+#else  // FILE_PATH_USES_DRIVE_LETTERS
+    { FPL("/"), true },
+    { FPL("/a"), true },
+    { FPL("/."), true },
+    { FPL("/.."), true },
+    { FPL("c:/"), false },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    // NOTE: several entries here (e.g. "a\\b", "\\\\", "c:\\") repeat earlier
+    // cases verbatim; they are harmless duplicates kept to match upstream.
+    { FPL("a\\b"), false },
+    { FPL("\\\\"), true },
+    { FPL("\\\\a"), true },
+    { FPL("a\\b"), false },
+    { FPL("\\\\"), true },
+    { FPL("//a"), true },
+    { FPL("c:a\\b"), false },
+    { FPL("?:\\a"), false },
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("\\"), false },
+    { FPL("\\a"), false },
+    { FPL("\\."), false },
+    { FPL("\\.."), false },
+    { FPL("c:\\"), true },
+    { FPL("c:\\"), true },
+    { FPL("c:\\a"), true },
+    { FPL("c:\\."), true },
+    { FPL("c:\\.."), true },
+    { FPL("C:\\a"), true },
+    { FPL("d:\\a"), true },
+#else  // FILE_PATH_USES_DRIVE_LETTERS
+    { FPL("\\"), true },
+    { FPL("\\a"), true },
+    { FPL("\\."), true },
+    { FPL("\\.."), true },
+    { FPL("c:\\"), false },
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath input(cases[i].input);
+    bool observed = input.IsAbsolute();
+    EXPECT_EQ(cases[i].expected, observed) <<
+              "i: " << i << ", input: " << input.value();
+  }
+}
+
+TEST_F(FilePathTest, PathComponentsTest) {
+  // Verifies GetComponents(). Expected strings encode the component vector
+  // with a '|' prepended to each component, so "|/|foo|bar" means
+  // {"/", "foo", "bar"}; an empty expected string means no components.
+  const struct UnaryTestData cases[] = {
+    { FPL("//foo/bar/baz/"),          FPL("|//|foo|bar|baz")},
+    { FPL("///"),                     FPL("|/")},
+    { FPL("/foo//bar//baz/"),         FPL("|/|foo|bar|baz")},
+    { FPL("/foo/bar/baz/"),           FPL("|/|foo|bar|baz")},
+    { FPL("/foo/bar/baz//"),          FPL("|/|foo|bar|baz")},
+    { FPL("/foo/bar/baz///"),         FPL("|/|foo|bar|baz")},
+    { FPL("/foo/bar/baz"),            FPL("|/|foo|bar|baz")},
+    { FPL("/foo/bar.bot/baz.txt"),    FPL("|/|foo|bar.bot|baz.txt")},
+    { FPL("//foo//bar/baz"),          FPL("|//|foo|bar|baz")},
+    { FPL("/"),                       FPL("|/")},
+    { FPL("foo"),                     FPL("|foo")},
+    { FPL(""),                        FPL("")},
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { FPL("e:/foo"),                  FPL("|e:|/|foo")},
+    { FPL("e:/"),                     FPL("|e:|/")},
+    { FPL("e:"),                      FPL("|e:")},
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { FPL("../foo"),                  FPL("|..|foo")},
+    { FPL("./foo"),                   FPL("|foo")},
+    { FPL("../foo/bar/"),             FPL("|..|foo|bar") },
+    { FPL("\\\\foo\\bar\\baz\\"),     FPL("|\\\\|foo|bar|baz")},
+    { FPL("\\\\\\"),                  FPL("|\\")},
+    { FPL("\\foo\\\\bar\\\\baz\\"),   FPL("|\\|foo|bar|baz")},
+    { FPL("\\foo\\bar\\baz\\"),       FPL("|\\|foo|bar|baz")},
+    { FPL("\\foo\\bar\\baz\\\\"),     FPL("|\\|foo|bar|baz")},
+    { FPL("\\foo\\bar\\baz\\\\\\"),   FPL("|\\|foo|bar|baz")},
+    { FPL("\\foo\\bar\\baz"),         FPL("|\\|foo|bar|baz")},
+    { FPL("\\foo\\bar/baz\\\\\\"),    FPL("|\\|foo|bar|baz")},
+    { FPL("/foo\\bar\\baz"),          FPL("|/|foo|bar|baz")},
+    { FPL("\\foo\\bar.bot\\baz.txt"), FPL("|\\|foo|bar.bot|baz.txt")},
+    { FPL("\\\\foo\\\\bar\\baz"),     FPL("|\\\\|foo|bar|baz")},
+    { FPL("\\"),                      FPL("|\\")},
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath input(cases[i].input);
+    std::vector<FilePath::StringType> comps;
+    input.GetComponents(&comps);
+
+    // Re-encode the component vector with the '|' prefix scheme described
+    // above so it can be compared against the expected string.
+    FilePath::StringType observed;
+    for (size_t j = 0; j < comps.size(); ++j) {
+      observed.append(FILE_PATH_LITERAL("|"), 1);
+      observed.append(comps[j]);
+    }
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed) <<
+              "i: " << i << ", input: " << input.value();
+  }
+}
+
+TEST_F(FilePathTest, IsParentTest) {
+  // Verifies IsParent(): strict ancestry only (a path is not its own
+  // parent), component-wise comparison (no "foo" vs "foo2" prefix matches),
+  // and -- per the drive-letter cases -- drive letters compare
+  // case-insensitively while the rest of the path is case-sensitive.
+  const struct BinaryBooleanTestData cases[] = {
+    { { FPL("/"), FPL("/foo/bar/baz") },      true},
+    { { FPL("/foo/bar"), FPL("/foo/bar/baz") },      true},
+    { { FPL("/foo/bar/"), FPL("/foo/bar/baz") },      true},
+    { { FPL("//foo/bar/"), FPL("//foo/bar/baz") },      true},
+    { { FPL("/foo/bar"), FPL("/foo2/bar/baz") },     false},
+    { { FPL("/foo/bar.txt"), FPL("/foo/bar/baz") },     false},
+    { { FPL("/foo/bar"), FPL("/foo/bar2/baz") },     false},
+    { { FPL("/foo/bar"), FPL("/foo/bar") },          false},
+    { { FPL("/foo/bar/baz"), FPL("/foo/bar") },          false},
+    { { FPL("foo/bar"), FPL("foo/bar/baz") },       true},
+    { { FPL("foo/bar"), FPL("foo2/bar/baz") },      false},
+    { { FPL("foo/bar"), FPL("foo/bar2/baz") },      false},
+    { { FPL(""), FPL("foo") },               false},
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { { FPL("c:/foo/bar"), FPL("c:/foo/bar/baz") },   true},
+    { { FPL("E:/foo/bar"), FPL("e:/foo/bar/baz") },   true},
+    { { FPL("f:/foo/bar"), FPL("F:/foo/bar/baz") },   true},
+    { { FPL("E:/Foo/bar"), FPL("e:/foo/bar/baz") },   false},
+    { { FPL("f:/foo/bar"), FPL("F:/foo/Bar/baz") },   false},
+    { { FPL("c:/"), FPL("c:/foo/bar/baz") },   true},
+    { { FPL("c:"), FPL("c:/foo/bar/baz") },   true},
+    { { FPL("c:/foo/bar"), FPL("d:/foo/bar/baz") },   false},
+    { { FPL("c:/foo/bar"), FPL("D:/foo/bar/baz") },   false},
+    { { FPL("C:/foo/bar"), FPL("d:/foo/bar/baz") },   false},
+    { { FPL("c:/foo/bar"), FPL("c:/foo2/bar/baz") },  false},
+    { { FPL("e:/foo/bar"), FPL("E:/foo2/bar/baz") },  false},
+    { { FPL("F:/foo/bar"), FPL("f:/foo2/bar/baz") },  false},
+    { { FPL("c:/foo/bar"), FPL("c:/foo/bar2/baz") },  false},
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("\\foo\\bar"), FPL("\\foo\\bar\\baz") }, true},
+    { { FPL("\\foo/bar"), FPL("\\foo\\bar\\baz") }, true},
+    { { FPL("\\foo/bar"), FPL("\\foo/bar/baz") },   true},
+    { { FPL("\\"), FPL("\\foo\\bar\\baz") }, true},
+    { { FPL(""), FPL("\\foo\\bar\\baz") }, false},
+    { { FPL("\\foo\\bar"), FPL("\\foo2\\bar\\baz") }, false},
+    { { FPL("\\foo\\bar"), FPL("\\foo\\bar2\\baz") }, false},
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath parent(cases[i].inputs[0]);
+    FilePath child(cases[i].inputs[1]);
+
+    EXPECT_EQ(parent.IsParent(child), cases[i].expected) <<
+        "i: " << i << ", parent: " << parent.value() << ", child: " <<
+        child.value();
+  }
+}
+
+TEST_F(FilePathTest, AppendRelativePathTest) {
+  // Verifies AppendRelativePath(). An empty expected string encodes both
+  // "the call returns false" and "the output path is left unchanged" --
+  // the loop below derives the expected success flag from expected[0].
+  // Each case is run twice: appending onto an empty path and onto "blah".
+  const struct BinaryTestData cases[] = {
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("/"), FPL("/foo/bar/baz") },      FPL("foo\\bar\\baz")},
+#else  // FILE_PATH_USES_WIN_SEPARATORS
+    { { FPL("/"), FPL("/foo/bar/baz") },      FPL("foo/bar/baz")},
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+    { { FPL("/foo/bar"), FPL("/foo/bar/baz") },      FPL("baz")},
+    { { FPL("/foo/bar/"), FPL("/foo/bar/baz") },      FPL("baz")},
+    { { FPL("//foo/bar/"), FPL("//foo/bar/baz") },      FPL("baz")},
+    { { FPL("/foo/bar"), FPL("/foo2/bar/baz") },     FPL("")},
+    { { FPL("/foo/bar.txt"), FPL("/foo/bar/baz") },     FPL("")},
+    { { FPL("/foo/bar"), FPL("/foo/bar2/baz") },     FPL("")},
+    { { FPL("/foo/bar"), FPL("/foo/bar") },          FPL("")},
+    { { FPL("/foo/bar/baz"), FPL("/foo/bar") },          FPL("")},
+    { { FPL("foo/bar"), FPL("foo/bar/baz") },       FPL("baz")},
+    { { FPL("foo/bar"), FPL("foo2/bar/baz") },      FPL("")},
+    { { FPL("foo/bar"), FPL("foo/bar2/baz") },      FPL("")},
+    { { FPL(""), FPL("foo") },               FPL("")},
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { { FPL("c:/foo/bar"), FPL("c:/foo/bar/baz") },   FPL("baz")},
+    { { FPL("E:/foo/bar"), FPL("e:/foo/bar/baz") },   FPL("baz")},
+    { { FPL("f:/foo/bar"), FPL("F:/foo/bar/baz") },   FPL("baz")},
+    { { FPL("E:/Foo/bar"), FPL("e:/foo/bar/baz") },   FPL("")},
+    { { FPL("f:/foo/bar"), FPL("F:/foo/Bar/baz") },   FPL("")},
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("c:/"), FPL("c:/foo/bar/baz") },   FPL("foo\\bar\\baz")},
+    // TODO(akalin): Figure out how to handle the corner case in the
+    // commented-out test case below.  Appending to an empty path gives
+    // /foo\bar\baz but appending to a nonempty path "blah" gives
+    // blah\foo\bar\baz.
+    // { { FPL("c:"), FPL("c:/foo/bar/baz") },   FPL("foo\\bar\\baz")},
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+    { { FPL("c:/foo/bar"), FPL("d:/foo/bar/baz") },   FPL("")},
+    { { FPL("c:/foo/bar"), FPL("D:/foo/bar/baz") },   FPL("")},
+    { { FPL("C:/foo/bar"), FPL("d:/foo/bar/baz") },   FPL("")},
+    { { FPL("c:/foo/bar"), FPL("c:/foo2/bar/baz") },  FPL("")},
+    { { FPL("e:/foo/bar"), FPL("E:/foo2/bar/baz") },  FPL("")},
+    { { FPL("F:/foo/bar"), FPL("f:/foo2/bar/baz") },  FPL("")},
+    { { FPL("c:/foo/bar"), FPL("c:/foo/bar2/baz") },  FPL("")},
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("\\foo\\bar"), FPL("\\foo\\bar\\baz") }, FPL("baz")},
+    { { FPL("\\foo/bar"), FPL("\\foo\\bar\\baz") }, FPL("baz")},
+    { { FPL("\\foo/bar"), FPL("\\foo/bar/baz") },   FPL("baz")},
+    { { FPL("\\"), FPL("\\foo\\bar\\baz") }, FPL("foo\\bar\\baz")},
+    { { FPL(""), FPL("\\foo\\bar\\baz") }, FPL("")},
+    { { FPL("\\foo\\bar"), FPL("\\foo2\\bar\\baz") }, FPL("")},
+    { { FPL("\\foo\\bar"), FPL("\\foo\\bar2\\baz") }, FPL("")},
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  const FilePath base(FPL("blah"));
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath parent(cases[i].inputs[0]);
+    FilePath child(cases[i].inputs[1]);
+    {
+      // Variant 1: append onto a default-constructed (empty) path.
+      FilePath result;
+      bool success = parent.AppendRelativePath(child, &result);
+      EXPECT_EQ(cases[i].expected[0] != '\0', success) <<
+        "i: " << i << ", parent: " << parent.value() << ", child: " <<
+        child.value();
+      EXPECT_STREQ(cases[i].expected, result.value().c_str()) <<
+        "i: " << i << ", parent: " << parent.value() << ", child: " <<
+        child.value();
+    }
+    {
+      // Variant 2: append onto a pre-populated path ("blah").
+      FilePath result(base);
+      bool success = parent.AppendRelativePath(child, &result);
+      EXPECT_EQ(cases[i].expected[0] != '\0', success) <<
+        "i: " << i << ", parent: " << parent.value() << ", child: " <<
+        child.value();
+      EXPECT_EQ(base.Append(cases[i].expected).value(), result.value()) <<
+        "i: " << i << ", parent: " << parent.value() << ", child: " <<
+        child.value();
+    }
+  }
+}
+
+TEST_F(FilePathTest, EqualityTest) {
+  // Verifies operator== and operator!= on FilePath. Per the drive-letter
+  // cases, drive letters compare case-insensitively ("E:" == "e:") but
+  // the remainder of the path is case-sensitive, and "/" vs "\\" are
+  // distinct characters (no separator normalization).
+  const struct BinaryBooleanTestData cases[] = {
+    { { FPL("/foo/bar/baz"), FPL("/foo/bar/baz") },      true},
+    { { FPL("/foo/bar"), FPL("/foo/bar/baz") },      false},
+    { { FPL("/foo/bar/baz"), FPL("/foo/bar") },          false},
+    { { FPL("//foo/bar/"), FPL("//foo/bar/") },         true},
+    { { FPL("/foo/bar"), FPL("/foo2/bar") },         false},
+    { { FPL("/foo/bar.txt"), FPL("/foo/bar") },          false},
+    { { FPL("foo/bar"), FPL("foo/bar") },           true},
+    { { FPL("foo/bar"), FPL("foo/bar/baz") },       false},
+    { { FPL(""), FPL("foo") },               false},
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { { FPL("c:/foo/bar"), FPL("c:/foo/bar") },       true},
+    { { FPL("E:/foo/bar"), FPL("e:/foo/bar") },       true},
+    { { FPL("f:/foo/bar"), FPL("F:/foo/bar") },       true},
+    { { FPL("E:/Foo/bar"), FPL("e:/foo/bar") },       false},
+    { { FPL("f:/foo/bar"), FPL("F:/foo/Bar") },       false},
+    { { FPL("c:/"), FPL("c:/") },              true},
+    { { FPL("c:"), FPL("c:") },               true},
+    { { FPL("c:/foo/bar"), FPL("d:/foo/bar") },       false},
+    { { FPL("c:/foo/bar"), FPL("D:/foo/bar") },       false},
+    { { FPL("C:/foo/bar"), FPL("d:/foo/bar") },       false},
+    { { FPL("c:/foo/bar"), FPL("c:/foo2/bar") },      false},
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("\\foo\\bar"), FPL("\\foo\\bar") },     true},
+    { { FPL("\\foo/bar"), FPL("\\foo/bar") },      true},
+    { { FPL("\\foo/bar"), FPL("\\foo\\bar") },     false},
+    { { FPL("\\"), FPL("\\") },            true},
+    { { FPL("\\"), FPL("/") },             false},
+    { { FPL(""), FPL("\\") },            false},
+    { { FPL("\\foo\\bar"), FPL("\\foo2\\bar") },    false},
+    { { FPL("\\foo\\bar"), FPL("\\foo\\bar2") },    false},
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { { FPL("c:\\foo\\bar"), FPL("c:\\foo\\bar") }, true},
+    { { FPL("E:\\foo\\bar"), FPL("e:\\foo\\bar") }, true},
+    { { FPL("f:\\foo\\bar"), FPL("F:\\foo/bar") }, false},
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#endif  // FILE_PATH_USES_WIN_SEPARATORS
+  };
+
+  // First pass: operator== must agree with the expectation.
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath a(cases[i].inputs[0]);
+    FilePath b(cases[i].inputs[1]);
+
+    EXPECT_EQ(a == b, cases[i].expected) <<
+      "equality i: " << i << ", a: " << a.value() << ", b: " <<
+      b.value();
+  }
+
+  // Second pass: operator!= must be the exact negation.
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath a(cases[i].inputs[0]);
+    FilePath b(cases[i].inputs[1]);
+
+    EXPECT_EQ(a != b, !cases[i].expected) <<
+      "inequality i: " << i << ", a: " << a.value() << ", b: " <<
+      b.value();
+  }
+}
+
+TEST_F(FilePathTest, Extension) {
+  // Smoke test for Extension() / FinalExtension() / RemoveExtension() on a
+  // simple single-extension path: extension includes the leading dot, and
+  // removing it from an extensionless path is a no-op.
+  FilePath base_dir(FILE_PATH_LITERAL("base_dir"));
+
+  FilePath jpg = base_dir.Append(FILE_PATH_LITERAL("foo.jpg"));
+  EXPECT_EQ(FILE_PATH_LITERAL(".jpg"), jpg.Extension());
+  EXPECT_EQ(FILE_PATH_LITERAL(".jpg"), jpg.FinalExtension());
+
+  FilePath base = jpg.BaseName().RemoveExtension();
+  EXPECT_EQ(FILE_PATH_LITERAL("foo"), base.value());
+
+  FilePath path_no_ext = base_dir.Append(base);
+  EXPECT_EQ(path_no_ext.value(), jpg.RemoveExtension().value());
+
+  // A path without an extension reports an empty extension, and removing
+  // again changes nothing.
+  EXPECT_EQ(path_no_ext.value(), path_no_ext.RemoveExtension().value());
+  EXPECT_EQ(FILE_PATH_LITERAL(""), path_no_ext.Extension());
+  EXPECT_EQ(FILE_PATH_LITERAL(""), path_no_ext.FinalExtension());
+}
+
+TEST_F(FilePathTest, Extension2) {
+  // Exhaustive cases for Extension() and FinalExtension(). The first table
+  // holds paths where both functions agree; double_extension_cases holds
+  // paths (e.g. ".tar.gz") where Extension() returns the compound extension
+  // and is therefore only checked against Extension() below.
+  const struct UnaryTestData cases[] = {
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { FPL("C:\\a\\b\\c.ext"),        FPL(".ext") },
+    { FPL("C:\\a\\b\\c."),           FPL(".") },
+    { FPL("C:\\a\\b\\c"),            FPL("") },
+    { FPL("C:\\a\\b\\"),             FPL("") },
+    { FPL("C:\\a\\b.\\"),            FPL(".") },
+    { FPL("C:\\a\\b\\c.ext1.ext2"),  FPL(".ext2") },
+    { FPL("C:\\foo.bar\\\\\\"),      FPL(".bar") },
+    { FPL("C:\\foo.bar\\.."),        FPL("") },
+    { FPL("C:\\foo.bar\\..\\\\"),    FPL("") },
+#endif
+    { FPL("/foo/bar/baz.ext"),       FPL(".ext") },
+    { FPL("/foo/bar/baz."),          FPL(".") },
+    { FPL("/foo/bar/baz.."),         FPL(".") },
+    { FPL("/foo/bar/baz"),           FPL("") },
+    { FPL("/foo/bar/"),              FPL("") },
+    { FPL("/foo/bar./"),             FPL(".") },
+    { FPL("/foo/bar/baz.ext1.ext2"), FPL(".ext2") },
+    { FPL("/subversion-1.6.12.zip"), FPL(".zip") },
+    { FPL("/foo.12345.gz"),          FPL(".gz") },
+    { FPL("/foo..gz"),               FPL(".gz") },
+    { FPL("."),                      FPL("") },
+    { FPL(".."),                     FPL("") },
+    { FPL("./foo"),                  FPL("") },
+    { FPL("./foo.ext"),              FPL(".ext") },
+    { FPL("/foo.ext1/bar.ext2"),     FPL(".ext2") },
+    { FPL("/foo.bar////"),           FPL(".bar") },
+    { FPL("/foo.bar/.."),            FPL("") },
+    { FPL("/foo.bar/..////"),        FPL("") },
+    { FPL("/foo.1234.luser.js"),     FPL(".js") },
+    { FPL("/user.js"),               FPL(".js") },
+  };
+  const struct UnaryTestData double_extension_cases[] = {
+    { FPL("/foo.tar.gz"),            FPL(".tar.gz") },
+    { FPL("/foo.tar.Z"),             FPL(".tar.Z") },
+    { FPL("/foo.tar.bz2"),           FPL(".tar.bz2") },
+    { FPL("/foo.1234.gz"),           FPL(".1234.gz") },
+    { FPL("/foo.1234.tar.gz"),       FPL(".tar.gz") },
+    { FPL("/foo.tar.tar.gz"),        FPL(".tar.gz") },
+    { FPL("/foo.tar.gz.gz"),         FPL(".gz.gz") },
+    { FPL("/foo.1234.user.js"),      FPL(".user.js") },
+    { FPL("foo.user.js"),            FPL(".user.js") },
+    { FPL("/foo.tar.bz"),            FPL(".tar.bz") },
+  };
+  for (unsigned int i = 0; i < arraysize(cases); ++i) {
+    FilePath path(cases[i].input);
+    FilePath::StringType extension = path.Extension();
+    FilePath::StringType final_extension = path.FinalExtension();
+    EXPECT_STREQ(cases[i].expected, extension.c_str())
+        << "i: " << i << ", path: " << path.value();
+    EXPECT_STREQ(cases[i].expected, final_extension.c_str())
+        << "i: " << i << ", path: " << path.value();
+  }
+  for (unsigned int i = 0; i < arraysize(double_extension_cases); ++i) {
+    FilePath path(double_extension_cases[i].input);
+    FilePath::StringType extension = path.Extension();
+    EXPECT_STREQ(double_extension_cases[i].expected, extension.c_str())
+        << "i: " << i << ", path: " << path.value();
+  }
+}
+
+TEST_F(FilePathTest, InsertBeforeExtension) {
+  // Verifies InsertBeforeExtension(): inputs[1] is spliced in immediately
+  // before the final extension's dot. An empty expected value encodes the
+  // failure case, where the function returns an empty path (e.g. for "",
+  // "." and ".." inputs).
+  const struct BinaryTestData cases[] = {
+    { { FPL(""), FPL("") }, FPL("") },
+    { { FPL(""), FPL("txt") }, FPL("") },
+    { { FPL("."), FPL("txt") }, FPL("") },
+    { { FPL(".."), FPL("txt") }, FPL("") },
+    { { FPL("foo.dll"), FPL("txt") }, FPL("footxt.dll") },
+    { { FPL("."), FPL("") }, FPL(".") },
+    { { FPL("foo.dll"), FPL(".txt") }, FPL("foo.txt.dll") },
+    { { FPL("foo"), FPL("txt") }, FPL("footxt") },
+    { { FPL("foo"), FPL(".txt") }, FPL("foo.txt") },
+    { { FPL("foo.baz.dll"), FPL("txt") }, FPL("foo.baztxt.dll") },
+    { { FPL("foo.baz.dll"), FPL(".txt") }, FPL("foo.baz.txt.dll") },
+    { { FPL("foo.dll"), FPL("") }, FPL("foo.dll") },
+    { { FPL("foo.dll"), FPL(".") }, FPL("foo..dll") },
+    { { FPL("foo"), FPL("") }, FPL("foo") },
+    { { FPL("foo"), FPL(".") }, FPL("foo.") },
+    { { FPL("foo.baz.dll"), FPL("") }, FPL("foo.baz.dll") },
+    { { FPL("foo.baz.dll"), FPL(".") }, FPL("foo.baz..dll") },
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("\\"), FPL("") }, FPL("\\") },
+    { { FPL("\\"), FPL("txt") }, FPL("\\txt") },
+    { { FPL("\\."), FPL("txt") }, FPL("") },
+    { { FPL("\\.."), FPL("txt") }, FPL("") },
+    { { FPL("\\."), FPL("") }, FPL("\\.") },
+    { { FPL("C:\\bar\\foo.dll"), FPL("txt") },
+        FPL("C:\\bar\\footxt.dll") },
+    { { FPL("C:\\bar.baz\\foodll"), FPL("txt") },
+        FPL("C:\\bar.baz\\foodlltxt") },
+    { { FPL("C:\\bar.baz\\foo.dll"), FPL("txt") },
+        FPL("C:\\bar.baz\\footxt.dll") },
+    { { FPL("C:\\bar.baz\\foo.dll.exe"), FPL("txt") },
+        FPL("C:\\bar.baz\\foo.dlltxt.exe") },
+    { { FPL("C:\\bar.baz\\foo"), FPL("") },
+        FPL("C:\\bar.baz\\foo") },
+    { { FPL("C:\\bar.baz\\foo.exe"), FPL("") },
+        FPL("C:\\bar.baz\\foo.exe") },
+    { { FPL("C:\\bar.baz\\foo.dll.exe"), FPL("") },
+        FPL("C:\\bar.baz\\foo.dll.exe") },
+    { { FPL("C:\\bar\\baz\\foo.exe"), FPL(" (1)") },
+        FPL("C:\\bar\\baz\\foo (1).exe") },
+    { { FPL("C:\\foo.baz\\\\"), FPL(" (1)") }, FPL("C:\\foo (1).baz") },
+    { { FPL("C:\\foo.baz\\..\\"), FPL(" (1)") }, FPL("") },
+#endif
+    { { FPL("/"), FPL("") }, FPL("/") },
+    { { FPL("/"), FPL("txt") }, FPL("/txt") },
+    { { FPL("/."), FPL("txt") }, FPL("") },
+    { { FPL("/.."), FPL("txt") }, FPL("") },
+    { { FPL("/."), FPL("") }, FPL("/.") },
+    { { FPL("/bar/foo.dll"), FPL("txt") }, FPL("/bar/footxt.dll") },
+    { { FPL("/bar.baz/foodll"), FPL("txt") }, FPL("/bar.baz/foodlltxt") },
+    { { FPL("/bar.baz/foo.dll"), FPL("txt") }, FPL("/bar.baz/footxt.dll") },
+    { { FPL("/bar.baz/foo.dll.exe"), FPL("txt") },
+      FPL("/bar.baz/foo.dlltxt.exe") },
+    { { FPL("/bar.baz/foo"), FPL("") }, FPL("/bar.baz/foo") },
+    { { FPL("/bar.baz/foo.exe"), FPL("") }, FPL("/bar.baz/foo.exe") },
+    { { FPL("/bar.baz/foo.dll.exe"), FPL("") }, FPL("/bar.baz/foo.dll.exe") },
+    { { FPL("/bar/baz/foo.exe"), FPL(" (1)") }, FPL("/bar/baz/foo (1).exe") },
+    { { FPL("/bar/baz/..////"), FPL(" (1)") }, FPL("") },
+  };
+  for (unsigned int i = 0; i < arraysize(cases); ++i) {
+    FilePath path(cases[i].inputs[0]);
+    FilePath result = path.InsertBeforeExtension(cases[i].inputs[1]);
+    EXPECT_EQ(cases[i].expected, result.value()) << "i: " << i <<
+        ", path: " << path.value() << ", insert: " << cases[i].inputs[1];
+  }
+}
+
+TEST_F(FilePathTest, RemoveExtension) {
+  // Verifies RemoveExtension() and RemoveFinalExtension(). For every entry
+  // in the shared table the two functions agree; the trailing scoped case
+  // covers a double extension (".tar.gz"), the only situation where they
+  // differ.
+  const struct UnaryTestData cases[] = {
+    { FPL(""),                    FPL("") },
+    { FPL("."),                   FPL(".") },
+    { FPL(".."),                  FPL("..") },
+    { FPL("foo.dll"),             FPL("foo") },
+    { FPL("./foo.dll"),           FPL("./foo") },
+    { FPL("foo..dll"),            FPL("foo.") },
+    { FPL("foo"),                 FPL("foo") },
+    { FPL("foo."),                FPL("foo") },
+    { FPL("foo.."),               FPL("foo.") },
+    { FPL("foo.baz.dll"),         FPL("foo.baz") },
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { FPL("C:\\foo.bar\\foo"),    FPL("C:\\foo.bar\\foo") },
+    { FPL("C:\\foo.bar\\..\\\\"), FPL("C:\\foo.bar\\..\\\\") },
+#endif
+    { FPL("/foo.bar/foo"),        FPL("/foo.bar/foo") },
+    { FPL("/foo.bar/..////"),     FPL("/foo.bar/..////") },
+  };
+  for (unsigned int i = 0; i < arraysize(cases); ++i) {
+    FilePath path(cases[i].input);
+    FilePath removed = path.RemoveExtension();
+    FilePath removed_final = path.RemoveFinalExtension();
+    EXPECT_EQ(cases[i].expected, removed.value()) << "i: " << i <<
+        ", path: " << path.value();
+    EXPECT_EQ(cases[i].expected, removed_final.value()) << "i: " << i <<
+        ", path: " << path.value();
+  }
+  {
+    // Double extension: RemoveExtension() strips ".tar.gz" entirely while
+    // RemoveFinalExtension() strips only the last ".gz" component.
+    FilePath path(FPL("foo.tar.gz"));
+    FilePath removed = path.RemoveExtension();
+    FilePath removed_final = path.RemoveFinalExtension();
+    EXPECT_EQ(FPL("foo"), removed.value()) << ", path: " << path.value();
+    EXPECT_EQ(FPL("foo.tar"), removed_final.value()) << ", path: "
+                                                     << path.value();
+  }
+}
+
+TEST_F(FilePathTest, ReplaceExtension) {
+  // Verifies ReplaceExtension(): the replacement may be given with or
+  // without its leading dot ("txt" and ".txt" behave the same); an empty or
+  // "."-only replacement removes the extension; an empty expected value
+  // encodes the failure case ("", ".", "..", trailing "..").
+  const struct BinaryTestData cases[] = {
+    { { FPL(""), FPL("") }, FPL("") },
+    { { FPL(""), FPL("txt") }, FPL("") },
+    { { FPL("."), FPL("txt") }, FPL("") },
+    { { FPL(".."), FPL("txt") }, FPL("") },
+    { { FPL("."), FPL("") }, FPL("") },
+    { { FPL("foo.dll"), FPL("txt") }, FPL("foo.txt") },
+    { { FPL("./foo.dll"), FPL("txt") }, FPL("./foo.txt") },
+    { { FPL("foo..dll"), FPL("txt") }, FPL("foo..txt") },
+    { { FPL("foo.dll"), FPL(".txt") }, FPL("foo.txt") },
+    { { FPL("foo"), FPL("txt") }, FPL("foo.txt") },
+    { { FPL("foo."), FPL("txt") }, FPL("foo.txt") },
+    { { FPL("foo.."), FPL("txt") }, FPL("foo..txt") },
+    { { FPL("foo"), FPL(".txt") }, FPL("foo.txt") },
+    { { FPL("foo.baz.dll"), FPL("txt") }, FPL("foo.baz.txt") },
+    { { FPL("foo.baz.dll"), FPL(".txt") }, FPL("foo.baz.txt") },
+    { { FPL("foo.dll"), FPL("") }, FPL("foo") },
+    { { FPL("foo.dll"), FPL(".") }, FPL("foo") },
+    { { FPL("foo"), FPL("") }, FPL("foo") },
+    { { FPL("foo"), FPL(".") }, FPL("foo") },
+    { { FPL("foo.baz.dll"), FPL("") }, FPL("foo.baz") },
+    { { FPL("foo.baz.dll"), FPL(".") }, FPL("foo.baz") },
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("C:\\foo.bar\\foo"), FPL("baz") }, FPL("C:\\foo.bar\\foo.baz") },
+    { { FPL("C:\\foo.bar\\..\\\\"), FPL("baz") }, FPL("") },
+#endif
+    { { FPL("/foo.bar/foo"), FPL("baz") }, FPL("/foo.bar/foo.baz") },
+    { { FPL("/foo.bar/..////"), FPL("baz") }, FPL("") },
+  };
+  for (unsigned int i = 0; i < arraysize(cases); ++i) {
+    FilePath path(cases[i].inputs[0]);
+    FilePath replaced = path.ReplaceExtension(cases[i].inputs[1]);
+    EXPECT_EQ(cases[i].expected, replaced.value()) << "i: " << i <<
+        ", path: " << path.value() << ", replace: " << cases[i].inputs[1];
+  }
+}
+
+TEST_F(FilePathTest, AddExtension) {
+  // Verifies AddExtension(): unlike ReplaceExtension(), an existing
+  // extension is kept and the new one is appended after it
+  // ("foo.dll" + "txt" -> "foo.dll.txt"). Empty or "."-only additions are
+  // no-ops; an empty expected value encodes the failure case.
+  const struct BinaryTestData cases[] = {
+    { { FPL(""), FPL("") }, FPL("") },
+    { { FPL(""), FPL("txt") }, FPL("") },
+    { { FPL("."), FPL("txt") }, FPL("") },
+    { { FPL(".."), FPL("txt") }, FPL("") },
+    { { FPL("."), FPL("") }, FPL("") },
+    { { FPL("foo.dll"), FPL("txt") }, FPL("foo.dll.txt") },
+    { { FPL("./foo.dll"), FPL("txt") }, FPL("./foo.dll.txt") },
+    { { FPL("foo..dll"), FPL("txt") }, FPL("foo..dll.txt") },
+    { { FPL("foo.dll"), FPL(".txt") }, FPL("foo.dll.txt") },
+    { { FPL("foo"), FPL("txt") }, FPL("foo.txt") },
+    { { FPL("foo."), FPL("txt") }, FPL("foo.txt") },
+    { { FPL("foo.."), FPL("txt") }, FPL("foo..txt") },
+    { { FPL("foo"), FPL(".txt") }, FPL("foo.txt") },
+    { { FPL("foo.baz.dll"), FPL("txt") }, FPL("foo.baz.dll.txt") },
+    { { FPL("foo.baz.dll"), FPL(".txt") }, FPL("foo.baz.dll.txt") },
+    { { FPL("foo.dll"), FPL("") }, FPL("foo.dll") },
+    { { FPL("foo.dll"), FPL(".") }, FPL("foo.dll") },
+    { { FPL("foo"), FPL("") }, FPL("foo") },
+    { { FPL("foo"), FPL(".") }, FPL("foo") },
+    { { FPL("foo.baz.dll"), FPL("") }, FPL("foo.baz.dll") },
+    { { FPL("foo.baz.dll"), FPL(".") }, FPL("foo.baz.dll") },
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("C:\\foo.bar\\foo"), FPL("baz") }, FPL("C:\\foo.bar\\foo.baz") },
+    { { FPL("C:\\foo.bar\\..\\\\"), FPL("baz") }, FPL("") },
+#endif
+    { { FPL("/foo.bar/foo"), FPL("baz") }, FPL("/foo.bar/foo.baz") },
+    { { FPL("/foo.bar/..////"), FPL("baz") }, FPL("") },
+  };
+  for (unsigned int i = 0; i < arraysize(cases); ++i) {
+    FilePath path(cases[i].inputs[0]);
+    FilePath added = path.AddExtension(cases[i].inputs[1]);
+    EXPECT_EQ(cases[i].expected, added.value()) << "i: " << i <<
+        ", path: " << path.value() << ", add: " << cases[i].inputs[1];
+  }
+}
+
+TEST_F(FilePathTest, MatchesExtension) {
+  // Verifies MatchesExtension(): comparison is against the FINAL extension
+  // only ("foo.txt.dll" does not match ".txt") and is case-insensitive for
+  // ASCII everywhere; the OS_WIN/OS_MACOSX block additionally expects
+  // case-insensitive matching for non-ASCII letters on those platforms.
+  const struct BinaryBooleanTestData cases[] = {
+    { { FPL("foo"), FPL("") }, true},
+    { { FPL("foo"), FPL(".") }, false},
+    { { FPL("foo."), FPL("") }, false},
+    { { FPL("foo."), FPL(".") }, true},
+    { { FPL("foo.txt"), FPL(".dll") }, false},
+    { { FPL("foo.txt"), FPL(".txt") }, true},
+    { { FPL("foo.txt.dll"), FPL(".txt") }, false},
+    { { FPL("foo.txt.dll"), FPL(".dll") }, true},
+    { { FPL("foo.TXT"), FPL(".txt") }, true},
+    { { FPL("foo.txt"), FPL(".TXT") }, true},
+    { { FPL("foo.tXt"), FPL(".txt") }, true},
+    { { FPL("foo.txt"), FPL(".tXt") }, true},
+    { { FPL("foo.tXt"), FPL(".TXT") }, true},
+    { { FPL("foo.tXt"), FPL(".tXt") }, true},
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+    { { FPL("c:/foo.txt.dll"), FPL(".txt") }, false},
+    { { FPL("c:/foo.txt"), FPL(".txt") }, true},
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+    { { FPL("c:\\bar\\foo.txt.dll"), FPL(".txt") }, false},
+    { { FPL("c:\\bar\\foo.txt"), FPL(".txt") }, true},
+#endif  // FILE_PATH_USES_DRIVE_LETTERS
+    { { FPL("/bar/foo.txt.dll"), FPL(".txt") }, false},
+    { { FPL("/bar/foo.txt"), FPL(".txt") }, true},
+#if defined(OS_WIN) || defined(OS_MACOSX)
+    // Umlauts A, O, U: direct comparison, and upper case vs. lower case
+    { { FPL("foo.\u00E4\u00F6\u00FC"), FPL(".\u00E4\u00F6\u00FC") }, true},
+    { { FPL("foo.\u00C4\u00D6\u00DC"), FPL(".\u00E4\u00F6\u00FC") }, true},
+    // C with circumflex: direct comparison, and upper case vs. lower case
+    { { FPL("foo.\u0109"), FPL(".\u0109") }, true},
+    { { FPL("foo.\u0108"), FPL(".\u0109") }, true},
+#endif
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath path(cases[i].inputs[0]);
+    FilePath::StringType ext(cases[i].inputs[1]);
+
+    EXPECT_EQ(cases[i].expected, path.MatchesExtension(ext)) <<
+        "i: " << i << ", path: " << path.value() << ", ext: " << ext;
+  }
+}
+
+TEST_F(FilePathTest, CompareIgnoreCase) {
+  // Verifies FilePath::CompareIgnoreCase(): strcmp-style result (-1/0/1).
+  // ASCII letters fold case on every platform; the platform-guarded blocks
+  // encode each OS's differing case-folding behavior for non-ASCII
+  // codepoints, and the OS_MACOSX block also exercises Unicode
+  // normalization (combining marks vs. precomposed characters).
+  const struct BinaryIntTestData cases[] = {
+    { { FPL("foo"), FPL("foo") }, 0},
+    { { FPL("FOO"), FPL("foo") }, 0},
+    { { FPL("foo.ext"), FPL("foo.ext") }, 0},
+    { { FPL("FOO.EXT"), FPL("foo.ext") }, 0},
+    { { FPL("Foo.Ext"), FPL("foo.ext") }, 0},
+    { { FPL("foO"), FPL("foo") }, 0},
+    { { FPL("foo"), FPL("foO") }, 0},
+    { { FPL("fOo"), FPL("foo") }, 0},
+    { { FPL("foo"), FPL("fOo") }, 0},
+    { { FPL("bar"), FPL("foo") }, -1},
+    { { FPL("foo"), FPL("bar") }, 1},
+    { { FPL("BAR"), FPL("foo") }, -1},
+    { { FPL("FOO"), FPL("bar") }, 1},
+    { { FPL("bar"), FPL("FOO") }, -1},
+    { { FPL("foo"), FPL("BAR") }, 1},
+    { { FPL("BAR"), FPL("FOO") }, -1},
+    { { FPL("FOO"), FPL("BAR") }, 1},
+    // German "Eszett" (lower case and the new-fangled upper case)
+    // Note that uc(<lowercase eszett>) => "SS", NOT <uppercase eszett>!
+    // However, neither Windows nor Mac OSX converts these.
+    // (or even have glyphs for <uppercase eszett>)
+    { { FPL("\u00DF"), FPL("\u00DF") }, 0},
+    { { FPL("\u1E9E"), FPL("\u1E9E") }, 0},
+    { { FPL("\u00DF"), FPL("\u1E9E") }, -1},
+    { { FPL("SS"), FPL("\u00DF") }, -1},
+    { { FPL("SS"), FPL("\u1E9E") }, -1},
+#if defined(OS_WIN) || defined(OS_MACOSX)
+    // Umlauts A, O, U: direct comparison, and upper case vs. lower case
+    { { FPL("\u00E4\u00F6\u00FC"), FPL("\u00E4\u00F6\u00FC") }, 0},
+    { { FPL("\u00C4\u00D6\u00DC"), FPL("\u00E4\u00F6\u00FC") }, 0},
+    // C with circumflex: direct comparison, and upper case vs. lower case
+    { { FPL("\u0109"), FPL("\u0109") }, 0},
+    { { FPL("\u0108"), FPL("\u0109") }, 0},
+    // Cyrillic letter SHA: direct comparison, and upper case vs. lower case
+    { { FPL("\u0428"), FPL("\u0428") }, 0},
+    { { FPL("\u0428"), FPL("\u0448") }, 0},
+    // Greek letter DELTA: direct comparison, and upper case vs. lower case
+    { { FPL("\u0394"), FPL("\u0394") }, 0},
+    { { FPL("\u0394"), FPL("\u03B4") }, 0},
+    // Japanese full-width A: direct comparison, and upper case vs. lower case
+    // Note that full-width and standard characters are considered different.
+    { { FPL("\uFF21"), FPL("\uFF21") }, 0},
+    { { FPL("\uFF21"), FPL("\uFF41") }, 0},
+    { { FPL("A"), FPL("\uFF21") }, -1},
+    { { FPL("A"), FPL("\uFF41") }, -1},
+    { { FPL("a"), FPL("\uFF21") }, -1},
+    { { FPL("a"), FPL("\uFF41") }, -1},
+#endif
+#if defined(OS_MACOSX)
+    // Codepoints > 0x1000
+    // Georgian letter DON: direct comparison, and upper case vs. lower case
+    { { FPL("\u10A3"), FPL("\u10A3") }, 0},
+    { { FPL("\u10A3"), FPL("\u10D3") }, 0},
+    // Combining characters vs. pre-composed characters, upper and lower case
+    { { FPL("k\u0301u\u032Do\u0304\u0301n"), FPL("\u1E31\u1E77\u1E53n") }, 0},
+    { { FPL("k\u0301u\u032Do\u0304\u0301n"), FPL("kuon") }, 1},
+    { { FPL("kuon"), FPL("k\u0301u\u032Do\u0304\u0301n") }, -1},
+    { { FPL("K\u0301U\u032DO\u0304\u0301N"), FPL("KUON") }, 1},
+    { { FPL("KUON"), FPL("K\u0301U\u032DO\u0304\u0301N") }, -1},
+    { { FPL("k\u0301u\u032Do\u0304\u0301n"), FPL("KUON") }, 1},
+    { { FPL("K\u0301U\u032DO\u0304\u0301N"), FPL("\u1E31\u1E77\u1E53n") }, 0},
+    { { FPL("k\u0301u\u032Do\u0304\u0301n"), FPL("\u1E30\u1E76\u1E52n") }, 0},
+    { { FPL("k\u0301u\u032Do\u0304\u0302n"), FPL("\u1E30\u1E76\u1E52n") }, 1},
+#endif
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath::StringType s1(cases[i].inputs[0]);
+    FilePath::StringType s2(cases[i].inputs[1]);
+    int result = FilePath::CompareIgnoreCase(s1, s2);
+    EXPECT_EQ(cases[i].expected, result) <<
+        "i: " << i << ", s1: " << s1 << ", s2: " << s2;
+  }
+}
+
+// ReferencesParent() must flag any path containing a ".." component, while
+// ignoring look-alikes such as "...", "a..", and "..a".
+TEST_F(FilePathTest, ReferencesParent) {
+  const struct UnaryBooleanTestData cases[] = {
+    { FPL("."), false },
+    { FPL(".."), true },
+    { FPL(".. "), true },
+    { FPL(" .."), true },
+    { FPL("..."), true },
+    { FPL("a.."), false },
+    { FPL("..a"), false },
+    { FPL("../"), true },
+    { FPL("/.."), true },
+    { FPL("/../"), true },
+    { FPL("/a../"), false },
+    { FPL("/..a/"), false },
+    { FPL("//.."), true },
+    { FPL("..//"), true },
+    { FPL("//..//"), true },
+    { FPL("a//..//c"), true },
+    { FPL("../b/c"), true },
+    { FPL("/../b/c"), true },
+    { FPL("a/b/.."), true },
+    { FPL("a/b/../"), true },
+    { FPL("a/../c"), true },
+    { FPL("a/b/c"), false },
+  };
+
+  size_t i = 0;
+  for (const auto& test_case : cases) {
+    const FilePath input(test_case.input);
+    EXPECT_EQ(test_case.expected, input.ReferencesParent())
+        << "i: " << i << ", input: " << input.value();
+    ++i;
+  }
+}
+
+// Verifies that FilePath round-trips between the native encoding and UTF-8
+// via FromUTF8Unsafe()/AsUTF8Unsafe() for ASCII and non-ASCII names.
+TEST_F(FilePathTest, FromUTF8Unsafe_And_AsUTF8Unsafe) {
+  const struct UTF8TestData cases[] = {
+    { FPL("foo.txt"), "foo.txt" },
+    // "aeo" with accents. Use http://0xcc.net/jsescape/ to decode them.
+    { FPL("\u00E0\u00E8\u00F2.txt"), "\xC3\xA0\xC3\xA8\xC3\xB2.txt" },
+    // Full-width "ABC".
+    { FPL("\uFF21\uFF22\uFF23.txt"),
+      "\xEF\xBC\xA1\xEF\xBC\xA2\xEF\xBC\xA3.txt" },
+  };
+
+#if !defined(SYSTEM_NATIVE_UTF8) && defined(OS_LINUX)
+  // On Linux the native path encoding follows the locale, so force a UTF-8
+  // locale for the duration of the test.
+  ScopedLocale locale("en_US.UTF-8");
+#endif
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    // Test FromUTF8Unsafe() works.
+    FilePath from_utf8 = FilePath::FromUTF8Unsafe(cases[i].utf8);
+    EXPECT_EQ(cases[i].native, from_utf8.value())
+        << "i: " << i << ", input: " << cases[i].native;
+    // Test AsUTF8Unsafe() works.
+    FilePath from_native = FilePath(cases[i].native);
+    EXPECT_EQ(cases[i].utf8, from_native.AsUTF8Unsafe())
+        << "i: " << i << ", input: " << cases[i].native;
+    // Test the two file paths are identical.
+    EXPECT_EQ(from_utf8.value(), from_native.value());
+  }
+}
+
+TEST_F(FilePathTest, ConstructWithNUL) {
+  // Sanity-check that the FPS() helper really preserves the embedded NUL.
+  const FilePath::StringType with_nul = FPS("a\0b");
+  ASSERT_EQ(3U, with_nul.length());
+
+  // The FilePath constructor truncates its input at the first '\0'.
+  const FilePath path(with_nul);
+  EXPECT_EQ(1U, path.value().length());
+  EXPECT_EQ(FPL("a"), path.value());
+}
+
+// Append() should truncate the new component at the first '\0' and join it
+// with the platform's path separator.
+TEST_F(FilePathTest, AppendWithNUL) {
+  // Assert FPS() works.
+  ASSERT_EQ(3U, FPS("b\0b").length());
+
+  // Test Append() strips '\0'
+  FilePath path(FPL("a"));
+  path = path.Append(FPS("b\0b"));
+  // Expected length 3: "a", separator, "b".
+  EXPECT_EQ(3U, path.value().length());
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+  EXPECT_EQ(FPL("a\\b"), path.value());
+#else
+  EXPECT_EQ(FPL("a/b"), path.value());
+#endif
+}
+
+TEST_F(FilePathTest, ReferencesParentWithNUL) {
+  // Sanity-check that the FPS() helper keeps the trailing embedded NUL.
+  const FilePath::StringType dotdot_nul = FPS("..\0");
+  ASSERT_EQ(3U, dotdot_nul.length());
+
+  // A ".." component followed by an embedded NUL must still be detected as
+  // a parent reference.
+  EXPECT_TRUE(FilePath(dotdot_nul).ReferencesParent());
+}
+
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+// NormalizePathSeparators() rewrites '/' into '\\' on platforms that use
+// Windows separators; it neither strips trailing separators nor collapses
+// repeated ones.
+TEST_F(FilePathTest, NormalizePathSeparators) {
+  const struct UnaryTestData cases[] = {
+    { FPL("foo/bar"), FPL("foo\\bar") },
+    { FPL("foo/bar\\betz"), FPL("foo\\bar\\betz") },
+    { FPL("foo\\bar"), FPL("foo\\bar") },
+    { FPL("foo\\bar/betz"), FPL("foo\\bar\\betz") },
+    { FPL("foo"), FPL("foo") },
+    // Trailing slashes don't automatically get stripped. That's what
+    // StripTrailingSeparators() is for.
+    { FPL("foo\\"), FPL("foo\\") },
+    { FPL("foo/"), FPL("foo\\") },
+    { FPL("foo/bar\\"), FPL("foo\\bar\\") },
+    { FPL("foo\\bar/"), FPL("foo\\bar\\") },
+    { FPL("foo/bar/"), FPL("foo\\bar\\") },
+    { FPL("foo\\bar\\"), FPL("foo\\bar\\") },
+    { FPL("\\foo/bar"), FPL("\\foo\\bar") },
+    { FPL("/foo\\bar"), FPL("\\foo\\bar") },
+    { FPL("c:/foo/bar/"), FPL("c:\\foo\\bar\\") },
+    { FPL("/foo/bar/"), FPL("\\foo\\bar\\") },
+    { FPL("\\foo\\bar\\"), FPL("\\foo\\bar\\") },
+    { FPL("c:\\foo/bar"), FPL("c:\\foo\\bar") },
+    { FPL("//foo\\bar\\"), FPL("\\\\foo\\bar\\") },
+    { FPL("\\\\foo\\bar\\"), FPL("\\\\foo\\bar\\") },
+    { FPL("//foo\\bar\\"), FPL("\\\\foo\\bar\\") },
+    // This method does not normalize the number of path separators.
+    { FPL("foo\\\\bar"), FPL("foo\\\\bar") },
+    { FPL("foo//bar"), FPL("foo\\\\bar") },
+    { FPL("foo/\\bar"), FPL("foo\\\\bar") },
+    { FPL("foo\\/bar"), FPL("foo\\\\bar") },
+    { FPL("///foo\\\\bar"), FPL("\\\\\\foo\\\\bar") },
+    { FPL("foo//bar///"), FPL("foo\\\\bar\\\\\\") },
+    { FPL("foo/\\bar/\\"), FPL("foo\\\\bar\\\\") },
+    { FPL("/\\foo\\/bar"), FPL("\\\\foo\\\\bar") },
+  };
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath input(cases[i].input);
+    FilePath observed = input.NormalizePathSeparators();
+    EXPECT_EQ(FilePath::StringType(cases[i].expected), observed.value()) <<
+              "i: " << i << ", input: " << input.value();
+  }
+}
+#endif
+
+// Each entry pairs a path with whether it ends in a separator after
+// platform normalization.
+TEST_F(FilePathTest, EndsWithSeparator) {
+  const UnaryBooleanTestData cases[] = {
+    { FPL(""), false },
+    { FPL("/"), true },
+    { FPL("foo/"), true },
+    { FPL("bar"), false },
+    { FPL("/foo/bar"), false },
+  };
+  for (const auto& test_case : cases) {
+    const FilePath path = FilePath(test_case.input).NormalizePathSeparators();
+    EXPECT_EQ(test_case.expected, path.EndsWithSeparator());
+  }
+}
+
+// AsEndingWithSeparator() appends a trailing separator only when one is not
+// already present; an empty path stays empty.
+TEST_F(FilePathTest, AsEndingWithSeparator) {
+  const UnaryTestData cases[] = {
+    { FPL(""), FPL("") },
+    { FPL("/"), FPL("/") },
+    { FPL("foo"), FPL("foo/") },
+    { FPL("foo/"), FPL("foo/") }
+  };
+  for (const auto& test_case : cases) {
+    const FilePath input = FilePath(test_case.input).NormalizePathSeparators();
+    const FilePath expected =
+        FilePath(test_case.expected).NormalizePathSeparators();
+    EXPECT_EQ(expected.value(), input.AsEndingWithSeparator().value());
+  }
+}
+
+#if defined(OS_ANDROID)
+// IsContentUri() should accept paths beginning with the case-insensitive
+// "content://" scheme prefix and reject everything else, including
+// near-misses like "content:/..." and percent-encoded variants.
+TEST_F(FilePathTest, ContentUriTest) {
+  const struct UnaryBooleanTestData cases[] = {
+    { FPL("content://foo.bar"), true },
+    { FPL("content://foo.bar/"), true },
+    { FPL("content://foo/bar"), true },
+    { FPL("CoNTenT://foo.bar"), true },
+    { FPL("content://"), true },
+    { FPL("content:///foo.bar"), true },
+    { FPL("content://3foo/bar"), true },
+    { FPL("content://_foo/bar"), true },
+    { FPL(".. "), false },
+    { FPL("foo.bar"), false },
+    { FPL("content:foo.bar"), false },
+    { FPL("content:/foo.ba"), false },
+    { FPL("content:/dir/foo.bar"), false },
+    { FPL("content: //foo.bar"), false },
+    { FPL("content%2a%2f%2f"), false },
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    FilePath input(cases[i].input);
+    bool observed = input.IsContentUri();
+    EXPECT_EQ(cases[i].expected, observed) <<
+              "i: " << i << ", input: " << input.value();
+  }
+}
+#endif
+
+// Test the PrintTo overload for FilePath (used when a test fails to compare two
+// FilePaths).
+TEST_F(FilePathTest, PrintTo) {
+  // PrintTo() should stream the path's value verbatim.
+  std::stringstream stream;
+  const FilePath path(FPL("foo"));
+  base::PrintTo(path, &stream);
+  EXPECT_EQ("foo", stream.str());
+}
+
+// Test GetHFSDecomposedForm should return empty result for invalid UTF-8
+// strings.
+#if defined(OS_MACOSX)
+TEST_F(FilePathTest, GetHFSDecomposedFormWithInvalidInput) {
+  // Each case is a malformed UTF-8 byte sequence (continuation bytes in the
+  // wrong place); GetHFSDecomposedForm() must return an empty string rather
+  // than a partially-converted one.
+  const FilePath::CharType* cases[] = {
+    FPL("\xc3\x28"),
+    FPL("\xe2\x82\x28"),
+    FPL("\xe2\x28\xa1"),
+    FPL("\xf0\x28\x8c\xbc"),
+    FPL("\xf0\x28\x8c\x28"),
+  };
+  for (const auto& invalid_input : cases) {
+    FilePath::StringType observed = FilePath::GetHFSDecomposedForm(
+        invalid_input);
+    EXPECT_TRUE(observed.empty());
+  }
+}
+#endif
+
+} // namespace base
diff --git a/libchrome/base/files/file_path_watcher.cc b/libchrome/base/files/file_path_watcher.cc
new file mode 100644
index 0000000..a4624ab
--- /dev/null
+++ b/libchrome/base/files/file_path_watcher.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Cross platform methods for FilePathWatcher. See the various platform
+// specific implementation files, too.
+
+#include "base/files/file_path_watcher.h"
+
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Tears down the platform-specific watcher.  NOTE(review): impl_ is assumed
+// to be non-null here; its initialization happens in the platform-specific
+// FilePathWatcher constructor, which is not in this file — confirm.
+FilePathWatcher::~FilePathWatcher() {
+  impl_->Cancel();
+}
+
+// static
+// Cleanup hook: cancels |delegate| on the message loop thread.  Bound into
+// callbacks so the delegate is shut down whether the callback runs or is
+// destroyed unexecuted (e.g. during shutdown).
+void FilePathWatcher::CancelWatch(
+    const scoped_refptr<PlatformDelegate>& delegate) {
+  delegate->CancelOnMessageLoopThread();
+}
+
+// static
+bool FilePathWatcher::RecursiveWatchAvailable() {
+#if (defined(OS_MACOSX) && !defined(OS_IOS)) || defined(OS_WIN) || \
+    defined(OS_LINUX) || defined(OS_ANDROID)
+  // Mac (non-iOS), Windows, Linux and Android support recursive watches.
+  return true;
+#else
+  // FSEvents isn't available on iOS.
+  return false;
+#endif
+}
+
+// Delegates start out in the non-cancelled state; set_cancelled() must run
+// before destruction (enforced by the destructor's DCHECK).
+FilePathWatcher::PlatformDelegate::PlatformDelegate() : cancelled_(false) {}
+
+FilePathWatcher::PlatformDelegate::~PlatformDelegate() {
+  // Enforce the contract that set_cancelled() ran before deletion.
+  DCHECK(is_cancelled());
+}
+
+// Starts watching |path| (and its children if |recursive|), invoking
+// |callback| on changes.  Only absolute paths are supported.
+bool FilePathWatcher::Watch(const FilePath& path,
+                            bool recursive,
+                            const Callback& callback) {
+  DCHECK(path.IsAbsolute());
+  return impl_->Watch(path, recursive, callback);
+}
+
+} // namespace base
diff --git a/libchrome/base/files/file_path_watcher.h b/libchrome/base/files/file_path_watcher.h
new file mode 100644
index 0000000..d5c6db1
--- /dev/null
+++ b/libchrome/base/files/file_path_watcher.h
@@ -0,0 +1,111 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This module provides a way to monitor a file or directory for changes.
+
+#ifndef BASE_FILES_FILE_PATH_WATCHER_H_
+#define BASE_FILES_FILE_PATH_WATCHER_H_
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
+
+namespace base {
+
+// This class lets you register interest in changes on a FilePath.
+// The callback will get called whenever the file or directory referenced by the
+// FilePath is changed, including created or deleted. Due to limitations in the
+// underlying OS APIs, FilePathWatcher has slightly different semantics on OS X
+// than on Windows or Linux. FilePathWatcher on Linux and Windows will detect
+// modifications to files in a watched directory. FilePathWatcher on Mac will
+// detect the creation and deletion of files in a watched directory, but will
+// not detect modifications to those files. See file_path_watcher_kqueue.cc for
+// details.
+class BASE_EXPORT FilePathWatcher {
+ public:
+  // Callback type for Watch(). |path| points to the file that was updated,
+  // and |error| is true if the platform specific code detected an error. In
+  // that case, the callback won't be invoked again.
+  typedef base::Callback<void(const FilePath& path, bool error)> Callback;
+
+  // Used internally to encapsulate different members on different platforms.
+  class PlatformDelegate : public base::RefCountedThreadSafe<PlatformDelegate> {
+   public:
+    PlatformDelegate();
+
+    // Start watching for the given |path| and notify |delegate| about changes.
+    virtual bool Watch(const FilePath& path,
+                       bool recursive,
+                       const Callback& callback) WARN_UNUSED_RESULT = 0;
+
+    // Stop watching. This is called from FilePathWatcher's dtor in order to
+    // allow to shut down properly while the object is still alive.
+    // It can be called from any thread.
+    virtual void Cancel() = 0;
+
+   protected:
+    friend class base::RefCountedThreadSafe<PlatformDelegate>;
+    friend class FilePathWatcher;
+
+    virtual ~PlatformDelegate();
+
+    // Stop watching. This is only called on the thread of the appropriate
+    // message loop. Since it can also be called more than once, it should
+    // check |is_cancelled()| to avoid duplicate work.
+    virtual void CancelOnMessageLoopThread() = 0;
+
+    scoped_refptr<base::SingleThreadTaskRunner> task_runner() const {
+      return task_runner_;
+    }
+
+    void set_task_runner(scoped_refptr<base::SingleThreadTaskRunner> runner) {
+      task_runner_ = std::move(runner);
+    }
+
+    // Must be called before the PlatformDelegate is deleted.
+    void set_cancelled() {
+      cancelled_ = true;
+    }
+
+    bool is_cancelled() const {
+      return cancelled_;
+    }
+
+   private:
+    // Runner used to dispatch watch events; set by platform implementations,
+    // typically to the loop Watch() was called on.
+    scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+    // Whether Cancel()/set_cancelled() has run; checked by the destructor.
+    bool cancelled_;
+  };
+
+  FilePathWatcher();
+  virtual ~FilePathWatcher();
+
+  // A callback that always cleans up the PlatformDelegate, either when executed
+  // or when deleted without having been executed at all, as can happen during
+  // shutdown.
+  static void CancelWatch(const scoped_refptr<PlatformDelegate>& delegate);
+
+  // Returns true if the platform and OS version support recursive watches.
+  static bool RecursiveWatchAvailable();
+
+  // Invokes |callback| whenever updates to |path| are detected. This should be
+  // called at most once, and from a MessageLoop of TYPE_IO. Set |recursive| to
+  // true, to watch |path| and its children. The callback will be invoked on
+  // the same loop. Returns true on success.
+  //
+  // Recursive watch is not supported on all platforms and file systems.
+  // Watch() will return false in the case of failure.
+  bool Watch(const FilePath& path, bool recursive, const Callback& callback);
+
+ private:
+  // Platform-specific implementation.  NOTE(review): initialized in the
+  // platform-specific constructor, which lives elsewhere — confirm non-null.
+  scoped_refptr<PlatformDelegate> impl_;
+
+  DISALLOW_COPY_AND_ASSIGN(FilePathWatcher);
+};
+
+} // namespace base
+
+#endif // BASE_FILES_FILE_PATH_WATCHER_H_
diff --git a/libchrome/base/files/file_path_watcher_fsevents.cc b/libchrome/base/files/file_path_watcher_fsevents.cc
new file mode 100644
index 0000000..e9d2508
--- /dev/null
+++ b/libchrome/base/files/file_path_watcher_fsevents.cc
@@ -0,0 +1,276 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path_watcher_fsevents.h"
+
+#include <dispatch/dispatch.h>
+
+#include <list>
+
+#include "base/bind.h"
+#include "base/files/file_util.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+namespace base {
+
+namespace {
+
+// The latency parameter passed to FSEventsStreamCreate().
+const CFAbsoluteTime kEventLatencySeconds = 0.3;
+
+// Resolve any symlinks in the path.
+// Resolve any symlinks in the path.
+// Walks |path| component by component, splicing each symlink's target back
+// into the pending component list.  At most kMaxLinksToResolve expansions
+// are performed to guard against symlink cycles; if the limit is hit, the
+// empty path is returned.
+FilePath ResolvePath(const FilePath& path) {
+  const unsigned kMaxLinksToResolve = 255;
+
+  std::vector<FilePath::StringType> component_vector;
+  path.GetComponents(&component_vector);
+  // A list is used because symlink targets are spliced in at the front.
+  std::list<FilePath::StringType>
+      components(component_vector.begin(), component_vector.end());
+
+  FilePath result;
+  unsigned resolve_count = 0;
+  while (resolve_count < kMaxLinksToResolve && !components.empty()) {
+    FilePath component(*components.begin());
+    components.pop_front();
+
+    FilePath current;
+    if (component.IsAbsolute()) {
+      current = component;
+    } else {
+      current = result.Append(component);
+    }
+
+    FilePath target;
+    if (ReadSymbolicLink(current, &target)) {
+      // An absolute target restarts resolution from the filesystem root.
+      if (target.IsAbsolute())
+        result.clear();
+      std::vector<FilePath::StringType> target_components;
+      target.GetComponents(&target_components);
+      components.insert(components.begin(), target_components.begin(),
+                        target_components.end());
+      resolve_count++;
+    } else {
+      // Not a symlink (or unreadable): accept the component as-is.
+      result = current;
+    }
+  }
+
+  if (resolve_count >= kMaxLinksToResolve)
+    result.clear();
+  return result;
+}
+
+} // namespace
+
+// Creates a serial dispatch queue (named uniquely per instance, using the
+// object's address, to aid debugging) on which the FSEvents stream will run.
+FilePathWatcherFSEvents::FilePathWatcherFSEvents()
+    : queue_(dispatch_queue_create(
+          base::StringPrintf(
+              "org.chromium.base.FilePathWatcher.%p", this).c_str(),
+          DISPATCH_QUEUE_SERIAL)),
+      fsevent_stream_(nullptr) {
+}
+
+// Starts a recursive watch of |path|.  Must be called on an IO message loop;
+// returns false for non-recursive requests (those are served by
+// FilePathWatcherKQueue instead).  The stream itself is brought up
+// asynchronously on the dispatch queue.
+bool FilePathWatcherFSEvents::Watch(const FilePath& path,
+                                    bool recursive,
+                                    const FilePathWatcher::Callback& callback) {
+  DCHECK(MessageLoopForIO::current());
+  DCHECK(!callback.is_null());
+  DCHECK(callback_.is_null());
+
+  // This class could support non-recursive watches, but that is currently
+  // left to FilePathWatcherKQueue.
+  if (!recursive)
+    return false;
+
+  set_task_runner(ThreadTaskRunnerHandle::Get());
+  callback_ = callback;
+
+  FSEventStreamEventId start_event = FSEventsGetCurrentEventId();
+  // The block runtime would implicitly capture the reference, not the object
+  // it's referencing. Copy the path into a local, so that the value is
+  // captured by the block's scope.
+  const FilePath path_copy(path);
+
+  dispatch_async(queue_, ^{
+      StartEventStream(start_event, path_copy);
+  });
+  return true;
+}
+
+// Stops the watch.  Resets callback_ first, since the destructor DCHECKs
+// that it is null, then synchronously tears down the stream on queue_.
+void FilePathWatcherFSEvents::Cancel() {
+  set_cancelled();
+  callback_.Reset();
+
+  // Switch to the dispatch queue to tear down the event stream. As the queue
+  // is owned by this object, and this method is called from the destructor,
+  // execute the block synchronously.
+  dispatch_sync(queue_, ^{
+      CancelOnMessageLoopThread();
+  });
+}
+
+// static
+// FSEvents stream callback; runs on the libdispatch queue whenever a batch
+// of events is delivered.  |event_watcher| is the FilePathWatcherFSEvents
+// registered via FSEventStreamContext::info.
+void FilePathWatcherFSEvents::FSEventsCallback(
+    ConstFSEventStreamRef stream,
+    void* event_watcher,
+    size_t num_events,
+    void* event_paths,
+    const FSEventStreamEventFlags flags[],
+    const FSEventStreamEventId event_ids[]) {
+  FilePathWatcherFSEvents* watcher =
+      reinterpret_cast<FilePathWatcherFSEvents*>(event_watcher);
+  bool root_changed = watcher->ResolveTargetPath();
+  std::vector<FilePath> paths;
+  FSEventStreamEventId root_change_at = FSEventStreamGetLatestEventId(stream);
+  for (size_t i = 0; i < num_events; i++) {
+    if (flags[i] & kFSEventStreamEventFlagRootChanged)
+      root_changed = true;
+    // Track the earliest non-zero event id in the batch so a rebuilt stream
+    // can resume from it.  NOTE(review): id 0 appears to be treated as a
+    // sentinel here — confirm against the FSEvents documentation.
+    if (event_ids[i])
+      root_change_at = std::min(root_change_at, event_ids[i]);
+    paths.push_back(FilePath(
+        reinterpret_cast<char**>(event_paths)[i]).StripTrailingSeparators());
+  }
+
+  // Reinitialize the event stream if we find changes to the root. This is
+  // necessary since FSEvents doesn't report any events for the subtree after
+  // the directory to be watched gets created.
+  if (root_changed) {
+    // Resetting the event stream from within the callback fails (FSEvents spews
+    // bad file descriptor errors), so post a task to do the reset.
+    dispatch_async(watcher->queue_, ^{
+        watcher->UpdateEventStream(root_change_at);
+    });
+  }
+
+  watcher->OnFilePathsChanged(paths);
+}
+
+// Destructor only verifies the Cancel() contract; actual teardown happens
+// in Cancel()/CancelOnMessageLoopThread().
+FilePathWatcherFSEvents::~FilePathWatcherFSEvents() {
+  // This method may be called on either the libdispatch or task_runner()
+  // thread. Checking callback_ on the libdispatch thread here is safe because
+  // it is executing in a task posted by Cancel() which first reset callback_.
+  // PostTask forms a sufficient memory barrier to ensure that the value is
+  // consistent on the target thread.
+  DCHECK(callback_.is_null())
+      << "Cancel() must be called before FilePathWatcher is destroyed.";
+}
+
+// Called from FSEventsCallback on the libdispatch queue.  Forwards the batch
+// to the task_runner() thread, passing copies of target_/resolved_target_
+// because those members may only be accessed on this queue.
+void FilePathWatcherFSEvents::OnFilePathsChanged(
+    const std::vector<FilePath>& paths) {
+  DCHECK(!resolved_target_.empty());
+  task_runner()->PostTask(
+      FROM_HERE, Bind(&FilePathWatcherFSEvents::DispatchEvents, this, paths,
+                      target_, resolved_target_));
+}
+
+// Runs on the task_runner() thread.  Fires the callback at most once per
+// batch when any reported path lies at or below the watched target.
+void FilePathWatcherFSEvents::DispatchEvents(const std::vector<FilePath>& paths,
+                                             const FilePath& target,
+                                             const FilePath& resolved_target) {
+  DCHECK(task_runner()->RunsTasksOnCurrentThread());
+
+  // Don't issue callbacks after Cancel() has been called.
+  if (is_cancelled() || callback_.is_null())
+    return;
+
+  for (const FilePath& changed_path : paths) {
+    const bool relevant = (resolved_target == changed_path) ||
+                          resolved_target.IsParent(changed_path);
+    if (relevant) {
+      callback_.Run(target, false);
+      return;
+    }
+  }
+}
+
+// Runs synchronously on queue_ (invoked via dispatch_sync from Cancel()).
+void FilePathWatcherFSEvents::CancelOnMessageLoopThread() {
+  // For all other implementations, the "message loop thread" is the IO thread,
+  // as returned by task_runner(). This implementation, however, needs to
+  // cancel pending work on the Dispatch Queue thread.
+
+  if (fsevent_stream_) {
+    DestroyEventStream();
+    target_.clear();
+    resolved_target_.clear();
+  }
+}
+
+// (Re-)creates the FSEvents stream, reporting events from |start_event|
+// onward.  Runs on queue_ (callers dispatch to it).
+void FilePathWatcherFSEvents::UpdateEventStream(
+    FSEventStreamEventId start_event) {
+  // It can happen that the watcher gets canceled while tasks that call this
+  // function are still in flight, so abort if this situation is detected.
+  if (resolved_target_.empty())
+    return;
+
+  if (fsevent_stream_)
+    DestroyEventStream();
+
+  // Watch both the resolved target and its parent directory — presumably so
+  // that creation/deletion of the target itself is observed; confirm.
+  // NOTE(review): paths are converted with the HFS encoding — verify this is
+  // correct for non-HFS volumes.
+  ScopedCFTypeRef<CFStringRef> cf_path(CFStringCreateWithCString(
+      NULL, resolved_target_.value().c_str(), kCFStringEncodingMacHFS));
+  ScopedCFTypeRef<CFStringRef> cf_dir_path(CFStringCreateWithCString(
+      NULL, resolved_target_.DirName().value().c_str(),
+      kCFStringEncodingMacHFS));
+  CFStringRef paths_array[] = { cf_path.get(), cf_dir_path.get() };
+  ScopedCFTypeRef<CFArrayRef> watched_paths(CFArrayCreate(
+      NULL, reinterpret_cast<const void**>(paths_array), arraysize(paths_array),
+      &kCFTypeArrayCallBacks));
+
+  FSEventStreamContext context;
+  context.version = 0;
+  context.info = this;
+  context.retain = NULL;
+  context.release = NULL;
+  context.copyDescription = NULL;
+
+  fsevent_stream_ = FSEventStreamCreate(NULL, &FSEventsCallback, &context,
+                                        watched_paths,
+                                        start_event,
+                                        kEventLatencySeconds,
+                                        kFSEventStreamCreateFlagWatchRoot);
+  FSEventStreamSetDispatchQueue(fsevent_stream_, queue_);
+
+  // Report failure to start back on the IO thread.
+  if (!FSEventStreamStart(fsevent_stream_)) {
+    task_runner()->PostTask(
+        FROM_HERE, Bind(&FilePathWatcherFSEvents::ReportError, this, target_));
+  }
+}
+
+// Re-resolves symlinks in target_.  Returns true if the resolved path
+// changed since the last resolution; posts an error to the IO thread when
+// resolution fails (empty result).
+bool FilePathWatcherFSEvents::ResolveTargetPath() {
+  const FilePath new_resolved_target =
+      ResolvePath(target_).StripTrailingSeparators();
+  const bool changed = (new_resolved_target != resolved_target_);
+  resolved_target_ = new_resolved_target;
+  if (resolved_target_.empty()) {
+    task_runner()->PostTask(
+        FROM_HERE, Bind(&FilePathWatcherFSEvents::ReportError, this, target_));
+  }
+  return changed;
+}
+
+void FilePathWatcherFSEvents::ReportError(const FilePath& target) {
+ DCHECK(task_runner()->RunsTasksOnCurrentThread());
+ if (!callback_.is_null()) {
+ callback_.Run(target, true);
+ }
+}
+
+// Stops, invalidates and releases the stream.  NOTE(review): callers all run
+// on queue_, where the stream was scheduled — confirm that is required by
+// the FSEvents API.
+void FilePathWatcherFSEvents::DestroyEventStream() {
+  FSEventStreamStop(fsevent_stream_);
+  FSEventStreamInvalidate(fsevent_stream_);
+  FSEventStreamRelease(fsevent_stream_);
+  fsevent_stream_ = NULL;
+}
+
+// Runs on queue_ (posted from Watch()).  Records the watch target, resolves
+// symlinks, and brings up the initial event stream.
+void FilePathWatcherFSEvents::StartEventStream(FSEventStreamEventId start_event,
+                                               const FilePath& path) {
+  DCHECK(resolved_target_.empty());
+
+  target_ = path;
+  ResolveTargetPath();
+  UpdateEventStream(start_event);
+}
+
+} // namespace base
diff --git a/libchrome/base/files/file_path_watcher_fsevents.h b/libchrome/base/files/file_path_watcher_fsevents.h
new file mode 100644
index 0000000..cfbe020
--- /dev/null
+++ b/libchrome/base/files/file_path_watcher_fsevents.h
@@ -0,0 +1,100 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_FILE_PATH_WATCHER_FSEVENTS_H_
+#define BASE_FILES_FILE_PATH_WATCHER_FSEVENTS_H_
+
+#include <CoreServices/CoreServices.h>
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/files/file_path.h"
+#include "base/files/file_path_watcher.h"
+#include "base/mac/scoped_dispatch_object.h"
+#include "base/macros.h"
+
+namespace base {
+
+// Mac-specific file watcher implementation based on FSEvents.
+// There are trade-offs between the FSEvents implementation and a kqueue
+// implementation. The biggest issues are that FSEvents on 10.6 sometimes drops
+// events and kqueue does not trigger for modifications to a file in a watched
+// directory. See file_path_watcher_mac.cc for the code that decides when to
+// use which one.
+class FilePathWatcherFSEvents : public FilePathWatcher::PlatformDelegate {
+ public:
+  FilePathWatcherFSEvents();
+
+  // FilePathWatcher::PlatformDelegate overrides.
+  bool Watch(const FilePath& path,
+             bool recursive,
+             const FilePathWatcher::Callback& callback) override;
+  void Cancel() override;
+
+ private:
+  static void FSEventsCallback(ConstFSEventStreamRef stream,
+                               void* event_watcher,
+                               size_t num_events,
+                               void* event_paths,
+                               const FSEventStreamEventFlags flags[],
+                               const FSEventStreamEventId event_ids[]);
+
+  ~FilePathWatcherFSEvents() override;
+
+  // Called from FSEventsCallback whenever there is a change to the paths.
+  void OnFilePathsChanged(const std::vector<FilePath>& paths);
+
+  // Called on the message_loop() thread to dispatch path events. Can't access
+  // target_ and resolved_target_ directly as those are modified on the
+  // libdispatch thread.
+  void DispatchEvents(const std::vector<FilePath>& paths,
+                      const FilePath& target,
+                      const FilePath& resolved_target);
+
+  // Cleans up and stops the event stream.
+  void CancelOnMessageLoopThread() override;
+
+  // (Re-)Initialize the event stream to start reporting events from
+  // |start_event|.
+  void UpdateEventStream(FSEventStreamEventId start_event);
+
+  // Returns true if resolving the target path got a different result than
+  // last time it was done.
+  bool ResolveTargetPath();
+
+  // Report an error watching the given target.
+  void ReportError(const FilePath& target);
+
+  // Destroy the event stream.
+  void DestroyEventStream();
+
+  // Start watching the FSEventStream.
+  void StartEventStream(FSEventStreamEventId start_event, const FilePath& path);
+
+  // Callback to notify upon changes.
+  // (Only accessed from the message_loop() thread.)
+  FilePathWatcher::Callback callback_;
+
+  // The dispatch queue on which the event stream is scheduled.
+  ScopedDispatchObject<dispatch_queue_t> queue_;
+
+  // Target path to watch (passed to callback).
+  // (Only accessed from the libdispatch queue.)
+  FilePath target_;
+
+  // Target path with all symbolic links resolved.
+  // (Only accessed from the libdispatch queue.)
+  FilePath resolved_target_;
+
+  // Backend stream we receive event callbacks from (strong reference).
+  // (Only accessed from the libdispatch queue.)
+  FSEventStreamRef fsevent_stream_;
+
+  DISALLOW_COPY_AND_ASSIGN(FilePathWatcherFSEvents);
+};
+
+} // namespace base
+
+#endif // BASE_FILES_FILE_PATH_WATCHER_FSEVENTS_H_
diff --git a/libchrome/base/files/file_path_watcher_kqueue.cc b/libchrome/base/files/file_path_watcher_kqueue.cc
new file mode 100644
index 0000000..6d034cd
--- /dev/null
+++ b/libchrome/base/files/file_path_watcher_kqueue.cc
@@ -0,0 +1,391 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path_watcher_kqueue.h"
+
+#include <fcntl.h>
+#include <stddef.h>
+#include <sys/param.h>
+
+#include "base/bind.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+// On some platforms these are not defined.
+#if !defined(EV_RECEIPT)
+#define EV_RECEIPT 0
+#endif
+#if !defined(O_EVTONLY)
+#define O_EVTONLY O_RDONLY
+#endif
+
+namespace base {
+
+// Starts with no kqueue; the kqueue descriptor is created in Watch().
+FilePathWatcherKQueue::FilePathWatcherKQueue() : kqueue_(-1) {}
+
+// Teardown is handled by CancelOnMessageLoopThread(), not here.
+FilePathWatcherKQueue::~FilePathWatcherKQueue() {}
+
+// Releases the resources attached to one kevent: closes its file
+// descriptor and frees the heap-allocated EventData stored in |udata|.
+void FilePathWatcherKQueue::ReleaseEvent(struct kevent& event) {
+  CloseFileDescriptor(&event.ident);
+  EventData* entry = EventDataForKevent(event);
+  delete entry;
+  event.udata = NULL;
+}
+
+// Builds one kevent per path component of |path|, opening a file
+// descriptor for every component that currently exists. Returns the
+// number of components that could be opened, or -1 if |path| has no
+// components at all.
+int FilePathWatcherKQueue::EventsForPath(FilePath path, EventVector* events) {
+  DCHECK(MessageLoopForIO::current());
+  // Make sure that we are working with a clean slate.
+  DCHECK(events->empty());
+
+  std::vector<FilePath::StringType> components;
+  path.GetComponents(&components);
+
+  if (components.size() < 1) {
+    return -1;
+  }
+
+  int last_existing_entry = 0;
+  FilePath built_path;
+  bool path_still_exists = true;
+  for (std::vector<FilePath::StringType>::iterator i = components.begin();
+       i != components.end(); ++i) {
+    // Rebuild the path incrementally, one component per iteration.
+    if (i == components.begin()) {
+      built_path = FilePath(*i);
+    } else {
+      built_path = built_path.Append(*i);
+    }
+    uintptr_t fd = kNoFileDescriptor;
+    if (path_still_exists) {
+      fd = FileDescriptorForPath(built_path);
+      if (fd == kNoFileDescriptor) {
+        // Once one component is missing, all deeper components are too.
+        path_still_exists = false;
+      } else {
+        ++last_existing_entry;
+      }
+    }
+    // |subdir| names the next component down; empty for the last one.
+    FilePath::StringType subdir = (i != (components.end() - 1)) ? *(i + 1) : "";
+    EventData* data = new EventData(built_path, subdir);
+    struct kevent event;
+    EV_SET(&event, fd, EVFILT_VNODE, (EV_ADD | EV_CLEAR | EV_RECEIPT),
+           (NOTE_DELETE | NOTE_WRITE | NOTE_ATTRIB |
+            NOTE_RENAME | NOTE_REVOKE | NOTE_EXTEND), 0, data);
+    events->push_back(event);
+  }
+  return last_existing_entry;
+}
+
+// Opens |path| with O_EVTONLY so the descriptor does not prevent the
+// file it references from being deleted. Returns kNoFileDescriptor on
+// failure.
+uintptr_t FilePathWatcherKQueue::FileDescriptorForPath(const FilePath& path) {
+  int fd = HANDLE_EINTR(open(path.value().c_str(), O_EVTONLY));
+  if (fd == -1)
+    return kNoFileDescriptor;
+  return fd;
+}
+
+// Closes |*fd| if it is open and marks it closed. A no-op for
+// descriptors that are already kNoFileDescriptor.
+void FilePathWatcherKQueue::CloseFileDescriptor(uintptr_t* fd) {
+  if (*fd == kNoFileDescriptor) {
+    return;
+  }
+
+  if (IGNORE_EINTR(close(*fd)) != 0) {
+    DPLOG(ERROR) << "close";
+  }
+  *fd = kNoFileDescriptor;
+}
+
+// Returns true if the kevent values are error free. With EV_RECEIPT,
+// each returned kevent carries its status in |data| with EV_ERROR set;
+// any nonzero status marks the whole batch invalid. Logs the path (or
+// the raw descriptor when no matching entry is found) for each error.
+bool FilePathWatcherKQueue::AreKeventValuesValid(struct kevent* kevents,
+                                                 int count) {
+  if (count < 0) {
+    DPLOG(ERROR) << "kevent";
+    return false;
+  }
+  bool valid = true;
+  for (int i = 0; i < count; ++i) {
+    if (kevents[i].flags & EV_ERROR && kevents[i].data) {
+      // Find the kevent in |events_| that matches the kevent with the error.
+      EventVector::iterator event = events_.begin();
+      for (; event != events_.end(); ++event) {
+        if (event->ident == kevents[i].ident) {
+          break;
+        }
+      }
+      std::string path_name;
+      if (event != events_.end()) {
+        EventData* event_data = EventDataForKevent(*event);
+        if (event_data != NULL) {
+          path_name = event_data->path_.value();
+        }
+      }
+      if (path_name.empty()) {
+        // Log the descriptor value itself. (The previous code passed
+        // reinterpret_cast<long>(&kevents[i].ident), which printed the
+        // *address* of the ident field rather than the descriptor.)
+        path_name = base::StringPrintf(
+            "fd %lu", static_cast<unsigned long>(kevents[i].ident));
+      }
+      DLOG(ERROR) << "Error: " << kevents[i].data << " for " << path_name;
+      valid = false;
+    }
+  }
+  return valid;
+}
+
+// Responds to a NOTE_ATTRIB event on a non-terminal path component. If
+// the next component down is no longer accessible, closes all watch
+// descriptors from |event| onward and requests a watch rebuild;
+// otherwise discards the probe descriptor.
+void FilePathWatcherKQueue::HandleAttributesChange(
+    const EventVector::iterator& event,
+    bool* target_file_affected,
+    bool* update_watches) {
+  EventVector::iterator next_event = event + 1;
+  EventData* next_event_data = EventDataForKevent(*next_event);
+  // Check to see if the next item in path is still accessible.
+  uintptr_t have_access = FileDescriptorForPath(next_event_data->path_);
+  if (have_access == kNoFileDescriptor) {
+    *target_file_affected = true;
+    *update_watches = true;
+    EventVector::iterator local_event(event);
+    for (; local_event != events_.end(); ++local_event) {
+      // Close all nodes from the event down. This has the side effect of
+      // potentially rendering other events in |updates| invalid.
+      // There is no need to remove the events from |kqueue_| because this
+      // happens as a side effect of closing the file descriptor.
+      CloseFileDescriptor(&local_event->ident);
+    }
+  } else {
+    CloseFileDescriptor(&have_access);
+  }
+}
+
+// Responds to a delete/rename/revoke of the component at |event|:
+// everything from that component down is now stale, so close those
+// descriptors and request a watch rebuild.
+void FilePathWatcherKQueue::HandleDeleteOrMoveChange(
+    const EventVector::iterator& event,
+    bool* target_file_affected,
+    bool* update_watches) {
+  *target_file_affected = true;
+  *update_watches = true;
+  EventVector::iterator local_event(event);
+  for (; local_event != events_.end(); ++local_event) {
+    // Close all nodes from the event down. This has the side effect of
+    // potentially rendering other events in |updates| invalid.
+    // There is no need to remove the events from |kqueue_| because this
+    // happens as a side effect of closing the file descriptor.
+    CloseFileDescriptor(&local_event->ident);
+  }
+}
+
+// Responds to a NOTE_WRITE on a directory component: if the next
+// component now exists but has no open descriptor yet, open one and
+// request a watch rebuild.
+void FilePathWatcherKQueue::HandleCreateItemChange(
+    const EventVector::iterator& event,
+    bool* target_file_affected,
+    bool* update_watches) {
+  // Get the next item in the path.
+  EventVector::iterator next_event = event + 1;
+  // Check to see if it already has a valid file descriptor.
+  if (!IsKeventFileDescriptorOpen(*next_event)) {
+    EventData* next_event_data = EventDataForKevent(*next_event);
+    // If not, attempt to open a file descriptor for it.
+    next_event->ident = FileDescriptorForPath(next_event_data->path_);
+    if (IsKeventFileDescriptorOpen(*next_event)) {
+      *update_watches = true;
+      // An empty |subdir_| means this is the watch target itself.
+      if (next_event_data->subdir_.empty()) {
+        *target_file_affected = true;
+      }
+    }
+  }
+}
+
+// Re-registers kevents for every component that has an open descriptor,
+// then tries to open descriptors for components that have appeared
+// since the last pass, looping until nothing new shows up. Returns
+// false on kevent failure or if even the path root is inaccessible.
+bool FilePathWatcherKQueue::UpdateWatches(bool* target_file_affected) {
+  // Iterate over events adding kevents for items that exist to the kqueue.
+  // Then check to see if new components in the path have been created.
+  // Repeat until no new components in the path are detected.
+  // This is to get around races in directory creation in a watched path.
+  bool update_watches = true;
+  while (update_watches) {
+    // |valid| ends up as the count of leading events with open fds.
+    size_t valid;
+    for (valid = 0; valid < events_.size(); ++valid) {
+      if (!IsKeventFileDescriptorOpen(events_[valid])) {
+        break;
+      }
+    }
+    if (valid == 0) {
+      // The root of the file path is inaccessible?
+      return false;
+    }
+
+    EventVector updates(valid);
+    int count = HANDLE_EINTR(kevent(kqueue_, &events_[0], valid, &updates[0],
+                                    valid, NULL));
+    if (!AreKeventValuesValid(&updates[0], count)) {
+      return false;
+    }
+    update_watches = false;
+    // Try to open descriptors for components beyond the open prefix.
+    for (; valid < events_.size(); ++valid) {
+      EventData* event_data = EventDataForKevent(events_[valid]);
+      events_[valid].ident = FileDescriptorForPath(event_data->path_);
+      if (IsKeventFileDescriptorOpen(events_[valid])) {
+        update_watches = true;
+        if (event_data->subdir_.empty()) {
+          *target_file_affected = true;
+        }
+      } else {
+        break;
+      }
+    }
+  }
+  return true;
+}
+
+// Called when |kqueue_| becomes readable: drains all pending vnode
+// events (non-blocking, zero timeout), reacts to each, rebuilds the
+// watch list if path components appeared or disappeared, and notifies
+// |callback_| on changes or errors.
+void FilePathWatcherKQueue::OnFileCanReadWithoutBlocking(int fd) {
+  DCHECK(MessageLoopForIO::current());
+  DCHECK_EQ(fd, kqueue_);
+  DCHECK(events_.size());
+
+  // Request the file system update notifications that have occurred and return
+  // them in |updates|. |count| will contain the number of updates that have
+  // occurred.
+  EventVector updates(events_.size());
+  struct timespec timeout = {0, 0};
+  int count = HANDLE_EINTR(kevent(kqueue_, NULL, 0, &updates[0], updates.size(),
+                                  &timeout));
+
+  // Error values are stored within updates, so check to make sure that no
+  // errors occurred.
+  if (!AreKeventValuesValid(&updates[0], count)) {
+    callback_.Run(target_, true /* error */);
+    Cancel();
+    return;
+  }
+
+  bool update_watches = false;
+  bool send_notification = false;
+
+  // Iterate through each of the updates and react to them.
+  for (int i = 0; i < count; ++i) {
+    // Find our kevent record that matches the update notification.
+    EventVector::iterator event = events_.begin();
+    for (; event != events_.end(); ++event) {
+      if (!IsKeventFileDescriptorOpen(*event) ||
+          event->ident == updates[i].ident) {
+        break;
+      }
+    }
+    if (event == events_.end() || !IsKeventFileDescriptorOpen(*event)) {
+      // The event may no longer exist in |events_| because another event
+      // modified |events_| in such a way to make it invalid. For example if
+      // the path is /foo/bar/bam and foo is deleted, NOTE_DELETE events for
+      // foo, bar and bam will be sent. If foo is processed first, then
+      // the file descriptors for bar and bam will already be closed and set
+      // to -1 before they get a chance to be processed.
+      continue;
+    }
+
+    EventData* event_data = EventDataForKevent(*event);
+
+    // If the subdir is empty, this is the last item on the path and is the
+    // target file.
+    bool target_file_affected = event_data->subdir_.empty();
+    if ((updates[i].fflags & NOTE_ATTRIB) && !target_file_affected) {
+      HandleAttributesChange(event, &target_file_affected, &update_watches);
+    }
+    if (updates[i].fflags & (NOTE_DELETE | NOTE_REVOKE | NOTE_RENAME)) {
+      HandleDeleteOrMoveChange(event, &target_file_affected, &update_watches);
+    }
+    if ((updates[i].fflags & NOTE_WRITE) && !target_file_affected) {
+      HandleCreateItemChange(event, &target_file_affected, &update_watches);
+    }
+    send_notification |= target_file_affected;
+  }
+
+  if (update_watches) {
+    if (!UpdateWatches(&send_notification)) {
+      callback_.Run(target_, true /* error */);
+      Cancel();
+      // Cancel() resets |callback_| (via CancelOnMessageLoopThread), so
+      // control must not fall through to the Run() below. Previously
+      // there was no return here, which could invoke a null callback.
+      return;
+    }
+  }
+
+  if (send_notification) {
+    callback_.Run(target_, false);
+  }
+}
+
+// |kqueue_| is only ever watched for readability (WATCH_READ in
+// Watch()), so a write notification indicates a programming error.
+void FilePathWatcherKQueue::OnFileCanWriteWithoutBlocking(int /* fd */) {
+  NOTREACHED();
+}
+
+// The watching message loop is going away: tear down now so no state
+// outlives it.
+void FilePathWatcherKQueue::WillDestroyCurrentMessageLoop() {
+  CancelOnMessageLoopThread();
+}
+
+// Starts watching |path| (non-recursive only): registers one kevent per
+// path component with a fresh kqueue, then watches the kqueue
+// descriptor for readability on the current (IO) message loop. Returns
+// false if recursion was requested or any setup step fails.
+bool FilePathWatcherKQueue::Watch(const FilePath& path,
+                                  bool recursive,
+                                  const FilePathWatcher::Callback& callback) {
+  DCHECK(MessageLoopForIO::current());
+  DCHECK(target_.value().empty());  // Can only watch one path.
+  DCHECK(!callback.is_null());
+  DCHECK_EQ(kqueue_, -1);
+
+  if (recursive) {
+    // Recursive watch is not supported using kqueue.
+    NOTIMPLEMENTED();
+    return false;
+  }
+
+  callback_ = callback;
+  target_ = path;
+
+  MessageLoop::current()->AddDestructionObserver(this);
+  io_task_runner_ = ThreadTaskRunnerHandle::Get();
+
+  kqueue_ = kqueue();
+  if (kqueue_ == -1) {
+    DPLOG(ERROR) << "kqueue";
+    return false;
+  }
+
+  // At least the root component must exist, so |last_entry| > 0.
+  int last_entry = EventsForPath(target_, &events_);
+  DCHECK_NE(last_entry, 0);
+
+  EventVector responses(last_entry);
+
+  int count = HANDLE_EINTR(kevent(kqueue_, &events_[0], last_entry,
+                                  &responses[0], last_entry, NULL));
+  if (!AreKeventValuesValid(&responses[0], count)) {
+    // Calling Cancel() here to close any file descriptors that were opened.
+    // This would happen in the destructor anyways, but FilePathWatchers tend to
+    // be long lived, and if an error has occurred, there is no reason to waste
+    // the file descriptors.
+    Cancel();
+    return false;
+  }
+
+  return MessageLoopForIO::current()->WatchFileDescriptor(
+      kqueue_, true, MessageLoopForIO::WATCH_READ, &kqueue_watcher_, this);
+}
+
+// Cancels the watch, hopping to the IO thread first when called from
+// another thread so |events_| and friends are only touched there.
+void FilePathWatcherKQueue::Cancel() {
+  SingleThreadTaskRunner* task_runner = io_task_runner_.get();
+  if (!task_runner) {
+    // Watch() was never called, or cancellation already completed.
+    set_cancelled();
+    return;
+  }
+  if (!task_runner->BelongsToCurrentThread()) {
+    task_runner->PostTask(FROM_HERE,
+                          base::Bind(&FilePathWatcherKQueue::Cancel, this));
+    return;
+  }
+  CancelOnMessageLoopThread();
+}
+
+// Tears down all watch state on the IO thread: stops the fd watcher,
+// closes the kqueue, releases every kevent (descriptors + EventData),
+// and resets |callback_| so no further notifications can fire.
+void FilePathWatcherKQueue::CancelOnMessageLoopThread() {
+  DCHECK(MessageLoopForIO::current());
+  if (!is_cancelled()) {
+    set_cancelled();
+    kqueue_watcher_.StopWatchingFileDescriptor();
+    if (IGNORE_EINTR(close(kqueue_)) != 0) {
+      DPLOG(ERROR) << "close kqueue";
+    }
+    kqueue_ = -1;
+    std::for_each(events_.begin(), events_.end(), ReleaseEvent);
+    events_.clear();
+    io_task_runner_ = NULL;
+    MessageLoop::current()->RemoveDestructionObserver(this);
+    callback_.Reset();
+  }
+}
+
+} // namespace base
diff --git a/libchrome/base/files/file_path_watcher_kqueue.h b/libchrome/base/files/file_path_watcher_kqueue.h
new file mode 100644
index 0000000..d9db8c2
--- /dev/null
+++ b/libchrome/base/files/file_path_watcher_kqueue.h
@@ -0,0 +1,133 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_FILE_PATH_WATCHER_KQUEUE_H_
+#define BASE_FILES_FILE_PATH_WATCHER_KQUEUE_H_
+
+#include <sys/event.h>
+#include <vector>
+
+#include "base/files/file_path.h"
+#include "base/files/file_path_watcher.h"
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/single_thread_task_runner.h"
+
+namespace base {
+
+// Mac-specific file watcher implementation based on kqueue.
+// The Linux and Windows versions are able to detect:
+// - file creation/deletion/modification in a watched directory
+// - file creation/deletion/modification for a watched file
+// - modifications to the paths to a watched object that would affect the
+// object such as renaming/attibute changes etc.
+// The kqueue implementation will handle all of the items in the list above
+// except for detecting modifications to files in a watched directory. It will
+// detect the creation and deletion of files, just not the modification of
+// files. It does however detect the attribute changes that the FSEvents impl
+// would miss.
+class FilePathWatcherKQueue : public FilePathWatcher::PlatformDelegate,
+                              public MessageLoopForIO::Watcher,
+                              public MessageLoop::DestructionObserver {
+ public:
+  FilePathWatcherKQueue();
+
+  // MessageLoopForIO::Watcher overrides.
+  void OnFileCanReadWithoutBlocking(int fd) override;
+  void OnFileCanWriteWithoutBlocking(int fd) override;
+
+  // MessageLoop::DestructionObserver overrides.
+  void WillDestroyCurrentMessageLoop() override;
+
+  // FilePathWatcher::PlatformDelegate overrides.
+  bool Watch(const FilePath& path,
+             bool recursive,
+             const FilePathWatcher::Callback& callback) override;
+  void Cancel() override;
+
+ protected:
+  ~FilePathWatcherKQueue() override;
+
+ private:
+  // Per-path-component payload stored in a kevent's |udata| field.
+  class EventData {
+   public:
+    EventData(const FilePath& path, const FilePath::StringType& subdir)
+        : path_(path), subdir_(subdir) { }
+    FilePath path_;  // Full path to this item.
+    FilePath::StringType subdir_;  // Path to any sub item.
+  };
+
+  typedef std::vector<struct kevent> EventVector;
+
+  // Can only be called on |io_task_runner_|'s thread.
+  void CancelOnMessageLoopThread() override;
+
+  // Returns true if the kevent values are error free.
+  bool AreKeventValuesValid(struct kevent* kevents, int count);
+
+  // Respond to a change of attributes of the path component represented by
+  // |event|. Sets |target_file_affected| to true if |target_| is affected.
+  // Sets |update_watches| to true if |events_| need to be updated.
+  void HandleAttributesChange(const EventVector::iterator& event,
+                              bool* target_file_affected,
+                              bool* update_watches);
+
+  // Respond to a move or deletion of the path component represented by
+  // |event|. Sets |target_file_affected| to true if |target_| is affected.
+  // Sets |update_watches| to true if |events_| need to be updated.
+  void HandleDeleteOrMoveChange(const EventVector::iterator& event,
+                                bool* target_file_affected,
+                                bool* update_watches);
+
+  // Respond to a creation of an item in the path component represented by
+  // |event|. Sets |target_file_affected| to true if |target_| is affected.
+  // Sets |update_watches| to true if |events_| need to be updated.
+  void HandleCreateItemChange(const EventVector::iterator& event,
+                              bool* target_file_affected,
+                              bool* update_watches);
+
+  // Update |events_| with the current status of the system.
+  // Sets |target_file_affected| to true if |target_| is affected.
+  // Returns false if an error occurs.
+  bool UpdateWatches(bool* target_file_affected);
+
+  // Fills |events| with one kevent per component in |path|.
+  // Returns the number of valid events created where a valid event is
+  // defined as one that has a ident (file descriptor) field != -1.
+  static int EventsForPath(FilePath path, EventVector *events);
+
+  // Release a kevent generated by EventsForPath.
+  static void ReleaseEvent(struct kevent& event);
+
+  // Returns a file descriptor that will not block the system from deleting
+  // the file it references.
+  static uintptr_t FileDescriptorForPath(const FilePath& path);
+
+  // Sentinel stored in kevent.ident for a component with no open fd.
+  static const uintptr_t kNoFileDescriptor = static_cast<uintptr_t>(-1);
+
+  // Closes |*fd| and sets |*fd| to -1.
+  static void CloseFileDescriptor(uintptr_t* fd);
+
+  // Returns true if kevent has open file descriptor.
+  static bool IsKeventFileDescriptorOpen(const struct kevent& event) {
+    return event.ident != kNoFileDescriptor;
+  }
+
+  static EventData* EventDataForKevent(const struct kevent& event) {
+    return reinterpret_cast<EventData*>(event.udata);
+  }
+
+  // One kevent per component of |target_|'s path.
+  EventVector events_;
+  // Task runner of the thread Watch() was called on.
+  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
+  MessageLoopForIO::FileDescriptorWatcher kqueue_watcher_;
+  // Callback to notify upon changes.
+  FilePathWatcher::Callback callback_;
+  // The path being watched (passed to |callback_|).
+  FilePath target_;
+  // kqueue file descriptor, or -1 when not watching.
+  int kqueue_;
+
+  DISALLOW_COPY_AND_ASSIGN(FilePathWatcherKQueue);
+};
+
+} // namespace base
+
+#endif // BASE_FILES_FILE_PATH_WATCHER_KQUEUE_H_
diff --git a/libchrome/base/files/file_path_watcher_linux.cc b/libchrome/base/files/file_path_watcher_linux.cc
new file mode 100644
index 0000000..87bddd3
--- /dev/null
+++ b/libchrome/base/files/file_path_watcher_linux.cc
@@ -0,0 +1,689 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path_watcher.h"
+
+#include <errno.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/inotify.h>
+#include <sys/ioctl.h>
+#include <sys/select.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/containers/hash_tables.h"
+#include "base/files/file_enumerator.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/lazy_instance.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+
+namespace {
+
+class FilePathWatcherImpl;
+
+// Singleton to manage all inotify watches.
+// TODO(tony): It would be nice if this wasn't a singleton.
+// http://crbug.com/38174
+class InotifyReader {
+ public:
+  typedef int Watch;  // Watch descriptor used by AddWatch and RemoveWatch.
+  static const Watch kInvalidWatch = -1;
+
+  // Watch directory |path| for changes. |watcher| will be notified on each
+  // change. Returns kInvalidWatch on failure.
+  Watch AddWatch(const FilePath& path, FilePathWatcherImpl* watcher);
+
+  // Remove |watch| if it's valid.
+  void RemoveWatch(Watch watch, FilePathWatcherImpl* watcher);
+
+  // Callback for InotifyReaderTask. (Invoked from the read loop running
+  // on |thread_|.)
+  void OnInotifyEvent(const inotify_event* event);
+
+ private:
+  friend struct DefaultLazyInstanceTraits<InotifyReader>;
+
+  typedef std::set<FilePathWatcherImpl*> WatcherSet;
+
+  InotifyReader();
+  ~InotifyReader();
+
+  // We keep track of which delegates want to be notified on which watches.
+  hash_map<Watch, WatcherSet> watchers_;
+
+  // Lock to protect watchers_.
+  Lock lock_;
+
+  // Separate thread on which we run blocking read for inotify events.
+  Thread thread_;
+
+  // File descriptor returned by inotify_init.
+  const int inotify_fd_;
+
+  // Use self-pipe trick to unblock select during shutdown.
+  int shutdown_pipe_[2];
+
+  // Flag set to true when startup was successful.
+  bool valid_;
+
+  DISALLOW_COPY_AND_ASSIGN(InotifyReader);
+};
+
+// Per-watch implementation: installs inotify watches for every
+// component of the target path (and its subdirectories when recursive)
+// and forwards change notifications to the client callback.
+class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate,
+                            public MessageLoop::DestructionObserver {
+ public:
+  FilePathWatcherImpl();
+
+  // Called for each event coming from the watch. |fired_watch| identifies the
+  // watch that fired, |child| indicates what has changed, and is relative to
+  // the currently watched path for |fired_watch|.
+  //
+  // |created| is true if the object appears.
+  // |deleted| is true if the object disappears.
+  // |is_dir| is true if the object is a directory.
+  void OnFilePathChanged(InotifyReader::Watch fired_watch,
+                         const FilePath::StringType& child,
+                         bool created,
+                         bool deleted,
+                         bool is_dir);
+
+ protected:
+  ~FilePathWatcherImpl() override {}
+
+ private:
+  // Start watching |path| for changes and notify |delegate| on each change.
+  // Returns true if watch for |path| has been added successfully.
+  bool Watch(const FilePath& path,
+             bool recursive,
+             const FilePathWatcher::Callback& callback) override;
+
+  // Cancel the watch. This unregisters the instance with InotifyReader.
+  void Cancel() override;
+
+  // Cleans up and stops observing the message_loop() thread.
+  void CancelOnMessageLoopThread() override;
+
+  // Deletion of the FilePathWatcher will call Cancel() to dispose of this
+  // object in the right thread. This also observes destruction of the required
+  // cleanup thread, in case it quits before Cancel() is called.
+  void WillDestroyCurrentMessageLoop() override;
+
+  // Inotify watches are installed for all directory components of |target_|.
+  // A WatchEntry instance holds:
+  // - |watch|: the watch descriptor for a component.
+  // - |subdir|: the subdirectory that identifies the next component.
+  //   - For the last component, there is no next component, so it is empty.
+  // - |linkname|: the target of the symlink.
+  //   - Only if the target being watched is a symbolic link.
+  struct WatchEntry {
+    explicit WatchEntry(const FilePath::StringType& dirname)
+        : watch(InotifyReader::kInvalidWatch),
+          subdir(dirname) {}
+
+    InotifyReader::Watch watch;
+    FilePath::StringType subdir;
+    FilePath::StringType linkname;
+  };
+  typedef std::vector<WatchEntry> WatchVector;
+
+  // Reconfigure to watch for the most specific parent directory of |target_|
+  // that exists. Also calls UpdateRecursiveWatches() below.
+  void UpdateWatches();
+
+  // Reconfigure to recursively watch |target_| and all its sub-directories.
+  // - This is a no-op if the watch is not recursive.
+  // - If |target_| does not exist, then clear all the recursive watches.
+  // - Assuming |target_| exists, passing kInvalidWatch as |fired_watch| forces
+  //   addition of recursive watches for |target_|.
+  // - Otherwise, only the directory associated with |fired_watch| and its
+  //   sub-directories will be reconfigured.
+  void UpdateRecursiveWatches(InotifyReader::Watch fired_watch, bool is_dir);
+
+  // Enumerate recursively through |path| and add / update watches.
+  void UpdateRecursiveWatchesForPath(const FilePath& path);
+
+  // Do internal bookkeeping to update mappings between |watch| and its
+  // associated full path |path|.
+  void TrackWatchForRecursion(InotifyReader::Watch watch, const FilePath& path);
+
+  // Remove all the recursive watches.
+  void RemoveRecursiveWatches();
+
+  // |path| is a symlink to a non-existent target. Attempt to add a watch to
+  // the link target's parent directory. Update |watch_entry| on success.
+  void AddWatchForBrokenSymlink(const FilePath& path, WatchEntry* watch_entry);
+
+  bool HasValidWatchVector() const;
+
+  // Callback to notify upon changes.
+  FilePathWatcher::Callback callback_;
+
+  // The file or directory we're supposed to watch.
+  FilePath target_;
+
+  bool recursive_;
+
+  // The vector of watches and next component names for all path components,
+  // starting at the root directory. The last entry corresponds to the watch for
+  // |target_| and always stores an empty next component name in |subdir|.
+  WatchVector watches_;
+
+  // Bookkeeping for recursive watches: each recursive watch descriptor
+  // mapped to the directory it covers, and the inverse map.
+  hash_map<InotifyReader::Watch, FilePath> recursive_paths_by_watch_;
+  std::map<FilePath, InotifyReader::Watch> recursive_watches_by_path_;
+
+  DISALLOW_COPY_AND_ASSIGN(FilePathWatcherImpl);
+};
+
+// Read loop run on |reader|'s dedicated thread: blocks in select() on
+// the inotify fd and the shutdown pipe, then drains and dispatches all
+// pending inotify events. Returns (ending the loop) on shutdown signal
+// or any unrecoverable syscall error.
+void InotifyReaderCallback(InotifyReader* reader, int inotify_fd,
+                           int shutdown_fd) {
+  // Make sure the file descriptors are good for use with select().
+  CHECK_LE(0, inotify_fd);
+  CHECK_GT(FD_SETSIZE, inotify_fd);
+  CHECK_LE(0, shutdown_fd);
+  CHECK_GT(FD_SETSIZE, shutdown_fd);
+
+  trace_event::TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop();
+
+  while (true) {
+    fd_set rfds;
+    FD_ZERO(&rfds);
+    FD_SET(inotify_fd, &rfds);
+    FD_SET(shutdown_fd, &rfds);
+
+    // Wait until some inotify events are available.
+    int select_result =
+      HANDLE_EINTR(select(std::max(inotify_fd, shutdown_fd) + 1,
+                          &rfds, NULL, NULL, NULL));
+    if (select_result < 0) {
+      DPLOG(WARNING) << "select failed";
+      return;
+    }
+
+    // The self-pipe became readable: shutdown was requested.
+    if (FD_ISSET(shutdown_fd, &rfds))
+      return;
+
+    // Adjust buffer size to current event queue size.
+    int buffer_size;
+    int ioctl_result = HANDLE_EINTR(ioctl(inotify_fd, FIONREAD,
+                                          &buffer_size));
+
+    if (ioctl_result != 0) {
+      DPLOG(WARNING) << "ioctl failed";
+      return;
+    }
+
+    std::vector<char> buffer(buffer_size);
+
+    ssize_t bytes_read = HANDLE_EINTR(read(inotify_fd, &buffer[0],
+                                           buffer_size));
+
+    if (bytes_read < 0) {
+      DPLOG(WARNING) << "read from inotify fd failed";
+      return;
+    }
+
+    // Walk the variable-length inotify_event records in the buffer.
+    ssize_t i = 0;
+    while (i < bytes_read) {
+      inotify_event* event = reinterpret_cast<inotify_event*>(&buffer[i]);
+      size_t event_size = sizeof(inotify_event) + event->len;
+      DCHECK(i + event_size <= static_cast<size_t>(bytes_read));
+      reader->OnInotifyEvent(event);
+      i += event_size;
+    }
+  }
+}
+
+// Lazily-created, process-wide InotifyReader shared by all watchers.
+// Leaky: intentionally never destroyed at process exit.
+static LazyInstance<InotifyReader>::Leaky g_inotify_reader =
+    LAZY_INSTANCE_INITIALIZER;
+
+// Creates the inotify descriptor and, if that plus the shutdown pipe
+// and reader thread all come up, posts the blocking read loop to the
+// thread and marks the reader valid.
+InotifyReader::InotifyReader()
+    : thread_("inotify_reader"),
+      inotify_fd_(inotify_init()),
+      valid_(false) {
+  if (inotify_fd_ < 0)
+    PLOG(ERROR) << "inotify_init() failed";
+
+  shutdown_pipe_[0] = -1;
+  shutdown_pipe_[1] = -1;
+  if (inotify_fd_ >= 0 && pipe(shutdown_pipe_) == 0 && thread_.Start()) {
+    thread_.task_runner()->PostTask(
+        FROM_HERE,
+        Bind(&InotifyReaderCallback, this, inotify_fd_, shutdown_pipe_[0]));
+    valid_ = true;
+  }
+}
+
+// Unblocks the reader thread via the self-pipe, joins it, then closes
+// whichever descriptors were successfully opened.
+InotifyReader::~InotifyReader() {
+  if (valid_) {
+    // Write to the self-pipe so that the select call in InotifyReaderTask
+    // returns.
+    ssize_t ret = HANDLE_EINTR(write(shutdown_pipe_[1], "", 1));
+    DPCHECK(ret > 0);
+    DCHECK_EQ(ret, 1);
+    thread_.Stop();
+  }
+  if (inotify_fd_ >= 0)
+    close(inotify_fd_);
+  if (shutdown_pipe_[0] >= 0)
+    close(shutdown_pipe_[0]);
+  if (shutdown_pipe_[1] >= 0)
+    close(shutdown_pipe_[1]);
+}
+
+// Registers an inotify watch on |path| (directories only — IN_ONLYDIR)
+// and records |watcher| as a recipient of its events. Returns
+// kInvalidWatch if the reader failed to initialize or the kernel call
+// fails.
+InotifyReader::Watch InotifyReader::AddWatch(
+    const FilePath& path, FilePathWatcherImpl* watcher) {
+  if (!valid_)
+    return kInvalidWatch;
+
+  AutoLock auto_lock(lock_);
+
+  Watch watch = inotify_add_watch(inotify_fd_, path.value().c_str(),
+                                  IN_ATTRIB | IN_CREATE | IN_DELETE |
+                                  IN_CLOSE_WRITE | IN_MOVE |
+                                  IN_ONLYDIR);
+
+  if (watch == kInvalidWatch)
+    return kInvalidWatch;
+
+  watchers_[watch].insert(watcher);
+
+  return watch;
+}
+
+// Unsubscribes |watcher| from |watch|, and removes the kernel watch
+// once no watcher is interested in it anymore.
+void InotifyReader::RemoveWatch(Watch watch, FilePathWatcherImpl* watcher) {
+  if (!valid_ || (watch == kInvalidWatch))
+    return;
+
+  AutoLock auto_lock(lock_);
+
+  watchers_[watch].erase(watcher);
+
+  if (watchers_[watch].empty()) {
+    watchers_.erase(watch);
+    inotify_rm_watch(inotify_fd_, watch);
+  }
+}
+
+// Fans a single inotify event out to every watcher registered for its
+// watch descriptor. IN_IGNORED (watch implicitly removed) is dropped.
+void InotifyReader::OnInotifyEvent(const inotify_event* event) {
+  if (event->mask & IN_IGNORED)
+    return;
+
+  // |len| == 0 means the event is for the watched directory itself.
+  FilePath::StringType child(event->len ? event->name : FILE_PATH_LITERAL(""));
+  AutoLock auto_lock(lock_);
+
+  for (WatcherSet::iterator watcher = watchers_[event->wd].begin();
+       watcher != watchers_[event->wd].end();
+       ++watcher) {
+    (*watcher)->OnFilePathChanged(event->wd,
+                                  child,
+                                  event->mask & (IN_CREATE | IN_MOVED_TO),
+                                  event->mask & (IN_DELETE | IN_MOVED_FROM),
+                                  event->mask & IN_ISDIR);
+  }
+}
+
+// Watches start out non-recursive; Watch() sets the remaining state.
+FilePathWatcherImpl::FilePathWatcherImpl()
+    : recursive_(false) {
+}
+
+// Translates one inotify notification into watch-list updates and user
+// callbacks. Re-posts itself onto task_runner() when called from the
+// InotifyReader thread so |watches_| is only touched on one thread.
+void FilePathWatcherImpl::OnFilePathChanged(InotifyReader::Watch fired_watch,
+                                            const FilePath::StringType& child,
+                                            bool created,
+                                            bool deleted,
+                                            bool is_dir) {
+  if (!task_runner()->BelongsToCurrentThread()) {
+    // Switch to task_runner() to access |watches_| safely.
+    task_runner()->PostTask(FROM_HERE,
+                            Bind(&FilePathWatcherImpl::OnFilePathChanged, this,
+                                 fired_watch, child, created, deleted, is_dir));
+    return;
+  }
+
+  // Check to see if CancelOnMessageLoopThread() has already been called.
+  // May happen when code flow reaches here from the PostTask() above.
+  if (watches_.empty()) {
+    DCHECK(target_.empty());
+    return;
+  }
+
+  DCHECK(MessageLoopForIO::current());
+  DCHECK(HasValidWatchVector());
+
+  // Used below to avoid multiple recursive updates.
+  bool did_update = false;
+
+  // Find the entry in |watches_| that corresponds to |fired_watch|.
+  for (size_t i = 0; i < watches_.size(); ++i) {
+    const WatchEntry& watch_entry = watches_[i];
+    if (fired_watch != watch_entry.watch)
+      continue;
+
+    // Check whether a path component of |target_| changed.
+    bool change_on_target_path =
+        child.empty() ||
+        (child == watch_entry.linkname) ||
+        (child == watch_entry.subdir);
+
+    // Check if the change references |target_| or a direct child of |target_|.
+    bool target_changed;
+    if (watch_entry.subdir.empty()) {
+      // The fired watch is for a WatchEntry without a subdir. Thus for a given
+      // |target_| = "/path/to/foo", this is for "foo". Here, check either:
+      // - the target has no symlink: it is the target and it changed.
+      // - the target has a symlink, and it matches |child|.
+      target_changed = (watch_entry.linkname.empty() ||
+                        child == watch_entry.linkname);
+    } else {
+      // The fired watch is for a WatchEntry with a subdir. Thus for a given
+      // |target_| = "/path/to/foo", this is for {"/", "/path", "/path/to"}.
+      // So we can safely access the next WatchEntry since we have not reached
+      // the end yet. Check |watch_entry| is for "/path/to", i.e. the next
+      // element is "foo".
+      bool next_watch_may_be_for_target = watches_[i + 1].subdir.empty();
+      if (next_watch_may_be_for_target) {
+        // The current |watch_entry| is for "/path/to", so check if the |child|
+        // that changed is "foo".
+        target_changed = watch_entry.subdir == child;
+      } else {
+        // The current |watch_entry| is not for "/path/to", so the next entry
+        // cannot be "foo". Thus |target_| has not changed.
+        target_changed = false;
+      }
+    }
+
+    // Update watches if a directory component of the |target_| path
+    // (dis)appears. Note that we don't add the additional restriction of
+    // checking the event mask to see if it is for a directory here as changes
+    // to symlinks on the target path will not have IN_ISDIR set in the event
+    // masks. As a result we may sometimes call UpdateWatches() unnecessarily.
+    if (change_on_target_path && (created || deleted) && !did_update) {
+      UpdateWatches();
+      did_update = true;
+    }
+
+    // Report the following events:
+    // - The target or a direct child of the target got changed (in case the
+    //   watched path refers to a directory).
+    // - One of the parent directories got moved or deleted, since the target
+    //   disappears in this case.
+    // - One of the parent directories appears. The event corresponding to
+    //   the target appearing might have been missed in this case, so recheck.
+    if (target_changed ||
+        (change_on_target_path && deleted) ||
+        (change_on_target_path && created && PathExists(target_))) {
+      if (!did_update) {
+        UpdateRecursiveWatches(fired_watch, is_dir);
+        did_update = true;
+      }
+      callback_.Run(target_, false /* error */);
+      return;
+    }
+  }
+
+  // The watch was not on the target path itself; if it belongs to the
+  // recursive watch set, refresh it and notify.
+  if (ContainsKey(recursive_paths_by_watch_, fired_watch)) {
+    if (!did_update)
+      UpdateRecursiveWatches(fired_watch, is_dir);
+    callback_.Run(target_, false /* error */);
+  }
+}
+
+// Starts the watch: records the callback/target, then builds one
+// WatchEntry per directory component (skipping the root) plus a
+// terminal entry with an empty |subdir| for |target_| itself, and
+// installs the inotify watches via UpdateWatches().
+bool FilePathWatcherImpl::Watch(const FilePath& path,
+                                bool recursive,
+                                const FilePathWatcher::Callback& callback) {
+  DCHECK(target_.empty());
+  DCHECK(MessageLoopForIO::current());
+
+  set_task_runner(ThreadTaskRunnerHandle::Get());
+  callback_ = callback;
+  target_ = path;
+  recursive_ = recursive;
+  MessageLoop::current()->AddDestructionObserver(this);
+
+  std::vector<FilePath::StringType> comps;
+  target_.GetComponents(&comps);
+  DCHECK(!comps.empty());
+  for (size_t i = 1; i < comps.size(); ++i)
+    watches_.push_back(WatchEntry(comps[i]));
+  watches_.push_back(WatchEntry(FilePath::StringType()));
+  UpdateWatches();
+  return true;
+}
+
+// Cancels the watch. Safe to call from any thread: if not already on the
+// watcher's task runner, cancellation is posted there so |watches_| is only
+// touched on its owning thread.
+void FilePathWatcherImpl::Cancel() {
+  if (callback_.is_null()) {
+    // Watch was never called, or the message_loop() thread is already gone.
+    set_cancelled();
+    return;
+  }
+
+  // Switch to the message_loop() if necessary so we can access |watches_|.
+  if (!task_runner()->BelongsToCurrentThread()) {
+    task_runner()->PostTask(FROM_HERE, Bind(&FilePathWatcher::CancelWatch,
+                                            make_scoped_refptr(this)));
+  } else {
+    CancelOnMessageLoopThread();
+  }
+}
+
+// Performs the actual teardown on the watcher's own thread: unregisters the
+// destruction observer, drops the callback, removes every inotify watch
+// (including recursive ones) and clears all bookkeeping state.
+void FilePathWatcherImpl::CancelOnMessageLoopThread() {
+  DCHECK(task_runner()->BelongsToCurrentThread());
+  set_cancelled();
+
+  if (!callback_.is_null()) {
+    MessageLoop::current()->RemoveDestructionObserver(this);
+    callback_.Reset();
+  }
+
+  // Release every per-component watch held for the target path.
+  for (size_t i = 0; i < watches_.size(); ++i)
+    g_inotify_reader.Get().RemoveWatch(watches_[i].watch, this);
+  watches_.clear();
+  target_.clear();
+
+  if (recursive_)
+    RemoveRecursiveWatches();
+}
+
+// MessageLoop::DestructionObserver: the loop this watcher lives on is going
+// away, so tear everything down now while the thread still exists.
+void FilePathWatcherImpl::WillDestroyCurrentMessageLoop() {
+  CancelOnMessageLoopThread();
+}
+
+// Re-resolves the inotify watch for every component of |target_|'s path,
+// starting from "/". Called initially from Watch() and again whenever a
+// directory on the path (dis)appears. Finishes by refreshing recursive
+// watches (a no-op when |recursive_| is false).
+void FilePathWatcherImpl::UpdateWatches() {
+  // Ensure this runs on the message_loop() exclusively in order to avoid
+  // concurrency issues.
+  DCHECK(task_runner()->BelongsToCurrentThread());
+  DCHECK(HasValidWatchVector());
+
+  // Walk the list of watches and update them as we go.
+  FilePath path(FILE_PATH_LITERAL("/"));
+  for (size_t i = 0; i < watches_.size(); ++i) {
+    WatchEntry& watch_entry = watches_[i];
+    InotifyReader::Watch old_watch = watch_entry.watch;
+    watch_entry.watch = InotifyReader::kInvalidWatch;
+    watch_entry.linkname.clear();
+    watch_entry.watch = g_inotify_reader.Get().AddWatch(path, this);
+    if (watch_entry.watch == InotifyReader::kInvalidWatch) {
+      // Ignore the error code (beyond symlink handling) to attempt to add
+      // watches on accessible children of unreadable directories. Note that
+      // this is a best-effort attempt; we may not catch events in this
+      // scenario.
+      if (IsLink(path))
+        AddWatchForBrokenSymlink(path, &watch_entry);
+    }
+    // Only drop the previous watch if AddWatch() handed back a different one;
+    // identical descriptors mean the kernel-side watch was reused.
+    if (old_watch != watch_entry.watch)
+      g_inotify_reader.Get().RemoveWatch(old_watch, this);
+    path = path.Append(watch_entry.subdir);
+  }
+
+  UpdateRecursiveWatches(InotifyReader::kInvalidWatch,
+                         false /* is directory? */);
+}
+
+// Refreshes the set of recursive watches below |target_| after the watch
+// identified by |fired_watch| reported an event. |is_dir| is the IN_ISDIR
+// bit of that event. Passing kInvalidWatch forces a full re-scan from
+// |target_| (used by UpdateWatches()).
+void FilePathWatcherImpl::UpdateRecursiveWatches(
+    InotifyReader::Watch fired_watch,
+    bool is_dir) {
+  if (!recursive_)
+    return;
+
+  // If the target itself is gone (or not a directory), nothing below it can
+  // be watched any more.
+  if (!DirectoryExists(target_)) {
+    RemoveRecursiveWatches();
+    return;
+  }
+
+  // Check to see if this is a forced update or if some component of |target_|
+  // has changed. For these cases, redo the watches for |target_| and below.
+  if (!ContainsKey(recursive_paths_by_watch_, fired_watch)) {
+    UpdateRecursiveWatchesForPath(target_);
+    return;
+  }
+
+  // Underneath |target_|, only directory changes trigger watch updates.
+  if (!is_dir)
+    return;
+
+  const FilePath& changed_dir = recursive_paths_by_watch_[fired_watch];
+
+  // Remove watches for every tracked path under |changed_dir| that no longer
+  // exists. lower_bound() positions us at the first path >= |changed_dir|;
+  // the IsParent() check bounds the scan to its subtree.
+  std::map<FilePath, InotifyReader::Watch>::iterator start_it =
+      recursive_watches_by_path_.lower_bound(changed_dir);
+  std::map<FilePath, InotifyReader::Watch>::iterator end_it = start_it;
+  for (; end_it != recursive_watches_by_path_.end(); ++end_it) {
+    const FilePath& cur_path = end_it->first;
+    if (!changed_dir.IsParent(cur_path))
+      break;
+    if (!DirectoryExists(cur_path))
+      g_inotify_reader.Get().RemoveWatch(end_it->second, this);
+  }
+  recursive_watches_by_path_.erase(start_it, end_it);
+  // Re-enumerate the changed subtree to pick up new/renamed directories.
+  UpdateRecursiveWatchesForPath(changed_dir);
+}
+
+// Recursively enumerates directories under |path| and makes sure each one has
+// an up-to-date inotify watch tracked in the two recursive maps.
+void FilePathWatcherImpl::UpdateRecursiveWatchesForPath(const FilePath& path) {
+  DCHECK(recursive_);
+  DCHECK(!path.empty());
+  DCHECK(DirectoryExists(path));
+
+  // Note: SHOW_SYM_LINKS exposes symlinks as symlinks, so they are ignored
+  // rather than followed. Following symlinks can easily lead to the undesirable
+  // situation where the entire file system is being watched.
+  FileEnumerator enumerator(
+      path,
+      true /* recursive enumeration */,
+      FileEnumerator::DIRECTORIES | FileEnumerator::SHOW_SYM_LINKS);
+  for (FilePath current = enumerator.Next();
+       !current.empty();
+       current = enumerator.Next()) {
+    DCHECK(enumerator.GetInfo().IsDirectory());
+
+    if (!ContainsKey(recursive_watches_by_path_, current)) {
+      // Add new watches.
+      InotifyReader::Watch watch =
+          g_inotify_reader.Get().AddWatch(current, this);
+      TrackWatchForRecursion(watch, current);
+    } else {
+      // Update existing watches.
+      InotifyReader::Watch old_watch = recursive_watches_by_path_[current];
+      DCHECK_NE(InotifyReader::kInvalidWatch, old_watch);
+      InotifyReader::Watch watch =
+          g_inotify_reader.Get().AddWatch(current, this);
+      // A changed descriptor means the old watch no longer refers to this
+      // directory (e.g. it was replaced); swap the bookkeeping over.
+      if (watch != old_watch) {
+        g_inotify_reader.Get().RemoveWatch(old_watch, this);
+        recursive_paths_by_watch_.erase(old_watch);
+        recursive_watches_by_path_.erase(current);
+        TrackWatchForRecursion(watch, current);
+      }
+    }
+  }
+}
+
+// Records |watch| <-> |path| in both recursive lookup maps. Ignores invalid
+// watches (AddWatch() failures) so the maps only ever hold live descriptors.
+void FilePathWatcherImpl::TrackWatchForRecursion(InotifyReader::Watch watch,
+                                                 const FilePath& path) {
+  DCHECK(recursive_);
+  DCHECK(!path.empty());
+  DCHECK(target_.IsParent(path));
+
+  if (watch == InotifyReader::kInvalidWatch)
+    return;
+
+  // The maps must stay a bijection: neither side may already be present.
+  DCHECK(!ContainsKey(recursive_paths_by_watch_, watch));
+  DCHECK(!ContainsKey(recursive_watches_by_path_, path));
+  recursive_paths_by_watch_[watch] = path;
+  recursive_watches_by_path_[path] = watch;
+}
+
+// Drops every recursive watch and clears both lookup maps. No-op for
+// non-recursive watchers.
+void FilePathWatcherImpl::RemoveRecursiveWatches() {
+  if (!recursive_)
+    return;
+
+  for (hash_map<InotifyReader::Watch, FilePath>::const_iterator it =
+           recursive_paths_by_watch_.begin();
+       it != recursive_paths_by_watch_.end();
+       ++it) {
+    g_inotify_reader.Get().RemoveWatch(it->first, this);
+  }
+  recursive_paths_by_watch_.clear();
+  recursive_watches_by_path_.clear();
+}
+
+// |path| is a symlink whose target could not be watched directly. Watch the
+// parent directory of the link target instead so we notice when the target
+// appears, recording the target's basename in |watch_entry->linkname|.
+// Best-effort: silently gives up if the link cannot be read or the parent
+// cannot be watched.
+void FilePathWatcherImpl::AddWatchForBrokenSymlink(const FilePath& path,
+                                                   WatchEntry* watch_entry) {
+  DCHECK_EQ(InotifyReader::kInvalidWatch, watch_entry->watch);
+  FilePath link;
+  if (!ReadSymbolicLink(path, &link))
+    return;
+
+  // Resolve relative link targets against the link's own directory.
+  if (!link.IsAbsolute())
+    link = path.DirName().Append(link);
+
+  // Try watching symlink target directory. If the link target is "/", then we
+  // shouldn't get here in normal situations and if we do, we'd watch "/" for
+  // changes to a component "/" which is harmless so no special treatment of
+  // this case is required.
+  InotifyReader::Watch watch =
+      g_inotify_reader.Get().AddWatch(link.DirName(), this);
+  if (watch == InotifyReader::kInvalidWatch) {
+    // TODO(craig) Symlinks only work if the parent directory for the target
+    // exist. Ideally we should make sure we've watched all the components of
+    // the symlink path for changes. See crbug.com/91561 for details.
+    DPLOG(WARNING) << "Watch failed for  " << link.DirName().value();
+    return;
+  }
+  watch_entry->watch = watch;
+  watch_entry->linkname = link.BaseName().value();
+}
+
+// Sanity-checks the invariant established by Watch(): |watches_| is
+// non-empty, every entry except the last names a path component (non-empty
+// subdir), and the final entry has an empty subdir marking the target itself.
+bool FilePathWatcherImpl::HasValidWatchVector() const {
+  if (watches_.empty())
+    return false;
+  for (size_t i = 0; i < watches_.size() - 1; ++i) {
+    if (watches_[i].subdir.empty())
+      return false;
+  }
+  return watches_.back().subdir.empty();
+}
+
+} // namespace
+
+// Public constructor: selects the inotify-based implementation on this
+// platform. |impl_| is a scoped_refptr, so assignment takes a reference.
+FilePathWatcher::FilePathWatcher() {
+  impl_ = new FilePathWatcherImpl();
+}
+
+} // namespace base
diff --git a/libchrome/base/files/file_path_watcher_mac.cc b/libchrome/base/files/file_path_watcher_mac.cc
new file mode 100644
index 0000000..7338eaf
--- /dev/null
+++ b/libchrome/base/files/file_path_watcher_mac.cc
@@ -0,0 +1,61 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path_watcher.h"
+#include "base/files/file_path_watcher_kqueue.h"
+#include "build/build_config.h"
+
+#if !defined(OS_IOS)
+#include "base/files/file_path_watcher_fsevents.h"
+#endif
+
+namespace base {
+
+namespace {
+
+// Mac delegate that forwards to one of two concrete watchers chosen at
+// Watch() time: FilePathWatcherKQueue for non-recursive watches, and
+// FilePathWatcherFSEvents for recursive ones (unavailable on iOS).
+class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
+ public:
+  bool Watch(const FilePath& path,
+             bool recursive,
+             const FilePathWatcher::Callback& callback) override {
+    // Use kqueue for non-recursive watches and FSEvents for recursive ones.
+    DCHECK(!impl_.get());
+    if (recursive) {
+      // On iOS RecursiveWatchAvailable() returns false, so the unguarded
+      // DCHECK below is never reached with a null |impl_|.
+      if (!FilePathWatcher::RecursiveWatchAvailable())
+        return false;
+#if !defined(OS_IOS)
+      impl_ = new FilePathWatcherFSEvents();
+#endif  // !defined(OS_IOS)
+    } else {
+      impl_ = new FilePathWatcherKQueue();
+    }
+    DCHECK(impl_.get());
+    return impl_->Watch(path, recursive, callback);
+  }
+
+  void Cancel() override {
+    if (impl_.get())
+      impl_->Cancel();
+    set_cancelled();
+  }
+
+  void CancelOnMessageLoopThread() override {
+    if (impl_.get())
+      impl_->Cancel();
+    set_cancelled();
+  }
+
+ protected:
+  ~FilePathWatcherImpl() override {}
+
+  // The concrete watcher doing the work; null until Watch() is called.
+  scoped_refptr<PlatformDelegate> impl_;
+};
+
+} // namespace
+
+// Public constructor: installs the Mac kqueue/FSEvents-dispatching delegate.
+FilePathWatcher::FilePathWatcher() {
+  impl_ = new FilePathWatcherImpl();
+}
+
+} // namespace base
diff --git a/libchrome/base/files/file_path_watcher_unittest.cc b/libchrome/base/files/file_path_watcher_unittest.cc
new file mode 100644
index 0000000..a40e485
--- /dev/null
+++ b/libchrome/base/files/file_path_watcher_unittest.cc
@@ -0,0 +1,914 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path_watcher.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include <aclapi.h>
+#elif defined(OS_POSIX)
+#include <sys/stat.h>
+#endif
+
+#include <set>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/compiler_specific.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/test_file_util.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_ANDROID)
+#include "base/android/path_utils.h"
+#endif // defined(OS_ANDROID)
+
+namespace base {
+
+namespace {
+
+class TestDelegate;
+
+// Aggregates notifications from the test delegates and breaks the message loop
+// the test thread is waiting on once they all came in.
+// Aggregates notifications from the test delegates and breaks the message loop
+// the test thread is waiting on once they all came in.
+class NotificationCollector
+    : public base::RefCountedThreadSafe<NotificationCollector> {
+ public:
+  // Captures the test thread's task runner so change reports from the file
+  // thread can be marshalled back to it.
+  NotificationCollector() : task_runner_(base::ThreadTaskRunnerHandle::Get()) {}
+
+  // Called from the file thread by the delegates.
+  void OnChange(TestDelegate* delegate) {
+    task_runner_->PostTask(
+        FROM_HERE, base::Bind(&NotificationCollector::RecordChange, this,
+                              base::Unretained(delegate)));
+  }
+
+  // Registers |delegate| as one of the set that must all signal for
+  // Success() to hold. Not thread-safe; call before watching starts.
+  void Register(TestDelegate* delegate) {
+    delegates_.insert(delegate);
+  }
+
+  // Forgets which delegates have signaled, keeping the registered set.
+  void Reset() {
+    signaled_.clear();
+  }
+
+  // True once every registered delegate has reported a change.
+  bool Success() {
+    return signaled_ == delegates_;
+  }
+
+ private:
+  friend class base::RefCountedThreadSafe<NotificationCollector>;
+  ~NotificationCollector() {}
+
+  void RecordChange(TestDelegate* delegate) {
+    // Warning: |delegate| is Unretained. Do not dereference.
+    ASSERT_TRUE(task_runner_->BelongsToCurrentThread());
+    ASSERT_TRUE(delegates_.count(delegate));
+    signaled_.insert(delegate);
+
+    // Check whether all delegates have been signaled.
+    if (signaled_ == delegates_)
+      task_runner_->PostTask(FROM_HERE, MessageLoop::QuitWhenIdleClosure());
+  }
+
+  // Set of registered delegates.
+  std::set<TestDelegate*> delegates_;
+
+  // Set of signaled delegates.
+  std::set<TestDelegate*> signaled_;
+
+  // The loop we should break after all delegates signaled.
+  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+};
+
+// Interface for receiving FilePathWatcher callbacks in tests. Supports weak
+// pointers so a watcher callback bound to a destroyed delegate is dropped
+// instead of dereferencing freed memory.
+class TestDelegateBase : public SupportsWeakPtr<TestDelegateBase> {
+ public:
+  TestDelegateBase() {}
+  virtual ~TestDelegateBase() {}
+
+  // Invoked with the watched |path| and whether the watcher hit an error.
+  virtual void OnFileChanged(const FilePath& path, bool error) = 0;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TestDelegateBase);
+};
+
+// A mock class for testing. Gmock is not appropriate because it is not
+// thread-safe for setting expectations. Thus the test code cannot safely
+// reset expectations while the file watcher is running.
+// Instead, TestDelegate gets the notifications from FilePathWatcher and uses
+// NotificationCollector to aggregate the results.
+// A mock class for testing. Gmock is not appropriate because it is not
+// thread-safe for setting expectations. Thus the test code cannot safely
+// reset expectations while the file watcher is running.
+// Instead, TestDelegate gets the notifications from FilePathWatcher and uses
+// NotificationCollector to aggregate the results.
+class TestDelegate : public TestDelegateBase {
+ public:
+  // Registers itself with |collector|; |collector| must outlive the delegate.
+  explicit TestDelegate(NotificationCollector* collector)
+      : collector_(collector) {
+    collector_->Register(this);
+  }
+  ~TestDelegate() override {}
+
+  // Error notifications fail the test; successful changes are forwarded to
+  // the collector, which tallies them on the main thread.
+  void OnFileChanged(const FilePath& path, bool error) override {
+    if (error)
+      ADD_FAILURE() << "Error " << path.value();
+    else
+      collector_->OnChange(this);
+  }
+
+ private:
+  scoped_refptr<NotificationCollector> collector_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestDelegate);
+};
+
+// Runs on the file thread: starts |watcher| watching |target|, stores the
+// Watch() result in |*result| and signals |completion| so the calling thread
+// can resume. The callback holds only a weak pointer to |delegate|.
+void SetupWatchCallback(const FilePath& target,
+                        FilePathWatcher* watcher,
+                        TestDelegateBase* delegate,
+                        bool recursive_watch,
+                        bool* result,
+                        base::WaitableEvent* completion) {
+  *result = watcher->Watch(target, recursive_watch,
+                           base::Bind(&TestDelegateBase::OnFileChanged,
+                                      delegate->AsWeakPtr()));
+  completion->Signal();
+}
+
+// Fixture providing a temp dir, a dedicated IO ("file") thread for the
+// watchers, and a NotificationCollector to await expected change events.
+class FilePathWatcherTest : public testing::Test {
+ public:
+  FilePathWatcherTest()
+      : file_thread_("FilePathWatcherTest") {}
+
+  ~FilePathWatcherTest() override {}
+
+ protected:
+  void SetUp() override {
+    // Create a separate file thread in order to test proper thread usage.
+    base::Thread::Options options(MessageLoop::TYPE_IO, 0);
+    ASSERT_TRUE(file_thread_.StartWithOptions(options));
+#if defined(OS_ANDROID)
+    // Watching files is only permitted when all parent directories are
+    // accessible, which is not the case for the default temp directory
+    // on Android which is under /data/data.  Use /sdcard instead.
+    // TODO(pauljensen): Remove this when crbug.com/475568 is fixed.
+    FilePath parent_dir;
+    ASSERT_TRUE(android::GetExternalStorageDirectory(&parent_dir));
+    ASSERT_TRUE(temp_dir_.CreateUniqueTempDirUnderPath(parent_dir));
+#else   // defined(OS_ANDROID)
+    ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+#endif  // defined(OS_ANDROID)
+    collector_ = new NotificationCollector();
+  }
+
+  // Drain any tasks queued on the main loop so nothing leaks between tests.
+  void TearDown() override { RunLoop().RunUntilIdle(); }
+
+  // Delegates must die on the file thread after the watcher stops using them.
+  void DeleteDelegateOnFileThread(TestDelegate* delegate) {
+    file_thread_.task_runner()->DeleteSoon(FROM_HERE, delegate);
+  }
+
+  // Canonical file path used by most single-file tests.
+  FilePath test_file() {
+    return temp_dir_.path().AppendASCII("FilePathWatcherTest");
+  }
+
+  // Canonical symlink path used by the symlink tests.
+  FilePath test_link() {
+    return temp_dir_.path().AppendASCII("FilePathWatcherTest.lnk");
+  }
+
+  // Write |content| to |file|. Returns true on success.
+  bool WriteFile(const FilePath& file, const std::string& content) {
+    int write_size = ::base::WriteFile(file, content.c_str(), content.length());
+    return write_size == static_cast<int>(content.length());
+  }
+
+  bool SetupWatch(const FilePath& target,
+                  FilePathWatcher* watcher,
+                  TestDelegateBase* delegate,
+                  bool recursive_watch) WARN_UNUSED_RESULT;
+
+  // Spins the main loop until every registered delegate signaled, or the
+  // action timeout elapsed. Returns whether all delegates were signaled.
+  bool WaitForEvents() WARN_UNUSED_RESULT {
+    collector_->Reset();
+    // Make sure we timeout if we don't get notified.
+    loop_.PostDelayedTask(FROM_HERE,
+                          MessageLoop::QuitWhenIdleClosure(),
+                          TestTimeouts::action_timeout());
+    RunLoop().Run();
+    return collector_->Success();
+  }
+
+  NotificationCollector* collector() { return collector_.get(); }
+
+  MessageLoop loop_;
+  base::Thread file_thread_;
+  ScopedTempDir temp_dir_;
+  scoped_refptr<NotificationCollector> collector_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FilePathWatcherTest);
+};
+
+// Starts |watcher| on the file thread (Watch() must run there) and blocks the
+// calling thread until setup completes. Returns Watch()'s result.
+bool FilePathWatcherTest::SetupWatch(const FilePath& target,
+                                     FilePathWatcher* watcher,
+                                     TestDelegateBase* delegate,
+                                     bool recursive_watch) {
+  base::WaitableEvent completion(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                 WaitableEvent::InitialState::NOT_SIGNALED);
+  bool result;
+  file_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(SetupWatchCallback, target, watcher, delegate,
+                            recursive_watch, &result, &completion));
+  // |result| and |completion| live on this stack frame; safe because we wait.
+  completion.Wait();
+  return result;
+}
+
+// Basic test: Create the file and verify that we notice.
+// Basic test: Create the file and verify that we notice.
+TEST_F(FilePathWatcherTest, NewFile) {
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  // Watch a path that does not exist yet; creation should still be reported.
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
+
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+// Verify that modifying the file is caught.
+// Verify that modifying the file is caught.
+TEST_F(FilePathWatcherTest, ModifiedFile) {
+  // File exists before the watch starts.
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
+
+  // Now make sure we get notified if the file is modified.
+  ASSERT_TRUE(WriteFile(test_file(), "new content"));
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+// Verify that moving the file into place is caught.
+// Verify that moving the file into place is caught.
+TEST_F(FilePathWatcherTest, MovedFile) {
+  FilePath source_file(temp_dir_.path().AppendASCII("source"));
+  ASSERT_TRUE(WriteFile(source_file, "content"));
+
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
+
+  // Rename onto the watched path; the watcher should report the appearance.
+  ASSERT_TRUE(base::Move(source_file, test_file()));
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+// Verify that deleting the watched file is caught.
+TEST_F(FilePathWatcherTest, DeletedFile) {
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
+
+  // Now make sure we get notified if the file is deleted.
+  base::DeleteFile(test_file(), false);
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+// Used by the DeleteDuringNotify test below.
+// Deletes the FilePathWatcher when it's notified.
+// Used by the DeleteDuringNotify test below.
+// Deletes the FilePathWatcher when it's notified.
+class Deleter : public TestDelegateBase {
+ public:
+  // Takes ownership of |watcher|; |loop| is the test's main loop to quit.
+  Deleter(FilePathWatcher* watcher, MessageLoop* loop)
+      : watcher_(watcher),
+        loop_(loop) {
+  }
+  ~Deleter() override {}
+
+  // Destroys the watcher from inside its own callback, then unblocks the
+  // test loop. Exercises re-entrant destruction.
+  void OnFileChanged(const FilePath&, bool) override {
+    watcher_.reset();
+    loop_->task_runner()->PostTask(FROM_HERE,
+                                   MessageLoop::QuitWhenIdleClosure());
+  }
+
+  FilePathWatcher* watcher() const { return watcher_.get(); }
+
+ private:
+  std::unique_ptr<FilePathWatcher> watcher_;
+  MessageLoop* loop_;
+
+  DISALLOW_COPY_AND_ASSIGN(Deleter);
+};
+
+// Verify that deleting a watcher during the callback doesn't crash.
+// Verify that deleting a watcher during the callback doesn't crash.
+TEST_F(FilePathWatcherTest, DeleteDuringNotify) {
+  FilePathWatcher* watcher = new FilePathWatcher;
+  // Takes ownership of watcher.
+  std::unique_ptr<Deleter> deleter(new Deleter(watcher, &loop_));
+  ASSERT_TRUE(SetupWatch(test_file(), watcher, deleter.get(), false));
+
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  ASSERT_TRUE(WaitForEvents());
+
+  // We win if we haven't crashed yet.
+  // Might as well double-check it got deleted, too.
+  ASSERT_TRUE(deleter->watcher() == NULL);
+}
+
+// Verify that deleting the watcher works even if there is a pending
+// notification.
+// Flaky on MacOS (and ARM linux): http://crbug.com/85930
+// Verify that deleting the watcher works even if there is a pending
+// notification. (Disabled: flaky on MacOS and ARM Linux, crbug.com/85930.)
+TEST_F(FilePathWatcherTest, DISABLED_DestroyWithPendingNotification) {
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  FilePathWatcher* watcher = new FilePathWatcher;
+  ASSERT_TRUE(SetupWatch(test_file(), watcher, delegate.get(), false));
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  // Destroy the watcher on the file thread while an event may be in flight.
+  file_thread_.task_runner()->DeleteSoon(FROM_HERE, watcher);
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+// Verify that two independent watchers on the same path both get notified.
+TEST_F(FilePathWatcherTest, MultipleWatchersSingleFile) {
+  FilePathWatcher watcher1, watcher2;
+  std::unique_ptr<TestDelegate> delegate1(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate2(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher1, delegate1.get(), false));
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher2, delegate2.get(), false));
+
+  // WaitForEvents() succeeds only if BOTH delegates signal.
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate1.release());
+  DeleteDelegateOnFileThread(delegate2.release());
+}
+
+// Verify that watching a file whose parent directory doesn't exist yet works if
+// the directory and file are created eventually.
+// Verify that watching a file whose parent directory doesn't exist yet works
+// if the directory and file are created eventually.
+TEST_F(FilePathWatcherTest, NonExistentDirectory) {
+  FilePathWatcher watcher;
+  FilePath dir(temp_dir_.path().AppendASCII("dir"));
+  FilePath file(dir.AppendASCII("file"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(file, &watcher, delegate.get(), false));
+
+  ASSERT_TRUE(base::CreateDirectory(dir));
+
+  ASSERT_TRUE(WriteFile(file, "content"));
+
+  VLOG(1) << "Waiting for file creation";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(WriteFile(file, "content v2"));
+  VLOG(1) << "Waiting for file change";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(base::DeleteFile(file, false));
+  VLOG(1) << "Waiting for file deletion";
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+// Exercises watch reconfiguration for the case that directories on the path
+// are rapidly created.
+// Exercises watch reconfiguration for the case that directories on the path
+// are rapidly created.
+TEST_F(FilePathWatcherTest, DirectoryChain) {
+  // Build a 20-level-deep path d0/d1/.../d19 under the temp dir.
+  FilePath path(temp_dir_.path());
+  std::vector<std::string> dir_names;
+  for (int i = 0; i < 20; i++) {
+    std::string dir(base::StringPrintf("d%d", i));
+    dir_names.push_back(dir);
+    path = path.AppendASCII(dir);
+  }
+
+  FilePathWatcher watcher;
+  FilePath file(path.AppendASCII("file"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(file, &watcher, delegate.get(), false));
+
+  // Create the chain one directory at a time, forcing repeated watch updates.
+  FilePath sub_path(temp_dir_.path());
+  for (std::vector<std::string>::const_iterator d(dir_names.begin());
+       d != dir_names.end(); ++d) {
+    sub_path = sub_path.AppendASCII(*d);
+    ASSERT_TRUE(base::CreateDirectory(sub_path));
+  }
+  VLOG(1) << "Create File";
+  ASSERT_TRUE(WriteFile(file, "content"));
+  VLOG(1) << "Waiting for file creation";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(WriteFile(file, "content v2"));
+  VLOG(1) << "Waiting for file modification";
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+#if defined(OS_MACOSX)
+// http://crbug.com/85930
+#define DisappearingDirectory DISABLED_DisappearingDirectory
+#endif
+// Verify that deleting the parent directory of the watched file is caught.
+TEST_F(FilePathWatcherTest, DisappearingDirectory) {
+  FilePathWatcher watcher;
+  FilePath dir(temp_dir_.path().AppendASCII("dir"));
+  FilePath file(dir.AppendASCII("file"));
+  ASSERT_TRUE(base::CreateDirectory(dir));
+  ASSERT_TRUE(WriteFile(file, "content"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(file, &watcher, delegate.get(), false));
+
+  // Recursive delete removes the watched file along with its parent.
+  ASSERT_TRUE(base::DeleteFile(dir, true));
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+// Tests that a file that is deleted and reappears is tracked correctly.
+// Tests that a file that is deleted and reappears is tracked correctly.
+TEST_F(FilePathWatcherTest, DeleteAndRecreate) {
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
+
+  ASSERT_TRUE(base::DeleteFile(test_file(), false));
+  VLOG(1) << "Waiting for file deletion";
+  ASSERT_TRUE(WaitForEvents());
+
+  // The watch must survive deletion and report the re-creation too.
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  VLOG(1) << "Waiting for file creation";
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+// Verify that watching a directory reports creation/modification/deletion of
+// its direct children (modification excluded on Mac).
+TEST_F(FilePathWatcherTest, WatchDirectory) {
+  FilePathWatcher watcher;
+  FilePath dir(temp_dir_.path().AppendASCII("dir"));
+  FilePath file1(dir.AppendASCII("file1"));
+  FilePath file2(dir.AppendASCII("file2"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(dir, &watcher, delegate.get(), false));
+
+  ASSERT_TRUE(base::CreateDirectory(dir));
+  VLOG(1) << "Waiting for directory creation";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(WriteFile(file1, "content"));
+  VLOG(1) << "Waiting for file1 creation";
+  ASSERT_TRUE(WaitForEvents());
+
+#if !defined(OS_MACOSX)
+  // Mac implementation does not detect files modified in a directory.
+  ASSERT_TRUE(WriteFile(file1, "content v2"));
+  VLOG(1) << "Waiting for file1 modification";
+  ASSERT_TRUE(WaitForEvents());
+#endif  // !OS_MACOSX
+
+  ASSERT_TRUE(base::DeleteFile(file1, false));
+  VLOG(1) << "Waiting for file1 deletion";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(WriteFile(file2, "content"));
+  VLOG(1) << "Waiting for file2 creation";
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+// Verify that moving an ancestor directory away notifies watchers of both a
+// file and a subdirectory underneath it.
+TEST_F(FilePathWatcherTest, MoveParent) {
+  FilePathWatcher file_watcher;
+  FilePathWatcher subdir_watcher;
+  FilePath dir(temp_dir_.path().AppendASCII("dir"));
+  FilePath dest(temp_dir_.path().AppendASCII("dest"));
+  FilePath subdir(dir.AppendASCII("subdir"));
+  FilePath file(subdir.AppendASCII("file"));
+  std::unique_ptr<TestDelegate> file_delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(file, &file_watcher, file_delegate.get(), false));
+  std::unique_ptr<TestDelegate> subdir_delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(subdir, &subdir_watcher, subdir_delegate.get(),
+                         false));
+
+  // Setup a directory hierarchy.
+  ASSERT_TRUE(base::CreateDirectory(subdir));
+  ASSERT_TRUE(WriteFile(file, "content"));
+  VLOG(1) << "Waiting for file creation";
+  ASSERT_TRUE(WaitForEvents());
+
+  // Move the parent directory.
+  base::Move(dir, dest);
+  VLOG(1) << "Waiting for directory move";
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(file_delegate.release());
+  DeleteDelegateOnFileThread(subdir_delegate.release());
+}
+
+// End-to-end recursive watching: events must fire for creations, writes,
+// attribute changes and deletions at every depth under the watched root.
+// On platforms without recursive support, Watch() must fail cleanly.
+TEST_F(FilePathWatcherTest, RecursiveWatch) {
+  FilePathWatcher watcher;
+  FilePath dir(temp_dir_.path().AppendASCII("dir"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  bool setup_result = SetupWatch(dir, &watcher, delegate.get(), true);
+  if (!FilePathWatcher::RecursiveWatchAvailable()) {
+    ASSERT_FALSE(setup_result);
+    DeleteDelegateOnFileThread(delegate.release());
+    return;
+  }
+  ASSERT_TRUE(setup_result);
+
+  // Main directory("dir") creation.
+  ASSERT_TRUE(base::CreateDirectory(dir));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Create "$dir/file1".
+  FilePath file1(dir.AppendASCII("file1"));
+  ASSERT_TRUE(WriteFile(file1, "content"));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Create "$dir/subdir".
+  FilePath subdir(dir.AppendASCII("subdir"));
+  ASSERT_TRUE(base::CreateDirectory(subdir));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Create "$dir/subdir/subdir_file1".
+  FilePath subdir_file1(subdir.AppendASCII("subdir_file1"));
+  ASSERT_TRUE(WriteFile(subdir_file1, "content"));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Create "$dir/subdir/subdir_child_dir".
+  FilePath subdir_child_dir(subdir.AppendASCII("subdir_child_dir"));
+  ASSERT_TRUE(base::CreateDirectory(subdir_child_dir));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Create "$dir/subdir/subdir_child_dir/child_dir_file1".
+  FilePath child_dir_file1(subdir_child_dir.AppendASCII("child_dir_file1"));
+  ASSERT_TRUE(WriteFile(child_dir_file1, "content v2"));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Write into "$dir/subdir/subdir_child_dir/child_dir_file1".
+  ASSERT_TRUE(WriteFile(child_dir_file1, "content"));
+  ASSERT_TRUE(WaitForEvents());
+
+// Apps cannot change file attributes on Android in /sdcard as /sdcard uses the
+// "fuse" file system, while /data uses "ext4".  Running these tests in /data
+// would be preferable and allow testing file attributes and symlinks.
+// TODO(pauljensen): Re-enable when crbug.com/475568 is fixed and SetUp() places
+// the |temp_dir_| in /data.
+#if !defined(OS_ANDROID)
+  // Modify "$dir/subdir/subdir_child_dir/child_dir_file1" attributes.
+  ASSERT_TRUE(base::MakeFileUnreadable(child_dir_file1));
+  ASSERT_TRUE(WaitForEvents());
+#endif
+
+  // Delete "$dir/subdir/subdir_file1".
+  ASSERT_TRUE(base::DeleteFile(subdir_file1, false));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Delete "$dir/subdir/subdir_child_dir/child_dir_file1".
+  ASSERT_TRUE(base::DeleteFile(child_dir_file1, false));
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+#if defined(OS_POSIX)
+#if defined(OS_ANDROID)
+// Apps cannot create symlinks on Android in /sdcard as /sdcard uses the
+// "fuse" file system, while /data uses "ext4". Running these tests in /data
+// would be preferable and allow testing file attributes and symlinks.
+// TODO(pauljensen): Re-enable when crbug.com/475568 is fixed and SetUp() places
+// the |temp_dir_| in /data.
+#define RecursiveWithSymLink DISABLED_RecursiveWithSymLink
+#endif // defined(OS_ANDROID)
+// Recursive watch on a symlink: events must follow the link through creation,
+// retargeting, and file activity inside each successive target directory.
+TEST_F(FilePathWatcherTest, RecursiveWithSymLink) {
+  if (!FilePathWatcher::RecursiveWatchAvailable())
+    return;
+
+  FilePathWatcher watcher;
+  FilePath test_dir(temp_dir_.path().AppendASCII("test_dir"));
+  ASSERT_TRUE(base::CreateDirectory(test_dir));
+  FilePath symlink(test_dir.AppendASCII("symlink"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  // The symlink itself does not exist yet at watch-setup time.
+  ASSERT_TRUE(SetupWatch(symlink, &watcher, delegate.get(), true));
+
+  // Link creation.
+  FilePath target1(temp_dir_.path().AppendASCII("target1"));
+  ASSERT_TRUE(base::CreateSymbolicLink(target1, symlink));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Target1 creation.
+  ASSERT_TRUE(base::CreateDirectory(target1));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Create a file in target1.
+  FilePath target1_file(target1.AppendASCII("file"));
+  ASSERT_TRUE(WriteFile(target1_file, "content"));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Link change.
+  FilePath target2(temp_dir_.path().AppendASCII("target2"));
+  ASSERT_TRUE(base::CreateDirectory(target2));
+  ASSERT_TRUE(base::DeleteFile(symlink, false));
+  ASSERT_TRUE(base::CreateSymbolicLink(target2, symlink));
+  ASSERT_TRUE(WaitForEvents());
+
+  // Create a file in target2.
+  FilePath target2_file(target2.AppendASCII("file"));
+  ASSERT_TRUE(WriteFile(target2_file, "content"));
+  ASSERT_TRUE(WaitForEvents());
+
+  DeleteDelegateOnFileThread(delegate.release());
+}
+#endif // OS_POSIX
+
+// Verify that watched paths which come into existence via a directory move
+// (rather than direct creation) are reported.
+TEST_F(FilePathWatcherTest, MoveChild) {
+  FilePathWatcher file_watcher;
+  FilePathWatcher subdir_watcher;
+  FilePath source_dir(temp_dir_.path().AppendASCII("source"));
+  FilePath source_subdir(source_dir.AppendASCII("subdir"));
+  FilePath source_file(source_subdir.AppendASCII("file"));
+  FilePath dest_dir(temp_dir_.path().AppendASCII("dest"));
+  FilePath dest_subdir(dest_dir.AppendASCII("subdir"));
+  FilePath dest_file(dest_subdir.AppendASCII("file"));
+
+  // Setup a directory hierarchy.
+  ASSERT_TRUE(base::CreateDirectory(source_subdir));
+  ASSERT_TRUE(WriteFile(source_file, "content"));
+
+  // Watch the destination paths, which do not exist yet.
+  std::unique_ptr<TestDelegate> file_delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(dest_file, &file_watcher, file_delegate.get(), false));
+  std::unique_ptr<TestDelegate> subdir_delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(dest_subdir, &subdir_watcher, subdir_delegate.get(),
+                         false));
+
+  // Move the directory into place, s.t. the watched file appears.
+  ASSERT_TRUE(base::Move(source_dir, dest_dir));
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(file_delegate.release());
+  DeleteDelegateOnFileThread(subdir_delegate.release());
+}
+
+// Verify that changing attributes on a file is caught.
+#if defined(OS_ANDROID)
+// Apps cannot change file attributes on Android in /sdcard as /sdcard uses the
+// "fuse" file system, while /data uses "ext4". Running these tests in /data
+// would be preferable and allow testing file attributes and symlinks.
+// TODO(pauljensen): Re-enable when crbug.com/475568 is fixed and SetUp() places
+// the |temp_dir_| in /data.
+#define FileAttributesChanged DISABLED_FileAttributesChanged
+#endif  // defined(OS_ANDROID)
+TEST_F(FilePathWatcherTest, FileAttributesChanged) {
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
+
+  // Now make sure we get notified if the file's attributes change (here:
+  // permissions are removed).
+  ASSERT_TRUE(base::MakeFileUnreadable(test_file()));
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+#if defined(OS_LINUX)
+
+// Verify that creating a symlink is caught.
+TEST_F(FilePathWatcherTest, CreateLink) {
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  // Note that we are watching the symlink
+  ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
+
+  // Now make sure we get notified if the link is created.
+  // Note that test_file() doesn't have to exist.
+  ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+// Verify that deleting a symlink is caught.
+TEST_F(FilePathWatcherTest, DeleteLink) {
+  // Unfortunately this test case only works if the link target exists.
+  // TODO(craig) fix this as part of crbug.com/91561.
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
+
+  // Now make sure we get notified if the link is deleted.
+  ASSERT_TRUE(base::DeleteFile(test_link(), false));
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+// Verify that modifying a target file that a link is pointing to
+// when we are watching the link is caught.
+TEST_F(FilePathWatcherTest, ModifiedLinkedFile) {
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  // Note that we are watching the symlink.
+  ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
+
+  // Now make sure we get notified if the file is modified.
+  ASSERT_TRUE(WriteFile(test_file(), "new content"));
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+// Verify that creating a target file that a link is pointing to
+// when we are watching the link is caught.
+TEST_F(FilePathWatcherTest, CreateTargetLinkedFile) {
+  // The link is dangling at watch-setup time; only the target is created
+  // later.
+  ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  // Note that we are watching the symlink.
+  ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
+
+  // Now make sure we get notified if the target file is created.
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+// Verify that deleting a target file that a link is pointing to
+// when we are watching the link is caught.
+TEST_F(FilePathWatcherTest, DeleteTargetLinkedFile) {
+  ASSERT_TRUE(WriteFile(test_file(), "content"));
+  ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  // Note that we are watching the symlink.
+  ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
+
+  // Now make sure we get notified if the target file is deleted.
+  ASSERT_TRUE(base::DeleteFile(test_file(), false));
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+// Verify that watching a file whose parent directory is a link that
+// doesn't exist yet works if the symlink is created eventually.
+TEST_F(FilePathWatcherTest, LinkedDirectoryPart1) {
+  FilePathWatcher watcher;
+  FilePath dir(temp_dir_.path().AppendASCII("dir"));
+  FilePath link_dir(temp_dir_.path().AppendASCII("dir.lnk"));
+  FilePath file(dir.AppendASCII("file"));
+  FilePath linkfile(link_dir.AppendASCII("file"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  // dir/file should exist.
+  ASSERT_TRUE(base::CreateDirectory(dir));
+  ASSERT_TRUE(WriteFile(file, "content"));
+  // Note that we are watching dir.lnk/file which doesn't exist yet.
+  ASSERT_TRUE(SetupWatch(linkfile, &watcher, delegate.get(), false));
+
+  // Creating the link makes the watched path resolvable; then modify and
+  // delete the underlying file, expecting an event after each step.
+  ASSERT_TRUE(CreateSymbolicLink(dir, link_dir));
+  VLOG(1) << "Waiting for link creation";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(WriteFile(file, "content v2"));
+  VLOG(1) << "Waiting for file change";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(base::DeleteFile(file, false));
+  VLOG(1) << "Waiting for file deletion";
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+// Verify that watching a file whose parent directory is a
+// dangling symlink works if the directory is created eventually.
+TEST_F(FilePathWatcherTest, LinkedDirectoryPart2) {
+  FilePathWatcher watcher;
+  FilePath dir(temp_dir_.path().AppendASCII("dir"));
+  FilePath link_dir(temp_dir_.path().AppendASCII("dir.lnk"));
+  FilePath file(dir.AppendASCII("file"));
+  FilePath linkfile(link_dir.AppendASCII("file"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  // Now create the link from dir.lnk pointing to dir but
+  // neither dir nor dir/file exist yet.
+  ASSERT_TRUE(CreateSymbolicLink(dir, link_dir));
+  // Note that we are watching dir.lnk/file.
+  ASSERT_TRUE(SetupWatch(linkfile, &watcher, delegate.get(), false));
+
+  // Create the target directory and file, then modify and delete the file,
+  // expecting an event after each step.
+  ASSERT_TRUE(base::CreateDirectory(dir));
+  ASSERT_TRUE(WriteFile(file, "content"));
+  VLOG(1) << "Waiting for dir/file creation";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(WriteFile(file, "content v2"));
+  VLOG(1) << "Waiting for file change";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(base::DeleteFile(file, false));
+  VLOG(1) << "Waiting for file deletion";
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+// Verify that watching a file with a symlink on the path
+// to the file works.
+TEST_F(FilePathWatcherTest, LinkedDirectoryPart3) {
+  FilePathWatcher watcher;
+  FilePath dir(temp_dir_.path().AppendASCII("dir"));
+  FilePath link_dir(temp_dir_.path().AppendASCII("dir.lnk"));
+  FilePath file(dir.AppendASCII("file"));
+  FilePath linkfile(link_dir.AppendASCII("file"));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(base::CreateDirectory(dir));
+  ASSERT_TRUE(CreateSymbolicLink(dir, link_dir));
+  // Note that we are watching dir.lnk/file but the file doesn't exist yet.
+  ASSERT_TRUE(SetupWatch(linkfile, &watcher, delegate.get(), false));
+
+  // Create, modify, then delete the file, expecting an event each time.
+  ASSERT_TRUE(WriteFile(file, "content"));
+  VLOG(1) << "Waiting for file creation";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(WriteFile(file, "content v2"));
+  VLOG(1) << "Waiting for file change";
+  ASSERT_TRUE(WaitForEvents());
+
+  ASSERT_TRUE(base::DeleteFile(file, false));
+  VLOG(1) << "Waiting for file deletion";
+  ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+#endif // OS_LINUX
+
+// Permission bits manipulated by ChangeFilePermissions() below; each value
+// maps to the corresponding user/group/other mode bits.
+enum Permission {
+  Read,
+  Write,
+  Execute
+};
+
+#if defined(OS_MACOSX)
+// Adds (|allow| == true) or removes (|allow| == false) the user/group/other
+// mode bits for |perm| on |path| via stat() + chmod(). Returns false if
+// either syscall fails or |perm| is unknown.
+bool ChangeFilePermissions(const FilePath& path, Permission perm, bool allow) {
+  struct stat stat_buf;
+
+  if (stat(path.value().c_str(), &stat_buf) != 0)
+    return false;
+
+  mode_t mode = 0;
+  switch (perm) {
+    case Read:
+      mode = S_IRUSR | S_IRGRP | S_IROTH;
+      break;
+    case Write:
+      mode = S_IWUSR | S_IWGRP | S_IWOTH;
+      break;
+    case Execute:
+      mode = S_IXUSR | S_IXGRP | S_IXOTH;
+      break;
+    default:
+      ADD_FAILURE() << "unknown perm " << perm;
+      return false;
+  }
+  if (allow) {
+    stat_buf.st_mode |= mode;
+  } else {
+    stat_buf.st_mode &= ~mode;
+  }
+  return chmod(path.value().c_str(), stat_buf.st_mode) == 0;
+}
+#endif  // defined(OS_MACOSX)
+
+#if defined(OS_MACOSX)
+// Linux implementation of FilePathWatcher doesn't catch attribute changes.
+// http://crbug.com/78043
+// Windows implementation of FilePathWatcher catches attribute changes that
+// don't affect the path being watched.
+// http://crbug.com/78045
+
+// Verify that changing attributes on a directory works.
+TEST_F(FilePathWatcherTest, DirAttributesChanged) {
+  FilePath test_dir1(temp_dir_.path().AppendASCII("DirAttributesChangedDir1"));
+  FilePath test_dir2(test_dir1.AppendASCII("DirAttributesChangedDir2"));
+  FilePath test_file(test_dir2.AppendASCII("DirAttributesChangedFile"));
+  // Setup a directory hierarchy.
+  ASSERT_TRUE(base::CreateDirectory(test_dir1));
+  ASSERT_TRUE(base::CreateDirectory(test_dir2));
+  ASSERT_TRUE(WriteFile(test_file, "content"));
+
+  FilePathWatcher watcher;
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  ASSERT_TRUE(SetupWatch(test_file, &watcher, delegate.get(), false));
+
+  // We should not get notified in this case as it hasn't affected our ability
+  // to access the file. (Removing the read bit on the grandparent directory
+  // still leaves the path traversable, which needs only the execute bit.)
+  ASSERT_TRUE(ChangeFilePermissions(test_dir1, Read, false));
+  loop_.task_runner()->PostDelayedTask(FROM_HERE,
+                                       MessageLoop::QuitWhenIdleClosure(),
+                                       TestTimeouts::tiny_timeout());
+  ASSERT_FALSE(WaitForEvents());
+  ASSERT_TRUE(ChangeFilePermissions(test_dir1, Read, true));
+
+  // We should get notified in this case because filepathwatcher can no
+  // longer access the file
+  ASSERT_TRUE(ChangeFilePermissions(test_dir1, Execute, false));
+  ASSERT_TRUE(WaitForEvents());
+  ASSERT_TRUE(ChangeFilePermissions(test_dir1, Execute, true));
+  DeleteDelegateOnFileThread(delegate.release());
+}
+
+#endif // OS_MACOSX
+} // namespace
+
+} // namespace base
diff --git a/libchrome/base/files/file_posix.cc b/libchrome/base/files/file_posix.cc
new file mode 100644
index 0000000..12f80c4
--- /dev/null
+++ b/libchrome/base/files/file_posix.cc
@@ -0,0 +1,535 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include "base/os_compat_android.h"
+#endif
+
+namespace base {
+
+// Make sure our Whence mappings match the system headers.
+static_assert(File::FROM_BEGIN == SEEK_SET && File::FROM_CURRENT == SEEK_CUR &&
+ File::FROM_END == SEEK_END,
+ "whence mapping must match the system headers");
+
+namespace {
+
+// fstat wrapper: on platforms where the plain stat structure is already
+// 64-bit (BSD/Mac/NaCl) use fstat(); elsewhere use fstat64() so large files
+// are handled correctly.
+#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL)
+int CallFstat(int fd, stat_wrapper_t *sb) {
+  ThreadRestrictions::AssertIOAllowed();
+  return fstat(fd, sb);
+}
+#else
+int CallFstat(int fd, stat_wrapper_t *sb) {
+  ThreadRestrictions::AssertIOAllowed();
+  return fstat64(fd, sb);
+}
+#endif
+
+// NaCl doesn't provide the following system calls, so either simulate them or
+// wrap them in order to minimize the number of #ifdef's in this file.
+#if !defined(OS_NACL)
+// Returns true if |file| was opened with O_APPEND.
+bool IsOpenAppend(PlatformFile file) {
+  return (fcntl(file, F_GETFL) & O_APPEND) != 0;
+}
+
+int CallFtruncate(PlatformFile file, int64_t length) {
+  return HANDLE_EINTR(ftruncate(file, length));
+}
+
+int CallFutimes(PlatformFile file, const struct timeval times[2]) {
+#ifdef __USE_XOPEN2K8
+  // futimens should be available, but futimes might not be
+  // http://pubs.opengroup.org/onlinepubs/9699919799/
+
+  timespec ts_times[2];
+  ts_times[0].tv_sec  = times[0].tv_sec;
+  ts_times[0].tv_nsec = times[0].tv_usec * 1000;
+  ts_times[1].tv_sec  = times[1].tv_sec;
+  ts_times[1].tv_nsec = times[1].tv_usec * 1000;
+
+  return futimens(file, ts_times);
+#else
+  return futimes(file, times);
+#endif
+}
+
+// Takes (do_lock == true) or releases a whole-file POSIX advisory record
+// lock via fcntl(F_SETLK). Non-blocking: returns an error instead of
+// waiting if the lock is held elsewhere.
+File::Error CallFcntlFlock(PlatformFile file, bool do_lock) {
+  struct flock lock;
+  lock.l_type = do_lock ? F_WRLCK : F_UNLCK;
+  lock.l_whence = SEEK_SET;
+  lock.l_start = 0;
+  lock.l_len = 0;  // Lock entire file.
+  if (HANDLE_EINTR(fcntl(file, F_SETLK, &lock)) == -1)
+    return File::OSErrorToFileError(errno);
+  return File::FILE_OK;
+}
+#else   // defined(OS_NACL)
+
+bool IsOpenAppend(PlatformFile file) {
+  // NaCl doesn't implement fcntl. Since NaCl's write conforms to the POSIX
+  // standard and always appends if the file is opened with O_APPEND, just
+  // return false here.
+  return false;
+}
+
+int CallFtruncate(PlatformFile file, int64_t length) {
+  NOTIMPLEMENTED();  // NaCl doesn't implement ftruncate.
+  return 0;
+}
+
+int CallFutimes(PlatformFile file, const struct timeval times[2]) {
+  NOTIMPLEMENTED();  // NaCl doesn't implement futimes.
+  return 0;
+}
+
+File::Error CallFcntlFlock(PlatformFile file, bool do_lock) {
+  NOTIMPLEMENTED();  // NaCl doesn't implement flock struct.
+  return File::FILE_ERROR_INVALID_OPERATION;
+}
+#endif  // defined(OS_NACL)
+
+}  // namespace
+
+// Populates this Info from a stat structure, converting the per-platform
+// timestamp fields (nanosecond resolution where available) into base::Time.
+// NOTE: st_ctime and friends are the POSIX inode *change* time, not a true
+// creation time; |creation_time| is therefore only an approximation.
+void File::Info::FromStat(const stat_wrapper_t& stat_info) {
+  is_directory = S_ISDIR(stat_info.st_mode);
+  is_symbolic_link = S_ISLNK(stat_info.st_mode);
+  size = stat_info.st_size;
+
+#if defined(OS_LINUX)
+  time_t last_modified_sec = stat_info.st_mtim.tv_sec;
+  int64_t last_modified_nsec = stat_info.st_mtim.tv_nsec;
+  time_t last_accessed_sec = stat_info.st_atim.tv_sec;
+  int64_t last_accessed_nsec = stat_info.st_atim.tv_nsec;
+  time_t creation_time_sec = stat_info.st_ctim.tv_sec;
+  int64_t creation_time_nsec = stat_info.st_ctim.tv_nsec;
+#elif defined(OS_ANDROID)
+  time_t last_modified_sec = stat_info.st_mtime;
+  int64_t last_modified_nsec = stat_info.st_mtime_nsec;
+  time_t last_accessed_sec = stat_info.st_atime;
+  int64_t last_accessed_nsec = stat_info.st_atime_nsec;
+  time_t creation_time_sec = stat_info.st_ctime;
+  int64_t creation_time_nsec = stat_info.st_ctime_nsec;
+#elif defined(OS_MACOSX) || defined(OS_IOS) || defined(OS_BSD)
+  time_t last_modified_sec = stat_info.st_mtimespec.tv_sec;
+  int64_t last_modified_nsec = stat_info.st_mtimespec.tv_nsec;
+  time_t last_accessed_sec = stat_info.st_atimespec.tv_sec;
+  int64_t last_accessed_nsec = stat_info.st_atimespec.tv_nsec;
+  time_t creation_time_sec = stat_info.st_ctimespec.tv_sec;
+  int64_t creation_time_nsec = stat_info.st_ctimespec.tv_nsec;
+#else
+  // Fallback: only second resolution is portable.
+  time_t last_modified_sec = stat_info.st_mtime;
+  int64_t last_modified_nsec = 0;
+  time_t last_accessed_sec = stat_info.st_atime;
+  int64_t last_accessed_nsec = 0;
+  time_t creation_time_sec = stat_info.st_ctime;
+  int64_t creation_time_nsec = 0;
+#endif
+
+  last_modified =
+      Time::FromTimeT(last_modified_sec) +
+      TimeDelta::FromMicroseconds(last_modified_nsec /
+                                  Time::kNanosecondsPerMicrosecond);
+
+  last_accessed =
+      Time::FromTimeT(last_accessed_sec) +
+      TimeDelta::FromMicroseconds(last_accessed_nsec /
+                                  Time::kNanosecondsPerMicrosecond);
+
+  creation_time =
+      Time::FromTimeT(creation_time_sec) +
+      TimeDelta::FromMicroseconds(creation_time_nsec /
+                                  Time::kNanosecondsPerMicrosecond);
+}
+
+// Returns true if a platform file descriptor is currently owned.
+bool File::IsValid() const {
+  return file_.is_valid();
+}
+
+// Returns the underlying descriptor without transferring ownership.
+PlatformFile File::GetPlatformFile() const {
+  return file_.get();
+}
+
+// Releases and returns the underlying descriptor; the caller becomes
+// responsible for closing it.
+PlatformFile File::TakePlatformFile() {
+  return file_.release();
+}
+
+// Closes the file if open; no-op on an invalid File.
+void File::Close() {
+  if (!IsValid())
+    return;
+
+  SCOPED_FILE_TRACE("Close");
+  ThreadRestrictions::AssertIOAllowed();
+  file_.reset();
+}
+
+// Repositions the file offset per |whence| (FROM_BEGIN/CURRENT/END) and
+// returns the resulting offset, or a negative value on error.
+int64_t File::Seek(Whence whence, int64_t offset) {
+  ThreadRestrictions::AssertIOAllowed();
+  DCHECK(IsValid());
+
+  SCOPED_FILE_TRACE_WITH_SIZE("Seek", offset);
+
+// Additionally check __BIONIC__ since older versions of Android don't define
+// _FILE_OFFSET_BITS.
+#if _FILE_OFFSET_BITS != 64 || defined(__BIONIC__)
+  static_assert(sizeof(int64_t) == sizeof(off64_t), "off64_t must be 64 bits");
+  return lseek64(file_.get(), static_cast<off64_t>(offset),
+                 static_cast<int>(whence));
+#else
+  static_assert(sizeof(int64_t) == sizeof(off_t), "off_t must be 64 bits");
+  return lseek(file_.get(), static_cast<off_t>(offset),
+               static_cast<int>(whence));
+#endif
+}
+
+// Reads |size| bytes at absolute |offset|, looping over pread() until the
+// buffer is full or EOF/error. Returns the bytes read, 0 at EOF, or -1 on
+// error (including a negative |size|). Does not move the file position.
+int File::Read(int64_t offset, char* data, int size) {
+  ThreadRestrictions::AssertIOAllowed();
+  DCHECK(IsValid());
+  if (size < 0)
+    return -1;
+
+  SCOPED_FILE_TRACE_WITH_SIZE("Read", size);
+
+  int bytes_read = 0;
+  int rv;
+  do {
+    rv = HANDLE_EINTR(pread(file_.get(), data + bytes_read,
+                            size - bytes_read, offset + bytes_read));
+    if (rv <= 0)
+      break;
+
+    bytes_read += rv;
+  } while (bytes_read < size);
+
+  // If anything was read, report it; otherwise propagate read()'s result
+  // (0 for EOF, -1 for error).
+  return bytes_read ? bytes_read : rv;
+}
+
+// Same contract as Read(), but reads from (and advances) the current file
+// position using read().
+int File::ReadAtCurrentPos(char* data, int size) {
+  ThreadRestrictions::AssertIOAllowed();
+  DCHECK(IsValid());
+  if (size < 0)
+    return -1;
+
+  SCOPED_FILE_TRACE_WITH_SIZE("ReadAtCurrentPos", size);
+
+  int bytes_read = 0;
+  int rv;
+  do {
+    rv = HANDLE_EINTR(read(file_.get(), data + bytes_read, size - bytes_read));
+    if (rv <= 0)
+      break;
+
+    bytes_read += rv;
+  } while (bytes_read < size);
+
+  return bytes_read ? bytes_read : rv;
+}
+
+// Reads up to |size| bytes at |offset| with a single pread() call; unlike
+// Read(), this does not retry to fill the whole buffer. Returns the (possibly
+// short) byte count, 0 at EOF, or -1 on error.
+int File::ReadNoBestEffort(int64_t offset, char* data, int size) {
+  ThreadRestrictions::AssertIOAllowed();
+  DCHECK(IsValid());
+  // Reject negative sizes up front, matching every other Read/Write variant
+  // in this file, instead of relying on pread() to fail with EINVAL.
+  if (size < 0)
+    return -1;
+
+  SCOPED_FILE_TRACE_WITH_SIZE("ReadNoBestEffort", size);
+  return HANDLE_EINTR(pread(file_.get(), data, size, offset));
+}
+
+// Reads up to |size| bytes from the current position with a single read()
+// call, without retrying short reads. Returns the byte count, 0 at EOF, or
+// -1 on error.
+int File::ReadAtCurrentPosNoBestEffort(char* data, int size) {
+  ThreadRestrictions::AssertIOAllowed();
+  DCHECK(IsValid());
+  if (size < 0)
+    return -1;
+
+  SCOPED_FILE_TRACE_WITH_SIZE("ReadAtCurrentPosNoBestEffort", size);
+  return HANDLE_EINTR(read(file_.get(), data, size));
+}
+
+// Writes |size| bytes at absolute |offset|, looping over pwrite() until the
+// whole buffer is written or an error occurs. Returns the bytes written or
+// -1 on error. Files opened with O_APPEND ignore pwrite()'s offset, so such
+// files are redirected to WriteAtCurrentPos().
+int File::Write(int64_t offset, const char* data, int size) {
+  ThreadRestrictions::AssertIOAllowed();
+
+  if (IsOpenAppend(file_.get()))
+    return WriteAtCurrentPos(data, size);
+
+  DCHECK(IsValid());
+  if (size < 0)
+    return -1;
+
+  SCOPED_FILE_TRACE_WITH_SIZE("Write", size);
+
+  int bytes_written = 0;
+  int rv;
+  do {
+    rv = HANDLE_EINTR(pwrite(file_.get(), data + bytes_written,
+                             size - bytes_written, offset + bytes_written));
+    if (rv <= 0)
+      break;
+
+    bytes_written += rv;
+  } while (bytes_written < size);
+
+  // Report partial progress if any; otherwise propagate the last result.
+  return bytes_written ? bytes_written : rv;
+}
+
+// Same contract as Write(), but writes at (and advances) the current file
+// position using write().
+int File::WriteAtCurrentPos(const char* data, int size) {
+  ThreadRestrictions::AssertIOAllowed();
+  DCHECK(IsValid());
+  if (size < 0)
+    return -1;
+
+  SCOPED_FILE_TRACE_WITH_SIZE("WriteAtCurrentPos", size);
+
+  int bytes_written = 0;
+  int rv;
+  do {
+    rv = HANDLE_EINTR(write(file_.get(), data + bytes_written,
+                            size - bytes_written));
+    if (rv <= 0)
+      break;
+
+    bytes_written += rv;
+  } while (bytes_written < size);
+
+  return bytes_written ? bytes_written : rv;
+}
+
+// Single write() attempt without retrying short writes; returns the
+// (possibly short) byte count or -1 on error.
+int File::WriteAtCurrentPosNoBestEffort(const char* data, int size) {
+  ThreadRestrictions::AssertIOAllowed();
+  DCHECK(IsValid());
+  if (size < 0)
+    return -1;
+
+  SCOPED_FILE_TRACE_WITH_SIZE("WriteAtCurrentPosNoBestEffort", size);
+  return HANDLE_EINTR(write(file_.get(), data, size));
+}
+
+// Returns the current file size from fstat(), or -1 on failure. Previously
+// this returned |false| (i.e. 0) on error, which silently reported an empty
+// file to callers that expect a negative value to signal failure.
+int64_t File::GetLength() {
+  DCHECK(IsValid());
+
+  SCOPED_FILE_TRACE("GetLength");
+
+  stat_wrapper_t file_info;
+  if (CallFstat(file_.get(), &file_info))
+    return -1;
+
+  return file_info.st_size;
+}
+
+// Truncates or extends the file to |length| bytes via ftruncate(). Returns
+// true on success.
+bool File::SetLength(int64_t length) {
+  ThreadRestrictions::AssertIOAllowed();
+  DCHECK(IsValid());
+
+  SCOPED_FILE_TRACE_WITH_SIZE("SetLength", length);
+  return !CallFtruncate(file_.get(), length);
+}
+
+// Updates the access and modification timestamps via futimes()/futimens().
+// Returns true on success.
+bool File::SetTimes(Time last_access_time, Time last_modified_time) {
+  ThreadRestrictions::AssertIOAllowed();
+  DCHECK(IsValid());
+
+  SCOPED_FILE_TRACE("SetTimes");
+
+  timeval times[2];
+  times[0] = last_access_time.ToTimeVal();
+  times[1] = last_modified_time.ToTimeVal();
+
+  return !CallFutimes(file_.get(), times);
+}
+
+// Fills |info| from fstat(). Returns false if the fstat() call fails.
+bool File::GetInfo(Info* info) {
+  DCHECK(IsValid());
+
+  SCOPED_FILE_TRACE("GetInfo");
+
+  stat_wrapper_t file_info;
+  if (CallFstat(file_.get(), &file_info))
+    return false;
+
+  info->FromStat(file_info);
+  return true;
+}
+
+// Takes a whole-file advisory lock (see CallFcntlFlock above). Non-blocking.
+File::Error File::Lock() {
+  SCOPED_FILE_TRACE("Lock");
+  return CallFcntlFlock(file_.get(), true);
+}
+
+// Releases the advisory lock taken by Lock().
+File::Error File::Unlock() {
+  SCOPED_FILE_TRACE("Unlock");
+  return CallFcntlFlock(file_.get(), false);
+}
+
+// Returns a new File sharing the same open file description via dup(); the
+// async flag is copied to the duplicate. On failure, the returned File
+// carries the translated errno.
+File File::Duplicate() {
+  if (!IsValid())
+    return File();
+
+  SCOPED_FILE_TRACE("Duplicate");
+
+  PlatformFile other_fd = dup(GetPlatformFile());
+  if (other_fd == -1)
+    return File(OSErrorToFileError(errno));
+
+  File other(other_fd);
+  if (async())
+    other.async_ = true;
+  return other;
+}
+
+// Static.
+// Maps a POSIX errno value to the cross-platform File::Error enum. Unknown
+// errno values are recorded in a sparse histogram (outside NaCl) and mapped
+// to FILE_ERROR_FAILED.
+File::Error File::OSErrorToFileError(int saved_errno) {
+  switch (saved_errno) {
+    case EACCES:
+    case EISDIR:
+    case EROFS:
+    case EPERM:
+      return FILE_ERROR_ACCESS_DENIED;
+    case EBUSY:
+#if !defined(OS_NACL)  // ETXTBSY not defined by NaCl.
+    case ETXTBSY:
+#endif
+      return FILE_ERROR_IN_USE;
+    case EEXIST:
+      return FILE_ERROR_EXISTS;
+    case EIO:
+      return FILE_ERROR_IO;
+    case ENOENT:
+      return FILE_ERROR_NOT_FOUND;
+    case EMFILE:
+      return FILE_ERROR_TOO_MANY_OPENED;
+    case ENOMEM:
+      return FILE_ERROR_NO_MEMORY;
+    case ENOSPC:
+      return FILE_ERROR_NO_SPACE;
+    case ENOTDIR:
+      return FILE_ERROR_NOT_A_DIRECTORY;
+    default:
+#if !defined(OS_NACL)  // NaCl build has no metrics code.
+      UMA_HISTOGRAM_SPARSE_SLOWLY("PlatformFile.UnknownErrors.Posix",
+                                  saved_errno);
+#endif
+      return FILE_ERROR_FAILED;
+  }
+}
+
+// NaCl doesn't implement system calls to open files directly.
+#if !defined(OS_NACL)
+// TODO(erikkay): does it make sense to support FLAG_EXCLUSIVE_* here?
+// Translates the platform-independent FLAG_* bits into open(2) flags and
+// opens |path|, recording success in |created_|/|error_details_| and
+// storing the descriptor in |file_|.
+void File::DoInitialize(const FilePath& path, uint32_t flags) {
+  ThreadRestrictions::AssertIOAllowed();
+  DCHECK(!IsValid());
+
+  int open_flags = 0;
+  if (flags & FLAG_CREATE)
+    open_flags = O_CREAT | O_EXCL;
+
+  created_ = false;
+
+  // CREATE_ALWAYS / OPEN_TRUNCATED are mutually exclusive with FLAG_CREATE
+  // (hence the DCHECKs) and both require write access.
+  if (flags & FLAG_CREATE_ALWAYS) {
+    DCHECK(!open_flags);
+    DCHECK(flags & FLAG_WRITE);
+    open_flags = O_CREAT | O_TRUNC;
+  }
+
+  if (flags & FLAG_OPEN_TRUNCATED) {
+    DCHECK(!open_flags);
+    DCHECK(flags & FLAG_WRITE);
+    open_flags = O_TRUNC;
+  }
+
+  if (!open_flags && !(flags & FLAG_OPEN) && !(flags & FLAG_OPEN_ALWAYS)) {
+    NOTREACHED();
+    errno = EOPNOTSUPP;
+    error_details_ = FILE_ERROR_FAILED;
+    return;
+  }
+
+  if (flags & FLAG_WRITE && flags & FLAG_READ) {
+    open_flags |= O_RDWR;
+  } else if (flags & FLAG_WRITE) {
+    open_flags |= O_WRONLY;
+  } else if (!(flags & FLAG_READ) &&
+             !(flags & FLAG_WRITE_ATTRIBUTES) &&
+             !(flags & FLAG_APPEND) &&
+             !(flags & FLAG_OPEN_ALWAYS)) {
+    NOTREACHED();
+  }
+
+  if (flags & FLAG_TERMINAL_DEVICE)
+    open_flags |= O_NOCTTY | O_NDELAY;
+
+  if (flags & FLAG_APPEND && flags & FLAG_READ)
+    open_flags |= O_APPEND | O_RDWR;
+  else if (flags & FLAG_APPEND)
+    open_flags |= O_APPEND | O_WRONLY;
+
+  static_assert(O_RDONLY == 0, "O_RDONLY must equal zero");
+
+  int mode = S_IRUSR | S_IWUSR;
+#if defined(OS_CHROMEOS)
+  mode |= S_IRGRP | S_IROTH;
+#endif
+
+  int descriptor = HANDLE_EINTR(open(path.value().c_str(), open_flags, mode));
+
+  // OPEN_ALWAYS: if the plain open failed (file absent), retry with O_CREAT
+  // and report whether this call actually created the file.
+  if (flags & FLAG_OPEN_ALWAYS) {
+    if (descriptor < 0) {
+      open_flags |= O_CREAT;
+      if (flags & FLAG_EXCLUSIVE_READ || flags & FLAG_EXCLUSIVE_WRITE)
+        open_flags |= O_EXCL;   // together with O_CREAT implies O_NOFOLLOW
+
+      descriptor = HANDLE_EINTR(open(path.value().c_str(), open_flags, mode));
+      if (descriptor >= 0)
+        created_ = true;
+    }
+  }
+
+  if (descriptor < 0) {
+    error_details_ = File::OSErrorToFileError(errno);
+    return;
+  }
+
+  if (flags & (FLAG_CREATE_ALWAYS | FLAG_CREATE))
+    created_ = true;
+
+  // Unlink immediately so the file disappears once the descriptor is closed.
+  if (flags & FLAG_DELETE_ON_CLOSE)
+    unlink(path.value().c_str());
+
+  async_ = ((flags & FLAG_ASYNC) == FLAG_ASYNC);
+  error_details_ = FILE_OK;
+  file_.reset(descriptor);
+}
+#endif  // !defined(OS_NACL)
+
+// Flushes file data to disk: fdatasync() on Linux/Android (data only, cheaper
+// than fsync), fsync() elsewhere; a no-op "success" on NaCl. Returns true on
+// success.
+bool File::DoFlush() {
+  ThreadRestrictions::AssertIOAllowed();
+  DCHECK(IsValid());
+
+#if defined(OS_NACL)
+  NOTIMPLEMENTED();  // NaCl doesn't implement fsync.
+  return true;
+#elif defined(OS_LINUX) || defined(OS_ANDROID)
+  return !HANDLE_EINTR(fdatasync(file_.get()));
+#else
+  return !HANDLE_EINTR(fsync(file_.get()));
+#endif
+}
+
+// Adopts |file|, taking ownership of the descriptor. Must not already hold
+// a valid descriptor.
+void File::SetPlatformFile(PlatformFile file) {
+  DCHECK(!file_.is_valid());
+  file_.reset(file);
+}
+
+} // namespace base
diff --git a/libchrome/base/files/file_tracing.cc b/libchrome/base/files/file_tracing.cc
new file mode 100644
index 0000000..6d11cbc
--- /dev/null
+++ b/libchrome/base/files/file_tracing.cc
@@ -0,0 +1,50 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_tracing.h"
+
+#include "base/files/file.h"
+
+namespace base {
+
+namespace {
+// Global tracing provider; null until SetProvider() installs one.
+FileTracing::Provider* g_provider = nullptr;
+}
+
+// static
+bool FileTracing::IsCategoryEnabled() {
+  return g_provider && g_provider->FileTracingCategoryIsEnabled();
+}
+
+// static
+void FileTracing::SetProvider(FileTracing::Provider* provider) {
+  g_provider = provider;
+}
+
+FileTracing::ScopedEnabler::ScopedEnabler() {
+  if (g_provider)
+    g_provider->FileTracingEnable(this);
+}
+
+FileTracing::ScopedEnabler::~ScopedEnabler() {
+  if (g_provider)
+    g_provider->FileTracingDisable(this);
+}
+
+FileTracing::ScopedTrace::ScopedTrace() : id_(nullptr) {}
+
+FileTracing::ScopedTrace::~ScopedTrace() {
+  if (id_ && g_provider)
+    g_provider->FileTracingEventEnd(name_, id_);
+}
+
+// NOTE(review): dereferences g_provider unchecked; callers only reach this
+// via the SCOPED_FILE_TRACE* macros after IsCategoryEnabled() returned true,
+// but a provider cleared in between would crash here — confirm intended.
+void FileTracing::ScopedTrace::Initialize(const char* name,
+                                          File* file,
+                                          int64_t size) {
+  id_ = &file->trace_enabler_;
+  name_ = name;
+  g_provider->FileTracingEventBegin(name_, id_, file->tracing_path_, size);
+}
+
+} // namespace base
diff --git a/libchrome/base/files/file_tracing.h b/libchrome/base/files/file_tracing.h
new file mode 100644
index 0000000..bedd7be
--- /dev/null
+++ b/libchrome/base/files/file_tracing.h
@@ -0,0 +1,95 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_FILE_TRACING_H_
+#define BASE_FILES_FILE_TRACING_H_
+
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+#define FILE_TRACING_PREFIX "File"
+
+#define SCOPED_FILE_TRACE_WITH_SIZE(name, size) \
+ FileTracing::ScopedTrace scoped_file_trace; \
+ if (FileTracing::IsCategoryEnabled()) \
+ scoped_file_trace.Initialize(FILE_TRACING_PREFIX "::" name, this, size)
+
+#define SCOPED_FILE_TRACE(name) SCOPED_FILE_TRACE_WITH_SIZE(name, 0)
+
+namespace base {
+
+class File;
+class FilePath;
+
+// Hooks base::File operations into a pluggable tracing backend. A global
+// Provider (installed via SetProvider) receives begin/end events emitted by
+// the SCOPED_FILE_TRACE* macros above.
+class BASE_EXPORT FileTracing {
+ public:
+  // Whether the file tracing category is enabled.
+  static bool IsCategoryEnabled();
+
+  class Provider {
+   public:
+    virtual ~Provider() = default;
+
+    // Whether the file tracing category is currently enabled.
+    virtual bool FileTracingCategoryIsEnabled() const = 0;
+
+    // Enables file tracing for |id|. Must be called before recording events.
+    virtual void FileTracingEnable(void* id) = 0;
+
+    // Disables file tracing for |id|.
+    virtual void FileTracingDisable(void* id) = 0;
+
+    // Begins an event for |id| with |name|. |path| tells where in the directory
+    // structure the event is happening (and may be blank). |size| is the number
+    // of bytes involved in the event.
+    virtual void FileTracingEventBegin(const char* name,
+                                       void* id,
+                                       const FilePath& path,
+                                       int64_t size) = 0;
+
+    // Ends an event for |id| with |name|.
+    virtual void FileTracingEventEnd(const char* name, void* id) = 0;
+  };
+
+  // Sets a global file tracing provider to query categories and record events.
+  static void SetProvider(Provider* provider);
+
+  // Enables file tracing while in scope.
+  class ScopedEnabler {
+   public:
+    ScopedEnabler();
+    ~ScopedEnabler();
+  };
+
+  // RAII wrapper emitting one begin/end event pair for a single file
+  // operation; inert unless Initialize() is called.
+  class ScopedTrace {
+   public:
+    ScopedTrace();
+    ~ScopedTrace();
+
+    // Called only if the tracing category is enabled. |name| is the name of the
+    // event to trace (e.g. "Read", "Write") and must have an application
+    // lifetime (e.g. static or literal). |file| is the file being traced; must
+    // outlive this class. |size| is the size (in bytes) of this event.
+    void Initialize(const char* name, File* file, int64_t size);
+
+   private:
+    // The ID of this trace. Based on the |file| passed to |Initialize()|. Must
+    // outlive this class.
+    void* id_;
+
+    // The name of the event to trace (e.g. "Read", "Write"). Prefixed with
+    // "File".
+    const char* name_;
+
+    DISALLOW_COPY_AND_ASSIGN(ScopedTrace);
+  };
+
+  DISALLOW_COPY_AND_ASSIGN(FileTracing);
+};
+
+} // namespace base
+
+#endif // BASE_FILES_FILE_TRACING_H_
diff --git a/libchrome/base/files/file_unittest.cc b/libchrome/base/files/file_unittest.cc
new file mode 100644
index 0000000..2445f7e
--- /dev/null
+++ b/libchrome/base/files/file_unittest.cc
@@ -0,0 +1,517 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file.h"
+
+#include <stdint.h>
+
+#include <utility>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::File;
+using base::FilePath;
+
+// End-to-end check of File construction and Initialize() across the basic
+// open dispositions: default-constructed, error-constructed, OPEN on a
+// missing file, OPEN_ALWAYS, OPEN on an existing file, CREATE on an existing
+// file, CREATE_ALWAYS, and DELETE_ON_CLOSE. Each case verifies IsValid(),
+// created() and error_details().
+TEST(FileTest, Create) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.path().AppendASCII("create_file_1");
+
+  {
+    // Don't create a File at all.
+    File file;
+    EXPECT_FALSE(file.IsValid());
+    EXPECT_EQ(base::File::FILE_ERROR_FAILED, file.error_details());
+
+    File file2(base::File::FILE_ERROR_TOO_MANY_OPENED);
+    EXPECT_FALSE(file2.IsValid());
+    EXPECT_EQ(base::File::FILE_ERROR_TOO_MANY_OPENED, file2.error_details());
+  }
+
+  {
+    // Open a file that doesn't exist.
+    File file(file_path, base::File::FLAG_OPEN | base::File::FLAG_READ);
+    EXPECT_FALSE(file.IsValid());
+    EXPECT_EQ(base::File::FILE_ERROR_NOT_FOUND, file.error_details());
+  }
+
+  {
+    // Open or create a file.
+    File file(file_path, base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_READ);
+    EXPECT_TRUE(file.IsValid());
+    EXPECT_TRUE(file.created());
+    EXPECT_EQ(base::File::FILE_OK, file.error_details());
+  }
+
+  {
+    // Open an existing file.
+    File file(file_path, base::File::FLAG_OPEN | base::File::FLAG_READ);
+    EXPECT_TRUE(file.IsValid());
+    EXPECT_FALSE(file.created());
+    EXPECT_EQ(base::File::FILE_OK, file.error_details());
+
+    // This time verify closing the file.
+    file.Close();
+    EXPECT_FALSE(file.IsValid());
+  }
+
+  {
+    // Open an existing file through Initialize
+    File file;
+    file.Initialize(file_path, base::File::FLAG_OPEN | base::File::FLAG_READ);
+    EXPECT_TRUE(file.IsValid());
+    EXPECT_FALSE(file.created());
+    EXPECT_EQ(base::File::FILE_OK, file.error_details());
+
+    // This time verify closing the file.
+    file.Close();
+    EXPECT_FALSE(file.IsValid());
+  }
+
+  {
+    // Create a file that exists.
+    File file(file_path, base::File::FLAG_CREATE | base::File::FLAG_READ);
+    EXPECT_FALSE(file.IsValid());
+    EXPECT_FALSE(file.created());
+    EXPECT_EQ(base::File::FILE_ERROR_EXISTS, file.error_details());
+  }
+
+  {
+    // Create or overwrite a file.
+    File file(file_path,
+              base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE);
+    EXPECT_TRUE(file.IsValid());
+    EXPECT_TRUE(file.created());
+    EXPECT_EQ(base::File::FILE_OK, file.error_details());
+  }
+
+  {
+    // Create a delete-on-close file.
+    file_path = temp_dir.path().AppendASCII("create_file_2");
+    File file(file_path,
+              base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_READ |
+                  base::File::FLAG_DELETE_ON_CLOSE);
+    EXPECT_TRUE(file.IsValid());
+    EXPECT_TRUE(file.created());
+    EXPECT_EQ(base::File::FILE_OK, file.error_details());
+  }
+
+  // The delete-on-close file from the last scope must be gone now that its
+  // only handle has been closed.
+  EXPECT_FALSE(base::PathExists(file_path));
+}
+
+// Verifies that async() mirrors whether FLAG_ASYNC was passed at open time.
+TEST(FileTest, Async) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.path().AppendASCII("create_file");
+
+  {
+    // Opening with FLAG_ASYNC must be reported by async().
+    File async_file(file_path,
+                    base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_ASYNC);
+    EXPECT_TRUE(async_file.IsValid());
+    EXPECT_TRUE(async_file.async());
+  }
+
+  {
+    // Without FLAG_ASYNC the handle reports synchronous mode.
+    File sync_file(file_path, base::File::FLAG_OPEN_ALWAYS);
+    EXPECT_TRUE(sync_file.IsValid());
+    EXPECT_FALSE(sync_file.async());
+  }
+}
+
+// Verifies that a file opened a second time with FLAG_DELETE_ON_CLOSE is
+// removed from the filesystem once both handles are closed.
+TEST(FileTest, DeleteOpenFile) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.path().AppendASCII("create_file_1");
+
+  // Create a file. FLAG_SHARE_DELETE permits the second open below to mark
+  // the file for deletion while this handle is still open.
+  File file(file_path,
+            base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_READ |
+                base::File::FLAG_SHARE_DELETE);
+  EXPECT_TRUE(file.IsValid());
+  EXPECT_TRUE(file.created());
+  EXPECT_EQ(base::File::FILE_OK, file.error_details());
+
+  // Open an existing file and mark it as delete on close.
+  File same_file(file_path,
+                 base::File::FLAG_OPEN | base::File::FLAG_DELETE_ON_CLOSE |
+                     base::File::FLAG_READ);
+  // Check the second handle here; the original test re-checked |file| by
+  // mistake, leaving |same_file|'s validity unverified.
+  EXPECT_TRUE(same_file.IsValid());
+  EXPECT_FALSE(same_file.created());
+  EXPECT_EQ(base::File::FILE_OK, same_file.error_details());
+
+  // Close both handles and check that the file is gone.
+  file.Close();
+  same_file.Close();
+  EXPECT_FALSE(base::PathExists(file_path));
+}
+
+// Covers positional Read()/Write(): zero-length operations, reads at and past
+// EOF, partial reads from mid-file, the ReadNoBestEffort() wrapper, and a
+// write past EOF which must extend and zero-pad the file.
+TEST(FileTest, ReadWrite) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.path().AppendASCII("read_write_file");
+  File file(file_path,
+            base::File::FLAG_CREATE | base::File::FLAG_READ |
+                base::File::FLAG_WRITE);
+  ASSERT_TRUE(file.IsValid());
+
+  char data_to_write[] = "test";
+  const int kTestDataSize = 4;
+
+  // Write 0 bytes to the file.
+  int bytes_written = file.Write(0, data_to_write, 0);
+  EXPECT_EQ(0, bytes_written);
+
+  // Write "test" to the file.
+  bytes_written = file.Write(0, data_to_write, kTestDataSize);
+  EXPECT_EQ(kTestDataSize, bytes_written);
+
+  // Read from EOF.
+  char data_read_1[32];
+  int bytes_read = file.Read(kTestDataSize, data_read_1, kTestDataSize);
+  EXPECT_EQ(0, bytes_read);
+
+  // Read from somewhere in the middle of the file.
+  const int kPartialReadOffset = 1;
+  bytes_read = file.Read(kPartialReadOffset, data_read_1, kTestDataSize);
+  EXPECT_EQ(kTestDataSize - kPartialReadOffset, bytes_read);
+  for (int i = 0; i < bytes_read; i++)
+    EXPECT_EQ(data_to_write[i + kPartialReadOffset], data_read_1[i]);
+
+  // Read 0 bytes.
+  bytes_read = file.Read(0, data_read_1, 0);
+  EXPECT_EQ(0, bytes_read);
+
+  // Read the entire file.
+  bytes_read = file.Read(0, data_read_1, kTestDataSize);
+  EXPECT_EQ(kTestDataSize, bytes_read);
+  for (int i = 0; i < bytes_read; i++)
+    EXPECT_EQ(data_to_write[i], data_read_1[i]);
+
+  // Read again, but using the trivial native wrapper. A short read is
+  // acceptable here, hence EXPECT_LE rather than EXPECT_EQ.
+  bytes_read = file.ReadNoBestEffort(0, data_read_1, kTestDataSize);
+  EXPECT_LE(bytes_read, kTestDataSize);
+  for (int i = 0; i < bytes_read; i++)
+    EXPECT_EQ(data_to_write[i], data_read_1[i]);
+
+  // Write past the end of the file.
+  const int kOffsetBeyondEndOfFile = 10;
+  const int kPartialWriteLength = 2;
+  bytes_written = file.Write(kOffsetBeyondEndOfFile,
+                             data_to_write, kPartialWriteLength);
+  EXPECT_EQ(kPartialWriteLength, bytes_written);
+
+  // Make sure the file was extended.
+  int64_t file_size = 0;
+  EXPECT_TRUE(GetFileSize(file_path, &file_size));
+  EXPECT_EQ(kOffsetBeyondEndOfFile + kPartialWriteLength, file_size);
+
+  // Make sure the file was zero-padded: original data, then zeros up to the
+  // write offset, then the partial write.
+  char data_read_2[32];
+  bytes_read = file.Read(0, data_read_2, static_cast<int>(file_size));
+  EXPECT_EQ(file_size, bytes_read);
+  for (int i = 0; i < kTestDataSize; i++)
+    EXPECT_EQ(data_to_write[i], data_read_2[i]);
+  for (int i = kTestDataSize; i < kOffsetBeyondEndOfFile; i++)
+    EXPECT_EQ(0, data_read_2[i]);
+  for (int i = kOffsetBeyondEndOfFile; i < file_size; i++)
+    EXPECT_EQ(data_to_write[i - kOffsetBeyondEndOfFile], data_read_2[i]);
+}
+
+// Checks FLAG_APPEND semantics: the supplied offset is ignored and writes
+// land at the end of the file. Also exercises File's move-assignment.
+TEST(FileTest, Append) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.path().AppendASCII("append_file");
+  File file(file_path, base::File::FLAG_CREATE | base::File::FLAG_APPEND);
+  ASSERT_TRUE(file.IsValid());
+
+  char data_to_write[] = "test";
+  const int kTestDataSize = 4;
+
+  // Write 0 bytes to the file.
+  int bytes_written = file.Write(0, data_to_write, 0);
+  EXPECT_EQ(0, bytes_written);
+
+  // Write "test" to the file.
+  bytes_written = file.Write(0, data_to_write, kTestDataSize);
+  EXPECT_EQ(kTestDataSize, bytes_written);
+
+  file.Close();
+  File file2(file_path,
+             base::File::FLAG_OPEN | base::File::FLAG_READ |
+                 base::File::FLAG_APPEND);
+  ASSERT_TRUE(file2.IsValid());
+
+  // Test passing the file around. The moved-from handle must be invalid.
+  file = std::move(file2);
+  EXPECT_FALSE(file2.IsValid());
+  ASSERT_TRUE(file.IsValid());
+
+  char append_data_to_write[] = "78";
+  const int kAppendDataSize = 2;
+
+  // Append "78" to the file. Offset 0 is ignored in append mode; the read
+  // below verifies the bytes landed after "test".
+  bytes_written = file.Write(0, append_data_to_write, kAppendDataSize);
+  EXPECT_EQ(kAppendDataSize, bytes_written);
+
+  // Read the entire file.
+  char data_read_1[32];
+  int bytes_read = file.Read(0, data_read_1,
+                             kTestDataSize + kAppendDataSize);
+  EXPECT_EQ(kTestDataSize + kAppendDataSize, bytes_read);
+  for (int i = 0; i < kTestDataSize; i++)
+    EXPECT_EQ(data_to_write[i], data_read_1[i]);
+  for (int i = 0; i < kAppendDataSize; i++)
+    EXPECT_EQ(append_data_to_write[i], data_read_1[kTestDataSize + i]);
+}
+
+
+// Exercises GetLength()/SetLength(): extending a file (which must zero-pad
+// the new tail) and truncating it, cross-checking against GetFileSize().
+TEST(FileTest, Length) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.path().AppendASCII("truncate_file");
+  File file(file_path,
+            base::File::FLAG_CREATE | base::File::FLAG_READ |
+                base::File::FLAG_WRITE);
+  ASSERT_TRUE(file.IsValid());
+  EXPECT_EQ(0, file.GetLength());
+
+  // Write "test" to the file.
+  char data_to_write[] = "test";
+  // |const| added for consistency with the sibling tests in this file.
+  const int kTestDataSize = 4;
+  int bytes_written = file.Write(0, data_to_write, kTestDataSize);
+  EXPECT_EQ(kTestDataSize, bytes_written);
+
+  // Extend the file.
+  const int kExtendedFileLength = 10;
+  int64_t file_size = 0;
+  EXPECT_TRUE(file.SetLength(kExtendedFileLength));
+  EXPECT_EQ(kExtendedFileLength, file.GetLength());
+  EXPECT_TRUE(GetFileSize(file_path, &file_size));
+  EXPECT_EQ(kExtendedFileLength, file_size);
+
+  // Make sure the file was zero-padded.
+  char data_read[32];
+  int bytes_read = file.Read(0, data_read, static_cast<int>(file_size));
+  EXPECT_EQ(file_size, bytes_read);
+  for (int i = 0; i < kTestDataSize; i++)
+    EXPECT_EQ(data_to_write[i], data_read[i]);
+  for (int i = kTestDataSize; i < file_size; i++)
+    EXPECT_EQ(0, data_read[i]);
+
+  // Truncate the file.
+  const int kTruncatedFileLength = 2;
+  EXPECT_TRUE(file.SetLength(kTruncatedFileLength));
+  EXPECT_EQ(kTruncatedFileLength, file.GetLength());
+  EXPECT_TRUE(GetFileSize(file_path, &file_size));
+  EXPECT_EQ(kTruncatedFileLength, file_size);
+
+  // Make sure the file was truncated: asking for kTestDataSize bytes only
+  // yields the surviving prefix.
+  bytes_read = file.Read(0, data_read, kTestDataSize);
+  EXPECT_EQ(file_size, bytes_read);
+  for (int i = 0; i < file_size; i++)
+    EXPECT_EQ(data_to_write[i], data_read[i]);
+}
+
+// Flakily fails: http://crbug.com/86494
+// Note: with this #if arrangement the test runs only on Android; everywhere
+// else it is compiled as DISABLED_TouchGetInfo.
+#if defined(OS_ANDROID)
+TEST(FileTest, TouchGetInfo) {
+#else
+TEST(FileTest, DISABLED_TouchGetInfo) {
+#endif
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  File file(temp_dir.path().AppendASCII("touch_get_info_file"),
+            base::File::FLAG_CREATE | base::File::FLAG_WRITE |
+                base::File::FLAG_WRITE_ATTRIBUTES);
+  ASSERT_TRUE(file.IsValid());
+
+  // Get info for a newly created file.
+  base::File::Info info;
+  EXPECT_TRUE(file.GetInfo(&info));
+
+  // Add 2 seconds to account for possible rounding errors on
+  // filesystems that use a 1s or 2s timestamp granularity.
+  base::Time now = base::Time::Now() + base::TimeDelta::FromSeconds(2);
+  EXPECT_EQ(0, info.size);
+  EXPECT_FALSE(info.is_directory);
+  EXPECT_FALSE(info.is_symbolic_link);
+  EXPECT_LE(info.last_accessed.ToInternalValue(), now.ToInternalValue());
+  EXPECT_LE(info.last_modified.ToInternalValue(), now.ToInternalValue());
+  EXPECT_LE(info.creation_time.ToInternalValue(), now.ToInternalValue());
+  base::Time creation_time = info.creation_time;
+
+  // Write "test" to the file.
+  char data[] = "test";
+  const int kTestDataSize = 4;
+  int bytes_written = file.Write(0, data, kTestDataSize);
+  EXPECT_EQ(kTestDataSize, bytes_written);
+
+  // Change the last_accessed and last_modified dates.
+  // It's best to add values that are multiples of 2 (in seconds)
+  // to the current last_accessed and last_modified times, because
+  // FATxx uses a 2s timestamp granularity.
+  base::Time new_last_accessed =
+      info.last_accessed + base::TimeDelta::FromSeconds(234);
+  base::Time new_last_modified =
+      info.last_modified + base::TimeDelta::FromMinutes(567);
+
+  EXPECT_TRUE(file.SetTimes(new_last_accessed, new_last_modified));
+
+  // Make sure the file info was updated accordingly.
+  EXPECT_TRUE(file.GetInfo(&info));
+  EXPECT_EQ(info.size, kTestDataSize);
+  EXPECT_FALSE(info.is_directory);
+  EXPECT_FALSE(info.is_symbolic_link);
+
+  // ext2/ext3 and HFS/HFS+ seem to have a timestamp granularity of 1s, so on
+  // POSIX only whole seconds are compared.
+#if defined(OS_POSIX)
+  EXPECT_EQ(info.last_accessed.ToTimeVal().tv_sec,
+            new_last_accessed.ToTimeVal().tv_sec);
+  EXPECT_EQ(info.last_modified.ToTimeVal().tv_sec,
+            new_last_modified.ToTimeVal().tv_sec);
+#else
+  EXPECT_EQ(info.last_accessed.ToInternalValue(),
+            new_last_accessed.ToInternalValue());
+  EXPECT_EQ(info.last_modified.ToInternalValue(),
+            new_last_modified.ToInternalValue());
+#endif
+
+  // SetTimes() must not disturb the creation time.
+  EXPECT_EQ(info.creation_time.ToInternalValue(),
+            creation_time.ToInternalValue());
+}
+
+// Verifies that ReadAtCurrentPos() advances the file cursor between calls.
+TEST(FileTest, ReadAtCurrentPosition) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.path().AppendASCII("read_at_current_position");
+  File file(file_path,
+            base::File::FLAG_CREATE | base::File::FLAG_READ |
+                base::File::FLAG_WRITE);
+  EXPECT_TRUE(file.IsValid());
+
+  const char kData[] = "test";
+  const int kDataSize = sizeof(kData) - 1;
+  EXPECT_EQ(kDataSize, file.Write(0, kData, kDataSize));
+
+  EXPECT_EQ(0, file.Seek(base::File::FROM_BEGIN, 0));
+
+  // Consume the payload in two back-to-back chunks; the second read must
+  // start where the first one stopped.
+  char buffer[kDataSize];
+  const int kFirstChunkSize = kDataSize / 2;
+  const int kSecondChunkSize = kDataSize - kFirstChunkSize;
+  EXPECT_EQ(kFirstChunkSize, file.ReadAtCurrentPos(buffer, kFirstChunkSize));
+  EXPECT_EQ(kSecondChunkSize,
+            file.ReadAtCurrentPos(buffer + kFirstChunkSize, kSecondChunkSize));
+  EXPECT_EQ(std::string(buffer, buffer + kDataSize), std::string(kData));
+}
+
+// Verifies that WriteAtCurrentPos() advances the file cursor between calls.
+TEST(FileTest, WriteAtCurrentPosition) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.path().AppendASCII("write_at_current_position");
+  File file(file_path,
+            base::File::FLAG_CREATE | base::File::FLAG_READ |
+                base::File::FLAG_WRITE);
+  EXPECT_TRUE(file.IsValid());
+
+  const char kData[] = "test";
+  const int kDataSize = sizeof(kData) - 1;
+
+  // Emit the payload in two back-to-back chunks; the second write must
+  // continue where the first one stopped.
+  const int kFirstChunkSize = kDataSize / 2;
+  const int kSecondChunkSize = kDataSize - kFirstChunkSize;
+  EXPECT_EQ(kFirstChunkSize, file.WriteAtCurrentPos(kData, kFirstChunkSize));
+  EXPECT_EQ(kSecondChunkSize,
+            file.WriteAtCurrentPos(kData + kFirstChunkSize, kSecondChunkSize));
+
+  char buffer[kDataSize];
+  EXPECT_EQ(kDataSize, file.Read(0, buffer, kDataSize));
+  EXPECT_EQ(std::string(buffer, buffer + kDataSize), std::string(kData));
+}
+
+// Walks the cursor around with all three whence modes; Seek() returns the
+// resulting absolute offset.
+TEST(FileTest, Seek) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.path().AppendASCII("seek_file");
+  File file(file_path,
+            base::File::FLAG_CREATE | base::File::FLAG_READ |
+                base::File::FLAG_WRITE);
+  ASSERT_TRUE(file.IsValid());
+
+  const int64_t kOffset = 10;
+  // Absolute, relative forward, relative backward, then from the end of a
+  // file whose length is pinned at 2 * kOffset.
+  EXPECT_EQ(kOffset, file.Seek(base::File::FROM_BEGIN, kOffset));
+  EXPECT_EQ(kOffset * 2, file.Seek(base::File::FROM_CURRENT, kOffset));
+  EXPECT_EQ(kOffset, file.Seek(base::File::FROM_CURRENT, -kOffset));
+  EXPECT_TRUE(file.SetLength(kOffset * 2));
+  EXPECT_EQ(kOffset, file.Seek(base::File::FROM_END, -kOffset));
+}
+
+// Verifies Duplicate(): the two handles refer to the same underlying file,
+// including a shared file position. Seek(FROM_CURRENT, 0) is used as a
+// "tell" to observe the position without moving it.
+TEST(FileTest, Duplicate) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.path().AppendASCII("file");
+  File file(file_path,(base::File::FLAG_CREATE |
+                       base::File::FLAG_READ |
+                       base::File::FLAG_WRITE));
+  ASSERT_TRUE(file.IsValid());
+
+  File file2(file.Duplicate());
+  ASSERT_TRUE(file2.IsValid());
+
+  // Write through one handle, close it, read through the other.
+  static const char kData[] = "now is a good time.";
+  static const int kDataLen = sizeof(kData) - 1;
+
+  ASSERT_EQ(0, file.Seek(base::File::FROM_CURRENT, 0));
+  ASSERT_EQ(0, file2.Seek(base::File::FROM_CURRENT, 0));
+  ASSERT_EQ(kDataLen, file.WriteAtCurrentPos(kData, kDataLen));
+  // A write through |file| must advance the position seen by |file2| too.
+  ASSERT_EQ(kDataLen, file.Seek(base::File::FROM_CURRENT, 0));
+  ASSERT_EQ(kDataLen, file2.Seek(base::File::FROM_CURRENT, 0));
+  file.Close();
+  char buf[kDataLen];
+  ASSERT_EQ(kDataLen, file2.Read(0, &buf[0], kDataLen));
+  ASSERT_EQ(std::string(kData, kDataLen), std::string(&buf[0], kDataLen));
+}
+
+// The delete-on-close disposition must survive Duplicate(): the file is
+// removed only after both handles have been closed.
+TEST(FileTest, DuplicateDeleteOnClose) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.path().AppendASCII("file");
+  File file(file_path,
+            (base::File::FLAG_CREATE | base::File::FLAG_READ |
+             base::File::FLAG_WRITE | base::File::FLAG_DELETE_ON_CLOSE));
+  ASSERT_TRUE(file.IsValid());
+
+  File duplicate(file.Duplicate());
+  ASSERT_TRUE(duplicate.IsValid());
+
+  file.Close();
+  duplicate.Close();
+  ASSERT_FALSE(base::PathExists(file_path));
+}
+
+#if defined(OS_WIN)
+// Windows-only: wraps a raw directory HANDLE (obtained via ::CreateFile with
+// FILE_FLAG_BACKUP_SEMANTICS) in base::File and checks GetInfo() reports it
+// as a directory.
+TEST(FileTest, GetInfoForDirectory) {
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath empty_dir = temp_dir.path().Append(FILE_PATH_LITERAL("gpfi_test"));
+  ASSERT_TRUE(CreateDirectory(empty_dir));
+
+  base::File dir(
+      ::CreateFile(empty_dir.value().c_str(),
+                   GENERIC_READ | GENERIC_WRITE,
+                   FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+                   NULL,
+                   OPEN_EXISTING,
+                   FILE_FLAG_BACKUP_SEMANTICS,  // Needed to open a directory.
+                   NULL));
+  ASSERT_TRUE(dir.IsValid());
+
+  base::File::Info info;
+  EXPECT_TRUE(dir.GetInfo(&info));
+  EXPECT_TRUE(info.is_directory);
+  EXPECT_FALSE(info.is_symbolic_link);
+  EXPECT_EQ(0, info.size);
+}
+#endif  // defined(OS_WIN)
diff --git a/libchrome/base/files/file_util.cc b/libchrome/base/files/file_util.cc
new file mode 100644
index 0000000..80fa44f
--- /dev/null
+++ b/libchrome/base/files/file_util.cc
@@ -0,0 +1,263 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_util.h"
+
+#if defined(OS_WIN)
+#include <io.h>
+#endif
+#include <stdio.h>
+
+#include <fstream>
+#include <limits>
+
+#include "base/files/file_enumerator.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+#if !defined(OS_NACL_NONSFI)
+namespace {
+
+// The maximum number of 'uniquified' files we will try to create.
+// This is used when the filename we're trying to download is already in use,
+// so we create a new unique filename by appending " (nnn)" before the
+// extension, where 1 <= nnn <= kMaxUniqueFiles.
+// Also used by code that cleans up said files.
+// NOTE(review): |static| is redundant inside an unnamed namespace.
+static const int kMaxUniqueFiles = 100;
+
+}  // namespace
+
+// Sums the sizes of all regular files under |root_path|, recursively.
+// Yields 0 for a missing or empty directory.
+int64_t ComputeDirectorySize(const FilePath& root_path) {
+  int64_t total_size = 0;
+  FileEnumerator enumerator(root_path, true, FileEnumerator::FILES);
+  for (FilePath name = enumerator.Next(); !name.empty();
+       name = enumerator.Next()) {
+    total_size += enumerator.GetInfo().GetSize();
+  }
+  return total_size;
+}
+
+// Moves |from_path| to |to_path|, refusing paths that reference ".." so a
+// caller-supplied path cannot escape its intended directory.
+bool Move(const FilePath& from_path, const FilePath& to_path) {
+  const bool references_parent =
+      from_path.ReferencesParent() || to_path.ReferencesParent();
+  return !references_parent && internal::MoveUnsafe(from_path, to_path);
+}
+
+// Returns true iff both files can be opened and contain exactly the same
+// bytes. Any unopenable file yields false, even if both are unopenable.
+bool ContentsEqual(const FilePath& filename1, const FilePath& filename2) {
+  // We open the file in binary format even if they are text files because
+  // we are just comparing that bytes are exactly same in both files and not
+  // doing anything smart with text formatting.
+  std::ifstream file1(filename1.value().c_str(),
+                      std::ios::in | std::ios::binary);
+  std::ifstream file2(filename2.value().c_str(),
+                      std::ios::in | std::ios::binary);
+
+  // Even if both files aren't openable (and thus, in some sense, "equal"),
+  // any unusable file yields a result of "false".
+  if (!file1.is_open() || !file2.is_open())
+    return false;
+
+  // NOTE(review): 2056 looks like a typo for 2048; harmless, any size works.
+  const int BUFFER_SIZE = 2056;
+  char buffer1[BUFFER_SIZE], buffer2[BUFFER_SIZE];
+  do {
+    file1.read(buffer1, BUFFER_SIZE);
+    file2.read(buffer2, BUFFER_SIZE);
+
+    // Mismatched EOF states, unequal chunk lengths, or unequal chunk bytes
+    // all mean the files differ. Only the first gcount() bytes are valid.
+    if ((file1.eof() != file2.eof()) ||
+        (file1.gcount() != file2.gcount()) ||
+        (memcmp(buffer1, buffer2, static_cast<size_t>(file1.gcount())))) {
+      file1.close();
+      file2.close();
+      return false;
+    }
+  } while (!file1.eof() || !file2.eof());
+
+  file1.close();
+  file2.close();
+  return true;
+}
+
+// Returns true iff both files can be opened and are line-by-line equal after
+// stripping trailing CR/LF from each line (so CRLF vs LF endings compare
+// equal). Any unopenable file yields false.
+bool TextContentsEqual(const FilePath& filename1, const FilePath& filename2) {
+  std::ifstream file1(filename1.value().c_str(), std::ios::in);
+  std::ifstream file2(filename2.value().c_str(), std::ios::in);
+
+  // Even if both files aren't openable (and thus, in some sense, "equal"),
+  // any unusable file yields a result of "false".
+  if (!file1.is_open() || !file2.is_open())
+    return false;
+
+  do {
+    std::string line1, line2;
+    getline(file1, line1);
+    getline(file2, line2);
+
+    // Check for mismatched EOF states, or any error state.
+    if ((file1.eof() != file2.eof()) ||
+        file1.bad() || file2.bad()) {
+      return false;
+    }
+
+    // Trim all '\r' and '\n' characters from the end of the line.
+    std::string::size_type end1 = line1.find_last_not_of("\r\n");
+    if (end1 == std::string::npos)
+      line1.clear();  // Line is nothing but CR/LF characters.
+    else if (end1 + 1 < line1.length())
+      line1.erase(end1 + 1);
+
+    std::string::size_type end2 = line2.find_last_not_of("\r\n");
+    if (end2 == std::string::npos)
+      line2.clear();
+    else if (end2 + 1 < line2.length())
+      line2.erase(end2 + 1);
+
+    if (line1 != line2)
+      return false;
+  } while (!file1.eof() || !file2.eof());
+
+  return true;
+}
+#endif // !defined(OS_NACL_NONSFI)
+
+// Reads |path| into |contents| (if non-null), capped at |max_size| bytes.
+// Returns false if the file cannot be opened, a read error occurs, the path
+// references "..", or the file is larger than |max_size| — in the last case
+// |contents| still holds the first |max_size| bytes.
+bool ReadFileToStringWithMaxSize(const FilePath& path,
+                                 std::string* contents,
+                                 size_t max_size) {
+  if (contents)
+    contents->clear();
+  // Refuse paths that try to escape via "..".
+  if (path.ReferencesParent())
+    return false;
+  FILE* file = OpenFile(path, "rb");
+  if (!file) {
+    return false;
+  }
+
+  const size_t kBufferSize = 1 << 16;
+  std::unique_ptr<char[]> buf(new char[kBufferSize]);
+  size_t len;
+  size_t size = 0;
+  bool read_status = true;
+
+  // Many files supplied in |path| have incorrect size (proc files etc).
+  // Hence, the file is read sequentially as opposed to a one-shot read.
+  while ((len = fread(buf.get(), 1, kBufferSize, file)) > 0) {
+    // Append at most the remaining budget, then bail out if the chunk
+    // exceeded it. Order matters: the truncated tail is kept in |contents|.
+    if (contents)
+      contents->append(buf.get(), std::min(len, max_size - size));
+
+    if ((max_size - size) < len) {
+      read_status = false;
+      break;
+    }
+
+    size += len;
+  }
+  // A short final fread() is normal at EOF; only ferror() marks failure.
+  read_status = read_status && !ferror(file);
+  CloseFile(file);
+
+  return read_status;
+}
+
+// Reads the whole of |path| into |contents| with no practical size cap.
+bool ReadFileToString(const FilePath& path, std::string* contents) {
+  const size_t kNoSizeLimit = std::numeric_limits<size_t>::max();
+  return ReadFileToStringWithMaxSize(path, contents, kNoSizeLimit);
+}
+
+#if !defined(OS_NACL_NONSFI)
+// Returns true iff enumerating |dir_path| (files and subdirectories, not
+// recursive) yields no entries.
+bool IsDirectoryEmpty(const FilePath& dir_path) {
+  FileEnumerator enumerator(
+      dir_path, false, FileEnumerator::FILES | FileEnumerator::DIRECTORIES);
+  return enumerator.Next().empty();
+}
+
+// Creates and opens a temporary file in the system temp directory, storing
+// its path in |path|. Returns NULL if the temp directory cannot be found.
+FILE* CreateAndOpenTemporaryFile(FilePath* path) {
+  FilePath temp_dir;
+  return GetTempDir(&temp_dir)
+             ? CreateAndOpenTemporaryFileInDir(temp_dir, path)
+             : NULL;
+}
+
+// Convenience wrapper around CreateDirectoryAndGetError() that discards the
+// detailed error code.
+bool CreateDirectory(const FilePath& full_path) {
+  return CreateDirectoryAndGetError(full_path, NULL);
+}
+
+// Stores the size of |file_path| in |file_size|. On failure returns false
+// and leaves |file_size| untouched.
+bool GetFileSize(const FilePath& file_path, int64_t* file_size) {
+  File::Info info;
+  const bool got_info = GetFileInfo(file_path, &info);
+  if (got_info)
+    *file_size = info.size;
+  return got_info;
+}
+
+// Updates the access and modification times of |path| (file or, on Windows,
+// directory). Returns false if the path cannot be opened for attribute
+// writing or the times cannot be set.
+bool TouchFile(const FilePath& path,
+               const Time& last_accessed,
+               const Time& last_modified) {
+  int flags = File::FLAG_OPEN | File::FLAG_WRITE_ATTRIBUTES;
+
+#if defined(OS_WIN)
+  // On Windows, FILE_FLAG_BACKUP_SEMANTICS is needed to open a directory.
+  if (DirectoryExists(path))
+    flags |= File::FLAG_BACKUP_SEMANTICS;
+#endif  // OS_WIN
+
+  File file(path, flags);
+  if (!file.IsValid())
+    return false;
+
+  return file.SetTimes(last_accessed, last_modified);
+}
+#endif // !defined(OS_NACL_NONSFI)
+
+// Closes |file|. Closing a NULL stream is treated as a successful no-op.
+bool CloseFile(FILE* file) {
+  return file == NULL ? true : fclose(file) == 0;
+}
+
+#if !defined(OS_NACL_NONSFI)
+// Truncates |file| at its current seek position (ftell), dropping any bytes
+// after it. Returns false on a NULL stream or if the position cannot be
+// determined or the truncation fails.
+bool TruncateFile(FILE* file) {
+  if (file == NULL)
+    return false;
+  long current_offset = ftell(file);
+  if (current_offset == -1)
+    return false;
+#if defined(OS_WIN)
+  int fd = _fileno(file);
+  if (_chsize(fd, current_offset) != 0)
+    return false;
+#else
+  int fd = fileno(file);
+  if (ftruncate(fd, current_offset) != 0)
+    return false;
+#endif
+  return true;
+}
+
+// Finds the smallest " (n)" suffix that makes |path| unique, also checking
+// |path| + |suffix| when |suffix| is non-empty. Returns 0 if |path| itself is
+// free, 1..kMaxUniqueFiles for the first free uniquified name, or -1 if all
+// candidates are taken.
+int GetUniquePathNumber(const FilePath& path,
+                        const FilePath::StringType& suffix) {
+  bool have_suffix = !suffix.empty();
+  if (!PathExists(path) &&
+      (!have_suffix || !PathExists(FilePath(path.value() + suffix)))) {
+    return 0;
+  }
+
+  FilePath new_path;
+  for (int count = 1; count <= kMaxUniqueFiles; ++count) {
+    // Candidate names look like "name (1).ext", "name (2).ext", ...
+    new_path = path.InsertBeforeExtensionASCII(StringPrintf(" (%d)", count));
+    if (!PathExists(new_path) &&
+        (!have_suffix || !PathExists(FilePath(new_path.value() + suffix)))) {
+      return count;
+    }
+  }
+
+  return -1;
+}
+#endif // !defined(OS_NACL_NONSFI)
+
+} // namespace base
diff --git a/libchrome/base/files/file_util.h b/libchrome/base/files/file_util.h
new file mode 100644
index 0000000..420dcae
--- /dev/null
+++ b/libchrome/base/files/file_util.h
@@ -0,0 +1,450 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains utility functions for dealing with the local
+// filesystem.
+
+#ifndef BASE_FILES_FILE_UTIL_H_
+#define BASE_FILES_FILE_UTIL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/strings/string16.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_POSIX)
+#include <sys/stat.h>
+#include <unistd.h>
+#endif
+
+#if defined(OS_POSIX)
+#include "base/file_descriptor_posix.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#endif
+
+namespace base {
+
+class Environment;
+class Time;
+
+//-----------------------------------------------------------------------------
+// Functions that involve filesystem access or modification:
+
+// Returns an absolute version of a relative path. Returns an empty path on
+// error. On POSIX, this function fails if the path does not exist. This
+// function can result in I/O so it can be slow.
+BASE_EXPORT FilePath MakeAbsoluteFilePath(const FilePath& input);
+
+// Returns the total number of bytes used by all the files under |root_path|.
+// If the path does not exist the function returns 0.
+//
+// This function is implemented using the FileEnumerator class so it is not
+// particularly speedy in any platform.
+BASE_EXPORT int64_t ComputeDirectorySize(const FilePath& root_path);
+
+// Deletes the given path, whether it's a file or a directory.
+// If it's a directory, it's perfectly happy to delete all of the
+// directory's contents. Passing true to recursive deletes
+// subdirectories and their contents as well.
+// Returns true if successful, false otherwise. It is considered successful
+// to attempt to delete a file that does not exist.
+//
+// In posix environment and if |path| is a symbolic link, this deletes only
+// the symlink. (even if the symlink points to a non-existent file)
+//
+// WARNING: USING THIS WITH recursive==true IS EQUIVALENT
+// TO "rm -rf", SO USE WITH CAUTION.
+BASE_EXPORT bool DeleteFile(const FilePath& path, bool recursive);
+
+#if defined(OS_WIN)
+// Schedules to delete the given path, whether it's a file or a directory, until
+// the operating system is restarted.
+// Note:
+// 1) The file/directory to be deleted should exist in a temp folder.
+// 2) The directory to be deleted must be empty.
+BASE_EXPORT bool DeleteFileAfterReboot(const FilePath& path);
+#endif
+
+// Moves the given path, whether it's a file or a directory.
+// If a simple rename is not possible, such as in the case where the paths are
+// on different volumes, this will attempt to copy and delete. Returns
+// true for success.
+// This function fails if either path contains traversal components ('..').
+BASE_EXPORT bool Move(const FilePath& from_path, const FilePath& to_path);
+
+// Renames file |from_path| to |to_path|. Both paths must be on the same
+// volume, or the function will fail. Destination file will be created
+// if it doesn't exist. Prefer this function over Move when dealing with
+// temporary files. On Windows it preserves attributes of the target file.
+// Returns true on success, leaving *error unchanged.
+// Returns false on failure and sets *error appropriately, if it is non-NULL.
+BASE_EXPORT bool ReplaceFile(const FilePath& from_path,
+ const FilePath& to_path,
+ File::Error* error);
+
+// Copies a single file. Use CopyDirectory to copy directories.
+// This function fails if either path contains traversal components ('..').
+//
+// This function keeps the metadata on Windows. The read only bit on Windows is
+// not kept.
+BASE_EXPORT bool CopyFile(const FilePath& from_path, const FilePath& to_path);
+
+// Copies the given path, and optionally all subdirectories and their contents
+// as well.
+//
+// If there are files existing under to_path, always overwrite. Returns true
+// if successful, false otherwise. Wildcards on the names are not supported.
+//
+// This function calls into CopyFile() so the same behavior w.r.t. metadata
+// applies.
+//
+// If you only need to copy a file use CopyFile, it's faster.
+BASE_EXPORT bool CopyDirectory(const FilePath& from_path,
+ const FilePath& to_path,
+ bool recursive);
+
+// Returns true if the given path exists on the local filesystem,
+// false otherwise.
+BASE_EXPORT bool PathExists(const FilePath& path);
+
+// Returns true if the given path is writable by the user, false otherwise.
+BASE_EXPORT bool PathIsWritable(const FilePath& path);
+
+// Returns true if the given path exists and is a directory, false otherwise.
+BASE_EXPORT bool DirectoryExists(const FilePath& path);
+
+// Returns true if the contents of the two files given are equal, false
+// otherwise. If either file can't be read, returns false.
+BASE_EXPORT bool ContentsEqual(const FilePath& filename1,
+ const FilePath& filename2);
+
+// Returns true if the contents of the two text files given are equal, false
+// otherwise. This routine treats "\r\n" and "\n" as equivalent.
+BASE_EXPORT bool TextContentsEqual(const FilePath& filename1,
+ const FilePath& filename2);
+
+// Reads the file at |path| into |contents| and returns true on success and
+// false on error. For security reasons, a |path| containing path traversal
+// components ('..') is treated as a read error and |contents| is set to empty.
+// In case of I/O error, |contents| holds the data that could be read from the
+// file before the error occurred.
+// |contents| may be NULL, in which case this function is useful for its side
+// effect of priming the disk cache (could be used for unit tests).
+BASE_EXPORT bool ReadFileToString(const FilePath& path, std::string* contents);
+
+// Reads the file at |path| into |contents| and returns true on success and
+// false on error. For security reasons, a |path| containing path traversal
+// components ('..') is treated as a read error and |contents| is set to empty.
+// In case of I/O error, |contents| holds the data that could be read from the
+// file before the error occurred. When the file size exceeds |max_size|, the
+// function returns false with |contents| holding the file truncated to
+// |max_size|.
+// |contents| may be NULL, in which case this function is useful for its side
+// effect of priming the disk cache (could be used for unit tests).
+BASE_EXPORT bool ReadFileToStringWithMaxSize(const FilePath& path,
+ std::string* contents,
+ size_t max_size);
+
+#if defined(OS_POSIX)
+
+// Read exactly |bytes| bytes from file descriptor |fd|, storing the result
+// in |buffer|. This function is protected against EINTR and partial reads.
+// Returns true iff |bytes| bytes have been successfully read from |fd|.
+BASE_EXPORT bool ReadFromFD(int fd, char* buffer, size_t bytes);
+
+// Creates a symbolic link at |symlink| pointing to |target|. Returns
+// false on failure.
+BASE_EXPORT bool CreateSymbolicLink(const FilePath& target,
+ const FilePath& symlink);
+
+// Reads the given |symlink| and returns where it points to in |target|.
+// Returns false upon failure.
+BASE_EXPORT bool ReadSymbolicLink(const FilePath& symlink, FilePath* target);
+
+// Bits and masks of the file permission.
+enum FilePermissionBits {
+ FILE_PERMISSION_MASK = S_IRWXU | S_IRWXG | S_IRWXO,
+ FILE_PERMISSION_USER_MASK = S_IRWXU,
+ FILE_PERMISSION_GROUP_MASK = S_IRWXG,
+ FILE_PERMISSION_OTHERS_MASK = S_IRWXO,
+
+ FILE_PERMISSION_READ_BY_USER = S_IRUSR,
+ FILE_PERMISSION_WRITE_BY_USER = S_IWUSR,
+ FILE_PERMISSION_EXECUTE_BY_USER = S_IXUSR,
+ FILE_PERMISSION_READ_BY_GROUP = S_IRGRP,
+ FILE_PERMISSION_WRITE_BY_GROUP = S_IWGRP,
+ FILE_PERMISSION_EXECUTE_BY_GROUP = S_IXGRP,
+ FILE_PERMISSION_READ_BY_OTHERS = S_IROTH,
+ FILE_PERMISSION_WRITE_BY_OTHERS = S_IWOTH,
+ FILE_PERMISSION_EXECUTE_BY_OTHERS = S_IXOTH,
+};
+
+// Reads the permission of the given |path|, storing the file permission
+// bits in |mode|. If |path| is symbolic link, |mode| is the permission of
+// a file which the symlink points to.
+BASE_EXPORT bool GetPosixFilePermissions(const FilePath& path, int* mode);
+// Sets the permission of the given |path|. If |path| is symbolic link, sets
+// the permission of a file which the symlink points to.
+BASE_EXPORT bool SetPosixFilePermissions(const FilePath& path, int mode);
+
+// Returns true iff |executable| can be found in any directory specified by the
+// environment variable in |env|.
+BASE_EXPORT bool ExecutableExistsInPath(Environment* env,
+ const FilePath::StringType& executable);
+
+#endif // OS_POSIX
+
+// Returns true if the given directory is empty
+BASE_EXPORT bool IsDirectoryEmpty(const FilePath& dir_path);
+
+// Get the temporary directory provided by the system.
+//
+// WARNING: In general, you should use CreateTemporaryFile variants below
+// instead of this function. Those variants will ensure that the proper
+// permissions are set so that other users on the system can't edit them while
+// they're open (which can lead to security issues).
+BASE_EXPORT bool GetTempDir(FilePath* path);
+
+// Get the home directory. This is more complicated than just getenv("HOME")
+// as it knows to fall back on getpwent() etc.
+//
+// You should not generally call this directly. Instead use DIR_HOME with the
+// path service which will use this function but cache the value.
+// Path service may also override DIR_HOME.
+BASE_EXPORT FilePath GetHomeDir();
+
+// Creates a temporary file. The full path is placed in |path|, and the
+// function returns true if was successful in creating the file. The file will
+// be empty and all handles closed after this function returns.
+BASE_EXPORT bool CreateTemporaryFile(FilePath* path);
+
+// Same as CreateTemporaryFile but the file is created in |dir|.
+BASE_EXPORT bool CreateTemporaryFileInDir(const FilePath& dir,
+ FilePath* temp_file);
+
+// Create and open a temporary file. File is opened for read/write.
+// The full path is placed in |path|.
+// Returns a handle to the opened file or NULL if an error occurred.
+BASE_EXPORT FILE* CreateAndOpenTemporaryFile(FilePath* path);
+
+// Similar to CreateAndOpenTemporaryFile, but the file is created in |dir|.
+BASE_EXPORT FILE* CreateAndOpenTemporaryFileInDir(const FilePath& dir,
+ FilePath* path);
+
+// Create a new directory. If prefix is provided, the new directory name is in
+// the format of prefixyyyy.
+// NOTE: prefix is ignored in the POSIX implementation.
+// If success, return true and output the full path of the directory created.
+BASE_EXPORT bool CreateNewTempDirectory(const FilePath::StringType& prefix,
+ FilePath* new_temp_path);
+
+// Create a directory within another directory.
+// Extra characters will be appended to |prefix| to ensure that the
+// new directory does not have the same name as an existing directory.
+BASE_EXPORT bool CreateTemporaryDirInDir(const FilePath& base_dir,
+ const FilePath::StringType& prefix,
+ FilePath* new_dir);
+
+// Creates a directory, as well as creating any parent directories, if they
+// don't exist. Returns 'true' on successful creation, or if the directory
+// already exists. The directory is only readable by the current user.
+// Returns true on success, leaving *error unchanged.
+// Returns false on failure and sets *error appropriately, if it is non-NULL.
+BASE_EXPORT bool CreateDirectoryAndGetError(const FilePath& full_path,
+ File::Error* error);
+
+// Backward-compatible convenience method for the above.
+BASE_EXPORT bool CreateDirectory(const FilePath& full_path);
+
+// Returns the file size. Returns true on success.
+BASE_EXPORT bool GetFileSize(const FilePath& file_path, int64_t* file_size);
+
+// Sets |real_path| to |path| with symbolic links and junctions expanded.
+// On windows, make sure the path starts with a lettered drive.
+// |path| must reference a file. Function will fail if |path| points to
+// a directory or to a nonexistent path. On windows, this function will
+// fail if |path| is a junction or symlink that points to an empty file,
+// or if |real_path| would be longer than MAX_PATH characters.
+BASE_EXPORT bool NormalizeFilePath(const FilePath& path, FilePath* real_path);
+
+#if defined(OS_WIN)
+
+// Given a path in NT native form ("\Device\HarddiskVolumeXX\..."),
+// return in |drive_letter_path| the equivalent path that starts with
+// a drive letter ("C:\..."). Return false if no such path exists.
+BASE_EXPORT bool DevicePathToDriveLetterPath(const FilePath& device_path,
+ FilePath* drive_letter_path);
+
+// Given an existing file in |path|, set |real_path| to the path
+// in native NT format, of the form "\Device\HarddiskVolumeXX\..".
+// Returns false if the path can not be found. Empty files cannot
+// be resolved with this function.
+BASE_EXPORT bool NormalizeToNativeFilePath(const FilePath& path,
+ FilePath* nt_path);
+
+// Given an existing file in |path|, returns whether this file is on a network
+// drive or not. If |path| does not exist, this function returns false.
+BASE_EXPORT bool IsOnNetworkDrive(const base::FilePath& path);
+#endif
+
+// Returns whether the given file is a symbolic link.
+BASE_EXPORT bool IsLink(const FilePath& file_path);
+
+// Returns information about the given file path.
+BASE_EXPORT bool GetFileInfo(const FilePath& file_path, File::Info* info);
+
+// Sets the time of the last access and the time of the last modification.
+BASE_EXPORT bool TouchFile(const FilePath& path,
+ const Time& last_accessed,
+ const Time& last_modified);
+
+// Wrapper for fopen-like calls. Returns non-NULL FILE* on success.
+BASE_EXPORT FILE* OpenFile(const FilePath& filename, const char* mode);
+
+// Closes file opened by OpenFile. Returns true on success.
+BASE_EXPORT bool CloseFile(FILE* file);
+
+// Associates a standard FILE stream with an existing File. Note that this
+// function takes ownership of the existing File.
+BASE_EXPORT FILE* FileToFILE(File file, const char* mode);
+
+// Truncates an open file to end at the location of the current file pointer.
+// This is a cross-platform analog to Windows' SetEndOfFile() function.
+BASE_EXPORT bool TruncateFile(FILE* file);
+
+// Reads at most the given number of bytes from the file into the buffer.
+// Returns the number of read bytes, or -1 on error.
+BASE_EXPORT int ReadFile(const FilePath& filename, char* data, int max_size);
+
+// Writes the given buffer into the file, overwriting any data that was
+// previously there. Returns the number of bytes written, or -1 on error.
+BASE_EXPORT int WriteFile(const FilePath& filename, const char* data,
+ int size);
+
+#if defined(OS_POSIX)
+// Appends |data| to |fd|. Does not close |fd| when done. Returns true iff
+// |size| bytes of |data| were written to |fd|.
+BASE_EXPORT bool WriteFileDescriptor(const int fd, const char* data, int size);
+#endif
+
+// Appends |data| to |filename|. Returns true iff |size| bytes of |data| were
+// written to |filename|.
+BASE_EXPORT bool AppendToFile(const FilePath& filename,
+ const char* data,
+ int size);
+
+// Gets the current working directory for the process.
+BASE_EXPORT bool GetCurrentDirectory(FilePath* path);
+
+// Sets the current working directory for the process.
+BASE_EXPORT bool SetCurrentDirectory(const FilePath& path);
+
+// Attempts to find a number that can be appended to the |path| to make it
+// unique. If |path| does not exist, 0 is returned. If it fails to find such
+// a number, -1 is returned. If |suffix| is not empty, also checks the
+// existence of it with the given suffix.
+BASE_EXPORT int GetUniquePathNumber(const FilePath& path,
+ const FilePath::StringType& suffix);
+
+// Sets the given |fd| to non-blocking mode.
+// Returns true if it was able to set it in the non-blocking mode, otherwise
+// false.
+BASE_EXPORT bool SetNonBlocking(int fd);
+
+#if defined(OS_POSIX)
+// Test that |path| can only be changed by a given user and members of
+// a given set of groups.
+// Specifically, test that all parts of |path| under (and including) |base|:
+// * Exist.
+// * Are owned by a specific user.
+// * Are not writable by all users.
+// * Are owned by a member of a given set of groups, or are not writable by
+// their group.
+// * Are not symbolic links.
+// This is useful for checking that a config file is administrator-controlled.
+// |base| must contain |path|.
+BASE_EXPORT bool VerifyPathControlledByUser(const base::FilePath& base,
+ const base::FilePath& path,
+ uid_t owner_uid,
+ const std::set<gid_t>& group_gids);
+#endif // defined(OS_POSIX)
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+// Is |path| writable only by a user with administrator privileges?
+// This function uses Mac OS conventions. The super user is assumed to have
+// uid 0, and the administrator group is assumed to be named "admin".
+// Testing that |path|, and every parent directory including the root of
+// the filesystem, are owned by the superuser, controlled by the group
+// "admin", are not writable by all users, and contain no symbolic links.
+// Will return false if |path| does not exist.
+BASE_EXPORT bool VerifyPathControlledByAdmin(const base::FilePath& path);
+#endif // defined(OS_MACOSX) && !defined(OS_IOS)
+
+// Returns the maximum length of path component on the volume containing
+// the directory |path|, in the number of FilePath::CharType, or -1 on failure.
+BASE_EXPORT int GetMaximumPathComponentLength(const base::FilePath& path);
+
+#if defined(OS_LINUX)
+// Broad categories of file systems as returned by statfs() on Linux.
+enum FileSystemType {
+ FILE_SYSTEM_UNKNOWN, // statfs failed.
+ FILE_SYSTEM_0, // statfs.f_type == 0 means unknown, may indicate AFS.
+ FILE_SYSTEM_ORDINARY, // on-disk filesystem like ext2
+ FILE_SYSTEM_NFS,
+ FILE_SYSTEM_SMB,
+ FILE_SYSTEM_CODA,
+ FILE_SYSTEM_MEMORY, // in-memory file system
+ FILE_SYSTEM_CGROUP, // cgroup control.
+ FILE_SYSTEM_OTHER, // any other value.
+ FILE_SYSTEM_TYPE_COUNT
+};
+
+// Attempts to determine the FileSystemType for |path|.
+// Returns false if |path| doesn't exist.
+BASE_EXPORT bool GetFileSystemType(const FilePath& path, FileSystemType* type);
+#endif
+
+#if defined(OS_POSIX)
+// Get a temporary directory for shared memory files. The directory may depend
+// on whether the destination is intended for executable files, which in turn
+// depends on how /dev/shm was mounted. As a result, you must supply whether
+// you intend to create executable shmem segments so this function can find
+// an appropriate location.
+BASE_EXPORT bool GetShmemTempDir(bool executable, FilePath* path);
+#endif
+
+// Internal --------------------------------------------------------------------
+
+namespace internal {
+
+// Same as Move but allows paths with traversal components.
+// Use only with extreme care.
+BASE_EXPORT bool MoveUnsafe(const FilePath& from_path,
+ const FilePath& to_path);
+
+#if defined(OS_WIN)
+// Copy from_path to to_path recursively and then delete from_path recursively.
+// Returns true if all operations succeed.
+// This function simulates Move(), but unlike Move() it works across volumes.
+// This function is not transactional.
+BASE_EXPORT bool CopyAndDeleteDirectory(const FilePath& from_path,
+ const FilePath& to_path);
+#endif // defined(OS_WIN)
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_FILES_FILE_UTIL_H_
diff --git a/libchrome/base/files/file_util_linux.cc b/libchrome/base/files/file_util_linux.cc
new file mode 100644
index 0000000..b0c6e03
--- /dev/null
+++ b/libchrome/base/files/file_util_linux.cc
@@ -0,0 +1,67 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_util.h"
+
+#include <errno.h>
+#include <linux/magic.h>
+#include <stdint.h>
+#include <sys/vfs.h>
+
+#include "base/files/file_path.h"
+
+namespace base {
+
// Maps the statfs(2) filesystem magic number of |path| onto one of the
// broad FileSystemType categories declared in base/files/file_util.h.
// Returns false only when |path| does not exist (statfs fails with ENOENT);
// any other statfs failure is reported as FILE_SYSTEM_UNKNOWN with a true
// return value.
bool GetFileSystemType(const FilePath& path, FileSystemType* type) {
  struct statfs statfs_buf;
  if (statfs(path.value().c_str(), &statfs_buf) < 0) {
    // Nonexistent path is the one failure the caller can distinguish.
    if (errno == ENOENT)
      return false;
    *type = FILE_SYSTEM_UNKNOWN;
    return true;
  }

  // Not all possible |statfs_buf.f_type| values are in linux/magic.h.
  // Missing values are copied from the statfs man page.
  // In some platforms, |statfs_buf.f_type| is declared as signed, but some of
  // the values will overflow it, causing narrowing warnings. Cast to the
  // largest possible unsigned integer type to avoid it.
  switch (static_cast<uintmax_t>(statfs_buf.f_type)) {
    case 0:
      *type = FILE_SYSTEM_0;
      break;
    case EXT2_SUPER_MAGIC:  // Also ext3 and ext4
    case MSDOS_SUPER_MAGIC:
    case REISERFS_SUPER_MAGIC:
    case BTRFS_SUPER_MAGIC:
    case 0x5346544E:  // NTFS
    case 0x58465342:  // XFS
    case 0x3153464A:  // JFS
      *type = FILE_SYSTEM_ORDINARY;
      break;
    case NFS_SUPER_MAGIC:
      *type = FILE_SYSTEM_NFS;
      break;
    case SMB_SUPER_MAGIC:
    case 0xFF534D42:  // CIFS
      *type = FILE_SYSTEM_SMB;
      break;
    case CODA_SUPER_MAGIC:
      *type = FILE_SYSTEM_CODA;
      break;
    case HUGETLBFS_MAGIC:
    case RAMFS_MAGIC:
    case TMPFS_MAGIC:
      *type = FILE_SYSTEM_MEMORY;
      break;
    case CGROUP_SUPER_MAGIC:
      *type = FILE_SYSTEM_CGROUP;
      break;
    default:
      *type = FILE_SYSTEM_OTHER;
  }
  return true;
}
+
+} // namespace base
diff --git a/libchrome/base/files/file_util_mac.mm b/libchrome/base/files/file_util_mac.mm
new file mode 100644
index 0000000..e9c6c65
--- /dev/null
+++ b/libchrome/base/files/file_util_mac.mm
@@ -0,0 +1,50 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_util.h"
+
+#include <copyfile.h>
+#import <Foundation/Foundation.h>
+
+#include "base/files/file_path.h"
+#include "base/mac/foundation_util.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
+ ThreadRestrictions::AssertIOAllowed();
+ if (from_path.ReferencesParent() || to_path.ReferencesParent())
+ return false;
+ return (copyfile(from_path.value().c_str(),
+ to_path.value().c_str(), NULL, COPYFILE_DATA) == 0);
+}
+
+bool GetTempDir(base::FilePath* path) {
+ NSString* tmp = NSTemporaryDirectory();
+ if (tmp == nil)
+ return false;
+ *path = base::mac::NSStringToFilePath(tmp);
+ return true;
+}
+
+FilePath GetHomeDir() {
+ NSString* tmp = NSHomeDirectory();
+ if (tmp != nil) {
+ FilePath mac_home_dir = base::mac::NSStringToFilePath(tmp);
+ if (!mac_home_dir.empty())
+ return mac_home_dir;
+ }
+
+ // Fall back on temp dir if no home directory is defined.
+ FilePath rv;
+ if (GetTempDir(&rv))
+ return rv;
+
+ // Last resort.
+ return FilePath("/tmp");
+}
+
+} // namespace base
diff --git a/libchrome/base/files/file_util_posix.cc b/libchrome/base/files/file_util_posix.cc
new file mode 100644
index 0000000..85a1b41
--- /dev/null
+++ b/libchrome/base/files/file_util_posix.cc
@@ -0,0 +1,960 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_util.h"
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <libgen.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/errno.h>
+#include <sys/mman.h>
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "base/environment.h"
+#include "base/files/file_enumerator.h"
+#include "base/files/file_path.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/singleton.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/stl_util.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/sys_info.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include <AvailabilityMacros.h>
+#include "base/mac/foundation_util.h"
+#endif
+
+#if defined(OS_ANDROID)
+#include "base/android/content_uri_utils.h"
+#include "base/os_compat_android.h"
+#include "base/path_service.h"
+#endif
+
+#if !defined(OS_IOS)
+#include <grp.h>
+#endif
+
+namespace base {
+
+namespace {
+
#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL)
// On BSD-derived platforms (and NaCl) there are no stat64()/lstat64()
// variants, so the plain calls are used.
static int CallStat(const char *path, stat_wrapper_t *sb) {
  ThreadRestrictions::AssertIOAllowed();
  return stat(path, sb);
}
static int CallLstat(const char *path, stat_wrapper_t *sb) {
  ThreadRestrictions::AssertIOAllowed();
  return lstat(path, sb);
}
#else  // defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL)
// Elsewhere (e.g. Linux), use the explicit 64-bit variants; stat_wrapper_t
// is expected to match stat64 here.
static int CallStat(const char *path, stat_wrapper_t *sb) {
  ThreadRestrictions::AssertIOAllowed();
  return stat64(path, sb);
}
static int CallLstat(const char *path, stat_wrapper_t *sb) {
  ThreadRestrictions::AssertIOAllowed();
  return lstat64(path, sb);
}
#endif  // !(defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL))
+
+#if !defined(OS_NACL_NONSFI)
+// Helper for NormalizeFilePath(), defined below.
+bool RealPath(const FilePath& path, FilePath* real_path) {
+ ThreadRestrictions::AssertIOAllowed(); // For realpath().
+ FilePath::CharType buf[PATH_MAX];
+ if (!realpath(path.value().c_str(), buf))
+ return false;
+
+ *real_path = FilePath(buf);
+ return true;
+}
+
+// Helper for VerifyPathControlledByUser.
+bool VerifySpecificPathControlledByUser(const FilePath& path,
+ uid_t owner_uid,
+ const std::set<gid_t>& group_gids) {
+ stat_wrapper_t stat_info;
+ if (CallLstat(path.value().c_str(), &stat_info) != 0) {
+ DPLOG(ERROR) << "Failed to get information on path "
+ << path.value();
+ return false;
+ }
+
+ if (S_ISLNK(stat_info.st_mode)) {
+ DLOG(ERROR) << "Path " << path.value()
+ << " is a symbolic link.";
+ return false;
+ }
+
+ if (stat_info.st_uid != owner_uid) {
+ DLOG(ERROR) << "Path " << path.value()
+ << " is owned by the wrong user.";
+ return false;
+ }
+
+ if ((stat_info.st_mode & S_IWGRP) &&
+ !ContainsKey(group_gids, stat_info.st_gid)) {
+ DLOG(ERROR) << "Path " << path.value()
+ << " is writable by an unprivileged group.";
+ return false;
+ }
+
+ if (stat_info.st_mode & S_IWOTH) {
+ DLOG(ERROR) << "Path " << path.value()
+ << " is writable by any user.";
+ return false;
+ }
+
+ return true;
+}
+
// Returns the hidden-file name template passed to mkstemp(); the trailing
// "XXXXXX" run is the placeholder mkstemp() rewrites in place.
std::string TempFileName() {
#if defined(OS_MACOSX)
  return StringPrintf(".%s.XXXXXX", base::mac::BaseBundleID());
#elif defined(GOOGLE_CHROME_BUILD)
  return std::string(".com.google.Chrome.XXXXXX");
#else
  return std::string(".org.chromium.Chromium.XXXXXX");
#endif
}
+
+// Creates and opens a temporary file in |directory|, returning the
+// file descriptor. |path| is set to the temporary file path.
+// This function does NOT unlink() the file.
+int CreateAndOpenFdForTemporaryFile(FilePath directory, FilePath* path) {
+ ThreadRestrictions::AssertIOAllowed(); // For call to mkstemp().
+ *path = directory.Append(base::TempFileName());
+ const std::string& tmpdir_string = path->value();
+ // this should be OK since mkstemp just replaces characters in place
+ char* buffer = const_cast<char*>(tmpdir_string.c_str());
+
+ return HANDLE_EINTR(mkstemp(buffer));
+}
+
+#if defined(OS_LINUX)
+// Determine if /dev/shm files can be mapped and then mprotect'd PROT_EXEC.
+// This depends on the mount options used for /dev/shm, which vary among
+// different Linux distributions and possibly local configuration. It also
+// depends on details of kernel--ChromeOS uses the noexec option for /dev/shm
+// but its kernel allows mprotect with PROT_EXEC anyway.
+bool DetermineDevShmExecutable() {
+ bool result = false;
+ FilePath path;
+
+ ScopedFD fd(CreateAndOpenFdForTemporaryFile(FilePath("/dev/shm"), &path));
+ if (fd.is_valid()) {
+ DeleteFile(path, false);
+ long sysconf_result = sysconf(_SC_PAGESIZE);
+ CHECK_GE(sysconf_result, 0);
+ size_t pagesize = static_cast<size_t>(sysconf_result);
+ CHECK_GE(sizeof(pagesize), sizeof(sysconf_result));
+ void* mapping = mmap(NULL, pagesize, PROT_READ, MAP_SHARED, fd.get(), 0);
+ if (mapping != MAP_FAILED) {
+ if (mprotect(mapping, pagesize, PROT_READ | PROT_EXEC) == 0)
+ result = true;
+ munmap(mapping, pagesize);
+ }
+ }
+ return result;
+}
+#endif // defined(OS_LINUX)
+#endif // !defined(OS_NACL_NONSFI)
+
+} // namespace
+
+#if !defined(OS_NACL_NONSFI)
+FilePath MakeAbsoluteFilePath(const FilePath& input) {
+ ThreadRestrictions::AssertIOAllowed();
+ char full_path[PATH_MAX];
+ if (realpath(input.value().c_str(), full_path) == NULL)
+ return FilePath();
+ return FilePath(full_path);
+}
+
+// TODO(erikkay): The Windows version of this accepts paths like "foo/bar/*"
+// which works both with and without the recursive flag. I'm not sure we need
+// that functionality. If not, remove from file_util_win.cc, otherwise add it
+// here.
+bool DeleteFile(const FilePath& path, bool recursive) {
+ ThreadRestrictions::AssertIOAllowed();
+ const char* path_str = path.value().c_str();
+ stat_wrapper_t file_info;
+ int test = CallLstat(path_str, &file_info);
+ if (test != 0) {
+ // The Windows version defines this condition as success.
+ bool ret = (errno == ENOENT || errno == ENOTDIR);
+ return ret;
+ }
+ if (!S_ISDIR(file_info.st_mode))
+ return (unlink(path_str) == 0);
+ if (!recursive)
+ return (rmdir(path_str) == 0);
+
+ bool success = true;
+ std::stack<std::string> directories;
+ directories.push(path.value());
+ FileEnumerator traversal(path, true,
+ FileEnumerator::FILES | FileEnumerator::DIRECTORIES |
+ FileEnumerator::SHOW_SYM_LINKS);
+ for (FilePath current = traversal.Next(); success && !current.empty();
+ current = traversal.Next()) {
+ if (traversal.GetInfo().IsDirectory())
+ directories.push(current.value());
+ else
+ success = (unlink(current.value().c_str()) == 0);
+ }
+
+ while (success && !directories.empty()) {
+ FilePath dir = FilePath(directories.top());
+ directories.pop();
+ success = (rmdir(dir.value().c_str()) == 0);
+ }
+ return success;
+}
+
+bool ReplaceFile(const FilePath& from_path,
+ const FilePath& to_path,
+ File::Error* error) {
+ ThreadRestrictions::AssertIOAllowed();
+ if (rename(from_path.value().c_str(), to_path.value().c_str()) == 0)
+ return true;
+ if (error)
+ *error = File::OSErrorToFileError(errno);
+ return false;
+}
+
+bool CopyDirectory(const FilePath& from_path,
+                   const FilePath& to_path,
+                   bool recursive) {
+  ThreadRestrictions::AssertIOAllowed();
+  // Some old callers of CopyDirectory want it to support wildcards.
+  // After some discussion, we decided to fix those callers.
+  // Break loudly here if anyone tries to do this.
+  DCHECK(to_path.value().find('*') == std::string::npos);
+  DCHECK(from_path.value().find('*') == std::string::npos);
+
+  if (from_path.value().size() >= PATH_MAX) {
+    return false;
+  }
+
+  // This function does not properly handle destinations within the source.
+  // Canonicalize both paths and reject a |to_path| that lies inside
+  // |from_path| (prefix comparison below).
+  FilePath real_to_path = to_path;
+  if (PathExists(real_to_path)) {
+    real_to_path = MakeAbsoluteFilePath(real_to_path);
+    if (real_to_path.empty())
+      return false;
+  } else {
+    // |to_path| does not exist yet; canonicalize its parent instead.
+    real_to_path = MakeAbsoluteFilePath(real_to_path.DirName());
+    if (real_to_path.empty())
+      return false;
+  }
+  FilePath real_from_path = MakeAbsoluteFilePath(from_path);
+  if (real_from_path.empty())
+    return false;
+  // Fail when the canonical destination starts with the canonical source.
+  if (real_to_path.value().size() >= real_from_path.value().size() &&
+      real_to_path.value().compare(0, real_from_path.value().size(),
+                                   real_from_path.value()) == 0) {
+    return false;
+  }
+
+  int traverse_type = FileEnumerator::FILES | FileEnumerator::SHOW_SYM_LINKS;
+  if (recursive)
+    traverse_type |= FileEnumerator::DIRECTORIES;
+  FileEnumerator traversal(from_path, recursive, traverse_type);
+
+  // We have to mimic windows behavior here. |to_path| may not exist yet,
+  // start the loop with |to_path|.
+  struct stat from_stat;
+  FilePath current = from_path;
+  if (stat(from_path.value().c_str(), &from_stat) < 0) {
+    DLOG(ERROR) << "CopyDirectory() couldn't stat source directory: "
+                << from_path.value() << " errno = " << errno;
+    return false;
+  }
+  struct stat to_path_stat;
+  FilePath from_path_base = from_path;
+  if (recursive && stat(to_path.value().c_str(), &to_path_stat) == 0 &&
+      S_ISDIR(to_path_stat.st_mode)) {
+    // If the destination already exists and is a directory, then the
+    // top level of source needs to be copied.
+    from_path_base = from_path.DirName();
+  }
+
+  // The Windows version of this function assumes that non-recursive calls
+  // will always have a directory for from_path.
+  // TODO(maruel): This is not necessary anymore.
+  DCHECK(recursive || S_ISDIR(from_stat.st_mode));
+
+  bool success = true;
+  // Invariant: |from_stat| describes |current| -- it is seeded from
+  // |from_path| above and refreshed from the enumerator at loop bottom.
+  while (success && !current.empty()) {
+    // current is the source path, including from_path, so append
+    // the suffix after from_path to to_path to create the target_path.
+    FilePath target_path(to_path);
+    if (from_path_base != current) {
+      if (!from_path_base.AppendRelativePath(current, &target_path)) {
+        success = false;
+        break;
+      }
+    }
+
+    if (S_ISDIR(from_stat.st_mode)) {
+      // Preserve the source's mode (within 01777) while guaranteeing the
+      // owner can enter and write the new directory; EEXIST is benign.
+      if (mkdir(target_path.value().c_str(),
+                (from_stat.st_mode & 01777) | S_IRUSR | S_IXUSR | S_IWUSR) !=
+              0 &&
+          errno != EEXIST) {
+        DLOG(ERROR) << "CopyDirectory() couldn't create directory: "
+                    << target_path.value() << " errno = " << errno;
+        success = false;
+      }
+    } else if (S_ISREG(from_stat.st_mode)) {
+      if (!CopyFile(current, target_path)) {
+        DLOG(ERROR) << "CopyDirectory() couldn't create file: "
+                    << target_path.value();
+        success = false;
+      }
+    } else {
+      // Sockets, FIFOs, devices etc. are deliberately skipped, not copied.
+      DLOG(WARNING) << "CopyDirectory() skipping non-regular file: "
+                    << current.value();
+    }
+
+    current = traversal.Next();
+    if (!current.empty())
+      from_stat = traversal.GetInfo().stat();
+  }
+
+  return success;
+}
+#endif // !defined(OS_NACL_NONSFI)
+
+// Puts |fd| into non-blocking mode; no-op if O_NONBLOCK is already set.
+bool SetNonBlocking(int fd) {
+  // Fetch the current descriptor flags; fails if |fd| is invalid.
+  const int flags = fcntl(fd, F_GETFL);
+  if (flags == -1)
+    return false;
+  if ((flags & O_NONBLOCK) != 0)
+    return true;
+  return HANDLE_EINTR(fcntl(fd, F_SETFL, flags | O_NONBLOCK)) != -1;
+}
+
+// Returns true if |path| exists (any file type).
+bool PathExists(const FilePath& path) {
+  ThreadRestrictions::AssertIOAllowed();
+#if defined(OS_ANDROID)
+  // Content URIs are not filesystem paths; ask the content resolver instead
+  // of access(2).
+  if (path.IsContentUri())
+    return ContentUriExists(path);
+#endif
+  const int rc = access(path.value().c_str(), F_OK);
+  return rc == 0;
+}
+
+#if !defined(OS_NACL_NONSFI)
+// Returns true if |path| is writable by the calling process per access(2)
+// (which checks against the real, not effective, user/group IDs).
+bool PathIsWritable(const FilePath& path) {
+  ThreadRestrictions::AssertIOAllowed();
+  const bool writable = access(path.value().c_str(), W_OK) == 0;
+  return writable;
+}
+#endif // !defined(OS_NACL_NONSFI)
+
+// Returns true if |path| exists and is a directory. A path that cannot be
+// stat()ed is treated as "not a directory".
+bool DirectoryExists(const FilePath& path) {
+  ThreadRestrictions::AssertIOAllowed();
+  stat_wrapper_t file_info;
+  if (CallStat(path.value().c_str(), &file_info) != 0)
+    return false;
+  return S_ISDIR(file_info.st_mode);
+}
+
+// Reads exactly |bytes| bytes from |fd| into |buffer|. Returns false on a
+// short read (EOF) or read error.
+bool ReadFromFD(int fd, char* buffer, size_t bytes) {
+  size_t remaining = bytes;
+  char* cursor = buffer;
+  while (remaining > 0) {
+    // read(2) may return fewer bytes than requested; keep going until done.
+    const ssize_t chunk = HANDLE_EINTR(read(fd, cursor, remaining));
+    if (chunk <= 0)
+      return false;
+    cursor += chunk;
+    remaining -= chunk;
+  }
+  return true;
+}
+
+#if !defined(OS_NACL_NONSFI)
+// Creates a symlink at |symlink_path| pointing to |target_path|.
+bool CreateSymbolicLink(const FilePath& target_path,
+                        const FilePath& symlink_path) {
+  DCHECK(!symlink_path.empty());
+  DCHECK(!target_path.empty());
+  // symlink(2) returns 0 on success and -1 on failure.
+  const int rc = ::symlink(target_path.value().c_str(),
+                           symlink_path.value().c_str());
+  return rc == 0;
+}
+
+// Reads the target of symlink |symlink_path| into |target_path|. Returns
+// false (and clears |target_path|) if the path is not a symlink or the read
+// fails.
+bool ReadSymbolicLink(const FilePath& symlink_path, FilePath* target_path) {
+  DCHECK(!symlink_path.empty());
+  DCHECK(target_path);
+  char buf[PATH_MAX];
+  ssize_t count = ::readlink(symlink_path.value().c_str(), buf, arraysize(buf));
+
+  if (count <= 0) {
+    target_path->clear();
+    return false;
+  }
+
+  // NOTE(review): readlink() does not NUL-terminate and silently truncates;
+  // a result of exactly PATH_MAX bytes may be a truncated target.
+  *target_path = FilePath(FilePath::StringType(buf, count));
+  return true;
+}
+
+// Stores |path|'s permission bits (masked by FILE_PERMISSION_MASK) in |mode|.
+bool GetPosixFilePermissions(const FilePath& path, int* mode) {
+  ThreadRestrictions::AssertIOAllowed();
+  DCHECK(mode);
+
+  // stat() (not lstat()) is deliberate: for a symlink we want the target's
+  // permission bits, since the link's own st_mode is not meaningful here.
+  stat_wrapper_t file_info;
+  const bool stat_ok = CallStat(path.value().c_str(), &file_info) == 0;
+  if (!stat_ok)
+    return false;
+
+  *mode = file_info.st_mode & FILE_PERMISSION_MASK;
+  return true;
+}
+
+// Replaces |path|'s permission bits with |mode| (which must lie within
+// FILE_PERMISSION_MASK), preserving higher mode bits such as S_ISGID.
+bool SetPosixFilePermissions(const FilePath& path,
+                             int mode) {
+  ThreadRestrictions::AssertIOAllowed();
+  DCHECK_EQ(mode & ~FILE_PERMISSION_MASK, 0);
+
+  // Read the current mode first so bits outside the permission mask survive.
+  stat_wrapper_t stat_buf;
+  if (CallStat(path.value().c_str(), &stat_buf) != 0)
+    return false;
+
+  const mode_t preserved_bits = stat_buf.st_mode & ~FILE_PERMISSION_MASK;
+  const mode_t requested_bits = mode & FILE_PERMISSION_MASK;
+  return HANDLE_EINTR(
+             chmod(path.value().c_str(), preserved_bits | requested_bits)) == 0;
+}
+
+// Returns true if some directory in $PATH contains |executable| with the
+// owner-execute permission bit set.
+bool ExecutableExistsInPath(Environment* env,
+                            const FilePath::StringType& executable) {
+  std::string path;
+  if (!env->GetVar("PATH", &path)) {
+    LOG(ERROR) << "No $PATH variable. Assuming no " << executable << ".";
+    return false;
+  }
+
+  for (const StringPiece& dir :
+       SplitStringPiece(path, ":", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY)) {
+    const FilePath candidate = FilePath(dir).Append(executable);
+    int permissions;
+    if (!GetPosixFilePermissions(candidate, &permissions))
+      continue;  // Missing or unreadable entry; try the next directory.
+    if (permissions & FILE_PERMISSION_EXECUTE_BY_USER)
+      return true;
+  }
+  return false;
+}
+
+#if !defined(OS_MACOSX)
+// This is implemented in file_util_mac.mm for Mac.
+bool GetTempDir(FilePath* path) {
+  // Honor $TMPDIR first, matching POSIX convention.
+  const char* tmp = getenv("TMPDIR");
+  if (tmp) {
+    *path = FilePath(tmp);
+  } else {
+#if defined(OS_ANDROID)
+    // Chromium-configured Android builds: use the app cache dir from
+    // PathService, which can legitimately fail (hence the direct return).
+    return PathService::Get(base::DIR_CACHE, path);
+#elif defined(__ANDROID__)
+    // NOTE(review): reached only when the compiler defines __ANDROID__ but
+    // the build does not set OS_ANDROID (e.g. non-Chromium libchrome build).
+    *path = FilePath("/data/local/tmp");
+#else
+    *path = FilePath("/tmp");
+#endif
+  }
+  return true;
+}
+#endif // !defined(OS_MACOSX)
+
+#if !defined(OS_MACOSX) // Mac implementation is in file_util_mac.mm.
+FilePath GetHomeDir() {
+#if defined(OS_CHROMEOS)
+  if (SysInfo::IsRunningOnChromeOS()) {
+    // On Chrome OS chrome::DIR_USER_DATA is overridden with a primary user
+    // homedir once it becomes available. Return / as the safe option.
+    return FilePath("/");
+  }
+#endif
+
+  // Prefer the conventional $HOME variable when set and non-empty.
+  const char* home_dir = getenv("HOME");
+  if (home_dir && home_dir[0])
+    return FilePath(home_dir);
+
+#if defined(OS_ANDROID)
+  DLOG(WARNING) << "OS_ANDROID: Home directory lookup not yet implemented.";
+#endif
+
+  // Fall back to the temp directory, which should always be usable.
+  FilePath rv;
+  if (GetTempDir(&rv))
+    return rv;
+
+  // Last resort.
+  return FilePath("/tmp");
+}
+#endif // !defined(OS_MACOSX)
+
+// Creates a new temporary file in the system temp directory and returns its
+// path in |path|; the file is created then immediately closed.
+bool CreateTemporaryFile(FilePath* path) {
+  ThreadRestrictions::AssertIOAllowed();  // For call to close().
+  FilePath directory;
+  if (!GetTempDir(&directory))
+    return false;
+  int fd = CreateAndOpenFdForTemporaryFile(directory, path);
+  if (fd < 0)
+    return false;
+  // Match CreateTemporaryFileInDir(): a failed close() is a failure, and
+  // EINTR is not retried because the fd state is unspecified afterwards.
+  return !IGNORE_EINTR(close(fd));
+}
+
+// Creates a temporary file in |dir|, returning it as a stdio stream opened
+// in "a+" mode, or NULL on failure. |path| receives the file's path.
+FILE* CreateAndOpenTemporaryFileInDir(const FilePath& dir, FilePath* path) {
+  const int fd = CreateAndOpenFdForTemporaryFile(dir, path);
+  if (fd < 0)
+    return NULL;
+
+  FILE* stream = fdopen(fd, "a+");
+  if (!stream) {
+    // fdopen() failed; close the descriptor ourselves to avoid a leak.
+    close(fd);
+  }
+  return stream;
+}
+
+// Creates (and closes) a temporary file in |dir|; |temp_file| receives its
+// path. A failed close() counts as failure.
+bool CreateTemporaryFileInDir(const FilePath& dir, FilePath* temp_file) {
+  ThreadRestrictions::AssertIOAllowed();  // For call to close().
+  const int fd = CreateAndOpenFdForTemporaryFile(dir, temp_file);
+  if (fd < 0)
+    return false;
+  return IGNORE_EINTR(close(fd)) == 0;
+}
+
+// Creates a uniquely-named directory under |base_dir| from mkdtemp template
+// |name_tmpl| (must contain "XXXXXX"); |new_dir| receives the result.
+static bool CreateTemporaryDirInDirImpl(const FilePath& base_dir,
+                                        const FilePath::StringType& name_tmpl,
+                                        FilePath* new_dir) {
+  ThreadRestrictions::AssertIOAllowed();  // For call to mkdtemp().
+  DCHECK(name_tmpl.find("XXXXXX") != FilePath::StringType::npos)
+      << "Directory name template must contain \"XXXXXX\".";
+
+  FilePath sub_dir = base_dir.Append(name_tmpl);
+  std::string sub_dir_string = sub_dir.value();
+
+  // mkdtemp() rewrites the template in place. Use &s[0] rather than
+  // const_cast<char*>(c_str()): the standard forbids writing through the
+  // c_str()/data() pointer, while &s[0] is a legitimately mutable buffer
+  // that is contiguous and NUL-terminated since C++11.
+  char* buffer = &sub_dir_string[0];
+  char* dtemp = mkdtemp(buffer);
+  if (!dtemp) {
+    DPLOG(ERROR) << "mkdtemp";
+    return false;
+  }
+  *new_dir = FilePath(dtemp);
+  return true;
+}
+
+// Creates a uniquely-named directory "<prefix>XXXXXX" under |base_dir|.
+bool CreateTemporaryDirInDir(const FilePath& base_dir,
+                             const FilePath::StringType& prefix,
+                             FilePath* new_dir) {
+  // Build the template that mkdtemp() requires: prefix + six X's.
+  FilePath::StringType mkdtemp_template = prefix;
+  mkdtemp_template += FILE_PATH_LITERAL("XXXXXX");
+  return CreateTemporaryDirInDirImpl(base_dir, mkdtemp_template, new_dir);
+}
+
+// Creates a new uniquely-named directory in the system temp dir. The
+// |prefix| argument is ignored on POSIX; TempFileName() supplies the
+// template instead.
+bool CreateNewTempDirectory(const FilePath::StringType& /*prefix*/,
+                            FilePath* new_temp_path) {
+  FilePath tmpdir;
+  const bool got_tmp = GetTempDir(&tmpdir);
+  if (!got_tmp)
+    return false;
+  return CreateTemporaryDirInDirImpl(tmpdir, TempFileName(), new_temp_path);
+}
+
+// Creates |full_path| and any missing ancestors (like `mkdir -p`) with mode
+// 0700. On failure, stores the mapped OS error in |error| (when non-null).
+bool CreateDirectoryAndGetError(const FilePath& full_path,
+                                File::Error* error) {
+  ThreadRestrictions::AssertIOAllowed();  // For call to mkdir().
+  std::vector<FilePath> subpaths;
+
+  // Collect a list of all parent directories, deepest first. The loop stops
+  // when DirName() no longer changes the path (i.e. at the root).
+  FilePath last_path = full_path;
+  subpaths.push_back(full_path);
+  for (FilePath path = full_path.DirName();
+       path.value() != last_path.value(); path = path.DirName()) {
+    subpaths.push_back(path);
+    last_path = path;
+  }
+
+  // Iterate through the parents and create the missing ones.
+  for (std::vector<FilePath>::reverse_iterator i = subpaths.rbegin();
+       i != subpaths.rend(); ++i) {
+    if (DirectoryExists(*i))
+      continue;
+    if (mkdir(i->value().c_str(), 0700) == 0)
+      continue;
+    // Mkdir failed, but it might have failed with EEXIST, or some other error
+    // due to the directory appearing out of thin air. This can occur if
+    // two processes are trying to create the same file system tree at the same
+    // time. Check to see if it exists and make sure it is a directory.
+    int saved_errno = errno;  // DirectoryExists() below may clobber errno.
+    if (!DirectoryExists(*i)) {
+      if (error)
+        *error = File::OSErrorToFileError(saved_errno);
+      return false;
+    }
+  }
+  return true;
+}
+
+// Canonicalizes |path| (resolving symlinks and relative components) into
+// |normalized_path|. Fails for directories, to be consistent with Windows.
+bool NormalizeFilePath(const FilePath& path, FilePath* normalized_path) {
+  FilePath real_path_result;
+  if (!RealPath(path, &real_path_result))
+    return false;
+
+  stat_wrapper_t file_info;
+  if (CallStat(real_path_result.value().c_str(), &file_info) != 0)
+    return false;
+  if (S_ISDIR(file_info.st_mode))
+    return false;
+
+  *normalized_path = real_path_result;
+  return true;
+}
+
+// TODO(rkc): Refactor GetFileInfo and FileEnumerator to handle symlinks
+// correctly. http://code.google.com/p/chromium-os/issues/detail?id=15948
+bool IsLink(const FilePath& file_path) {
+  // lstat() (not stat()) so the link itself is examined rather than its
+  // target. A path we cannot lstat is reported as "not a link".
+  stat_wrapper_t st;
+  if (CallLstat(file_path.value().c_str(), &st) != 0)
+    return false;
+  return S_ISLNK(st.st_mode);
+}
+
+// Fills |results| with metadata for |file_path|. On Android, content URIs
+// are opened through the content resolver and queried via File::GetInfo.
+bool GetFileInfo(const FilePath& file_path, File::Info* results) {
+  stat_wrapper_t file_info;
+#if defined(OS_ANDROID)
+  if (file_path.IsContentUri()) {
+    // Content URIs cannot be stat()ed directly.
+    File file = OpenContentUriForRead(file_path);
+    if (!file.IsValid())
+      return false;
+    return file.GetInfo(results);
+  } else {
+#endif  // defined(OS_ANDROID)
+    if (CallStat(file_path.value().c_str(), &file_info) != 0)
+      return false;
+#if defined(OS_ANDROID)
+  }
+#endif  // defined(OS_ANDROID)
+
+  results->FromStat(file_info);
+  return true;
+}
+#endif // !defined(OS_NACL_NONSFI)
+
+// fopen() wrapper that retries on EINTR (fopen is not covered by
+// HANDLE_EINTR since it returns a pointer, not -1).
+FILE* OpenFile(const FilePath& filename, const char* mode) {
+  ThreadRestrictions::AssertIOAllowed();
+  FILE* result = NULL;
+  while (true) {
+    result = fopen(filename.value().c_str(), mode);
+    if (result || errno != EINTR)
+      break;
+  }
+  return result;
+}
+
+// NaCl doesn't implement system calls to open files directly.
+#if !defined(OS_NACL)
+// Wraps |file|'s descriptor in a stdio stream. On success the returned
+// FILE* takes ownership, so the descriptor is released from |file| to avoid
+// a double close; on failure |file| keeps ownership and closes it normally.
+FILE* FileToFILE(File file, const char* mode) {
+  FILE* stream = fdopen(file.GetPlatformFile(), mode);
+  if (stream)
+    file.TakePlatformFile();
+  return stream;
+}
+#endif // !defined(OS_NACL)
+
+// Reads up to |max_size| bytes from |filename| into |data| with a single
+// read(2). Returns the byte count, or -1 on open/close/read failure.
+int ReadFile(const FilePath& filename, char* data, int max_size) {
+  ThreadRestrictions::AssertIOAllowed();
+  const int fd = HANDLE_EINTR(open(filename.value().c_str(), O_RDONLY));
+  if (fd < 0)
+    return -1;
+
+  const ssize_t bytes_read = HANDLE_EINTR(read(fd, data, max_size));
+  // A failed close() invalidates whatever was read.
+  if (IGNORE_EINTR(close(fd)) < 0)
+    return -1;
+  return bytes_read;
+}
+
+// Writes |size| bytes of |data| to |filename|, creating/truncating it.
+// Returns |size| on success, -1 on any failure.
+int WriteFile(const FilePath& filename, const char* data, int size) {
+  ThreadRestrictions::AssertIOAllowed();
+  // creat() == open(O_WRONLY | O_CREAT | O_TRUNC); 0666 is moderated by the
+  // process umask.
+  const int fd = HANDLE_EINTR(creat(filename.value().c_str(), 0666));
+  if (fd < 0)
+    return -1;
+
+  const bool write_ok = WriteFileDescriptor(fd, data, size);
+  const bool close_ok = IGNORE_EINTR(close(fd)) == 0;
+  if (!close_ok)
+    return -1;
+  return write_ok ? size : -1;
+}
+
+// Writes all |size| bytes of |data| to |fd|, looping over partial writes.
+// Returns false on the first write error.
+bool WriteFileDescriptor(const int fd, const char* data, int size) {
+  ssize_t written = 0;
+  while (written < size) {
+    // write(2) may accept fewer bytes than requested.
+    const ssize_t chunk =
+        HANDLE_EINTR(write(fd, data + written, size - written));
+    if (chunk < 0)
+      return false;
+    written += chunk;
+  }
+  return true;
+}
+
+#if !defined(OS_NACL_NONSFI)
+
+// Appends |size| bytes of |data| to the *existing* file |filename|.
+// Note: O_CREAT is deliberately absent -- the file must already exist.
+bool AppendToFile(const FilePath& filename, const char* data, int size) {
+  ThreadRestrictions::AssertIOAllowed();
+  bool ret = true;
+  int fd = HANDLE_EINTR(open(filename.value().c_str(), O_WRONLY | O_APPEND));
+  if (fd < 0) {
+    // The open() above does not create the file, so say "open", not
+    // "create", to avoid a misleading diagnostic.
+    VPLOG(1) << "Unable to open file " << filename.value();
+    return false;
+  }
+
+  // This call will either write all of the data or return false.
+  if (!WriteFileDescriptor(fd, data, size)) {
+    VPLOG(1) << "Error while writing to file " << filename.value();
+    ret = false;
+  }
+
+  if (IGNORE_EINTR(close(fd)) < 0) {
+    VPLOG(1) << "Error while closing file " << filename.value();
+    return false;
+  }
+
+  return ret;
+}
+
+// Gets the current working directory for the process.
+bool GetCurrentDirectory(FilePath* dir) {
+  // getcwd can return ENOENT, which implies it checks against the disk.
+  ThreadRestrictions::AssertIOAllowed();
+
+  char cwd_buffer[PATH_MAX] = "";
+  if (getcwd(cwd_buffer, sizeof(cwd_buffer)) == NULL) {
+    NOTREACHED();
+    return false;
+  }
+  *dir = FilePath(cwd_buffer);
+  return true;
+}
+
+// Sets the current working directory for the process.
+bool SetCurrentDirectory(const FilePath& path) {
+  ThreadRestrictions::AssertIOAllowed();
+  return chdir(path.value().c_str()) == 0;
+}
+
+// Verifies that every path component from |base| down to |path| is owned by
+// |owner_uid| (or an allowed group) per VerifySpecificPathControlledByUser.
+// |base| must be |path| itself or one of its ancestors.
+bool VerifyPathControlledByUser(const FilePath& base,
+                                const FilePath& path,
+                                uid_t owner_uid,
+                                const std::set<gid_t>& group_gids) {
+  if (base != path && !base.IsParent(path)) {
+    // NOTE(review): this wording looks inverted -- the check above requires
+    // |base| to be a parent of |path|, not the other way around.
+    DLOG(ERROR) << "|base| must be a subdirectory of |path|. base = \""
+                << base.value() << "\", path = \"" << path.value() << "\"";
+    return false;
+  }
+
+  std::vector<FilePath::StringType> base_components;
+  std::vector<FilePath::StringType> path_components;
+
+  base.GetComponents(&base_components);
+  path.GetComponents(&path_components);
+
+  // Advance |ip| past the components shared with |base|; after this loop it
+  // points at the first component of |path| below |base|.
+  std::vector<FilePath::StringType>::const_iterator ib, ip;
+  for (ib = base_components.begin(), ip = path_components.begin();
+       ib != base_components.end(); ++ib, ++ip) {
+    // |base| must be a subpath of |path|, so all components should match.
+    // If these CHECKs fail, look at the test that base is a parent of
+    // path at the top of this function.
+    DCHECK(ip != path_components.end());
+    DCHECK(*ip == *ib);
+  }
+
+  // Verify |base| itself, then every successive prefix down to |path|.
+  FilePath current_path = base;
+  if (!VerifySpecificPathControlledByUser(current_path, owner_uid, group_gids))
+    return false;
+
+  for (; ip != path_components.end(); ++ip) {
+    current_path = current_path.Append(*ip);
+    if (!VerifySpecificPathControlledByUser(
+            current_path, owner_uid, group_gids))
+      return false;
+  }
+  return true;
+}
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+// Mac-only: verifies that the whole chain from "/" down to |path| is owned
+// by root and controlled only by root or an admin group.
+bool VerifyPathControlledByAdmin(const FilePath& path) {
+  const unsigned kRootUid = 0;
+  const FilePath kFileSystemRoot("/");
+
+  // The name of the administrator group on mac os.
+  const char* const kAdminGroupNames[] = {
+    "admin",
+    "wheel"
+  };
+
+  // Reading the groups database may touch the file system.
+  ThreadRestrictions::AssertIOAllowed();
+
+  // Resolve each admin group name to its gid; unresolvable names are logged
+  // and skipped.
+  std::set<gid_t> allowed_group_ids;
+  for (const char* group_name : kAdminGroupNames) {
+    struct group* group_record = getgrnam(group_name);
+    if (!group_record) {
+      DPLOG(ERROR) << "Could not get the group ID of group \""
+                   << group_name << "\".";
+      continue;
+    }
+    allowed_group_ids.insert(group_record->gr_gid);
+  }
+
+  return VerifyPathControlledByUser(
+      kFileSystemRoot, path, kRootUid, allowed_group_ids);
+}
+#endif // defined(OS_MACOSX) && !defined(OS_IOS)
+
+// Returns the filesystem's maximum file-name component length for |path|,
+// or -1 (pathconf's error/indeterminate result) on failure.
+int GetMaximumPathComponentLength(const FilePath& path) {
+  ThreadRestrictions::AssertIOAllowed();
+  const long max_length = pathconf(path.value().c_str(), _PC_NAME_MAX);
+  return max_length;
+}
+
+#if !defined(OS_ANDROID)
+// This is implemented in file_util_android.cc for that platform.
+// Picks a directory for shared-memory files. On Linux this prefers
+// /dev/shm (RAM-backed); when an executable mapping is required and
+// /dev/shm does not allow it, falls back to the regular temp dir.
+bool GetShmemTempDir(bool executable, FilePath* path) {
+#if defined(OS_LINUX)
+  bool use_dev_shm = true;
+  if (executable) {
+    // Probed once and cached for the process lifetime (function-local
+    // static).
+    static const bool s_dev_shm_executable = DetermineDevShmExecutable();
+    use_dev_shm = s_dev_shm_executable;
+  }
+  if (use_dev_shm) {
+    *path = FilePath("/dev/shm");
+    return true;
+  }
+#else
+  (void)executable; // Avoid unused warning when !defined(OS_LINUX).
+#endif
+  return GetTempDir(path);
+}
+#endif // !defined(OS_ANDROID)
+
+#if !defined(OS_MACOSX)
+// Mac has its own implementation, this is for all other Posix systems.
+// Copies |from_path| to |to_path| through a 32 KiB buffer. The destination
+// is created (or truncated) first; returns false on any open/read/write
+// error.
+bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
+  ThreadRestrictions::AssertIOAllowed();
+  File infile;
+#if defined(OS_ANDROID)
+  if (from_path.IsContentUri()) {
+    // Android content URIs must be opened through the content resolver.
+    infile = OpenContentUriForRead(from_path);
+  } else {
+    infile = File(from_path, File::FLAG_OPEN | File::FLAG_READ);
+  }
+#else
+  infile = File(from_path, File::FLAG_OPEN | File::FLAG_READ);
+#endif
+  if (!infile.IsValid())
+    return false;
+
+  // FLAG_CREATE_ALWAYS truncates any existing destination file.
+  File outfile(to_path, File::FLAG_WRITE | File::FLAG_CREATE_ALWAYS);
+  if (!outfile.IsValid())
+    return false;
+
+  const size_t kBufferSize = 32768;
+  std::vector<char> buffer(kBufferSize);
+  bool result = true;
+
+  // Copy until EOF (bytes_read == 0) or the first read/write error.
+  while (result) {
+    ssize_t bytes_read = infile.ReadAtCurrentPos(&buffer[0], buffer.size());
+    if (bytes_read < 0) {
+      result = false;
+      break;
+    }
+    if (bytes_read == 0)
+      break;
+    // Allow for partial writes
+    ssize_t bytes_written_per_read = 0;
+    do {
+      ssize_t bytes_written_partial = outfile.WriteAtCurrentPos(
+          &buffer[bytes_written_per_read], bytes_read - bytes_written_per_read);
+      if (bytes_written_partial < 0) {
+        // A failed write aborts the copy; |result| also ends the outer loop.
+        result = false;
+        break;
+      }
+      bytes_written_per_read += bytes_written_partial;
+    } while (bytes_written_per_read < bytes_read);
+  }
+
+  return result;
+}
+#endif // !defined(OS_MACOSX)
+
+// -----------------------------------------------------------------------------
+
+namespace internal {
+
+// Moves |from_path| to |to_path|, preferring an atomic rename(2) and
+// falling back to copy-then-delete (e.g. across filesystems).
+bool MoveUnsafe(const FilePath& from_path, const FilePath& to_path) {
+  ThreadRestrictions::AssertIOAllowed();
+  // Windows compatibility: if to_path exists, from_path and to_path
+  // must be the same type, either both files, or both directories.
+  stat_wrapper_t to_file_info;
+  if (CallStat(to_path.value().c_str(), &to_file_info) == 0) {
+    stat_wrapper_t from_file_info;
+    if (CallStat(from_path.value().c_str(), &from_file_info) == 0) {
+      if (S_ISDIR(to_file_info.st_mode) != S_ISDIR(from_file_info.st_mode))
+        return false;
+    } else {
+      return false;
+    }
+  }
+
+  // Fast path: rename() works when both paths are on the same filesystem.
+  if (rename(from_path.value().c_str(), to_path.value().c_str()) == 0)
+    return true;
+
+  // Fallback: recursive copy, then a best-effort delete of the source
+  // (the delete's result is deliberately ignored).
+  if (!CopyDirectory(from_path, to_path, true))
+    return false;
+
+  DeleteFile(from_path, true);
+  return true;
+}
+
+} // namespace internal
+
+#endif // !defined(OS_NACL_NONSFI)
+} // namespace base
diff --git a/libchrome/base/files/important_file_writer.cc b/libchrome/base/files/important_file_writer.cc
new file mode 100644
index 0000000..28550ad
--- /dev/null
+++ b/libchrome/base/files/important_file_writer.cc
@@ -0,0 +1,239 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/important_file_writer.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/critical_closure.h"
+#include "base/debug/alias.h"
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/metrics/histogram.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/task_runner.h"
+#include "base/task_runner_util.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+const int kDefaultCommitIntervalMs = 10000;
+
+// This enum is used to define the buckets for an enumerated UMA histogram.
+// Hence,
+// (a) existing enumerated constants should never be deleted or reordered, and
+// (b) new constants should only be appended at the end of the enumeration.
+enum TempFileFailure {
+  FAILED_CREATING,
+  FAILED_OPENING,
+  FAILED_CLOSING, // Unused.
+  FAILED_WRITING,
+  FAILED_RENAMING,
+  FAILED_FLUSHING,
+  TEMP_FILE_FAILURE_MAX  // Must stay last: used as the histogram boundary.
+};
+
+// Records |failure_code| in the ImportantFile.TempFileFailures histogram and
+// logs |message| along with the last system error (DPLOG).
+void LogFailure(const FilePath& path, TempFileFailure failure_code,
+                StringPiece message) {
+  UMA_HISTOGRAM_ENUMERATION("ImportantFile.TempFileFailures", failure_code,
+                            TEMP_FILE_FAILURE_MAX);
+  DPLOG(WARNING) << "temp file failure: " << path.value() << " : " << message;
+}
+
+// Helper function to call WriteFileAtomically() with a
+// std::unique_ptr<std::string>, so a posted task can own the data it writes.
+bool WriteScopedStringToFileAtomically(const FilePath& path,
+                                       std::unique_ptr<std::string> data) {
+  return ImportantFileWriter::WriteFileAtomically(path, *data);
+}
+
+} // namespace
+
+// static
+bool ImportantFileWriter::WriteFileAtomically(const FilePath& path,
+                                              StringPiece data) {
+#if defined(OS_CHROMEOS)
+  // On Chrome OS, chrome gets killed when it cannot finish shutdown quickly,
+  // and this function seems to be one of the slowest shutdown steps.
+  // Include some info to the report for investigation. crbug.com/418627
+  // TODO(hashimoto): Remove this.
+  struct {
+    size_t data_size;
+    char path[128];
+  } file_info;
+  file_info.data_size = data.size();
+  strlcpy(file_info.path, path.value().c_str(), arraysize(file_info.path));
+  debug::Alias(&file_info);
+#endif
+
+  // Write the data to a temp file then rename to avoid data loss if we crash
+  // while writing the file. Ensure that the temp file is on the same volume
+  // as target file, so it can be moved in one step, and that the temp file
+  // is securely created.
+  FilePath tmp_file_path;
+  if (!CreateTemporaryFileInDir(path.DirName(), &tmp_file_path)) {
+    LogFailure(path, FAILED_CREATING, "could not create temporary file");
+    return false;
+  }
+
+  File tmp_file(tmp_file_path, File::FLAG_OPEN | File::FLAG_WRITE);
+  if (!tmp_file.IsValid()) {
+    LogFailure(path, FAILED_OPENING, "could not open temporary file");
+    return false;
+  }
+
+  // If this fails in the wild, something really bad is going on.
+  const int data_length = checked_cast<int32_t>(data.length());
+  int bytes_written = tmp_file.Write(0, data.data(), data_length);
+  // Flush before close so the bytes are on disk before the rename publishes
+  // the file.
+  bool flush_success = tmp_file.Flush();
+  tmp_file.Close();
+
+  // On any failure below, delete the temp file so it is not left behind.
+  if (bytes_written < data_length) {
+    LogFailure(path, FAILED_WRITING, "error writing, bytes_written=" +
+               IntToString(bytes_written));
+    DeleteFile(tmp_file_path, false);
+    return false;
+  }
+
+  if (!flush_success) {
+    LogFailure(path, FAILED_FLUSHING, "error flushing");
+    DeleteFile(tmp_file_path, false);
+    return false;
+  }
+
+  // ReplaceFile() uses rename(2), so readers observe either the old or the
+  // new contents, never a partial write.
+  if (!ReplaceFile(tmp_file_path, path, nullptr)) {
+    LogFailure(path, FAILED_RENAMING, "could not rename temporary file");
+    DeleteFile(tmp_file_path, false);
+    return false;
+  }
+
+  return true;
+}
+
+// Delegates to the interval-taking constructor using the default commit
+// interval (kDefaultCommitIntervalMs).
+ImportantFileWriter::ImportantFileWriter(
+    const FilePath& path,
+    scoped_refptr<SequencedTaskRunner> task_runner)
+    : ImportantFileWriter(
+          path,
+          std::move(task_runner),
+          TimeDelta::FromMilliseconds(kDefaultCommitIntervalMs)) {}
+
+// |task_runner| receives the posted disk I/O tasks and must be non-null
+// (enforced by the DCHECK below).
+ImportantFileWriter::ImportantFileWriter(
+    const FilePath& path,
+    scoped_refptr<SequencedTaskRunner> task_runner,
+    TimeDelta interval)
+    : path_(path),
+      task_runner_(std::move(task_runner)),
+      serializer_(nullptr),
+      commit_interval_(interval),
+      weak_factory_(this) {
+  DCHECK(CalledOnValidThread());
+  DCHECK(task_runner_);
+}
+
+ImportantFileWriter::~ImportantFileWriter() {
+  // We're usually a member variable of some other object, which also tends
+  // to be our serializer. It may not be safe to call back to the parent object
+  // being destructed. The DCHECK enforces that no timer-scheduled write is
+  // still pending at this point.
+  DCHECK(!HasPendingWrite());
+}
+
+bool ImportantFileWriter::HasPendingWrite() const {
+  DCHECK(CalledOnValidThread());
+  // A pending write exists exactly while the commit timer is armed.
+  return timer_.IsRunning();
+}
+
+void ImportantFileWriter::WriteNow(std::unique_ptr<std::string> data) {
+  DCHECK(CalledOnValidThread());
+  // WriteFileAtomically() narrows the length via checked_cast<int32_t>;
+  // refuse data that would overflow it.
+  if (!IsValueInRangeForNumericType<int32_t>(data->length())) {
+    NOTREACHED();
+    return;
+  }
+
+  // An immediate write supersedes any timer-scheduled one.
+  if (HasPendingWrite())
+    timer_.Stop();
+
+  // Passed() transfers ownership of |data| into the bound task.
+  auto task = Bind(&WriteScopedStringToFileAtomically, path_, Passed(&data));
+  if (!PostWriteTask(task)) {
+    // Posting the task to background message loop is not expected
+    // to fail, but if it does, avoid losing data and just hit the disk
+    // on the current thread.
+    NOTREACHED();
+
+    task.Run();
+  }
+}
+
+void ImportantFileWriter::ScheduleWrite(DataSerializer* serializer) {
+  DCHECK(CalledOnValidThread());
+
+  DCHECK(serializer);
+  // Remember only the latest serializer: repeated ScheduleWrite() calls
+  // within one commit interval coalesce into a single write.
+  serializer_ = serializer;
+
+  // An already-armed timer keeps its original deadline.
+  if (timer_.IsRunning())
+    return;
+  timer_.Start(FROM_HERE, commit_interval_, this,
+               &ImportantFileWriter::DoScheduledWrite);
+}
+
+void ImportantFileWriter::DoScheduledWrite() {
+  DCHECK(serializer_);
+  // Ask the serializer for a snapshot and hand it off to WriteNow().
+  std::unique_ptr<std::string> data(new std::string);
+  const bool serialized = serializer_->SerializeData(data.get());
+  if (serialized) {
+    WriteNow(std::move(data));
+  } else {
+    DLOG(WARNING) << "failed to serialize data to be saved in "
+                  << path_.value();
+  }
+  // The serializer is consumed regardless of the outcome.
+  serializer_ = nullptr;
+}
+
+void ImportantFileWriter::RegisterOnNextSuccessfulWriteCallback(
+    const Closure& on_next_successful_write) {
+  // Only one callback may be registered at a time; it is consumed by
+  // ForwardSuccessfulWrite() on the next successful write.
+  DCHECK(on_next_successful_write_.is_null());
+  on_next_successful_write_ = on_next_successful_write;
+}
+
+// Posts |task| to |task_runner_|, routing the write's boolean result back to
+// ForwardSuccessfulWrite() only when a success callback is registered.
+bool ImportantFileWriter::PostWriteTask(const Callback<bool()>& task) {
+  // TODO(gab): This code could always use PostTaskAndReplyWithResult and let
+  // ForwardSuccessfulWrite() no-op if |on_next_successful_write_| is null, but
+  // PostTaskAndReply causes memory leaks in tests (crbug.com/371974) and
+  // suppressing all of those is unrealistic hence we avoid most of them by
+  // using PostTask() in the typical scenario below.
+  if (!on_next_successful_write_.is_null()) {
+    return PostTaskAndReplyWithResult(
+        task_runner_.get(),
+        FROM_HERE,
+        MakeCriticalClosure(task),
+        Bind(&ImportantFileWriter::ForwardSuccessfulWrite,
+             weak_factory_.GetWeakPtr()));
+  }
+  // No observer: fire-and-forget, dropping the boolean result.
+  return task_runner_->PostTask(
+      FROM_HERE,
+      MakeCriticalClosure(Bind(IgnoreResult(task))));
+}
+
+void ImportantFileWriter::ForwardSuccessfulWrite(bool result) {
+  DCHECK(CalledOnValidThread());
+  // Only a successful write fires the one-shot callback; a failed write
+  // leaves it registered for the next attempt.
+  if (!result || on_next_successful_write_.is_null())
+    return;
+  on_next_successful_write_.Run();
+  on_next_successful_write_.Reset();
+}
+
+} // namespace base
diff --git a/libchrome/base/files/important_file_writer.h b/libchrome/base/files/important_file_writer.h
new file mode 100644
index 0000000..0bd8a7f
--- /dev/null
+++ b/libchrome/base/files/important_file_writer.h
@@ -0,0 +1,140 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_IMPORTANT_FILE_WRITER_H_
+#define BASE_FILES_IMPORTANT_FILE_WRITER_H_
+
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/strings/string_piece.h"
+#include "base/threading/non_thread_safe.h"
+#include "base/time/time.h"
+#include "base/timer/timer.h"
+
+namespace base {
+
+class SequencedTaskRunner;
+class Thread;
+
+// Helper to ensure that a file won't be corrupted by the write (for example on
+// application crash). Consider a naive way to save an important file F:
+//
+// 1. Open F for writing, truncating it.
+// 2. Write new data to F.
+//
+// It's good when it works, but it gets very bad if step 2. doesn't complete.
+// It can be caused by a crash, a computer hang, or a weird I/O error. And you
+// end up with a broken file.
+//
+// To be safe, we don't start with writing directly to F. Instead, we write
+// to a temporary file. Only after that write is successful, we rename the
+// temporary file to target filename.
+//
+// If you want to know more about this approach and ext3/ext4 fsync issues, see
+// http://blog.valerieaurora.org/2009/04/16/dont-panic-fsync-ext34-and-your-data/
+class BASE_EXPORT ImportantFileWriter : public NonThreadSafe {
+ public:
+  // Used by ScheduleSave to lazily provide the data to be saved. Allows us
+  // to also batch data serializations.
+  class BASE_EXPORT DataSerializer {
+   public:
+    // Should put serialized string in |data| and return true on successful
+    // serialization. Will be called on the same thread on which
+    // ImportantFileWriter has been created.
+    virtual bool SerializeData(std::string* data) = 0;
+
+   protected:
+    virtual ~DataSerializer() {}
+  };
+
+  // Save |data| to |path| in an atomic manner (see the class comment above).
+  // Blocks and writes data on the current thread.
+  static bool WriteFileAtomically(const FilePath& path, StringPiece data);
+
+  // Initialize the writer.
+  // |path| is the name of file to write.
+  // |task_runner| is the SequencedTaskRunner instance on which we will
+  // execute file I/O operations.
+  // All non-const methods, ctor and dtor must be called on the same thread.
+  ImportantFileWriter(const FilePath& path,
+                      scoped_refptr<SequencedTaskRunner> task_runner);
+
+  // Same as above, but with a custom commit interval.
+  ImportantFileWriter(const FilePath& path,
+                      scoped_refptr<SequencedTaskRunner> task_runner,
+                      TimeDelta interval);
+
+  // You have to ensure that there are no pending writes at the moment
+  // of destruction.
+  ~ImportantFileWriter();
+
+  const FilePath& path() const { return path_; }
+
+  // Returns true if there is a scheduled write pending which has not yet
+  // been started.
+  bool HasPendingWrite() const;
+
+  // Save |data| to target filename. Does not block. If there is a pending write
+  // scheduled by ScheduleWrite(), it is cancelled.
+  void WriteNow(std::unique_ptr<std::string> data);
+
+  // Schedule a save to target filename. Data will be serialized and saved
+  // to disk after the commit interval. If another ScheduleWrite is issued
+  // before that, only one serialization and write to disk will happen, and
+  // the most recent |serializer| will be used. This operation does not block.
+  // |serializer| should remain valid through the lifetime of
+  // ImportantFileWriter.
+  void ScheduleWrite(DataSerializer* serializer);
+
+  // Serialize data pending to be saved and execute write on backend thread.
+  void DoScheduledWrite();
+
+  // Registers |on_next_successful_write| to be called once, on the next
+  // successful write event. Only one callback can be set at once.
+  void RegisterOnNextSuccessfulWriteCallback(
+      const Closure& on_next_successful_write);
+
+  TimeDelta commit_interval() const {
+    return commit_interval_;
+  }
+
+ private:
+  // Helper method for WriteNow().
+  bool PostWriteTask(const Callback<bool()>& task);
+
+  // If |result| is true and |on_next_successful_write_| is set, invokes
+  // |on_next_successful_write_| and then resets it; no-ops otherwise.
+  void ForwardSuccessfulWrite(bool result);
+
+  // Invoked once and then reset on the next successful write event.
+  Closure on_next_successful_write_;
+
+  // Path being written to.
+  const FilePath path_;
+
+  // TaskRunner for the thread on which file I/O can be done.
+  const scoped_refptr<SequencedTaskRunner> task_runner_;
+
+  // Timer used to schedule commit after ScheduleWrite.
+  OneShotTimer timer_;
+
+  // Serializer which will provide the data to be saved.
+  DataSerializer* serializer_;
+
+  // Time delta after which scheduled data will be written to disk.
+  const TimeDelta commit_interval_;
+
+  // Declared last so outstanding weak pointers are invalidated before the
+  // other members are destroyed (members destruct in reverse order).
+  WeakPtrFactory<ImportantFileWriter> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(ImportantFileWriter);
+};
+
+} // namespace base
+
+#endif // BASE_FILES_IMPORTANT_FILE_WRITER_H_
diff --git a/libchrome/base/files/important_file_writer_unittest.cc b/libchrome/base/files/important_file_writer_unittest.cc
new file mode 100644
index 0000000..43e051e
--- /dev/null
+++ b/libchrome/base/files/important_file_writer_unittest.cc
@@ -0,0 +1,198 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/important_file_writer.h"
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+std::string GetFileContent(const FilePath& path) {
+ std::string content;
+ if (!ReadFileToString(path, &content)) {
+ NOTREACHED();
+ }
+ return content;
+}
+
+class DataSerializer : public ImportantFileWriter::DataSerializer {
+ public:
+ explicit DataSerializer(const std::string& data) : data_(data) {
+ }
+
+ bool SerializeData(std::string* output) override {
+ output->assign(data_);
+ return true;
+ }
+
+ private:
+ const std::string data_;
+};
+
+class SuccessfulWriteObserver {
+ public:
+ SuccessfulWriteObserver() : successful_write_observed_(false) {}
+
+ // Register on_successful_write() to be called on the next successful write
+ // of |writer|.
+ void ObserveNextSuccessfulWrite(ImportantFileWriter* writer);
+
+ // Returns true if a successful write was observed via on_successful_write()
+ // and resets the observation state to false regardless.
+ bool GetAndResetObservationState();
+
+ private:
+ void on_successful_write() {
+ EXPECT_FALSE(successful_write_observed_);
+ successful_write_observed_ = true;
+ }
+
+ bool successful_write_observed_;
+
+ DISALLOW_COPY_AND_ASSIGN(SuccessfulWriteObserver);
+};
+
+void SuccessfulWriteObserver::ObserveNextSuccessfulWrite(
+ ImportantFileWriter* writer) {
+ writer->RegisterOnNextSuccessfulWriteCallback(base::Bind(
+ &SuccessfulWriteObserver::on_successful_write, base::Unretained(this)));
+}
+
+bool SuccessfulWriteObserver::GetAndResetObservationState() {
+ bool was_successful_write_observed = successful_write_observed_;
+ successful_write_observed_ = false;
+ return was_successful_write_observed;
+}
+
+} // namespace
+
+class ImportantFileWriterTest : public testing::Test {
+ public:
+ ImportantFileWriterTest() { }
+ void SetUp() override {
+ ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
+ file_ = temp_dir_.path().AppendASCII("test-file");
+ }
+
+ protected:
+ SuccessfulWriteObserver successful_write_observer_;
+ FilePath file_;
+ MessageLoop loop_;
+
+ private:
+ ScopedTempDir temp_dir_;
+};
+
+TEST_F(ImportantFileWriterTest, Basic) {
+ ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
+ EXPECT_FALSE(PathExists(writer.path()));
+ EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
+ writer.WriteNow(WrapUnique(new std::string("foo")));
+ RunLoop().RunUntilIdle();
+
+ EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
+ ASSERT_TRUE(PathExists(writer.path()));
+ EXPECT_EQ("foo", GetFileContent(writer.path()));
+}
+
+TEST_F(ImportantFileWriterTest, BasicWithSuccessfulWriteObserver) {
+ ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
+ EXPECT_FALSE(PathExists(writer.path()));
+ EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
+ successful_write_observer_.ObserveNextSuccessfulWrite(&writer);
+ writer.WriteNow(WrapUnique(new std::string("foo")));
+ RunLoop().RunUntilIdle();
+
+ // Confirm that the observer is invoked.
+ EXPECT_TRUE(successful_write_observer_.GetAndResetObservationState());
+ ASSERT_TRUE(PathExists(writer.path()));
+ EXPECT_EQ("foo", GetFileContent(writer.path()));
+
+ // Confirm that re-installing the observer works for another write.
+ EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
+ successful_write_observer_.ObserveNextSuccessfulWrite(&writer);
+ writer.WriteNow(WrapUnique(new std::string("bar")));
+ RunLoop().RunUntilIdle();
+
+ EXPECT_TRUE(successful_write_observer_.GetAndResetObservationState());
+ ASSERT_TRUE(PathExists(writer.path()));
+ EXPECT_EQ("bar", GetFileContent(writer.path()));
+
+ // Confirm that writing again without re-installing the observer doesn't
+ // result in a notification.
+ EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
+ writer.WriteNow(WrapUnique(new std::string("baz")));
+ RunLoop().RunUntilIdle();
+
+ EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
+ ASSERT_TRUE(PathExists(writer.path()));
+ EXPECT_EQ("baz", GetFileContent(writer.path()));
+}
+
+TEST_F(ImportantFileWriterTest, ScheduleWrite) {
+ ImportantFileWriter writer(file_,
+ ThreadTaskRunnerHandle::Get(),
+ TimeDelta::FromMilliseconds(25));
+ EXPECT_FALSE(writer.HasPendingWrite());
+ DataSerializer serializer("foo");
+ writer.ScheduleWrite(&serializer);
+ EXPECT_TRUE(writer.HasPendingWrite());
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, MessageLoop::QuitWhenIdleClosure(),
+ TimeDelta::FromMilliseconds(100));
+ RunLoop().Run();
+ EXPECT_FALSE(writer.HasPendingWrite());
+ ASSERT_TRUE(PathExists(writer.path()));
+ EXPECT_EQ("foo", GetFileContent(writer.path()));
+}
+
+TEST_F(ImportantFileWriterTest, DoScheduledWrite) {
+ ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
+ EXPECT_FALSE(writer.HasPendingWrite());
+ DataSerializer serializer("foo");
+ writer.ScheduleWrite(&serializer);
+ EXPECT_TRUE(writer.HasPendingWrite());
+ writer.DoScheduledWrite();
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, MessageLoop::QuitWhenIdleClosure(),
+ TimeDelta::FromMilliseconds(100));
+ RunLoop().Run();
+ EXPECT_FALSE(writer.HasPendingWrite());
+ ASSERT_TRUE(PathExists(writer.path()));
+ EXPECT_EQ("foo", GetFileContent(writer.path()));
+}
+
+TEST_F(ImportantFileWriterTest, BatchingWrites) {
+ ImportantFileWriter writer(file_,
+ ThreadTaskRunnerHandle::Get(),
+ TimeDelta::FromMilliseconds(25));
+ DataSerializer foo("foo"), bar("bar"), baz("baz");
+ writer.ScheduleWrite(&foo);
+ writer.ScheduleWrite(&bar);
+ writer.ScheduleWrite(&baz);
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, MessageLoop::QuitWhenIdleClosure(),
+ TimeDelta::FromMilliseconds(100));
+ RunLoop().Run();
+ ASSERT_TRUE(PathExists(writer.path()));
+ EXPECT_EQ("baz", GetFileContent(writer.path()));
+}
+
+} // namespace base
diff --git a/libchrome/base/files/memory_mapped_file.cc b/libchrome/base/files/memory_mapped_file.cc
new file mode 100644
index 0000000..67890d6
--- /dev/null
+++ b/libchrome/base/files/memory_mapped_file.cc
@@ -0,0 +1,128 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/memory_mapped_file.h"
+
+#include <utility>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/sys_info.h"
+#include "build/build_config.h"
+
+namespace base {
+
+const MemoryMappedFile::Region MemoryMappedFile::Region::kWholeFile = {0, 0};
+
+bool MemoryMappedFile::Region::operator==(
+ const MemoryMappedFile::Region& other) const {
+ return other.offset == offset && other.size == size;
+}
+
+bool MemoryMappedFile::Region::operator!=(
+ const MemoryMappedFile::Region& other) const {
+ return other.offset != offset || other.size != size;
+}
+
+MemoryMappedFile::~MemoryMappedFile() {
+ CloseHandles();
+}
+
+#if !defined(OS_NACL)
+bool MemoryMappedFile::Initialize(const FilePath& file_name, Access access) {
+ if (IsValid())
+ return false;
+
+ uint32_t flags = 0;
+ switch (access) {
+ case READ_ONLY:
+ flags = File::FLAG_OPEN | File::FLAG_READ;
+ break;
+ case READ_WRITE:
+ flags = File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE;
+ break;
+ case READ_WRITE_EXTEND:
+ // Can't open with "extend" because no maximum size is known.
+ NOTREACHED();
+ }
+ file_.Initialize(file_name, flags);
+
+ if (!file_.IsValid()) {
+ DLOG(ERROR) << "Couldn't open " << file_name.AsUTF8Unsafe();
+ return false;
+ }
+
+ if (!MapFileRegionToMemory(Region::kWholeFile, access)) {
+ CloseHandles();
+ return false;
+ }
+
+ return true;
+}
+
+bool MemoryMappedFile::Initialize(File file, Access access) {
+ DCHECK_NE(READ_WRITE_EXTEND, access);
+ return Initialize(std::move(file), Region::kWholeFile, access);
+}
+
+bool MemoryMappedFile::Initialize(File file,
+ const Region& region,
+ Access access) {
+ switch (access) {
+ case READ_WRITE_EXTEND:
+ // Ensure that the extended size is within limits of File.
+ if (region.size > std::numeric_limits<int64_t>::max() - region.offset) {
+ DLOG(ERROR) << "Region bounds exceed maximum for base::File.";
+ return false;
+ }
+ // Fall through.
+ case READ_ONLY:
+ case READ_WRITE:
+ // Ensure that the region values are valid.
+ if (region.offset < 0 || region.size < 0) {
+ DLOG(ERROR) << "Region bounds are not valid.";
+ return false;
+ }
+ break;
+ }
+
+ if (IsValid())
+ return false;
+
+ if (region != Region::kWholeFile) {
+ DCHECK_GE(region.offset, 0);
+ DCHECK_GT(region.size, 0);
+ }
+
+ file_ = std::move(file);
+
+ if (!MapFileRegionToMemory(region, access)) {
+ CloseHandles();
+ return false;
+ }
+
+ return true;
+}
+
+bool MemoryMappedFile::IsValid() const {
+ return data_ != NULL;
+}
+
+// static
+void MemoryMappedFile::CalculateVMAlignedBoundaries(int64_t start,
+ int64_t size,
+ int64_t* aligned_start,
+ int64_t* aligned_size,
+ int32_t* offset) {
+ // Sadly, on Windows, the mmap alignment is not just equal to the page size.
+ const int64_t mask =
+ static_cast<int64_t>(SysInfo::VMAllocationGranularity()) - 1;
+ DCHECK_LT(mask, std::numeric_limits<int32_t>::max());
+ *offset = start & mask;
+ *aligned_start = start & ~mask;
+ *aligned_size = (size + *offset + mask) & ~mask;
+}
+#endif
+
+} // namespace base
diff --git a/libchrome/base/files/memory_mapped_file.h b/libchrome/base/files/memory_mapped_file.h
new file mode 100644
index 0000000..cad99f6
--- /dev/null
+++ b/libchrome/base/files/memory_mapped_file.h
@@ -0,0 +1,136 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_MEMORY_MAPPED_FILE_H_
+#define BASE_FILES_MEMORY_MAPPED_FILE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/files/file.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+
+class FilePath;
+
+class BASE_EXPORT MemoryMappedFile {
+ public:
+ enum Access {
+ // Mapping a file into memory effectively allows for file I/O on any thread.
+ // The accessing thread could be paused while data from the file is paged
+ // into memory. Worse, a corrupted filesystem could cause a SEGV within the
+ // program instead of just an I/O error.
+ READ_ONLY,
+
+ // This provides read/write access to a file and must be used with care of
+ // the additional subtleties involved in doing so. Though the OS will do
+ // the writing of data on its own time, too many dirty pages can cause
+ // the OS to pause the thread while it writes them out. The pause can
+ // be as much as 1s on some systems.
+ READ_WRITE,
+
+ // This provides read/write access but with the ability to write beyond
+ // the end of the existing file up to a maximum size specified as the
+ // "region". Depending on the OS, the file may or may not be immediately
+ // extended to the maximum size though it won't be loaded in RAM until
+ // needed. Note, however, that the maximum size will still be reserved
+ // in the process address space.
+ READ_WRITE_EXTEND,
+ };
+
+ // The default constructor sets all members to invalid/null values.
+ MemoryMappedFile();
+ ~MemoryMappedFile();
+
+  // Used to hold information about a region [offset, offset + size] of a file.
+ struct BASE_EXPORT Region {
+ static const Region kWholeFile;
+
+ bool operator==(const Region& other) const;
+ bool operator!=(const Region& other) const;
+
+ // Start of the region (measured in bytes from the beginning of the file).
+ int64_t offset;
+
+ // Length of the region in bytes.
+ int64_t size;
+ };
+
+ // Opens an existing file and maps it into memory. |access| can be read-only
+ // or read/write but not read/write+extend. If this object already points
+ // to a valid memory mapped file then this method will fail and return
+ // false. If it cannot open the file, the file does not exist, or the
+ // memory mapping fails, it will return false.
+ bool Initialize(const FilePath& file_name, Access access);
+ bool Initialize(const FilePath& file_name) {
+ return Initialize(file_name, READ_ONLY);
+ }
+
+ // As above, but works with an already-opened file. |access| can be read-only
+ // or read/write but not read/write+extend. MemoryMappedFile takes ownership
+ // of |file| and closes it when done. |file| must have been opened with
+ // permissions suitable for |access|. If the memory mapping fails, it will
+ // return false.
+ bool Initialize(File file, Access access);
+ bool Initialize(File file) {
+ return Initialize(std::move(file), READ_ONLY);
+ }
+
+ // As above, but works with a region of an already-opened file. All forms of
+ // |access| are allowed. If READ_WRITE_EXTEND is specified then |region|
+  // provides the maximum size of the file. If the memory mapping fails, it
+  // will return false.
+ bool Initialize(File file, const Region& region, Access access);
+ bool Initialize(File file, const Region& region) {
+ return Initialize(std::move(file), region, READ_ONLY);
+ }
+
+ const uint8_t* data() const { return data_; }
+ uint8_t* data() { return data_; }
+ size_t length() const { return length_; }
+
+ // Is file_ a valid file handle that points to an open, memory mapped file?
+ bool IsValid() const;
+
+ private:
+ // Given the arbitrarily aligned memory region [start, size], returns the
+ // boundaries of the region aligned to the granularity specified by the OS,
+ // (a page on Linux, ~32k on Windows) as follows:
+ // - |aligned_start| is page aligned and <= |start|.
+ // - |aligned_size| is a multiple of the VM granularity and >= |size|.
+ // - |offset| is the displacement of |start| w.r.t |aligned_start|.
+ static void CalculateVMAlignedBoundaries(int64_t start,
+ int64_t size,
+ int64_t* aligned_start,
+ int64_t* aligned_size,
+ int32_t* offset);
+
+ // Map the file to memory, set data_ to that memory address. Return true on
+ // success, false on any kind of failure. This is a helper for Initialize().
+ bool MapFileRegionToMemory(const Region& region, Access access);
+
+ // Closes all open handles.
+ void CloseHandles();
+
+ File file_;
+ uint8_t* data_;
+ size_t length_;
+
+#if defined(OS_WIN)
+ win::ScopedHandle file_mapping_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryMappedFile);
+};
+
+} // namespace base
+
+#endif // BASE_FILES_MEMORY_MAPPED_FILE_H_
diff --git a/libchrome/base/files/memory_mapped_file_posix.cc b/libchrome/base/files/memory_mapped_file_posix.cc
new file mode 100644
index 0000000..4899cf0
--- /dev/null
+++ b/libchrome/base/files/memory_mapped_file_posix.cc
@@ -0,0 +1,108 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/memory_mapped_file.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+MemoryMappedFile::MemoryMappedFile() : data_(NULL), length_(0) {
+}
+
+#if !defined(OS_NACL)
+bool MemoryMappedFile::MapFileRegionToMemory(
+ const MemoryMappedFile::Region& region,
+ Access access) {
+ ThreadRestrictions::AssertIOAllowed();
+
+ off_t map_start = 0;
+ size_t map_size = 0;
+ int32_t data_offset = 0;
+
+ if (region == MemoryMappedFile::Region::kWholeFile) {
+ int64_t file_len = file_.GetLength();
+ if (file_len == -1) {
+ DPLOG(ERROR) << "fstat " << file_.GetPlatformFile();
+ return false;
+ }
+ map_size = static_cast<size_t>(file_len);
+ length_ = map_size;
+ } else {
+ // The region can be arbitrarily aligned. mmap, instead, requires both the
+ // start and size to be page-aligned. Hence, we map here the page-aligned
+    // outer region [|aligned_start|, |aligned_start| + |aligned_size|] which
+    // contains |region| and then add up the |data_offset| displacement.
+ int64_t aligned_start = 0;
+ int64_t aligned_size = 0;
+ CalculateVMAlignedBoundaries(region.offset,
+ region.size,
+ &aligned_start,
+ &aligned_size,
+ &data_offset);
+
+ // Ensure that the casts in the mmap call below are sane.
+ if (aligned_start < 0 || aligned_size < 0 ||
+ aligned_start > std::numeric_limits<off_t>::max() ||
+ static_cast<uint64_t>(aligned_size) >
+ std::numeric_limits<size_t>::max() ||
+ static_cast<uint64_t>(region.size) >
+ std::numeric_limits<size_t>::max()) {
+ DLOG(ERROR) << "Region bounds are not valid for mmap";
+ return false;
+ }
+
+ map_start = static_cast<off_t>(aligned_start);
+ map_size = static_cast<size_t>(aligned_size);
+ length_ = static_cast<size_t>(region.size);
+ }
+
+ int flags = 0;
+ switch (access) {
+ case READ_ONLY:
+ flags |= PROT_READ;
+ break;
+ case READ_WRITE:
+ flags |= PROT_READ | PROT_WRITE;
+ break;
+ case READ_WRITE_EXTEND:
+ // POSIX won't auto-extend the file when it is written so it must first
+ // be explicitly extended to the maximum size. Zeros will fill the new
+ // space.
+ file_.SetLength(std::max(file_.GetLength(), region.offset + region.size));
+ flags |= PROT_READ | PROT_WRITE;
+ break;
+ }
+ data_ = static_cast<uint8_t*>(mmap(NULL, map_size, flags, MAP_SHARED,
+ file_.GetPlatformFile(), map_start));
+ if (data_ == MAP_FAILED) {
+ DPLOG(ERROR) << "mmap " << file_.GetPlatformFile();
+ return false;
+ }
+
+ data_ += data_offset;
+ return true;
+}
+#endif
+
+void MemoryMappedFile::CloseHandles() {
+ ThreadRestrictions::AssertIOAllowed();
+
+ if (data_ != NULL)
+ munmap(data_, length_);
+ file_.Close();
+
+ data_ = NULL;
+ length_ = 0;
+}
+
+} // namespace base
diff --git a/libchrome/base/files/scoped_file.cc b/libchrome/base/files/scoped_file.cc
new file mode 100644
index 0000000..8ce45b8
--- /dev/null
+++ b/libchrome/base/files/scoped_file.cc
@@ -0,0 +1,46 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/scoped_file.h"
+
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+#include <errno.h>
+#include <unistd.h>
+
+#include "base/debug/alias.h"
+#include "base/posix/eintr_wrapper.h"
+#endif
+
+namespace base {
+namespace internal {
+
+#if defined(OS_POSIX)
+
+// static
+void ScopedFDCloseTraits::Free(int fd) {
+ // It's important to crash here.
+ // There are security implications to not closing a file descriptor
+ // properly. As file descriptors are "capabilities", keeping them open
+ // would make the current process keep access to a resource. Much of
+ // Chrome relies on being able to "drop" such access.
+ // It's especially problematic on Linux with the setuid sandbox, where
+ // a single open directory would bypass the entire security model.
+ int ret = IGNORE_EINTR(close(fd));
+
+ // TODO(davidben): Remove this once it's been determined whether
+ // https://crbug.com/603354 is caused by EBADF or a network filesystem
+ // returning some other error.
+ int close_errno = errno;
+ base::debug::Alias(&close_errno);
+
+ PCHECK(0 == ret);
+}
+
+#endif // OS_POSIX
+
+} // namespace internal
+} // namespace base
diff --git a/libchrome/base/files/scoped_file.h b/libchrome/base/files/scoped_file.h
new file mode 100644
index 0000000..68c0415
--- /dev/null
+++ b/libchrome/base/files/scoped_file.h
@@ -0,0 +1,62 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_SCOPED_FILE_H_
+#define BASE_FILES_SCOPED_FILE_H_
+
+#include <stdio.h>
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/scoped_generic.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace internal {
+
+#if defined(OS_POSIX)
+struct BASE_EXPORT ScopedFDCloseTraits {
+ static int InvalidValue() {
+ return -1;
+ }
+ static void Free(int fd);
+};
+#endif
+
+// Functor for |ScopedFILE| (below).
+struct ScopedFILECloser {
+ inline void operator()(FILE* x) const {
+ if (x)
+ fclose(x);
+ }
+};
+
+} // namespace internal
+
+// -----------------------------------------------------------------------------
+
+#if defined(OS_POSIX)
+// A low-level Posix file descriptor closer class. Use this when writing
+// platform-specific code, especially that does non-file-like things with the
+// FD (like sockets).
+//
+// If you're writing low-level Windows code, see base/win/scoped_handle.h
+// which provides some additional functionality.
+//
+// If you're writing cross-platform code that deals with actual files, you
+// should generally use base::File instead which can be constructed with a
+// handle, and in addition to handling ownership, has convenient cross-platform
+// file manipulation functions on it.
+typedef ScopedGeneric<int, internal::ScopedFDCloseTraits> ScopedFD;
+#endif
+
+// Automatically closes |FILE*|s.
+typedef std::unique_ptr<FILE, internal::ScopedFILECloser> ScopedFILE;
+
+} // namespace base
+
+#endif // BASE_FILES_SCOPED_FILE_H_
diff --git a/libchrome/base/files/scoped_temp_dir.cc b/libchrome/base/files/scoped_temp_dir.cc
new file mode 100644
index 0000000..27b758e
--- /dev/null
+++ b/libchrome/base/files/scoped_temp_dir.cc
@@ -0,0 +1,83 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/scoped_temp_dir.h"
+
+#include "base/files/file_util.h"
+#include "base/logging.h"
+
+namespace base {
+
+ScopedTempDir::ScopedTempDir() {
+}
+
+ScopedTempDir::~ScopedTempDir() {
+ if (!path_.empty() && !Delete())
+ DLOG(WARNING) << "Could not delete temp dir in dtor.";
+}
+
+bool ScopedTempDir::CreateUniqueTempDir() {
+ if (!path_.empty())
+ return false;
+
+ // This "scoped_dir" prefix is only used on Windows and serves as a template
+ // for the unique name.
+ if (!base::CreateNewTempDirectory(FILE_PATH_LITERAL("scoped_dir"), &path_))
+ return false;
+
+ return true;
+}
+
+bool ScopedTempDir::CreateUniqueTempDirUnderPath(const FilePath& base_path) {
+ if (!path_.empty())
+ return false;
+
+ // If |base_path| does not exist, create it.
+ if (!base::CreateDirectory(base_path))
+ return false;
+
+ // Create a new, uniquely named directory under |base_path|.
+ if (!base::CreateTemporaryDirInDir(base_path,
+ FILE_PATH_LITERAL("scoped_dir_"),
+ &path_))
+ return false;
+
+ return true;
+}
+
+bool ScopedTempDir::Set(const FilePath& path) {
+ if (!path_.empty())
+ return false;
+
+ if (!DirectoryExists(path) && !base::CreateDirectory(path))
+ return false;
+
+ path_ = path;
+ return true;
+}
+
+bool ScopedTempDir::Delete() {
+ if (path_.empty())
+ return false;
+
+ bool ret = base::DeleteFile(path_, true);
+ if (ret) {
+    // We only clear the path if we deleted the directory.
+ path_.clear();
+ }
+
+ return ret;
+}
+
+FilePath ScopedTempDir::Take() {
+ FilePath ret = path_;
+ path_ = FilePath();
+ return ret;
+}
+
+bool ScopedTempDir::IsValid() const {
+ return !path_.empty() && DirectoryExists(path_);
+}
+
+} // namespace base
diff --git a/libchrome/base/files/scoped_temp_dir.h b/libchrome/base/files/scoped_temp_dir.h
new file mode 100644
index 0000000..b1f2f5b
--- /dev/null
+++ b/libchrome/base/files/scoped_temp_dir.h
@@ -0,0 +1,63 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILES_SCOPED_TEMP_DIR_H_
+#define BASE_FILES_SCOPED_TEMP_DIR_H_
+
+// An object representing a temporary / scratch directory that should be cleaned
+// up (recursively) when this object goes out of scope. Note that since
+// deletion occurs during the destructor, no further error handling is possible
+// if the directory fails to be deleted. As a result, deletion is not
+// guaranteed by this class.
+//
+// Multiple calls to the methods which establish a temporary directory
+// (CreateUniqueTempDir, CreateUniqueTempDirUnderPath, and Set) must have
+// intervening calls to Delete or Take, or the calls will fail.
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+
+namespace base {
+
+class BASE_EXPORT ScopedTempDir {
+ public:
+ // No directory is owned/created initially.
+ ScopedTempDir();
+
+ // Recursively delete path.
+ ~ScopedTempDir();
+
+ // Creates a unique directory in TempPath, and takes ownership of it.
+ // See file_util::CreateNewTemporaryDirectory.
+ bool CreateUniqueTempDir() WARN_UNUSED_RESULT;
+
+ // Creates a unique directory under a given path, and takes ownership of it.
+ bool CreateUniqueTempDirUnderPath(const FilePath& path) WARN_UNUSED_RESULT;
+
+ // Takes ownership of directory at |path|, creating it if necessary.
+ // Don't call multiple times unless Take() has been called first.
+ bool Set(const FilePath& path) WARN_UNUSED_RESULT;
+
+ // Deletes the temporary directory wrapped by this object.
+ bool Delete() WARN_UNUSED_RESULT;
+
+ // Caller takes ownership of the temporary directory so it won't be destroyed
+ // when this object goes out of scope.
+ FilePath Take();
+
+ const FilePath& path() const { return path_; }
+
+ // Returns true if path_ is non-empty and exists.
+ bool IsValid() const;
+
+ private:
+ FilePath path_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedTempDir);
+};
+
+} // namespace base
+
+#endif // BASE_FILES_SCOPED_TEMP_DIR_H_
diff --git a/libchrome/base/files/scoped_temp_dir_unittest.cc b/libchrome/base/files/scoped_temp_dir_unittest.cc
new file mode 100644
index 0000000..3b2f28e
--- /dev/null
+++ b/libchrome/base/files/scoped_temp_dir_unittest.cc
@@ -0,0 +1,114 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/files/file.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(ScopedTempDir, FullPath) {
+ FilePath test_path;
+ base::CreateNewTempDirectory(FILE_PATH_LITERAL("scoped_temp_dir"),
+ &test_path);
+
+ // Against an existing dir, it should get destroyed when leaving scope.
+ EXPECT_TRUE(DirectoryExists(test_path));
+ {
+ ScopedTempDir dir;
+ EXPECT_TRUE(dir.Set(test_path));
+ EXPECT_TRUE(dir.IsValid());
+ }
+ EXPECT_FALSE(DirectoryExists(test_path));
+
+ {
+ ScopedTempDir dir;
+ EXPECT_TRUE(dir.Set(test_path));
+ // Now the dir doesn't exist, so ensure that it gets created.
+ EXPECT_TRUE(DirectoryExists(test_path));
+    // When we call Take(), it shouldn't get destroyed when leaving scope.
+ FilePath path = dir.Take();
+ EXPECT_EQ(path.value(), test_path.value());
+ EXPECT_FALSE(dir.IsValid());
+ }
+ EXPECT_TRUE(DirectoryExists(test_path));
+
+ // Clean up.
+ {
+ ScopedTempDir dir;
+ EXPECT_TRUE(dir.Set(test_path));
+ }
+ EXPECT_FALSE(DirectoryExists(test_path));
+}
+
+TEST(ScopedTempDir, TempDir) {
+ // In this case, just verify that a directory was created and that it's a
+ // child of TempDir.
+ FilePath test_path;
+ {
+ ScopedTempDir dir;
+ EXPECT_TRUE(dir.CreateUniqueTempDir());
+ test_path = dir.path();
+ EXPECT_TRUE(DirectoryExists(test_path));
+ FilePath tmp_dir;
+ EXPECT_TRUE(base::GetTempDir(&tmp_dir));
+ EXPECT_TRUE(test_path.value().find(tmp_dir.value()) != std::string::npos);
+ }
+ EXPECT_FALSE(DirectoryExists(test_path));
+}
+
+TEST(ScopedTempDir, UniqueTempDirUnderPath) {
+ // Create a path which will contain a unique temp path.
+ FilePath base_path;
+ ASSERT_TRUE(base::CreateNewTempDirectory(FILE_PATH_LITERAL("base_dir"),
+ &base_path));
+
+ FilePath test_path;
+ {
+ ScopedTempDir dir;
+ EXPECT_TRUE(dir.CreateUniqueTempDirUnderPath(base_path));
+ test_path = dir.path();
+ EXPECT_TRUE(DirectoryExists(test_path));
+ EXPECT_TRUE(base_path.IsParent(test_path));
+ EXPECT_TRUE(test_path.value().find(base_path.value()) != std::string::npos);
+ }
+ EXPECT_FALSE(DirectoryExists(test_path));
+ base::DeleteFile(base_path, true);
+}
+
+TEST(ScopedTempDir, MultipleInvocations) {
+ ScopedTempDir dir;
+ EXPECT_TRUE(dir.CreateUniqueTempDir());
+ EXPECT_FALSE(dir.CreateUniqueTempDir());
+ EXPECT_TRUE(dir.Delete());
+ EXPECT_TRUE(dir.CreateUniqueTempDir());
+ EXPECT_FALSE(dir.CreateUniqueTempDir());
+ ScopedTempDir other_dir;
+ EXPECT_TRUE(other_dir.Set(dir.Take()));
+ EXPECT_TRUE(dir.CreateUniqueTempDir());
+ EXPECT_FALSE(dir.CreateUniqueTempDir());
+ EXPECT_FALSE(other_dir.CreateUniqueTempDir());
+}
+
+#if defined(OS_WIN)
+TEST(ScopedTempDir, LockedTempDir) {
+ ScopedTempDir dir;
+ EXPECT_TRUE(dir.CreateUniqueTempDir());
+ base::File file(dir.path().Append(FILE_PATH_LITERAL("temp")),
+ base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE);
+ EXPECT_TRUE(file.IsValid());
+ EXPECT_EQ(base::File::FILE_OK, file.error_details());
+ EXPECT_FALSE(dir.Delete()); // We should not be able to delete.
+ EXPECT_FALSE(dir.path().empty()); // We should still have a valid path.
+ file.Close();
+ // Now, we should be able to delete.
+ EXPECT_TRUE(dir.Delete());
+}
+#endif // defined(OS_WIN)
+
+} // namespace base
diff --git a/libchrome/base/format_macros.h b/libchrome/base/format_macros.h
new file mode 100644
index 0000000..0697c6d
--- /dev/null
+++ b/libchrome/base/format_macros.h
@@ -0,0 +1,97 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FORMAT_MACROS_H_
+#define BASE_FORMAT_MACROS_H_
+
+// This file defines the format macros for some integer types.
+
+// To print a 64-bit value in a portable way:
+// int64_t value;
+// printf("xyz:%" PRId64, value);
+// The "d" in the macro corresponds to %d; you can also use PRIu64 etc.
+//
+// For wide strings, prepend "Wide" to the macro:
+// int64_t value;
+// StringPrintf(L"xyz: %" WidePRId64, value);
+//
+// To print a size_t value in a portable way:
+// size_t size;
+// printf("xyz: %" PRIuS, size);
+// The "u" in the macro corresponds to %u, and S is for "size".
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "build/build_config.h"
+
+#if defined(OS_POSIX) && (defined(_INTTYPES_H) || defined(_INTTYPES_H_)) && \
+ !defined(PRId64)
+#error "inttypes.h has already been included before this header file, but "
+#error "without __STDC_FORMAT_MACROS defined."
+#endif
+
+#if defined(OS_POSIX) && !defined(__STDC_FORMAT_MACROS)
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include <inttypes.h>
+
+#if defined(OS_POSIX)
+
+// GCC will concatenate wide and narrow strings correctly, so nothing needs to
+// be done here.
+#define WidePRId64 PRId64
+#define WidePRIu64 PRIu64
+#define WidePRIx64 PRIx64
+
+#if !defined(PRIuS)
+#define PRIuS "zu"
+#endif
+
+// The size of NSInteger and NSUInteger varies between 32-bit and 64-bit
+// architectures and Apple does not provide standard format macros and
+// recommends casting. This has many drawbacks, so instead define macros
+// for formatting those types.
+#if defined(OS_MACOSX)
+#if defined(ARCH_CPU_64_BITS)
+#if !defined(PRIdNS)
+#define PRIdNS "ld"
+#endif
+#if !defined(PRIuNS)
+#define PRIuNS "lu"
+#endif
+#if !defined(PRIxNS)
+#define PRIxNS "lx"
+#endif
+#else // defined(ARCH_CPU_64_BITS)
+#if !defined(PRIdNS)
+#define PRIdNS "d"
+#endif
+#if !defined(PRIuNS)
+#define PRIuNS "u"
+#endif
+#if !defined(PRIxNS)
+#define PRIxNS "x"
+#endif
+#endif
+#endif // defined(OS_MACOSX)
+
+#else // OS_WIN
+
+#if !defined(PRId64) || !defined(PRIu64) || !defined(PRIx64)
+#error "inttypes.h provided by win toolchain should define these."
+#endif
+
+#define WidePRId64 L"I64d"
+#define WidePRIu64 L"I64u"
+#define WidePRIx64 L"I64x"
+
+#if !defined(PRIuS)
+#define PRIuS "Iu"
+#endif
+
+#endif
+
+#endif // BASE_FORMAT_MACROS_H_
diff --git a/libchrome/base/gmock_unittest.cc b/libchrome/base/gmock_unittest.cc
new file mode 100644
index 0000000..855380a
--- /dev/null
+++ b/libchrome/base/gmock_unittest.cc
@@ -0,0 +1,137 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This test is a simple sanity check to make sure gmock is able to build/link
+// correctly. It just instantiates a mock object and runs through a couple of
+// the basic mock features.
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Gmock matchers and actions that we use below.
+using testing::AnyOf;
+using testing::Eq;
+using testing::Return;
+using testing::SetArgumentPointee;
+using testing::WithArg;
+using testing::_;
+
+namespace {
+
+// Simple class that we can mock out the behavior for. Everything is virtual
+// for easy mocking.
+class SampleClass {
+ public:
+  SampleClass() {}
+  virtual ~SampleClass() {}
+
+  // Returns a sentinel value; mocks script this to return other values.
+  virtual int ReturnSomething() { return -1; }
+
+  // Const member with no observable effect; exists to exercise
+  // MOCK_CONST_METHOD0.
+  virtual void ReturnNothingConstly() const {}
+
+  // Output-parameter method; this default implementation leaves |a| alone.
+  virtual void OutputParam(int* a) {}
+
+  // Returns its second argument verbatim.
+  virtual int ReturnSecond(int a, int b) { return b; }
+};
+
+// Declare a mock for the class.
+// GMock generates an override for each mocked virtual; the signatures here
+// must match SampleClass exactly or the override will silently not apply.
+class MockSampleClass : public SampleClass {
+ public:
+  MOCK_METHOD0(ReturnSomething, int());
+  MOCK_CONST_METHOD0(ReturnNothingConstly, void());
+  MOCK_METHOD1(OutputParam, void(int* a));
+  MOCK_METHOD2(ReturnSecond, int(int a, int b));
+};
+
+// Create a couple of custom actions. Custom actions can be used for adding
+// more complex behavior into your mock...though if you start needing these, ask
+// if you're asking your mock to do too much.
+// Inside an ACTION body, gmock exposes the call's arguments as arg0, arg1, ...
+ACTION(ReturnVal) {
+  // Return the first argument received.
+  return arg0;
+}
+ACTION(ReturnSecond) {
+  // Returns the second argument. This basically implements
+  // SampleClass::ReturnSecond.
+  return arg1;
+}
+
+TEST(GmockTest, SimpleMatchAndActions) {
+  // Basic test of some simple gmock matchers, actions, and cardinality
+  // expectations.
+  MockSampleClass mock;
+
+  // Three WillOnce clauses: the method must be called exactly three times
+  // and yields 1, 2, 3 in order.
+  EXPECT_CALL(mock, ReturnSomething())
+      .WillOnce(Return(1))
+      .WillOnce(Return(2))
+      .WillOnce(Return(3));
+  EXPECT_EQ(1, mock.ReturnSomething());
+  EXPECT_EQ(2, mock.ReturnSomething());
+  EXPECT_EQ(3, mock.ReturnSomething());
+
+  // Times(2) without an action: default-constructed void return, exactly
+  // two calls expected.
+  EXPECT_CALL(mock, ReturnNothingConstly()).Times(2);
+  mock.ReturnNothingConstly();
+  mock.ReturnNothingConstly();
+}
+
+TEST(GmockTest, AssignArgument) {
+  // Capture an argument for examination.
+  MockSampleClass mock;
+
+  // SetArgumentPointee<0> writes 5 through the pointer argument on every
+  // matching call.
+  EXPECT_CALL(mock, OutputParam(_))
+      .WillRepeatedly(SetArgumentPointee<0>(5));
+
+  int arg = 0;
+  mock.OutputParam(&arg);
+  EXPECT_EQ(5, arg);
+}
+
+TEST(GmockTest, SideEffects) {
+  // Verify that an action with a side effect (writing through the pointer
+  // argument) is re-applied on every matching call, not just the first.
+  // (Previously this test was a verbatim duplicate of AssignArgument.)
+  MockSampleClass mock;
+
+  EXPECT_CALL(mock, OutputParam(_))
+      .WillRepeatedly(SetArgumentPointee<0>(5));
+
+  int arg = 0;
+  mock.OutputParam(&arg);
+  EXPECT_EQ(5, arg);
+
+  // Reset and call again: WillRepeatedly must fire the side effect again.
+  arg = 0;
+  mock.OutputParam(&arg);
+  EXPECT_EQ(5, arg);
+}
+
+TEST(GmockTest, CustomAction_ReturnSecond) {
+  // Test a mock of the ReturnSecond behavior using an action that provides an
+  // alternate implementation of the function. Danger here though, this is
+  // starting to add too much behavior of the mock, which means the mock
+  // implementation might start to have bugs itself.
+  MockSampleClass mock;
+
+  EXPECT_CALL(mock, ReturnSecond(_, AnyOf(Eq(4), Eq(5))))
+      .WillRepeatedly(ReturnSecond());
+  EXPECT_EQ(4, mock.ReturnSecond(-1, 4));
+  EXPECT_EQ(5, mock.ReturnSecond(0, 5));
+  // NOTE(review): 0xdeadbeef does not fit in int, so its conversion to the
+  // int parameter is implementation-defined; the first argument is ignored
+  // by the matcher (_), so the expectation still holds.
+  EXPECT_EQ(4, mock.ReturnSecond(0xdeadbeef, 4));
+  EXPECT_EQ(4, mock.ReturnSecond(112358, 4));
+  EXPECT_EQ(5, mock.ReturnSecond(1337, 5));
+}
+
+TEST(GmockTest, CustomAction_ReturnVal) {
+  // Alternate implementation of ReturnSecond using a more general custom
+  // action, and a WithArg adapter to bridge the interfaces.
+  MockSampleClass mock;
+
+  // WithArg<1> forwards only the second call argument to ReturnVal's arg0.
+  EXPECT_CALL(mock, ReturnSecond(_, AnyOf(Eq(4), Eq(5))))
+      .WillRepeatedly(WithArg<1>(ReturnVal()));
+  EXPECT_EQ(4, mock.ReturnSecond(-1, 4));
+  EXPECT_EQ(5, mock.ReturnSecond(0, 5));
+  EXPECT_EQ(4, mock.ReturnSecond(0xdeadbeef, 4));
+  EXPECT_EQ(4, mock.ReturnSecond(112358, 4));
+  EXPECT_EQ(5, mock.ReturnSecond(1337, 5));
+}
+
+} // namespace
diff --git a/libchrome/base/gtest_prod_util.h b/libchrome/base/gtest_prod_util.h
new file mode 100644
index 0000000..3289e63
--- /dev/null
+++ b/libchrome/base/gtest_prod_util.h
@@ -0,0 +1,66 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_GTEST_PROD_UTIL_H_
+#define BASE_GTEST_PROD_UTIL_H_
+
+#include "testing/gtest/include/gtest/gtest_prod.h"
+
+// This is a wrapper for gtest's FRIEND_TEST macro that friends
+// test with all possible prefixes. This is very helpful when changing the test
+// prefix, because the friend declarations don't need to be updated.
+//
+// Example usage:
+//
+// class MyClass {
+// private:
+// void MyMethod();
+// FRIEND_TEST_ALL_PREFIXES(MyClassTest, MyMethod);
+// };
+#define FRIEND_TEST_ALL_PREFIXES(test_case_name, test_name) \
+ FRIEND_TEST(test_case_name, test_name); \
+ FRIEND_TEST(test_case_name, DISABLED_##test_name); \
+ FRIEND_TEST(test_case_name, FLAKY_##test_name)
+
+// C++ compilers will refuse to compile the following code:
+//
+// namespace foo {
+// class MyClass {
+// private:
+// FRIEND_TEST_ALL_PREFIXES(MyClassTest, TestMethod);
+// bool private_var;
+// };
+// } // namespace foo
+//
+// void MyClassTest::TestMethod() {
+// foo::MyClass foo_class;
+// foo_class.private_var = true;
+// }
+//
+// Unless you forward declare MyClassTest::TestMethod outside of namespace foo.
+// Use FORWARD_DECLARE_TEST to do so for all possible prefixes.
+//
+// Example usage:
+//
+// FORWARD_DECLARE_TEST(MyClassTest, TestMethod);
+//
+// namespace foo {
+// class MyClass {
+// private:
+// FRIEND_TEST_ALL_PREFIXES(::MyClassTest, TestMethod); // NOTE use of ::
+// bool private_var;
+// };
+// } // namespace foo
+//
+// void MyClassTest::TestMethod() {
+// foo::MyClass foo_class;
+// foo_class.private_var = true;
+// }
+
+#define FORWARD_DECLARE_TEST(test_case_name, test_name) \
+ class test_case_name##_##test_name##_Test; \
+ class test_case_name##_##DISABLED_##test_name##_Test; \
+ class test_case_name##_##FLAKY_##test_name##_Test
+
+#endif // BASE_GTEST_PROD_UTIL_H_
diff --git a/libchrome/base/guid.cc b/libchrome/base/guid.cc
new file mode 100644
index 0000000..5714073
--- /dev/null
+++ b/libchrome/base/guid.cc
@@ -0,0 +1,79 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/guid.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/rand_util.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+
+namespace {
+
+// True iff |c| is a hexadecimal digit written in lower case ([0-9a-f]).
+bool IsLowerHexDigit(char c) {
+  return ('0' <= c && c <= '9') || ('a' <= c && c <= 'f');
+}
+
+// Validates the 8-4-4-4-12 GUID layout. When |strict| is set, only
+// lower-case hex digits are accepted; otherwise either case is allowed.
+bool IsValidGUIDInternal(const base::StringPiece& guid, bool strict) {
+  const size_t kGUIDLength = 36U;
+  if (guid.length() != kGUIDLength)
+    return false;
+
+  for (size_t i = 0; i < guid.length(); ++i) {
+    const char current = guid[i];
+    const bool hyphen_position = (i == 8 || i == 13 || i == 18 || i == 23);
+    if (hyphen_position) {
+      if (current != '-')
+        return false;
+      continue;
+    }
+    if (strict ? !IsLowerHexDigit(current) : !IsHexDigit(current))
+      return false;
+  }
+
+  return true;
+}
+
+} // namespace
+
+std::string GenerateGUID() {
+  // Draw 128 random bits, then stamp in the RFC 4122 version and variant
+  // fields so the result reads xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx with
+  // y in [8, 9, a, b].
+  uint64_t sixteen_bytes[2];
+  sixteen_bytes[0] = base::RandUint64();
+  sixteen_bytes[1] = base::RandUint64();
+
+  // Version field: force the 13th hex digit to 4.
+  sixteen_bytes[0] =
+      (sixteen_bytes[0] & 0xffffffffffff0fffULL) | 0x0000000000004000ULL;
+
+  // Variant field: force the two most significant bits of
+  // clock_seq_hi_and_reserved to 1 and 0, respectively.
+  sixteen_bytes[1] =
+      (sixteen_bytes[1] & 0x3fffffffffffffffULL) | 0x8000000000000000ULL;
+
+  return RandomDataToGUIDString(sixteen_bytes);
+}
+
+bool IsValidGUID(const base::StringPiece& guid) {
+  // Loose check: upper- and lower-case hex digits both accepted.
+  return IsValidGUIDInternal(guid, /* strict */ false);
+}
+
+bool IsValidGUIDOutputString(const base::StringPiece& guid) {
+  // Strict check: matches GenerateGUID()'s lower-case output exactly.
+  return IsValidGUIDInternal(guid, /* strict */ true);
+}
+
+std::string RandomDataToGUIDString(const uint64_t bytes[2]) {
+  // "%llx" expects an unsigned long long argument, but uint64_t may be
+  // unsigned long on LP64 platforms; cast explicitly so the vararg type
+  // always matches the format specifier.
+  return StringPrintf("%08x-%04x-%04x-%04x-%012llx",
+                      static_cast<unsigned int>(bytes[0] >> 32),
+                      static_cast<unsigned int>((bytes[0] >> 16) & 0x0000ffff),
+                      static_cast<unsigned int>(bytes[0] & 0x0000ffff),
+                      static_cast<unsigned int>(bytes[1] >> 48),
+                      static_cast<unsigned long long>(
+                          bytes[1] & 0x0000ffffffffffffULL));
+}
+
+} // namespace base
diff --git a/libchrome/base/guid.h b/libchrome/base/guid.h
new file mode 100644
index 0000000..29c24ea
--- /dev/null
+++ b/libchrome/base/guid.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_GUID_H_
+#define BASE_GUID_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Generate a 128-bit (pseudo) random GUID in the form of version 4 as described
+// in RFC 4122, section 4.4.
+// The format of GUID version 4 must be xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx,
+// where y is one of [8, 9, A, B].
+// The hexadecimal values "a" through "f" are output as lower case characters.
+// If GUID generation fails an empty string is returned.
+BASE_EXPORT std::string GenerateGUID();
+
+// Returns true if the input string conforms to the version 4 GUID format.
+// Note that this does NOT check if the hexadecimal values "a" through "f"
+// are in lower case characters, as the version 4 RFC says that on input they
+// case insensitive. (Use IsValidGUIDOutputString for checking if the
+// given string is valid output string)
+BASE_EXPORT bool IsValidGUID(const base::StringPiece& guid);
+
+// Returns true if the input string is valid version 4 GUID output string.
+// This also checks if the hexadecimal values "a" through "f" are in lower
+// case characters.
+BASE_EXPORT bool IsValidGUIDOutputString(const base::StringPiece& guid);
+
+// For unit testing purposes only. Do not use outside of tests.
+BASE_EXPORT std::string RandomDataToGUIDString(const uint64_t bytes[2]);
+
+} // namespace base
+
+#endif // BASE_GUID_H_
diff --git a/libchrome/base/guid_unittest.cc b/libchrome/base/guid_unittest.cc
new file mode 100644
index 0000000..70dad67
--- /dev/null
+++ b/libchrome/base/guid_unittest.cc
@@ -0,0 +1,65 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/guid.h"
+
+#include <stdint.h>
+
+#include <limits>
+
+#include "base/strings/string_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+bool IsGUIDv4(const std::string& guid) {
+  // The format of GUID version 4 must be xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx,
+  // where y is one of [8, 9, A, B] (either case).
+  if (!IsValidGUID(guid))
+    return false;
+  if (guid[14] != '4')
+    return false;
+  const char y = guid[19];
+  return y == '8' || y == '9' || y == 'a' || y == 'A' || y == 'b' || y == 'B';
+}
+
+} // namespace
+
+TEST(GUIDTest, GUIDGeneratesAllZeroes) {
+  // All-zero input must format as the nil GUID.
+  uint64_t bytes[] = {0, 0};
+  std::string clientid = RandomDataToGUIDString(bytes);
+  EXPECT_EQ("00000000-0000-0000-0000-000000000000", clientid);
+}
+
+TEST(GUIDTest, GUIDGeneratesCorrectly) {
+  // Checks digit ordering and that hex output is lower case.
+  uint64_t bytes[] = {0x0123456789ABCDEFULL, 0xFEDCBA9876543210ULL};
+  std::string clientid = RandomDataToGUIDString(bytes);
+  EXPECT_EQ("01234567-89ab-cdef-fedc-ba9876543210", clientid);
+}
+
+TEST(GUIDTest, GUIDCorrectlyFormatted) {
+  const int kIterations = 10;
+  for (int it = 0; it < kIterations; ++it) {
+    std::string guid = GenerateGUID();
+    EXPECT_TRUE(IsValidGUID(guid));
+    // Generated GUIDs must pass the strict (lower-case) check, and remain
+    // valid under the loose check in either case.
+    EXPECT_TRUE(IsValidGUIDOutputString(guid));
+    EXPECT_TRUE(IsValidGUID(ToLowerASCII(guid)));
+    EXPECT_TRUE(IsValidGUID(ToUpperASCII(guid)));
+  }
+}
+
+TEST(GUIDTest, GUIDBasicUniqueness) {
+  // Not a statistical test: only checks that consecutive GUIDs differ and
+  // carry the version-4 markers.
+  const int kIterations = 10;
+  for (int it = 0; it < kIterations; ++it) {
+    std::string guid1 = GenerateGUID();
+    std::string guid2 = GenerateGUID();
+    EXPECT_EQ(36U, guid1.length());
+    EXPECT_EQ(36U, guid2.length());
+    EXPECT_NE(guid1, guid2);
+    EXPECT_TRUE(IsGUIDv4(guid1));
+    EXPECT_TRUE(IsGUIDv4(guid2));
+  }
+}
+
+} // namespace base
diff --git a/libchrome/base/hash.cc b/libchrome/base/hash.cc
new file mode 100644
index 0000000..4dfd0d0
--- /dev/null
+++ b/libchrome/base/hash.cc
@@ -0,0 +1,16 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/hash.h"
+
+#include <functional>
+
+namespace base {
+
+// NOTE: despite the name, this is not Paul Hsieh's SuperFastHash algorithm;
+// it delegates to std::hash<std::string>, so values vary across standard
+// library implementations and must not be persisted or sent over the wire.
+uint32_t SuperFastHash(const char* data, size_t len) {
+  std::hash<std::string> hash_fn;
+  // std::hash returns size_t; the result is truncated to 32 bits on 64-bit
+  // platforms. Constructing the std::string copies |data| (allocates).
+  return hash_fn(std::string(data, len));
+}
+
+} // namespace base
diff --git a/libchrome/base/hash.h b/libchrome/base/hash.h
new file mode 100644
index 0000000..7c0fba6
--- /dev/null
+++ b/libchrome/base/hash.h
@@ -0,0 +1,118 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_HASH_H_
+#define BASE_HASH_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+#include <string>
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+
+namespace base {
+
+// WARNING: This hash function should not be used for any cryptographic purpose.
+BASE_EXPORT uint32_t SuperFastHash(const char* data, size_t length);
+
+// Computes a hash of a memory buffer |data| of a given |length|.
+// Thin convenience wrapper over SuperFastHash(); see its caveats.
+// WARNING: This hash function should not be used for any cryptographic purpose.
+inline uint32_t Hash(const char* data, size_t length) {
+  return SuperFastHash(data, length);
+}
+
+// Computes a hash of a string |str|.
+// Hashes the raw bytes, so embedded NUL characters are included.
+// WARNING: This hash function should not be used for any cryptographic purpose.
+inline uint32_t Hash(const std::string& str) {
+  return Hash(str.data(), str.size());
+}
+
+// Implement hashing for pairs of at-most 32 bit integer values.
+// When size_t is 32 bits, we turn the 64-bit hash code into 32 bits by using
+// multiply-add hashing. This algorithm, as described in
+// Theorem 4.3.3 of the thesis "Über die Komplexität der Multiplikation in
+// eingeschränkten Branchingprogrammmodellen" by Woelfel, is:
+//
+// h32(x32, y32) = (h64(x32, y32) * rand_odd64 + rand16 * 2^16) % 2^64 / 2^32
+//
+// Contact danakj@chromium.org for any questions.
+inline size_t HashInts32(uint32_t value1, uint32_t value2) {
+  // Pack both values into one 64-bit word; on 64-bit size_t this packed
+  // word is the (collision-free) hash itself.
+  uint64_t value1_64 = value1;
+  uint64_t hash64 = (value1_64 << 32) | value2;
+
+  if (sizeof(size_t) >= sizeof(uint64_t))
+    return static_cast<size_t>(hash64);
+
+  // 32-bit size_t: multiply-add hashing with fixed "random" odd multiplier
+  // and additive constant, then keep the high half (see comment above).
+  uint64_t odd_random = 481046412LL << 32 | 1025306955LL;
+  uint32_t shift_random = 10121U << 16;
+
+  hash64 = hash64 * odd_random + shift_random;
+  size_t high_bits =
+      static_cast<size_t>(hash64 >> (8 * (sizeof(uint64_t) - sizeof(size_t))));
+  return high_bits;
+}
+
+// Implement hashing for pairs of up-to 64-bit integer values.
+// We use the compound integer hash method to produce a 64-bit hash code, by
+// breaking the two 64-bit inputs into 4 32-bit values:
+// http://opendatastructures.org/versions/edition-0.1d/ods-java/node33.html#SECTION00832000000000000000
+// Then we reduce our result to 32 bits if required, similar to above.
+inline size_t HashInts64(uint64_t value1, uint64_t value2) {
+  // Fixed "random" multipliers for the compound-integer hash of the four
+  // 32-bit halves.
+  uint32_t short_random1 = 842304669U;
+  uint32_t short_random2 = 619063811U;
+  uint32_t short_random3 = 937041849U;
+  uint32_t short_random4 = 3309708029U;
+
+  // Split each 64-bit input into low (a) and high (b) 32-bit halves.
+  uint32_t value1a = static_cast<uint32_t>(value1 & 0xffffffff);
+  uint32_t value1b = static_cast<uint32_t>((value1 >> 32) & 0xffffffff);
+  uint32_t value2a = static_cast<uint32_t>(value2 & 0xffffffff);
+  uint32_t value2b = static_cast<uint32_t>((value2 >> 32) & 0xffffffff);
+
+  uint64_t product1 = static_cast<uint64_t>(value1a) * short_random1;
+  uint64_t product2 = static_cast<uint64_t>(value1b) * short_random2;
+  uint64_t product3 = static_cast<uint64_t>(value2a) * short_random3;
+  uint64_t product4 = static_cast<uint64_t>(value2b) * short_random4;
+
+  // The sum (mod 2^64) of the four products is the 64-bit hash code.
+  uint64_t hash64 = product1 + product2 + product3 + product4;
+
+  if (sizeof(size_t) >= sizeof(uint64_t))
+    return static_cast<size_t>(hash64);
+
+  // 32-bit size_t: reduce with the same multiply-add scheme as HashInts32.
+  uint64_t odd_random = 1578233944LL << 32 | 194370989LL;
+  uint32_t shift_random = 20591U << 16;
+
+  hash64 = hash64 * odd_random + shift_random;
+  size_t high_bits =
+      static_cast<size_t>(hash64 >> (8 * (sizeof(uint64_t) - sizeof(size_t))));
+  return high_bits;
+}
+
+// Hashes a pair of integers, dispatching on argument width.
+template <typename T1, typename T2>
+inline size_t HashInts(T1 value1, T2 value2) {
+  // Compile-time width check (optimised away in release builds): use the
+  // 64-bit combiner as soon as either argument is wider than 32 bits.
+  const bool needs_64_bits =
+      sizeof(T1) > sizeof(uint32_t) || sizeof(T2) > sizeof(uint32_t);
+  if (needs_64_bits)
+    return HashInts64(value1, value2);
+  return HashInts32(value1, value2);
+}
+
+// A templated hasher for pairs of integer types, suitable as the Hash
+// template argument of unordered containers keyed by std::pair.
+template <typename T>
+struct IntPairHash;
+
+template <typename Type1, typename Type2>
+struct IntPairHash<std::pair<Type1, Type2>> {
+  // Combines both members via HashInts, which picks the 32- or 64-bit
+  // combiner based on the wider member type.
+  size_t operator()(std::pair<Type1, Type2> value) const {
+    return HashInts(value.first, value.second);
+  }
+};
+
+} // namespace base
+
+#endif // BASE_HASH_H_
diff --git a/libchrome/base/i18n/OWNERS b/libchrome/base/i18n/OWNERS
new file mode 100644
index 0000000..d717b8d
--- /dev/null
+++ b/libchrome/base/i18n/OWNERS
@@ -0,0 +1 @@
+jshin@chromium.org
diff --git a/libchrome/base/i18n/base_i18n_export.h b/libchrome/base/i18n/base_i18n_export.h
new file mode 100644
index 0000000..e8a2add
--- /dev/null
+++ b/libchrome/base/i18n/base_i18n_export.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_BASE_I18N_EXPORT_H_
+#define BASE_I18N_BASE_I18N_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(BASE_I18N_IMPLEMENTATION)
+#define BASE_I18N_EXPORT __declspec(dllexport)
+#else
+#define BASE_I18N_EXPORT __declspec(dllimport)
+#endif // defined(BASE_I18N_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#if defined(BASE_I18N_IMPLEMENTATION)
+#define BASE_I18N_EXPORT __attribute__((visibility("default")))
+#else
+#define BASE_I18N_EXPORT
+#endif
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define BASE_I18N_EXPORT
+#endif
+
+#endif // BASE_I18N_BASE_I18N_EXPORT_H_
diff --git a/libchrome/base/id_map.h b/libchrome/base/id_map.h
new file mode 100644
index 0000000..ef6b156
--- /dev/null
+++ b/libchrome/base/id_map.h
@@ -0,0 +1,285 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ID_MAP_H_
+#define BASE_ID_MAP_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <set>
+
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/sequence_checker.h"
+
+// Ownership semantics - own pointer means the pointer is deleted in Remove()
+// & during destruction
+enum IDMapOwnershipSemantics {
+  // The map stores raw, unowned pointers; the caller manages their lifetime.
+  IDMapExternalPointer,
+  // The map owns the pointers and deletes them in Remove()/Clear()/dtor.
+  IDMapOwnPointer
+};
+
+// This object maintains a list of IDs that can be quickly converted to
+// pointers to objects. It is implemented as a hash table, optimized for
+// relatively small data sets (in the common case, there will be exactly one
+// item in the list).
+//
+// Items can be inserted into the container with arbitrary ID, but the caller
+// must ensure they are unique. Inserting IDs and relying on automatically
+// generated ones is not allowed because they can collide.
+//
+// This class does not have a virtual destructor, do not inherit from it when
+// ownership semantics are set to own because pointers will leak.
+template <typename T,
+          IDMapOwnershipSemantics OS = IDMapExternalPointer,
+          typename K = int32_t>
+class IDMap {
+ public:
+  using KeyType = K;
+
+ private:
+  typedef base::hash_map<KeyType, T*> HashTable;
+
+ public:
+  IDMap() : iteration_depth_(0), next_id_(1), check_on_null_data_(false) {
+    // A number of consumers of IDMap create it on one thread but always
+    // access it from a different, but consistent, thread (or sequence)
+    // post-construction. The first call to CalledOnValidSequencedThread()
+    // will re-bind it.
+    sequence_checker_.DetachFromSequence();
+  }
+
+  ~IDMap() {
+    // Many IDMap's are static, and hence will be destroyed on the main
+    // thread. However, all the accesses may take place on another thread (or
+    // sequence), such as the IO thread. Detaching again to clean this up.
+    sequence_checker_.DetachFromSequence();
+    Releaser<OS, 0>::release_all(&data_);
+  }
+
+  // Sets whether Add and Replace should DCHECK if passed in NULL data.
+  // Default is false.
+  void set_check_on_null_data(bool value) { check_on_null_data_ = value; }
+
+  // Adds a view with an automatically generated unique ID. See AddWithID.
+  KeyType Add(T* data) {
+    DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+    DCHECK(!check_on_null_data_ || data);
+    KeyType this_id = next_id_;
+    DCHECK(data_.find(this_id) == data_.end()) << "Inserting duplicate item";
+    data_[this_id] = data;
+    next_id_++;
+    return this_id;
+  }
+
+  // Adds a new data member with the specified ID. The ID must not be in
+  // the list. The caller either must generate all unique IDs itself and use
+  // this function, or allow this object to generate IDs and call Add. These
+  // two methods may not be mixed, or duplicate IDs may be generated
+  void AddWithID(T* data, KeyType id) {
+    DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+    DCHECK(!check_on_null_data_ || data);
+    DCHECK(data_.find(id) == data_.end()) << "Inserting duplicate item";
+    data_[id] = data;
+  }
+
+  void Remove(KeyType id) {
+    DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+    typename HashTable::iterator i = data_.find(id);
+    if (i == data_.end()) {
+      NOTREACHED() << "Attempting to remove an item not in the list";
+      return;
+    }
+
+    if (iteration_depth_ == 0) {
+      Releaser<OS, 0>::release(i->second);
+      data_.erase(i);
+    } else {
+      // Deferred: erasing now would invalidate live iterators. The entry is
+      // removed in Compact() once the outermost iteration finishes.
+      removed_ids_.insert(id);
+    }
+  }
+
+  // Replaces the value for |id| with |new_data| and returns a pointer to the
+  // existing value. If there is no entry for |id|, the map is not altered and
+  // nullptr is returned. The OwnershipSemantics of the map have no effect on
+  // how the existing value is treated, the IDMap does not delete the existing
+  // value being replaced.
+  T* Replace(KeyType id, T* new_data) {
+    DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+    DCHECK(!check_on_null_data_ || new_data);
+    typename HashTable::iterator i = data_.find(id);
+    if (i == data_.end()) {
+      NOTREACHED() << "Attempting to replace an item not in the list";
+      return nullptr;
+    }
+
+    T* temp = i->second;
+    i->second = new_data;
+    return temp;
+  }
+
+  void Clear() {
+    DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+    if (iteration_depth_ == 0) {
+      Releaser<OS, 0>::release_all(&data_);
+    } else {
+      // Defer all removals while iterators are live; see Remove().
+      for (typename HashTable::iterator i = data_.begin();
+           i != data_.end(); ++i)
+        removed_ids_.insert(i->first);
+    }
+  }
+
+  bool IsEmpty() const {
+    DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+    return size() == 0u;
+  }
+
+  T* Lookup(KeyType id) const {
+    DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+    typename HashTable::const_iterator i = data_.find(id);
+    if (i == data_.end())
+      return nullptr;
+    return i->second;
+  }
+
+  size_t size() const {
+    DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+    // Entries pending deferred removal are already logically gone.
+    return data_.size() - removed_ids_.size();
+  }
+
+#if defined(UNIT_TEST)
+  int iteration_depth() const {
+    return iteration_depth_;
+  }
+#endif  // defined(UNIT_TEST)
+
+  // It is safe to remove elements from the map during iteration. All iterators
+  // will remain valid.
+  template<class ReturnType>
+  class Iterator {
+   public:
+    Iterator(IDMap<T, OS, K>* map)
+        : map_(map),
+          iter_(map_->data_.begin()) {
+      Init();
+    }
+
+    Iterator(const Iterator& iter)
+        : map_(iter.map_),
+          iter_(iter.iter_) {
+      Init();
+    }
+
+    const Iterator& operator=(const Iterator& iter) {
+      if (this != &iter) {
+        // Release this iterator's registration on its current map (mirrors
+        // the destructor) before re-binding to |iter|'s map via Init().
+        // Previously this read the non-existent members |iter.map| /
+        // |iter.iter| and leaked the old map's iteration depth.
+        DCHECK_LT(0, map_->iteration_depth_);
+        if (--map_->iteration_depth_ == 0)
+          map_->Compact();
+        map_ = iter.map_;
+        iter_ = iter.iter_;
+        Init();
+      }
+      return *this;
+    }
+
+    ~Iterator() {
+      DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
+
+      // We're going to decrement iteration depth. Make sure it's greater than
+      // zero so that it doesn't become negative.
+      DCHECK_LT(0, map_->iteration_depth_);
+
+      if (--map_->iteration_depth_ == 0)
+        map_->Compact();
+    }
+
+    bool IsAtEnd() const {
+      DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
+      return iter_ == map_->data_.end();
+    }
+
+    KeyType GetCurrentKey() const {
+      DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
+      return iter_->first;
+    }
+
+    ReturnType* GetCurrentValue() const {
+      DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
+      return iter_->second;
+    }
+
+    void Advance() {
+      DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
+      ++iter_;
+      SkipRemovedEntries();
+    }
+
+   private:
+    void Init() {
+      DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
+      ++map_->iteration_depth_;
+      SkipRemovedEntries();
+    }
+
+    // Moves |iter_| past entries that have been logically removed while
+    // iteration was in progress.
+    void SkipRemovedEntries() {
+      while (iter_ != map_->data_.end() &&
+             map_->removed_ids_.find(iter_->first) !=
+             map_->removed_ids_.end()) {
+        ++iter_;
+      }
+    }
+
+    IDMap<T, OS, K>* map_;
+    typename HashTable::const_iterator iter_;
+  };
+
+  typedef Iterator<T> iterator;
+  typedef Iterator<const T> const_iterator;
+
+ private:
+
+  // The dummy parameter is there because C++ standard does not allow
+  // explicitly specialized templates inside classes
+  template<IDMapOwnershipSemantics OI, int dummy> struct Releaser {
+    static inline void release(T* ptr) {}
+    static inline void release_all(HashTable* table) {}
+  };
+
+  template<int dummy> struct Releaser<IDMapOwnPointer, dummy> {
+    static inline void release(T* ptr) { delete ptr;}
+    static inline void release_all(HashTable* table) {
+      for (typename HashTable::iterator i = table->begin();
+           i != table->end(); ++i) {
+        delete i->second;
+      }
+      table->clear();
+    }
+  };
+
+  // Erases (and, if owning, deletes) all entries whose removal was deferred
+  // during iteration. Only called once no iterators remain.
+  void Compact() {
+    DCHECK_EQ(0, iteration_depth_);
+    for (const auto& i : removed_ids_)
+      Remove(i);
+    removed_ids_.clear();
+  }
+
+  // Keep track of how many iterators are currently iterating on us to safely
+  // handle removing items during iteration.
+  int iteration_depth_;
+
+  // Keep set of IDs that should be removed after the outermost iteration has
+  // finished. This way we manage to not invalidate the iterator when an element
+  // is removed.
+  std::set<KeyType> removed_ids_;
+
+  // The next ID that we will return from Add()
+  KeyType next_id_;
+
+  HashTable data_;
+
+  // See description above setter.
+  bool check_on_null_data_;
+
+  base::SequenceChecker sequence_checker_;
+
+  DISALLOW_COPY_AND_ASSIGN(IDMap);
+};
+
+#endif // BASE_ID_MAP_H_
diff --git a/libchrome/base/id_map_unittest.cc b/libchrome/base/id_map_unittest.cc
new file mode 100644
index 0000000..a3f0808
--- /dev/null
+++ b/libchrome/base/id_map_unittest.cc
@@ -0,0 +1,379 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/id_map.h"
+
+#include <stdint.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+// Empty placeholder value type used as the IDMap payload in these tests.
+class TestObject {
+};
+
+// Increments an external counter on destruction; used to verify the
+// IDMapOwnPointer deletion semantics.
+class DestructorCounter {
+ public:
+  explicit DestructorCounter(int* counter) : counter_(counter) {}
+  ~DestructorCounter() { ++(*counter_); }
+
+ private:
+  int* counter_;  // Not owned; must outlive this object.
+};
+
+TEST(IDMapTest, Basic) {
+  IDMap<TestObject> map;
+  EXPECT_TRUE(map.IsEmpty());
+  EXPECT_EQ(0U, map.size());
+
+  TestObject obj1;
+  TestObject obj2;
+
+  // Automatically generated IDs start at 1 and increase monotonically.
+  int32_t id1 = map.Add(&obj1);
+  EXPECT_FALSE(map.IsEmpty());
+  EXPECT_EQ(1U, map.size());
+  EXPECT_EQ(&obj1, map.Lookup(id1));
+
+  int32_t id2 = map.Add(&obj2);
+  EXPECT_FALSE(map.IsEmpty());
+  EXPECT_EQ(2U, map.size());
+
+  EXPECT_EQ(&obj1, map.Lookup(id1));
+  EXPECT_EQ(&obj2, map.Lookup(id2));
+
+  map.Remove(id1);
+  EXPECT_FALSE(map.IsEmpty());
+  EXPECT_EQ(1U, map.size());
+
+  map.Remove(id2);
+  EXPECT_TRUE(map.IsEmpty());
+  EXPECT_EQ(0U, map.size());
+
+  // Caller-chosen IDs; safe here because the auto-generated entries above
+  // were removed first.
+  map.AddWithID(&obj1, 1);
+  map.AddWithID(&obj2, 2);
+  EXPECT_EQ(&obj1, map.Lookup(1));
+  EXPECT_EQ(&obj2, map.Lookup(2));
+
+  // Replace() hands back the previous pointer without deleting it.
+  EXPECT_EQ(&obj2, map.Replace(2, &obj1));
+  EXPECT_EQ(&obj1, map.Lookup(2));
+
+  EXPECT_EQ(0, map.iteration_depth());
+}
+
+TEST(IDMapTest, IteratorRemainsValidWhenRemovingCurrentElement) {
+  IDMap<TestObject> map;
+
+  TestObject obj1;
+  TestObject obj2;
+  TestObject obj3;
+
+  map.Add(&obj1);
+  map.Add(&obj2);
+  map.Add(&obj3);
+
+  {
+    IDMap<TestObject>::const_iterator iter(&map);
+
+    // A live iterator registers itself with the map.
+    EXPECT_EQ(1, map.iteration_depth());
+
+    while (!iter.IsAtEnd()) {
+      map.Remove(iter.GetCurrentKey());
+      iter.Advance();
+    }
+
+    // Test that while an iterator is still in scope, we get the map emptiness
+    // right (http://crbug.com/35571).
+    EXPECT_TRUE(map.IsEmpty());
+    EXPECT_EQ(0U, map.size());
+  }
+
+  EXPECT_TRUE(map.IsEmpty());
+  EXPECT_EQ(0U, map.size());
+
+  EXPECT_EQ(0, map.iteration_depth());
+}
+
+TEST(IDMapTest, IteratorRemainsValidWhenRemovingOtherElements) {
+  IDMap<TestObject> map;
+
+  const int kCount = 5;
+  TestObject obj[kCount];
+
+  for (int i = 0; i < kCount; i++)
+    map.Add(&obj[i]);
+
+  // IDMap uses a hash_map, which has no predictable iteration order.
+  // Record the actual order first so removals can be scripted against it.
+  int32_t ids_in_iteration_order[kCount];
+  const TestObject* objs_in_iteration_order[kCount];
+  int counter = 0;
+  for (IDMap<TestObject>::const_iterator iter(&map);
+       !iter.IsAtEnd(); iter.Advance()) {
+    ids_in_iteration_order[counter] = iter.GetCurrentKey();
+    objs_in_iteration_order[counter] = iter.GetCurrentValue();
+    counter++;
+  }
+
+  // Second pass: remove the next (and finally the first) element while
+  // iterating; the iterator must skip the logically removed entries.
+  counter = 0;
+  for (IDMap<TestObject>::const_iterator iter(&map);
+       !iter.IsAtEnd(); iter.Advance()) {
+    EXPECT_EQ(1, map.iteration_depth());
+
+    switch (counter) {
+      case 0:
+        EXPECT_EQ(ids_in_iteration_order[0], iter.GetCurrentKey());
+        EXPECT_EQ(objs_in_iteration_order[0], iter.GetCurrentValue());
+        map.Remove(ids_in_iteration_order[1]);
+        break;
+      case 1:
+        EXPECT_EQ(ids_in_iteration_order[2], iter.GetCurrentKey());
+        EXPECT_EQ(objs_in_iteration_order[2], iter.GetCurrentValue());
+        map.Remove(ids_in_iteration_order[3]);
+        break;
+      case 2:
+        EXPECT_EQ(ids_in_iteration_order[4], iter.GetCurrentKey());
+        EXPECT_EQ(objs_in_iteration_order[4], iter.GetCurrentValue());
+        map.Remove(ids_in_iteration_order[0]);
+        break;
+      default:
+        FAIL() << "should not have that many elements";
+        break;
+    }
+
+    counter++;
+  }
+
+  EXPECT_EQ(0, map.iteration_depth());
+}
+
+TEST(IDMapTest, CopyIterator) {
+  IDMap<TestObject> map;
+
+  TestObject obj1;
+  TestObject obj2;
+  TestObject obj3;
+
+  map.Add(&obj1);
+  map.Add(&obj2);
+  map.Add(&obj3);
+
+  EXPECT_EQ(0, map.iteration_depth());
+
+  {
+    IDMap<TestObject>::const_iterator iter1(&map);
+    EXPECT_EQ(1, map.iteration_depth());
+
+    // Make sure that copying the iterator correctly increments
+    // map's iteration depth. Each copy registers and unregisters
+    // independently.
+    IDMap<TestObject>::const_iterator iter2(iter1);
+    EXPECT_EQ(2, map.iteration_depth());
+  }
+
+  // Make sure after destroying all iterators the map's iteration depth
+  // returns to initial state.
+  EXPECT_EQ(0, map.iteration_depth());
+}
+
+TEST(IDMapTest, AssignIterator) {
+  IDMap<TestObject> map;
+
+  TestObject obj1;
+  TestObject obj2;
+  TestObject obj3;
+
+  map.Add(&obj1);
+  map.Add(&obj2);
+  map.Add(&obj3);
+
+  EXPECT_EQ(0, map.iteration_depth());
+
+  {
+    IDMap<TestObject>::const_iterator iter1(&map);
+    EXPECT_EQ(1, map.iteration_depth());
+
+    IDMap<TestObject>::const_iterator iter2(&map);
+    EXPECT_EQ(2, map.iteration_depth());
+
+    // Make sure that assigning the iterator correctly updates
+    // map's iteration depth (-1 for destruction, +1 for assignment).
+    // NOTE(review): no assignment is actually performed above, so
+    // Iterator::operator= is never exercised by this test — TODO confirm
+    // whether `iter2 = iter1;` was intended here.
+    EXPECT_EQ(2, map.iteration_depth());
+  }
+
+  // Make sure after destroying all iterators the map's iteration depth
+  // returns to initial state.
+  EXPECT_EQ(0, map.iteration_depth());
+}
+
+TEST(IDMapTest, IteratorRemainsValidWhenClearing) {
+ IDMap<TestObject> map;
+
+ const int kCount = 5;
+ TestObject obj[kCount];
+
+ for (int i = 0; i < kCount; i++)
+ map.Add(&obj[i]);
+
+ // IDMap uses a hash_map, which has no predictable iteration order.
+ int32_t ids_in_iteration_order[kCount];
+ const TestObject* objs_in_iteration_order[kCount];
+ int counter = 0;
+ for (IDMap<TestObject>::const_iterator iter(&map);
+ !iter.IsAtEnd(); iter.Advance()) {
+ ids_in_iteration_order[counter] = iter.GetCurrentKey();
+ objs_in_iteration_order[counter] = iter.GetCurrentValue();
+ counter++;
+ }
+
+ counter = 0;
+ for (IDMap<TestObject>::const_iterator iter(&map);
+ !iter.IsAtEnd(); iter.Advance()) {
+ switch (counter) {
+ case 0:
+ EXPECT_EQ(ids_in_iteration_order[0], iter.GetCurrentKey());
+ EXPECT_EQ(objs_in_iteration_order[0], iter.GetCurrentValue());
+ break;
+ case 1:
+ EXPECT_EQ(ids_in_iteration_order[1], iter.GetCurrentKey());
+ EXPECT_EQ(objs_in_iteration_order[1], iter.GetCurrentValue());
+ map.Clear();
+ EXPECT_TRUE(map.IsEmpty());
+ EXPECT_EQ(0U, map.size());
+ break;
+ default:
+ FAIL() << "should not have that many elements";
+ break;
+ }
+ counter++;
+ }
+
+ EXPECT_TRUE(map.IsEmpty());
+ EXPECT_EQ(0U, map.size());
+}
+
+TEST(IDMapTest, OwningPointersDeletesThemOnRemove) {
+ const int kCount = 3;
+
+ int external_del_count = 0;
+ DestructorCounter* external_obj[kCount];
+ int map_external_ids[kCount];
+
+ int owned_del_count = 0;
+ DestructorCounter* owned_obj[kCount];
+ int map_owned_ids[kCount];
+
+ IDMap<DestructorCounter> map_external;
+ IDMap<DestructorCounter, IDMapOwnPointer> map_owned;
+
+ for (int i = 0; i < kCount; ++i) {
+ external_obj[i] = new DestructorCounter(&external_del_count);
+ map_external_ids[i] = map_external.Add(external_obj[i]);
+
+ owned_obj[i] = new DestructorCounter(&owned_del_count);
+ map_owned_ids[i] = map_owned.Add(owned_obj[i]);
+ }
+
+ for (int i = 0; i < kCount; ++i) {
+ EXPECT_EQ(external_del_count, 0);
+ EXPECT_EQ(owned_del_count, i);
+
+ map_external.Remove(map_external_ids[i]);
+ map_owned.Remove(map_owned_ids[i]);
+ }
+
+ for (int i = 0; i < kCount; ++i) {
+ delete external_obj[i];
+ }
+
+ EXPECT_EQ(external_del_count, kCount);
+ EXPECT_EQ(owned_del_count, kCount);
+}
+
+TEST(IDMapTest, OwningPointersDeletesThemOnClear) {
+ const int kCount = 3;
+
+ int external_del_count = 0;
+ DestructorCounter* external_obj[kCount];
+
+ int owned_del_count = 0;
+ DestructorCounter* owned_obj[kCount];
+
+ IDMap<DestructorCounter> map_external;
+ IDMap<DestructorCounter, IDMapOwnPointer> map_owned;
+
+ for (int i = 0; i < kCount; ++i) {
+ external_obj[i] = new DestructorCounter(&external_del_count);
+ map_external.Add(external_obj[i]);
+
+ owned_obj[i] = new DestructorCounter(&owned_del_count);
+ map_owned.Add(owned_obj[i]);
+ }
+
+ EXPECT_EQ(external_del_count, 0);
+ EXPECT_EQ(owned_del_count, 0);
+
+ map_external.Clear();
+ map_owned.Clear();
+
+ EXPECT_EQ(external_del_count, 0);
+ EXPECT_EQ(owned_del_count, kCount);
+
+ for (int i = 0; i < kCount; ++i) {
+ delete external_obj[i];
+ }
+
+ EXPECT_EQ(external_del_count, kCount);
+ EXPECT_EQ(owned_del_count, kCount);
+}
+
+TEST(IDMapTest, OwningPointersDeletesThemOnDestruct) {
+ const int kCount = 3;
+
+ int external_del_count = 0;
+ DestructorCounter* external_obj[kCount];
+
+ int owned_del_count = 0;
+ DestructorCounter* owned_obj[kCount];
+
+ {
+ IDMap<DestructorCounter> map_external;
+ IDMap<DestructorCounter, IDMapOwnPointer> map_owned;
+
+ for (int i = 0; i < kCount; ++i) {
+ external_obj[i] = new DestructorCounter(&external_del_count);
+ map_external.Add(external_obj[i]);
+
+ owned_obj[i] = new DestructorCounter(&owned_del_count);
+ map_owned.Add(owned_obj[i]);
+ }
+ }
+
+ EXPECT_EQ(external_del_count, 0);
+
+ for (int i = 0; i < kCount; ++i) {
+ delete external_obj[i];
+ }
+
+ EXPECT_EQ(external_del_count, kCount);
+ EXPECT_EQ(owned_del_count, kCount);
+}
+
+TEST(IDMapTest, Int64KeyType) {
+ IDMap<TestObject, IDMapExternalPointer, int64_t> map;
+ TestObject obj1;
+ const int64_t kId1 = 999999999999999999;
+
+ map.AddWithID(&obj1, kId1);
+ EXPECT_EQ(&obj1, map.Lookup(kId1));
+
+ IDMap<TestObject, IDMapExternalPointer, int64_t>::const_iterator iter(&map);
+ ASSERT_FALSE(iter.IsAtEnd());
+ EXPECT_EQ(kId1, iter.GetCurrentKey());
+ EXPECT_EQ(&obj1, iter.GetCurrentValue());
+ iter.Advance();
+ ASSERT_TRUE(iter.IsAtEnd());
+
+ map.Remove(kId1);
+ EXPECT_TRUE(map.IsEmpty());
+}
+
+} // namespace
diff --git a/libchrome/base/ios/OWNERS b/libchrome/base/ios/OWNERS
new file mode 100644
index 0000000..06f5ff1
--- /dev/null
+++ b/libchrome/base/ios/OWNERS
@@ -0,0 +1,3 @@
+droger@chromium.org
+qsr@chromium.org
+rohitrao@chromium.org
diff --git a/libchrome/base/ios/block_types.h b/libchrome/base/ios/block_types.h
new file mode 100644
index 0000000..e4dde79
--- /dev/null
+++ b/libchrome/base/ios/block_types.h
@@ -0,0 +1,14 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_IOS_BLOCK_TYPES_H_
+#define BASE_IOS_BLOCK_TYPES_H_
+
+// A generic procedural block type that takes no arguments and returns nothing.
+typedef void (^ProceduralBlock)(void);
+
+// A block that takes no arguments and returns a bool.
+typedef bool (^ConditionBlock)(void);
+
+#endif // BASE_IOS_BLOCK_TYPES_H_
diff --git a/libchrome/base/json/json_file_value_serializer.cc b/libchrome/base/json/json_file_value_serializer.cc
new file mode 100644
index 0000000..1a9b7a2
--- /dev/null
+++ b/libchrome/base/json/json_file_value_serializer.cc
@@ -0,0 +1,120 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_file_value_serializer.h"
+
+#include "base/files/file_util.h"
+#include "base/json/json_string_value_serializer.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+
+using base::FilePath;
+
+const char JSONFileValueDeserializer::kAccessDenied[] = "Access denied.";
+const char JSONFileValueDeserializer::kCannotReadFile[] = "Can't read file.";
+const char JSONFileValueDeserializer::kFileLocked[] = "File locked.";
+const char JSONFileValueDeserializer::kNoSuchFile[] = "File doesn't exist.";
+
+JSONFileValueSerializer::JSONFileValueSerializer(
+ const base::FilePath& json_file_path)
+ : json_file_path_(json_file_path) {
+}
+
+JSONFileValueSerializer::~JSONFileValueSerializer() {
+}
+
+bool JSONFileValueSerializer::Serialize(const base::Value& root) {
+ return SerializeInternal(root, false);
+}
+
+bool JSONFileValueSerializer::SerializeAndOmitBinaryValues(
+ const base::Value& root) {
+ return SerializeInternal(root, true);
+}
+
+bool JSONFileValueSerializer::SerializeInternal(const base::Value& root,
+ bool omit_binary_values) {
+ std::string json_string;
+ JSONStringValueSerializer serializer(&json_string);
+ serializer.set_pretty_print(true);
+ bool result = omit_binary_values ?
+ serializer.SerializeAndOmitBinaryValues(root) :
+ serializer.Serialize(root);
+ if (!result)
+ return false;
+
+ int data_size = static_cast<int>(json_string.size());
+ if (base::WriteFile(json_file_path_, json_string.data(), data_size) !=
+ data_size)
+ return false;
+
+ return true;
+}
+
+JSONFileValueDeserializer::JSONFileValueDeserializer(
+ const base::FilePath& json_file_path)
+ : json_file_path_(json_file_path),
+ allow_trailing_comma_(false),
+ last_read_size_(0U) {
+}
+
+JSONFileValueDeserializer::~JSONFileValueDeserializer() {
+}
+
+int JSONFileValueDeserializer::ReadFileToString(std::string* json_string) {
+ DCHECK(json_string);
+ if (!base::ReadFileToString(json_file_path_, json_string)) {
+#if defined(OS_WIN)
+ int error = ::GetLastError();
+ if (error == ERROR_SHARING_VIOLATION || error == ERROR_LOCK_VIOLATION) {
+ return JSON_FILE_LOCKED;
+ } else if (error == ERROR_ACCESS_DENIED) {
+ return JSON_ACCESS_DENIED;
+ }
+#endif
+ if (!base::PathExists(json_file_path_))
+ return JSON_NO_SUCH_FILE;
+ else
+ return JSON_CANNOT_READ_FILE;
+ }
+
+ last_read_size_ = json_string->size();
+ return JSON_NO_ERROR;
+}
+
+const char* JSONFileValueDeserializer::GetErrorMessageForCode(int error_code) {
+ switch (error_code) {
+ case JSON_NO_ERROR:
+ return "";
+ case JSON_ACCESS_DENIED:
+ return kAccessDenied;
+ case JSON_CANNOT_READ_FILE:
+ return kCannotReadFile;
+ case JSON_FILE_LOCKED:
+ return kFileLocked;
+ case JSON_NO_SUCH_FILE:
+ return kNoSuchFile;
+ default:
+ NOTREACHED();
+ return "";
+ }
+}
+
+std::unique_ptr<base::Value> JSONFileValueDeserializer::Deserialize(
+ int* error_code,
+ std::string* error_str) {
+ std::string json_string;
+ int error = ReadFileToString(&json_string);
+ if (error != JSON_NO_ERROR) {
+ if (error_code)
+ *error_code = error;
+ if (error_str)
+ *error_str = GetErrorMessageForCode(error);
+ return NULL;
+ }
+
+ JSONStringValueDeserializer deserializer(json_string);
+ deserializer.set_allow_trailing_comma(allow_trailing_comma_);
+ return deserializer.Deserialize(error_code, error_str);
+}
diff --git a/libchrome/base/json/json_file_value_serializer.h b/libchrome/base/json/json_file_value_serializer.h
new file mode 100644
index 0000000..67d2342
--- /dev/null
+++ b/libchrome/base/json/json_file_value_serializer.h
@@ -0,0 +1,106 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_JSON_JSON_FILE_VALUE_SERIALIZER_H_
+#define BASE_JSON_JSON_FILE_VALUE_SERIALIZER_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/values.h"
+
+class BASE_EXPORT JSONFileValueSerializer : public base::ValueSerializer {
+ public:
+  // |json_file_path| is the path of a file that will be the destination of the
+ // serialization. The serializer will attempt to create the file at the
+ // specified location.
+ explicit JSONFileValueSerializer(const base::FilePath& json_file_path);
+
+ ~JSONFileValueSerializer() override;
+
+ // DO NOT USE except in unit tests to verify the file was written properly.
+ // We should never serialize directly to a file since this will block the
+ // thread. Instead, serialize to a string and write to the file you want on
+ // the file thread.
+ //
+ // Attempt to serialize the data structure represented by Value into
+ // JSON. If the return value is true, the result will have been written
+ // into the file whose name was passed into the constructor.
+ bool Serialize(const base::Value& root) override;
+
+ // Equivalent to Serialize(root) except binary values are omitted from the
+ // output.
+ bool SerializeAndOmitBinaryValues(const base::Value& root);
+
+ private:
+ bool SerializeInternal(const base::Value& root, bool omit_binary_values);
+
+ const base::FilePath json_file_path_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSONFileValueSerializer);
+};
+
+class BASE_EXPORT JSONFileValueDeserializer : public base::ValueDeserializer {
+ public:
+  // |json_file_path| is the path of a file that will be the source of the
+ // deserialization.
+ explicit JSONFileValueDeserializer(const base::FilePath& json_file_path);
+
+ ~JSONFileValueDeserializer() override;
+
+ // Attempt to deserialize the data structure encoded in the file passed
+ // in to the constructor into a structure of Value objects. If the return
+ // value is NULL, and if |error_code| is non-null, |error_code| will
+ // contain an integer error code (either JsonFileError or JsonParseError).
+ // If |error_message| is non-null, it will be filled in with a formatted
+ // error message including the location of the error if appropriate.
+ // The caller takes ownership of the returned value.
+ std::unique_ptr<base::Value> Deserialize(int* error_code,
+ std::string* error_message) override;
+
+ // This enum is designed to safely overlap with JSONReader::JsonParseError.
+ enum JsonFileError {
+ JSON_NO_ERROR = 0,
+ JSON_ACCESS_DENIED = 1000,
+ JSON_CANNOT_READ_FILE,
+ JSON_FILE_LOCKED,
+ JSON_NO_SUCH_FILE
+ };
+
+ // File-specific error messages that can be returned.
+ static const char kAccessDenied[];
+ static const char kCannotReadFile[];
+ static const char kFileLocked[];
+ static const char kNoSuchFile[];
+
+ // Convert an error code into an error message. |error_code| is assumed to
+ // be a JsonFileError.
+ static const char* GetErrorMessageForCode(int error_code);
+
+ void set_allow_trailing_comma(bool new_value) {
+ allow_trailing_comma_ = new_value;
+ }
+
+ // Returns the size (in bytes) of JSON string read from disk in the last
+ // successful |Deserialize()| call.
+ size_t get_last_read_size() const { return last_read_size_; }
+
+ private:
+ // A wrapper for ReadFileToString which returns a non-zero JsonFileError if
+ // there were file errors.
+ int ReadFileToString(std::string* json_string);
+
+ const base::FilePath json_file_path_;
+ bool allow_trailing_comma_;
+ size_t last_read_size_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSONFileValueDeserializer);
+};
+
+#endif // BASE_JSON_JSON_FILE_VALUE_SERIALIZER_H_
+
diff --git a/libchrome/base/json/json_parser.cc b/libchrome/base/json/json_parser.cc
new file mode 100644
index 0000000..d97eccc
--- /dev/null
+++ b/libchrome/base/json/json_parser.cc
@@ -0,0 +1,989 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_parser.h"
+
+#include <cmath>
+#include <utility>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversion_utils.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/third_party/icu/icu_utf.h"
+#include "base/values.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+const int kStackMaxDepth = 100;
+
+const int32_t kExtendedASCIIStart = 0x80;
+
+// DictionaryHiddenRootValue and ListHiddenRootValue are used in conjunction
+// with JSONStringValue as an optimization for reducing the number of string
+// copies. When this optimization is active, the parser uses a hidden root to
+// keep the original JSON input string live and creates JSONStringValue children
+// holding StringPiece references to the input string, avoiding about 2/3rds of
+// string memory copies. The real root value is Swap()ed into the new instance.
+class DictionaryHiddenRootValue : public DictionaryValue {
+ public:
+ DictionaryHiddenRootValue(std::unique_ptr<std::string> json,
+ std::unique_ptr<Value> root)
+ : json_(std::move(json)) {
+ DCHECK(root->IsType(Value::TYPE_DICTIONARY));
+ DictionaryValue::Swap(static_cast<DictionaryValue*>(root.get()));
+ }
+
+ void Swap(DictionaryValue* other) override {
+ DVLOG(1) << "Swap()ing a DictionaryValue inefficiently.";
+
+ // First deep copy to convert JSONStringValue to std::string and swap that
+ // copy with |other|, which contains the new contents of |this|.
+ std::unique_ptr<DictionaryValue> copy(CreateDeepCopy());
+ copy->Swap(other);
+
+ // Then erase the contents of the current dictionary and swap in the
+ // new contents, originally from |other|.
+ Clear();
+ json_.reset();
+ DictionaryValue::Swap(copy.get());
+ }
+
+ // Not overriding DictionaryValue::Remove because it just calls through to
+ // the method below.
+
+ bool RemoveWithoutPathExpansion(const std::string& key,
+ std::unique_ptr<Value>* out) override {
+ // If the caller won't take ownership of the removed value, just call up.
+ if (!out)
+ return DictionaryValue::RemoveWithoutPathExpansion(key, out);
+
+ DVLOG(1) << "Remove()ing from a DictionaryValue inefficiently.";
+
+    // Otherwise, remove the value while it's still "owned" by this and copy it
+ // to convert any JSONStringValues to std::string.
+ std::unique_ptr<Value> out_owned;
+ if (!DictionaryValue::RemoveWithoutPathExpansion(key, &out_owned))
+ return false;
+
+ *out = out_owned->CreateDeepCopy();
+
+ return true;
+ }
+
+ private:
+ std::unique_ptr<std::string> json_;
+
+ DISALLOW_COPY_AND_ASSIGN(DictionaryHiddenRootValue);
+};
+
+class ListHiddenRootValue : public ListValue {
+ public:
+ ListHiddenRootValue(std::unique_ptr<std::string> json,
+ std::unique_ptr<Value> root)
+ : json_(std::move(json)) {
+ DCHECK(root->IsType(Value::TYPE_LIST));
+ ListValue::Swap(static_cast<ListValue*>(root.get()));
+ }
+
+ void Swap(ListValue* other) override {
+ DVLOG(1) << "Swap()ing a ListValue inefficiently.";
+
+ // First deep copy to convert JSONStringValue to std::string and swap that
+ // copy with |other|, which contains the new contents of |this|.
+ std::unique_ptr<ListValue> copy(CreateDeepCopy());
+ copy->Swap(other);
+
+ // Then erase the contents of the current list and swap in the new contents,
+ // originally from |other|.
+ Clear();
+ json_.reset();
+ ListValue::Swap(copy.get());
+ }
+
+ bool Remove(size_t index, std::unique_ptr<Value>* out) override {
+ // If the caller won't take ownership of the removed value, just call up.
+ if (!out)
+ return ListValue::Remove(index, out);
+
+ DVLOG(1) << "Remove()ing from a ListValue inefficiently.";
+
+    // Otherwise, remove the value while it's still "owned" by this and copy it
+ // to convert any JSONStringValues to std::string.
+ std::unique_ptr<Value> out_owned;
+ if (!ListValue::Remove(index, &out_owned))
+ return false;
+
+ *out = out_owned->CreateDeepCopy();
+
+ return true;
+ }
+
+ private:
+ std::unique_ptr<std::string> json_;
+
+ DISALLOW_COPY_AND_ASSIGN(ListHiddenRootValue);
+};
+
+// A variant on StringValue that uses StringPiece instead of copying the string
+// into the Value. This can only be stored in a child of hidden root (above),
+// otherwise the referenced string will not be guaranteed to outlive it.
+class JSONStringValue : public Value {
+ public:
+ explicit JSONStringValue(StringPiece piece)
+ : Value(TYPE_STRING), string_piece_(piece) {}
+
+ // Overridden from Value:
+ bool GetAsString(std::string* out_value) const override {
+ string_piece_.CopyToString(out_value);
+ return true;
+ }
+ bool GetAsString(string16* out_value) const override {
+ *out_value = UTF8ToUTF16(string_piece_);
+ return true;
+ }
+ Value* DeepCopy() const override {
+ return new StringValue(string_piece_.as_string());
+ }
+ bool Equals(const Value* other) const override {
+ std::string other_string;
+ return other->IsType(TYPE_STRING) && other->GetAsString(&other_string) &&
+ StringPiece(other_string) == string_piece_;
+ }
+
+ private:
+ // The location in the original input stream.
+ StringPiece string_piece_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSONStringValue);
+};
+
+// Simple class that checks for maximum recursion/"stack overflow."
+class StackMarker {
+ public:
+ explicit StackMarker(int* depth) : depth_(depth) {
+ ++(*depth_);
+ DCHECK_LE(*depth_, kStackMaxDepth);
+ }
+ ~StackMarker() {
+ --(*depth_);
+ }
+
+ bool IsTooDeep() const {
+ return *depth_ >= kStackMaxDepth;
+ }
+
+ private:
+ int* const depth_;
+
+ DISALLOW_COPY_AND_ASSIGN(StackMarker);
+};
+
+} // namespace
+
+JSONParser::JSONParser(int options)
+ : options_(options),
+ start_pos_(nullptr),
+ pos_(nullptr),
+ end_pos_(nullptr),
+ index_(0),
+ stack_depth_(0),
+ line_number_(0),
+ index_last_line_(0),
+ error_code_(JSONReader::JSON_NO_ERROR),
+ error_line_(0),
+ error_column_(0) {
+}
+
+JSONParser::~JSONParser() {
+}
+
+std::unique_ptr<Value> JSONParser::Parse(StringPiece input) {
+ std::unique_ptr<std::string> input_copy;
+ // If the children of a JSON root can be detached, then hidden roots cannot
+ // be used, so do not bother copying the input because StringPiece will not
+ // be used anywhere.
+ if (!(options_ & JSON_DETACHABLE_CHILDREN)) {
+ input_copy = MakeUnique<std::string>(input.as_string());
+ start_pos_ = input_copy->data();
+ } else {
+ start_pos_ = input.data();
+ }
+ pos_ = start_pos_;
+ end_pos_ = start_pos_ + input.length();
+ index_ = 0;
+ line_number_ = 1;
+ index_last_line_ = 0;
+
+ error_code_ = JSONReader::JSON_NO_ERROR;
+ error_line_ = 0;
+ error_column_ = 0;
+
+ // When the input JSON string starts with a UTF-8 Byte-Order-Mark
+ // <0xEF 0xBB 0xBF>, advance the start position to avoid the
+ // ParseNextToken function mis-treating a Unicode BOM as an invalid
+ // character and returning NULL.
+ if (CanConsume(3) && static_cast<uint8_t>(*pos_) == 0xEF &&
+ static_cast<uint8_t>(*(pos_ + 1)) == 0xBB &&
+ static_cast<uint8_t>(*(pos_ + 2)) == 0xBF) {
+ NextNChars(3);
+ }
+
+ // Parse the first and any nested tokens.
+ std::unique_ptr<Value> root(ParseNextToken());
+ if (!root)
+ return nullptr;
+
+ // Make sure the input stream is at an end.
+ if (GetNextToken() != T_END_OF_INPUT) {
+ if (!CanConsume(1) || (NextChar() && GetNextToken() != T_END_OF_INPUT)) {
+ ReportError(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, 1);
+ return nullptr;
+ }
+ }
+
+ // Dictionaries and lists can contain JSONStringValues, so wrap them in a
+ // hidden root.
+ if (!(options_ & JSON_DETACHABLE_CHILDREN)) {
+ if (root->IsType(Value::TYPE_DICTIONARY)) {
+ return MakeUnique<DictionaryHiddenRootValue>(std::move(input_copy),
+ std::move(root));
+ }
+ if (root->IsType(Value::TYPE_LIST)) {
+ return MakeUnique<ListHiddenRootValue>(std::move(input_copy),
+ std::move(root));
+ }
+ if (root->IsType(Value::TYPE_STRING)) {
+ // A string type could be a JSONStringValue, but because there's no
+ // corresponding HiddenRootValue, the memory will be lost. Deep copy to
+ // preserve it.
+ return root->CreateDeepCopy();
+ }
+ }
+
+ // All other values can be returned directly.
+ return root;
+}
+
+JSONReader::JsonParseError JSONParser::error_code() const {
+ return error_code_;
+}
+
+std::string JSONParser::GetErrorMessage() const {
+ return FormatErrorMessage(error_line_, error_column_,
+ JSONReader::ErrorCodeToString(error_code_));
+}
+
+int JSONParser::error_line() const {
+ return error_line_;
+}
+
+int JSONParser::error_column() const {
+ return error_column_;
+}
+
+// StringBuilder ///////////////////////////////////////////////////////////////
+
+JSONParser::StringBuilder::StringBuilder() : StringBuilder(nullptr) {}
+
+JSONParser::StringBuilder::StringBuilder(const char* pos)
+ : pos_(pos),
+ length_(0),
+ string_(nullptr) {
+}
+
+void JSONParser::StringBuilder::Swap(StringBuilder* other) {
+ std::swap(other->string_, string_);
+ std::swap(other->pos_, pos_);
+ std::swap(other->length_, length_);
+}
+
+JSONParser::StringBuilder::~StringBuilder() {
+ delete string_;
+}
+
+void JSONParser::StringBuilder::Append(const char& c) {
+ DCHECK_GE(c, 0);
+ DCHECK_LT(static_cast<unsigned char>(c), 128);
+
+ if (string_)
+ string_->push_back(c);
+ else
+ ++length_;
+}
+
+void JSONParser::StringBuilder::AppendString(const std::string& str) {
+ DCHECK(string_);
+ string_->append(str);
+}
+
+void JSONParser::StringBuilder::Convert() {
+ if (string_)
+ return;
+ string_ = new std::string(pos_, length_);
+}
+
+bool JSONParser::StringBuilder::CanBeStringPiece() const {
+ return !string_;
+}
+
+StringPiece JSONParser::StringBuilder::AsStringPiece() {
+ if (string_)
+ return StringPiece();
+ return StringPiece(pos_, length_);
+}
+
+const std::string& JSONParser::StringBuilder::AsString() {
+ if (!string_)
+ Convert();
+ return *string_;
+}
+
+// JSONParser private //////////////////////////////////////////////////////////
+
+inline bool JSONParser::CanConsume(int length) {
+ return pos_ + length <= end_pos_;
+}
+
+const char* JSONParser::NextChar() {
+ DCHECK(CanConsume(1));
+ ++index_;
+ ++pos_;
+ return pos_;
+}
+
+void JSONParser::NextNChars(int n) {
+ DCHECK(CanConsume(n));
+ index_ += n;
+ pos_ += n;
+}
+
+JSONParser::Token JSONParser::GetNextToken() {
+ EatWhitespaceAndComments();
+ if (!CanConsume(1))
+ return T_END_OF_INPUT;
+
+ switch (*pos_) {
+ case '{':
+ return T_OBJECT_BEGIN;
+ case '}':
+ return T_OBJECT_END;
+ case '[':
+ return T_ARRAY_BEGIN;
+ case ']':
+ return T_ARRAY_END;
+ case '"':
+ return T_STRING;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ case '-':
+ return T_NUMBER;
+ case 't':
+ return T_BOOL_TRUE;
+ case 'f':
+ return T_BOOL_FALSE;
+ case 'n':
+ return T_NULL;
+ case ',':
+ return T_LIST_SEPARATOR;
+ case ':':
+ return T_OBJECT_PAIR_SEPARATOR;
+ default:
+ return T_INVALID_TOKEN;
+ }
+}
+
+void JSONParser::EatWhitespaceAndComments() {
+ while (pos_ < end_pos_) {
+ switch (*pos_) {
+ case '\r':
+ case '\n':
+ index_last_line_ = index_;
+ // Don't increment line_number_ twice for "\r\n".
+ if (!(*pos_ == '\n' && pos_ > start_pos_ && *(pos_ - 1) == '\r'))
+ ++line_number_;
+ // Fall through.
+ case ' ':
+ case '\t':
+ NextChar();
+ break;
+ case '/':
+ if (!EatComment())
+ return;
+ break;
+ default:
+ return;
+ }
+ }
+}
+
+bool JSONParser::EatComment() {
+ if (*pos_ != '/' || !CanConsume(1))
+ return false;
+
+ char next_char = *NextChar();
+ if (next_char == '/') {
+ // Single line comment, read to newline.
+ while (CanConsume(1)) {
+ next_char = *NextChar();
+ if (next_char == '\n' || next_char == '\r')
+ return true;
+ }
+ } else if (next_char == '*') {
+ char previous_char = '\0';
+ // Block comment, read until end marker.
+ while (CanConsume(1)) {
+ next_char = *NextChar();
+ if (previous_char == '*' && next_char == '/') {
+ // EatWhitespaceAndComments will inspect pos_, which will still be on
+ // the last / of the comment, so advance once more (which may also be
+ // end of input).
+ NextChar();
+ return true;
+ }
+ previous_char = next_char;
+ }
+
+ // If the comment is unterminated, GetNextToken will report T_END_OF_INPUT.
+ }
+
+ return false;
+}
+
+Value* JSONParser::ParseNextToken() {
+ return ParseToken(GetNextToken());
+}
+
+Value* JSONParser::ParseToken(Token token) {
+ switch (token) {
+ case T_OBJECT_BEGIN:
+ return ConsumeDictionary();
+ case T_ARRAY_BEGIN:
+ return ConsumeList();
+ case T_STRING:
+ return ConsumeString();
+ case T_NUMBER:
+ return ConsumeNumber();
+ case T_BOOL_TRUE:
+ case T_BOOL_FALSE:
+ case T_NULL:
+ return ConsumeLiteral();
+ default:
+ ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
+ return nullptr;
+ }
+}
+
+Value* JSONParser::ConsumeDictionary() {
+ if (*pos_ != '{') {
+ ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
+ return nullptr;
+ }
+
+ StackMarker depth_check(&stack_depth_);
+ if (depth_check.IsTooDeep()) {
+ ReportError(JSONReader::JSON_TOO_MUCH_NESTING, 1);
+ return nullptr;
+ }
+
+ std::unique_ptr<DictionaryValue> dict(new DictionaryValue);
+
+ NextChar();
+ Token token = GetNextToken();
+ while (token != T_OBJECT_END) {
+ if (token != T_STRING) {
+ ReportError(JSONReader::JSON_UNQUOTED_DICTIONARY_KEY, 1);
+ return nullptr;
+ }
+
+ // First consume the key.
+ StringBuilder key;
+ if (!ConsumeStringRaw(&key)) {
+ return nullptr;
+ }
+
+ // Read the separator.
+ NextChar();
+ token = GetNextToken();
+ if (token != T_OBJECT_PAIR_SEPARATOR) {
+ ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+ return nullptr;
+ }
+
+ // The next token is the value. Ownership transfers to |dict|.
+ NextChar();
+ Value* value = ParseNextToken();
+ if (!value) {
+ // ReportError from deeper level.
+ return nullptr;
+ }
+
+ dict->SetWithoutPathExpansion(key.AsString(), value);
+
+ NextChar();
+ token = GetNextToken();
+ if (token == T_LIST_SEPARATOR) {
+ NextChar();
+ token = GetNextToken();
+ if (token == T_OBJECT_END && !(options_ & JSON_ALLOW_TRAILING_COMMAS)) {
+ ReportError(JSONReader::JSON_TRAILING_COMMA, 1);
+ return nullptr;
+ }
+ } else if (token != T_OBJECT_END) {
+ ReportError(JSONReader::JSON_SYNTAX_ERROR, 0);
+ return nullptr;
+ }
+ }
+
+ return dict.release();
+}
+
+Value* JSONParser::ConsumeList() {
+ if (*pos_ != '[') {
+ ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
+ return nullptr;
+ }
+
+ StackMarker depth_check(&stack_depth_);
+ if (depth_check.IsTooDeep()) {
+ ReportError(JSONReader::JSON_TOO_MUCH_NESTING, 1);
+ return nullptr;
+ }
+
+ std::unique_ptr<ListValue> list(new ListValue);
+
+ NextChar();
+ Token token = GetNextToken();
+ while (token != T_ARRAY_END) {
+ Value* item = ParseToken(token);
+ if (!item) {
+ // ReportError from deeper level.
+ return nullptr;
+ }
+
+ list->Append(item);
+
+ NextChar();
+ token = GetNextToken();
+ if (token == T_LIST_SEPARATOR) {
+ NextChar();
+ token = GetNextToken();
+ if (token == T_ARRAY_END && !(options_ & JSON_ALLOW_TRAILING_COMMAS)) {
+ ReportError(JSONReader::JSON_TRAILING_COMMA, 1);
+ return nullptr;
+ }
+ } else if (token != T_ARRAY_END) {
+ ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+ return nullptr;
+ }
+ }
+
+ return list.release();
+}
+
+Value* JSONParser::ConsumeString() {
+ StringBuilder string;
+ if (!ConsumeStringRaw(&string))
+ return nullptr;
+
+ // Create the Value representation, using a hidden root, if configured
+ // to do so, and if the string can be represented by StringPiece.
+ if (string.CanBeStringPiece() && !(options_ & JSON_DETACHABLE_CHILDREN))
+ return new JSONStringValue(string.AsStringPiece());
+
+ if (string.CanBeStringPiece())
+ string.Convert();
+ return new StringValue(string.AsString());
+}
+
+bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
+ if (*pos_ != '"') {
+ ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
+ return false;
+ }
+
+ // StringBuilder will internally build a StringPiece unless a UTF-16
+ // conversion occurs, at which point it will perform a copy into a
+ // std::string.
+ StringBuilder string(NextChar());
+
+ int length = end_pos_ - start_pos_;
+ int32_t next_char = 0;
+
+ while (CanConsume(1)) {
+ pos_ = start_pos_ + index_; // CBU8_NEXT is postcrement.
+ CBU8_NEXT(start_pos_, index_, length, next_char);
+ if (next_char < 0 || !IsValidCharacter(next_char)) {
+ ReportError(JSONReader::JSON_UNSUPPORTED_ENCODING, 1);
+ return false;
+ }
+
+ if (next_char == '"') {
+ --index_; // Rewind by one because of CBU8_NEXT.
+ out->Swap(&string);
+ return true;
+ }
+
+ // If this character is not an escape sequence...
+ if (next_char != '\\') {
+ if (next_char < kExtendedASCIIStart)
+ string.Append(static_cast<char>(next_char));
+ else
+ DecodeUTF8(next_char, &string);
+ } else {
+ // And if it is an escape sequence, the input string will be adjusted
+ // (either by combining the two characters of an encoded escape sequence,
+ // or with a UTF conversion), so using StringPiece isn't possible -- force
+ // a conversion.
+ string.Convert();
+
+ if (!CanConsume(1)) {
+ ReportError(JSONReader::JSON_INVALID_ESCAPE, 0);
+ return false;
+ }
+
+ switch (*NextChar()) {
+          // Allowed escape sequences:
+ case 'x': { // UTF-8 sequence.
+ // UTF-8 \x escape sequences are not allowed in the spec, but they
+            // are supported here for backwards-compatibility with the old parser.
+ if (!CanConsume(2)) {
+ ReportError(JSONReader::JSON_INVALID_ESCAPE, 1);
+ return false;
+ }
+
+ int hex_digit = 0;
+ if (!HexStringToInt(StringPiece(NextChar(), 2), &hex_digit)) {
+ ReportError(JSONReader::JSON_INVALID_ESCAPE, -1);
+ return false;
+ }
+ NextChar();
+
+ if (hex_digit < kExtendedASCIIStart)
+ string.Append(static_cast<char>(hex_digit));
+ else
+ DecodeUTF8(hex_digit, &string);
+ break;
+ }
+ case 'u': { // UTF-16 sequence.
+ // UTF units are of the form \uXXXX.
+ if (!CanConsume(5)) { // 5 being 'u' and four HEX digits.
+ ReportError(JSONReader::JSON_INVALID_ESCAPE, 0);
+ return false;
+ }
+
+ // Skip the 'u'.
+ NextChar();
+
+ std::string utf8_units;
+ if (!DecodeUTF16(&utf8_units)) {
+ ReportError(JSONReader::JSON_INVALID_ESCAPE, -1);
+ return false;
+ }
+
+ string.AppendString(utf8_units);
+ break;
+ }
+ case '"':
+ string.Append('"');
+ break;
+ case '\\':
+ string.Append('\\');
+ break;
+ case '/':
+ string.Append('/');
+ break;
+ case 'b':
+ string.Append('\b');
+ break;
+ case 'f':
+ string.Append('\f');
+ break;
+ case 'n':
+ string.Append('\n');
+ break;
+ case 'r':
+ string.Append('\r');
+ break;
+ case 't':
+ string.Append('\t');
+ break;
+ case 'v': // Not listed as valid escape sequence in the RFC.
+ string.Append('\v');
+ break;
+ // All other escape sequences are illegal.
+ default:
+ ReportError(JSONReader::JSON_INVALID_ESCAPE, 0);
+ return false;
+ }
+ }
+ }
+
+ ReportError(JSONReader::JSON_SYNTAX_ERROR, 0);
+ return false;
+}
+
+// Entry is at the first X in \uXXXX.
+bool JSONParser::DecodeUTF16(std::string* dest_string) {
+ if (!CanConsume(4))
+ return false;
+
+ // This is a 32-bit field because the shift operations in the
+ // conversion process below cause MSVC to error about "data loss."
+ // This only stores UTF-16 code units, though.
+ // Consume the UTF-16 code unit, which may be a high surrogate.
+ int code_unit16_high = 0;
+ if (!HexStringToInt(StringPiece(pos_, 4), &code_unit16_high))
+ return false;
+
+ // Only add 3, not 4, because at the end of this iteration, the parser has
+ // finished working with the last digit of the UTF sequence, meaning that
+ // the next iteration will advance to the next byte.
+ NextNChars(3);
+
+ // Used to convert the UTF-16 code units to a code point and then to a UTF-8
+ // code unit sequence.
+ char code_unit8[8] = { 0 };
+ size_t offset = 0;
+
+ // If this is a high surrogate, consume the next code unit to get the
+ // low surrogate.
+ if (CBU16_IS_SURROGATE(code_unit16_high)) {
+ // Make sure this is the high surrogate. If not, it's an encoding
+ // error.
+ if (!CBU16_IS_SURROGATE_LEAD(code_unit16_high))
+ return false;
+
+ // Make sure that the token has more characters to consume the
+ // lower surrogate.
+ if (!CanConsume(6)) // 6 being '\' 'u' and four HEX digits.
+ return false;
+ if (*NextChar() != '\\' || *NextChar() != 'u')
+ return false;
+
+ NextChar(); // Read past 'u'.
+ int code_unit16_low = 0;
+ if (!HexStringToInt(StringPiece(pos_, 4), &code_unit16_low))
+ return false;
+
+ NextNChars(3);
+
+ if (!CBU16_IS_TRAIL(code_unit16_low)) {
+ return false;
+ }
+
+ uint32_t code_point =
+ CBU16_GET_SUPPLEMENTARY(code_unit16_high, code_unit16_low);
+ if (!IsValidCharacter(code_point))
+ return false;
+
+ offset = 0;
+ CBU8_APPEND_UNSAFE(code_unit8, offset, code_point);
+ } else {
+ // Not a surrogate.
+ DCHECK(CBU16_IS_SINGLE(code_unit16_high));
+ if (!IsValidCharacter(code_unit16_high))
+ return false;
+
+ CBU8_APPEND_UNSAFE(code_unit8, offset, code_unit16_high);
+ }
+
+ dest_string->append(code_unit8);
+ return true;
+}
+
+void JSONParser::DecodeUTF8(const int32_t& point, StringBuilder* dest) {
+ DCHECK(IsValidCharacter(point));
+
+ // Anything outside of the basic ASCII plane will need to be decoded from
+ // int32_t to a multi-byte sequence.
+ if (point < kExtendedASCIIStart) {
+ dest->Append(static_cast<char>(point));
+ } else {
+ char utf8_units[4] = { 0 };
+ int offset = 0;
+ CBU8_APPEND_UNSAFE(utf8_units, offset, point);
+ dest->Convert();
+ // CBU8_APPEND_UNSAFE can overwrite up to 4 bytes, so utf8_units may not be
+ // zero terminated at this point. |offset| contains the correct length.
+ dest->AppendString(std::string(utf8_units, offset));
+ }
+}
+
+Value* JSONParser::ConsumeNumber() {
+ const char* num_start = pos_;
+ const int start_index = index_;
+ int end_index = start_index;
+
+ if (*pos_ == '-')
+ NextChar();
+
+ if (!ReadInt(false)) {
+ ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+ return nullptr;
+ }
+ end_index = index_;
+
+ // The optional fraction part.
+ if (*pos_ == '.') {
+ if (!CanConsume(1)) {
+ ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+ return nullptr;
+ }
+ NextChar();
+ if (!ReadInt(true)) {
+ ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+ return nullptr;
+ }
+ end_index = index_;
+ }
+
+ // Optional exponent part.
+ if (*pos_ == 'e' || *pos_ == 'E') {
+ NextChar();
+ if (*pos_ == '-' || *pos_ == '+')
+ NextChar();
+ if (!ReadInt(true)) {
+ ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+ return nullptr;
+ }
+ end_index = index_;
+ }
+
+ // ReadInt is greedy because numbers have no easily detectable sentinel,
+ // so save off where the parser should be on exit (see Consume invariant at
+ // the top of the header), then make sure the next token is one which is
+ // valid.
+ const char* exit_pos = pos_ - 1;
+ int exit_index = index_ - 1;
+
+ switch (GetNextToken()) {
+ case T_OBJECT_END:
+ case T_ARRAY_END:
+ case T_LIST_SEPARATOR:
+ case T_END_OF_INPUT:
+ break;
+ default:
+ ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+ return nullptr;
+ }
+
+ pos_ = exit_pos;
+ index_ = exit_index;
+
+ StringPiece num_string(num_start, end_index - start_index);
+
+ int num_int;
+ if (StringToInt(num_string, &num_int))
+ return new FundamentalValue(num_int);
+
+ double num_double;
+ if (StringToDouble(num_string.as_string(), &num_double) &&
+ std::isfinite(num_double)) {
+ return new FundamentalValue(num_double);
+ }
+
+ return nullptr;
+}
+
+bool JSONParser::ReadInt(bool allow_leading_zeros) {
+ char first = *pos_;
+ int len = 0;
+
+ char c = first;
+ while (CanConsume(1) && IsAsciiDigit(c)) {
+ c = *NextChar();
+ ++len;
+ }
+
+ if (len == 0)
+ return false;
+
+ if (!allow_leading_zeros && len > 1 && first == '0')
+ return false;
+
+ return true;
+}
+
+Value* JSONParser::ConsumeLiteral() {
+ switch (*pos_) {
+ case 't': {
+ const char kTrueLiteral[] = "true";
+ const int kTrueLen = static_cast<int>(strlen(kTrueLiteral));
+ if (!CanConsume(kTrueLen - 1) ||
+ !StringsAreEqual(pos_, kTrueLiteral, kTrueLen)) {
+ ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+ return nullptr;
+ }
+ NextNChars(kTrueLen - 1);
+ return new FundamentalValue(true);
+ }
+ case 'f': {
+ const char kFalseLiteral[] = "false";
+ const int kFalseLen = static_cast<int>(strlen(kFalseLiteral));
+ if (!CanConsume(kFalseLen - 1) ||
+ !StringsAreEqual(pos_, kFalseLiteral, kFalseLen)) {
+ ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+ return nullptr;
+ }
+ NextNChars(kFalseLen - 1);
+ return new FundamentalValue(false);
+ }
+ case 'n': {
+ const char kNullLiteral[] = "null";
+ const int kNullLen = static_cast<int>(strlen(kNullLiteral));
+ if (!CanConsume(kNullLen - 1) ||
+ !StringsAreEqual(pos_, kNullLiteral, kNullLen)) {
+ ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+ return nullptr;
+ }
+ NextNChars(kNullLen - 1);
+ return Value::CreateNullValue().release();
+ }
+ default:
+ ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
+ return nullptr;
+ }
+}
+
+// static
+bool JSONParser::StringsAreEqual(const char* one, const char* two, size_t len) {
+ return strncmp(one, two, len) == 0;
+}
+
+void JSONParser::ReportError(JSONReader::JsonParseError code,
+ int column_adjust) {
+ error_code_ = code;
+ error_line_ = line_number_;
+ error_column_ = index_ - index_last_line_ + column_adjust;
+}
+
+// static
+std::string JSONParser::FormatErrorMessage(int line, int column,
+ const std::string& description) {
+ if (line || column) {
+ return StringPrintf("Line: %i, column: %i, %s",
+ line, column, description.c_str());
+ }
+ return description;
+}
+
+} // namespace internal
+} // namespace base
diff --git a/libchrome/base/json/json_parser.h b/libchrome/base/json/json_parser.h
new file mode 100644
index 0000000..7539fa9
--- /dev/null
+++ b/libchrome/base/json/json_parser.h
@@ -0,0 +1,268 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_JSON_JSON_PARSER_H_
+#define BASE_JSON_JSON_PARSER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/json/json_reader.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+class Value;
+
+namespace internal {
+
+class JSONParserTest;
+
+// The implementation behind the JSONReader interface. This class is not meant
+// to be used directly; it encapsulates logic that need not be exposed publicly.
+//
+// This parser guarantees O(n) time through the input string. It also optimizes
+// base::StringValue by using StringPiece where possible when returning Value
+// objects by using "hidden roots," discussed in the implementation.
+//
+// Iteration happens on the byte level, with the functions CanConsume and
+// NextChar. The conversion from byte to JSON token happens without advancing
+// the parser in GetNextToken/ParseToken, that is tokenization operates on
+// the current parser position without advancing.
+//
+// Built on top of these are a family of Consume functions that iterate
+// internally. Invariant: on entry of a Consume function, the parser is wound
+// to the first byte of a valid JSON token. On exit, it is on the last byte
+// of a token, such that the next iteration of the parser will be at the byte
+// immediately following the token, which would likely be the first byte of the
+// next token.
+class BASE_EXPORT JSONParser {
+ public:
+ explicit JSONParser(int options);
+ ~JSONParser();
+
+ // Parses the input string according to the set options and returns the
+ // result as a Value.
+ // Wrap this in base::FooValue::From() to check the Value is of type Foo and
+ // convert to a FooValue at the same time.
+ std::unique_ptr<Value> Parse(StringPiece input);
+
+ // Returns the error code.
+ JSONReader::JsonParseError error_code() const;
+
+ // Returns the human-friendly error message.
+ std::string GetErrorMessage() const;
+
+ // Returns the error line number if parse error happened. Otherwise always
+ // returns 0.
+ int error_line() const;
+
+ // Returns the error column number if parse error happened. Otherwise always
+ // returns 0.
+ int error_column() const;
+
+ private:
+ enum Token {
+ T_OBJECT_BEGIN, // {
+ T_OBJECT_END, // }
+ T_ARRAY_BEGIN, // [
+ T_ARRAY_END, // ]
+ T_STRING,
+ T_NUMBER,
+ T_BOOL_TRUE, // true
+ T_BOOL_FALSE, // false
+ T_NULL, // null
+ T_LIST_SEPARATOR, // ,
+ T_OBJECT_PAIR_SEPARATOR, // :
+ T_END_OF_INPUT,
+ T_INVALID_TOKEN,
+ };
+
+ // A helper class used for parsing strings. One optimization performed is to
+ // create base::Value with a StringPiece to avoid unnecessary std::string
+ // copies. This is not possible if the input string needs to be decoded from
+ // UTF-16 to UTF-8, or if an escape sequence causes characters to be skipped.
+ // This class centralizes that logic.
+ class StringBuilder {
+ public:
+ // Empty constructor. Used for creating a builder with which to Swap().
+ StringBuilder();
+
+ // |pos| is the beginning of an input string, excluding the |"|.
+ explicit StringBuilder(const char* pos);
+
+ ~StringBuilder();
+
+ // Swaps the contents of |other| with this.
+ void Swap(StringBuilder* other);
+
+ // Either increases the |length_| of the string or copies the character if
+ // the StringBuilder has been converted. |c| must be in the basic ASCII
+ // plane; all other characters need to be in UTF-8 units, appended with
+ // AppendString below.
+ void Append(const char& c);
+
+ // Appends a string to the std::string. Must be Convert()ed to use.
+ void AppendString(const std::string& str);
+
+ // Converts the builder from its default StringPiece to a full std::string,
+ // performing a copy. Once a builder is converted, it cannot be made a
+ // StringPiece again.
+ void Convert();
+
+ // Returns whether the builder can be converted to a StringPiece.
+ bool CanBeStringPiece() const;
+
+ // Returns the StringPiece representation. Returns an empty piece if it
+ // cannot be converted.
+ StringPiece AsStringPiece();
+
+ // Returns the builder as a std::string.
+ const std::string& AsString();
+
+ private:
+ // The beginning of the input string.
+ const char* pos_;
+
+ // Number of bytes in |pos_| that make up the string being built.
+ size_t length_;
+
+ // The copied string representation. NULL until Convert() is called.
+ // Strong. std::unique_ptr<T> has too much of an overhead here.
+ std::string* string_;
+ };
+
+ // Quick check that the stream has capacity to consume |length| more bytes.
+ bool CanConsume(int length);
+
+ // The basic way to consume a single character in the stream. Consumes one
+ // byte of the input stream and returns a pointer to the rest of it.
+ const char* NextChar();
+
+ // Performs the equivalent of NextChar N times.
+ void NextNChars(int n);
+
+ // Skips over whitespace and comments to find the next token in the stream.
+ // This does not advance the parser for non-whitespace or comment chars.
+ Token GetNextToken();
+
+ // Consumes whitespace characters and comments until the next
+ // non-whitespace, non-comment character is encountered.
+ void EatWhitespaceAndComments();
+ // Helper function that consumes a comment, assuming that the parser is
+ // currently wound to a '/'.
+ bool EatComment();
+
+ // Calls GetNextToken() and then ParseToken(). Caller owns the result.
+ Value* ParseNextToken();
+
+ // Takes a token that represents the start of a Value ("a structural token"
+ // in RFC terms) and consumes it, returning the result as an object the
+ // caller owns.
+ Value* ParseToken(Token token);
+
+ // Assuming that the parser is currently wound to '{', this parses a JSON
+ // object into a DictionaryValue.
+ Value* ConsumeDictionary();
+
+ // Assuming that the parser is wound to '[', this parses a JSON list into a
+ // ListValue.
+ Value* ConsumeList();
+
+ // Calls through ConsumeStringRaw and wraps it in a value.
+ Value* ConsumeString();
+
+ // Assuming that the parser is wound to a double quote, this parses a string,
+ // decoding any escape sequences and converts UTF-16 to UTF-8. Returns true on
+ // success and Swap()s the result into |out|. Returns false on failure with
+ // error information set.
+ bool ConsumeStringRaw(StringBuilder* out);
+ // Helper function for ConsumeStringRaw() that consumes the next four or 10
+ // bytes (parser is wound to the first character of a HEX sequence, with the
+ // potential for consuming another \uXXXX for a surrogate). Returns true on
+ // success and places the UTF8 code units in |dest_string|, and false on
+ // failure.
+ bool DecodeUTF16(std::string* dest_string);
+ // Helper function for ConsumeStringRaw() that takes a single code point,
+ // decodes it into UTF-8 units, and appends it to the given builder. The
+ // point must be valid.
+ void DecodeUTF8(const int32_t& point, StringBuilder* dest);
+
+ // Assuming that the parser is wound to the start of a valid JSON number,
+ // this parses and converts it to either an int or double value.
+ Value* ConsumeNumber();
+ // Helper that reads characters that are ints. Returns true if a number was
+ // read and false on error.
+ bool ReadInt(bool allow_leading_zeros);
+
+ // Consumes the literal values of |true|, |false|, and |null|, assuming the
+ // parser is wound to the first character of any of those.
+ Value* ConsumeLiteral();
+
+ // Compares two string buffers of a given length.
+ static bool StringsAreEqual(const char* left, const char* right, size_t len);
+
+ // Sets the error information to |code| at the current column, based on
+ // |index_| and |index_last_line_|, with an optional positive/negative
+ // adjustment by |column_adjust|.
+ void ReportError(JSONReader::JsonParseError code, int column_adjust);
+
+ // Given the line and column number of an error, formats one of the error
+ // message constants from json_reader.h for human display.
+ static std::string FormatErrorMessage(int line, int column,
+ const std::string& description);
+
+ // base::JSONParserOptions that control parsing.
+ const int options_;
+
+ // Pointer to the start of the input data.
+ const char* start_pos_;
+
+ // Pointer to the current position in the input data. Equivalent to
+ // |start_pos_ + index_|.
+ const char* pos_;
+
+ // Pointer to the last character of the input data.
+ const char* end_pos_;
+
+ // The index in the input stream to which the parser is wound.
+ int index_;
+
+ // The number of times the parser has recursed (current stack depth).
+ int stack_depth_;
+
+ // The line number that the parser is at currently.
+ int line_number_;
+
+ // The last value of |index_| on the previous line.
+ int index_last_line_;
+
+ // Error information.
+ JSONReader::JsonParseError error_code_;
+ int error_line_;
+ int error_column_;
+
+ friend class JSONParserTest;
+ FRIEND_TEST_ALL_PREFIXES(JSONParserTest, NextChar);
+ FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeDictionary);
+ FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeList);
+ FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeString);
+ FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeLiterals);
+ FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeNumbers);
+ FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ErrorMessages);
+
+ DISALLOW_COPY_AND_ASSIGN(JSONParser);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_JSON_JSON_PARSER_H_
diff --git a/libchrome/base/json/json_parser_unittest.cc b/libchrome/base/json/json_parser_unittest.cc
new file mode 100644
index 0000000..30255ca
--- /dev/null
+++ b/libchrome/base/json/json_parser_unittest.cc
@@ -0,0 +1,327 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_parser.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/json/json_reader.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+class JSONParserTest : public testing::Test {
+ public:
+ JSONParser* NewTestParser(const std::string& input) {
+ JSONParser* parser = new JSONParser(JSON_PARSE_RFC);
+ parser->start_pos_ = input.data();
+ parser->pos_ = parser->start_pos_;
+ parser->end_pos_ = parser->start_pos_ + input.length();
+ return parser;
+ }
+
+ void TestLastThree(JSONParser* parser) {
+ EXPECT_EQ(',', *parser->NextChar());
+ EXPECT_EQ('|', *parser->NextChar());
+ EXPECT_EQ('\0', *parser->NextChar());
+ EXPECT_EQ(parser->end_pos_, parser->pos_);
+ }
+};
+
+TEST_F(JSONParserTest, NextChar) {
+ std::string input("Hello world");
+ std::unique_ptr<JSONParser> parser(NewTestParser(input));
+
+ EXPECT_EQ('H', *parser->pos_);
+ for (size_t i = 1; i < input.length(); ++i) {
+ EXPECT_EQ(input[i], *parser->NextChar());
+ }
+ EXPECT_EQ(parser->end_pos_, parser->NextChar());
+}
+
+TEST_F(JSONParserTest, ConsumeString) {
+ std::string input("\"test\",|");
+ std::unique_ptr<JSONParser> parser(NewTestParser(input));
+ std::unique_ptr<Value> value(parser->ConsumeString());
+ EXPECT_EQ('"', *parser->pos_);
+
+ TestLastThree(parser.get());
+
+ ASSERT_TRUE(value.get());
+ std::string str;
+ EXPECT_TRUE(value->GetAsString(&str));
+ EXPECT_EQ("test", str);
+}
+
+TEST_F(JSONParserTest, ConsumeList) {
+ std::string input("[true, false],|");
+ std::unique_ptr<JSONParser> parser(NewTestParser(input));
+ std::unique_ptr<Value> value(parser->ConsumeList());
+ EXPECT_EQ(']', *parser->pos_);
+
+ TestLastThree(parser.get());
+
+ ASSERT_TRUE(value.get());
+ base::ListValue* list;
+ EXPECT_TRUE(value->GetAsList(&list));
+ EXPECT_EQ(2u, list->GetSize());
+}
+
+TEST_F(JSONParserTest, ConsumeDictionary) {
+ std::string input("{\"abc\":\"def\"},|");
+ std::unique_ptr<JSONParser> parser(NewTestParser(input));
+ std::unique_ptr<Value> value(parser->ConsumeDictionary());
+ EXPECT_EQ('}', *parser->pos_);
+
+ TestLastThree(parser.get());
+
+ ASSERT_TRUE(value.get());
+ base::DictionaryValue* dict;
+ EXPECT_TRUE(value->GetAsDictionary(&dict));
+ std::string str;
+ EXPECT_TRUE(dict->GetString("abc", &str));
+ EXPECT_EQ("def", str);
+}
+
+TEST_F(JSONParserTest, ConsumeLiterals) {
+ // Literal |true|.
+ std::string input("true,|");
+ std::unique_ptr<JSONParser> parser(NewTestParser(input));
+ std::unique_ptr<Value> value(parser->ConsumeLiteral());
+ EXPECT_EQ('e', *parser->pos_);
+
+ TestLastThree(parser.get());
+
+ ASSERT_TRUE(value.get());
+ bool bool_value = false;
+ EXPECT_TRUE(value->GetAsBoolean(&bool_value));
+ EXPECT_TRUE(bool_value);
+
+ // Literal |false|.
+ input = "false,|";
+ parser.reset(NewTestParser(input));
+ value.reset(parser->ConsumeLiteral());
+ EXPECT_EQ('e', *parser->pos_);
+
+ TestLastThree(parser.get());
+
+ ASSERT_TRUE(value.get());
+ EXPECT_TRUE(value->GetAsBoolean(&bool_value));
+ EXPECT_FALSE(bool_value);
+
+ // Literal |null|.
+ input = "null,|";
+ parser.reset(NewTestParser(input));
+ value.reset(parser->ConsumeLiteral());
+ EXPECT_EQ('l', *parser->pos_);
+
+ TestLastThree(parser.get());
+
+ ASSERT_TRUE(value.get());
+ EXPECT_TRUE(value->IsType(Value::TYPE_NULL));
+}
+
+TEST_F(JSONParserTest, ConsumeNumbers) {
+ // Integer.
+ std::string input("1234,|");
+ std::unique_ptr<JSONParser> parser(NewTestParser(input));
+ std::unique_ptr<Value> value(parser->ConsumeNumber());
+ EXPECT_EQ('4', *parser->pos_);
+
+ TestLastThree(parser.get());
+
+ ASSERT_TRUE(value.get());
+ int number_i;
+ EXPECT_TRUE(value->GetAsInteger(&number_i));
+ EXPECT_EQ(1234, number_i);
+
+ // Negative integer.
+ input = "-1234,|";
+ parser.reset(NewTestParser(input));
+ value.reset(parser->ConsumeNumber());
+ EXPECT_EQ('4', *parser->pos_);
+
+ TestLastThree(parser.get());
+
+ ASSERT_TRUE(value.get());
+ EXPECT_TRUE(value->GetAsInteger(&number_i));
+ EXPECT_EQ(-1234, number_i);
+
+ // Double.
+ input = "12.34,|";
+ parser.reset(NewTestParser(input));
+ value.reset(parser->ConsumeNumber());
+ EXPECT_EQ('4', *parser->pos_);
+
+ TestLastThree(parser.get());
+
+ ASSERT_TRUE(value.get());
+ double number_d;
+ EXPECT_TRUE(value->GetAsDouble(&number_d));
+ EXPECT_EQ(12.34, number_d);
+
+ // Scientific.
+ input = "42e3,|";
+ parser.reset(NewTestParser(input));
+ value.reset(parser->ConsumeNumber());
+ EXPECT_EQ('3', *parser->pos_);
+
+ TestLastThree(parser.get());
+
+ ASSERT_TRUE(value.get());
+ EXPECT_TRUE(value->GetAsDouble(&number_d));
+ EXPECT_EQ(42000, number_d);
+
+ // Negative scientific.
+ input = "314159e-5,|";
+ parser.reset(NewTestParser(input));
+ value.reset(parser->ConsumeNumber());
+ EXPECT_EQ('5', *parser->pos_);
+
+ TestLastThree(parser.get());
+
+ ASSERT_TRUE(value.get());
+ EXPECT_TRUE(value->GetAsDouble(&number_d));
+ EXPECT_EQ(3.14159, number_d);
+
+ // Positive scientific.
+ input = "0.42e+3,|";
+ parser.reset(NewTestParser(input));
+ value.reset(parser->ConsumeNumber());
+ EXPECT_EQ('3', *parser->pos_);
+
+ TestLastThree(parser.get());
+
+ ASSERT_TRUE(value.get());
+ EXPECT_TRUE(value->GetAsDouble(&number_d));
+ EXPECT_EQ(420, number_d);
+}
+
+TEST_F(JSONParserTest, ErrorMessages) {
+ // Error strings should not be modified in case of success.
+ std::string error_message;
+ int error_code = 0;
+ std::unique_ptr<Value> root = JSONReader::ReadAndReturnError(
+ "[42]", JSON_PARSE_RFC, &error_code, &error_message);
+ EXPECT_TRUE(error_message.empty());
+ EXPECT_EQ(0, error_code);
+
+ // Test line and column counting
+ const char big_json[] = "[\n0,\n1,\n2,\n3,4,5,6 7,\n8,\n9\n]";
+ // error here ----------------------------------^
+ root = JSONReader::ReadAndReturnError(big_json, JSON_PARSE_RFC, &error_code,
+ &error_message);
+ EXPECT_FALSE(root.get());
+ EXPECT_EQ(JSONParser::FormatErrorMessage(5, 10, JSONReader::kSyntaxError),
+ error_message);
+ EXPECT_EQ(JSONReader::JSON_SYNTAX_ERROR, error_code);
+
+ error_code = 0;
+ error_message = "";
+ // Test line and column counting with "\r\n" line ending
+ const char big_json_crlf[] =
+ "[\r\n0,\r\n1,\r\n2,\r\n3,4,5,6 7,\r\n8,\r\n9\r\n]";
+ // error here ----------------------^
+ root = JSONReader::ReadAndReturnError(big_json_crlf, JSON_PARSE_RFC,
+ &error_code, &error_message);
+ EXPECT_FALSE(root.get());
+ EXPECT_EQ(JSONParser::FormatErrorMessage(5, 10, JSONReader::kSyntaxError),
+ error_message);
+ EXPECT_EQ(JSONReader::JSON_SYNTAX_ERROR, error_code);
+
+ // Test each of the error conditions
+ root = JSONReader::ReadAndReturnError("{},{}", JSON_PARSE_RFC, &error_code,
+ &error_message);
+ EXPECT_FALSE(root.get());
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 3,
+ JSONReader::kUnexpectedDataAfterRoot), error_message);
+ EXPECT_EQ(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, error_code);
+
+ std::string nested_json;
+ for (int i = 0; i < 101; ++i) {
+ nested_json.insert(nested_json.begin(), '[');
+ nested_json.append(1, ']');
+ }
+ root = JSONReader::ReadAndReturnError(nested_json, JSON_PARSE_RFC,
+ &error_code, &error_message);
+ EXPECT_FALSE(root.get());
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 100, JSONReader::kTooMuchNesting),
+ error_message);
+ EXPECT_EQ(JSONReader::JSON_TOO_MUCH_NESTING, error_code);
+
+ root = JSONReader::ReadAndReturnError("[1,]", JSON_PARSE_RFC, &error_code,
+ &error_message);
+ EXPECT_FALSE(root.get());
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 4, JSONReader::kTrailingComma),
+ error_message);
+ EXPECT_EQ(JSONReader::JSON_TRAILING_COMMA, error_code);
+
+ root = JSONReader::ReadAndReturnError("{foo:\"bar\"}", JSON_PARSE_RFC,
+ &error_code, &error_message);
+ EXPECT_FALSE(root.get());
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 2,
+ JSONReader::kUnquotedDictionaryKey), error_message);
+ EXPECT_EQ(JSONReader::JSON_UNQUOTED_DICTIONARY_KEY, error_code);
+
+ root = JSONReader::ReadAndReturnError("{\"foo\":\"bar\",}", JSON_PARSE_RFC,
+ &error_code, &error_message);
+ EXPECT_FALSE(root.get());
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 14, JSONReader::kTrailingComma),
+ error_message);
+
+ root = JSONReader::ReadAndReturnError("[nu]", JSON_PARSE_RFC, &error_code,
+ &error_message);
+ EXPECT_FALSE(root.get());
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 2, JSONReader::kSyntaxError),
+ error_message);
+ EXPECT_EQ(JSONReader::JSON_SYNTAX_ERROR, error_code);
+
+ root = JSONReader::ReadAndReturnError("[\"xxx\\xq\"]", JSON_PARSE_RFC,
+ &error_code, &error_message);
+ EXPECT_FALSE(root.get());
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 7, JSONReader::kInvalidEscape),
+ error_message);
+ EXPECT_EQ(JSONReader::JSON_INVALID_ESCAPE, error_code);
+
+ root = JSONReader::ReadAndReturnError("[\"xxx\\uq\"]", JSON_PARSE_RFC,
+ &error_code, &error_message);
+ EXPECT_FALSE(root.get());
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 7, JSONReader::kInvalidEscape),
+ error_message);
+ EXPECT_EQ(JSONReader::JSON_INVALID_ESCAPE, error_code);
+
+ root = JSONReader::ReadAndReturnError("[\"xxx\\q\"]", JSON_PARSE_RFC,
+ &error_code, &error_message);
+ EXPECT_FALSE(root.get());
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 7, JSONReader::kInvalidEscape),
+ error_message);
+ EXPECT_EQ(JSONReader::JSON_INVALID_ESCAPE, error_code);
+}
+
+TEST_F(JSONParserTest, Decode4ByteUtf8Char) {
+ // This test string contains a 4 byte unicode character (a smiley!) that the
+ // reader should be able to handle (the character is \xf0\x9f\x98\x87).
+ const char kUtf8Data[] =
+ "[\"😇\",[],[],[],{\"google:suggesttype\":[]}]";
+ std::string error_message;
+ int error_code = 0;
+ std::unique_ptr<Value> root = JSONReader::ReadAndReturnError(
+ kUtf8Data, JSON_PARSE_RFC, &error_code, &error_message);
+ EXPECT_TRUE(root.get()) << error_message;
+}
+
+TEST_F(JSONParserTest, DecodeUnicodeNonCharacter) {
+ // Tests Unicode code points (encoded as escaped UTF-16) that are not valid
+ // characters.
+ EXPECT_FALSE(JSONReader::Read("[\"\\ufdd0\"]"));
+ EXPECT_FALSE(JSONReader::Read("[\"\\ufffe\"]"));
+ EXPECT_FALSE(JSONReader::Read("[\"\\ud83f\\udffe\"]"));
+}
+
+} // namespace internal
+} // namespace base
diff --git a/libchrome/base/json/json_reader.cc b/libchrome/base/json/json_reader.cc
new file mode 100644
index 0000000..4ff7496
--- /dev/null
+++ b/libchrome/base/json/json_reader.cc
@@ -0,0 +1,121 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_reader.h"
+
+#include "base/json/json_parser.h"
+#include "base/logging.h"
+#include "base/values.h"
+
+namespace base {
+
+// Values 1000 and above are used by JSONFileValueSerializer::JsonFileError.
+static_assert(JSONReader::JSON_PARSE_ERROR_COUNT < 1000,
+ "JSONReader error out of bounds");
+
+const char JSONReader::kInvalidEscape[] =
+ "Invalid escape sequence.";
+const char JSONReader::kSyntaxError[] =
+ "Syntax error.";
+const char JSONReader::kUnexpectedToken[] =
+ "Unexpected token.";
+const char JSONReader::kTrailingComma[] =
+ "Trailing comma not allowed.";
+const char JSONReader::kTooMuchNesting[] =
+ "Too much nesting.";
+const char JSONReader::kUnexpectedDataAfterRoot[] =
+ "Unexpected data after root element.";
+const char JSONReader::kUnsupportedEncoding[] =
+ "Unsupported encoding. JSON must be UTF-8.";
+const char JSONReader::kUnquotedDictionaryKey[] =
+ "Dictionary keys must be quoted.";
+
+JSONReader::JSONReader()
+ : JSONReader(JSON_PARSE_RFC) {
+}
+
+JSONReader::JSONReader(int options)
+ : parser_(new internal::JSONParser(options)) {
+}
+
+JSONReader::~JSONReader() {
+}
+
+// static
+std::unique_ptr<Value> JSONReader::Read(StringPiece json) {
+ internal::JSONParser parser(JSON_PARSE_RFC);
+ return parser.Parse(json);
+}
+
+// static
+std::unique_ptr<Value> JSONReader::Read(StringPiece json, int options) {
+ internal::JSONParser parser(options);
+ return parser.Parse(json);
+}
+
+
+// static
+std::unique_ptr<Value> JSONReader::ReadAndReturnError(
+ const StringPiece& json,
+ int options,
+ int* error_code_out,
+ std::string* error_msg_out,
+ int* error_line_out,
+ int* error_column_out) {
+ internal::JSONParser parser(options);
+ std::unique_ptr<Value> root(parser.Parse(json));
+ if (!root) {
+ if (error_code_out)
+ *error_code_out = parser.error_code();
+ if (error_msg_out)
+ *error_msg_out = parser.GetErrorMessage();
+ if (error_line_out)
+ *error_line_out = parser.error_line();
+ if (error_column_out)
+ *error_column_out = parser.error_column();
+ }
+
+ return root;
+}
+
+// static
+std::string JSONReader::ErrorCodeToString(JsonParseError error_code) {
+ switch (error_code) {
+ case JSON_NO_ERROR:
+ return std::string();
+ case JSON_INVALID_ESCAPE:
+ return kInvalidEscape;
+ case JSON_SYNTAX_ERROR:
+ return kSyntaxError;
+ case JSON_UNEXPECTED_TOKEN:
+ return kUnexpectedToken;
+ case JSON_TRAILING_COMMA:
+ return kTrailingComma;
+ case JSON_TOO_MUCH_NESTING:
+ return kTooMuchNesting;
+ case JSON_UNEXPECTED_DATA_AFTER_ROOT:
+ return kUnexpectedDataAfterRoot;
+ case JSON_UNSUPPORTED_ENCODING:
+ return kUnsupportedEncoding;
+ case JSON_UNQUOTED_DICTIONARY_KEY:
+ return kUnquotedDictionaryKey;
+ default:
+ NOTREACHED();
+ return std::string();
+ }
+}
+
+std::unique_ptr<Value> JSONReader::ReadToValue(StringPiece json) {
+ return parser_->Parse(json);
+}
+
+JSONReader::JsonParseError JSONReader::error_code() const {
+ return parser_->error_code();
+}
+
+std::string JSONReader::GetErrorMessage() const {
+ return parser_->GetErrorMessage();
+}
+
+} // namespace base
diff --git a/libchrome/base/json/json_reader.h b/libchrome/base/json/json_reader.h
new file mode 100644
index 0000000..a954821
--- /dev/null
+++ b/libchrome/base/json/json_reader.h
@@ -0,0 +1,136 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// A JSON parser. Converts strings of JSON into a Value object (see
+// base/values.h).
+// http://www.ietf.org/rfc/rfc4627.txt?number=4627
+//
+// Known limitations/deviations from the RFC:
+// - Only knows how to parse ints within the range of a signed 32 bit int and
+// decimal numbers within a double.
+// - Assumes input is encoded as UTF8. The spec says we should allow UTF-16
+// (BE or LE) and UTF-32 (BE or LE) as well.
+// - We limit nesting to 100 levels to prevent stack overflow (this is allowed
+// by the RFC).
+// - A Unicode FAQ ("http://unicode.org/faq/utf_bom.html") writes a data
+// stream may start with a Unicode Byte-Order-Mark (U+FEFF), i.e. the input
+// UTF-8 string for the JSONReader::JsonToValue() function may start with a
+// UTF-8 BOM (0xEF, 0xBB, 0xBF).
+// To avoid the function from mis-treating a UTF-8 BOM as an invalid
+// character, the function skips a Unicode BOM at the beginning of the
+// Unicode string (converted from the input UTF-8 string) before parsing it.
+//
+// TODO(tc): Add a parsing option to to relax object keys being wrapped in
+// double quotes
+// TODO(tc): Add an option to disable comment stripping
+
+#ifndef BASE_JSON_JSON_READER_H_
+#define BASE_JSON_JSON_READER_H_
+
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+class Value;
+
+namespace internal {
+class JSONParser;
+}
+
+// Bitmask of parsing options. Combine values with bitwise OR and pass the
+// result as the |options| int accepted by JSONReader.
+enum JSONParserOptions {
+  // Parses the input strictly according to RFC 4627, except for where noted
+  // above.
+  JSON_PARSE_RFC = 0,
+
+  // Allows commas to exist after the last element in structures.
+  JSON_ALLOW_TRAILING_COMMAS = 1 << 0,
+
+  // The parser can perform optimizations by placing hidden data in the root of
+  // the JSON object, which speeds up certain operations on children. However,
+  // if the child is Remove()d from root, it would result in use-after-free
+  // unless it is DeepCopy()ed or this option is used.
+  JSON_DETACHABLE_CHILDREN = 1 << 1,
+};
+
+// JSON parser front-end. The static Read*() methods are one-shot and
+// stateless; construct an instance and use ReadToValue() when error
+// details must be queried after a failed parse.
+class BASE_EXPORT JSONReader {
+ public:
+  // Error codes during parsing.
+  enum JsonParseError {
+    JSON_NO_ERROR = 0,
+    JSON_INVALID_ESCAPE,
+    JSON_SYNTAX_ERROR,
+    JSON_UNEXPECTED_TOKEN,
+    JSON_TRAILING_COMMA,
+    JSON_TOO_MUCH_NESTING,
+    JSON_UNEXPECTED_DATA_AFTER_ROOT,
+    JSON_UNSUPPORTED_ENCODING,
+    JSON_UNQUOTED_DICTIONARY_KEY,
+    // Not a real error: count of the entries above. Keep last.
+    JSON_PARSE_ERROR_COUNT
+  };
+
+  // String versions of parse error codes.
+  static const char kInvalidEscape[];
+  static const char kSyntaxError[];
+  static const char kUnexpectedToken[];
+  static const char kTrailingComma[];
+  static const char kTooMuchNesting[];
+  static const char kUnexpectedDataAfterRoot[];
+  static const char kUnsupportedEncoding[];
+  static const char kUnquotedDictionaryKey[];
+
+  // Constructs a reader with the default options, JSON_PARSE_RFC.
+  JSONReader();
+
+  // Constructs a reader with custom options.
+  explicit JSONReader(int options);
+
+  ~JSONReader();
+
+  // Reads and parses |json|, returning a Value.
+  // If |json| is not a properly formed JSON string, returns nullptr.
+  // Wrap this in base::FooValue::From() to check the Value is of type Foo and
+  // convert to a FooValue at the same time.
+  static std::unique_ptr<Value> Read(StringPiece json);
+
+  // Same as Read() above, but the parser respects the given |options|.
+  static std::unique_ptr<Value> Read(StringPiece json, int options);
+
+  // Reads and parses |json| like Read(). |error_code_out| and |error_msg_out|
+  // are optional. If specified and nullptr is returned, they will be populated
+  // an error code and a formatted error message (including error location if
+  // appropriate). Otherwise, they will be unmodified.
+  static std::unique_ptr<Value> ReadAndReturnError(
+      const StringPiece& json,
+      int options,  // JSONParserOptions
+      int* error_code_out,
+      std::string* error_msg_out,
+      int* error_line_out = nullptr,
+      int* error_column_out = nullptr);
+
+  // Converts a JSON parse error code into a human readable message.
+  // Returns an empty string if error_code is JSON_NO_ERROR.
+  static std::string ErrorCodeToString(JsonParseError error_code);
+
+  // Non-static version of Read() above.
+  std::unique_ptr<Value> ReadToValue(StringPiece json);
+
+  // Returns the error code if the last call to ReadToValue() failed.
+  // Returns JSON_NO_ERROR otherwise.
+  JsonParseError error_code() const;
+
+  // Converts error_code_ to a human-readable string, including line and column
+  // numbers if appropriate.
+  std::string GetErrorMessage() const;
+
+ private:
+  // Holds the parser (and its error state) backing the non-static
+  // ReadToValue()/error_code()/GetErrorMessage() API.
+  std::unique_ptr<internal::JSONParser> parser_;
+};
+
+} // namespace base
+
+#endif // BASE_JSON_JSON_READER_H_
diff --git a/libchrome/base/json/json_reader_unittest.cc b/libchrome/base/json/json_reader_unittest.cc
new file mode 100644
index 0000000..84732c4
--- /dev/null
+++ b/libchrome/base/json/json_reader_unittest.cc
@@ -0,0 +1,683 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_reader.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#if !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
+#include "base/base_paths.h"
+#include "base/path_service.h"
+#endif
+
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/values.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// End-to-end parsing coverage for JSONReader: whitespace, literals, comments,
+// number formats, string escapes, arrays, objects, nesting limits,
+// UTF-8/UTF-16 handling, and literal root values.
+TEST(JSONReaderTest, Reading) {
+  {
+    // some whitespace checking
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("   null   ");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
+  }
+
+  {
+    // Invalid JSON string
+    EXPECT_FALSE(JSONReader().ReadToValue("nu"));
+  }
+
+  {
+    // Simple bool
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("true  ");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_BOOLEAN));
+  }
+
+  {
+    // Embedded comment
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("/* comment */null");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
+    root = JSONReader().ReadToValue("40 /* comment */");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+    root = JSONReader().ReadToValue("true // comment");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_BOOLEAN));
+    root = JSONReader().ReadToValue("/* comment */\"sample string\"");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+    std::string value;
+    EXPECT_TRUE(root->GetAsString(&value));
+    EXPECT_EQ("sample string", value);
+    std::unique_ptr<ListValue> list = ListValue::From(
+        JSONReader().ReadToValue("[1, /* comment, 2 ] */ \n 3]"));
+    ASSERT_TRUE(list);
+    EXPECT_EQ(2u, list->GetSize());
+    int int_val = 0;
+    EXPECT_TRUE(list->GetInteger(0, &int_val));
+    EXPECT_EQ(1, int_val);
+    EXPECT_TRUE(list->GetInteger(1, &int_val));
+    EXPECT_EQ(3, int_val);
+    list = ListValue::From(JSONReader().ReadToValue("[1, /*a*/2, 3]"));
+    ASSERT_TRUE(list);
+    EXPECT_EQ(3u, list->GetSize());
+    root = JSONReader().ReadToValue("/* comment **/42");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+    EXPECT_TRUE(root->GetAsInteger(&int_val));
+    EXPECT_EQ(42, int_val);
+    root = JSONReader().ReadToValue(
+        "/* comment **/\n"
+        "// */ 43\n"
+        "44");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+    EXPECT_TRUE(root->GetAsInteger(&int_val));
+    EXPECT_EQ(44, int_val);
+  }
+
+  {
+    // Test number formats
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("43");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+    int int_val = 0;
+    EXPECT_TRUE(root->GetAsInteger(&int_val));
+    EXPECT_EQ(43, int_val);
+  }
+
+  {
+    // According to RFC4627, oct, hex, and leading zeros are invalid JSON.
+    EXPECT_FALSE(JSONReader().ReadToValue("043"));
+    EXPECT_FALSE(JSONReader().ReadToValue("0x43"));
+    EXPECT_FALSE(JSONReader().ReadToValue("00"));
+  }
+
+  {
+    // Test 0 (which needs to be special cased because of the leading zero
+    // clause).
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("0");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+    int int_val = 1;
+    EXPECT_TRUE(root->GetAsInteger(&int_val));
+    EXPECT_EQ(0, int_val);
+  }
+
+  {
+    // Numbers that overflow ints should succeed, being internally promoted to
+    // storage as doubles
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("2147483648");
+    ASSERT_TRUE(root);
+    double double_val;
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+    double_val = 0.0;
+    EXPECT_TRUE(root->GetAsDouble(&double_val));
+    EXPECT_DOUBLE_EQ(2147483648.0, double_val);
+    root = JSONReader().ReadToValue("-2147483649");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+    double_val = 0.0;
+    EXPECT_TRUE(root->GetAsDouble(&double_val));
+    EXPECT_DOUBLE_EQ(-2147483649.0, double_val);
+  }
+
+  {
+    // Parse a double
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("43.1");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+    double double_val = 0.0;
+    EXPECT_TRUE(root->GetAsDouble(&double_val));
+    EXPECT_DOUBLE_EQ(43.1, double_val);
+
+    root = JSONReader().ReadToValue("4.3e-1");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+    double_val = 0.0;
+    EXPECT_TRUE(root->GetAsDouble(&double_val));
+    EXPECT_DOUBLE_EQ(.43, double_val);
+
+    root = JSONReader().ReadToValue("2.1e0");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+    double_val = 0.0;
+    EXPECT_TRUE(root->GetAsDouble(&double_val));
+    EXPECT_DOUBLE_EQ(2.1, double_val);
+
+    root = JSONReader().ReadToValue("2.1e+0001");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+    double_val = 0.0;
+    EXPECT_TRUE(root->GetAsDouble(&double_val));
+    EXPECT_DOUBLE_EQ(21.0, double_val);
+
+    root = JSONReader().ReadToValue("0.01");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+    double_val = 0.0;
+    EXPECT_TRUE(root->GetAsDouble(&double_val));
+    EXPECT_DOUBLE_EQ(0.01, double_val);
+
+    root = JSONReader().ReadToValue("1.00");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+    double_val = 0.0;
+    EXPECT_TRUE(root->GetAsDouble(&double_val));
+    EXPECT_DOUBLE_EQ(1.0, double_val);
+  }
+
+  {
+    // Fractional parts must have a digit before and after the decimal point.
+    EXPECT_FALSE(JSONReader().ReadToValue("1."));
+    EXPECT_FALSE(JSONReader().ReadToValue(".1"));
+    EXPECT_FALSE(JSONReader().ReadToValue("1.e10"));
+  }
+
+  {
+    // Exponent must have a digit following the 'e'.
+    EXPECT_FALSE(JSONReader().ReadToValue("1e"));
+    EXPECT_FALSE(JSONReader().ReadToValue("1E"));
+    EXPECT_FALSE(JSONReader().ReadToValue("1e1."));
+    EXPECT_FALSE(JSONReader().ReadToValue("1e1.0"));
+  }
+
+  {
+    // INF/-INF/NaN are not valid
+    EXPECT_FALSE(JSONReader().ReadToValue("1e1000"));
+    EXPECT_FALSE(JSONReader().ReadToValue("-1e1000"));
+    EXPECT_FALSE(JSONReader().ReadToValue("NaN"));
+    EXPECT_FALSE(JSONReader().ReadToValue("nan"));
+    EXPECT_FALSE(JSONReader().ReadToValue("inf"));
+  }
+
+  {
+    // Invalid number formats
+    EXPECT_FALSE(JSONReader().ReadToValue("4.3.1"));
+    EXPECT_FALSE(JSONReader().ReadToValue("4e3.1"));
+  }
+
+  {
+    // Test string parser
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("\"hello world\"");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+    std::string str_val;
+    EXPECT_TRUE(root->GetAsString(&str_val));
+    EXPECT_EQ("hello world", str_val);
+  }
+
+  {
+    // Empty string
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\"");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+    std::string str_val;
+    EXPECT_TRUE(root->GetAsString(&str_val));
+    EXPECT_EQ("", str_val);
+  }
+
+  {
+    // Test basic string escapes
+    std::unique_ptr<Value> root =
+        JSONReader().ReadToValue("\" \\\"\\\\\\/\\b\\f\\n\\r\\t\\v\"");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+    std::string str_val;
+    EXPECT_TRUE(root->GetAsString(&str_val));
+    EXPECT_EQ(" \"\\/\b\f\n\r\t\v", str_val);
+  }
+
+  {
+    // Test hex and unicode escapes including the null character.
+    std::unique_ptr<Value> root =
+        JSONReader().ReadToValue("\"\\x41\\x00\\u1234\"");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+    std::string str_val;
+    EXPECT_TRUE(root->GetAsString(&str_val));
+    EXPECT_EQ(std::wstring(L"A\0\x1234", 3), UTF8ToWide(str_val));
+  }
+
+  {
+    // Test invalid strings
+    EXPECT_FALSE(JSONReader().ReadToValue("\"no closing quote"));
+    EXPECT_FALSE(JSONReader().ReadToValue("\"\\z invalid escape char\""));
+    EXPECT_FALSE(JSONReader().ReadToValue("\"\\xAQ invalid hex code\""));
+    EXPECT_FALSE(JSONReader().ReadToValue("not enough hex chars\\x1\""));
+    EXPECT_FALSE(JSONReader().ReadToValue("\"not enough escape chars\\u123\""));
+    EXPECT_FALSE(
+        JSONReader().ReadToValue("\"extra backslash at end of input\\\""));
+  }
+
+  {
+    // Basic array
+    std::unique_ptr<ListValue> list =
+        ListValue::From(JSONReader::Read("[true, false, null]"));
+    ASSERT_TRUE(list);
+    EXPECT_EQ(3U, list->GetSize());
+
+    // Test with trailing comma.  Should be parsed the same as above.
+    std::unique_ptr<Value> root2 =
+        JSONReader::Read("[true, false, null, ]", JSON_ALLOW_TRAILING_COMMAS);
+    EXPECT_TRUE(list->Equals(root2.get()));
+  }
+
+  {
+    // Empty array
+    std::unique_ptr<ListValue> list = ListValue::From(JSONReader::Read("[]"));
+    ASSERT_TRUE(list);
+    EXPECT_EQ(0U, list->GetSize());
+  }
+
+  {
+    // Nested arrays
+    std::unique_ptr<ListValue> list = ListValue::From(
+        JSONReader::Read("[[true], [], [false, [], [null]], null]"));
+    ASSERT_TRUE(list);
+    EXPECT_EQ(4U, list->GetSize());
+
+    // Lots of trailing commas.
+    std::unique_ptr<Value> root2 =
+        JSONReader::Read("[[true], [], [false, [], [null, ]  , ], null,]",
+                         JSON_ALLOW_TRAILING_COMMAS);
+    EXPECT_TRUE(list->Equals(root2.get()));
+  }
+
+  {
+    // Invalid, missing close brace.
+    EXPECT_FALSE(JSONReader::Read("[[true], [], [false, [], [null]], null"));
+
+    // Invalid, too many commas
+    EXPECT_FALSE(JSONReader::Read("[true,, null]"));
+    EXPECT_FALSE(JSONReader::Read("[true,, null]", JSON_ALLOW_TRAILING_COMMAS));
+
+    // Invalid, no commas
+    EXPECT_FALSE(JSONReader::Read("[true null]"));
+
+    // Invalid, trailing comma
+    EXPECT_FALSE(JSONReader::Read("[true,]"));
+  }
+
+  {
+    // Valid if we set |allow_trailing_comma| to true.
+    std::unique_ptr<ListValue> list = ListValue::From(
+        JSONReader::Read("[true,]", JSON_ALLOW_TRAILING_COMMAS));
+    ASSERT_TRUE(list);
+    EXPECT_EQ(1U, list->GetSize());
+    Value* tmp_value = nullptr;
+    ASSERT_TRUE(list->Get(0, &tmp_value));
+    EXPECT_TRUE(tmp_value->IsType(Value::TYPE_BOOLEAN));
+    bool bool_value = false;
+    EXPECT_TRUE(tmp_value->GetAsBoolean(&bool_value));
+    EXPECT_TRUE(bool_value);
+  }
+
+  {
+    // Don't allow empty elements, even if |allow_trailing_comma| is
+    // true.
+    EXPECT_FALSE(JSONReader::Read("[,]", JSON_ALLOW_TRAILING_COMMAS));
+    EXPECT_FALSE(JSONReader::Read("[true,,]", JSON_ALLOW_TRAILING_COMMAS));
+    EXPECT_FALSE(JSONReader::Read("[,true,]", JSON_ALLOW_TRAILING_COMMAS));
+    EXPECT_FALSE(JSONReader::Read("[true,,false]", JSON_ALLOW_TRAILING_COMMAS));
+  }
+
+  {
+    // Test objects
+    std::unique_ptr<DictionaryValue> dict_val =
+        DictionaryValue::From(JSONReader::Read("{}"));
+    ASSERT_TRUE(dict_val);
+
+    dict_val = DictionaryValue::From(JSONReader::Read(
+        "{\"number\":9.87654321, \"null\":null , \"\\x53\" : \"str\" }"));
+    ASSERT_TRUE(dict_val);
+    double double_val = 0.0;
+    EXPECT_TRUE(dict_val->GetDouble("number", &double_val));
+    EXPECT_DOUBLE_EQ(9.87654321, double_val);
+    Value* null_val = nullptr;
+    ASSERT_TRUE(dict_val->Get("null", &null_val));
+    EXPECT_TRUE(null_val->IsType(Value::TYPE_NULL));
+    std::string str_val;
+    EXPECT_TRUE(dict_val->GetString("S", &str_val));
+    EXPECT_EQ("str", str_val);
+
+    std::unique_ptr<Value> root2 = JSONReader::Read(
+        "{\"number\":9.87654321, \"null\":null , \"\\x53\" : \"str\", }",
+        JSON_ALLOW_TRAILING_COMMAS);
+    ASSERT_TRUE(root2);
+    EXPECT_TRUE(dict_val->Equals(root2.get()));
+
+    // Test newline equivalence.
+    root2 = JSONReader::Read(
+        "{\n"
+        "  \"number\":9.87654321,\n"
+        "  \"null\":null,\n"
+        "  \"\\x53\":\"str\",\n"
+        "}\n",
+        JSON_ALLOW_TRAILING_COMMAS);
+    ASSERT_TRUE(root2);
+    EXPECT_TRUE(dict_val->Equals(root2.get()));
+
+    root2 = JSONReader::Read(
+        "{\r\n"
+        "  \"number\":9.87654321,\r\n"
+        "  \"null\":null,\r\n"
+        "  \"\\x53\":\"str\",\r\n"
+        "}\r\n",
+        JSON_ALLOW_TRAILING_COMMAS);
+    ASSERT_TRUE(root2);
+    EXPECT_TRUE(dict_val->Equals(root2.get()));
+  }
+
+  {
+    // Test nesting
+    std::unique_ptr<DictionaryValue> dict_val =
+        DictionaryValue::From(JSONReader::Read(
+            "{\"inner\":{\"array\":[true]},\"false\":false,\"d\":{}}"));
+    ASSERT_TRUE(dict_val);
+    DictionaryValue* inner_dict = nullptr;
+    ASSERT_TRUE(dict_val->GetDictionary("inner", &inner_dict));
+    ListValue* inner_array = nullptr;
+    ASSERT_TRUE(inner_dict->GetList("array", &inner_array));
+    EXPECT_EQ(1U, inner_array->GetSize());
+    bool bool_value = true;
+    EXPECT_TRUE(dict_val->GetBoolean("false", &bool_value));
+    EXPECT_FALSE(bool_value);
+    inner_dict = nullptr;
+    EXPECT_TRUE(dict_val->GetDictionary("d", &inner_dict));
+
+    std::unique_ptr<Value> root2 = JSONReader::Read(
+        "{\"inner\": {\"array\":[true] , },\"false\":false,\"d\":{},}",
+        JSON_ALLOW_TRAILING_COMMAS);
+    EXPECT_TRUE(dict_val->Equals(root2.get()));
+  }
+
+  {
+    // Test keys with periods
+    std::unique_ptr<DictionaryValue> dict_val = DictionaryValue::From(
+        JSONReader::Read("{\"a.b\":3,\"c\":2,\"d.e.f\":{\"g.h.i.j\":1}}"));
+    ASSERT_TRUE(dict_val);
+    int integer_value = 0;
+    EXPECT_TRUE(
+        dict_val->GetIntegerWithoutPathExpansion("a.b", &integer_value));
+    EXPECT_EQ(3, integer_value);
+    EXPECT_TRUE(dict_val->GetIntegerWithoutPathExpansion("c", &integer_value));
+    EXPECT_EQ(2, integer_value);
+    DictionaryValue* inner_dict = nullptr;
+    ASSERT_TRUE(
+        dict_val->GetDictionaryWithoutPathExpansion("d.e.f", &inner_dict));
+    EXPECT_EQ(1U, inner_dict->size());
+    EXPECT_TRUE(
+        inner_dict->GetIntegerWithoutPathExpansion("g.h.i.j", &integer_value));
+    EXPECT_EQ(1, integer_value);
+
+    dict_val =
+        DictionaryValue::From(JSONReader::Read("{\"a\":{\"b\":2},\"a.b\":1}"));
+    ASSERT_TRUE(dict_val);
+    EXPECT_TRUE(dict_val->GetInteger("a.b", &integer_value));
+    EXPECT_EQ(2, integer_value);
+    EXPECT_TRUE(
+        dict_val->GetIntegerWithoutPathExpansion("a.b", &integer_value));
+    EXPECT_EQ(1, integer_value);
+  }
+
+  {
+    // Invalid, no closing brace
+    EXPECT_FALSE(JSONReader::Read("{\"a\": true"));
+
+    // Invalid, keys must be quoted
+    EXPECT_FALSE(JSONReader::Read("{foo:true}"));
+
+    // Invalid, trailing comma
+    EXPECT_FALSE(JSONReader::Read("{\"a\":true,}"));
+
+    // Invalid, too many commas
+    EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}"));
+    EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}",
+                                  JSON_ALLOW_TRAILING_COMMAS));
+
+    // Invalid, no separator
+    EXPECT_FALSE(JSONReader::Read("{\"a\" \"b\"}"));
+
+    // Invalid, lone comma.
+    EXPECT_FALSE(JSONReader::Read("{,}"));
+    EXPECT_FALSE(JSONReader::Read("{,}", JSON_ALLOW_TRAILING_COMMAS));
+    EXPECT_FALSE(
+        JSONReader::Read("{\"a\":true,,}", JSON_ALLOW_TRAILING_COMMAS));
+    EXPECT_FALSE(JSONReader::Read("{,\"a\":true}", JSON_ALLOW_TRAILING_COMMAS));
+    EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}",
+                                  JSON_ALLOW_TRAILING_COMMAS));
+  }
+
+  {
+    // Test stack overflow
+    std::string evil(1000000, '[');
+    evil.append(std::string(1000000, ']'));
+    EXPECT_FALSE(JSONReader::Read(evil));
+  }
+
+  {
+    // A few thousand adjacent lists is fine.
+    std::string not_evil("[");
+    not_evil.reserve(15010);
+    for (int i = 0; i < 5000; ++i)
+      not_evil.append("[],");
+    not_evil.append("[]]");
+    std::unique_ptr<ListValue> list =
+        ListValue::From(JSONReader::Read(not_evil));
+    ASSERT_TRUE(list);
+    EXPECT_EQ(5001U, list->GetSize());
+  }
+
+  {
+    // Test utf8 encoded input
+    std::unique_ptr<Value> root =
+        JSONReader().ReadToValue("\"\xe7\xbd\x91\xe9\xa1\xb5\"");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+    std::string str_val;
+    EXPECT_TRUE(root->GetAsString(&str_val));
+    EXPECT_EQ(L"\x7f51\x9875", UTF8ToWide(str_val));
+
+    std::unique_ptr<DictionaryValue> dict_val =
+        DictionaryValue::From(JSONReader().ReadToValue(
+            "{\"path\": \"/tmp/\xc3\xa0\xc3\xa8\xc3\xb2.png\"}"));
+    ASSERT_TRUE(dict_val);
+    EXPECT_TRUE(dict_val->GetString("path", &str_val));
+    EXPECT_EQ("/tmp/\xC3\xA0\xC3\xA8\xC3\xB2.png", str_val);
+  }
+
+  {
+    // Test invalid utf8 encoded input
+    EXPECT_FALSE(JSONReader().ReadToValue("\"345\xb0\xa1\xb0\xa2\""));
+    EXPECT_FALSE(JSONReader().ReadToValue("\"123\xc0\x81\""));
+    EXPECT_FALSE(JSONReader().ReadToValue("\"abc\xc0\xae\""));
+  }
+
+  {
+    // Test utf16 encoded strings.
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\\u20ac3,14\"");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+    std::string str_val;
+    EXPECT_TRUE(root->GetAsString(&str_val));
+    EXPECT_EQ(
+        "\xe2\x82\xac"
+        "3,14",
+        str_val);
+
+    root = JSONReader().ReadToValue("\"\\ud83d\\udca9\\ud83d\\udc6c\"");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+    str_val.clear();
+    EXPECT_TRUE(root->GetAsString(&str_val));
+    EXPECT_EQ("\xf0\x9f\x92\xa9\xf0\x9f\x91\xac", str_val);
+  }
+
+  {
+    // Test invalid utf16 strings.
+    const char* const cases[] = {
+        "\"\\u123\"",          // Invalid scalar.
+        "\"\\ud83d\"",         // Invalid scalar.
+        "\"\\u$%@!\"",         // Invalid scalar.
+        "\"\\uzz89\"",         // Invalid scalar.
+        "\"\\ud83d\\udca\"",   // Invalid lower surrogate.
+        "\"\\ud83d\\ud83d\"",  // Invalid lower surrogate.
+        "\"\\ud83foo\"",       // No lower surrogate.
+        "\"\\ud83\\foo\""      // No lower surrogate.
+    };
+    std::unique_ptr<Value> root;
+    for (size_t i = 0; i < arraysize(cases); ++i) {
+      root = JSONReader().ReadToValue(cases[i]);
+      EXPECT_FALSE(root) << cases[i];
+    }
+  }
+
+  {
+    // Test literal root objects.
+    std::unique_ptr<Value> root = JSONReader::Read("null");
+    // Guard against a null deref if parsing fails (every other section
+    // asserts before dereferencing; this one previously did not).
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
+
+    root = JSONReader::Read("true");
+    ASSERT_TRUE(root);
+    bool bool_value;
+    EXPECT_TRUE(root->GetAsBoolean(&bool_value));
+    EXPECT_TRUE(bool_value);
+
+    root = JSONReader::Read("10");
+    ASSERT_TRUE(root);
+    int integer_value;
+    EXPECT_TRUE(root->GetAsInteger(&integer_value));
+    EXPECT_EQ(10, integer_value);
+
+    root = JSONReader::Read("\"root\"");
+    ASSERT_TRUE(root);
+    std::string str_val;
+    EXPECT_TRUE(root->GetAsString(&str_val));
+    EXPECT_EQ("root", str_val);
+  }
+}
+
+#if !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
+// Parses a fixture file that begins with a UTF-8 BOM (bom_feff.json) and
+// verifies the reader skips the BOM and yields a dictionary root.
+TEST(JSONReaderTest, ReadFromFile) {
+  FilePath path;
+  ASSERT_TRUE(PathService::Get(base::DIR_TEST_DATA, &path));
+  path = path.AppendASCII("json");
+  ASSERT_TRUE(base::PathExists(path));
+
+  std::string input;
+  ASSERT_TRUE(ReadFileToString(path.AppendASCII("bom_feff.json"), &input));
+
+  JSONReader reader;
+  std::unique_ptr<Value> root(reader.ReadToValue(input));
+  // Include the parser's message in the failure output for easier triage.
+  ASSERT_TRUE(root) << reader.GetErrorMessage();
+  EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
+}
+#endif // !__ANDROID__ && !__ANDROID_HOST__
+
+// Tests that the root of a JSON object can be deleted safely while its
+// children outlive it.
+TEST(JSONReaderTest, StringOptimizations) {
+  std::unique_ptr<Value> dict_literal_0;
+  std::unique_ptr<Value> dict_literal_1;
+  std::unique_ptr<Value> dict_string_0;
+  std::unique_ptr<Value> dict_string_1;
+  std::unique_ptr<Value> list_value_0;
+  std::unique_ptr<Value> list_value_1;
+
+  {
+    // Parse with JSON_DETACHABLE_CHILDREN so children removed below remain
+    // valid after |root| is destroyed at the end of this scope.
+    std::unique_ptr<Value> root = JSONReader::Read(
+        "{"
+        "  \"test\": {"
+        "    \"foo\": true,"
+        "    \"bar\": 3.14,"
+        "    \"baz\": \"bat\","
+        "    \"moo\": \"cow\""
+        "  },"
+        "  \"list\": ["
+        "    \"a\","
+        "    \"b\""
+        "  ]"
+        "}",
+        JSON_DETACHABLE_CHILDREN);
+    ASSERT_TRUE(root);
+
+    DictionaryValue* root_dict = nullptr;
+    ASSERT_TRUE(root->GetAsDictionary(&root_dict));
+
+    DictionaryValue* dict = nullptr;
+    ListValue* list = nullptr;
+
+    ASSERT_TRUE(root_dict->GetDictionary("test", &dict));
+    ASSERT_TRUE(root_dict->GetList("list", &list));
+
+    // Detach each child, transferring ownership out of the tree.
+    ASSERT_TRUE(dict->Remove("foo", &dict_literal_0));
+    ASSERT_TRUE(dict->Remove("bar", &dict_literal_1));
+    ASSERT_TRUE(dict->Remove("baz", &dict_string_0));
+    ASSERT_TRUE(dict->Remove("moo", &dict_string_1));
+
+    ASSERT_EQ(2u, list->GetSize());
+    ASSERT_TRUE(list->Remove(0, &list_value_0));
+    ASSERT_TRUE(list->Remove(0, &list_value_1));
+  }
+
+  // |root| has been destroyed; the detached values must still be readable.
+  bool b = false;
+  double d = 0;
+  std::string s;
+
+  EXPECT_TRUE(dict_literal_0->GetAsBoolean(&b));
+  EXPECT_TRUE(b);
+
+  EXPECT_TRUE(dict_literal_1->GetAsDouble(&d));
+  EXPECT_EQ(3.14, d);
+
+  EXPECT_TRUE(dict_string_0->GetAsString(&s));
+  EXPECT_EQ("bat", s);
+
+  EXPECT_TRUE(dict_string_1->GetAsString(&s));
+  EXPECT_EQ("cow", s);
+
+  EXPECT_TRUE(list_value_0->GetAsString(&s));
+  EXPECT_EQ("a", s);
+  EXPECT_TRUE(list_value_1->GetAsString(&s));
+  EXPECT_EQ("b", s);
+}
+
+// A smattering of invalid JSON designed to test specific portions of the
+// parser implementation against buffer overflow. Best run with DCHECKs so
+// that the one in NextChar fires.
+TEST(JSONReaderTest, InvalidSanity) {
+  // Each entry is truncated or malformed input; all must fail with a
+  // non-JSON_NO_ERROR code and a non-empty message.
+  const char* const kInvalidJson[] = {
+      "/* test *", "{\"foo\"", "{\"foo\":", "  [", "\"\\u123g\"", "{\n\"eh:\n}",
+  };
+
+  for (size_t i = 0; i < arraysize(kInvalidJson); ++i) {
+    JSONReader reader;
+    LOG(INFO) << "Sanity test " << i << ": <" << kInvalidJson[i] << ">";
+    EXPECT_FALSE(reader.ReadToValue(kInvalidJson[i]));
+    EXPECT_NE(JSONReader::JSON_NO_ERROR, reader.error_code());
+    EXPECT_NE("", reader.GetErrorMessage());
+  }
+}
+
+TEST(JSONReaderTest, IllegalTrailingNull) {
+  // sizeof(json) includes the final '\0', so |json_string| is 7 bytes: a
+  // valid string root followed by an embedded NUL character.
+  const char json[] = { '"', 'n', 'u', 'l', 'l', '"', '\0' };
+  std::string json_string(json, sizeof(json));
+  JSONReader reader;
+  // The embedded NUL counts as data after the root value, not a terminator.
+  EXPECT_FALSE(reader.ReadToValue(json_string));
+  EXPECT_EQ(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, reader.error_code());
+}
+
+} // namespace base
diff --git a/libchrome/base/json/json_string_value_serializer.cc b/libchrome/base/json/json_string_value_serializer.cc
new file mode 100644
index 0000000..cd786db
--- /dev/null
+++ b/libchrome/base/json/json_string_value_serializer.cc
@@ -0,0 +1,58 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_string_value_serializer.h"
+
+#include "base/json/json_reader.h"
+#include "base/json/json_writer.h"
+#include "base/logging.h"
+
+using base::Value;
+
+// Binds the serializer to the caller-owned destination string; pretty
+// printing defaults to off.
+JSONStringValueSerializer::JSONStringValueSerializer(std::string* json_string)
+    : json_string_(json_string),
+      pretty_print_(false) {
+}
+
+JSONStringValueSerializer::~JSONStringValueSerializer() {}
+
+// Serializes |root| into the bound string, keeping binary values.
+bool JSONStringValueSerializer::Serialize(const Value& root) {
+  return SerializeInternal(root, false);
+}
+
+// Same as Serialize() but drops binary values from the output.
+bool JSONStringValueSerializer::SerializeAndOmitBinaryValues(
+    const Value& root) {
+  return SerializeInternal(root, true);
+}
+
+// Shared implementation for both Serialize() variants: translates the
+// serializer's settings into JSONWriter option flags and writes into
+// |json_string_|. Returns false if no destination string was provided.
+bool JSONStringValueSerializer::SerializeInternal(const Value& root,
+                                                  bool omit_binary_values) {
+  if (!json_string_)
+    return false;
+
+  int options = 0;
+  if (omit_binary_values)
+    options |= base::JSONWriter::OPTIONS_OMIT_BINARY_VALUES;
+  if (pretty_print_)
+    options |= base::JSONWriter::OPTIONS_PRETTY_PRINT;
+
+  return base::JSONWriter::WriteWithOptions(root, options, json_string_);
+}
+
+// Stores a view of the caller-owned input; the data must outlive this
+// object. Trailing commas are rejected by default.
+JSONStringValueDeserializer::JSONStringValueDeserializer(
+    const base::StringPiece& json_string)
+    : json_string_(json_string),
+      allow_trailing_comma_(false) {
+}
+
+JSONStringValueDeserializer::~JSONStringValueDeserializer() {}
+
+// Parses the stored string, mapping allow_trailing_comma_ onto the reader's
+// option flag. |error_code| and |error_str| may be null; line/column outputs
+// of ReadAndReturnError() are left at their defaults (not requested).
+std::unique_ptr<Value> JSONStringValueDeserializer::Deserialize(
+    int* error_code,
+    std::string* error_str) {
+  return base::JSONReader::ReadAndReturnError(
+      json_string_, allow_trailing_comma_ ? base::JSON_ALLOW_TRAILING_COMMAS
+                                          : base::JSON_PARSE_RFC,
+      error_code, error_str);
+}
diff --git a/libchrome/base/json/json_string_value_serializer.h b/libchrome/base/json/json_string_value_serializer.h
new file mode 100644
index 0000000..a97da23
--- /dev/null
+++ b/libchrome/base/json/json_string_value_serializer.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_JSON_JSON_STRING_VALUE_SERIALIZER_H_
+#define BASE_JSON_JSON_STRING_VALUE_SERIALIZER_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/values.h"
+
+// Serializes base::Value trees into a caller-owned std::string as JSON.
+class BASE_EXPORT JSONStringValueSerializer : public base::ValueSerializer {
+ public:
+  // |json_string| is the string that will be the destination of the
+  // serialization. The caller of the constructor retains ownership of the
+  // string. |json_string| must not be null.
+  explicit JSONStringValueSerializer(std::string* json_string);
+
+  ~JSONStringValueSerializer() override;
+
+  // Attempt to serialize the data structure represented by Value into
+  // JSON.  If the return value is true, the result will have been written
+  // into the string passed into the constructor.
+  bool Serialize(const base::Value& root) override;
+
+  // Equivalent to Serialize(root) except binary values are omitted from the
+  // output.
+  bool SerializeAndOmitBinaryValues(const base::Value& root);
+
+  void set_pretty_print(bool new_value) { pretty_print_ = new_value; }
+  bool pretty_print() { return pretty_print_; }
+
+ private:
+  // Shared implementation behind both public Serialize entry points.
+  bool SerializeInternal(const base::Value& root, bool omit_binary_values);
+
+  // Owned by the caller of the constructor.
+  std::string* json_string_;
+  bool pretty_print_;  // If true, serialization will span multiple lines.
+
+  DISALLOW_COPY_AND_ASSIGN(JSONStringValueSerializer);
+};
+
+// Deserializes a JSON string (viewed, not copied) into a base::Value tree.
+class BASE_EXPORT JSONStringValueDeserializer : public base::ValueDeserializer {
+ public:
+  // This retains a reference to the contents of |json_string|, so the data
+  // must outlive the JSONStringValueDeserializer.
+  explicit JSONStringValueDeserializer(const base::StringPiece& json_string);
+
+  ~JSONStringValueDeserializer() override;
+
+  // Attempt to deserialize the data structure encoded in the string passed
+  // in to the constructor into a structure of Value objects.  If the return
+  // value is null, and if |error_code| is non-null, |error_code| will
+  // contain an integer error code (a JsonParseError in this case).
+  // If |error_message| is non-null, it will be filled in with a formatted
+  // error message including the location of the error if appropriate.
+  // The caller takes ownership of the returned value.
+  std::unique_ptr<base::Value> Deserialize(int* error_code,
+                                           std::string* error_message) override;
+
+  void set_allow_trailing_comma(bool new_value) {
+    allow_trailing_comma_ = new_value;
+  }
+
+ private:
+  // Data is owned by the caller of the constructor.
+  base::StringPiece json_string_;
+  // If true, deserialization will allow trailing commas.
+  bool allow_trailing_comma_;
+
+  DISALLOW_COPY_AND_ASSIGN(JSONStringValueDeserializer);
+};
+
+#endif // BASE_JSON_JSON_STRING_VALUE_SERIALIZER_H_
diff --git a/libchrome/base/json/json_value_converter.cc b/libchrome/base/json/json_value_converter.cc
new file mode 100644
index 0000000..6f772f3
--- /dev/null
+++ b/libchrome/base/json/json_value_converter.cc
@@ -0,0 +1,37 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_value_converter.h"
+
+namespace base {
+namespace internal {
+
+bool BasicValueConverter<int>::Convert(
+ const base::Value& value, int* field) const {
+ return value.GetAsInteger(field);
+}
+
+bool BasicValueConverter<std::string>::Convert(
+ const base::Value& value, std::string* field) const {
+ return value.GetAsString(field);
+}
+
+bool BasicValueConverter<string16>::Convert(
+ const base::Value& value, string16* field) const {
+ return value.GetAsString(field);
+}
+
+bool BasicValueConverter<double>::Convert(
+ const base::Value& value, double* field) const {
+ return value.GetAsDouble(field);
+}
+
+bool BasicValueConverter<bool>::Convert(
+ const base::Value& value, bool* field) const {
+ return value.GetAsBoolean(field);
+}
+
+} // namespace internal
+} // namespace base
+
diff --git a/libchrome/base/json/json_value_converter.h b/libchrome/base/json/json_value_converter.h
new file mode 100644
index 0000000..4cca034
--- /dev/null
+++ b/libchrome/base/json/json_value_converter.h
@@ -0,0 +1,516 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_JSON_JSON_VALUE_CONVERTER_H_
+#define BASE_JSON_JSON_VALUE_CONVERTER_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/scoped_vector.h"
+#include "base/stl_util.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "base/values.h"
+
+// JSONValueConverter converts a JSON value into a C++ struct in a
+// lightweight way.
+//
+// Usage:
+// For real examples, you may want to refer to _unittest.cc file.
+//
+// Assume that you have a struct like this:
+// struct Message {
+// int foo;
+// std::string bar;
+// static void RegisterJSONConverter(
+// JSONValueConverter<Message>* converter);
+// };
+//
+// And you want to parse a json data into this struct. First, you
+// need to declare RegisterJSONConverter() method in your struct.
+// // static
+// void Message::RegisterJSONConverter(
+// JSONValueConverter<Message>* converter) {
+// converter->RegisterIntField("foo", &Message::foo);
+// converter->RegisterStringField("bar", &Message::bar);
+// }
+//
+// Then, you just instantiate your JSONValueConverter of your type and call
+// Convert() method.
+// Message message;
+// JSONValueConverter<Message> converter;
+// converter.Convert(json, &message);
+//
+// Convert() returns false when it fails. Here "fail" means that the value is
+// structurally different from what is expected, such as a string value
+// appearing for an int field. Do not report failures for missing fields.
+// Also note that Convert() will modify the passed |message| even when it
+// fails, for performance reasons.
+//
+// For nested field, the internal message also has to implement the registration
+// method. Then, just use RegisterNestedField() from the containing struct's
+// RegisterJSONConverter method.
+// struct Nested {
+// Message foo;
+// static void RegisterJSONConverter(...) {
+// ...
+// converter->RegisterNestedField("foo", &Nested::foo);
+// }
+// };
+//
+// For repeated field, we just assume ScopedVector for its container
+// and you can put RegisterRepeatedInt or some other types. Use
+// RegisterRepeatedMessage for nested repeated fields.
+//
+// Sometimes the JSON format uses string representations for other types,
+// such as enum, timestamp, or URL. You can use the RegisterCustomField
+// method and specify a function to convert a StringPiece to your type.
+// bool ConvertFunc(const StringPiece& s, YourEnum* result) {
+// // do something and return true if succeed...
+// }
+// struct Message {
+// YourEnum ye;
+// ...
+// static void RegisterJSONConverter(...) {
+// ...
+// converter->RegisterCustomField<YourEnum>(
+// "your_enum", &Message::ye, &ConvertFunc);
+// }
+// };
+
+namespace base {
+
+template <typename StructType>
+class JSONValueConverter;
+
+namespace internal {
+
+template<typename StructType>
+class FieldConverterBase {
+ public:
+ explicit FieldConverterBase(const std::string& path) : field_path_(path) {}
+ virtual ~FieldConverterBase() {}
+ virtual bool ConvertField(const base::Value& value, StructType* obj)
+ const = 0;
+ const std::string& field_path() const { return field_path_; }
+
+ private:
+ std::string field_path_;
+ DISALLOW_COPY_AND_ASSIGN(FieldConverterBase);
+};
+
+template <typename FieldType>
+class ValueConverter {
+ public:
+ virtual ~ValueConverter() {}
+ virtual bool Convert(const base::Value& value, FieldType* field) const = 0;
+};
+
+template <typename StructType, typename FieldType>
+class FieldConverter : public FieldConverterBase<StructType> {
+ public:
+ explicit FieldConverter(const std::string& path,
+ FieldType StructType::* field,
+ ValueConverter<FieldType>* converter)
+ : FieldConverterBase<StructType>(path),
+ field_pointer_(field),
+ value_converter_(converter) {
+ }
+
+ bool ConvertField(const base::Value& value, StructType* dst) const override {
+ return value_converter_->Convert(value, &(dst->*field_pointer_));
+ }
+
+ private:
+ FieldType StructType::* field_pointer_;
+ std::unique_ptr<ValueConverter<FieldType>> value_converter_;
+ DISALLOW_COPY_AND_ASSIGN(FieldConverter);
+};
+
+template <typename FieldType>
+class BasicValueConverter;
+
+template <>
+class BASE_EXPORT BasicValueConverter<int> : public ValueConverter<int> {
+ public:
+ BasicValueConverter() {}
+
+ bool Convert(const base::Value& value, int* field) const override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BasicValueConverter);
+};
+
+template <>
+class BASE_EXPORT BasicValueConverter<std::string>
+ : public ValueConverter<std::string> {
+ public:
+ BasicValueConverter() {}
+
+ bool Convert(const base::Value& value, std::string* field) const override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BasicValueConverter);
+};
+
+template <>
+class BASE_EXPORT BasicValueConverter<string16>
+ : public ValueConverter<string16> {
+ public:
+ BasicValueConverter() {}
+
+ bool Convert(const base::Value& value, string16* field) const override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BasicValueConverter);
+};
+
+template <>
+class BASE_EXPORT BasicValueConverter<double> : public ValueConverter<double> {
+ public:
+ BasicValueConverter() {}
+
+ bool Convert(const base::Value& value, double* field) const override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BasicValueConverter);
+};
+
+template <>
+class BASE_EXPORT BasicValueConverter<bool> : public ValueConverter<bool> {
+ public:
+ BasicValueConverter() {}
+
+ bool Convert(const base::Value& value, bool* field) const override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BasicValueConverter);
+};
+
+template <typename FieldType>
+class ValueFieldConverter : public ValueConverter<FieldType> {
+ public:
+ typedef bool(*ConvertFunc)(const base::Value* value, FieldType* field);
+
+ ValueFieldConverter(ConvertFunc convert_func)
+ : convert_func_(convert_func) {}
+
+ bool Convert(const base::Value& value, FieldType* field) const override {
+ return convert_func_(&value, field);
+ }
+
+ private:
+ ConvertFunc convert_func_;
+
+ DISALLOW_COPY_AND_ASSIGN(ValueFieldConverter);
+};
+
+template <typename FieldType>
+class CustomFieldConverter : public ValueConverter<FieldType> {
+ public:
+ typedef bool(*ConvertFunc)(const StringPiece& value, FieldType* field);
+
+ CustomFieldConverter(ConvertFunc convert_func)
+ : convert_func_(convert_func) {}
+
+ bool Convert(const base::Value& value, FieldType* field) const override {
+ std::string string_value;
+ return value.GetAsString(&string_value) &&
+ convert_func_(string_value, field);
+ }
+
+ private:
+ ConvertFunc convert_func_;
+
+ DISALLOW_COPY_AND_ASSIGN(CustomFieldConverter);
+};
+
+template <typename NestedType>
+class NestedValueConverter : public ValueConverter<NestedType> {
+ public:
+ NestedValueConverter() {}
+
+ bool Convert(const base::Value& value, NestedType* field) const override {
+ return converter_.Convert(value, field);
+ }
+
+ private:
+ JSONValueConverter<NestedType> converter_;
+ DISALLOW_COPY_AND_ASSIGN(NestedValueConverter);
+};
+
+template <typename Element>
+class RepeatedValueConverter : public ValueConverter<ScopedVector<Element> > {
+ public:
+ RepeatedValueConverter() {}
+
+ bool Convert(const base::Value& value,
+ ScopedVector<Element>* field) const override {
+ const base::ListValue* list = NULL;
+ if (!value.GetAsList(&list)) {
+ // The field is not a list.
+ return false;
+ }
+
+ field->reserve(list->GetSize());
+ for (size_t i = 0; i < list->GetSize(); ++i) {
+ const base::Value* element = NULL;
+ if (!list->Get(i, &element))
+ continue;
+
+ std::unique_ptr<Element> e(new Element);
+ if (basic_converter_.Convert(*element, e.get())) {
+ field->push_back(e.release());
+ } else {
+ DVLOG(1) << "failure at " << i << "-th element";
+ return false;
+ }
+ }
+ return true;
+ }
+
+ private:
+ BasicValueConverter<Element> basic_converter_;
+ DISALLOW_COPY_AND_ASSIGN(RepeatedValueConverter);
+};
+
+template <typename NestedType>
+class RepeatedMessageConverter
+ : public ValueConverter<ScopedVector<NestedType> > {
+ public:
+ RepeatedMessageConverter() {}
+
+ bool Convert(const base::Value& value,
+ ScopedVector<NestedType>* field) const override {
+ const base::ListValue* list = NULL;
+ if (!value.GetAsList(&list))
+ return false;
+
+ field->reserve(list->GetSize());
+ for (size_t i = 0; i < list->GetSize(); ++i) {
+ const base::Value* element = NULL;
+ if (!list->Get(i, &element))
+ continue;
+
+ std::unique_ptr<NestedType> nested(new NestedType);
+ if (converter_.Convert(*element, nested.get())) {
+ field->push_back(nested.release());
+ } else {
+ DVLOG(1) << "failure at " << i << "-th element";
+ return false;
+ }
+ }
+ return true;
+ }
+
+ private:
+ JSONValueConverter<NestedType> converter_;
+ DISALLOW_COPY_AND_ASSIGN(RepeatedMessageConverter);
+};
+
+template <typename NestedType>
+class RepeatedCustomValueConverter
+ : public ValueConverter<ScopedVector<NestedType> > {
+ public:
+ typedef bool(*ConvertFunc)(const base::Value* value, NestedType* field);
+
+ RepeatedCustomValueConverter(ConvertFunc convert_func)
+ : convert_func_(convert_func) {}
+
+ bool Convert(const base::Value& value,
+ ScopedVector<NestedType>* field) const override {
+ const base::ListValue* list = NULL;
+ if (!value.GetAsList(&list))
+ return false;
+
+ field->reserve(list->GetSize());
+ for (size_t i = 0; i < list->GetSize(); ++i) {
+ const base::Value* element = NULL;
+ if (!list->Get(i, &element))
+ continue;
+
+ std::unique_ptr<NestedType> nested(new NestedType);
+ if ((*convert_func_)(element, nested.get())) {
+ field->push_back(nested.release());
+ } else {
+ DVLOG(1) << "failure at " << i << "-th element";
+ return false;
+ }
+ }
+ return true;
+ }
+
+ private:
+ ConvertFunc convert_func_;
+ DISALLOW_COPY_AND_ASSIGN(RepeatedCustomValueConverter);
+};
+
+
+} // namespace internal
+
+template <class StructType>
+class JSONValueConverter {
+ public:
+ JSONValueConverter() {
+ StructType::RegisterJSONConverter(this);
+ }
+
+ void RegisterIntField(const std::string& field_name,
+ int StructType::* field) {
+ fields_.push_back(new internal::FieldConverter<StructType, int>(
+ field_name, field, new internal::BasicValueConverter<int>));
+ }
+
+ void RegisterStringField(const std::string& field_name,
+ std::string StructType::* field) {
+ fields_.push_back(new internal::FieldConverter<StructType, std::string>(
+ field_name, field, new internal::BasicValueConverter<std::string>));
+ }
+
+ void RegisterStringField(const std::string& field_name,
+ string16 StructType::* field) {
+ fields_.push_back(new internal::FieldConverter<StructType, string16>(
+ field_name, field, new internal::BasicValueConverter<string16>));
+ }
+
+ void RegisterBoolField(const std::string& field_name,
+ bool StructType::* field) {
+ fields_.push_back(new internal::FieldConverter<StructType, bool>(
+ field_name, field, new internal::BasicValueConverter<bool>));
+ }
+
+ void RegisterDoubleField(const std::string& field_name,
+ double StructType::* field) {
+ fields_.push_back(new internal::FieldConverter<StructType, double>(
+ field_name, field, new internal::BasicValueConverter<double>));
+ }
+
+ template <class NestedType>
+ void RegisterNestedField(
+ const std::string& field_name, NestedType StructType::* field) {
+ fields_.push_back(new internal::FieldConverter<StructType, NestedType>(
+ field_name,
+ field,
+ new internal::NestedValueConverter<NestedType>));
+ }
+
+ template <typename FieldType>
+ void RegisterCustomField(
+ const std::string& field_name,
+ FieldType StructType::* field,
+ bool (*convert_func)(const StringPiece&, FieldType*)) {
+ fields_.push_back(new internal::FieldConverter<StructType, FieldType>(
+ field_name,
+ field,
+ new internal::CustomFieldConverter<FieldType>(convert_func)));
+ }
+
+ template <typename FieldType>
+ void RegisterCustomValueField(
+ const std::string& field_name,
+ FieldType StructType::* field,
+ bool (*convert_func)(const base::Value*, FieldType*)) {
+ fields_.push_back(new internal::FieldConverter<StructType, FieldType>(
+ field_name,
+ field,
+ new internal::ValueFieldConverter<FieldType>(convert_func)));
+ }
+
+ void RegisterRepeatedInt(const std::string& field_name,
+ ScopedVector<int> StructType::* field) {
+ fields_.push_back(
+ new internal::FieldConverter<StructType, ScopedVector<int> >(
+ field_name, field, new internal::RepeatedValueConverter<int>));
+ }
+
+ void RegisterRepeatedString(const std::string& field_name,
+ ScopedVector<std::string> StructType::* field) {
+ fields_.push_back(
+ new internal::FieldConverter<StructType, ScopedVector<std::string> >(
+ field_name,
+ field,
+ new internal::RepeatedValueConverter<std::string>));
+ }
+
+ void RegisterRepeatedString(const std::string& field_name,
+ ScopedVector<string16> StructType::* field) {
+ fields_.push_back(
+ new internal::FieldConverter<StructType, ScopedVector<string16> >(
+ field_name,
+ field,
+ new internal::RepeatedValueConverter<string16>));
+ }
+
+ void RegisterRepeatedDouble(const std::string& field_name,
+ ScopedVector<double> StructType::* field) {
+ fields_.push_back(
+ new internal::FieldConverter<StructType, ScopedVector<double> >(
+ field_name, field, new internal::RepeatedValueConverter<double>));
+ }
+
+ void RegisterRepeatedBool(const std::string& field_name,
+ ScopedVector<bool> StructType::* field) {
+ fields_.push_back(
+ new internal::FieldConverter<StructType, ScopedVector<bool> >(
+ field_name, field, new internal::RepeatedValueConverter<bool>));
+ }
+
+ template <class NestedType>
+ void RegisterRepeatedCustomValue(
+ const std::string& field_name,
+ ScopedVector<NestedType> StructType::* field,
+ bool (*convert_func)(const base::Value*, NestedType*)) {
+ fields_.push_back(
+ new internal::FieldConverter<StructType, ScopedVector<NestedType> >(
+ field_name,
+ field,
+ new internal::RepeatedCustomValueConverter<NestedType>(
+ convert_func)));
+ }
+
+ template <class NestedType>
+ void RegisterRepeatedMessage(const std::string& field_name,
+ ScopedVector<NestedType> StructType::* field) {
+ fields_.push_back(
+ new internal::FieldConverter<StructType, ScopedVector<NestedType> >(
+ field_name,
+ field,
+ new internal::RepeatedMessageConverter<NestedType>));
+ }
+
+ bool Convert(const base::Value& value, StructType* output) const {
+ const DictionaryValue* dictionary_value = NULL;
+ if (!value.GetAsDictionary(&dictionary_value))
+ return false;
+
+ for(size_t i = 0; i < fields_.size(); ++i) {
+ const internal::FieldConverterBase<StructType>* field_converter =
+ fields_[i];
+ const base::Value* field = NULL;
+ if (dictionary_value->Get(field_converter->field_path(), &field)) {
+ if (!field_converter->ConvertField(*field, output)) {
+ DVLOG(1) << "failure at field " << field_converter->field_path();
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ private:
+ ScopedVector<internal::FieldConverterBase<StructType> > fields_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSONValueConverter);
+};
+
+} // namespace base
+
+#endif // BASE_JSON_JSON_VALUE_CONVERTER_H_
diff --git a/libchrome/base/json/json_value_converter_unittest.cc b/libchrome/base/json/json_value_converter_unittest.cc
new file mode 100644
index 0000000..56ade24
--- /dev/null
+++ b/libchrome/base/json/json_value_converter_unittest.cc
@@ -0,0 +1,256 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_value_converter.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/json/json_reader.h"
+#include "base/memory/scoped_vector.h"
+#include "base/strings/string_piece.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+// Very simple messages.
+struct SimpleMessage {
+ enum SimpleEnum {
+ FOO, BAR,
+ };
+ int foo;
+ std::string bar;
+ bool baz;
+ bool bstruct;
+ SimpleEnum simple_enum;
+ ScopedVector<int> ints;
+ ScopedVector<std::string> string_values;
+ SimpleMessage() : foo(0), baz(false), bstruct(false), simple_enum(FOO) {}
+
+ static bool ParseSimpleEnum(const StringPiece& value, SimpleEnum* field) {
+ if (value == "foo") {
+ *field = FOO;
+ return true;
+ } else if (value == "bar") {
+ *field = BAR;
+ return true;
+ }
+ return false;
+ }
+
+ static bool HasFieldPresent(const base::Value* value, bool* result) {
+ *result = value != NULL;
+ return true;
+ }
+
+ static bool GetValueString(const base::Value* value, std::string* result) {
+ const base::DictionaryValue* dict = NULL;
+ if (!value->GetAsDictionary(&dict))
+ return false;
+
+ if (!dict->GetString("val", result))
+ return false;
+
+ return true;
+ }
+
+ static void RegisterJSONConverter(
+ base::JSONValueConverter<SimpleMessage>* converter) {
+ converter->RegisterIntField("foo", &SimpleMessage::foo);
+ converter->RegisterStringField("bar", &SimpleMessage::bar);
+ converter->RegisterBoolField("baz", &SimpleMessage::baz);
+ converter->RegisterCustomField<SimpleEnum>(
+ "simple_enum", &SimpleMessage::simple_enum, &ParseSimpleEnum);
+ converter->RegisterRepeatedInt("ints", &SimpleMessage::ints);
+ converter->RegisterCustomValueField<bool>("bstruct",
+ &SimpleMessage::bstruct,
+ &HasFieldPresent);
+ converter->RegisterRepeatedCustomValue<std::string>(
+ "string_values",
+ &SimpleMessage::string_values,
+ &GetValueString);
+ }
+};
+
+// For nested messages.
+struct NestedMessage {
+ double foo;
+ SimpleMessage child;
+ ScopedVector<SimpleMessage> children;
+
+ NestedMessage() : foo(0) {}
+
+ static void RegisterJSONConverter(
+ base::JSONValueConverter<NestedMessage>* converter) {
+ converter->RegisterDoubleField("foo", &NestedMessage::foo);
+ converter->RegisterNestedField("child", &NestedMessage::child);
+ converter->RegisterRepeatedMessage("children", &NestedMessage::children);
+ }
+};
+
+} // namespace
+
+TEST(JSONValueConverterTest, ParseSimpleMessage) {
+ const char normal_data[] =
+ "{\n"
+ " \"foo\": 1,\n"
+ " \"bar\": \"bar\",\n"
+ " \"baz\": true,\n"
+ " \"bstruct\": {},\n"
+ " \"string_values\": [{\"val\": \"value_1\"}, {\"val\": \"value_2\"}],"
+ " \"simple_enum\": \"foo\","
+ " \"ints\": [1, 2]"
+ "}\n";
+
+ std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
+ SimpleMessage message;
+ base::JSONValueConverter<SimpleMessage> converter;
+ EXPECT_TRUE(converter.Convert(*value.get(), &message));
+
+ EXPECT_EQ(1, message.foo);
+ EXPECT_EQ("bar", message.bar);
+ EXPECT_TRUE(message.baz);
+ EXPECT_EQ(SimpleMessage::FOO, message.simple_enum);
+ EXPECT_EQ(2, static_cast<int>(message.ints.size()));
+ ASSERT_EQ(2U, message.string_values.size());
+ EXPECT_EQ("value_1", *message.string_values[0]);
+ EXPECT_EQ("value_2", *message.string_values[1]);
+ EXPECT_EQ(1, *(message.ints[0]));
+ EXPECT_EQ(2, *(message.ints[1]));
+}
+
+TEST(JSONValueConverterTest, ParseNestedMessage) {
+ const char normal_data[] =
+ "{\n"
+ " \"foo\": 1.0,\n"
+ " \"child\": {\n"
+ " \"foo\": 1,\n"
+ " \"bar\": \"bar\",\n"
+ " \"bstruct\": {},\n"
+ " \"string_values\": [{\"val\": \"value_1\"}, {\"val\": \"value_2\"}],"
+ " \"baz\": true\n"
+ " },\n"
+ " \"children\": [{\n"
+ " \"foo\": 2,\n"
+ " \"bar\": \"foobar\",\n"
+ " \"bstruct\": \"\",\n"
+ " \"string_values\": [{\"val\": \"value_1\"}],"
+ " \"baz\": true\n"
+ " },\n"
+ " {\n"
+ " \"foo\": 3,\n"
+ " \"bar\": \"barbaz\",\n"
+ " \"baz\": false\n"
+ " }]\n"
+ "}\n";
+
+ std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
+ NestedMessage message;
+ base::JSONValueConverter<NestedMessage> converter;
+ EXPECT_TRUE(converter.Convert(*value.get(), &message));
+
+ EXPECT_EQ(1.0, message.foo);
+ EXPECT_EQ(1, message.child.foo);
+ EXPECT_EQ("bar", message.child.bar);
+ EXPECT_TRUE(message.child.baz);
+ EXPECT_TRUE(message.child.bstruct);
+ ASSERT_EQ(2U, message.child.string_values.size());
+ EXPECT_EQ("value_1", *message.child.string_values[0]);
+ EXPECT_EQ("value_2", *message.child.string_values[1]);
+
+ EXPECT_EQ(2, static_cast<int>(message.children.size()));
+ const SimpleMessage* first_child = message.children[0];
+ ASSERT_TRUE(first_child);
+ EXPECT_EQ(2, first_child->foo);
+ EXPECT_EQ("foobar", first_child->bar);
+ EXPECT_TRUE(first_child->baz);
+ EXPECT_TRUE(first_child->bstruct);
+ ASSERT_EQ(1U, first_child->string_values.size());
+ EXPECT_EQ("value_1", *first_child->string_values[0]);
+
+ const SimpleMessage* second_child = message.children[1];
+ ASSERT_TRUE(second_child);
+ EXPECT_EQ(3, second_child->foo);
+ EXPECT_EQ("barbaz", second_child->bar);
+ EXPECT_FALSE(second_child->baz);
+ EXPECT_FALSE(second_child->bstruct);
+ EXPECT_EQ(0U, second_child->string_values.size());
+}
+
+TEST(JSONValueConverterTest, ParseFailures) {
+ const char normal_data[] =
+ "{\n"
+ " \"foo\": 1,\n"
+ " \"bar\": 2,\n" // "bar" is an integer here.
+ " \"baz\": true,\n"
+ " \"ints\": [1, 2]"
+ "}\n";
+
+ std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
+ SimpleMessage message;
+ base::JSONValueConverter<SimpleMessage> converter;
+ EXPECT_FALSE(converter.Convert(*value.get(), &message));
+ // Do not check the values below. |message| may be modified during
+ // Convert() even if it fails.
+}
+
+TEST(JSONValueConverterTest, ParseWithMissingFields) {
+ const char normal_data[] =
+ "{\n"
+ " \"foo\": 1,\n"
+ " \"baz\": true,\n"
+ " \"ints\": [1, 2]"
+ "}\n";
+
+ std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
+ SimpleMessage message;
+ base::JSONValueConverter<SimpleMessage> converter;
+ // Convert() still succeeds even if the input doesn't have "bar" field.
+ EXPECT_TRUE(converter.Convert(*value.get(), &message));
+
+ EXPECT_EQ(1, message.foo);
+ EXPECT_TRUE(message.baz);
+ EXPECT_EQ(2, static_cast<int>(message.ints.size()));
+ EXPECT_EQ(1, *(message.ints[0]));
+ EXPECT_EQ(2, *(message.ints[1]));
+}
+
+TEST(JSONValueConverterTest, EnumParserFails) {
+ const char normal_data[] =
+ "{\n"
+ " \"foo\": 1,\n"
+ " \"bar\": \"bar\",\n"
+ " \"baz\": true,\n"
+ " \"simple_enum\": \"baz\","
+ " \"ints\": [1, 2]"
+ "}\n";
+
+ std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
+ SimpleMessage message;
+ base::JSONValueConverter<SimpleMessage> converter;
+ EXPECT_FALSE(converter.Convert(*value.get(), &message));
+ // Do not check the values, as mentioned above.
+}
+
+TEST(JSONValueConverterTest, RepeatedValueErrorInTheMiddle) {
+ const char normal_data[] =
+ "{\n"
+ " \"foo\": 1,\n"
+ " \"bar\": \"bar\",\n"
+ " \"baz\": true,\n"
+ " \"simple_enum\": \"baz\","
+ " \"ints\": [1, false]"
+ "}\n";
+
+ std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
+ SimpleMessage message;
+ base::JSONValueConverter<SimpleMessage> converter;
+ EXPECT_FALSE(converter.Convert(*value.get(), &message));
+ // Do not check the values, as mentioned above.
+}
+
+} // namespace base
diff --git a/libchrome/base/json/json_value_serializer_unittest.cc b/libchrome/base/json/json_value_serializer_unittest.cc
new file mode 100644
index 0000000..0c079b7
--- /dev/null
+++ b/libchrome/base/json/json_value_serializer_unittest.cc
@@ -0,0 +1,500 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include <string>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/json/json_file_value_serializer.h"
+#include "base/json/json_reader.h"
+#include "base/json/json_string_value_serializer.h"
+#include "base/json/json_writer.h"
+#if !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
+#include "base/path_service.h"
+#endif
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/values.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// Some proper JSON to test with:
+const char kProperJSON[] =
+ "{\n"
+ " \"compound\": {\n"
+ " \"a\": 1,\n"
+ " \"b\": 2\n"
+ " },\n"
+ " \"some_String\": \"1337\",\n"
+ " \"some_int\": 42,\n"
+ " \"the_list\": [ \"val1\", \"val2\" ]\n"
+ "}\n";
+
+// Some proper JSON with trailing commas:
+const char kProperJSONWithCommas[] =
+ "{\n"
+ "\t\"some_int\": 42,\n"
+ "\t\"some_String\": \"1337\",\n"
+ "\t\"the_list\": [\"val1\", \"val2\", ],\n"
+ "\t\"compound\": { \"a\": 1, \"b\": 2, },\n"
+ "}\n";
+
+// kProperJSON with a few misc characters at the begin and end.
+const char kProperJSONPadded[] =
+ ")]}'\n"
+ "{\n"
+ " \"compound\": {\n"
+ " \"a\": 1,\n"
+ " \"b\": 2\n"
+ " },\n"
+ " \"some_String\": \"1337\",\n"
+ " \"some_int\": 42,\n"
+ " \"the_list\": [ \"val1\", \"val2\" ]\n"
+ "}\n"
+ "?!ab\n";
+
+const char kWinLineEnds[] = "\r\n";
+const char kLinuxLineEnds[] = "\n";
+
+// Verifies the generated JSON against the expected output.
+void CheckJSONIsStillTheSame(const Value& value) {
+ // Serialize back the output.
+ std::string serialized_json;
+ JSONStringValueSerializer str_serializer(&serialized_json);
+ str_serializer.set_pretty_print(true);
+ ASSERT_TRUE(str_serializer.Serialize(value));
+ // Unify line endings between platforms.
+ ReplaceSubstringsAfterOffset(&serialized_json, 0,
+ kWinLineEnds, kLinuxLineEnds);
+ // Now compare the input with the output.
+ ASSERT_EQ(kProperJSON, serialized_json);
+}
+
+void ValidateJsonList(const std::string& json) {
+ std::unique_ptr<Value> root = JSONReader::Read(json);
+ ASSERT_TRUE(root.get() && root->IsType(Value::TYPE_LIST));
+ ListValue* list = static_cast<ListValue*>(root.get());
+ ASSERT_EQ(1U, list->GetSize());
+ Value* elt = NULL;
+ ASSERT_TRUE(list->Get(0, &elt));
+ int value = 0;
+ ASSERT_TRUE(elt && elt->GetAsInteger(&value));
+ ASSERT_EQ(1, value);
+}
+
+// Test proper JSON deserialization from string is working.
+TEST(JSONValueDeserializerTest, ReadProperJSONFromString) {
+ // Try to deserialize it through the serializer.
+ JSONStringValueDeserializer str_deserializer(kProperJSON);
+
+ int error_code = 0;
+ std::string error_message;
+ std::unique_ptr<Value> value =
+ str_deserializer.Deserialize(&error_code, &error_message);
+ ASSERT_TRUE(value.get());
+ ASSERT_EQ(0, error_code);
+ ASSERT_TRUE(error_message.empty());
+ // Verify if the same JSON is still there.
+ CheckJSONIsStillTheSame(*value);
+}
+
+// Test proper JSON deserialization from a StringPiece substring.
+TEST(JSONValueDeserializerTest, ReadProperJSONFromStringPiece) {
+ // Create a StringPiece for the substring of kProperJSONPadded that matches
+ // kProperJSON.
+ base::StringPiece proper_json(kProperJSONPadded);
+ proper_json = proper_json.substr(5, proper_json.length() - 10);
+ JSONStringValueDeserializer str_deserializer(proper_json);
+
+ int error_code = 0;
+ std::string error_message;
+ std::unique_ptr<Value> value =
+ str_deserializer.Deserialize(&error_code, &error_message);
+ ASSERT_TRUE(value.get());
+ ASSERT_EQ(0, error_code);
+ ASSERT_TRUE(error_message.empty());
+ // Verify if the same JSON is still there.
+ CheckJSONIsStillTheSame(*value);
+}
+
+// Test that trailing commas are only properly deserialized from string when
+// the proper flag for that is set.
+TEST(JSONValueDeserializerTest, ReadJSONWithTrailingCommasFromString) {
+ // Try to deserialize it through the serializer.
+ JSONStringValueDeserializer str_deserializer(kProperJSONWithCommas);
+
+ int error_code = 0;
+ std::string error_message;
+ std::unique_ptr<Value> value =
+ str_deserializer.Deserialize(&error_code, &error_message);
+ ASSERT_FALSE(value.get());
+ ASSERT_NE(0, error_code);
+ ASSERT_FALSE(error_message.empty());
+ // Now the flag is set and it must pass.
+ str_deserializer.set_allow_trailing_comma(true);
+ value = str_deserializer.Deserialize(&error_code, &error_message);
+ ASSERT_TRUE(value.get());
+ ASSERT_EQ(JSONReader::JSON_TRAILING_COMMA, error_code);
+ // Verify if the same JSON is still there.
+ CheckJSONIsStillTheSame(*value);
+}
+
+// Test proper JSON deserialization from file is working.
+TEST(JSONValueDeserializerTest, ReadProperJSONFromFile) {
+ ScopedTempDir tempdir;
+ ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+ // Write it down in the file.
+ FilePath temp_file(tempdir.path().AppendASCII("test.json"));
+ ASSERT_EQ(static_cast<int>(strlen(kProperJSON)),
+ WriteFile(temp_file, kProperJSON, strlen(kProperJSON)));
+
+ // Try to deserialize it through the serializer.
+ JSONFileValueDeserializer file_deserializer(temp_file);
+
+ int error_code = 0;
+ std::string error_message;
+ std::unique_ptr<Value> value =
+ file_deserializer.Deserialize(&error_code, &error_message);
+ ASSERT_TRUE(value.get());
+ ASSERT_EQ(0, error_code);
+ ASSERT_TRUE(error_message.empty());
+ // Verify if the same JSON is still there.
+ CheckJSONIsStillTheSame(*value);
+}
+
+// Test that trailing commas are only properly deserialized from file when
+// the proper flag for that is set.
+TEST(JSONValueDeserializerTest, ReadJSONWithCommasFromFile) {
+ ScopedTempDir tempdir;
+ ASSERT_TRUE(tempdir.CreateUniqueTempDir());
+ // Write it down in the file.
+ FilePath temp_file(tempdir.path().AppendASCII("test.json"));
+ ASSERT_EQ(static_cast<int>(strlen(kProperJSONWithCommas)),
+ WriteFile(temp_file, kProperJSONWithCommas,
+ strlen(kProperJSONWithCommas)));
+
+ // Try to deserialize it through the serializer.
+ JSONFileValueDeserializer file_deserializer(temp_file);
+ // This must fail without the proper flag.
+ int error_code = 0;
+ std::string error_message;
+ std::unique_ptr<Value> value =
+ file_deserializer.Deserialize(&error_code, &error_message);
+ ASSERT_FALSE(value.get());
+ ASSERT_NE(0, error_code);
+ ASSERT_FALSE(error_message.empty());
+ // Now the flag is set and it must pass.
+ file_deserializer.set_allow_trailing_comma(true);
+ value = file_deserializer.Deserialize(&error_code, &error_message);
+ ASSERT_TRUE(value.get());
+ ASSERT_EQ(JSONReader::JSON_TRAILING_COMMA, error_code);
+ // Verify if the same JSON is still there.
+ CheckJSONIsStillTheSame(*value);
+}
+
+TEST(JSONValueDeserializerTest, AllowTrailingComma) {
+ std::unique_ptr<Value> root;
+ std::unique_ptr<Value> root_expected;
+ static const char kTestWithCommas[] = "{\"key\": [true,],}";
+ static const char kTestNoCommas[] = "{\"key\": [true]}";
+
+ JSONStringValueDeserializer deserializer(kTestWithCommas);
+ deserializer.set_allow_trailing_comma(true);
+ JSONStringValueDeserializer deserializer_expected(kTestNoCommas);
+ root = deserializer.Deserialize(NULL, NULL);
+ ASSERT_TRUE(root.get());
+ root_expected = deserializer_expected.Deserialize(NULL, NULL);
+ ASSERT_TRUE(root_expected.get());
+ ASSERT_TRUE(root->Equals(root_expected.get()));
+}
+
+// Round-trips a dictionary through JSONStringValueDeserializer and
+// JSONStringValueSerializer, checking both the compact and the
+// pretty-printed serializations.
+TEST(JSONValueSerializerTest, Roundtrip) {
+ static const char kOriginalSerialization[] =
+ "{\"bool\":true,\"double\":3.14,\"int\":42,\"list\":[1,2],\"null\":null}";
+ JSONStringValueDeserializer deserializer(kOriginalSerialization);
+ std::unique_ptr<Value> root = deserializer.Deserialize(NULL, NULL);
+ ASSERT_TRUE(root.get());
+ ASSERT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
+
+ DictionaryValue* root_dict = static_cast<DictionaryValue*>(root.get());
+
+ // Spot-check each deserialized member before re-serializing.
+ Value* null_value = NULL;
+ ASSERT_TRUE(root_dict->Get("null", &null_value));
+ ASSERT_TRUE(null_value);
+ ASSERT_TRUE(null_value->IsType(Value::TYPE_NULL));
+
+ bool bool_value = false;
+ ASSERT_TRUE(root_dict->GetBoolean("bool", &bool_value));
+ ASSERT_TRUE(bool_value);
+
+ int int_value = 0;
+ ASSERT_TRUE(root_dict->GetInteger("int", &int_value));
+ ASSERT_EQ(42, int_value);
+
+ double double_value = 0.0;
+ ASSERT_TRUE(root_dict->GetDouble("double", &double_value));
+ ASSERT_DOUBLE_EQ(3.14, double_value);
+
+ // Compact serialization must reproduce the original input exactly.
+ std::string test_serialization;
+ JSONStringValueSerializer mutable_serializer(&test_serialization);
+ ASSERT_TRUE(mutable_serializer.Serialize(*root_dict));
+ ASSERT_EQ(kOriginalSerialization, test_serialization);
+
+ mutable_serializer.set_pretty_print(true);
+ ASSERT_TRUE(mutable_serializer.Serialize(*root_dict));
+ // JSON output uses a different newline style on Windows than on other
+ // platforms.
+#if defined(OS_WIN)
+#define JSON_NEWLINE "\r\n"
+#else
+#define JSON_NEWLINE "\n"
+#endif
+ const std::string pretty_serialization =
+ "{" JSON_NEWLINE
+ " \"bool\": true," JSON_NEWLINE
+ " \"double\": 3.14," JSON_NEWLINE
+ " \"int\": 42," JSON_NEWLINE
+ " \"list\": [ 1, 2 ]," JSON_NEWLINE
+ " \"null\": null" JSON_NEWLINE
+ "}" JSON_NEWLINE;
+#undef JSON_NEWLINE
+ ASSERT_EQ(pretty_serialization, test_serialization);
+}
+
+// Serializes every code unit 1..255 and checks the expected escaping:
+// control characters become \uXXXX (or short escapes), '<' is escaped,
+// and bytes >= 0x80 are emitted as UTF-8.
+TEST(JSONValueSerializerTest, StringEscape) {
+ string16 all_chars;
+ for (int i = 1; i < 256; ++i) {
+ all_chars += static_cast<char16>(i);
+ }
+ // Generated in Firefox using the following js (with an extra backslash for
+ // double quote):
+ // var s = '';
+ // for (var i = 1; i < 256; ++i) { s += String.fromCharCode(i); }
+ // uneval(s).replace(/\\/g, "\\\\");
+ std::string all_chars_expected =
+ "\\u0001\\u0002\\u0003\\u0004\\u0005\\u0006\\u0007\\b\\t\\n\\u000B\\f\\r"
+ "\\u000E\\u000F\\u0010\\u0011\\u0012\\u0013\\u0014\\u0015\\u0016\\u0017"
+ "\\u0018\\u0019\\u001A\\u001B\\u001C\\u001D\\u001E\\u001F !\\\"#$%&'()*+,"
+ "-./0123456789:;\\u003C=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcde"
+ "fghijklmnopqrstuvwxyz{|}~\x7F\xC2\x80\xC2\x81\xC2\x82\xC2\x83\xC2\x84"
+ "\xC2\x85\xC2\x86\xC2\x87\xC2\x88\xC2\x89\xC2\x8A\xC2\x8B\xC2\x8C\xC2\x8D"
+ "\xC2\x8E\xC2\x8F\xC2\x90\xC2\x91\xC2\x92\xC2\x93\xC2\x94\xC2\x95\xC2\x96"
+ "\xC2\x97\xC2\x98\xC2\x99\xC2\x9A\xC2\x9B\xC2\x9C\xC2\x9D\xC2\x9E\xC2\x9F"
+ "\xC2\xA0\xC2\xA1\xC2\xA2\xC2\xA3\xC2\xA4\xC2\xA5\xC2\xA6\xC2\xA7\xC2\xA8"
+ "\xC2\xA9\xC2\xAA\xC2\xAB\xC2\xAC\xC2\xAD\xC2\xAE\xC2\xAF\xC2\xB0\xC2\xB1"
+ "\xC2\xB2\xC2\xB3\xC2\xB4\xC2\xB5\xC2\xB6\xC2\xB7\xC2\xB8\xC2\xB9\xC2\xBA"
+ "\xC2\xBB\xC2\xBC\xC2\xBD\xC2\xBE\xC2\xBF\xC3\x80\xC3\x81\xC3\x82\xC3\x83"
+ "\xC3\x84\xC3\x85\xC3\x86\xC3\x87\xC3\x88\xC3\x89\xC3\x8A\xC3\x8B\xC3\x8C"
+ "\xC3\x8D\xC3\x8E\xC3\x8F\xC3\x90\xC3\x91\xC3\x92\xC3\x93\xC3\x94\xC3\x95"
+ "\xC3\x96\xC3\x97\xC3\x98\xC3\x99\xC3\x9A\xC3\x9B\xC3\x9C\xC3\x9D\xC3\x9E"
+ "\xC3\x9F\xC3\xA0\xC3\xA1\xC3\xA2\xC3\xA3\xC3\xA4\xC3\xA5\xC3\xA6\xC3\xA7"
+ "\xC3\xA8\xC3\xA9\xC3\xAA\xC3\xAB\xC3\xAC\xC3\xAD\xC3\xAE\xC3\xAF\xC3\xB0"
+ "\xC3\xB1\xC3\xB2\xC3\xB3\xC3\xB4\xC3\xB5\xC3\xB6\xC3\xB7\xC3\xB8\xC3\xB9"
+ "\xC3\xBA\xC3\xBB\xC3\xBC\xC3\xBD\xC3\xBE\xC3\xBF";
+
+ std::string expected_output = "{\"all_chars\":\"" + all_chars_expected +
+ "\"}";
+ // Test JSONWriter interface
+ std::string output_js;
+ DictionaryValue valueRoot;
+ valueRoot.SetString("all_chars", all_chars);
+ JSONWriter::Write(valueRoot, &output_js);
+ ASSERT_EQ(expected_output, output_js);
+
+ // Test JSONValueSerializer interface (uses JSONWriter).
+ JSONStringValueSerializer serializer(&output_js);
+ ASSERT_TRUE(serializer.Serialize(valueRoot));
+ ASSERT_EQ(expected_output, output_js);
+}
+
+TEST(JSONValueSerializerTest, UnicodeStrings) {
+ // Unicode (UTF-16) string value -> UTF-8 JSON text.
+ DictionaryValue root;
+ string16 test(WideToUTF16(L"\x7F51\x9875"));
+ root.SetString("web", test);
+
+ static const char kExpected[] = "{\"web\":\"\xE7\xBD\x91\xE9\xA1\xB5\"}";
+
+ std::string actual;
+ JSONStringValueSerializer serializer(&actual);
+ ASSERT_TRUE(serializer.Serialize(root));
+ ASSERT_EQ(kExpected, actual);
+
+ // UTF-8 JSON text -> Unicode string value (round trip).
+ JSONStringValueDeserializer deserializer(kExpected);
+ std::unique_ptr<Value> deserial_root = deserializer.Deserialize(NULL, NULL);
+ ASSERT_TRUE(deserial_root.get());
+ DictionaryValue* dict_root =
+ static_cast<DictionaryValue*>(deserial_root.get());
+ string16 web_value;
+ ASSERT_TRUE(dict_root->GetString("web", &web_value));
+ ASSERT_EQ(test, web_value);
+}
+
+TEST(JSONValueSerializerTest, HexStrings) {
+ // Control characters in a string value -> \uXXXX escapes in JSON text.
+ DictionaryValue root;
+ string16 test(WideToUTF16(L"\x01\x02"));
+ root.SetString("test", test);
+
+ static const char kExpected[] = "{\"test\":\"\\u0001\\u0002\"}";
+
+ std::string actual;
+ JSONStringValueSerializer serializer(&actual);
+ ASSERT_TRUE(serializer.Serialize(root));
+ ASSERT_EQ(kExpected, actual);
+
+ // Escaped JSON text -> string value (round trip).
+ JSONStringValueDeserializer deserializer(kExpected);
+ std::unique_ptr<Value> deserial_root = deserializer.Deserialize(NULL, NULL);
+ ASSERT_TRUE(deserial_root.get());
+ DictionaryValue* dict_root =
+ static_cast<DictionaryValue*>(deserial_root.get());
+ string16 test_value;
+ ASSERT_TRUE(dict_root->GetString("test", &test_value));
+ ASSERT_EQ(test, test_value);
+
+ // Test converting escaped regular (printable) chars.
+ static const char kEscapedChars[] = "{\"test\":\"\\u0067\\u006f\"}";
+ JSONStringValueDeserializer deserializer2(kEscapedChars);
+ deserial_root = deserializer2.Deserialize(NULL, NULL);
+ ASSERT_TRUE(deserial_root.get());
+ dict_root = static_cast<DictionaryValue*>(deserial_root.get());
+ ASSERT_TRUE(dict_root->GetString("test", &test_value));
+ ASSERT_EQ(ASCIIToUTF16("go"), test_value);
+}
+
+// JSONReader must skip //-style and /* */-style comments outside strings,
+// keep comment-like sequences inside strings, and reject malformed comments.
+TEST(JSONValueSerializerTest, JSONReaderComments) {
+ ValidateJsonList("[ // 2, 3, ignore me ] \n1 ]");
+ ValidateJsonList("[ /* 2, \n3, ignore me ]*/ \n1 ]");
+ ValidateJsonList("//header\n[ // 2, \n// 3, \n1 ]// footer");
+ ValidateJsonList("/*\n[ // 2, \n// 3, \n1 ]*/[1]");
+ ValidateJsonList("[ 1 /* one */ ] /* end */");
+ ValidateJsonList("[ 1 //// ,2\r\n ]");
+
+ // It's ok to have a comment in a string.
+ std::unique_ptr<Value> root = JSONReader::Read("[\"// ok\\n /* foo */ \"]");
+ ASSERT_TRUE(root.get() && root->IsType(Value::TYPE_LIST));
+ ListValue* list = static_cast<ListValue*>(root.get());
+ ASSERT_EQ(1U, list->GetSize());
+ Value* elt = NULL;
+ ASSERT_TRUE(list->Get(0, &elt));
+ std::string value;
+ ASSERT_TRUE(elt && elt->GetAsString(&value));
+ ASSERT_EQ("// ok\n /* foo */ ", value);
+
+ // You can't nest comments.
+ root = JSONReader::Read("/* /* inner */ outer */ [ 1 ]");
+ ASSERT_FALSE(root.get());
+
+ // Not an open comment token.
+ root = JSONReader::Read("/ * * / [1]");
+ ASSERT_FALSE(root.get());
+}
+
+#if !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
+// Fixture that provides a fresh unique temp directory for each file-based
+// serializer test.
+class JSONFileValueSerializerTest : public testing::Test {
+ protected:
+ void SetUp() override { ASSERT_TRUE(temp_dir_.CreateUniqueTempDir()); }
+
+ // Scratch directory for files written by the tests; cleaned up on teardown.
+ base::ScopedTempDir temp_dir_;
+};
+
+// Reads serializer_test.json from test data, checks its contents, writes it
+// back out, and verifies the written file is textually identical.
+TEST_F(JSONFileValueSerializerTest, Roundtrip) {
+ base::FilePath original_file_path;
+ ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &original_file_path));
+ original_file_path =
+ original_file_path.Append(FILE_PATH_LITERAL("serializer_test.json"));
+
+ ASSERT_TRUE(PathExists(original_file_path));
+
+ JSONFileValueDeserializer deserializer(original_file_path);
+ std::unique_ptr<Value> root;
+ root = deserializer.Deserialize(NULL, NULL);
+
+ ASSERT_TRUE(root.get());
+ ASSERT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
+
+ DictionaryValue* root_dict = static_cast<DictionaryValue*>(root.get());
+
+ // Spot-check each expected member of the test-data dictionary.
+ Value* null_value = NULL;
+ ASSERT_TRUE(root_dict->Get("null", &null_value));
+ ASSERT_TRUE(null_value);
+ ASSERT_TRUE(null_value->IsType(Value::TYPE_NULL));
+
+ bool bool_value = false;
+ ASSERT_TRUE(root_dict->GetBoolean("bool", &bool_value));
+ ASSERT_TRUE(bool_value);
+
+ int int_value = 0;
+ ASSERT_TRUE(root_dict->GetInteger("int", &int_value));
+ ASSERT_EQ(42, int_value);
+
+ std::string string_value;
+ ASSERT_TRUE(root_dict->GetString("string", &string_value));
+ ASSERT_EQ("hello", string_value);
+
+ // Now try writing.
+ const base::FilePath written_file_path =
+ temp_dir_.path().Append(FILE_PATH_LITERAL("test_output.js"));
+
+ ASSERT_FALSE(PathExists(written_file_path));
+ JSONFileValueSerializer serializer(written_file_path);
+ ASSERT_TRUE(serializer.Serialize(*root));
+ ASSERT_TRUE(PathExists(written_file_path));
+
+ // Now compare file contents.
+ EXPECT_TRUE(TextContentsEqual(original_file_path, written_file_path));
+ EXPECT_TRUE(base::DeleteFile(written_file_path, false));
+}
+
+// Same round-trip as above, but with nested structures
+// (serializer_nested_test.json).
+TEST_F(JSONFileValueSerializerTest, RoundtripNested) {
+ base::FilePath original_file_path;
+ ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &original_file_path));
+ original_file_path = original_file_path.Append(
+ FILE_PATH_LITERAL("serializer_nested_test.json"));
+
+ ASSERT_TRUE(PathExists(original_file_path));
+
+ JSONFileValueDeserializer deserializer(original_file_path);
+ std::unique_ptr<Value> root;
+ root = deserializer.Deserialize(NULL, NULL);
+ ASSERT_TRUE(root.get());
+
+ // Now try writing.
+ base::FilePath written_file_path = temp_dir_.path().Append(
+ FILE_PATH_LITERAL("test_output.json"));
+
+ ASSERT_FALSE(PathExists(written_file_path));
+ JSONFileValueSerializer serializer(written_file_path);
+ ASSERT_TRUE(serializer.Serialize(*root));
+ ASSERT_TRUE(PathExists(written_file_path));
+
+ // Now compare file contents.
+ EXPECT_TRUE(TextContentsEqual(original_file_path, written_file_path));
+ EXPECT_TRUE(base::DeleteFile(written_file_path, false));
+}
+
+// A file with no whitespace between tokens must still deserialize.
+TEST_F(JSONFileValueSerializerTest, NoWhitespace) {
+ base::FilePath source_file_path;
+ ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &source_file_path));
+ source_file_path = source_file_path.Append(
+ FILE_PATH_LITERAL("serializer_test_nowhitespace.json"));
+ ASSERT_TRUE(PathExists(source_file_path));
+ JSONFileValueDeserializer deserializer(source_file_path);
+ std::unique_ptr<Value> root;
+ root = deserializer.Deserialize(NULL, NULL);
+ ASSERT_TRUE(root.get());
+}
+#endif // !__ANDROID__ && !__ANDROID_HOST__
+
+} // namespace
+
+} // namespace base
diff --git a/libchrome/base/json/json_writer.cc b/libchrome/base/json/json_writer.cc
new file mode 100644
index 0000000..0b658ee
--- /dev/null
+++ b/libchrome/base/json/json_writer.cc
@@ -0,0 +1,210 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_writer.h"
+
+#include <stdint.h>
+
+#include <cmath>
+#include <limits>
+
+#include "base/json/string_escape.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/values.h"
+#include "build/build_config.h"
+
+namespace base {
+
+#if defined(OS_WIN)
+const char kPrettyPrintLineEnding[] = "\r\n";
+#else
+const char kPrettyPrintLineEnding[] = "\n";
+#endif
+
+// Convenience wrapper: serializes |node| into |json| with default (compact)
+// options.
+// static
+bool JSONWriter::Write(const Value& node, std::string* json) {
+ return WriteWithOptions(node, 0, json);
+}
+
+// Serializes |node| into |json| honoring the OPTIONS_* bits. Clears any
+// previous contents of |json|; returns false if some value (e.g. a binary
+// value) could not be serialized.
+// static
+bool JSONWriter::WriteWithOptions(const Value& node,
+ int options,
+ std::string* json) {
+ json->clear();
+ // Is there a better way to estimate the size of the output?
+ json->reserve(1024);
+
+ JSONWriter writer(options, json);
+ bool result = writer.BuildJSONString(node, 0U);
+
+ // Pretty-printed output ends with a trailing newline.
+ if (options & OPTIONS_PRETTY_PRINT)
+ json->append(kPrettyPrintLineEnding);
+
+ return result;
+}
+
+// Decodes the OPTIONS_* bitmask into per-feature flags. |json| is the output
+// buffer and must outlive the writer; it is not owned.
+JSONWriter::JSONWriter(int options, std::string* json)
+ : omit_binary_values_((options & OPTIONS_OMIT_BINARY_VALUES) != 0),
+ omit_double_type_preservation_(
+ (options & OPTIONS_OMIT_DOUBLE_TYPE_PRESERVATION) != 0),
+ pretty_print_((options & OPTIONS_PRETTY_PRINT) != 0),
+ json_string_(json) {
+ DCHECK(json);
+}
+
+// Recursively appends the serialization of |node| to |json_string_|.
+// |depth| is the current dictionary nesting level and is used only for
+// pretty-printed indentation. Returns false if any value could not be
+// serialized (currently only binary values without OPTIONS_OMIT_BINARY_VALUES).
+bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
+ switch (node.GetType()) {
+ case Value::TYPE_NULL: {
+ json_string_->append("null");
+ return true;
+ }
+
+ case Value::TYPE_BOOLEAN: {
+ bool value;
+ bool result = node.GetAsBoolean(&value);
+ DCHECK(result);
+ json_string_->append(value ? "true" : "false");
+ return result;
+ }
+
+ case Value::TYPE_INTEGER: {
+ int value;
+ bool result = node.GetAsInteger(&value);
+ DCHECK(result);
+ json_string_->append(IntToString(value));
+ return result;
+ }
+
+ case Value::TYPE_DOUBLE: {
+ double value;
+ bool result = node.GetAsDouble(&value);
+ DCHECK(result);
+ // Optionally emit integral doubles as plain integers.
+ // NOTE(review): int64_t max is not exactly representable as a double,
+ // so the <= bound admits values equal to 2^63 — confirm the cast below
+ // cannot overflow for such inputs.
+ if (omit_double_type_preservation_ &&
+ value <= std::numeric_limits<int64_t>::max() &&
+ value >= std::numeric_limits<int64_t>::min() &&
+ std::floor(value) == value) {
+ json_string_->append(Int64ToString(static_cast<int64_t>(value)));
+ return result;
+ }
+ std::string real = DoubleToString(value);
+ // Ensure that the number has a .0 if there's no decimal or 'e'. This
+ // makes sure that when we read the JSON back, it's interpreted as a
+ // real rather than an int.
+ if (real.find('.') == std::string::npos &&
+ real.find('e') == std::string::npos &&
+ real.find('E') == std::string::npos) {
+ real.append(".0");
+ }
+ // The JSON spec requires that non-integer values in the range (-1,1)
+ // have a zero before the decimal point - ".52" is not valid, "0.52" is.
+ if (real[0] == '.') {
+ real.insert(static_cast<size_t>(0), static_cast<size_t>(1), '0');
+ } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
+ // "-.1" bad "-0.1" good
+ real.insert(static_cast<size_t>(1), static_cast<size_t>(1), '0');
+ }
+ json_string_->append(real);
+ return result;
+ }
+
+ case Value::TYPE_STRING: {
+ std::string value;
+ bool result = node.GetAsString(&value);
+ DCHECK(result);
+ EscapeJSONString(value, true, json_string_);
+ return result;
+ }
+
+ case Value::TYPE_LIST: {
+ json_string_->push_back('[');
+ if (pretty_print_)
+ json_string_->push_back(' ');
+
+ const ListValue* list = NULL;
+ bool first_value_has_been_output = false;
+ bool result = node.GetAsList(&list);
+ DCHECK(result);
+ for (const auto& value : *list) {
+ // Skipped binary elements also skip the separating comma.
+ if (omit_binary_values_ && value->GetType() == Value::TYPE_BINARY)
+ continue;
+
+ if (first_value_has_been_output) {
+ json_string_->push_back(',');
+ if (pretty_print_)
+ json_string_->push_back(' ');
+ }
+
+ // Lists do not increase the pretty-print indent depth.
+ if (!BuildJSONString(*value, depth))
+ result = false;
+
+ first_value_has_been_output = true;
+ }
+
+ if (pretty_print_)
+ json_string_->push_back(' ');
+ json_string_->push_back(']');
+ return result;
+ }
+
+ case Value::TYPE_DICTIONARY: {
+ json_string_->push_back('{');
+ if (pretty_print_)
+ json_string_->append(kPrettyPrintLineEnding);
+
+ const DictionaryValue* dict = NULL;
+ bool first_value_has_been_output = false;
+ bool result = node.GetAsDictionary(&dict);
+ DCHECK(result);
+ for (DictionaryValue::Iterator itr(*dict); !itr.IsAtEnd();
+ itr.Advance()) {
+ // Skipped binary members also skip their key and separating comma.
+ if (omit_binary_values_ &&
+ itr.value().GetType() == Value::TYPE_BINARY) {
+ continue;
+ }
+
+ if (first_value_has_been_output) {
+ json_string_->push_back(',');
+ if (pretty_print_)
+ json_string_->append(kPrettyPrintLineEnding);
+ }
+
+ if (pretty_print_)
+ IndentLine(depth + 1U);
+
+ EscapeJSONString(itr.key(), true, json_string_);
+ json_string_->push_back(':');
+ if (pretty_print_)
+ json_string_->push_back(' ');
+
+ if (!BuildJSONString(itr.value(), depth + 1U))
+ result = false;
+
+ first_value_has_been_output = true;
+ }
+
+ if (pretty_print_) {
+ json_string_->append(kPrettyPrintLineEnding);
+ IndentLine(depth);
+ }
+
+ json_string_->push_back('}');
+ return result;
+ }
+
+ case Value::TYPE_BINARY:
+ // Successful only if we're allowed to omit it.
+ DLOG_IF(ERROR, !omit_binary_values_) << "Cannot serialize binary value.";
+ return omit_binary_values_;
+ }
+ NOTREACHED();
+ return false;
+}
+
+// Appends pretty-print indentation: three spaces per nesting level.
+void JSONWriter::IndentLine(size_t depth) {
+ json_string_->append(depth * 3U, ' ');
+}
+
+} // namespace base
diff --git a/libchrome/base/json/json_writer.h b/libchrome/base/json/json_writer.h
new file mode 100644
index 0000000..ef43341
--- /dev/null
+++ b/libchrome/base/json/json_writer.h
@@ -0,0 +1,73 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_JSON_JSON_WRITER_H_
+#define BASE_JSON_JSON_WRITER_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+
+class Value;
+
+class BASE_EXPORT JSONWriter {
+ public:
+ enum Options {
+ // This option instructs the writer that if a Binary value is encountered,
+ // the value (and key if within a dictionary) will be omitted from the
+ // output, and success will be returned. Otherwise, if a binary value is
+ // encountered, failure will be returned.
+ OPTIONS_OMIT_BINARY_VALUES = 1 << 0,
+
+ // This option instructs the writer to write doubles that have no fractional
+ // part as a normal integer (i.e., without using exponential notation
+ // or appending a '.0') as long as the value is within the range of a
+ // 64-bit int.
+ OPTIONS_OMIT_DOUBLE_TYPE_PRESERVATION = 1 << 1,
+
+ // Return a slightly nicer formatted json string (pads with whitespace and
+ // newlines to help with readability).
+ OPTIONS_PRETTY_PRINT = 1 << 2,
+ };
+
+ // Given a root node, generates a JSON string and puts it into |json|.
+ // TODO(tc): Should we generate json if it would be invalid json (e.g.,
+ // |node| is not a DictionaryValue/ListValue or if there are inf/-inf float
+ // values)? Return true on success and false on failure.
+ static bool Write(const Value& node, std::string* json);
+
+ // Same as above but with |options| which is a bunch of JSONWriter::Options
+ // bitwise ORed together. Return true on success and false on failure.
+ static bool WriteWithOptions(const Value& node,
+ int options,
+ std::string* json);
+
+ private:
+ // Instances are created only by the static Write* entry points.
+ JSONWriter(int options, std::string* json);
+
+ // Called recursively to build the JSON string. When completed,
+ // |json_string_| will contain the JSON.
+ bool BuildJSONString(const Value& node, size_t depth);
+
+ // Adds space to json_string_ for the indent level.
+ void IndentLine(size_t depth);
+
+ // Flags decoded from the Options bitmask passed to the constructor.
+ bool omit_binary_values_;
+ bool omit_double_type_preservation_;
+ bool pretty_print_;
+
+ // Where we write JSON data as we generate it.
+ std::string* json_string_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSONWriter);
+};
+
+} // namespace base
+
+#endif // BASE_JSON_JSON_WRITER_H_
diff --git a/libchrome/base/json/json_writer_unittest.cc b/libchrome/base/json/json_writer_unittest.cc
new file mode 100644
index 0000000..233ac5e
--- /dev/null
+++ b/libchrome/base/json/json_writer_unittest.cc
@@ -0,0 +1,154 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/json_writer.h"
+
+#include "base/memory/ptr_util.h"
+#include "base/values.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(JSONWriterTest, BasicTypes) {
+ std::string output_js;
+
+ // Test null.
+ EXPECT_TRUE(JSONWriter::Write(*Value::CreateNullValue(), &output_js));
+ EXPECT_EQ("null", output_js);
+
+ // Test empty dict.
+ EXPECT_TRUE(JSONWriter::Write(DictionaryValue(), &output_js));
+ EXPECT_EQ("{}", output_js);
+
+ // Test empty list.
+ EXPECT_TRUE(JSONWriter::Write(ListValue(), &output_js));
+ EXPECT_EQ("[]", output_js);
+
+ // Test integer values.
+ EXPECT_TRUE(JSONWriter::Write(FundamentalValue(42), &output_js));
+ EXPECT_EQ("42", output_js);
+
+ // Test boolean values.
+ EXPECT_TRUE(JSONWriter::Write(FundamentalValue(true), &output_js));
+ EXPECT_EQ("true", output_js);
+
+ // Test Real values should always have a decimal or an 'e'.
+ EXPECT_TRUE(JSONWriter::Write(FundamentalValue(1.0), &output_js));
+ EXPECT_EQ("1.0", output_js);
+
+ // Test Real values in the range (-1, 1) must have leading zeros
+ EXPECT_TRUE(JSONWriter::Write(FundamentalValue(0.2), &output_js));
+ EXPECT_EQ("0.2", output_js);
+
+ // Test Real values in the range (-1, 1) must have leading zeros
+ EXPECT_TRUE(JSONWriter::Write(FundamentalValue(-0.8), &output_js));
+ EXPECT_EQ("-0.8", output_js);
+
+ // Test String values.
+ EXPECT_TRUE(JSONWriter::Write(StringValue("foo"), &output_js));
+ EXPECT_EQ("\"foo\"", output_js);
+}
+
+TEST(JSONWriterTest, NestedTypes) {
+ std::string output_js;
+
+ // Build a dict containing a list that holds an inner dict, an empty list,
+ // and a boolean, to exercise nested serialization.
+ DictionaryValue root_dict;
+ std::unique_ptr<ListValue> list(new ListValue());
+ std::unique_ptr<DictionaryValue> inner_dict(new DictionaryValue());
+ inner_dict->SetInteger("inner int", 10);
+ list->Append(std::move(inner_dict));
+ list->Append(WrapUnique(new ListValue()));
+ list->AppendBoolean(true);
+ root_dict.Set("list", std::move(list));
+
+ // Test compact output, then the pretty-printer.
+ EXPECT_TRUE(JSONWriter::Write(root_dict, &output_js));
+ EXPECT_EQ("{\"list\":[{\"inner int\":10},[],true]}", output_js);
+ EXPECT_TRUE(JSONWriter::WriteWithOptions(
+ root_dict, JSONWriter::OPTIONS_PRETTY_PRINT, &output_js));
+
+ // The pretty-printer uses a different newline style on Windows than on
+ // other platforms.
+#if defined(OS_WIN)
+#define JSON_NEWLINE "\r\n"
+#else
+#define JSON_NEWLINE "\n"
+#endif
+ EXPECT_EQ("{" JSON_NEWLINE
+ " \"list\": [ {" JSON_NEWLINE
+ " \"inner int\": 10" JSON_NEWLINE
+ " }, [ ], true ]" JSON_NEWLINE
+ "}" JSON_NEWLINE,
+ output_js);
+#undef JSON_NEWLINE
+}
+
+// Keys set without path expansion must serialize literally ("a.b" stays one
+// key), while path-expanded keys produce nested dictionaries.
+TEST(JSONWriterTest, KeysWithPeriods) {
+ std::string output_js;
+
+ DictionaryValue period_dict;
+ period_dict.SetIntegerWithoutPathExpansion("a.b", 3);
+ period_dict.SetIntegerWithoutPathExpansion("c", 2);
+ std::unique_ptr<DictionaryValue> period_dict2(new DictionaryValue());
+ period_dict2->SetIntegerWithoutPathExpansion("g.h.i.j", 1);
+ period_dict.SetWithoutPathExpansion("d.e.f", std::move(period_dict2));
+ EXPECT_TRUE(JSONWriter::Write(period_dict, &output_js));
+ EXPECT_EQ("{\"a.b\":3,\"c\":2,\"d.e.f\":{\"g.h.i.j\":1}}", output_js);
+
+ DictionaryValue period_dict3;
+ period_dict3.SetInteger("a.b", 2);
+ period_dict3.SetIntegerWithoutPathExpansion("a.b", 1);
+ EXPECT_TRUE(JSONWriter::Write(period_dict3, &output_js));
+ EXPECT_EQ("{\"a\":{\"b\":2},\"a.b\":1}", output_js);
+}
+
+TEST(JSONWriterTest, BinaryValues) {
+ std::string output_js;
+
+ // Binary values should return errors unless suppressed via the
+ // OPTIONS_OMIT_BINARY_VALUES flag. A binary root omitted this way yields
+ // empty output.
+ std::unique_ptr<Value> root(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
+ EXPECT_FALSE(JSONWriter::Write(*root, &output_js));
+ EXPECT_TRUE(JSONWriter::WriteWithOptions(
+ *root, JSONWriter::OPTIONS_OMIT_BINARY_VALUES, &output_js));
+ EXPECT_TRUE(output_js.empty());
+
+ // Omitted binary list elements must not leave stray commas behind.
+ ListValue binary_list;
+ binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
+ binary_list.Append(WrapUnique(new FundamentalValue(5)));
+ binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
+ binary_list.Append(WrapUnique(new FundamentalValue(2)));
+ binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
+ EXPECT_FALSE(JSONWriter::Write(binary_list, &output_js));
+ EXPECT_TRUE(JSONWriter::WriteWithOptions(
+ binary_list, JSONWriter::OPTIONS_OMIT_BINARY_VALUES, &output_js));
+ EXPECT_EQ("[5,2]", output_js);
+
+ // Omitted binary dict members must drop their keys too.
+ DictionaryValue binary_dict;
+ binary_dict.Set("a", BinaryValue::CreateWithCopiedBuffer("asdf", 4));
+ binary_dict.SetInteger("b", 5);
+ binary_dict.Set("c", BinaryValue::CreateWithCopiedBuffer("asdf", 4));
+ binary_dict.SetInteger("d", 2);
+ binary_dict.Set("e", BinaryValue::CreateWithCopiedBuffer("asdf", 4));
+ EXPECT_FALSE(JSONWriter::Write(binary_dict, &output_js));
+ EXPECT_TRUE(JSONWriter::WriteWithOptions(
+ binary_dict, JSONWriter::OPTIONS_OMIT_BINARY_VALUES, &output_js));
+ EXPECT_EQ("{\"b\":5,\"d\":2}", output_js);
+}
+
+TEST(JSONWriterTest, DoublesAsInts) {
+ std::string output_js;
+
+ // Test allowing a double with no fractional part to be written as an
+ // integer (no exponent, no trailing ".0").
+ FundamentalValue double_value(1e10);
+ EXPECT_TRUE(JSONWriter::WriteWithOptions(
+ double_value, JSONWriter::OPTIONS_OMIT_DOUBLE_TYPE_PRESERVATION,
+ &output_js));
+ EXPECT_EQ("10000000000", output_js);
+}
+
+} // namespace base
diff --git a/libchrome/base/json/string_escape.cc b/libchrome/base/json/string_escape.cc
new file mode 100644
index 0000000..f67fa93
--- /dev/null
+++ b/libchrome/base/json/string_escape.cc
@@ -0,0 +1,167 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/string_escape.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+#include <string>
+
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversion_utils.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/third_party/icu/icu_utf.h"
+
+namespace base {
+
+namespace {
+
+// Format string for printing a \uXXXX escape sequence.
+const char kU16EscapeFormat[] = "\\u%04X";
+
+// The code point to output for an invalid input code unit.
+const uint32_t kReplacementCodePoint = 0xFFFD;
+
+// Used below in EscapeSpecialCodePoint().
+static_assert('<' == 0x3C, "less than sign must be 0x3c");
+
+// Try to escape the |code_point| if it is a known special character. If
+// successful, returns true and appends the escape sequence to |dest|. This
+// isn't required by the spec, but it's more readable by humans.
+bool EscapeSpecialCodePoint(uint32_t code_point, std::string* dest) {
+ // WARNING: if you add a new case here, you need to update the reader as well.
+ // Note: \v is in the reader, but not here since the JSON spec doesn't
+ // allow it.
+ switch (code_point) {
+ case '\b':
+ dest->append("\\b");
+ break;
+ case '\f':
+ dest->append("\\f");
+ break;
+ case '\n':
+ dest->append("\\n");
+ break;
+ case '\r':
+ dest->append("\\r");
+ break;
+ case '\t':
+ dest->append("\\t");
+ break;
+ case '\\':
+ dest->append("\\\\");
+ break;
+ case '"':
+ dest->append("\\\"");
+ break;
+ // Escape < to prevent script execution; escaping > is not necessary and
+ // not doing so saves a few bytes.
+ case '<':
+ dest->append("\\u003C");
+ break;
+ // Escape the "Line Separator" and "Paragraph Separator" characters, since
+ // they should be treated like a new line \r or \n.
+ case 0x2028:
+ dest->append("\\u2028");
+ break;
+ case 0x2029:
+ dest->append("\\u2029");
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+// Shared implementation for the UTF-8 and UTF-16 EscapeJSONString overloads.
+// Appends the escaped form of |str| to |dest|; invalid code units are
+// replaced with U+FFFD. Returns true iff no replacement was needed.
+template <typename S>
+bool EscapeJSONStringImpl(const S& str, bool put_in_quotes, std::string* dest) {
+ bool did_replacement = false;
+
+ if (put_in_quotes)
+ dest->push_back('"');
+
+ // Casting is necessary because ICU uses int32_t. Try and do so safely.
+ CHECK_LE(str.length(),
+ static_cast<size_t>(std::numeric_limits<int32_t>::max()));
+ const int32_t length = static_cast<int32_t>(str.length());
+
+ for (int32_t i = 0; i < length; ++i) {
+ uint32_t code_point;
+ // ReadUnicodeCharacter advances |i| past multi-unit sequences.
+ if (!ReadUnicodeCharacter(str.data(), length, &i, &code_point)) {
+ code_point = kReplacementCodePoint;
+ did_replacement = true;
+ }
+
+ if (EscapeSpecialCodePoint(code_point, dest))
+ continue;
+
+ // Escape non-printing characters.
+ if (code_point < 32)
+ base::StringAppendF(dest, kU16EscapeFormat, code_point);
+ else
+ WriteUnicodeCharacter(code_point, dest);
+ }
+
+ if (put_in_quotes)
+ dest->push_back('"');
+
+ return !did_replacement;
+}
+
+} // namespace
+
+// UTF-8 overload; see string_escape.h for the contract.
+bool EscapeJSONString(const StringPiece& str,
+ bool put_in_quotes,
+ std::string* dest) {
+ return EscapeJSONStringImpl(str, put_in_quotes, dest);
+}
+
+// UTF-16 overload; output is always UTF-8.
+bool EscapeJSONString(const StringPiece16& str,
+ bool put_in_quotes,
+ std::string* dest) {
+ return EscapeJSONStringImpl(str, put_in_quotes, dest);
+}
+
+// Returns the quoted, escaped form of |str|. DCHECKs (rather than reporting)
+// if a lossy replacement occurred.
+std::string GetQuotedJSONString(const StringPiece& str) {
+ std::string dest;
+ bool ok = EscapeJSONStringImpl(str, true, &dest);
+ DCHECK(ok);
+ return dest;
+}
+
+// UTF-16 overload of the above.
+std::string GetQuotedJSONString(const StringPiece16& str) {
+ std::string dest;
+ bool ok = EscapeJSONStringImpl(str, true, &dest);
+ DCHECK(ok);
+ return dest;
+}
+
+// Escapes raw bytes without UTF-8 validation; see the header for the caveat
+// that the result is not necessarily valid JSON.
+std::string EscapeBytesAsInvalidJSONString(const StringPiece& str,
+ bool put_in_quotes) {
+ std::string dest;
+
+ if (put_in_quotes)
+ dest.push_back('"');
+
+ for (StringPiece::const_iterator it = str.begin(); it != str.end(); ++it) {
+ unsigned char c = *it;
+ if (EscapeSpecialCodePoint(c, &dest))
+ continue;
+
+ // Printable ASCII (32..126) passes through; everything else becomes a
+ // \uXXXX escape of the raw byte value.
+ if (c < 32 || c > 126)
+ base::StringAppendF(&dest, kU16EscapeFormat, c);
+ else
+ dest.push_back(*it);
+ }
+
+ if (put_in_quotes)
+ dest.push_back('"');
+
+ return dest;
+}
+
+} // namespace base
diff --git a/libchrome/base/json/string_escape.h b/libchrome/base/json/string_escape.h
new file mode 100644
index 0000000..b66b7e5
--- /dev/null
+++ b/libchrome/base/json/string_escape.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines utility functions for escaping strings suitable for JSON.
+
+#ifndef BASE_JSON_STRING_ESCAPE_H_
+#define BASE_JSON_STRING_ESCAPE_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// Appends to |dest| an escaped version of |str|. Valid UTF-8 code units will
+// pass through from the input to the output. Invalid code units will be
+// replaced with the U+FFFD replacement character. This function returns true
+// if no replacement was necessary and false if there was a lossy replacement.
+// On return, |dest| will contain a valid UTF-8 JSON string.
+//
+// Non-printing control characters will be escaped as \uXXXX sequences for
+// readability.
+//
+// If |put_in_quotes| is true, then a leading and trailing double-quote mark
+// will be appended to |dest| as well.
+BASE_EXPORT bool EscapeJSONString(const StringPiece& str,
+ bool put_in_quotes,
+ std::string* dest);
+
+// Performs a similar function to the UTF-8 StringPiece version above,
+// converting UTF-16 code units to UTF-8 code units and escaping non-printing
+// control characters. On return, |dest| will contain a valid UTF-8 JSON string.
+BASE_EXPORT bool EscapeJSONString(const StringPiece16& str,
+ bool put_in_quotes,
+ std::string* dest);
+
+// Helper functions that wrap the above two functions but return the value
+// instead of appending. |put_in_quotes| is always true.
+BASE_EXPORT std::string GetQuotedJSONString(const StringPiece& str);
+BASE_EXPORT std::string GetQuotedJSONString(const StringPiece16& str);
+
+// Given an arbitrary byte string |str|, this will escape all non-ASCII bytes
+// as \uXXXX escape sequences. This function is *NOT* meant to be used with
+// Unicode strings and does not validate |str| as one.
+//
+// CAVEAT CALLER: The output of this function may not be valid JSON, since
+// JSON requires escape sequences to be valid UTF-16 code units. This output
+// will be mangled if passed to the base::JSONReader, since the reader will
+// interpret it as UTF-16 and convert it to UTF-8.
+//
+// The output of this function takes the *appearance* of JSON but is not in
+// fact valid according to RFC 4627.
+BASE_EXPORT std::string EscapeBytesAsInvalidJSONString(const StringPiece& str,
+ bool put_in_quotes);
+
+} // namespace base
+
+#endif // BASE_JSON_STRING_ESCAPE_H_
diff --git a/libchrome/base/json/string_escape_unittest.cc b/libchrome/base/json/string_escape_unittest.cc
new file mode 100644
index 0000000..ae3d82a
--- /dev/null
+++ b/libchrome/base/json/string_escape_unittest.cc
@@ -0,0 +1,189 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/json/string_escape.h"
+
+#include <stddef.h>
+
+#include "base/macros.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Verifies EscapeJSONString()/GetQuotedJSONString() on UTF-8 input: control
+// characters become \uXXXX escapes, invalid UTF-8 units are replaced with
+// U+FFFD, and quoting and embedded-NUL handling behave as documented.
+TEST(JSONStringEscapeTest, EscapeUTF8) {
+  const struct {
+    const char* to_escape;
+    const char* escaped;
+  } cases[] = {
+    {"\b\001aZ\"\\wee", "\\b\\u0001aZ\\\"\\\\wee"},
+    {"a\b\f\n\r\t\v\1\\.\"z",
+        "a\\b\\f\\n\\r\\t\\u000B\\u0001\\\\.\\\"z"},
+    {"b\x0f\x7f\xf0\xff!",  // \xf0\xff is not a valid UTF-8 unit.
+        "b\\u000F\x7F\xEF\xBF\xBD\xEF\xBF\xBD!"},
+    {"c<>d", "c\\u003C>d"},
+    {"Hello\xe2\x80\xa8world", "Hello\\u2028world"},
+    {"\xe2\x80\xa9purple", "\\u2029purple"},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    const char* in_ptr = cases[i].to_escape;
+    std::string in_str = in_ptr;
+
+    std::string out;
+    EscapeJSONString(in_ptr, false, &out);
+    EXPECT_EQ(std::string(cases[i].escaped), out);
+    EXPECT_TRUE(IsStringUTF8(out));
+
+    out.erase();
+    bool convert_ok = EscapeJSONString(in_str, false, &out);
+    EXPECT_EQ(std::string(cases[i].escaped), out);
+    EXPECT_TRUE(IsStringUTF8(out));
+
+    if (convert_ok) {
+      std::string fooout = GetQuotedJSONString(in_str);
+      EXPECT_EQ("\"" + std::string(cases[i].escaped) + "\"", fooout);
+      // Validate the quoted result itself (previously this re-checked |out|,
+      // which was already verified above).
+      EXPECT_TRUE(IsStringUTF8(fooout));
+    }
+  }
+
+  std::string in = cases[0].to_escape;
+  std::string out;
+  EscapeJSONString(in, false, &out);
+  EXPECT_TRUE(IsStringUTF8(out));
+
+  // test quoting
+  std::string out_quoted;
+  EscapeJSONString(in, true, &out_quoted);
+  EXPECT_EQ(out.length() + 2, out_quoted.length());
+  EXPECT_EQ(out_quoted.find(out), 1U);
+  EXPECT_TRUE(IsStringUTF8(out_quoted));
+
+  // now try with a NULL in the string
+  std::string null_prepend = "test";
+  null_prepend.push_back(0);
+  in = null_prepend + in;
+  std::string expected = "test\\u0000";
+  expected += cases[0].escaped;
+  out.clear();
+  EscapeJSONString(in, false, &out);
+  EXPECT_EQ(expected, out);
+  EXPECT_TRUE(IsStringUTF8(out));
+}
+
+// Verifies EscapeJSONString()/GetQuotedJSONString() on UTF-16 input; the
+// output must always be valid UTF-8.
+TEST(JSONStringEscapeTest, EscapeUTF16) {
+  const struct {
+    const wchar_t* to_escape;
+    const char* escaped;
+  } cases[] = {
+    {L"b\uffb1\u00ff", "b\xEF\xBE\xB1\xC3\xBF"},
+    {L"\b\001aZ\"\\wee", "\\b\\u0001aZ\\\"\\\\wee"},
+    {L"a\b\f\n\r\t\v\1\\.\"z",
+        "a\\b\\f\\n\\r\\t\\u000B\\u0001\\\\.\\\"z"},
+    {L"b\x0f\x7f\xf0\xff!", "b\\u000F\x7F\xC3\xB0\xC3\xBF!"},
+    {L"c<>d", "c\\u003C>d"},
+    {L"Hello\u2028world", "Hello\\u2028world"},
+    {L"\u2029purple", "\\u2029purple"},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    string16 in = WideToUTF16(cases[i].to_escape);
+
+    std::string out;
+    EscapeJSONString(in, false, &out);
+    EXPECT_EQ(std::string(cases[i].escaped), out);
+    EXPECT_TRUE(IsStringUTF8(out));
+
+    out = GetQuotedJSONString(in);
+    EXPECT_EQ("\"" + std::string(cases[i].escaped) + "\"", out);
+    EXPECT_TRUE(IsStringUTF8(out));
+  }
+
+  string16 in = WideToUTF16(cases[0].to_escape);
+  std::string out;
+  EscapeJSONString(in, false, &out);
+  EXPECT_TRUE(IsStringUTF8(out));
+
+  // test quoting
+  std::string out_quoted;
+  EscapeJSONString(in, true, &out_quoted);
+  EXPECT_EQ(out.length() + 2, out_quoted.length());
+  EXPECT_EQ(out_quoted.find(out), 1U);
+  // Check the quoted string (|out| was already verified above); this mirrors
+  // the equivalent check in the UTF-8 test, which checks the quoted variant.
+  EXPECT_TRUE(IsStringUTF8(out_quoted));
+
+  // now try with a NULL in the string
+  string16 null_prepend = WideToUTF16(L"test");
+  null_prepend.push_back(0);
+  in = null_prepend + in;
+  std::string expected = "test\\u0000";
+  expected += cases[0].escaped;
+  out.clear();
+  EscapeJSONString(in, false, &out);
+  EXPECT_EQ(expected, out);
+  EXPECT_TRUE(IsStringUTF8(out));
+}
+
+// Exercises UTF-16 input outside the Basic Multilingual Plane: valid
+// surrogate pairs must be converted to 4-byte UTF-8 sequences, and a lone
+// surrogate must be replaced with U+FFFD while the function reports failure.
+TEST(JSONStringEscapeTest, EscapeUTF16OutsideBMP) {
+  {
+    // {a, U+10300, !}, SMP.
+    string16 test;
+    test.push_back('a');
+    test.push_back(0xD800);
+    test.push_back(0xDF00);
+    test.push_back('!');
+    std::string actual;
+    EXPECT_TRUE(EscapeJSONString(test, false, &actual));
+    EXPECT_EQ("a\xF0\x90\x8C\x80!", actual);
+  }
+  {
+    // {U+20021, U+2002B}, SIP.
+    string16 test;
+    test.push_back(0xD840);
+    test.push_back(0xDC21);
+    test.push_back(0xD840);
+    test.push_back(0xDC2B);
+    std::string actual;
+    EXPECT_TRUE(EscapeJSONString(test, false, &actual));
+    EXPECT_EQ("\xF0\xA0\x80\xA1\xF0\xA0\x80\xAB", actual);
+  }
+  {
+    // {?, U+D800, @}, lone surrogate.
+    string16 test;
+    test.push_back('?');
+    test.push_back(0xD800);
+    test.push_back('@');
+    std::string actual;
+    // A lone high surrogate is invalid UTF-16: conversion reports failure...
+    EXPECT_FALSE(EscapeJSONString(test, false, &actual));
+    // ...but still emits the replacement character (U+FFFD) in its place.
+    EXPECT_EQ("?\xEF\xBF\xBD@", actual);
+  }
+}
+
+// EscapeBytesAsInvalidJSONString() escapes each non-ASCII byte (and NUL) as
+// a \u00XX sequence without UTF-8 validation; per the header's CAVEAT, the
+// result merely *looks* like JSON.
+TEST(JSONStringEscapeTest, EscapeBytes) {
+  const struct {
+    const char* to_escape;
+    const char* escaped;
+  } cases[] = {
+    {"b\x0f\x7f\xf0\xff!", "b\\u000F\\u007F\\u00F0\\u00FF!"},
+    {"\xe5\xc4\x4f\x05\xb6\xfd", "\\u00E5\\u00C4O\\u0005\\u00B6\\u00FD"},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    std::string in = std::string(cases[i].to_escape);
+    EXPECT_FALSE(IsStringUTF8(in));  // Inputs are deliberately invalid UTF-8.
+
+    EXPECT_EQ(std::string(cases[i].escaped),
+              EscapeBytesAsInvalidJSONString(in, false));
+    EXPECT_EQ("\"" + std::string(cases[i].escaped) + "\"",
+              EscapeBytesAsInvalidJSONString(in, true));
+  }
+
+  // An embedded NUL must be escaped as \u0000, not truncate the output.
+  const char kEmbedNull[] = { '\xab', '\x39', '\0', '\x9f', '\xab' };
+  std::string in(kEmbedNull, arraysize(kEmbedNull));
+  EXPECT_FALSE(IsStringUTF8(in));
+  EXPECT_EQ(std::string("\\u00AB9\\u0000\\u009F\\u00AB"),
+            EscapeBytesAsInvalidJSONString(in, false));
+}
+
+} // namespace base
diff --git a/libchrome/base/lazy_instance.cc b/libchrome/base/lazy_instance.cc
new file mode 100644
index 0000000..5468065
--- /dev/null
+++ b/libchrome/base/lazy_instance.cc
@@ -0,0 +1,54 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/lazy_instance.h"
+
+#include "base/at_exit.h"
+#include "base/atomicops.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+namespace internal {
+
+// TODO(joth): This function could be shared with Singleton, in place of its
+// WaitForInstance() call.
+//
+// Returns true if the calling thread won the creation race and must construct
+// the instance. Otherwise spins until the winning thread finishes
+// construction, then returns false.
+bool NeedsLazyInstance(subtle::AtomicWord* state) {
+  // Try to create the instance, if we're the first, will go from 0 to
+  // kLazyInstanceStateCreating, otherwise we've already been beaten here.
+  // The memory access has no memory ordering as state 0 and
+  // kLazyInstanceStateCreating have no associated data (memory barriers are
+  // all about ordering of memory accesses to *associated* data).
+  if (subtle::NoBarrier_CompareAndSwap(state, 0,
+                                       kLazyInstanceStateCreating) == 0)
+    // Caller must create instance
+    return true;
+
+  // It's either in the process of being created, or already created. Spin.
+  // The load has acquire memory ordering as a thread which sees
+  // state_ == STATE_CREATED needs to acquire visibility over
+  // the associated data (buf_). Pairing Release_Store is in
+  // CompleteLazyInstance().
+  while (subtle::Acquire_Load(state) == kLazyInstanceStateCreating) {
+    PlatformThread::YieldCurrentThread();
+  }
+  // Someone else created the instance.
+  return false;
+}
+
+// Publishes the freshly created instance: stores |new_instance| into |state|
+// with release semantics and, if |dtor| is non-null, registers it with
+// AtExitManager so the instance is destroyed at program exit.
+void CompleteLazyInstance(subtle::AtomicWord* state,
+                          subtle::AtomicWord new_instance,
+                          void* lazy_instance,
+                          void (*dtor)(void*)) {
+  // Instance is created, go from CREATING to CREATED.
+  // Releases visibility over private_buf_ to readers. Pairing Acquire_Load's
+  // are in NeedsInstance() and Pointer().
+  subtle::Release_Store(state, new_instance);
+
+  // Make sure that the lazily instantiated object will get destroyed at exit.
+  if (dtor)
+    AtExitManager::RegisterCallback(dtor, lazy_instance);
+}
+
+} // namespace internal
+} // namespace base
diff --git a/libchrome/base/lazy_instance.h b/libchrome/base/lazy_instance.h
new file mode 100644
index 0000000..ac970c5
--- /dev/null
+++ b/libchrome/base/lazy_instance.h
@@ -0,0 +1,210 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The LazyInstance<Type, Traits> class manages a single instance of Type,
+// which will be lazily created on the first time it's accessed. This class is
+// useful for places you would normally use a function-level static, but you
+// need to have guaranteed thread-safety. The Type constructor will only ever
+// be called once, even if two threads are racing to create the object. Get()
+// and Pointer() will always return the same, completely initialized instance.
+// When the instance is constructed it is registered with AtExitManager. The
+// destructor will be called on program exit.
+//
+// LazyInstance is completely thread safe, assuming that you create it safely.
+// The class was designed to be POD initialized, so it shouldn't require a
+// static constructor. It really only makes sense to declare a LazyInstance as
+// a global variable using the LAZY_INSTANCE_INITIALIZER initializer.
+//
+// LazyInstance is similar to Singleton, except it does not have the singleton
+// property. You can have multiple LazyInstance's of the same type, and each
+// will manage a unique instance. It also preallocates the space for Type, as
+// to avoid allocating the Type instance on the heap. This may help with the
+// performance of creating the instance, and reducing heap fragmentation. This
+// requires that Type be a complete type so we can determine the size.
+//
+// Example usage:
+// static LazyInstance<MyClass> my_instance = LAZY_INSTANCE_INITIALIZER;
+// void SomeMethod() {
+// my_instance.Get().SomeMethod(); // MyClass::SomeMethod()
+//
+// MyClass* ptr = my_instance.Pointer();
+// ptr->DoDoDo(); // MyClass::DoDoDo
+// }
+
+#ifndef BASE_LAZY_INSTANCE_H_
+#define BASE_LAZY_INSTANCE_H_
+
+#include <new> // For placement new.
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/debug/leak_annotations.h"
+#include "base/logging.h"
+#include "base/memory/aligned_memory.h"
+#include "base/threading/thread_restrictions.h"
+
+// LazyInstance uses its own struct initializer-list style static
+// initialization, as base's LINKER_INITIALIZED requires a constructor and on
+// some compilers (notably gcc 4.4) this still ends up needing runtime
+// initialization.
+#ifdef __clang__
+ #define LAZY_INSTANCE_INITIALIZER {}
+#else
+ #define LAZY_INSTANCE_INITIALIZER {0, 0}
+#endif
+
+namespace base {
+
+// Default traits for LazyInstance<Type>: constructs Type with placement new
+// inside the caller-provided preallocated buffer, and has the destructor
+// registered to run at exit (see kRegisterOnExit).
+template <typename Type>
+struct DefaultLazyInstanceTraits {
+  // When true, LazyInstance registers Delete() with AtExitManager.
+  static const bool kRegisterOnExit = true;
+#ifndef NDEBUG
+  // Debug-only: disallow first access from non-joinable threads, the same
+  // hazard checked for singletons.
+  static const bool kAllowedToAccessOnNonjoinableThread = false;
+#endif
+
+  static Type* New(void* instance) {
+    DCHECK_EQ(reinterpret_cast<uintptr_t>(instance) & (ALIGNOF(Type) - 1), 0u)
+        << ": Bad boy, the buffer passed to placement new is not aligned!\n"
+        "This may break some stuff like SSE-based optimizations assuming the "
+        "<Type> objects are word aligned.";
+    // Use placement new to initialize our instance in our preallocated space.
+    // The parenthesis is very important here to force POD type initialization.
+    return new (instance) Type();
+  }
+  static void Delete(Type* instance) {
+    // Explicitly call the destructor.
+    instance->~Type();
+  }
+};
+
+// We pull out some of the functionality into non-templated functions, so we
+// can implement the more complicated pieces out of line in the .cc file.
+namespace internal {
+
+// Use LazyInstance<T>::Leaky for a less-verbose call-site typedef; e.g.:
+// base::LazyInstance<T>::Leaky my_leaky_lazy_instance;
+// instead of:
+// base::LazyInstance<T, base::internal::LeakyLazyInstanceTraits<T> >
+// my_leaky_lazy_instance;
+// (especially when T is MyLongTypeNameImplClientHolderFactory).
+// Only use this internal::-qualified verbose form to extend this traits class
+// (depending on its implementation details).
+template <typename Type>
+struct LeakyLazyInstanceTraits {
+  // Never registered for destruction: the instance is intentionally leaked.
+  static const bool kRegisterOnExit = false;
+#ifndef NDEBUG
+  // Safe to touch from non-joinable threads since the instance is never
+  // destroyed.
+  static const bool kAllowedToAccessOnNonjoinableThread = true;
+#endif
+
+  static Type* New(void* instance) {
+    ANNOTATE_SCOPED_MEMORY_LEAK;  // Suppress leak-checker reports.
+    return DefaultLazyInstanceTraits<Type>::New(instance);
+  }
+  static void Delete(Type*) {}  // Intentionally a no-op.
+};
+
+// Our AtomicWord doubles as a spinlock, where a value of
+// kBeingCreatedMarker means the spinlock is being held for creation.
+static const subtle::AtomicWord kLazyInstanceStateCreating = 1;
+
+// Check if instance needs to be created. If so return true otherwise
+// if another thread has beat us, wait for instance to be created and
+// return false.
+BASE_EXPORT bool NeedsLazyInstance(subtle::AtomicWord* state);
+
+// After creating an instance, call this to register the dtor to be called
+// at program exit and to update the atomic state to hold the |new_instance|
+BASE_EXPORT void CompleteLazyInstance(subtle::AtomicWord* state,
+ subtle::AtomicWord new_instance,
+ void* lazy_instance,
+ void (*dtor)(void*));
+
+} // namespace internal
+
+template <typename Type, typename Traits = DefaultLazyInstanceTraits<Type> >
+class LazyInstance {
+ public:
+  // Do not define a destructor, as doing so makes LazyInstance a
+  // non-POD-struct. We don't want that because then a static initializer will
+  // be created to register the (empty) destructor with atexit() under MSVC, for
+  // example. We handle destruction of the contained Type class explicitly via
+  // the OnExit member function, where needed.
+  // ~LazyInstance() {}
+
+  // Convenience typedef to avoid having to repeat Type for leaky lazy
+  // instances.
+  typedef LazyInstance<Type, internal::LeakyLazyInstanceTraits<Type> > Leaky;
+
+  // Returns a reference to the instance, creating it on first use.
+  Type& Get() {
+    return *Pointer();
+  }
+
+  // Returns a pointer to the instance, creating it (thread-safely) on first
+  // access; every caller receives the same fully constructed object.
+  Type* Pointer() {
+#ifndef NDEBUG
+    // Avoid making TLS lookup on release builds.
+    if (!Traits::kAllowedToAccessOnNonjoinableThread)
+      ThreadRestrictions::AssertSingletonAllowed();
+#endif
+    // If any bit in the created mask is true, the instance has already been
+    // fully constructed.
+    static const subtle::AtomicWord kLazyInstanceCreatedMask =
+        ~internal::kLazyInstanceStateCreating;
+
+    // We will hopefully have fast access when the instance is already created.
+    // Since a thread sees private_instance_ == 0 or kLazyInstanceStateCreating
+    // at most once, the load is taken out of NeedsInstance() as a fast-path.
+    // The load has acquire memory ordering as a thread which sees
+    // private_instance_ > creating needs to acquire visibility over
+    // the associated data (private_buf_). Pairing Release_Store is in
+    // CompleteLazyInstance().
+    subtle::AtomicWord value = subtle::Acquire_Load(&private_instance_);
+    if (!(value & kLazyInstanceCreatedMask) &&
+        internal::NeedsLazyInstance(&private_instance_)) {
+      // Create the instance in the space provided by |private_buf_|.
+      value = reinterpret_cast<subtle::AtomicWord>(
+          Traits::New(private_buf_.void_data()));
+      internal::CompleteLazyInstance(&private_instance_, value, this,
+                                     Traits::kRegisterOnExit ? OnExit : NULL);
+    }
+    return instance();
+  }
+
+  // Returns true if |p| aliases this instance's storage (or both are NULL
+  // when nothing has been created). Uses a NoBarrier_Load, so intended for
+  // debugging/testing rather than synchronization.
+  bool operator==(Type* p) {
+    switch (subtle::NoBarrier_Load(&private_instance_)) {
+      case 0:
+        return p == NULL;
+      case internal::kLazyInstanceStateCreating:
+        return static_cast<void*>(p) == private_buf_.void_data();
+      default:
+        return p == instance();
+    }
+  }
+
+  // Effectively private: member data is only public to allow the linker to
+  // statically initialize it and to maintain a POD class. DO NOT USE FROM
+  // OUTSIDE THIS CLASS.
+
+  // 0 = not created; kLazyInstanceStateCreating = construction in progress;
+  // any other value is the pointer to the constructed instance.
+  subtle::AtomicWord private_instance_;
+  // Preallocated space for the Type instance.
+  base::AlignedMemory<sizeof(Type), ALIGNOF(Type)> private_buf_;
+
+ private:
+  Type* instance() {
+    return reinterpret_cast<Type*>(subtle::NoBarrier_Load(&private_instance_));
+  }
+
+  // Adapter function for use with AtExit. This should be called single
+  // threaded, so don't synchronize across threads.
+  // Calling OnExit while the instance is in use by other threads is a mistake.
+  static void OnExit(void* lazy_instance) {
+    LazyInstance<Type, Traits>* me =
+        reinterpret_cast<LazyInstance<Type, Traits>*>(lazy_instance);
+    Traits::Delete(me->instance());
+    subtle::NoBarrier_Store(&me->private_instance_, 0);
+  }
+};
+
+} // namespace base
+
+#endif // BASE_LAZY_INSTANCE_H_
diff --git a/libchrome/base/lazy_instance_unittest.cc b/libchrome/base/lazy_instance_unittest.cc
new file mode 100644
index 0000000..8947b12
--- /dev/null
+++ b/libchrome/base/lazy_instance_unittest.cc
@@ -0,0 +1,174 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include "base/at_exit.h"
+#include "base/atomic_sequence_num.h"
+#include "base/lazy_instance.h"
+#include "base/memory/aligned_memory.h"
+#include "base/threading/simple_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+base::StaticAtomicSequenceNumber constructed_seq_;
+base::StaticAtomicSequenceNumber destructed_seq_;
+
+// Bumps the global sequence counters on construction/destruction so tests
+// can verify exactly when LazyInstance creates and destroys its object.
+class ConstructAndDestructLogger {
+ public:
+  ConstructAndDestructLogger() {
+    constructed_seq_.GetNext();
+  }
+  ~ConstructAndDestructLogger() {
+    destructed_seq_.GetNext();
+  }
+};
+
+// A type whose constructor deliberately takes ~1 second, used to widen the
+// window for races between threads touching the same LazyInstance.
+class SlowConstructor {
+ public:
+  SlowConstructor() : some_int_(0) {
+    // Sleep for 1 second to try to cause a race.
+    base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(1));
+    ++constructed;
+    some_int_ = 12;
+  }
+  int some_int() const { return some_int_; }
+
+  // Global count of completed constructions; must end up 1 per instance.
+  static int constructed;
+ private:
+  int some_int_;
+};
+
+int SlowConstructor::constructed = 0;
+
+// Thread delegate that reads the shared LazyInstance; every thread must see
+// the fully constructed value (12), never a half-built object.
+class SlowDelegate : public base::DelegateSimpleThread::Delegate {
+ public:
+  explicit SlowDelegate(base::LazyInstance<SlowConstructor>* lazy)
+      : lazy_(lazy) {}
+
+  void Run() override {
+    EXPECT_EQ(12, lazy_->Get().some_int());
+    EXPECT_EQ(12, lazy_->Pointer()->some_int());
+  }
+
+ private:
+  base::LazyInstance<SlowConstructor>* lazy_;  // Not owned.
+};
+
+} // namespace
+
+static base::LazyInstance<ConstructAndDestructLogger> lazy_logger =
+ LAZY_INSTANCE_INITIALIZER;
+
+TEST(LazyInstanceTest, Basic) {
+  {
+    base::ShadowingAtExitManager shadow;
+
+    // Note: GetNext() itself consumes a sequence number, so each EXPECT below
+    // also advances the counter by one.
+    EXPECT_EQ(0, constructed_seq_.GetNext());
+    EXPECT_EQ(0, destructed_seq_.GetNext());
+
+    // First access constructs the logger (which bumps constructed_seq_).
+    lazy_logger.Get();
+    EXPECT_EQ(2, constructed_seq_.GetNext());
+    EXPECT_EQ(1, destructed_seq_.GetNext());
+
+    // Subsequent access reuses the same instance: no new construction.
+    lazy_logger.Pointer();
+    EXPECT_EQ(3, constructed_seq_.GetNext());
+    EXPECT_EQ(2, destructed_seq_.GetNext());
+  }
+  // Leaving the scope tears down the ShadowingAtExitManager, which runs the
+  // registered destructor exactly once (bumping destructed_seq_).
+  EXPECT_EQ(4, constructed_seq_.GetNext());
+  EXPECT_EQ(4, destructed_seq_.GetNext());
+}
+
+static base::LazyInstance<SlowConstructor> lazy_slow =
+ LAZY_INSTANCE_INITIALIZER;
+
+// 20 work items on 5 threads all race to create the same LazyInstance; the
+// SlowConstructor must run exactly once despite the contention.
+TEST(LazyInstanceTest, ConstructorThreadSafety) {
+  {
+    base::ShadowingAtExitManager shadow;
+
+    SlowDelegate delegate(&lazy_slow);
+    EXPECT_EQ(0, SlowConstructor::constructed);
+
+    base::DelegateSimpleThreadPool pool("lazy_instance_cons", 5);
+    pool.AddWork(&delegate, 20);
+    EXPECT_EQ(0, SlowConstructor::constructed);
+
+    pool.Start();
+    pool.JoinAll();
+    EXPECT_EQ(1, SlowConstructor::constructed);
+  }
+}
+
+namespace {
+
+// DeleteLogger is an object which sets a flag when it's destroyed.
+// It accepts a bool* and sets the bool to true when the dtor runs.
+class DeleteLogger {
+ public:
+  DeleteLogger() : deleted_(NULL) {}
+  // Note: dereferences |deleted_| unconditionally, so SetDeletedPtr() must
+  // have been called before destruction.
+  ~DeleteLogger() { *deleted_ = true; }
+
+  void SetDeletedPtr(bool* deleted) {
+    deleted_ = deleted;
+  }
+
+ private:
+  bool* deleted_;  // Not owned.
+};
+
+} // anonymous namespace
+
+TEST(LazyInstanceTest, LeakyLazyInstance) {
+  // Check that using a plain LazyInstance causes the dtor to run
+  // when the AtExitManager finishes.
+  bool deleted1 = false;
+  {
+    base::ShadowingAtExitManager shadow;
+    // Function-local static: constructed on first Get(), registered with the
+    // shadowing AtExitManager above.
+    static base::LazyInstance<DeleteLogger> test = LAZY_INSTANCE_INITIALIZER;
+    test.Get().SetDeletedPtr(&deleted1);
+  }
+  EXPECT_TRUE(deleted1);
+
+  // Check that using a *leaky* LazyInstance makes the dtor not run
+  // when the AtExitManager finishes.
+  bool deleted2 = false;
+  {
+    base::ShadowingAtExitManager shadow;
+    static base::LazyInstance<DeleteLogger>::Leaky
+        test = LAZY_INSTANCE_INITIALIZER;
+    test.Get().SetDeletedPtr(&deleted2);
+  }
+  EXPECT_FALSE(deleted2);
+}
+
+namespace {
+
+// Payload with an explicit alignment requirement, used to verify that
+// LazyInstance's preallocated buffer honors ALIGNOF(Type).
+template <size_t alignment>
+class AlignedData {
+ public:
+  AlignedData() {}
+  ~AlignedData() {}
+  base::AlignedMemory<alignment, alignment> data_;
+};
+
+} // anonymous namespace
+
+#define EXPECT_ALIGNED(ptr, align) \
+ EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
+
+// Verifies that each lazily-created instance's storage satisfies its type's
+// alignment requirement (checked via the EXPECT_ALIGNED macro above).
+TEST(LazyInstanceTest, Alignment) {
+  using base::LazyInstance;
+
+  // Create some static instances with increasing sizes and alignment
+  // requirements. By ordering this way, the linker will need to do some work to
+  // ensure proper alignment of the static data.
+  static LazyInstance<AlignedData<4> > align4 = LAZY_INSTANCE_INITIALIZER;
+  static LazyInstance<AlignedData<32> > align32 = LAZY_INSTANCE_INITIALIZER;
+  static LazyInstance<AlignedData<4096> > align4096 = LAZY_INSTANCE_INITIALIZER;
+
+  EXPECT_ALIGNED(align4.Pointer(), 4);
+  EXPECT_ALIGNED(align32.Pointer(), 32);
+  EXPECT_ALIGNED(align4096.Pointer(), 4096);
+}
diff --git a/libchrome/base/location.cc b/libchrome/base/location.cc
new file mode 100644
index 0000000..1333e6e
--- /dev/null
+++ b/libchrome/base/location.cc
@@ -0,0 +1,106 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "build/build_config.h"
+
+#if defined(COMPILER_MSVC)
+#include <intrin.h>
+#endif
+
+#include "base/location.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/stringprintf.h"
+
+namespace tracked_objects {
+
+// Builds a Location from explicit values; the char* arguments must be
+// long-lived (e.g. __FILE__/__FUNCTION__ literals) since they are not copied.
+Location::Location(const char* function_name,
+                   const char* file_name,
+                   int line_number,
+                   const void* program_counter)
+    : function_name_(function_name),
+      file_name_(file_name),
+      line_number_(line_number),
+      program_counter_(program_counter) {
+}
+
+// Default Location: sentinel "Unknown" names, line -1, null PC.
+Location::Location()
+    : function_name_("Unknown"),
+      file_name_("Unknown"),
+      line_number_(-1),
+      program_counter_(NULL) {
+}
+
+// Copy constructor: shallow-copies the pointers, which is safe because the
+// strings are expected to be global constants.
+Location::Location(const Location& other)
+    : function_name_(other.function_name_),
+      file_name_(other.file_name_),
+      line_number_(other.line_number_),
+      program_counter_(other.program_counter_) {
+}
+
+// Returns "function@file:line", e.g. "main@foo.cc:42".
+std::string Location::ToString() const {
+  return std::string(function_name_) + "@" + file_name_ + ":" +
+      base::IntToString(line_number_);
+}
+
+// Appends "<file>[<line>] <function> " to |output|. When |display_filename|
+// is false the literal token "line" is printed in place of the file name;
+// the function name is appended only when |display_function_name| is set.
+void Location::Write(bool display_filename, bool display_function_name,
+                     std::string* output) const {
+  base::StringAppendF(output, "%s[%d] ",
+                      display_filename ? file_name_ : "line",
+                      line_number_);
+
+  if (display_function_name) {
+    WriteFunctionName(output);
+    output->push_back(' ');
+  }
+}
+
+// Appends |function_name_| to |output| with '<' and '>' escaped as the HTML
+// entities "&lt;"/"&gt;", as documented in location.h. (The escaping had been
+// lost to entity-decoding, leaving a no-op '<' -> "<" translation.)
+void Location::WriteFunctionName(std::string* output) const {
+  // Translate "<" to "&lt;" for HTML safety.
+  // TODO(jar): Support ASCII or html for logging in ASCII.
+  for (const char *p = function_name_; *p; p++) {
+    switch (*p) {
+      case '<':
+        output->append("&lt;");
+        break;
+
+      case '>':
+        output->append("&gt;");
+        break;
+
+      default:
+        output->push_back(*p);
+        break;
+    }
+  }
+}
+
+//------------------------------------------------------------------------------
+// Default snapshot: empty name strings, line_number initialized to -1.
+LocationSnapshot::LocationSnapshot() : line_number(-1) {
+}
+
+// Deep-copies the file and function names out of |location| so the snapshot
+// stays valid independent of the original char* lifetimes.
+LocationSnapshot::LocationSnapshot(
+    const tracked_objects::Location& location)
+    : file_name(location.file_name()),
+      function_name(location.function_name()),
+      line_number(location.line_number()) {
+}
+
+LocationSnapshot::~LocationSnapshot() {
+}
+
+//------------------------------------------------------------------------------
+#if defined(COMPILER_MSVC)
+__declspec(noinline)  // Keep a real frame so _ReturnAddress() is meaningful.
+#endif
+BASE_EXPORT const void* GetProgramCounter() {
+  // Returns the caller's return address, i.e. the program counter at the
+  // call site (used by FROM_HERE).
+#if defined(COMPILER_MSVC)
+  return _ReturnAddress();
+#elif defined(COMPILER_GCC) && !defined(OS_NACL)
+  return __builtin_extract_return_addr(__builtin_return_address(0));
+#else
+  // No supported way to read the PC on this toolchain (e.g. NaCl).
+  return NULL;
+#endif
+}
+
+} // namespace tracked_objects
diff --git a/libchrome/base/location.h b/libchrome/base/location.h
new file mode 100644
index 0000000..21e270c
--- /dev/null
+++ b/libchrome/base/location.h
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_LOCATION_H_
+#define BASE_LOCATION_H_
+
+#include <stddef.h>
+
+#include <cassert>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/hash.h"
+
+namespace tracked_objects {
+
+// Location provides basic info about where an object was constructed, or was
+// significantly brought to life.
+class BASE_EXPORT Location {
+ public:
+  // Constructor should be called with a long-lived char*, such as __FILE__.
+  // It assumes the provided value will persist as a global constant, and it
+  // will not make a copy of it.
+  Location(const char* function_name,
+           const char* file_name,
+           int line_number,
+           const void* program_counter);
+
+  // Provide a default constructor for ease of debugging.
+  Location();
+
+  // Copy constructor.
+  Location(const Location& other);
+
+  // Comparator for hash map insertion.
+  // No need to use |function_name_| since the other two fields uniquely
+  // identify this location. Note: compares the file_name_ pointers, which is
+  // valid because they come from __FILE__ literals (see Hash below).
+  bool operator==(const Location& other) const {
+    return line_number_ == other.line_number_ &&
+           file_name_ == other.file_name_;
+  }
+
+  const char* function_name() const { return function_name_; }
+  const char* file_name() const { return file_name_; }
+  int line_number() const { return line_number_; }
+  const void* program_counter() const { return program_counter_; }
+
+  // Returns "function@file:line" as a single string.
+  std::string ToString() const;
+
+  // Hash operator for hash maps.
+  struct Hash {
+    size_t operator()(const Location& location) const {
+      // Compute the hash value using file name pointer and line number.
+      // No need to use |function_name_| since the other two fields uniquely
+      // identify this location.
+
+      // The file name will always be uniquely identified by its pointer since
+      // it comes from __FILE__, so no need to check the contents of the string.
+      // See the definition of FROM_HERE in location.h, and how it is used
+      // elsewhere.
+      return base::HashInts(reinterpret_cast<uintptr_t>(location.file_name()),
+                            location.line_number());
+    }
+  };
+
+  // Translate some of the state in this instance into a human readable
+  // string with HTML characters in the function names escaped, and append that
+  // string to |output|. Inclusion of the file_name_ and function_name_ are
+  // optional, and controlled by the boolean arguments.
+  void Write(bool display_filename, bool display_function_name,
+             std::string* output) const;
+
+  // Write function_name_ in HTML with '<' and '>' properly encoded.
+  void WriteFunctionName(std::string* output) const;
+
+ private:
+  // The char* members point at long-lived (typically static) storage and are
+  // never owned or copied by this class.
+  const char* function_name_;
+  const char* file_name_;
+  int line_number_;
+  const void* program_counter_;
+};
+
+// A "snapshotted" representation of the Location class that can safely be
+// passed across process boundaries: it copies the names into std::strings
+// instead of retaining Location's raw pointers.
+struct BASE_EXPORT LocationSnapshot {
+  // The default constructor is exposed to support the IPC serialization macros.
+  LocationSnapshot();
+  explicit LocationSnapshot(const tracked_objects::Location& location);
+  ~LocationSnapshot();
+
+  std::string file_name;
+  std::string function_name;
+  int line_number;
+};
+
+BASE_EXPORT const void* GetProgramCounter();
+
+// Define a macro to record the current source location.
+#define FROM_HERE FROM_HERE_WITH_EXPLICIT_FUNCTION(__FUNCTION__)
+
+#define FROM_HERE_WITH_EXPLICIT_FUNCTION(function_name) \
+ ::tracked_objects::Location(function_name, \
+ __FILE__, \
+ __LINE__, \
+ ::tracked_objects::GetProgramCounter())
+
+} // namespace tracked_objects
+
+#endif // BASE_LOCATION_H_
diff --git a/libchrome/base/logging.cc b/libchrome/base/logging.cc
new file mode 100644
index 0000000..381e9ee
--- /dev/null
+++ b/libchrome/base/logging.cc
@@ -0,0 +1,950 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <io.h>
+#include <windows.h>
+typedef HANDLE FileHandle;
+typedef HANDLE MutexHandle;
+// Windows warns on using write(). It prefers _write().
+#define write(fd, buf, count) _write(fd, buf, static_cast<unsigned int>(count))
+// Windows doesn't define STDERR_FILENO. Define it here.
+#define STDERR_FILENO 2
+#elif defined(OS_MACOSX)
+#include <asl.h>
+#include <CoreFoundation/CoreFoundation.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <mach-o/dyld.h>
+#elif defined(OS_POSIX)
+#if defined(OS_NACL)
+#include <sys/time.h> // timespec doesn't seem to be in <time.h>
+#endif
+#include <time.h>
+#endif
+
+#if defined(OS_POSIX)
+#include <errno.h>
+#include <paths.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#define MAX_PATH PATH_MAX
+typedef FILE* FileHandle;
+typedef pthread_mutex_t* MutexHandle;
+#endif
+
+#include <algorithm>
+#include <cstring>
+#include <ctime>
+#include <iomanip>
+#include <ostream>
+#include <string>
+
+#include "base/base_switches.h"
+#include "base/command_line.h"
+#include "base/debug/alias.h"
+#include "base/debug/debugger.h"
+#include "base/debug/stack_trace.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/synchronization/lock_impl.h"
+#include "base/threading/platform_thread.h"
+#include "base/vlog.h"
+#if defined(OS_POSIX)
+#include "base/posix/safe_strerror.h"
+#endif
+
+#if !defined(OS_ANDROID)
+#include "base/files/file_path.h"
+#endif
+
+#if defined(OS_ANDROID) || defined(__ANDROID__)
+#include <android/log.h>
+#endif
+
+namespace logging {
+
+namespace {
+
+// Active --v/--vmodule configuration.  May be replaced (not deleted) by a
+// second InitLogging call; |g_vlog_info_prev| keeps the old instance alive
+// because another thread might still be reading it.
+VlogInfo* g_vlog_info = nullptr;
+VlogInfo* g_vlog_info_prev = nullptr;
+
+// Printable names indexed by LOG_* severity value.
+const char* const log_severity_names[LOG_NUM_SEVERITIES] = {
+  "INFO", "WARNING", "ERROR", "FATAL" };
+
+// Maps a severity to its printable name; out-of-range values (including the
+// negative verbose levels) yield "UNKNOWN".
+const char* log_severity_name(int severity) {
+  if (severity >= 0 && severity < LOG_NUM_SEVERITIES)
+    return log_severity_names[severity];
+  return "UNKNOWN";
+}
+
+// Messages below this severity are compiled to no-ops by ShouldCreateLogMessage.
+int g_min_log_level = 0;
+
+LoggingDestination g_logging_destination = LOG_DEFAULT;
+
+// For LOG_ERROR and above, always print to stderr.
+const int kAlwaysPrintErrorLevel = LOG_ERROR;
+
+// Which log file to use? This is initialized by InitLogging or
+// will be lazily initialized to the default value when it is
+// first needed.
+#if defined(OS_WIN)
+typedef std::wstring PathString;
+#else
+typedef std::string PathString;
+#endif
+PathString* g_log_file_name = nullptr;
+
+// This file is lazily opened and the handle may be nullptr
+FileHandle g_log_file = nullptr;
+
+// What should be prepended to each message?
+bool g_log_process_id = false;
+bool g_log_thread_id = false;
+bool g_log_timestamp = true;
+bool g_log_tickcount = false;
+
+// Should we pop up fatal debug messages in a dialog?
+bool show_error_dialogs = false;
+
+// An assert handler override specified by the client to be called instead of
+// the debug message dialog and process termination.
+LogAssertHandlerFunction log_assert_handler = nullptr;
+// A log message handler that gets notified of every log message we process.
+LogMessageHandlerFunction log_message_handler = nullptr;
+
+// Helper functions to wrap platform differences.
+
+// Returns the current process id via the platform-specific API.
+int32_t CurrentProcessId() {
+#if defined(OS_WIN)
+  return GetCurrentProcessId();
+#elif defined(OS_POSIX)
+  return getpid();
+#endif
+}
+
+// Returns a monotonically increasing tick value for the optional log-line
+// prefix.  NOTE(review): units differ per platform (ms on Windows, mach
+// ticks on Mac, clock() units on NaCl, microseconds on other POSIX) — values
+// are only meaningful relative to each other within one run.
+uint64_t TickCount() {
+#if defined(OS_WIN)
+  return GetTickCount();
+#elif defined(OS_MACOSX)
+  return mach_absolute_time();
+#elif defined(OS_NACL)
+  // NaCl sadly does not have _POSIX_TIMERS enabled in sys/features.h
+  // So we have to use clock() for now.
+  return clock();
+#elif defined(OS_POSIX)
+  struct timespec ts;
+  clock_gettime(CLOCK_MONOTONIC, &ts);
+
+  uint64_t absolute_micro = static_cast<int64_t>(ts.tv_sec) * 1000000 +
+                            static_cast<int64_t>(ts.tv_nsec) / 1000;
+
+  return absolute_micro;
+#endif
+}
+
+// Best-effort removal of an existing log file; errors are ignored.
+void DeleteFilePath(const PathString& log_name) {
+#if defined(OS_WIN)
+  DeleteFile(log_name.c_str());
+#elif defined(OS_NACL)
+  // Do nothing; unlink() isn't supported on NaCl.
+#else
+  unlink(log_name.c_str());
+#endif
+}
+
+// Computes the log file path used when InitLogging was never called with an
+// explicit name.
+PathString GetDefaultLogFile() {
+#if defined(OS_WIN)
+  // On Windows we use the same path as the exe.
+  wchar_t module_name[MAX_PATH];
+  GetModuleFileName(nullptr, module_name, MAX_PATH);
+
+  PathString log_name = module_name;
+  PathString::size_type last_backslash = log_name.rfind('\\', log_name.size());
+  if (last_backslash != PathString::npos)
+    log_name.erase(last_backslash + 1);
+  log_name += L"debug.log";
+  return log_name;
+#elif defined(OS_POSIX)
+  // On other platforms we just use the current directory.
+  return PathString("debug.log");
+#endif
+}
+
+// We don't need locks on Windows for atomically appending to files. The OS
+// provides this functionality.
+#if !defined(OS_WIN)
+// This class acts as a wrapper for locking the logging files.
+// LoggingLock::Init() should be called from the main thread before any logging
+// is done. Then whenever logging, be sure to have a local LoggingLock
+// instance on the stack. This will ensure that the lock is unlocked upon
+// exiting the frame.
+// LoggingLocks can not be nested.
+class LoggingLock {
+ public:
+  LoggingLock() {
+    LockLogging();
+  }
+
+  ~LoggingLock() {
+    UnlockLogging();
+  }
+
+  // Selects the locking strategy: LOCK_LOG_FILE uses the process-wide
+  // pthread mutex below; anything else allocates an in-process LockImpl.
+  // Only the first call has any effect; later calls are ignored.
+  static void Init(LogLockingState lock_log, const PathChar* /*new_log_file*/) {
+    if (initialized)
+      return;
+    lock_log_file = lock_log;
+
+    if (lock_log_file != LOCK_LOG_FILE)
+      log_lock = new base::internal::LockImpl();
+
+    initialized = true;
+  }
+
+ private:
+  static void LockLogging() {
+    if (lock_log_file == LOCK_LOG_FILE) {
+#if defined(OS_POSIX)
+      pthread_mutex_lock(&log_mutex);
+#endif
+    } else {
+      // use the lock
+      log_lock->Lock();
+    }
+  }
+
+  static void UnlockLogging() {
+    if (lock_log_file == LOCK_LOG_FILE) {
+#if defined(OS_POSIX)
+      pthread_mutex_unlock(&log_mutex);
+#endif
+    } else {
+      log_lock->Unlock();
+    }
+  }
+
+  // The lock is used if log file locking is false. It helps us avoid problems
+  // with multiple threads writing to the log file at the same time. Use
+  // LockImpl directly instead of using Lock, because Lock makes logging calls.
+  static base::internal::LockImpl* log_lock;
+
+  // When we don't use a lock, we are using a global mutex. We need to do this
+  // because LockFileEx is not thread safe.
+#if defined(OS_POSIX)
+  static pthread_mutex_t log_mutex;
+#endif
+
+  static bool initialized;
+  static LogLockingState lock_log_file;
+};
+
+// static
+bool LoggingLock::initialized = false;
+// static
+base::internal::LockImpl* LoggingLock::log_lock = nullptr;
+// static
+LogLockingState LoggingLock::lock_log_file = LOCK_LOG_FILE;
+
+#if defined(OS_POSIX)
+pthread_mutex_t LoggingLock::log_mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
+#endif  // OS_WIN
+
+// Called by logging functions to ensure that |g_log_file| is initialized
+// and can be used for writing. Returns false if the file could not be
+// initialized. |g_log_file| will be nullptr in this case.
+// NOTE(review): callers are expected to hold LoggingLock on non-Windows —
+// this function itself takes no lock.
+bool InitializeLogFileHandle() {
+  if (g_log_file)
+    return true;
+
+  if (!g_log_file_name) {
+    // Nobody has called InitLogging to specify a debug log file, so here we
+    // initialize the log file name to a default.
+    g_log_file_name = new PathString(GetDefaultLogFile());
+  }
+
+  if ((g_logging_destination & LOG_TO_FILE) != 0) {
+#if defined(OS_WIN)
+    // The FILE_APPEND_DATA access mask ensures that the file is atomically
+    // appended to across accesses from multiple threads.
+    // https://msdn.microsoft.com/en-us/library/windows/desktop/aa364399(v=vs.85).aspx
+    // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
+    g_log_file = CreateFile(g_log_file_name->c_str(), FILE_APPEND_DATA,
+                            FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
+                            OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr);
+    if (g_log_file == INVALID_HANDLE_VALUE || g_log_file == nullptr) {
+      // We are intentionally not using FilePath or FileUtil here to reduce the
+      // dependencies of the logging implementation. For e.g. FilePath and
+      // FileUtil depend on shell32 and user32.dll. This is not acceptable for
+      // some consumers of base logging like chrome_elf, etc.
+      // Please don't change the code below to use FilePath.
+      // try the current directory
+      wchar_t system_buffer[MAX_PATH];
+      system_buffer[0] = 0;
+      DWORD len = ::GetCurrentDirectory(arraysize(system_buffer),
+                                        system_buffer);
+      if (len == 0 || len > arraysize(system_buffer))
+        return false;
+
+      *g_log_file_name = system_buffer;
+      // Append a trailing backslash if needed.
+      if (g_log_file_name->back() != L'\\')
+        *g_log_file_name += L"\\";
+      *g_log_file_name += L"debug.log";
+
+      g_log_file = CreateFile(g_log_file_name->c_str(), FILE_APPEND_DATA,
+                              FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
+                              OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr);
+      if (g_log_file == INVALID_HANDLE_VALUE || g_log_file == nullptr) {
+        g_log_file = nullptr;
+        return false;
+      }
+    }
+#elif defined(OS_POSIX)
+    // Append mode so a pre-existing log is preserved unless the caller asked
+    // for DELETE_OLD_LOG_FILE in InitLogging.
+    g_log_file = fopen(g_log_file_name->c_str(), "a");
+    if (g_log_file == nullptr)
+      return false;
+#endif
+  }
+
+  return true;
+}
+
+// Closes a platform file handle opened by InitializeLogFileHandle.
+void CloseFile(FileHandle log) {
+#if defined(OS_WIN)
+  CloseHandle(log);
+#else
+  fclose(log);
+#endif
+}
+
+// Closes and clears |g_log_file| if open.  "Unlocked" = caller must already
+// hold the logging lock where one is required.
+void CloseLogFileUnlocked() {
+  if (!g_log_file)
+    return;
+
+  CloseFile(g_log_file);
+  g_log_file = nullptr;
+}
+
+} // namespace
+
+// Defaults: log to the default destination, no explicit file, lock via the
+// log file, and append to any existing log.
+LoggingSettings::LoggingSettings()
+    : logging_dest(LOG_DEFAULT),
+      log_file(nullptr),
+      lock_log(LOCK_LOG_FILE),
+      delete_old(APPEND_TO_OLD_LOG_FILE) {}
+
+// Applies |settings| process-wide.  Safe to call more than once: a second
+// call re-initializes with the new options (and swaps in a fresh VlogInfo
+// rather than deleting the old one, which another thread may still use).
+// Returns false only when LOG_TO_FILE is requested and the file cannot be
+// opened.
+bool BaseInitLoggingImpl(const LoggingSettings& settings) {
+#if defined(OS_NACL)
+  // Can log only to the system debug log.
+  CHECK_EQ(settings.logging_dest & ~LOG_TO_SYSTEM_DEBUG_LOG, 0);
+#endif
+  if (base::CommandLine::InitializedForCurrentProcess()) {
+    base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
+    // Don't bother initializing |g_vlog_info| unless we use one of the
+    // vlog switches.
+    if (command_line->HasSwitch(switches::kV) ||
+        command_line->HasSwitch(switches::kVModule)) {
+      // NOTE: If |g_vlog_info| has already been initialized, it might be in use
+      // by another thread. Don't delete the old VLogInfo, just create a second
+      // one. We keep track of both to avoid memory leak warnings.
+      CHECK(!g_vlog_info_prev);
+      g_vlog_info_prev = g_vlog_info;
+
+      g_vlog_info =
+          new VlogInfo(command_line->GetSwitchValueASCII(switches::kV),
+                       command_line->GetSwitchValueASCII(switches::kVModule),
+                       &g_min_log_level);
+    }
+  }
+
+  g_logging_destination = settings.logging_dest;
+
+  // ignore file options unless logging to file is set.
+  if ((g_logging_destination & LOG_TO_FILE) == 0)
+    return true;
+
+#if !defined(OS_WIN)
+  LoggingLock::Init(settings.lock_log, settings.log_file);
+  LoggingLock logging_lock;
+#endif
+
+  // Calling InitLogging twice or after some log call has already opened the
+  // default log file will re-initialize to the new options.
+  CloseLogFileUnlocked();
+
+  if (!g_log_file_name)
+    g_log_file_name = new PathString();
+  *g_log_file_name = settings.log_file;
+  if (settings.delete_old == DELETE_OLD_LOG_FILE)
+    DeleteFilePath(*g_log_file_name);
+
+  return InitializeLogFileHandle();
+}
+
+// Sets the global minimum severity, capped at LOG_FATAL (negative values are
+// allowed and enable verbose logging).
+void SetMinLogLevel(int level) {
+  g_min_log_level = std::min(LOG_FATAL, level);
+}
+
+int GetMinLogLevel() {
+  return g_min_log_level;
+}
+
+// Cheap pre-filter used by the LOG macros to skip constructing a LogMessage
+// whose destructor would do nothing.
+bool ShouldCreateLogMessage(int severity) {
+  if (severity < g_min_log_level)
+    return false;
+
+  // Return true here unless we know ~LogMessage won't do anything. Note that
+  // ~LogMessage writes to stderr if severity_ >= kAlwaysPrintErrorLevel, even
+  // when g_logging_destination is LOG_NONE.
+  return g_logging_destination != LOG_NONE || log_message_handler ||
+         severity >= kAlwaysPrintErrorLevel;
+}
+
+// Verbosity implied by the min log level: e.g. min level -2 means VLOG(2) is
+// on.  Floored at -1 so a high min level reports "verbose logging off".
+int GetVlogVerbosity() {
+  return std::max(-1, LOG_INFO - GetMinLogLevel());
+}
+
+// |N| is the length of |file| including the terminating NUL (presumably
+// sizeof(__FILE__) at the macro expansion site — hence the N - 1 below).
+int GetVlogLevelHelper(const char* file, size_t N) {
+  DCHECK_GT(N, 0U);
+  // Note: |g_vlog_info| may change on a different thread during startup
+  // (but will always be valid or nullptr).
+  VlogInfo* vlog_info = g_vlog_info;
+  return vlog_info ?
+      vlog_info->GetVlogLevel(base::StringPiece(file, N - 1)) :
+      GetVlogVerbosity();
+}
+
+// Chooses which fields LogMessage::Init prepends to every message.
+void SetLogItems(bool enable_process_id, bool enable_thread_id,
+                 bool enable_timestamp, bool enable_tickcount) {
+  g_log_process_id = enable_process_id;
+  g_log_thread_id = enable_thread_id;
+  g_log_timestamp = enable_timestamp;
+  g_log_tickcount = enable_tickcount;
+}
+
+void SetShowErrorDialogs(bool enable_dialogs) {
+  show_error_dialogs = enable_dialogs;
+}
+
+// Replaces process termination on fatal messages with |handler|.
+void SetLogAssertHandler(LogAssertHandlerFunction handler) {
+  log_assert_handler = handler;
+}
+
+// Installs a handler that gets first refusal on every message (see
+// ~LogMessage); pass nullptr to remove.
+void SetLogMessageHandler(LogMessageHandlerFunction handler) {
+  log_message_handler = handler;
+}
+
+LogMessageHandlerFunction GetLogMessageHandler() {
+  return log_message_handler;
+}
+
+// Explicit instantiations for commonly used comparisons.  Keeping these in
+// one translation unit avoids duplicating the template bodies in every
+// object file that uses CHECK_EQ and friends.
+template std::string* MakeCheckOpString<int, int>(
+    const int&, const int&, const char* names);
+template std::string* MakeCheckOpString<unsigned long, unsigned long>(
+    const unsigned long&, const unsigned long&, const char* names);
+template std::string* MakeCheckOpString<unsigned long, unsigned int>(
+    const unsigned long&, const unsigned int&, const char* names);
+template std::string* MakeCheckOpString<unsigned int, unsigned long>(
+    const unsigned int&, const unsigned long&, const char* names);
+template std::string* MakeCheckOpString<std::string, std::string>(
+    const std::string&, const std::string&, const char* name);
+
+// std::nullptr_t has no stream insertion operator, so CHECK-op value
+// printing spells it out explicitly.
+void MakeCheckOpValueString(std::ostream* os, std::nullptr_t) {
+  (*os) << "nullptr";
+}
+
+#if !defined(NDEBUG)
+// Displays a message box to the user with the error message in it.
+// Used for fatal messages, where we close the app simultaneously.
+// This is for developers only; we don't use this in circumstances
+// (like release builds) where users could see it, since users don't
+// understand these messages anyway.
+// No-op unless SetShowErrorDialogs(true) was called; only implemented on
+// Windows.
+void DisplayDebugMessageInDialog(const std::string& str) {
+  if (str.empty())
+    return;
+
+  if (!show_error_dialogs)
+    return;
+
+#if defined(OS_WIN)
+  MessageBoxW(nullptr, base::UTF8ToUTF16(str).c_str(), L"Fatal error",
+              MB_OK | MB_ICONHAND | MB_TOPMOST);
+#else
+  // We intentionally don't implement a dialog on other platforms.
+  // You can just look at stderr.
+#endif  // defined(OS_WIN)
+}
+#endif  // !defined(NDEBUG)
+
+#if defined(OS_WIN)
+// RAII saver/restorer so that logging itself cannot clobber the Win32
+// last-error value the caller is about to report.
+LogMessage::SaveLastError::SaveLastError() : last_error_(::GetLastError()) {
+}
+
+LogMessage::SaveLastError::~SaveLastError() {
+  ::SetLastError(last_error_);
+}
+#endif  // defined(OS_WIN)
+
+// Plain LOG(severity) constructor.
+LogMessage::LogMessage(const char* file, int line, LogSeverity severity)
+    : severity_(severity), file_(file), line_(line) {
+  Init(file, line);
+}
+
+// CHECK(condition) constructor: always fatal, prefixes the failed condition.
+LogMessage::LogMessage(const char* file, int line, const char* condition)
+    : severity_(LOG_FATAL), file_(file), line_(line) {
+  Init(file, line);
+  stream_ << "Check failed: " << condition << ". ";
+}
+
+// CHECK_op constructor: takes ownership of |result| (the formatted
+// "a vs. b" string from MakeCheckOpString) and deletes it.
+LogMessage::LogMessage(const char* file, int line, std::string* result)
+    : severity_(LOG_FATAL), file_(file), line_(line) {
+  Init(file, line);
+  stream_ << "Check failed: " << *result;
+  delete result;
+}
+
+// DCHECK_op constructor: like above but with a caller-chosen severity.
+LogMessage::LogMessage(const char* file, int line, LogSeverity severity,
+                       std::string* result)
+    : severity_(severity), file_(file), line_(line) {
+  Init(file, line);
+  stream_ << "Check failed: " << *result;
+  delete result;
+}
+
+// The destructor does the actual emission: finishes the message, offers it
+// to the installed handler, then fans it out to the system debug log,
+// stderr, and/or the log file per |g_logging_destination|.  LOG_FATAL
+// additionally invokes the assert handler or crashes the process.
+LogMessage::~LogMessage() {
+#if !defined(OFFICIAL_BUILD) && !defined(OS_NACL) && !defined(__UCLIBC__)
+  if (severity_ == LOG_FATAL && !base::debug::BeingDebugged()) {
+    // Include a stack trace on a fatal, unless a debugger is attached.
+    base::debug::StackTrace trace;
+    stream_ << std::endl;  // Newline to separate from log message.
+    trace.OutputToStream(&stream_);
+  }
+#endif
+  stream_ << std::endl;
+  std::string str_newline(stream_.str());
+
+  // Give any log message handler first dibs on the message.
+  if (log_message_handler &&
+      log_message_handler(severity_, file_, line_,
+                          message_start_, str_newline)) {
+    // The handler took care of it, no further processing.
+    return;
+  }
+
+  if ((g_logging_destination & LOG_TO_SYSTEM_DEBUG_LOG) != 0) {
+#if defined(OS_WIN)
+    OutputDebugStringA(str_newline.c_str());
+#elif defined(OS_MACOSX)
+    // In LOG_TO_SYSTEM_DEBUG_LOG mode, log messages are always written to
+    // stderr. If stderr is /dev/null, also log via ASL (Apple System Log). If
+    // there's something weird about stderr, assume that log messages are going
+    // nowhere and log via ASL too. Messages logged via ASL show up in
+    // Console.app.
+    //
+    // Programs started by launchd, as UI applications normally are, have had
+    // stderr connected to /dev/null since OS X 10.8. Prior to that, stderr was
+    // a pipe to launchd, which logged what it received (see log_redirect_fd in
+    // 10.7.5 launchd-392.39/launchd/src/launchd_core_logic.c).
+    //
+    // Another alternative would be to determine whether stderr is a pipe to
+    // launchd and avoid logging via ASL only in that case. See 10.7.5
+    // CF-635.21/CFUtilities.c also_do_stderr(). This would result in logging to
+    // both stderr and ASL even in tests, where it's undesirable to log to the
+    // system log at all.
+    //
+    // Note that the ASL client by default discards messages whose levels are
+    // below ASL_LEVEL_NOTICE. It's possible to change that with
+    // asl_set_filter(), but this is pointless because syslogd normally applies
+    // the same filter.
+    const bool log_via_asl = []() {
+      struct stat stderr_stat;
+      if (fstat(fileno(stderr), &stderr_stat) == -1) {
+        return true;
+      }
+      if (!S_ISCHR(stderr_stat.st_mode)) {
+        return false;
+      }
+
+      struct stat dev_null_stat;
+      if (stat(_PATH_DEVNULL, &dev_null_stat) == -1) {
+        return true;
+      }
+
+      return !S_ISCHR(dev_null_stat.st_mode) ||
+             stderr_stat.st_rdev == dev_null_stat.st_rdev;
+    }();
+
+    if (log_via_asl) {
+      // Log roughly the same way that CFLog() and NSLog() would. See 10.10.5
+      // CF-1153.18/CFUtilities.c __CFLogCString().
+      //
+      // The ASL facility is set to the main bundle ID if available. Otherwise,
+      // "com.apple.console" is used.
+      CFBundleRef main_bundle = CFBundleGetMainBundle();
+      CFStringRef main_bundle_id_cf =
+          main_bundle ? CFBundleGetIdentifier(main_bundle) : nullptr;
+      std::string asl_facility =
+          main_bundle_id_cf ? base::SysCFStringRefToUTF8(main_bundle_id_cf)
+                            : std::string("com.apple.console");
+
+      // RAII wrappers so asl_close/asl_free run on every exit path.
+      class ASLClient {
+       public:
+        explicit ASLClient(const std::string& asl_facility)
+            : client_(asl_open(nullptr,
+                               asl_facility.c_str(),
+                               ASL_OPT_NO_DELAY)) {}
+        ~ASLClient() { asl_close(client_); }
+
+        aslclient get() const { return client_; }
+
+       private:
+        aslclient client_;
+        DISALLOW_COPY_AND_ASSIGN(ASLClient);
+      } asl_client(asl_facility);
+
+      class ASLMessage {
+       public:
+        ASLMessage() : message_(asl_new(ASL_TYPE_MSG)) {}
+        ~ASLMessage() { asl_free(message_); }
+
+        aslmsg get() const { return message_; }
+
+       private:
+        aslmsg message_;
+        DISALLOW_COPY_AND_ASSIGN(ASLMessage);
+      } asl_message;
+
+      // By default, messages are only readable by the admin group. Explicitly
+      // make them readable by the user generating the messages.
+      char euid_string[12];
+      snprintf(euid_string, arraysize(euid_string), "%d", geteuid());
+      asl_set(asl_message.get(), ASL_KEY_READ_UID, euid_string);
+
+      // Map Chrome log severities to ASL log levels.
+      const char* const asl_level_string = [](LogSeverity severity) {
+        // ASL_LEVEL_* are ints, but ASL needs equivalent strings. This
+        // non-obvious two-step macro trick achieves what's needed.
+        // https://gcc.gnu.org/onlinedocs/cpp/Stringification.html
+#define ASL_LEVEL_STR(level) ASL_LEVEL_STR_X(level)
+#define ASL_LEVEL_STR_X(level) #level
+        switch (severity) {
+          case LOG_INFO:
+            return ASL_LEVEL_STR(ASL_LEVEL_INFO);
+          case LOG_WARNING:
+            return ASL_LEVEL_STR(ASL_LEVEL_WARNING);
+          case LOG_ERROR:
+            return ASL_LEVEL_STR(ASL_LEVEL_ERR);
+          case LOG_FATAL:
+            return ASL_LEVEL_STR(ASL_LEVEL_CRIT);
+          default:
+            return severity < 0 ? ASL_LEVEL_STR(ASL_LEVEL_DEBUG)
+                                : ASL_LEVEL_STR(ASL_LEVEL_NOTICE);
+        }
+#undef ASL_LEVEL_STR
+#undef ASL_LEVEL_STR_X
+      }(severity_);
+      asl_set(asl_message.get(), ASL_KEY_LEVEL, asl_level_string);
+
+      asl_set(asl_message.get(), ASL_KEY_MSG, str_newline.c_str());
+
+      asl_send(asl_client.get(), asl_message.get());
+    }
+#elif defined(OS_ANDROID) || defined(__ANDROID__)
+    // Map Chrome log severities to Android log priorities; verbose levels
+    // (severity_ < 0) become ANDROID_LOG_VERBOSE.
+    android_LogPriority priority =
+        (severity_ < 0) ? ANDROID_LOG_VERBOSE : ANDROID_LOG_UNKNOWN;
+    switch (severity_) {
+      case LOG_INFO:
+        priority = ANDROID_LOG_INFO;
+        break;
+      case LOG_WARNING:
+        priority = ANDROID_LOG_WARN;
+        break;
+      case LOG_ERROR:
+        priority = ANDROID_LOG_ERROR;
+        break;
+      case LOG_FATAL:
+        priority = ANDROID_LOG_FATAL;
+        break;
+    }
+#if defined(OS_ANDROID)
+    __android_log_write(priority, "chromium", str_newline.c_str());
+#else
+    __android_log_write(
+        priority,
+        base::CommandLine::InitializedForCurrentProcess() ?
+            base::CommandLine::ForCurrentProcess()->
+                GetProgram().BaseName().value().c_str() : nullptr,
+        str_newline.c_str());
+#endif  // defined(OS_ANDROID)
+#endif
+    ignore_result(fwrite(str_newline.data(), str_newline.size(), 1, stderr));
+    fflush(stderr);
+  } else if (severity_ >= kAlwaysPrintErrorLevel) {
+    // When we're only outputting to a log file, above a certain log level, we
+    // should still output to stderr so that we can better detect and diagnose
+    // problems with unit tests, especially on the buildbots.
+    ignore_result(fwrite(str_newline.data(), str_newline.size(), 1, stderr));
+    fflush(stderr);
+  }
+
+  // write to log file
+  if ((g_logging_destination & LOG_TO_FILE) != 0) {
+    // We can have multiple threads and/or processes, so try to prevent them
+    // from clobbering each other's writes.
+    // If the client app did not call InitLogging, and the lock has not
+    // been created do it now. We do this on demand, but if two threads try
+    // to do this at the same time, there will be a race condition to create
+    // the lock. This is why InitLogging should be called from the main
+    // thread at the beginning of execution.
+#if !defined(OS_WIN)
+    LoggingLock::Init(LOCK_LOG_FILE, nullptr);
+    LoggingLock logging_lock;
+#endif
+    if (InitializeLogFileHandle()) {
+#if defined(OS_WIN)
+      DWORD num_written;
+      WriteFile(g_log_file,
+                static_cast<const void*>(str_newline.c_str()),
+                static_cast<DWORD>(str_newline.length()),
+                &num_written,
+                nullptr);
+#else
+      ignore_result(fwrite(
+          str_newline.data(), str_newline.size(), 1, g_log_file));
+      fflush(g_log_file);
+#endif
+    }
+  }
+
+  if (severity_ == LOG_FATAL) {
+    // Ensure the first characters of the string are on the stack so they
+    // are contained in minidumps for diagnostic purposes.
+    char str_stack[1024];
+    str_newline.copy(str_stack, arraysize(str_stack));
+    base::debug::Alias(str_stack);
+
+    if (log_assert_handler) {
+      // Make a copy of the string for the handler out of paranoia.
+      log_assert_handler(std::string(stream_.str()));
+    } else {
+      // Don't use the string with the newline, get a fresh version to send to
+      // the debug message process. We also don't display assertions to the
+      // user in release mode. The enduser can't do anything with this
+      // information, and displaying message boxes when the application is
+      // hosed can cause additional problems.
+#ifndef NDEBUG
+      if (!base::debug::BeingDebugged()) {
+        // Displaying a dialog is unnecessary when debugging and can complicate
+        // debugging.
+        DisplayDebugMessageInDialog(stream_.str());
+      }
+#endif
+      // Crash the process to generate a dump.
+      base::debug::BreakDebugger();
+    }
+  }
+}
+
+// writes the common header info to the stream
+// Header format (fields optional per SetLogItems):
+//   [pid:tid:MMDD/HHMMSS:tickcount:SEVERITY:file(line)]
+// |message_start_| records where the caller's own text begins.
+void LogMessage::Init(const char* file, int line) {
+  // Strip the directory part of __FILE__ so the header shows the basename.
+  base::StringPiece filename(file);
+  size_t last_slash_pos = filename.find_last_of("\\/");
+  if (last_slash_pos != base::StringPiece::npos)
+    filename.remove_prefix(last_slash_pos + 1);
+
+  // TODO(darin): It might be nice if the columns were fixed width.
+
+  stream_ <<  '[';
+  if (g_log_process_id)
+    stream_ << CurrentProcessId() << ':';
+  if (g_log_thread_id)
+    stream_ << base::PlatformThread::CurrentId() << ':';
+  if (g_log_timestamp) {
+    time_t t = time(nullptr);
+#if defined(__ANDROID__) || defined(ANDROID)
+    struct tm local_time;
+    memset(&local_time, 0, sizeof(local_time));
+#else
+    struct tm local_time = {0};
+#endif
+#ifdef _MSC_VER
+    localtime_s(&local_time, &t);
+#else
+    localtime_r(&t, &local_time);
+#endif
+    struct tm* tm_time = &local_time;
+    stream_ << std::setfill('0')
+            << std::setw(2) << 1 + tm_time->tm_mon
+            << std::setw(2) << tm_time->tm_mday
+            << '/'
+            << std::setw(2) << tm_time->tm_hour
+            << std::setw(2) << tm_time->tm_min
+            << std::setw(2) << tm_time->tm_sec
+            << ':';
+  }
+  if (g_log_tickcount)
+    stream_ << TickCount() << ':';
+  // Negative severities are verbose levels and print as VERBOSE<n>.
+  if (severity_ >= 0)
+    stream_ << log_severity_name(severity_);
+  else
+    stream_ << "VERBOSE" << -severity_;
+
+  stream_ << ":" << filename << "(" << line << ")] ";
+
+  message_start_ = stream_.str().length();
+}
+
+#if defined(OS_WIN)
+// This has already been defined in the header, but defining it again as DWORD
+// ensures that the type used in the header is equivalent to DWORD. If not,
+// the redefinition is a compile error.
+typedef DWORD SystemErrorCode;
+#endif
+
+// Captures the thread's last OS error (GetLastError / errno).
+SystemErrorCode GetLastSystemErrorCode() {
+#if defined(OS_WIN)
+  return ::GetLastError();
+#elif defined(OS_POSIX)
+  return errno;
+#else
+#error Not implemented
+#endif
+}
+
+#if defined(OS_WIN)
+// Formats |error_code| as a human-readable string with the hex code appended.
+BASE_EXPORT std::string SystemErrorCodeToString(SystemErrorCode error_code) {
+  const int kErrorMessageBufferSize = 256;
+  char msgbuf[kErrorMessageBufferSize];
+  DWORD flags = FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
+  DWORD len = FormatMessageA(flags, nullptr, error_code, 0, msgbuf,
+                             arraysize(msgbuf), nullptr);
+  if (len) {
+    // Messages returned by system end with line breaks.
+    return base::CollapseWhitespaceASCII(msgbuf, true) +
+           base::StringPrintf(" (0x%X)", error_code);
+  }
+  return base::StringPrintf("Error (0x%X) while retrieving error. (0x%X)",
+                            GetLastError(), error_code);
+}
+#elif defined(OS_POSIX)
+// POSIX: thread-safe strerror wrapper.
+BASE_EXPORT std::string SystemErrorCodeToString(SystemErrorCode error_code) {
+  return base::safe_strerror(error_code);
+}
+#else
+#error Not implemented
+#endif  // defined(OS_WIN)
+
+
+#if defined(OS_WIN)
+// Wraps a LogMessage and appends the Win32 error description on destruction
+// (used by the PLOG family of macros).
+Win32ErrorLogMessage::Win32ErrorLogMessage(const char* file,
+                                           int line,
+                                           LogSeverity severity,
+                                           SystemErrorCode err)
+    : err_(err),
+      log_message_(file, line, severity) {
+}
+
+Win32ErrorLogMessage::~Win32ErrorLogMessage() {
+  stream() << ": " << SystemErrorCodeToString(err_);
+  // We're about to crash (CHECK). Put |err_| on the stack (by placing it in a
+  // field) and use Alias in hopes that it makes it into crash dumps.
+  DWORD last_error = err_;
+  base::debug::Alias(&last_error);
+}
+#elif defined(OS_POSIX)
+// POSIX counterpart: appends the errno description on destruction.
+ErrnoLogMessage::ErrnoLogMessage(const char* file,
+                                 int line,
+                                 LogSeverity severity,
+                                 SystemErrorCode err)
+    : err_(err),
+      log_message_(file, line, severity) {
+}
+
+ErrnoLogMessage::~ErrnoLogMessage() {
+  stream() << ": " << SystemErrorCodeToString(err_);
+}
+#endif  // defined(OS_WIN)
+
+// Thread-safe close of the shared log file (takes the logging lock first).
+void CloseLogFile() {
+#if !defined(OS_WIN)
+  LoggingLock logging_lock;
+#endif
+  CloseLogFileUnlocked();
+}
+
+// Minimal, allocation-free logging path: writes |message| straight to
+// stderr with write(2), retrying on EINTR, and appends a newline if one is
+// missing.  LOG_FATAL still breaks into the debugger.
+void RawLog(int level, const char* message) {
+  if (level >= g_min_log_level && message) {
+    size_t bytes_written = 0;
+    const size_t message_len = strlen(message);
+    int rv;
+    while (bytes_written < message_len) {
+      rv = HANDLE_EINTR(
+          write(STDERR_FILENO, message + bytes_written,
+                message_len - bytes_written));
+      if (rv < 0) {
+        // Give up, nothing we can do now.
+        break;
+      }
+      bytes_written += rv;
+    }
+
+    if (message_len > 0 && message[message_len - 1] != '\n') {
+      do {
+        rv = HANDLE_EINTR(write(STDERR_FILENO, "\n", 1));
+        if (rv < 0) {
+          // Give up, nothing we can do now.
+          break;
+        }
+      } while (rv != 1);
+    }
+  }
+
+  if (level == LOG_FATAL)
+    base::debug::BreakDebugger();
+}
+
+// This was defined at the beginning of this file.
+#undef write
+
+#if defined(OS_WIN)
+bool IsLoggingToFileEnabled() {
+  return g_logging_destination & LOG_TO_FILE;
+}
+
+// Returns the configured log file path, or an empty string before any
+// InitLogging / lazy initialization has named one.
+std::wstring GetLogFileFullPath() {
+  if (g_log_file_name)
+    return *g_log_file_name;
+  return std::wstring();
+}
+#endif
+
+// Out-of-line body for the NOTREACHED() macro in release builds.
+BASE_EXPORT void LogErrorNotReached(const char* file, int line) {
+  LogMessage(file, line, LOG_ERROR).stream()
+      << "NOTREACHED() hit.";
+}
+
+}  // namespace logging
+
+// Allows streaming wide strings into log messages by converting to UTF-8;
+// a null pointer prints as an empty string.
+std::ostream& std::operator<<(std::ostream& out, const wchar_t* wstr) {
+  return out << (wstr ? base::WideToUTF8(wstr) : std::string());
+}
diff --git a/libchrome/base/logging.h b/libchrome/base/logging.h
new file mode 100644
index 0000000..2bfc972
--- /dev/null
+++ b/libchrome/base/logging.h
@@ -0,0 +1,1010 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_LOGGING_H_
+#define BASE_LOGGING_H_
+
+#include <stddef.h>
+
+#include <cassert>
+#include <cstring>
+#include <sstream>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/debug/debugger.h"
+#include "base/macros.h"
+#include "base/template_util.h"
+#include "build/build_config.h"
+
+//
+// Optional message capabilities
+// -----------------------------
+// Assertion failed messages and fatal errors are displayed in a dialog box
+// before the application exits. However, running this UI creates a message
+// loop, which causes application messages to be processed and potentially
+// dispatched to existing application windows. Since the application is in a
+// bad state when this assertion dialog is displayed, these messages may not
+// get processed and hang the dialog, or the application might go crazy.
+//
+// Therefore, it can be beneficial to display the error dialog in a separate
+// process from the main application. When the logging system needs to display
+// a fatal error dialog box, it will look for a program called
+// "DebugMessage.exe" in the same directory as the application executable. It
+// will run this application with the message as the command line, and will
+// not include the name of the application as is traditional for easier
+// parsing.
+//
+// The code for DebugMessage.exe is only one line. In WinMain, do:
+// MessageBox(NULL, GetCommandLineW(), L"Fatal Error", 0);
+//
+// If DebugMessage.exe is not found, the logging code will use a normal
+// MessageBox, potentially causing the problems discussed above.
+
+
+// Instructions
+// ------------
+//
+// Make a bunch of macros for logging. The way to log things is to stream
+// things to LOG(<a particular severity level>). E.g.,
+//
+// LOG(INFO) << "Found " << num_cookies << " cookies";
+//
+// You can also do conditional logging:
+//
+// LOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
+//
+// The CHECK(condition) macro is active in both debug and release builds and
+// effectively performs a LOG(FATAL) which terminates the process and
+// generates a crashdump unless a debugger is attached.
+//
+// There are also "debug mode" logging macros like the ones above:
+//
+// DLOG(INFO) << "Found cookies";
+//
+// DLOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
+//
+// All "debug mode" logging is compiled away to nothing for non-debug mode
+// compiles. LOG_IF and development flags also work well together
+// because the code can be compiled away sometimes.
+//
+// We also have
+//
+// LOG_ASSERT(assertion);
+// DLOG_ASSERT(assertion);
+//
+// which is syntactic sugar for {,D}LOG_IF(FATAL, assert fails) << assertion;
+//
+// There are "verbose level" logging macros. They look like
+//
+// VLOG(1) << "I'm printed when you run the program with --v=1 or more";
+// VLOG(2) << "I'm printed when you run the program with --v=2 or more";
+//
+// These always log at the INFO log level (when they log at all).
+// The verbose logging can also be turned on module-by-module. For instance,
+// --vmodule=profile=2,icon_loader=1,browser_*=3,*/chromeos/*=4 --v=0
+// will cause:
+// a. VLOG(2) and lower messages to be printed from profile.{h,cc}
+// b. VLOG(1) and lower messages to be printed from icon_loader.{h,cc}
+// c. VLOG(3) and lower messages to be printed from files prefixed with
+// "browser"
+// d. VLOG(4) and lower messages to be printed from files under a
+// "chromeos" directory.
+// e. VLOG(0) and lower messages to be printed from elsewhere
+//
+// The wildcarding functionality shown by (c) supports both '*' (match
+// 0 or more characters) and '?' (match any single character)
+// wildcards. Any pattern containing a forward or backward slash will
+// be tested against the whole pathname and not just the module.
+// E.g., "*/foo/bar/*=2" would change the logging level for all code
+// in source files under a "foo/bar" directory.
+//
+// There's also VLOG_IS_ON(n) "verbose level" condition macro. To be used as
+//
+// if (VLOG_IS_ON(2)) {
+// // do some logging preparation and logging
+// // that can't be accomplished with just VLOG(2) << ...;
+// }
+//
+// There is also a VLOG_IF "verbose level" condition macro for sample
+// cases, when some extra computation and preparation for logs is not
+// needed.
+//
+// VLOG_IF(1, (size > 1024))
+// << "I'm printed when size is more than 1024 and when you run the "
+// "program with --v=1 or more";
+//
+// We also override the standard 'assert' to use 'DLOG_ASSERT'.
+//
+// Lastly, there is:
+//
+// PLOG(ERROR) << "Couldn't do foo";
+// DPLOG(ERROR) << "Couldn't do foo";
+// PLOG_IF(ERROR, cond) << "Couldn't do foo";
+// DPLOG_IF(ERROR, cond) << "Couldn't do foo";
+// PCHECK(condition) << "Couldn't do foo";
+// DPCHECK(condition) << "Couldn't do foo";
+//
+// which append the last system error to the message in string form (taken from
+// GetLastError() on Windows and errno on POSIX).
+//
+// The supported severity levels for macros that allow you to specify one
+// are (in increasing order of severity) INFO, WARNING, ERROR, and FATAL.
+//
+// Very important: logging a message at the FATAL severity level causes
+// the program to terminate (after the message is logged).
+//
+// There is the special severity of DFATAL, which logs FATAL in debug mode,
+// ERROR in normal mode.
+
+// Note that "The behavior of a C++ program is undefined if it adds declarations
+// or definitions to namespace std or to a namespace within namespace std unless
+// otherwise specified." --C++11[namespace.std]
+//
+// We've checked that this particular definition has the intended behavior on
+// our implementations, but it's prone to breaking in the future, and please
+// don't imitate this in your own definitions without checking with some
+// standard library experts.
namespace std {
// These functions are provided as a convenience for logging, which is where we
// use streams (it is against Google style to use streams in other places). It
// is designed to allow you to emit non-ASCII Unicode strings to the log file,
// which is normally ASCII. It is relatively slow, so try not to use it for
// common cases. Non-ASCII characters will be converted to UTF-8 by these
// operators.
BASE_EXPORT std::ostream& operator<<(std::ostream& out, const wchar_t* wstr);
inline std::ostream& operator<<(std::ostream& out, const std::wstring& wstr) {
  // Delegates to the const wchar_t* overload declared above (defined in
  // logging.cc), which performs the wide -> UTF-8 conversion.
  return out << wstr.c_str();
}

// Streams any enum type as its underlying integral value, so enums without
// their own operator<< can still be logged.
template<typename T>
typename std::enable_if<std::is_enum<T>::value, std::ostream&>::type operator<<(
    std::ostream& out, T value) {
  return out << static_cast<typename std::underlying_type<T>::type>(value);
}

}  // namespace std
+
+namespace logging {
+
+// TODO(avi): do we want to do a unification of character types here?
+#if defined(OS_WIN)
+typedef wchar_t PathChar;
+#else
+typedef char PathChar;
+#endif
+
+// Where to record logging output? A flat file and/or system debug log
+// via OutputDebugString.
enum LoggingDestination {
  LOG_NONE = 0,  // Discard all log output.
  LOG_TO_FILE = 1 << 0,  // Append to the file named in LoggingSettings.
  LOG_TO_SYSTEM_DEBUG_LOG = 1 << 1,  // System debug log (OutputDebugString on
                                     // Windows, stderr on POSIX).

  LOG_TO_ALL = LOG_TO_FILE | LOG_TO_SYSTEM_DEBUG_LOG,

  // On Windows, use a file next to the exe; on POSIX platforms, where
  // it may not even be possible to locate the executable on disk, use
  // stderr.
#if defined(OS_WIN)
  LOG_DEFAULT = LOG_TO_FILE,
#elif defined(OS_POSIX)
  LOG_DEFAULT = LOG_TO_SYSTEM_DEBUG_LOG,
#endif
};
+
+// Indicates that the log file should be locked when being written to.
+// Unless there is only one single-threaded process that is logging to
+// the log file, the file should be locked during writes to make each
+// log output atomic. Other writers will block.
+//
+// All processes writing to the log file must have their locking set for it to
+// work properly. Defaults to LOCK_LOG_FILE.
enum LogLockingState { LOCK_LOG_FILE, DONT_LOCK_LOG_FILE };  // Default: LOCK_LOG_FILE (see above).
+
+// On startup, should we delete or append to an existing log file (if any)?
+// Defaults to APPEND_TO_OLD_LOG_FILE.
enum OldFileDeletionState { DELETE_OLD_LOG_FILE, APPEND_TO_OLD_LOG_FILE };  // Default: APPEND_TO_OLD_LOG_FILE (see above).
+
struct BASE_EXPORT LoggingSettings {
  // The default values (set by the constructor, defined in logging.cc) are:
  //
  //  logging_dest: LOG_DEFAULT
  //  log_file:     NULL
  //  lock_log:     LOCK_LOG_FILE
  //  delete_old:   APPEND_TO_OLD_LOG_FILE
  LoggingSettings();

  // Bitmask of LoggingDestination values selecting where output goes.
  LoggingDestination logging_dest;

  // The three settings below have an effect only when LOG_TO_FILE is
  // set in |logging_dest|.
  const PathChar* log_file;  // Path of the log file.
  LogLockingState lock_log;  // Whether to lock the file around writes.
  OldFileDeletionState delete_old;  // Delete or append to an existing file.
};
+
// Define different names for the BaseInitLoggingImpl() function depending on
// whether NDEBUG is defined or not so that we'll fail to link if someone tries
// to compile logging.cc with NDEBUG but includes logging.h without defining it,
// or vice versa.
// NOTE(review): "#if NDEBUG" is a preprocessing error when NDEBUG is defined
// with no value (the common bare "#define NDEBUG" form); "#if defined(NDEBUG)"
// would be safer, but it must be changed in lockstep with the matching
// condition in logging.cc -- confirm before touching.
#if NDEBUG
#define BaseInitLoggingImpl BaseInitLoggingImpl_built_with_NDEBUG
#else
#define BaseInitLoggingImpl BaseInitLoggingImpl_built_without_NDEBUG
#endif
+
+// Implementation of the InitLogging() method declared below. We use a
+// more-specific name so we can #define it above without affecting other code
+// that has named stuff "InitLogging".
+BASE_EXPORT bool BaseInitLoggingImpl(const LoggingSettings& settings);
+
+// Sets the log file name and other global logging state. Calling this function
+// is recommended, and is normally done at the beginning of application init.
+// If you don't call it, all the flags will be initialized to their default
+// values, and there is a race condition that may leak a critical section
+// object if two threads try to do the first log at the same time.
+// See the definition of the enums above for descriptions and default values.
+//
+// The default log file is initialized to "debug.log" in the application
+// directory. You probably don't want this, especially since the program
+// directory may not be writable on an enduser's system.
+//
+// This function may be called a second time to re-direct logging (e.g after
+// logging in to a user partition), however it should never be called more than
+// twice.
inline bool InitLogging(const LoggingSettings& settings) {
  // Forwards to the NDEBUG-mangled implementation name (see the
  // BaseInitLoggingImpl #define above) so that debug/release mismatches
  // between logging.h and logging.cc fail at link time.
  return BaseInitLoggingImpl(settings);
}
+
+// Sets the log level. Anything at or above this level will be written to the
+// log file/displayed to the user (if applicable). Anything below this level
+// will be silently ignored. The log level defaults to 0 (everything is logged
+// up to level INFO) if this function is not called.
+// Note that log messages for VLOG(x) are logged at level -x, so setting
+// the min log level to negative values enables verbose logging.
+BASE_EXPORT void SetMinLogLevel(int level);
+
+// Gets the current log level.
+BASE_EXPORT int GetMinLogLevel();
+
+// Used by LOG_IS_ON to lazy-evaluate stream arguments.
+BASE_EXPORT bool ShouldCreateLogMessage(int severity);
+
+// Gets the VLOG default verbosity level.
+BASE_EXPORT int GetVlogVerbosity();
+
+// Gets the current vlog level for the given file (usually taken from
+// __FILE__).
+
+// Note that |N| is the size *with* the null terminator.
+BASE_EXPORT int GetVlogLevelHelper(const char* file_start, size_t N);
+
template <size_t N>
int GetVlogLevel(const char (&file)[N]) {
  // N is deduced from the string-literal argument (typically __FILE__), so it
  // includes the trailing '\0' -- which is exactly what GetVlogLevelHelper
  // expects (see the note above its declaration).
  return GetVlogLevelHelper(file, N);
}
+
+// Sets the common items you want to be prepended to each log message.
+// process and thread IDs default to off, the timestamp defaults to on.
+// If this function is not called, logging defaults to writing the timestamp
+// only.
+BASE_EXPORT void SetLogItems(bool enable_process_id, bool enable_thread_id,
+ bool enable_timestamp, bool enable_tickcount);
+
+// Sets whether or not you'd like to see fatal debug messages popped up in
+// a dialog box or not.
+// Dialogs are not shown by default.
+BASE_EXPORT void SetShowErrorDialogs(bool enable_dialogs);
+
+// Sets the Log Assert Handler that will be used to notify of check failures.
+// The default handler shows a dialog box and then terminate the process,
+// however clients can use this function to override with their own handling
+// (e.g. a silent one for Unit Tests)
+typedef void (*LogAssertHandlerFunction)(const std::string& str);
+BASE_EXPORT void SetLogAssertHandler(LogAssertHandlerFunction handler);
+
+// Sets the Log Message Handler that gets passed every log message before
+// it's sent to other log destinations (if any).
+// Returns true to signal that it handled the message and the message
+// should not be sent to other log destinations.
+typedef bool (*LogMessageHandlerFunction)(int severity,
+ const char* file, int line, size_t message_start, const std::string& str);
+BASE_EXPORT void SetLogMessageHandler(LogMessageHandlerFunction handler);
+BASE_EXPORT LogMessageHandlerFunction GetLogMessageHandler();
+
+typedef int LogSeverity;
+const LogSeverity LOG_VERBOSE = -1; // This is level 1 verbosity
+// Note: the log severities are used to index into the array of names,
+// see log_severity_names.
+const LogSeverity LOG_INFO = 0;
+const LogSeverity LOG_WARNING = 1;
+const LogSeverity LOG_ERROR = 2;
+const LogSeverity LOG_FATAL = 3;
+const LogSeverity LOG_NUM_SEVERITIES = 4;
+
+// LOG_DFATAL is LOG_FATAL in debug mode, ERROR in normal mode
+#ifdef NDEBUG
+const LogSeverity LOG_DFATAL = LOG_ERROR;
+#else
+const LogSeverity LOG_DFATAL = LOG_FATAL;
+#endif
+
+// A few definitions of macros that don't generate much code. These are used
+// by LOG() and LOG_IF, etc. Since these are used all over our code, it's
+// better to have compact code for these operations.
+#define COMPACT_GOOGLE_LOG_EX_INFO(ClassName, ...) \
+ logging::ClassName(__FILE__, __LINE__, logging::LOG_INFO , ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_WARNING(ClassName, ...) \
+ logging::ClassName(__FILE__, __LINE__, logging::LOG_WARNING , ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_ERROR(ClassName, ...) \
+ logging::ClassName(__FILE__, __LINE__, logging::LOG_ERROR , ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_FATAL(ClassName, ...) \
+ logging::ClassName(__FILE__, __LINE__, logging::LOG_FATAL , ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_DFATAL(ClassName, ...) \
+ logging::ClassName(__FILE__, __LINE__, logging::LOG_DFATAL , ##__VA_ARGS__)
+
+#define COMPACT_GOOGLE_LOG_INFO \
+ COMPACT_GOOGLE_LOG_EX_INFO(LogMessage)
+#define COMPACT_GOOGLE_LOG_WARNING \
+ COMPACT_GOOGLE_LOG_EX_WARNING(LogMessage)
+#define COMPACT_GOOGLE_LOG_ERROR \
+ COMPACT_GOOGLE_LOG_EX_ERROR(LogMessage)
+#define COMPACT_GOOGLE_LOG_FATAL \
+ COMPACT_GOOGLE_LOG_EX_FATAL(LogMessage)
+#define COMPACT_GOOGLE_LOG_DFATAL \
+ COMPACT_GOOGLE_LOG_EX_DFATAL(LogMessage)
+
+#if defined(OS_WIN)
+// wingdi.h defines ERROR to be 0. When we call LOG(ERROR), it gets
+// substituted with 0, and it expands to COMPACT_GOOGLE_LOG_0. To allow us
+// to keep using this syntax, we define this macro to do the same thing
+// as COMPACT_GOOGLE_LOG_ERROR, and also define ERROR the same way that
+// the Windows SDK does for consistency.
+#define ERROR 0
+#define COMPACT_GOOGLE_LOG_EX_0(ClassName, ...) \
+ COMPACT_GOOGLE_LOG_EX_ERROR(ClassName , ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_0 COMPACT_GOOGLE_LOG_ERROR
+// Needed for LOG_IS_ON(ERROR).
+const LogSeverity LOG_0 = LOG_ERROR;
+#endif
+
+// As special cases, we can assume that LOG_IS_ON(FATAL) always holds. Also,
+// LOG_IS_ON(DFATAL) always holds in debug mode. In particular, CHECK()s will
+// always fire if they fail.
+#define LOG_IS_ON(severity) \
+ (::logging::ShouldCreateLogMessage(::logging::LOG_##severity))
+
+// We can't do any caching tricks with VLOG_IS_ON() like the
+// google-glog version since it requires GCC extensions. This means
+// that using the v-logging functions in conjunction with --vmodule
+// may be slow.
+#define VLOG_IS_ON(verboselevel) \
+ ((verboselevel) <= ::logging::GetVlogLevel(__FILE__))
+
+// Helper macro which avoids evaluating the arguments to a stream if
+// the condition doesn't hold. Condition is evaluated once and only once.
+#define LAZY_STREAM(stream, condition) \
+ !(condition) ? (void) 0 : ::logging::LogMessageVoidify() & (stream)
+
+// We use the preprocessor's merging operator, "##", so that, e.g.,
+// LOG(INFO) becomes the token COMPACT_GOOGLE_LOG_INFO. There's some funny
+// subtle difference between ostream member streaming functions (e.g.,
+// ostream::operator<<(int) and ostream non-member streaming functions
+// (e.g., ::operator<<(ostream&, string&)): it turns out that it's
+// impossible to stream something like a string directly to an unnamed
+// ostream. We employ a neat hack by calling the stream() member
+// function of LogMessage which seems to avoid the problem.
+#define LOG_STREAM(severity) COMPACT_GOOGLE_LOG_ ## severity.stream()
+
+#define LOG(severity) LAZY_STREAM(LOG_STREAM(severity), LOG_IS_ON(severity))
+#define LOG_IF(severity, condition) \
+ LAZY_STREAM(LOG_STREAM(severity), LOG_IS_ON(severity) && (condition))
+
+// The VLOG macros log with negative verbosities.
+#define VLOG_STREAM(verbose_level) \
+ logging::LogMessage(__FILE__, __LINE__, -verbose_level).stream()
+
+#define VLOG(verbose_level) \
+ LAZY_STREAM(VLOG_STREAM(verbose_level), VLOG_IS_ON(verbose_level))
+
+#define VLOG_IF(verbose_level, condition) \
+ LAZY_STREAM(VLOG_STREAM(verbose_level), \
+ VLOG_IS_ON(verbose_level) && (condition))
+
+#if defined (OS_WIN)
+#define VPLOG_STREAM(verbose_level) \
+ logging::Win32ErrorLogMessage(__FILE__, __LINE__, -verbose_level, \
+ ::logging::GetLastSystemErrorCode()).stream()
+#elif defined(OS_POSIX)
+#define VPLOG_STREAM(verbose_level) \
+ logging::ErrnoLogMessage(__FILE__, __LINE__, -verbose_level, \
+ ::logging::GetLastSystemErrorCode()).stream()
+#endif
+
+#define VPLOG(verbose_level) \
+ LAZY_STREAM(VPLOG_STREAM(verbose_level), VLOG_IS_ON(verbose_level))
+
+#define VPLOG_IF(verbose_level, condition) \
+ LAZY_STREAM(VPLOG_STREAM(verbose_level), \
+ VLOG_IS_ON(verbose_level) && (condition))
+
+// TODO(akalin): Add more VLOG variants, e.g. VPLOG.
+
+#define LOG_ASSERT(condition) \
+ LOG_IF(FATAL, !(condition)) << "Assert failed: " #condition ". "
+
+#if defined(OS_WIN)
+#define PLOG_STREAM(severity) \
+ COMPACT_GOOGLE_LOG_EX_ ## severity(Win32ErrorLogMessage, \
+ ::logging::GetLastSystemErrorCode()).stream()
+#elif defined(OS_POSIX)
+#define PLOG_STREAM(severity) \
+ COMPACT_GOOGLE_LOG_EX_ ## severity(ErrnoLogMessage, \
+ ::logging::GetLastSystemErrorCode()).stream()
+#endif
+
+#define PLOG(severity) \
+ LAZY_STREAM(PLOG_STREAM(severity), LOG_IS_ON(severity))
+
+#define PLOG_IF(severity, condition) \
+ LAZY_STREAM(PLOG_STREAM(severity), LOG_IS_ON(severity) && (condition))
+
+// The actual stream used isn't important.
+#define EAT_STREAM_PARAMETERS \
+ true ? (void) 0 : ::logging::LogMessageVoidify() & LOG_STREAM(FATAL)
+
+// Captures the result of a CHECK_EQ (for example) and facilitates testing as a
+// boolean.
// Captures the result of a CHECK_EQ (for example) and facilitates testing as a
// boolean.
class CheckOpResult {
 public:
  // |message| must be non-null if and only if the check failed.
  // Deliberately not explicit: CHECK_OP/DCHECK_OP rely on implicit conversion
  // from the std::string* returned by Check*Impl (and from nullptr in the
  // DCHECK_IS_ON()==0 branch of DCHECK_OP).
  CheckOpResult(std::string* message) : message_(message) {}
  // Returns true if the check succeeded.
  operator bool() const { return !message_; }
  // Returns the message.
  std::string* message() { return message_; }

 private:
  // Null on success; on failure, a heap-allocated message whose ownership is
  // taken by the LogMessage constructed in CHECK_OP/DCHECK_OP.
  std::string* message_;
};
+
+// CHECK dies with a fatal error if condition is not true. It is *not*
+// controlled by NDEBUG, so the check will be executed regardless of
+// compilation mode.
+//
+// We make sure CHECK et al. always evaluates their arguments, as
+// doing CHECK(FunctionWithSideEffect()) is a common idiom.
+
+#if defined(OFFICIAL_BUILD) && defined(NDEBUG)
+
+// Make all CHECK functions discard their log strings to reduce code
+// bloat, and improve performance, for official release builds.
+
+#if defined(COMPILER_GCC) || __clang__
+#define LOGGING_CRASH() __builtin_trap()
+#else
+#define LOGGING_CRASH() ((void)(*(volatile char*)0 = 0))
+#endif
+
+// This is not calling BreakDebugger since this is called frequently, and
+// calling an out-of-line function instead of a noreturn inline macro prevents
+// compiler optimizations.
+#define CHECK(condition) \
+ !(condition) ? LOGGING_CRASH() : EAT_STREAM_PARAMETERS
+
+#define PCHECK(condition) CHECK(condition)
+
+#define CHECK_OP(name, op, val1, val2) CHECK((val1) op (val2))
+
+#else // !(OFFICIAL_BUILD && NDEBUG)
+
+#if defined(_PREFAST_) && defined(OS_WIN)
+// Use __analysis_assume to tell the VC++ static analysis engine that
+// assert conditions are true, to suppress warnings. The LAZY_STREAM
+// parameter doesn't reference 'condition' in /analyze builds because
+// this evaluation confuses /analyze. The !! before condition is because
+// __analysis_assume gets confused on some conditions:
+// http://randomascii.wordpress.com/2011/09/13/analyze-for-visual-studio-the-ugly-part-5/
+
+#define CHECK(condition) \
+ __analysis_assume(!!(condition)), \
+ LAZY_STREAM(LOG_STREAM(FATAL), false) \
+ << "Check failed: " #condition ". "
+
+#define PCHECK(condition) \
+ __analysis_assume(!!(condition)), \
+ LAZY_STREAM(PLOG_STREAM(FATAL), false) \
+ << "Check failed: " #condition ". "
+
+#else // _PREFAST_
+
+// Do as much work as possible out of line to reduce inline code size.
+#define CHECK(condition) \
+ LAZY_STREAM(logging::LogMessage(__FILE__, __LINE__, #condition).stream(), \
+ !(condition))
+
+#define PCHECK(condition) \
+ LAZY_STREAM(PLOG_STREAM(FATAL), !(condition)) \
+ << "Check failed: " #condition ". "
+
+#endif // _PREFAST_
+
+// Helper macro for binary operators.
+// Don't use this macro directly in your code, use CHECK_EQ et al below.
+// The 'switch' is used to prevent the 'else' from being ambiguous when the
+// macro is used in an 'if' clause such as:
+// if (a == 1)
+// CHECK_EQ(2, a);
+#define CHECK_OP(name, op, val1, val2) \
+ switch (0) case 0: default: \
+ if (logging::CheckOpResult true_if_passed = \
+ logging::Check##name##Impl((val1), (val2), \
+ #val1 " " #op " " #val2)) \
+ ; \
+ else \
+ logging::LogMessage(__FILE__, __LINE__, true_if_passed.message()).stream()
+
+#endif // !(OFFICIAL_BUILD && NDEBUG)
+
+// This formats a value for a failing CHECK_XX statement. Ordinarily,
+// it uses the definition for operator<<, with a few special cases below.
template <typename T>
inline typename std::enable_if<
    base::internal::SupportsOstreamOperator<const T&>::value,
    void>::type
MakeCheckOpValueString(std::ostream* os, const T& v) {
  // Default case: the type has a usable operator<<, so just stream it.
  (*os) << v;
}
+
+// We need overloads for enums that don't support operator<<.
+// (i.e. scoped enums where no operator<< overload was declared).
+template <typename T>
+inline typename std::enable_if<
+ !base::internal::SupportsOstreamOperator<const T&>::value &&
+ std::is_enum<T>::value,
+ void>::type
+MakeCheckOpValueString(std::ostream* os, const T& v) {
+ (*os) << static_cast<typename base::underlying_type<T>::type>(v);
+}
+
+// We need an explicit overload for std::nullptr_t.
+BASE_EXPORT void MakeCheckOpValueString(std::ostream* os, std::nullptr_t p);
+
+// Build the error message string. This is separate from the "Impl"
+// function template because it is not performance critical and so can
+// be out of line, while the "Impl" code should be inline. Caller
+// takes ownership of the returned string.
// Renders "names (v1 vs. v2)" into a freshly heap-allocated string. The
// caller (the CHECK_OP machinery) takes ownership of the returned pointer.
template<class t1, class t2>
std::string* MakeCheckOpString(const t1& v1, const t2& v2, const char* names) {
  std::ostringstream stream;
  stream << names << " (";
  MakeCheckOpValueString(&stream, v1);
  stream << " vs. ";
  MakeCheckOpValueString(&stream, v2);
  stream << ")";
  return new std::string(stream.str());
}
+
+// Commonly used instantiations of MakeCheckOpString<>. Explicitly instantiated
+// in logging.cc.
+extern template BASE_EXPORT std::string* MakeCheckOpString<int, int>(
+ const int&, const int&, const char* names);
+extern template BASE_EXPORT
+std::string* MakeCheckOpString<unsigned long, unsigned long>(
+ const unsigned long&, const unsigned long&, const char* names);
+extern template BASE_EXPORT
+std::string* MakeCheckOpString<unsigned long, unsigned int>(
+ const unsigned long&, const unsigned int&, const char* names);
+extern template BASE_EXPORT
+std::string* MakeCheckOpString<unsigned int, unsigned long>(
+ const unsigned int&, const unsigned long&, const char* names);
+extern template BASE_EXPORT
+std::string* MakeCheckOpString<std::string, std::string>(
+ const std::string&, const std::string&, const char* name);
+
+// Helper functions for CHECK_OP macro.
+// The (int, int) specialization works around the issue that the compiler
+// will not instantiate the template version of the function on values of
+// unnamed enum type - see comment below.
+#define DEFINE_CHECK_OP_IMPL(name, op) \
+ template <class t1, class t2> \
+ inline std::string* Check##name##Impl(const t1& v1, const t2& v2, \
+ const char* names) { \
+ if (v1 op v2) return NULL; \
+ else return MakeCheckOpString(v1, v2, names); \
+ } \
+ inline std::string* Check##name##Impl(int v1, int v2, const char* names) { \
+ if (v1 op v2) return NULL; \
+ else return MakeCheckOpString(v1, v2, names); \
+ }
+DEFINE_CHECK_OP_IMPL(EQ, ==)
+DEFINE_CHECK_OP_IMPL(NE, !=)
+DEFINE_CHECK_OP_IMPL(LE, <=)
+DEFINE_CHECK_OP_IMPL(LT, < )
+DEFINE_CHECK_OP_IMPL(GE, >=)
+DEFINE_CHECK_OP_IMPL(GT, > )
+#undef DEFINE_CHECK_OP_IMPL
+
+#define CHECK_EQ(val1, val2) CHECK_OP(EQ, ==, val1, val2)
+#define CHECK_NE(val1, val2) CHECK_OP(NE, !=, val1, val2)
+#define CHECK_LE(val1, val2) CHECK_OP(LE, <=, val1, val2)
+#define CHECK_LT(val1, val2) CHECK_OP(LT, < , val1, val2)
+#define CHECK_GE(val1, val2) CHECK_OP(GE, >=, val1, val2)
+#define CHECK_GT(val1, val2) CHECK_OP(GT, > , val1, val2)
+
+#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
+#define ENABLE_DLOG 0
+#else
+#define ENABLE_DLOG 1
+#endif
+
+#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
+#define DCHECK_IS_ON() 0
+#else
+#define DCHECK_IS_ON() 1
+#endif
+
+// Definitions for DLOG et al.
+
+#if ENABLE_DLOG
+
+#define DLOG_IS_ON(severity) LOG_IS_ON(severity)
+#define DLOG_IF(severity, condition) LOG_IF(severity, condition)
+#define DLOG_ASSERT(condition) LOG_ASSERT(condition)
+#define DPLOG_IF(severity, condition) PLOG_IF(severity, condition)
+#define DVLOG_IF(verboselevel, condition) VLOG_IF(verboselevel, condition)
+#define DVPLOG_IF(verboselevel, condition) VPLOG_IF(verboselevel, condition)
+
+#else // ENABLE_DLOG
+
+// If ENABLE_DLOG is off, we want to avoid emitting any references to
+// |condition| (which may reference a variable defined only if NDEBUG
+// is not defined). Contrast this with DCHECK et al., which has
+// different behavior.
+
+#define DLOG_IS_ON(severity) false
+#define DLOG_IF(severity, condition) EAT_STREAM_PARAMETERS
+#define DLOG_ASSERT(condition) EAT_STREAM_PARAMETERS
+#define DPLOG_IF(severity, condition) EAT_STREAM_PARAMETERS
+#define DVLOG_IF(verboselevel, condition) EAT_STREAM_PARAMETERS
+#define DVPLOG_IF(verboselevel, condition) EAT_STREAM_PARAMETERS
+
+#endif // ENABLE_DLOG
+
+// DEBUG_MODE is for uses like
+// if (DEBUG_MODE) foo.CheckThatFoo();
+// instead of
+// #ifndef NDEBUG
+// foo.CheckThatFoo();
+// #endif
+//
+// We tie its state to ENABLE_DLOG.
+enum { DEBUG_MODE = ENABLE_DLOG };
+
+#undef ENABLE_DLOG
+
+#define DLOG(severity) \
+ LAZY_STREAM(LOG_STREAM(severity), DLOG_IS_ON(severity))
+
+#define DPLOG(severity) \
+ LAZY_STREAM(PLOG_STREAM(severity), DLOG_IS_ON(severity))
+
+#define DVLOG(verboselevel) DVLOG_IF(verboselevel, VLOG_IS_ON(verboselevel))
+
+#define DVPLOG(verboselevel) DVPLOG_IF(verboselevel, VLOG_IS_ON(verboselevel))
+
+// Definitions for DCHECK et al.
+
+#if DCHECK_IS_ON()
+
+#define COMPACT_GOOGLE_LOG_EX_DCHECK(ClassName, ...) \
+ COMPACT_GOOGLE_LOG_EX_FATAL(ClassName , ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_DCHECK COMPACT_GOOGLE_LOG_FATAL
+const LogSeverity LOG_DCHECK = LOG_FATAL;
+
+#else // DCHECK_IS_ON()
+
+// These are just dummy values.
+#define COMPACT_GOOGLE_LOG_EX_DCHECK(ClassName, ...) \
+ COMPACT_GOOGLE_LOG_EX_INFO(ClassName , ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_DCHECK COMPACT_GOOGLE_LOG_INFO
+const LogSeverity LOG_DCHECK = LOG_INFO;
+
+#endif // DCHECK_IS_ON()
+
+// DCHECK et al. make sure to reference |condition| regardless of
+// whether DCHECKs are enabled; this is so that we don't get unused
+// variable warnings if the only use of a variable is in a DCHECK.
+// This behavior is different from DLOG_IF et al.
+
+#if defined(_PREFAST_) && defined(OS_WIN)
+// See comments on the previous use of __analysis_assume.
+
+#define DCHECK(condition) \
+ __analysis_assume(!!(condition)), \
+ LAZY_STREAM(LOG_STREAM(DCHECK), false) \
+ << "Check failed: " #condition ". "
+
+#define DPCHECK(condition) \
+ __analysis_assume(!!(condition)), \
+ LAZY_STREAM(PLOG_STREAM(DCHECK), false) \
+ << "Check failed: " #condition ". "
+
+#else // _PREFAST_
+
+#define DCHECK(condition) \
+ LAZY_STREAM(LOG_STREAM(DCHECK), DCHECK_IS_ON() ? !(condition) : false) \
+ << "Check failed: " #condition ". "
+
+#define DPCHECK(condition) \
+ LAZY_STREAM(PLOG_STREAM(DCHECK), DCHECK_IS_ON() ? !(condition) : false) \
+ << "Check failed: " #condition ". "
+
+#endif // _PREFAST_
+
+// Helper macro for binary operators.
+// Don't use this macro directly in your code, use DCHECK_EQ et al below.
+// The 'switch' is used to prevent the 'else' from being ambiguous when the
+// macro is used in an 'if' clause such as:
+// if (a == 1)
+// DCHECK_EQ(2, a);
+#define DCHECK_OP(name, op, val1, val2) \
+ switch (0) case 0: default: \
+ if (logging::CheckOpResult true_if_passed = \
+ DCHECK_IS_ON() ? \
+ logging::Check##name##Impl((val1), (val2), \
+ #val1 " " #op " " #val2) : nullptr) \
+ ; \
+ else \
+ logging::LogMessage(__FILE__, __LINE__, ::logging::LOG_DCHECK, \
+ true_if_passed.message()).stream()
+
+// Equality/Inequality checks - compare two values, and log a
+// LOG_DCHECK message including the two values when the result is not
+// as expected. The values must have operator<<(ostream, ...)
+// defined.
+//
+// You may append to the error message like so:
+// DCHECK_NE(1, 2) << ": The world must be ending!";
+//
+// We are very careful to ensure that each argument is evaluated exactly
+// once, and that anything which is legal to pass as a function argument is
+// legal here. In particular, the arguments may be temporary expressions
+// which will end up being destroyed at the end of the apparent statement,
+// for example:
+// DCHECK_EQ(string("abc")[1], 'b');
+//
+// WARNING: These don't compile correctly if one of the arguments is a pointer
+// and the other is NULL. In new code, prefer nullptr instead. To
+// work around this for C++98, simply static_cast NULL to the type of the
+// desired pointer.
+
+#define DCHECK_EQ(val1, val2) DCHECK_OP(EQ, ==, val1, val2)
+#define DCHECK_NE(val1, val2) DCHECK_OP(NE, !=, val1, val2)
+#define DCHECK_LE(val1, val2) DCHECK_OP(LE, <=, val1, val2)
+#define DCHECK_LT(val1, val2) DCHECK_OP(LT, < , val1, val2)
+#define DCHECK_GE(val1, val2) DCHECK_OP(GE, >=, val1, val2)
+#define DCHECK_GT(val1, val2) DCHECK_OP(GT, > , val1, val2)
+
+#if !DCHECK_IS_ON() && defined(OS_CHROMEOS)
+// Implement logging of NOTREACHED() as a dedicated function to get function
+// call overhead down to a minimum.
+void LogErrorNotReached(const char* file, int line);
+#define NOTREACHED() \
+ true ? ::logging::LogErrorNotReached(__FILE__, __LINE__) \
+ : EAT_STREAM_PARAMETERS
+#else
+#define NOTREACHED() DCHECK(false)
+#endif
+
+// Redefine the standard assert to use our nice log files
+#undef assert
+#define assert(x) DLOG_ASSERT(x)
+
+// This class more or less represents a particular log message. You
+// create an instance of LogMessage and then stream stuff to it.
+// When you finish streaming to it, ~LogMessage is called and the
+// full message gets streamed to the appropriate destination.
+//
+// You shouldn't actually use LogMessage's constructor to log things,
+// though. You should use the LOG() macro (and variants thereof)
+// above.
class BASE_EXPORT LogMessage {
 public:
  // Used for LOG(severity).
  LogMessage(const char* file, int line, LogSeverity severity);

  // Used for CHECK(). Implied severity = LOG_FATAL.
  LogMessage(const char* file, int line, const char* condition);

  // Used for CHECK_EQ(), etc. Takes ownership of the given string.
  // Implied severity = LOG_FATAL.
  LogMessage(const char* file, int line, std::string* result);

  // Used for DCHECK_EQ(), etc. Takes ownership of the given string.
  LogMessage(const char* file, int line, LogSeverity severity,
             std::string* result);

  ~LogMessage();

  // Stream to append message text to; the full message is emitted when the
  // LogMessage is destroyed (see the class comment above).
  std::ostream& stream() { return stream_; }

 private:
  // Writes the log-line prefix into stream_ and records message_start_.
  void Init(const char* file, int line);

  LogSeverity severity_;       // Severity supplied to (or implied by) the ctor.
  std::ostringstream stream_;  // Accumulates the full log line.
  size_t message_start_;  // Offset of the start of the message (past prefix
                          // info).
  // The file and line information passed in to the constructor.
  const char* file_;
  const int line_;

#if defined(OS_WIN)
  // Stores the current value of GetLastError in the constructor and restores
  // it in the destructor by calling SetLastError.
  // This is useful since the LogMessage class uses a lot of Win32 calls
  // that will lose the value of GLE and the code that called the log function
  // will have lost the thread error value when the log call returns.
  class SaveLastError {
   public:
    SaveLastError();
    ~SaveLastError();

    unsigned long get_error() const { return last_error_; }

   protected:
    unsigned long last_error_;
  };

  SaveLastError last_error_;
#endif

  DISALLOW_COPY_AND_ASSIGN(LogMessage);
};
+
+// This class is used to explicitly ignore values in the conditional
+// logging macros. This avoids compiler warnings like "value computed
+// is not used" and "statement has no effect".
+class LogMessageVoidify {
+ public:
+ LogMessageVoidify() { }
+ // This has to be an operator with a precedence lower than << but
+ // higher than ?:
+ void operator&(std::ostream&) { }
+};
+
+#if defined(OS_WIN)
+typedef unsigned long SystemErrorCode;
+#elif defined(OS_POSIX)
+typedef int SystemErrorCode;
+#endif
+
+// Alias for ::GetLastError() on Windows and errno on POSIX. Avoids having to
+// pull in windows.h just for GetLastError() and DWORD.
+BASE_EXPORT SystemErrorCode GetLastSystemErrorCode();
+BASE_EXPORT std::string SystemErrorCodeToString(SystemErrorCode error_code);
+
+#if defined(OS_WIN)
+// Appends a formatted system message of the GetLastError() type.
+class BASE_EXPORT Win32ErrorLogMessage {
+ public:
+ Win32ErrorLogMessage(const char* file,
+ int line,
+ LogSeverity severity,
+ SystemErrorCode err);
+
+ // Appends the error message before destructing the encapsulated class.
+ ~Win32ErrorLogMessage();
+
+ std::ostream& stream() { return log_message_.stream(); }
+
+ private:
+ SystemErrorCode err_;
+ LogMessage log_message_;
+
+ DISALLOW_COPY_AND_ASSIGN(Win32ErrorLogMessage);
+};
+#elif defined(OS_POSIX)
+// Appends a formatted system message of the errno type
+class BASE_EXPORT ErrnoLogMessage {
+ public:
+ ErrnoLogMessage(const char* file,
+ int line,
+ LogSeverity severity,
+ SystemErrorCode err);
+
+ // Appends the error message before destructing the encapsulated class.
+ ~ErrnoLogMessage();
+
+ std::ostream& stream() { return log_message_.stream(); }
+
+ private:
+ SystemErrorCode err_;
+ LogMessage log_message_;
+
+ DISALLOW_COPY_AND_ASSIGN(ErrnoLogMessage);
+};
+#endif // OS_WIN
+
+// Closes the log file explicitly if open.
+// NOTE: Since the log file is opened as necessary by the action of logging
+// statements, there's no guarantee that it will stay closed
+// after this call.
+BASE_EXPORT void CloseLogFile();
+
+// Async signal safe logging mechanism.
+BASE_EXPORT void RawLog(int level, const char* message);
+
+#define RAW_LOG(level, message) logging::RawLog(logging::LOG_ ## level, message)
+
+#define RAW_CHECK(condition) \
+ do { \
+ if (!(condition)) \
+ logging::RawLog(logging::LOG_FATAL, "Check failed: " #condition "\n"); \
+ } while (0)
+
+#if defined(OS_WIN)
+// Returns true if logging to file is enabled.
+BASE_EXPORT bool IsLoggingToFileEnabled();
+
+// Returns the default log file path.
+BASE_EXPORT std::wstring GetLogFileFullPath();
+#endif
+
+} // namespace logging
+
+// The NOTIMPLEMENTED() macro annotates codepaths which have
+// not been implemented yet.
+//
+// The implementation of this macro is controlled by NOTIMPLEMENTED_POLICY:
+// 0 -- Do nothing (stripped by compiler)
+// 1 -- Warn at compile time
+// 2 -- Fail at compile time
+// 3 -- Fail at runtime (DCHECK)
+// 4 -- [default] LOG(ERROR) at runtime
+// 5 -- LOG(ERROR) at runtime, only once per call-site
+
+#ifndef NOTIMPLEMENTED_POLICY
+#if defined(OS_ANDROID) && defined(OFFICIAL_BUILD)
+#define NOTIMPLEMENTED_POLICY 0
+#else
+// Select default policy: LOG(ERROR)
+#define NOTIMPLEMENTED_POLICY 4
+#endif
+#endif
+
+#if defined(COMPILER_GCC)
+// On Linux, with GCC, we can use __PRETTY_FUNCTION__ to get the demangled name
+// of the current function in the NOTIMPLEMENTED message.
+#define NOTIMPLEMENTED_MSG "Not implemented reached in " << __PRETTY_FUNCTION__
+#else
+#define NOTIMPLEMENTED_MSG "NOT IMPLEMENTED"
+#endif
+
+#if NOTIMPLEMENTED_POLICY == 0
+#define NOTIMPLEMENTED() EAT_STREAM_PARAMETERS
+#elif NOTIMPLEMENTED_POLICY == 1
+// TODO, figure out how to generate a warning
+#define NOTIMPLEMENTED() static_assert(false, "NOT_IMPLEMENTED")
+#elif NOTIMPLEMENTED_POLICY == 2
+#define NOTIMPLEMENTED() static_assert(false, "NOT_IMPLEMENTED")
+#elif NOTIMPLEMENTED_POLICY == 3
+#define NOTIMPLEMENTED() NOTREACHED()
+#elif NOTIMPLEMENTED_POLICY == 4
+#define NOTIMPLEMENTED() LOG(ERROR) << NOTIMPLEMENTED_MSG
+#elif NOTIMPLEMENTED_POLICY == 5
+#define NOTIMPLEMENTED() do {\
+ static bool logged_once = false;\
+ LOG_IF(ERROR, !logged_once) << NOTIMPLEMENTED_MSG;\
+ logged_once = true;\
+} while(0);\
+EAT_STREAM_PARAMETERS
+#endif
+
+#endif // BASE_LOGGING_H_
diff --git a/libchrome/base/logging_unittest.cc b/libchrome/base/logging_unittest.cc
new file mode 100644
index 0000000..8a20c54
--- /dev/null
+++ b/libchrome/base/logging_unittest.cc
@@ -0,0 +1,317 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace logging {
+
+namespace {
+
+using ::testing::Return;
+
+// Needs to be global since log assert handlers can't maintain state.
+int log_sink_call_count = 0;
+
+#if !defined(OFFICIAL_BUILD) || defined(DCHECK_ALWAYS_ON) || !defined(NDEBUG)
+void LogSink(const std::string& str) {
+ ++log_sink_call_count;
+}
+#endif
+
+// Class to make sure any manipulations we do to the min log level are
+// contained (i.e., do not affect other unit tests).
+class LogStateSaver {
+ public:
+ LogStateSaver() : old_min_log_level_(GetMinLogLevel()) {}
+
+ ~LogStateSaver() {
+ SetMinLogLevel(old_min_log_level_);
+ SetLogAssertHandler(NULL);
+ log_sink_call_count = 0;
+ }
+
+ private:
+ int old_min_log_level_;
+
+ DISALLOW_COPY_AND_ASSIGN(LogStateSaver);
+};
+
+class LoggingTest : public testing::Test {
+ private:
+ LogStateSaver log_state_saver_;
+};
+
+class MockLogSource {
+ public:
+ MOCK_METHOD0(Log, const char*());
+};
+
+TEST_F(LoggingTest, BasicLogging) {
+ MockLogSource mock_log_source;
+ EXPECT_CALL(mock_log_source, Log()).Times(DEBUG_MODE ? 16 : 8).
+ WillRepeatedly(Return("log message"));
+
+ SetMinLogLevel(LOG_INFO);
+
+ EXPECT_TRUE(LOG_IS_ON(INFO));
+ // As of g++-4.5, the first argument to EXPECT_EQ cannot be a
+ // constant expression.
+ const bool kIsDebugMode = (DEBUG_MODE != 0);
+ EXPECT_TRUE(kIsDebugMode == DLOG_IS_ON(INFO));
+ EXPECT_TRUE(VLOG_IS_ON(0));
+
+ LOG(INFO) << mock_log_source.Log();
+ LOG_IF(INFO, true) << mock_log_source.Log();
+ PLOG(INFO) << mock_log_source.Log();
+ PLOG_IF(INFO, true) << mock_log_source.Log();
+ VLOG(0) << mock_log_source.Log();
+ VLOG_IF(0, true) << mock_log_source.Log();
+ VPLOG(0) << mock_log_source.Log();
+ VPLOG_IF(0, true) << mock_log_source.Log();
+
+ DLOG(INFO) << mock_log_source.Log();
+ DLOG_IF(INFO, true) << mock_log_source.Log();
+ DPLOG(INFO) << mock_log_source.Log();
+ DPLOG_IF(INFO, true) << mock_log_source.Log();
+ DVLOG(0) << mock_log_source.Log();
+ DVLOG_IF(0, true) << mock_log_source.Log();
+ DVPLOG(0) << mock_log_source.Log();
+ DVPLOG_IF(0, true) << mock_log_source.Log();
+}
+
+TEST_F(LoggingTest, LogIsOn) {
+#if defined(NDEBUG)
+ const bool kDfatalIsFatal = false;
+#else // defined(NDEBUG)
+ const bool kDfatalIsFatal = true;
+#endif // defined(NDEBUG)
+
+ SetMinLogLevel(LOG_INFO);
+ EXPECT_TRUE(LOG_IS_ON(INFO));
+ EXPECT_TRUE(LOG_IS_ON(WARNING));
+ EXPECT_TRUE(LOG_IS_ON(ERROR));
+ EXPECT_TRUE(LOG_IS_ON(FATAL));
+ EXPECT_TRUE(LOG_IS_ON(DFATAL));
+
+ SetMinLogLevel(LOG_WARNING);
+ EXPECT_FALSE(LOG_IS_ON(INFO));
+ EXPECT_TRUE(LOG_IS_ON(WARNING));
+ EXPECT_TRUE(LOG_IS_ON(ERROR));
+ EXPECT_TRUE(LOG_IS_ON(FATAL));
+ EXPECT_TRUE(LOG_IS_ON(DFATAL));
+
+ SetMinLogLevel(LOG_ERROR);
+ EXPECT_FALSE(LOG_IS_ON(INFO));
+ EXPECT_FALSE(LOG_IS_ON(WARNING));
+ EXPECT_TRUE(LOG_IS_ON(ERROR));
+ EXPECT_TRUE(LOG_IS_ON(FATAL));
+ EXPECT_TRUE(LOG_IS_ON(DFATAL));
+
+ // LOG_IS_ON(FATAL) should always be true.
+ SetMinLogLevel(LOG_FATAL + 1);
+ EXPECT_FALSE(LOG_IS_ON(INFO));
+ EXPECT_FALSE(LOG_IS_ON(WARNING));
+ EXPECT_FALSE(LOG_IS_ON(ERROR));
+ EXPECT_TRUE(LOG_IS_ON(FATAL));
+ EXPECT_TRUE(kDfatalIsFatal == LOG_IS_ON(DFATAL));
+}
+
+TEST_F(LoggingTest, LoggingIsLazyBySeverity) {
+ MockLogSource mock_log_source;
+ EXPECT_CALL(mock_log_source, Log()).Times(0);
+
+ SetMinLogLevel(LOG_WARNING);
+
+ EXPECT_FALSE(LOG_IS_ON(INFO));
+ EXPECT_FALSE(DLOG_IS_ON(INFO));
+ EXPECT_FALSE(VLOG_IS_ON(1));
+
+ LOG(INFO) << mock_log_source.Log();
+ LOG_IF(INFO, false) << mock_log_source.Log();
+ PLOG(INFO) << mock_log_source.Log();
+ PLOG_IF(INFO, false) << mock_log_source.Log();
+ VLOG(1) << mock_log_source.Log();
+ VLOG_IF(1, true) << mock_log_source.Log();
+ VPLOG(1) << mock_log_source.Log();
+ VPLOG_IF(1, true) << mock_log_source.Log();
+
+ DLOG(INFO) << mock_log_source.Log();
+ DLOG_IF(INFO, true) << mock_log_source.Log();
+ DPLOG(INFO) << mock_log_source.Log();
+ DPLOG_IF(INFO, true) << mock_log_source.Log();
+ DVLOG(1) << mock_log_source.Log();
+ DVLOG_IF(1, true) << mock_log_source.Log();
+ DVPLOG(1) << mock_log_source.Log();
+ DVPLOG_IF(1, true) << mock_log_source.Log();
+}
+
+TEST_F(LoggingTest, LoggingIsLazyByDestination) {
+ MockLogSource mock_log_source;
+ MockLogSource mock_log_source_error;
+ EXPECT_CALL(mock_log_source, Log()).Times(0);
+
+ // Severity >= ERROR is always printed to stderr.
+ EXPECT_CALL(mock_log_source_error, Log()).Times(1).
+ WillRepeatedly(Return("log message"));
+
+ LoggingSettings settings;
+ settings.logging_dest = LOG_NONE;
+ InitLogging(settings);
+
+ LOG(INFO) << mock_log_source.Log();
+ LOG(WARNING) << mock_log_source.Log();
+ LOG(ERROR) << mock_log_source_error.Log();
+}
+
+// Official builds have CHECKs directly call BreakDebugger.
+#if !defined(OFFICIAL_BUILD)
+
+TEST_F(LoggingTest, CheckStreamsAreLazy) {
+ MockLogSource mock_log_source, uncalled_mock_log_source;
+ EXPECT_CALL(mock_log_source, Log()).Times(8).
+ WillRepeatedly(Return("check message"));
+ EXPECT_CALL(uncalled_mock_log_source, Log()).Times(0);
+
+ SetLogAssertHandler(&LogSink);
+
+ CHECK(mock_log_source.Log()) << uncalled_mock_log_source.Log();
+ PCHECK(!mock_log_source.Log()) << mock_log_source.Log();
+ CHECK_EQ(mock_log_source.Log(), mock_log_source.Log())
+ << uncalled_mock_log_source.Log();
+ CHECK_NE(mock_log_source.Log(), mock_log_source.Log())
+ << mock_log_source.Log();
+}
+
+#endif
+
+TEST_F(LoggingTest, DebugLoggingReleaseBehavior) {
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+ int debug_only_variable = 1;
+#endif
+ // These should avoid emitting references to |debug_only_variable|
+ // in release mode.
+ DLOG_IF(INFO, debug_only_variable) << "test";
+ DLOG_ASSERT(debug_only_variable) << "test";
+ DPLOG_IF(INFO, debug_only_variable) << "test";
+ DVLOG_IF(1, debug_only_variable) << "test";
+}
+
+TEST_F(LoggingTest, DcheckStreamsAreLazy) {
+ MockLogSource mock_log_source;
+ EXPECT_CALL(mock_log_source, Log()).Times(0);
+#if DCHECK_IS_ON()
+ DCHECK(true) << mock_log_source.Log();
+ DCHECK_EQ(0, 0) << mock_log_source.Log();
+#else
+ DCHECK(mock_log_source.Log()) << mock_log_source.Log();
+ DPCHECK(mock_log_source.Log()) << mock_log_source.Log();
+ DCHECK_EQ(0, 0) << mock_log_source.Log();
+ DCHECK_EQ(mock_log_source.Log(), static_cast<const char*>(NULL))
+ << mock_log_source.Log();
+#endif
+}
+
+TEST_F(LoggingTest, Dcheck) {
+#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
+ // Release build.
+ EXPECT_FALSE(DCHECK_IS_ON());
+ EXPECT_FALSE(DLOG_IS_ON(DCHECK));
+#elif defined(NDEBUG) && defined(DCHECK_ALWAYS_ON)
+ // Release build with real DCHECKS.
+ SetLogAssertHandler(&LogSink);
+ EXPECT_TRUE(DCHECK_IS_ON());
+ EXPECT_TRUE(DLOG_IS_ON(DCHECK));
+#else
+ // Debug build.
+ SetLogAssertHandler(&LogSink);
+ EXPECT_TRUE(DCHECK_IS_ON());
+ EXPECT_TRUE(DLOG_IS_ON(DCHECK));
+#endif
+
+ EXPECT_EQ(0, log_sink_call_count);
+ DCHECK(false);
+ EXPECT_EQ(DCHECK_IS_ON() ? 1 : 0, log_sink_call_count);
+ DPCHECK(false);
+ EXPECT_EQ(DCHECK_IS_ON() ? 2 : 0, log_sink_call_count);
+ DCHECK_EQ(0, 1);
+ EXPECT_EQ(DCHECK_IS_ON() ? 3 : 0, log_sink_call_count);
+
+ // Test DCHECK on std::nullptr_t
+ log_sink_call_count = 0;
+ const void* p_null = nullptr;
+ const void* p_not_null = &p_null;
+ DCHECK_EQ(p_null, nullptr);
+ DCHECK_EQ(nullptr, p_null);
+ DCHECK_NE(p_not_null, nullptr);
+ DCHECK_NE(nullptr, p_not_null);
+ EXPECT_EQ(0, log_sink_call_count);
+
+ // Test DCHECK on a scoped enum.
+ enum class Animal { DOG, CAT };
+ DCHECK_EQ(Animal::DOG, Animal::DOG);
+ EXPECT_EQ(0, log_sink_call_count);
+ DCHECK_EQ(Animal::DOG, Animal::CAT);
+ EXPECT_EQ(DCHECK_IS_ON() ? 1 : 0, log_sink_call_count);
+}
+
+TEST_F(LoggingTest, DcheckReleaseBehavior) {
+ int some_variable = 1;
+ // These should still reference |some_variable| so we don't get
+ // unused variable warnings.
+ DCHECK(some_variable) << "test";
+ DPCHECK(some_variable) << "test";
+ DCHECK_EQ(some_variable, 1) << "test";
+}
+
+TEST_F(LoggingTest, DCheckEqStatements) {
+ bool reached = false;
+ if (false)
+ DCHECK_EQ(false, true); // Unreached.
+ else
+ DCHECK_EQ(true, reached = true); // Reached, passed.
+ ASSERT_EQ(DCHECK_IS_ON() ? true : false, reached);
+
+ if (false)
+ DCHECK_EQ(false, true); // Unreached.
+}
+
+TEST_F(LoggingTest, CheckEqStatements) {
+ bool reached = false;
+ if (false)
+ CHECK_EQ(false, true); // Unreached.
+ else
+ CHECK_EQ(true, reached = true); // Reached, passed.
+ ASSERT_TRUE(reached);
+
+ if (false)
+ CHECK_EQ(false, true); // Unreached.
+}
+
+// Test that defining an operator<< for a type in a namespace doesn't prevent
+// other code in that namespace from calling the operator<<(ostream, wstring)
+// defined by logging.h. This can fail if operator<<(ostream, wstring) can't be
+// found by ADL, since defining another operator<< prevents name lookup from
+// looking in the global namespace.
+namespace nested_test {
+ class Streamable {};
+ ALLOW_UNUSED_TYPE std::ostream& operator<<(std::ostream& out,
+ const Streamable&) {
+ return out << "Streamable";
+ }
+ TEST_F(LoggingTest, StreamingWstringFindsCorrectOperator) {
+ std::wstring wstr = L"Hello World";
+ std::ostringstream ostr;
+ ostr << wstr;
+ EXPECT_EQ("Hello World", ostr.str());
+ }
+} // namespace nested_test
+
+} // namespace
+
+} // namespace logging
diff --git a/libchrome/base/mac/OWNERS b/libchrome/base/mac/OWNERS
new file mode 100644
index 0000000..093a9c8
--- /dev/null
+++ b/libchrome/base/mac/OWNERS
@@ -0,0 +1,6 @@
+mark@chromium.org
+thakis@chromium.org
+
+# sdk_forward_declarations.[h|mm] will likely need to be modified by Cocoa
+# developers in general.
+per-file sdk_forward_declarations.*=file://chrome/browser/ui/cocoa/OWNERS
diff --git a/libchrome/base/mac/bind_objc_block.h b/libchrome/base/mac/bind_objc_block.h
new file mode 100644
index 0000000..2434d44
--- /dev/null
+++ b/libchrome/base/mac/bind_objc_block.h
@@ -0,0 +1,57 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_BIND_OBJC_BLOCK_H_
+#define BASE_MAC_BIND_OBJC_BLOCK_H_
+
+#include <Block.h>
+
+#include "base/bind.h"
+#include "base/callback_forward.h"
+#include "base/mac/scoped_block.h"
+
+// BindBlock builds a callback from an Objective-C block. Example usages:
+//
+// Closure closure = BindBlock(^{DoSomething();});
+//
+// Callback<int(void)> callback = BindBlock(^{return 42;});
+//
+// Callback<void(const std::string&, const std::string&)> callback =
+// BindBlock(^(const std::string& arg0, const std::string& arg1) {
+// ...
+// });
+//
+// These variadic templates will accommodate any number of arguments, however
+// the underlying templates in bind_internal.h and callback.h are limited to
+// seven total arguments, and the bound block itself is used as one of these
+// arguments, so functionally the templates are limited to binding blocks with
+// zero through six arguments.
+
+namespace base {
+
+namespace internal {
+
+// Helper function to run the block contained in the parameter.
+template<typename R, typename... Args>
+R RunBlock(base::mac::ScopedBlock<R(^)(Args...)> block, Args... args) {
+ R(^extracted_block)(Args...) = block.get();
+ return extracted_block(args...);
+}
+
+} // namespace internal
+
+// Construct a callback from an objective-C block with up to six arguments (see
+// note above).
+template<typename R, typename... Args>
+base::Callback<R(Args...)> BindBlock(R(^block)(Args...)) {
+ return base::Bind(
+ &base::internal::RunBlock<R, Args...>,
+ base::mac::ScopedBlock<R (^)(Args...)>(
+ base::mac::internal::ScopedBlockTraits<R (^)(Args...)>::Retain(
+ block)));
+}
+
+} // namespace base
+
+#endif // BASE_MAC_BIND_OBJC_BLOCK_H_
diff --git a/libchrome/base/mac/bundle_locations.h b/libchrome/base/mac/bundle_locations.h
new file mode 100644
index 0000000..276290b
--- /dev/null
+++ b/libchrome/base/mac/bundle_locations.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_BUNDLE_LOCATIONS_H_
+#define BASE_MAC_BUNDLE_LOCATIONS_H_
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+
+#if defined(__OBJC__)
+#import <Foundation/Foundation.h>
+#else // __OBJC__
+class NSBundle;
+class NSString;
+#endif // __OBJC__
+
+namespace base {
+
+class FilePath;
+
+namespace mac {
+
+// This file provides several functions to explicitly request the various
+// component bundles of Chrome. Please use these methods rather than calling
+// +[NSBundle mainBundle] or CFBundleGetMainBundle().
+//
+// Terminology
+// - "Outer Bundle" - This is the main bundle for Chrome; it's what
+// +[NSBundle mainBundle] returns when Chrome is launched normally.
+//
+// - "Main Bundle" - This is the bundle from which Chrome was launched.
+// This will be the same as the outer bundle except when Chrome is launched
+// via an app shortcut, in which case this will return the app shortcut's
+// bundle rather than the main Chrome bundle.
+//
+// - "Framework Bundle" - This is the bundle corresponding to the Chrome
+// framework.
+//
+// Guidelines for use:
+// - To access a resource, the Framework bundle should be used.
+// - If the choice is between the Outer or Main bundles then please choose
+// carefully. Most often the Outer bundle will be the right choice, but for
+// cases such as adding an app to the "launch on startup" list, the Main
+// bundle is probably the one to use.
+
+// Methods for retrieving the various bundles.
+BASE_EXPORT NSBundle* MainBundle();
+BASE_EXPORT FilePath MainBundlePath();
+BASE_EXPORT NSBundle* OuterBundle();
+BASE_EXPORT FilePath OuterBundlePath();
+BASE_EXPORT NSBundle* FrameworkBundle();
+BASE_EXPORT FilePath FrameworkBundlePath();
+
+// Set the bundle that the preceding functions will return, overriding the
+// default values. Restore the default by passing in |nil|.
+BASE_EXPORT void SetOverrideOuterBundle(NSBundle* bundle);
+BASE_EXPORT void SetOverrideFrameworkBundle(NSBundle* bundle);
+
+// Same as above but accepting a FilePath argument.
+BASE_EXPORT void SetOverrideOuterBundlePath(const FilePath& file_path);
+BASE_EXPORT void SetOverrideFrameworkBundlePath(const FilePath& file_path);
+
+} // namespace mac
+} // namespace base
+
+#endif // BASE_MAC_BUNDLE_LOCATIONS_H_
diff --git a/libchrome/base/mac/bundle_locations.mm b/libchrome/base/mac/bundle_locations.mm
new file mode 100644
index 0000000..54021b8
--- /dev/null
+++ b/libchrome/base/mac/bundle_locations.mm
@@ -0,0 +1,83 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/bundle_locations.h"
+
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "base/strings/sys_string_conversions.h"
+
+namespace base {
+namespace mac {
+
+// NSBundle isn't threadsafe, all functions in this file must be called on the
+// main thread.
+static NSBundle* g_override_framework_bundle = nil;
+static NSBundle* g_override_outer_bundle = nil;
+
+NSBundle* MainBundle() {
+ return [NSBundle mainBundle];
+}
+
+FilePath MainBundlePath() {
+ NSBundle* bundle = MainBundle();
+ return NSStringToFilePath([bundle bundlePath]);
+}
+
+NSBundle* OuterBundle() {
+ if (g_override_outer_bundle)
+ return g_override_outer_bundle;
+ return [NSBundle mainBundle];
+}
+
+FilePath OuterBundlePath() {
+ NSBundle* bundle = OuterBundle();
+ return NSStringToFilePath([bundle bundlePath]);
+}
+
+NSBundle* FrameworkBundle() {
+ if (g_override_framework_bundle)
+ return g_override_framework_bundle;
+ return [NSBundle mainBundle];
+}
+
+FilePath FrameworkBundlePath() {
+ NSBundle* bundle = FrameworkBundle();
+ return NSStringToFilePath([bundle bundlePath]);
+}
+
+static void AssignOverrideBundle(NSBundle* new_bundle,
+ NSBundle** override_bundle) {
+ if (new_bundle != *override_bundle) {
+ [*override_bundle release];
+ *override_bundle = [new_bundle retain];
+ }
+}
+
+static void AssignOverridePath(const FilePath& file_path,
+ NSBundle** override_bundle) {
+ NSString* path = base::SysUTF8ToNSString(file_path.value());
+ NSBundle* new_bundle = [NSBundle bundleWithPath:path];
+ DCHECK(new_bundle) << "Failed to load the bundle at " << file_path.value();
+ AssignOverrideBundle(new_bundle, override_bundle);
+}
+
+void SetOverrideOuterBundle(NSBundle* bundle) {
+ AssignOverrideBundle(bundle, &g_override_outer_bundle);
+}
+
+void SetOverrideFrameworkBundle(NSBundle* bundle) {
+ AssignOverrideBundle(bundle, &g_override_framework_bundle);
+}
+
+void SetOverrideOuterBundlePath(const FilePath& file_path) {
+ AssignOverridePath(file_path, &g_override_outer_bundle);
+}
+
+void SetOverrideFrameworkBundlePath(const FilePath& file_path) {
+ AssignOverridePath(file_path, &g_override_framework_bundle);
+}
+
+} // namespace mac
+} // namespace base
diff --git a/libchrome/base/mac/cocoa_protocols.h b/libchrome/base/mac/cocoa_protocols.h
new file mode 100644
index 0000000..a28795c
--- /dev/null
+++ b/libchrome/base/mac/cocoa_protocols.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_COCOA_PROTOCOLS_H_
+#define BASE_MAC_COCOA_PROTOCOLS_H_
+
+#import <Cocoa/Cocoa.h>
+
+// New Mac OS X SDKs introduce new protocols used for delegates. These
+// protocol defintions aren't not present in earlier releases of the Mac OS X
+// SDK. In order to support building against the new SDK, which requires
+// delegates to conform to these protocols, and earlier SDKs, which do not
+// define these protocols at all, this file will provide empty protocol
+// definitions when used with earlier SDK versions.
+
+#define DEFINE_EMPTY_PROTOCOL(p) \
+@protocol p \
+@end
+
+#if !defined(MAC_OS_X_VERSION_10_7) || \
+ MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7
+
+DEFINE_EMPTY_PROTOCOL(NSDraggingDestination)
+DEFINE_EMPTY_PROTOCOL(ICCameraDeviceDownloadDelegate)
+
+#endif // MAC_OS_X_VERSION_10_7
+
+#undef DEFINE_EMPTY_PROTOCOL
+
+#endif // BASE_MAC_COCOA_PROTOCOLS_H_
diff --git a/libchrome/base/mac/foundation_util.h b/libchrome/base/mac/foundation_util.h
new file mode 100644
index 0000000..ee23a17
--- /dev/null
+++ b/libchrome/base/mac/foundation_util.h
@@ -0,0 +1,398 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_FOUNDATION_UTIL_H_
+#define BASE_MAC_FOUNDATION_UTIL_H_
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "build/build_config.h"
+
+#if defined(__OBJC__)
+#import <Foundation/Foundation.h>
+@class NSFont;
+@class UIFont;
+#else // __OBJC__
+#include <CoreFoundation/CoreFoundation.h>
+class NSBundle;
+class NSFont;
+class NSString;
+class UIFont;
+#endif // __OBJC__
+
+#if defined(OS_IOS)
+#include <CoreText/CoreText.h>
+#else
+#include <ApplicationServices/ApplicationServices.h>
+#endif
+
+// Adapted from NSObjCRuntime.h NS_ENUM definition (used in Foundation starting
+// with the OS X 10.8 SDK and the iOS 6.0 SDK).
+#if __has_extension(cxx_strong_enums) && \
+ (defined(OS_IOS) || (defined(MAC_OS_X_VERSION_10_8) && \
+ MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_8))
+#define CR_FORWARD_ENUM(_type, _name) enum _name : _type _name
+#else
+#define CR_FORWARD_ENUM(_type, _name) _type _name
+#endif
+
+// Adapted from NSPathUtilities.h and NSObjCRuntime.h.
+#if __LP64__ || NS_BUILD_32_LIKE_64
+typedef CR_FORWARD_ENUM(unsigned long, NSSearchPathDirectory);
+typedef unsigned long NSSearchPathDomainMask;
+#else
+typedef CR_FORWARD_ENUM(unsigned int, NSSearchPathDirectory);
+typedef unsigned int NSSearchPathDomainMask;
+#endif
+
+typedef struct OpaqueSecTrustRef* SecACLRef;
+typedef struct OpaqueSecTrustedApplicationRef* SecTrustedApplicationRef;
+
+namespace base {
+
+class FilePath;
+
+namespace mac {
+
+// Returns true if the application is running from a bundle
+BASE_EXPORT bool AmIBundled();
+BASE_EXPORT void SetOverrideAmIBundled(bool value);
+
+#if defined(UNIT_TEST)
+// This is required because instantiating some tests requires checking the
+// directory structure, which sets the AmIBundled cache state. Individual tests
+// may or may not be bundled, and this would trip them up if the cache weren't
+// cleared. This should not be called from individual tests, just from test
+// instantiation code that gets a path from PathService.
+BASE_EXPORT void ClearAmIBundledCache();
+#endif
+
+// Returns true if this process is marked as a "Background only process".
+BASE_EXPORT bool IsBackgroundOnlyProcess();
+
+// Returns the path to a resource within the framework bundle.
+BASE_EXPORT FilePath PathForFrameworkBundleResource(CFStringRef resourceName);
+
+// Returns the creator code associated with the CFBundleRef at bundle.
+OSType CreatorCodeForCFBundleRef(CFBundleRef bundle);
+
+// Returns the creator code associated with this application, by calling
+// CreatorCodeForCFBundleRef for the application's main bundle. If this
+// information cannot be determined, returns kUnknownType ('????'). This
+// does not respect the override app bundle because it's based on CFBundle
+// instead of NSBundle, and because callers probably don't want the override
+// app bundle's creator code anyway.
+BASE_EXPORT OSType CreatorCodeForApplication();
+
+// Searches for directories for the given key in only the given |domain_mask|.
+// If found, fills result (which must always be non-NULL) with the
+// first found directory and returns true. Otherwise, returns false.
+BASE_EXPORT bool GetSearchPathDirectory(NSSearchPathDirectory directory,
+ NSSearchPathDomainMask domain_mask,
+ FilePath* result);
+
+// Searches for directories for the given key in only the local domain.
+// If found, fills result (which must always be non-NULL) with the
+// first found directory and returns true. Otherwise, returns false.
+BASE_EXPORT bool GetLocalDirectory(NSSearchPathDirectory directory,
+ FilePath* result);
+
+// Searches for directories for the given key in only the user domain.
+// If found, fills result (which must always be non-NULL) with the
+// first found directory and returns true. Otherwise, returns false.
+BASE_EXPORT bool GetUserDirectory(NSSearchPathDirectory directory,
+ FilePath* result);
+
+// Returns the ~/Library directory.
+BASE_EXPORT FilePath GetUserLibraryPath();
+
+// Takes a path to an (executable) binary and tries to provide the path to an
+// application bundle containing it. It takes the outermost bundle that it can
+// find (so for "/Foo/Bar.app/.../Baz.app/..." it produces "/Foo/Bar.app").
+// |exec_name| - path to the binary
+// returns - path to the application bundle, or empty on error
+BASE_EXPORT FilePath GetAppBundlePath(const FilePath& exec_name);
+
+#define TYPE_NAME_FOR_CF_TYPE_DECL(TypeCF) \
+BASE_EXPORT std::string TypeNameForCFType(TypeCF##Ref);
+
+TYPE_NAME_FOR_CF_TYPE_DECL(CFArray);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFBag);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFBoolean);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFData);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFDate);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFDictionary);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFNull);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFNumber);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFSet);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFString);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFURL);
+TYPE_NAME_FOR_CF_TYPE_DECL(CFUUID);
+
+TYPE_NAME_FOR_CF_TYPE_DECL(CGColor);
+
+TYPE_NAME_FOR_CF_TYPE_DECL(CTFont);
+TYPE_NAME_FOR_CF_TYPE_DECL(CTRun);
+
+#undef TYPE_NAME_FOR_CF_TYPE_DECL
+
+// Retain/release calls for memory management in C++.
+BASE_EXPORT void NSObjectRetain(void* obj);
+BASE_EXPORT void NSObjectRelease(void* obj);
+
+// CFTypeRefToNSObjectAutorelease transfers ownership of a Core Foundation
+// object (one derived from CFTypeRef) to the Foundation memory management
+// system. In a traditional managed-memory environment, cf_object is
+// autoreleased and returned as an NSObject. In a garbage-collected
+// environment, cf_object is marked as eligible for garbage collection.
+//
+// This function should only be used to convert a concrete CFTypeRef type to
+// its equivalent "toll-free bridged" NSObject subclass, for example,
+// converting a CFStringRef to NSString.
+//
+// By calling this function, callers relinquish any ownership claim to
+// cf_object. In a managed-memory environment, the object's ownership will be
+// managed by the innermost NSAutoreleasePool, so after this function returns,
+// callers should not assume that cf_object is valid any longer than the
+// returned NSObject.
+//
+// Returns an id, typed here for C++'s sake as a void*.
+BASE_EXPORT void* CFTypeRefToNSObjectAutorelease(CFTypeRef cf_object);
+
+// Returns the base bundle ID, which can be set by SetBaseBundleID but
+// defaults to a reasonable string. This never returns NULL. BaseBundleID
+// returns a pointer to static storage that must not be freed.
+BASE_EXPORT const char* BaseBundleID();
+
+// Sets the base bundle ID to override the default. The implementation will
+// make its own copy of new_base_bundle_id.
+BASE_EXPORT void SetBaseBundleID(const char* new_base_bundle_id);
+
+} // namespace mac
+} // namespace base
+
+#if !defined(__OBJC__)
+#define OBJC_CPP_CLASS_DECL(x) class x;
+#else // __OBJC__
+#define OBJC_CPP_CLASS_DECL(x)
+#endif // __OBJC__
+
+// Convert toll-free bridged CFTypes to NSTypes and vice-versa. This does not
+// autorelease |cf_val|. This is useful for the case where there is a CFType in
+// a call that expects an NSType and the compiler is complaining about const
+// casting problems.
+// The calls are used like this:
+// NSString *foo = CFToNSCast(CFSTR("Hello"));
+// CFStringRef foo2 = NSToCFCast(@"Hello");
+// The macro magic below is to enforce safe casting. It could possibly have
+// been done using template function specialization, but template function
+// specialization doesn't always work intuitively,
+// (http://www.gotw.ca/publications/mill17.htm) so the trusty combination
+// of macros and function overloading is used instead.
+
+#define CF_TO_NS_CAST_DECL(TypeCF, TypeNS) \
+OBJC_CPP_CLASS_DECL(TypeNS) \
+\
+namespace base { \
+namespace mac { \
+BASE_EXPORT TypeNS* CFToNSCast(TypeCF##Ref cf_val); \
+BASE_EXPORT TypeCF##Ref NSToCFCast(TypeNS* ns_val); \
+} \
+}
+
+#define CF_TO_NS_MUTABLE_CAST_DECL(name) \
+CF_TO_NS_CAST_DECL(CF##name, NS##name) \
+OBJC_CPP_CLASS_DECL(NSMutable##name) \
+\
+namespace base { \
+namespace mac { \
+BASE_EXPORT NSMutable##name* CFToNSCast(CFMutable##name##Ref cf_val); \
+BASE_EXPORT CFMutable##name##Ref NSToCFCast(NSMutable##name* ns_val); \
+} \
+}
+
+// List of toll-free bridged types taken from:
+// http://www.cocoadev.com/index.pl?TollFreeBridged
+
+CF_TO_NS_MUTABLE_CAST_DECL(Array);
+CF_TO_NS_MUTABLE_CAST_DECL(AttributedString);
+CF_TO_NS_CAST_DECL(CFCalendar, NSCalendar);
+CF_TO_NS_MUTABLE_CAST_DECL(CharacterSet);
+CF_TO_NS_MUTABLE_CAST_DECL(Data);
+CF_TO_NS_CAST_DECL(CFDate, NSDate);
+CF_TO_NS_MUTABLE_CAST_DECL(Dictionary);
+CF_TO_NS_CAST_DECL(CFError, NSError);
+CF_TO_NS_CAST_DECL(CFLocale, NSLocale);
+CF_TO_NS_CAST_DECL(CFNumber, NSNumber);
+CF_TO_NS_CAST_DECL(CFRunLoopTimer, NSTimer);
+CF_TO_NS_CAST_DECL(CFTimeZone, NSTimeZone);
+CF_TO_NS_MUTABLE_CAST_DECL(Set);
+CF_TO_NS_CAST_DECL(CFReadStream, NSInputStream);
+CF_TO_NS_CAST_DECL(CFWriteStream, NSOutputStream);
+CF_TO_NS_MUTABLE_CAST_DECL(String);
+CF_TO_NS_CAST_DECL(CFURL, NSURL);
+
+#if defined(OS_IOS)
+CF_TO_NS_CAST_DECL(CTFont, UIFont);
+#else
+CF_TO_NS_CAST_DECL(CTFont, NSFont);
+#endif
+
+#undef CF_TO_NS_CAST_DECL
+#undef CF_TO_NS_MUTABLE_CAST_DECL
+#undef OBJC_CPP_CLASS_DECL
+
+namespace base {
+namespace mac {
+
+// CFCast<>() and CFCastStrict<>() cast a basic CFTypeRef to a more
+// specific CoreFoundation type. The compatibility of the passed
+// object is found by comparing its opaque type against the
+// requested type identifier. If the supplied object is not
+// compatible with the requested return type, CFCast<>() returns
+// NULL and CFCastStrict<>() will DCHECK. Providing a NULL pointer
+// to either variant results in NULL being returned without
+// triggering any DCHECK.
+//
+// Example usage:
+// CFNumberRef some_number = base::mac::CFCast<CFNumberRef>(
+// CFArrayGetValueAtIndex(array, index));
+//
+// CFTypeRef hello = CFSTR("hello world");
+// CFStringRef some_string = base::mac::CFCastStrict<CFStringRef>(hello);
+
+template<typename T>
+T CFCast(const CFTypeRef& cf_val);
+
+template<typename T>
+T CFCastStrict(const CFTypeRef& cf_val);
+
+#define CF_CAST_DECL(TypeCF) \
+template<> BASE_EXPORT TypeCF##Ref \
+CFCast<TypeCF##Ref>(const CFTypeRef& cf_val);\
+\
+template<> BASE_EXPORT TypeCF##Ref \
+CFCastStrict<TypeCF##Ref>(const CFTypeRef& cf_val);
+
+CF_CAST_DECL(CFArray);
+CF_CAST_DECL(CFBag);
+CF_CAST_DECL(CFBoolean);
+CF_CAST_DECL(CFData);
+CF_CAST_DECL(CFDate);
+CF_CAST_DECL(CFDictionary);
+CF_CAST_DECL(CFNull);
+CF_CAST_DECL(CFNumber);
+CF_CAST_DECL(CFSet);
+CF_CAST_DECL(CFString);
+CF_CAST_DECL(CFURL);
+CF_CAST_DECL(CFUUID);
+
+CF_CAST_DECL(CGColor);
+
+CF_CAST_DECL(CTFont);
+CF_CAST_DECL(CTFontDescriptor);
+CF_CAST_DECL(CTRun);
+
+CF_CAST_DECL(SecACL);
+CF_CAST_DECL(SecTrustedApplication);
+
+#undef CF_CAST_DECL
+
+#if defined(__OBJC__)
+
+// ObjCCast<>() and ObjCCastStrict<>() cast a basic id to a more
+// specific (NSObject-derived) type. The compatibility of the passed
+// object is found by checking if it's a kind of the requested type
+// identifier. If the supplied object is not compatible with the
+// requested return type, ObjCCast<>() returns nil and
+// ObjCCastStrict<>() will DCHECK. Providing a nil pointer to either
+// variant results in nil being returned without triggering any DCHECK.
+//
+// The strict variant is useful when retrieving a value from a
+// collection which only has values of a specific type, e.g. an
+// NSArray of NSStrings. The non-strict variant is useful when
+// retrieving values from data that you can't fully control. For
+// example, a plist read from disk may be beyond your exclusive
+// control, so you'd only want to check that the values you retrieve
+// from it are of the expected types, but not crash if they're not.
+//
+// Example usage:
+// NSString* version = base::mac::ObjCCast<NSString>(
+// [bundle objectForInfoDictionaryKey:@"CFBundleShortVersionString"]);
+//
+// NSString* str = base::mac::ObjCCastStrict<NSString>(
+// [ns_arr_of_ns_strs objectAtIndex:0]);
+template<typename T>
+T* ObjCCast(id objc_val) {
+ if ([objc_val isKindOfClass:[T class]]) {
+ return reinterpret_cast<T*>(objc_val);
+ }
+ return nil;
+}
+
+template<typename T>
+T* ObjCCastStrict(id objc_val) {
+ T* rv = ObjCCast<T>(objc_val);
+ DCHECK(objc_val == nil || rv);
+ return rv;
+}
+
+#endif // defined(__OBJC__)
+
+// Helper function for GetValueFromDictionary to create the error message
+// that appears when a type mismatch is encountered.
+BASE_EXPORT std::string GetValueFromDictionaryErrorMessage(
+ CFStringRef key, const std::string& expected_type, CFTypeRef value);
+
+// Utility function to pull out a value from a dictionary, check its type, and
+// return it. Returns NULL if the key is not present or of the wrong type.
+template<typename T>
+T GetValueFromDictionary(CFDictionaryRef dict, CFStringRef key) {
+ CFTypeRef value = CFDictionaryGetValue(dict, key);
+ T value_specific = CFCast<T>(value);
+
+ if (value && !value_specific) {
+ std::string expected_type = TypeNameForCFType(value_specific);
+ DLOG(WARNING) << GetValueFromDictionaryErrorMessage(key,
+ expected_type,
+ value);
+ }
+
+ return value_specific;
+}
+
+// Converts |path| to an autoreleased NSString. Returns nil if |path| is empty.
+BASE_EXPORT NSString* FilePathToNSString(const FilePath& path);
+
+// Converts |str| to a FilePath. Returns an empty path if |str| is nil.
+BASE_EXPORT FilePath NSStringToFilePath(NSString* str);
+
+#if defined(__OBJC__)
+// Converts |range| to an NSRange, returning the new range in |range_out|.
+// Returns true if conversion was successful, false if the values of |range|
+// could not be converted to NSUIntegers.
+BASE_EXPORT bool CFRangeToNSRange(CFRange range,
+ NSRange* range_out) WARN_UNUSED_RESULT;
+#endif // defined(__OBJC__)
+
+} // namespace mac
+} // namespace base
+
+// Stream operations for CFTypes. They can be used with NSTypes as well
+// by using the NSToCFCast methods above.
+// e.g. LOG(INFO) << base::mac::NSToCFCast(@"foo");
+// Operator << can not be overloaded for ObjectiveC types as the compiler
+// can not distinguish between overloads for id with overloads for void*.
+BASE_EXPORT extern std::ostream& operator<<(std::ostream& o,
+ const CFErrorRef err);
+BASE_EXPORT extern std::ostream& operator<<(std::ostream& o,
+ const CFStringRef str);
+
+#endif // BASE_MAC_FOUNDATION_UTIL_H_
diff --git a/libchrome/base/mac/foundation_util.mm b/libchrome/base/mac/foundation_util.mm
new file mode 100644
index 0000000..4f6fa60
--- /dev/null
+++ b/libchrome/base/mac/foundation_util.mm
@@ -0,0 +1,476 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/foundation_util.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/mac/bundle_locations.h"
+#include "base/mac/mac_logging.h"
+#include "base/macros.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/strings/sys_string_conversions.h"
+#include "build/build_config.h"
+
+#if !defined(OS_IOS)
+#import <AppKit/AppKit.h>
+#endif
+
+#if !defined(OS_IOS)
+extern "C" {
+CFTypeID SecACLGetTypeID();
+CFTypeID SecTrustedApplicationGetTypeID();
+Boolean _CFIsObjC(CFTypeID typeID, CFTypeRef obj);
+} // extern "C"
+#endif
+
+namespace base {
+namespace mac {
+
+namespace {
+
+bool g_cached_am_i_bundled_called = false;
+bool g_cached_am_i_bundled_value = false;
+bool g_override_am_i_bundled = false;
+bool g_override_am_i_bundled_value = false;
+
+bool UncachedAmIBundled() {
+#if defined(OS_IOS)
+ // All apps are bundled on iOS.
+ return true;
+#else
+ if (g_override_am_i_bundled)
+ return g_override_am_i_bundled_value;
+
+ // Yes, this is cheap.
+ return [[base::mac::OuterBundle() bundlePath] hasSuffix:@".app"];
+#endif
+}
+
+} // namespace
+
+bool AmIBundled() {
+ // If the return value is not cached, this function will return different
+ // values depending on when it's called. This confuses some client code, see
+ // http://crbug.com/63183 .
+ if (!g_cached_am_i_bundled_called) {
+ g_cached_am_i_bundled_called = true;
+ g_cached_am_i_bundled_value = UncachedAmIBundled();
+ }
+ DCHECK_EQ(g_cached_am_i_bundled_value, UncachedAmIBundled())
+ << "The return value of AmIBundled() changed. This will confuse tests. "
+ << "Call SetAmIBundled() override manually if your test binary "
+ << "delay-loads the framework.";
+ return g_cached_am_i_bundled_value;
+}
+
+void SetOverrideAmIBundled(bool value) {
+#if defined(OS_IOS)
+ // It doesn't make sense not to be bundled on iOS.
+ if (!value)
+ NOTREACHED();
+#endif
+ g_override_am_i_bundled = true;
+ g_override_am_i_bundled_value = value;
+}
+
+BASE_EXPORT void ClearAmIBundledCache() {
+ g_cached_am_i_bundled_called = false;
+}
+
+bool IsBackgroundOnlyProcess() {
+ // This function really does want to examine NSBundle's idea of the main
+ // bundle dictionary. It needs to look at the actual running .app's
+ // Info.plist to access its LSUIElement property.
+ NSDictionary* info_dictionary = [base::mac::MainBundle() infoDictionary];
+ return [[info_dictionary objectForKey:@"LSUIElement"] boolValue] != NO;
+}
+
+FilePath PathForFrameworkBundleResource(CFStringRef resourceName) {
+ NSBundle* bundle = base::mac::FrameworkBundle();
+ NSString* resourcePath = [bundle pathForResource:(NSString*)resourceName
+ ofType:nil];
+ return NSStringToFilePath(resourcePath);
+}
+
+OSType CreatorCodeForCFBundleRef(CFBundleRef bundle) {
+ OSType creator = kUnknownType;
+ CFBundleGetPackageInfo(bundle, NULL, &creator);
+ return creator;
+}
+
+OSType CreatorCodeForApplication() {
+ CFBundleRef bundle = CFBundleGetMainBundle();
+ if (!bundle)
+ return kUnknownType;
+
+ return CreatorCodeForCFBundleRef(bundle);
+}
+
+bool GetSearchPathDirectory(NSSearchPathDirectory directory,
+ NSSearchPathDomainMask domain_mask,
+ FilePath* result) {
+ DCHECK(result);
+ NSArray* dirs =
+ NSSearchPathForDirectoriesInDomains(directory, domain_mask, YES);
+ if ([dirs count] < 1) {
+ return false;
+ }
+ *result = NSStringToFilePath([dirs objectAtIndex:0]);
+ return true;
+}
+
+bool GetLocalDirectory(NSSearchPathDirectory directory, FilePath* result) {
+ return GetSearchPathDirectory(directory, NSLocalDomainMask, result);
+}
+
+bool GetUserDirectory(NSSearchPathDirectory directory, FilePath* result) {
+ return GetSearchPathDirectory(directory, NSUserDomainMask, result);
+}
+
+FilePath GetUserLibraryPath() {
+ FilePath user_library_path;
+ if (!GetUserDirectory(NSLibraryDirectory, &user_library_path)) {
+ DLOG(WARNING) << "Could not get user library path";
+ }
+ return user_library_path;
+}
+
+// Takes a path to an (executable) binary and tries to provide the path to an
+// application bundle containing it. It takes the outermost bundle that it can
+// find (so for "/Foo/Bar.app/.../Baz.app/..." it produces "/Foo/Bar.app").
+// |exec_name| - path to the binary
+// returns - path to the application bundle, or empty on error
+FilePath GetAppBundlePath(const FilePath& exec_name) {
+ const char kExt[] = ".app";
+ const size_t kExtLength = arraysize(kExt) - 1;
+
+ // Split the path into components.
+ std::vector<std::string> components;
+ exec_name.GetComponents(&components);
+
+ // It's an error if we don't get any components.
+ if (components.empty())
+ return FilePath();
+
+ // Don't prepend '/' to the first component.
+ std::vector<std::string>::const_iterator it = components.begin();
+ std::string bundle_name = *it;
+ DCHECK_GT(it->length(), 0U);
+ // If the first component ends in ".app", we're already done.
+ if (it->length() > kExtLength &&
+ !it->compare(it->length() - kExtLength, kExtLength, kExt, kExtLength))
+ return FilePath(bundle_name);
+
+ // The first component may be "/" or "//", etc. Only append '/' if it doesn't
+ // already end in '/'.
+ if (bundle_name.back() != '/')
+ bundle_name += '/';
+
+ // Go through the remaining components.
+ for (++it; it != components.end(); ++it) {
+ DCHECK_GT(it->length(), 0U);
+
+ bundle_name += *it;
+
+ // If the current component ends in ".app", we're done.
+ if (it->length() > kExtLength &&
+ !it->compare(it->length() - kExtLength, kExtLength, kExt, kExtLength))
+ return FilePath(bundle_name);
+
+ // Separate this component from the next one.
+ bundle_name += '/';
+ }
+
+ return FilePath();
+}
+
+#define TYPE_NAME_FOR_CF_TYPE_DEFN(TypeCF) \
+std::string TypeNameForCFType(TypeCF##Ref) { \
+ return #TypeCF; \
+}
+
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFArray);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFBag);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFBoolean);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFData);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFDate);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFDictionary);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFNull);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFNumber);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFSet);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFString);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFURL);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CFUUID);
+
+TYPE_NAME_FOR_CF_TYPE_DEFN(CGColor);
+
+TYPE_NAME_FOR_CF_TYPE_DEFN(CTFont);
+TYPE_NAME_FOR_CF_TYPE_DEFN(CTRun);
+
+#undef TYPE_NAME_FOR_CF_TYPE_DEFN
+
+void NSObjectRetain(void* obj) {
+ id<NSObject> nsobj = static_cast<id<NSObject> >(obj);
+ [nsobj retain];
+}
+
+void NSObjectRelease(void* obj) {
+ id<NSObject> nsobj = static_cast<id<NSObject> >(obj);
+ [nsobj release];
+}
+
+void* CFTypeRefToNSObjectAutorelease(CFTypeRef cf_object) {
+ // When GC is on, NSMakeCollectable marks cf_object for GC and autorelease
+ // is a no-op.
+ //
+ // In the traditional GC-less environment, NSMakeCollectable is a no-op,
+ // and cf_object is autoreleased, balancing out the caller's ownership claim.
+ //
+ // NSMakeCollectable returns nil when used on a NULL object.
+ return [NSMakeCollectable(cf_object) autorelease];
+}
+
+static const char* base_bundle_id;
+
+const char* BaseBundleID() {
+ if (base_bundle_id) {
+ return base_bundle_id;
+ }
+
+#if defined(GOOGLE_CHROME_BUILD)
+ return "com.google.Chrome";
+#else
+ return "org.chromium.Chromium";
+#endif
+}
+
+void SetBaseBundleID(const char* new_base_bundle_id) {
+ if (new_base_bundle_id != base_bundle_id) {
+ free((void*)base_bundle_id);
+ base_bundle_id = new_base_bundle_id ? strdup(new_base_bundle_id) : NULL;
+ }
+}
+
+// Definitions for the corresponding CF_TO_NS_CAST_DECL macros in
+// foundation_util.h.
+#define CF_TO_NS_CAST_DEFN(TypeCF, TypeNS) \
+\
+TypeNS* CFToNSCast(TypeCF##Ref cf_val) { \
+ DCHECK(!cf_val || TypeCF##GetTypeID() == CFGetTypeID(cf_val)); \
+ TypeNS* ns_val = \
+ const_cast<TypeNS*>(reinterpret_cast<const TypeNS*>(cf_val)); \
+ return ns_val; \
+} \
+\
+TypeCF##Ref NSToCFCast(TypeNS* ns_val) { \
+ TypeCF##Ref cf_val = reinterpret_cast<TypeCF##Ref>(ns_val); \
+ DCHECK(!cf_val || TypeCF##GetTypeID() == CFGetTypeID(cf_val)); \
+ return cf_val; \
+}
+
+#define CF_TO_NS_MUTABLE_CAST_DEFN(name) \
+CF_TO_NS_CAST_DEFN(CF##name, NS##name) \
+\
+NSMutable##name* CFToNSCast(CFMutable##name##Ref cf_val) { \
+ DCHECK(!cf_val || CF##name##GetTypeID() == CFGetTypeID(cf_val)); \
+ NSMutable##name* ns_val = reinterpret_cast<NSMutable##name*>(cf_val); \
+ return ns_val; \
+} \
+\
+CFMutable##name##Ref NSToCFCast(NSMutable##name* ns_val) { \
+ CFMutable##name##Ref cf_val = \
+ reinterpret_cast<CFMutable##name##Ref>(ns_val); \
+ DCHECK(!cf_val || CF##name##GetTypeID() == CFGetTypeID(cf_val)); \
+ return cf_val; \
+}
+
+CF_TO_NS_MUTABLE_CAST_DEFN(Array);
+CF_TO_NS_MUTABLE_CAST_DEFN(AttributedString);
+CF_TO_NS_CAST_DEFN(CFCalendar, NSCalendar);
+CF_TO_NS_MUTABLE_CAST_DEFN(CharacterSet);
+CF_TO_NS_MUTABLE_CAST_DEFN(Data);
+CF_TO_NS_CAST_DEFN(CFDate, NSDate);
+CF_TO_NS_MUTABLE_CAST_DEFN(Dictionary);
+CF_TO_NS_CAST_DEFN(CFError, NSError);
+CF_TO_NS_CAST_DEFN(CFLocale, NSLocale);
+CF_TO_NS_CAST_DEFN(CFNumber, NSNumber);
+CF_TO_NS_CAST_DEFN(CFRunLoopTimer, NSTimer);
+CF_TO_NS_CAST_DEFN(CFTimeZone, NSTimeZone);
+CF_TO_NS_MUTABLE_CAST_DEFN(Set);
+CF_TO_NS_CAST_DEFN(CFReadStream, NSInputStream);
+CF_TO_NS_CAST_DEFN(CFWriteStream, NSOutputStream);
+CF_TO_NS_MUTABLE_CAST_DEFN(String);
+CF_TO_NS_CAST_DEFN(CFURL, NSURL);
+
+#if defined(OS_IOS)
+CF_TO_NS_CAST_DEFN(CTFont, UIFont);
+#else
+// The NSFont/CTFont toll-free bridging is broken when it comes to type
+// checking, so do some special-casing.
+// http://www.openradar.me/15341349 rdar://15341349
+NSFont* CFToNSCast(CTFontRef cf_val) {
+ NSFont* ns_val =
+ const_cast<NSFont*>(reinterpret_cast<const NSFont*>(cf_val));
+ DCHECK(!cf_val ||
+ CTFontGetTypeID() == CFGetTypeID(cf_val) ||
+ (_CFIsObjC(CTFontGetTypeID(), cf_val) &&
+ [ns_val isKindOfClass:[NSFont class]]));
+ return ns_val;
+}
+
+CTFontRef NSToCFCast(NSFont* ns_val) {
+ CTFontRef cf_val = reinterpret_cast<CTFontRef>(ns_val);
+ DCHECK(!cf_val ||
+ CTFontGetTypeID() == CFGetTypeID(cf_val) ||
+ [ns_val isKindOfClass:[NSFont class]]);
+ return cf_val;
+}
+#endif
+
+#undef CF_TO_NS_CAST_DEFN
+#undef CF_TO_NS_MUTABLE_CAST_DEFN
+
+#define CF_CAST_DEFN(TypeCF) \
+template<> TypeCF##Ref \
+CFCast<TypeCF##Ref>(const CFTypeRef& cf_val) { \
+ if (cf_val == NULL) { \
+ return NULL; \
+ } \
+ if (CFGetTypeID(cf_val) == TypeCF##GetTypeID()) { \
+ return (TypeCF##Ref)(cf_val); \
+ } \
+ return NULL; \
+} \
+\
+template<> TypeCF##Ref \
+CFCastStrict<TypeCF##Ref>(const CFTypeRef& cf_val) { \
+ TypeCF##Ref rv = CFCast<TypeCF##Ref>(cf_val); \
+ DCHECK(cf_val == NULL || rv); \
+ return rv; \
+}
+
+CF_CAST_DEFN(CFArray);
+CF_CAST_DEFN(CFBag);
+CF_CAST_DEFN(CFBoolean);
+CF_CAST_DEFN(CFData);
+CF_CAST_DEFN(CFDate);
+CF_CAST_DEFN(CFDictionary);
+CF_CAST_DEFN(CFNull);
+CF_CAST_DEFN(CFNumber);
+CF_CAST_DEFN(CFSet);
+CF_CAST_DEFN(CFString);
+CF_CAST_DEFN(CFURL);
+CF_CAST_DEFN(CFUUID);
+
+CF_CAST_DEFN(CGColor);
+
+CF_CAST_DEFN(CTFontDescriptor);
+CF_CAST_DEFN(CTRun);
+
+#if defined(OS_IOS)
+CF_CAST_DEFN(CTFont);
+#else
+// The NSFont/CTFont toll-free bridging is broken when it comes to type
+// checking, so do some special-casing.
+// http://www.openradar.me/15341349 rdar://15341349
+template<> CTFontRef
+CFCast<CTFontRef>(const CFTypeRef& cf_val) {
+ if (cf_val == NULL) {
+ return NULL;
+ }
+ if (CFGetTypeID(cf_val) == CTFontGetTypeID()) {
+ return (CTFontRef)(cf_val);
+ }
+
+ if (!_CFIsObjC(CTFontGetTypeID(), cf_val))
+ return NULL;
+
+ id<NSObject> ns_val = reinterpret_cast<id>(const_cast<void*>(cf_val));
+ if ([ns_val isKindOfClass:[NSFont class]]) {
+ return (CTFontRef)(cf_val);
+ }
+ return NULL;
+}
+
+template<> CTFontRef
+CFCastStrict<CTFontRef>(const CFTypeRef& cf_val) {
+ CTFontRef rv = CFCast<CTFontRef>(cf_val);
+ DCHECK(cf_val == NULL || rv);
+ return rv;
+}
+#endif
+
+#if !defined(OS_IOS)
+CF_CAST_DEFN(SecACL);
+CF_CAST_DEFN(SecTrustedApplication);
+#endif
+
+#undef CF_CAST_DEFN
+
+std::string GetValueFromDictionaryErrorMessage(
+ CFStringRef key, const std::string& expected_type, CFTypeRef value) {
+ ScopedCFTypeRef<CFStringRef> actual_type_ref(
+ CFCopyTypeIDDescription(CFGetTypeID(value)));
+ return "Expected value for key " +
+ base::SysCFStringRefToUTF8(key) +
+ " to be " +
+ expected_type +
+ " but it was " +
+ base::SysCFStringRefToUTF8(actual_type_ref) +
+ " instead";
+}
+
+NSString* FilePathToNSString(const FilePath& path) {
+ if (path.empty())
+ return nil;
+ return [NSString stringWithUTF8String:path.value().c_str()];
+}
+
+FilePath NSStringToFilePath(NSString* str) {
+ if (![str length])
+ return FilePath();
+ return FilePath([str fileSystemRepresentation]);
+}
+
+bool CFRangeToNSRange(CFRange range, NSRange* range_out) {
+ if (base::IsValueInRangeForNumericType<decltype(range_out->location)>(
+ range.location) &&
+ base::IsValueInRangeForNumericType<decltype(range_out->length)>(
+ range.length) &&
+ base::IsValueInRangeForNumericType<decltype(range_out->location)>(
+ range.location + range.length)) {
+ *range_out = NSMakeRange(range.location, range.length);
+ return true;
+ }
+ return false;
+}
+
+} // namespace mac
+} // namespace base
+
+std::ostream& operator<<(std::ostream& o, const CFStringRef string) {
+ return o << base::SysCFStringRefToUTF8(string);
+}
+
+std::ostream& operator<<(std::ostream& o, const CFErrorRef err) {
+ base::ScopedCFTypeRef<CFStringRef> desc(CFErrorCopyDescription(err));
+ base::ScopedCFTypeRef<CFDictionaryRef> user_info(CFErrorCopyUserInfo(err));
+ CFStringRef errorDesc = NULL;
+ if (user_info.get()) {
+ errorDesc = reinterpret_cast<CFStringRef>(
+ CFDictionaryGetValue(user_info.get(), kCFErrorDescriptionKey));
+ }
+ o << "Code: " << CFErrorGetCode(err)
+ << " Domain: " << CFErrorGetDomain(err)
+ << " Desc: " << desc.get();
+ if(errorDesc) {
+ o << "(" << errorDesc << ")";
+ }
+ return o;
+}
diff --git a/libchrome/base/mac/mac_logging.h b/libchrome/base/mac/mac_logging.h
new file mode 100644
index 0000000..30e43ea
--- /dev/null
+++ b/libchrome/base/mac/mac_logging.h
@@ -0,0 +1,98 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_MAC_LOGGING_H_
+#define BASE_MAC_MAC_LOGGING_H_
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_IOS)
+#include <MacTypes.h>
+#else
+#include <libkern/OSTypes.h>
+#endif
+
+// Use the OSSTATUS_LOG family to log messages related to errors in Mac OS X
+// system routines that report status via an OSStatus or OSErr value. It is
+// similar to the PLOG family which operates on errno, but because there is no
+// global (or thread-local) OSStatus or OSErr value, the specific error must
+// be supplied as an argument to the OSSTATUS_LOG macro. The message logged
+// will contain the symbolic constant name corresponding to the status value,
+// along with the value itself.
+//
+// OSErr is just an older 16-bit form of the newer 32-bit OSStatus. Despite
+// the name, OSSTATUS_LOG can be used equally well for OSStatus and OSErr.
+
+namespace logging {
+
+// Returns a UTF8 description from an OS X Status error.
+BASE_EXPORT std::string DescriptionFromOSStatus(OSStatus err);
+
+class BASE_EXPORT OSStatusLogMessage : public logging::LogMessage {
+ public:
+ OSStatusLogMessage(const char* file_path,
+ int line,
+ LogSeverity severity,
+ OSStatus status);
+ ~OSStatusLogMessage();
+
+ private:
+ OSStatus status_;
+
+ DISALLOW_COPY_AND_ASSIGN(OSStatusLogMessage);
+};
+
+} // namespace logging
+
+#if defined(NDEBUG)
+#define MAC_DVLOG_IS_ON(verbose_level) 0
+#else
+#define MAC_DVLOG_IS_ON(verbose_level) VLOG_IS_ON(verbose_level)
+#endif
+
+#define OSSTATUS_LOG_STREAM(severity, status) \
+ COMPACT_GOOGLE_LOG_EX_ ## severity(OSStatusLogMessage, status).stream()
+#define OSSTATUS_VLOG_STREAM(verbose_level, status) \
+ logging::OSStatusLogMessage(__FILE__, __LINE__, \
+ -verbose_level, status).stream()
+
+#define OSSTATUS_LOG(severity, status) \
+ LAZY_STREAM(OSSTATUS_LOG_STREAM(severity, status), LOG_IS_ON(severity))
+#define OSSTATUS_LOG_IF(severity, condition, status) \
+ LAZY_STREAM(OSSTATUS_LOG_STREAM(severity, status), \
+ LOG_IS_ON(severity) && (condition))
+
+#define OSSTATUS_VLOG(verbose_level, status) \
+ LAZY_STREAM(OSSTATUS_VLOG_STREAM(verbose_level, status), \
+ VLOG_IS_ON(verbose_level))
+#define OSSTATUS_VLOG_IF(verbose_level, condition, status) \
+ LAZY_STREAM(OSSTATUS_VLOG_STREAM(verbose_level, status), \
+ VLOG_IS_ON(verbose_level) && (condition))
+
+#define OSSTATUS_CHECK(condition, status) \
+ LAZY_STREAM(OSSTATUS_LOG_STREAM(FATAL, status), !(condition)) \
+ << "Check failed: " # condition << ". "
+
+#define OSSTATUS_DLOG(severity, status) \
+ LAZY_STREAM(OSSTATUS_LOG_STREAM(severity, status), DLOG_IS_ON(severity))
+#define OSSTATUS_DLOG_IF(severity, condition, status) \
+ LAZY_STREAM(OSSTATUS_LOG_STREAM(severity, status), \
+ DLOG_IS_ON(severity) && (condition))
+
+#define OSSTATUS_DVLOG(verbose_level, status) \
+ LAZY_STREAM(OSSTATUS_VLOG_STREAM(verbose_level, status), \
+ MAC_DVLOG_IS_ON(verbose_level))
+#define OSSTATUS_DVLOG_IF(verbose_level, condition, status) \
+ LAZY_STREAM(OSSTATUS_VLOG_STREAM(verbose_level, status), \
+ MAC_DVLOG_IS_ON(verbose_level) && (condition))
+
+#define OSSTATUS_DCHECK(condition, status) \
+ LAZY_STREAM(OSSTATUS_LOG_STREAM(FATAL, status), \
+ DCHECK_IS_ON() && !(condition)) \
+ << "Check failed: " #condition << ". "
+
+#endif // BASE_MAC_MAC_LOGGING_H_
diff --git a/libchrome/base/mac/mac_logging.mm b/libchrome/base/mac/mac_logging.mm
new file mode 100644
index 0000000..f0d3c07
--- /dev/null
+++ b/libchrome/base/mac/mac_logging.mm
@@ -0,0 +1,47 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mac_logging.h"
+
+#import <Foundation/Foundation.h>
+
+#include <iomanip>
+
+#include "build/build_config.h"
+
+#if !defined(OS_IOS)
+#include <CoreServices/CoreServices.h>
+#endif
+
+namespace logging {
+
+std::string DescriptionFromOSStatus(OSStatus err) {
+ NSError* error =
+ [NSError errorWithDomain:NSOSStatusErrorDomain code:err userInfo:nil];
+ return error.description.UTF8String;
+}
+
+OSStatusLogMessage::OSStatusLogMessage(const char* file_path,
+ int line,
+ LogSeverity severity,
+ OSStatus status)
+ : LogMessage(file_path, line, severity),
+ status_(status) {
+}
+
+OSStatusLogMessage::~OSStatusLogMessage() {
+#if defined(OS_IOS)
+ // TODO(crbug.com/546375): Consider using NSError with NSOSStatusErrorDomain
+ // to try to get a description of the failure.
+ stream() << ": " << status_;
+#else
+ stream() << ": "
+ << DescriptionFromOSStatus(status_)
+ << " ("
+ << status_
+ << ")";
+#endif
+}
+
+} // namespace logging
diff --git a/libchrome/base/mac/mac_util.h b/libchrome/base/mac/mac_util.h
new file mode 100644
index 0000000..84948f7
--- /dev/null
+++ b/libchrome/base/mac/mac_util.h
@@ -0,0 +1,206 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_MAC_UTIL_H_
+#define BASE_MAC_MAC_UTIL_H_
+
+#include <AvailabilityMacros.h>
+#include <Carbon/Carbon.h>
+#include <stdint.h>
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+
+class FilePath;
+
+namespace mac {
+
+// Full screen modes, in increasing order of priority. More permissive modes
+// take precedence.
+enum FullScreenMode {
+ kFullScreenModeHideAll = 0,
+ kFullScreenModeHideDock = 1,
+ kFullScreenModeAutoHideAll = 2,
+ kNumFullScreenModes = 3,
+
+ // kFullScreenModeNormal is not a valid FullScreenMode, but it is useful to
+ // other classes, so we include it here.
+ kFullScreenModeNormal = 10,
+};
+
+BASE_EXPORT std::string PathFromFSRef(const FSRef& ref);
+BASE_EXPORT bool FSRefFromPath(const std::string& path, FSRef* ref);
+
+// Returns an sRGB color space. The return value is a static value; do not
+// release it!
+BASE_EXPORT CGColorSpaceRef GetSRGBColorSpace();
+
+// Returns the generic RGB color space. The return value is a static value; do
+// not release it!
+BASE_EXPORT CGColorSpaceRef GetGenericRGBColorSpace();
+
+// Returns the color space being used by the main display. The return value
+// is a static value; do not release it!
+BASE_EXPORT CGColorSpaceRef GetSystemColorSpace();
+
+// Add a full screen request for the given |mode|. Must be paired with a
+// ReleaseFullScreen() call for the same |mode|. This does not by itself create
+// a fullscreen window; rather, it manages per-application state related to
+// hiding the dock and menubar. Must be called on the main thread.
+BASE_EXPORT void RequestFullScreen(FullScreenMode mode);
+
+// Release a request for full screen mode. Must be matched with a
+// RequestFullScreen() call for the same |mode|. As with RequestFullScreen(),
+// this does not affect windows directly, but rather manages per-application
+// state. For example, if there are no other outstanding
+// |kFullScreenModeAutoHideAll| requests, this will reshow the menu bar. Must
+// be called on main thread.
+BASE_EXPORT void ReleaseFullScreen(FullScreenMode mode);
+
+// Convenience method to switch the current fullscreen mode. This has the same
+// net effect as a ReleaseFullScreen(from_mode) call followed immediately by a
+// RequestFullScreen(to_mode). Must be called on the main thread.
+BASE_EXPORT void SwitchFullScreenModes(FullScreenMode from_mode,
+ FullScreenMode to_mode);
+
+// Returns true if this process is in the foreground, meaning that it's the
+// frontmost process, the one whose menu bar is shown at the top of the main
+// display.
+BASE_EXPORT bool AmIForeground();
+
+// Excludes the file given by |file_path| from being backed up by Time Machine.
+BASE_EXPORT bool SetFileBackupExclusion(const FilePath& file_path);
+
+// Checks if the current application is set as a Login Item, so it will launch
+// on Login. If a non-NULL pointer to is_hidden is passed, the Login Item also
+// is queried for the 'hide on launch' flag.
+BASE_EXPORT bool CheckLoginItemStatus(bool* is_hidden);
+
+// Adds current application to the set of Login Items with specified "hide"
+// flag. This has the same effect as adding/removing the application in
+// SystemPreferences->Accounts->LoginItems or marking Application in the Dock
+// as "Options->Open on Login".
+// Does nothing if the application is already set up as Login Item with
+// specified hide flag.
+BASE_EXPORT void AddToLoginItems(bool hide_on_startup);
+
+// Removes the current application from the list of Login Items.
+BASE_EXPORT void RemoveFromLoginItems();
+
+// Returns true if the current process was automatically launched as a
+// 'Login Item' or via Lion's Resume. Used to suppress opening windows.
+BASE_EXPORT bool WasLaunchedAsLoginOrResumeItem();
+
+// Returns true if the current process was automatically launched as a
+// 'Login Item' or via Resume, and the 'Reopen windows when logging back in'
+// checkbox was selected by the user. This indicates that the previous
+// session should be restored.
+BASE_EXPORT bool WasLaunchedAsLoginItemRestoreState();
+
+// Returns true if the current process was automatically launched as a
+// 'Login Item' with 'hide on startup' flag. Used to suppress opening windows.
+BASE_EXPORT bool WasLaunchedAsHiddenLoginItem();
+
+// Remove the quarantine xattr from the given file. Returns false if there was
+// an error, or true otherwise.
+BASE_EXPORT bool RemoveQuarantineAttribute(const FilePath& file_path);
+
+// Run-time OS version checks. Use these instead of
+// base::SysInfo::OperatingSystemVersionNumbers. Prefer the "OrEarlier" and
+// "OrLater" variants to those that check for a specific version, unless you
+// know for sure that you need to check for a specific version.
+
+// Mavericks is OS X 10.9, Darwin 13.
+BASE_EXPORT bool IsOSMavericks();
+
+// Yosemite is OS X 10.10, Darwin 14.
+BASE_EXPORT bool IsOSYosemite();
+BASE_EXPORT bool IsOSYosemiteOrEarlier();
+BASE_EXPORT bool IsOSYosemiteOrLater();
+
+// El Capitan is OS X 10.11, Darwin 15.
+BASE_EXPORT bool IsOSElCapitan();
+BASE_EXPORT bool IsOSElCapitanOrEarlier();
+BASE_EXPORT bool IsOSElCapitanOrLater();
+
+// Sierra is macOS 10.12, Darwin 16.
+BASE_EXPORT bool IsOSSierra();
+BASE_EXPORT bool IsOSSierraOrLater();
+
+// This should be infrequently used. It only makes sense to use this to avoid
+// codepaths that are very likely to break on future (unreleased, untested,
+// unborn) OS releases, or to log when the OS is newer than any known version.
+BASE_EXPORT bool IsOSLaterThanSierra_DontCallThis();
+
+// Inline functions that are redundant due to version ranges being mutually-
+// exclusive.
+inline bool IsOSYosemiteOrEarlier() { return !IsOSElCapitanOrLater(); }
+inline bool IsOSElCapitanOrEarlier() { return !IsOSSierraOrLater(); }
+
+// When the deployment target is set, the code produced cannot run on earlier
+// OS releases. That enables some of the IsOS* family to be implemented as
+// constant-value inline functions. The MAC_OS_X_VERSION_MIN_REQUIRED macro
+// contains the value of the deployment target.
+
+#if defined(MAC_OS_X_VERSION_10_9) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_9
+#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_9
+inline bool IsOSMavericks() { return false; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_10) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_10
+#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_10
+inline bool IsOSYosemiteOrLater() { return true; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_10) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_10
+#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_10
+inline bool IsOSYosemite() { return false; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_11) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_11
+#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_11
+inline bool IsOSElCapitanOrLater() { return true; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_11) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_11
+#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_11
+inline bool IsOSElCapitan() { return false; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_12) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_12
+#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_12
+inline bool IsOSSierraOrLater() { return true; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_12) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_12
+#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_12
+inline bool IsOSSierra() { return false; }
+inline bool IsOSLaterThanSierra_DontCallThis() { return true; }
+#endif
+
+// Retrieve the system's model identifier string from the IOKit registry:
+// for example, "MacPro4,1", "MacBookPro6,1". Returns empty string upon
+// failure.
+BASE_EXPORT std::string GetModelIdentifier();
+
+// Parse a model identifier string; for example, into ("MacBookPro", 6, 1).
+// If any error occurs, none of the input pointers are touched.
+BASE_EXPORT bool ParseModelIdentifier(const std::string& ident,
+ std::string* type,
+ int32_t* major,
+ int32_t* minor);
+
+} // namespace mac
+} // namespace base
+
+#endif // BASE_MAC_MAC_UTIL_H_
diff --git a/libchrome/base/mac/mach_logging.cc b/libchrome/base/mac/mach_logging.cc
new file mode 100644
index 0000000..7b939b3
--- /dev/null
+++ b/libchrome/base/mac/mach_logging.cc
@@ -0,0 +1,88 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mach_logging.h"
+
+#include <iomanip>
+#include <string>
+
+#include "base/strings/stringprintf.h"
+#include "build/build_config.h"
+
+#if !defined(OS_IOS)
+#include <servers/bootstrap.h>
+#endif // !OS_IOS
+
+namespace {
+
+std::string FormatMachErrorNumber(mach_error_t mach_err) {
+ // For the os/kern subsystem, give the error number in decimal as in
+ // <mach/kern_return.h>. Otherwise, give it in hexadecimal to make it easier
+ // to visualize the various bits. See <mach/error.h>.
+ if (mach_err >= 0 && mach_err < KERN_RETURN_MAX) {
+ return base::StringPrintf(" (%d)", mach_err);
+ }
+ return base::StringPrintf(" (0x%08x)", mach_err);
+}
+
+} // namespace
+
+namespace logging {
+
+MachLogMessage::MachLogMessage(const char* file_path,
+ int line,
+ LogSeverity severity,
+ mach_error_t mach_err)
+ : LogMessage(file_path, line, severity),
+ mach_err_(mach_err) {
+}
+
+MachLogMessage::~MachLogMessage() {
+ stream() << ": "
+ << mach_error_string(mach_err_)
+ << FormatMachErrorNumber(mach_err_);
+}
+
+#if !defined(OS_IOS)
+
+BootstrapLogMessage::BootstrapLogMessage(const char* file_path,
+ int line,
+ LogSeverity severity,
+ kern_return_t bootstrap_err)
+ : LogMessage(file_path, line, severity),
+ bootstrap_err_(bootstrap_err) {
+}
+
+BootstrapLogMessage::~BootstrapLogMessage() {
+ stream() << ": "
+ << bootstrap_strerror(bootstrap_err_);
+
+ switch (bootstrap_err_) {
+ case BOOTSTRAP_SUCCESS:
+ case BOOTSTRAP_NOT_PRIVILEGED:
+ case BOOTSTRAP_NAME_IN_USE:
+ case BOOTSTRAP_UNKNOWN_SERVICE:
+ case BOOTSTRAP_SERVICE_ACTIVE:
+ case BOOTSTRAP_BAD_COUNT:
+ case BOOTSTRAP_NO_MEMORY:
+ case BOOTSTRAP_NO_CHILDREN: {
+ // Show known bootstrap errors in decimal because that's how they're
+ // defined in <servers/bootstrap.h>.
+ stream() << " (" << bootstrap_err_ << ")";
+ break;
+ }
+
+ default: {
+ // bootstrap_strerror passes unknown errors to mach_error_string, so
+ // format them as they would be if they were handled by
+ // MachErrorMessage.
+ stream() << FormatMachErrorNumber(bootstrap_err_);
+ break;
+ }
+ }
+}
+
+#endif // !OS_IOS
+
+} // namespace logging
diff --git a/libchrome/base/mac/mach_logging.h b/libchrome/base/mac/mach_logging.h
new file mode 100644
index 0000000..59ab762
--- /dev/null
+++ b/libchrome/base/mac/mach_logging.h
@@ -0,0 +1,167 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_MACH_LOGGING_H_
+#define BASE_MAC_MACH_LOGGING_H_
+
+#include <mach/mach.h>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+// Use the MACH_LOG family of macros along with a mach_error_t (kern_return_t)
+// containing a Mach error. The error value will be decoded so that logged
+// messages explain the error.
+//
+// Use the BOOTSTRAP_LOG family of macros specifically for errors that occur
+// while interoperating with the bootstrap subsystem. These errors will first
+// be looked up as bootstrap error messages. If no match is found, they will
+// be treated as generic Mach errors, as in MACH_LOG.
+//
+// Examples:
+//
+// kern_return_t kr = mach_timebase_info(&info);
+// if (kr != KERN_SUCCESS) {
+// MACH_LOG(ERROR, kr) << "mach_timebase_info";
+// }
+//
+// kr = vm_deallocate(task, address, size);
+// MACH_DCHECK(kr == KERN_SUCCESS, kr) << "vm_deallocate";
+
+namespace logging {
+
+class BASE_EXPORT MachLogMessage : public logging::LogMessage {
+ public:
+ MachLogMessage(const char* file_path,
+ int line,
+ LogSeverity severity,
+ mach_error_t mach_err);
+ ~MachLogMessage();
+
+ private:
+ mach_error_t mach_err_;
+
+ DISALLOW_COPY_AND_ASSIGN(MachLogMessage);
+};
+
+} // namespace logging
+
+#if defined(NDEBUG)
+#define MACH_DVLOG_IS_ON(verbose_level) 0
+#else
+#define MACH_DVLOG_IS_ON(verbose_level) VLOG_IS_ON(verbose_level)
+#endif
+
+#define MACH_LOG_STREAM(severity, mach_err) \
+ COMPACT_GOOGLE_LOG_EX_ ## severity(MachLogMessage, mach_err).stream()
+#define MACH_VLOG_STREAM(verbose_level, mach_err) \
+ logging::MachLogMessage(__FILE__, __LINE__, \
+ -verbose_level, mach_err).stream()
+
+#define MACH_LOG(severity, mach_err) \
+ LAZY_STREAM(MACH_LOG_STREAM(severity, mach_err), LOG_IS_ON(severity))
+#define MACH_LOG_IF(severity, condition, mach_err) \
+ LAZY_STREAM(MACH_LOG_STREAM(severity, mach_err), \
+ LOG_IS_ON(severity) && (condition))
+
+#define MACH_VLOG(verbose_level, mach_err) \
+ LAZY_STREAM(MACH_VLOG_STREAM(verbose_level, mach_err), \
+ VLOG_IS_ON(verbose_level))
+#define MACH_VLOG_IF(verbose_level, condition, mach_err) \
+ LAZY_STREAM(MACH_VLOG_STREAM(verbose_level, mach_err), \
+ VLOG_IS_ON(verbose_level) && (condition))
+
+#define MACH_CHECK(condition, mach_err) \
+ LAZY_STREAM(MACH_LOG_STREAM(FATAL, mach_err), !(condition)) \
+ << "Check failed: " # condition << ". "
+
+#define MACH_DLOG(severity, mach_err) \
+ LAZY_STREAM(MACH_LOG_STREAM(severity, mach_err), DLOG_IS_ON(severity))
+#define MACH_DLOG_IF(severity, condition, mach_err) \
+ LAZY_STREAM(MACH_LOG_STREAM(severity, mach_err), \
+ DLOG_IS_ON(severity) && (condition))
+
+#define MACH_DVLOG(verbose_level, mach_err) \
+ LAZY_STREAM(MACH_VLOG_STREAM(verbose_level, mach_err), \
+ MACH_DVLOG_IS_ON(verbose_level))
+#define MACH_DVLOG_IF(verbose_level, condition, mach_err) \
+ LAZY_STREAM(MACH_VLOG_STREAM(verbose_level, mach_err), \
+ MACH_DVLOG_IS_ON(verbose_level) && (condition))
+
+#define MACH_DCHECK(condition, mach_err) \
+ LAZY_STREAM(MACH_LOG_STREAM(FATAL, mach_err), \
+ DCHECK_IS_ON() && !(condition)) \
+ << "Check failed: " #condition << ". "
+
+#if !defined(OS_IOS)
+
+namespace logging {
+
+class BASE_EXPORT BootstrapLogMessage : public logging::LogMessage {
+ public:
+ BootstrapLogMessage(const char* file_path,
+ int line,
+ LogSeverity severity,
+ kern_return_t bootstrap_err);
+ ~BootstrapLogMessage();
+
+ private:
+ kern_return_t bootstrap_err_;
+
+ DISALLOW_COPY_AND_ASSIGN(BootstrapLogMessage);
+};
+
+} // namespace logging
+
+#define BOOTSTRAP_DVLOG_IS_ON MACH_DVLOG_IS_ON
+
+#define BOOTSTRAP_LOG_STREAM(severity, bootstrap_err) \
+ COMPACT_GOOGLE_LOG_EX_ ## severity(BootstrapLogMessage, \
+ bootstrap_err).stream()
+#define BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err) \
+ logging::BootstrapLogMessage(__FILE__, __LINE__, \
+ -verbose_level, bootstrap_err).stream()
+
+#define BOOTSTRAP_LOG(severity, bootstrap_err) \
+ LAZY_STREAM(BOOTSTRAP_LOG_STREAM(severity, \
+ bootstrap_err), LOG_IS_ON(severity))
+#define BOOTSTRAP_LOG_IF(severity, condition, bootstrap_err) \
+ LAZY_STREAM(BOOTSTRAP_LOG_STREAM(severity, bootstrap_err), \
+ LOG_IS_ON(severity) && (condition))
+
+#define BOOTSTRAP_VLOG(verbose_level, bootstrap_err) \
+ LAZY_STREAM(BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err), \
+ VLOG_IS_ON(verbose_level))
+#define BOOTSTRAP_VLOG_IF(verbose_level, condition, bootstrap_err) \
+ LAZY_STREAM(BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err), \
+ VLOG_IS_ON(verbose_level) && (condition))
+
+#define BOOTSTRAP_CHECK(condition, bootstrap_err) \
+ LAZY_STREAM(BOOTSTRAP_LOG_STREAM(FATAL, bootstrap_err), !(condition)) \
+ << "Check failed: " # condition << ". "
+
+#define BOOTSTRAP_DLOG(severity, bootstrap_err) \
+ LAZY_STREAM(BOOTSTRAP_LOG_STREAM(severity, bootstrap_err), \
+ DLOG_IS_ON(severity))
+#define BOOTSTRAP_DLOG_IF(severity, condition, bootstrap_err) \
+ LAZY_STREAM(BOOTSTRAP_LOG_STREAM(severity, bootstrap_err), \
+ DLOG_IS_ON(severity) && (condition))
+
+#define BOOTSTRAP_DVLOG(verbose_level, bootstrap_err) \
+ LAZY_STREAM(BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err), \
+ BOOTSTRAP_DVLOG_IS_ON(verbose_level))
+#define BOOTSTRAP_DVLOG_IF(verbose_level, condition, bootstrap_err) \
+ LAZY_STREAM(BOOTSTRAP_VLOG_STREAM(verbose_level, bootstrap_err), \
+ BOOTSTRAP_DVLOG_IS_ON(verbose_level) && (condition))
+
+#define BOOTSTRAP_DCHECK(condition, bootstrap_err) \
+ LAZY_STREAM(BOOTSTRAP_LOG_STREAM(FATAL, bootstrap_err), \
+ DCHECK_IS_ON() && !(condition)) \
+ << "Check failed: " #condition << ". "
+
+#endif // !OS_IOS
+
+#endif // BASE_MAC_MACH_LOGGING_H_
diff --git a/libchrome/base/mac/mach_port_broker.h b/libchrome/base/mac/mach_port_broker.h
new file mode 100644
index 0000000..4554b6a
--- /dev/null
+++ b/libchrome/base/mac/mach_port_broker.h
@@ -0,0 +1,108 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_MACH_PORT_BROKER_H_
+#define BASE_MAC_MACH_PORT_BROKER_H_
+
+#include <mach/mach.h>
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/mac/dispatch_source_mach.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/macros.h"
+#include "base/process/port_provider_mac.h"
+#include "base/process/process_handle.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+// On OS X, the task port of a process is required to collect metrics about the
+// process, and to insert Mach ports into the process. Running |task_for_pid()|
+// is only allowed for privileged code. However, a process has port rights to
+// all its subprocesses, so let the child processes send their Mach port to the
+// parent over IPC.
+//
+// Mach ports can only be sent over Mach IPC, not over the |socketpair()| that
+// the regular IPC system uses. Hence, the child processes open a Mach
+// connection shortly after launching and IPC their Mach data to the parent
+// process. A single |MachPortBroker| with a given name is expected to exist in
+// the parent process.
+//
+// Since this data arrives over a separate channel, it is not available
+// immediately after a child process has been started.
+class BASE_EXPORT MachPortBroker : public base::PortProvider {
+ public:
+ // For use in child processes. This will send the task port of the current
+ // process over Mach IPC to the port registered by name (via this class) in
+ // the parent process. Returns true if the message was sent successfully
+ // and false if otherwise.
+ static bool ChildSendTaskPortToParent(const std::string& name);
+
+ // Returns the Mach port name to use when sending or receiving messages.
+ // Does the Right Thing in the browser and in child processes.
+ static std::string GetMachPortName(const std::string& name, bool is_child);
+
+ MachPortBroker(const std::string& name);
+ ~MachPortBroker() override;
+
+ // Performs any initialization work.
+ bool Init();
+
+ // Adds a placeholder to the map for the given pid with MACH_PORT_NULL.
+ // Callers are expected to later update the port with FinalizePid(). Callers
+ // MUST acquire the lock given by GetLock() before calling this method (and
+ // release the lock afterwards).
+ void AddPlaceholderForPid(base::ProcessHandle pid);
+
+ // Removes |pid| from the task port map. Callers MUST acquire the lock given
+ // by GetLock() before calling this method (and release the lock afterwards).
+ void InvalidatePid(base::ProcessHandle pid);
+
+ // The lock that protects this MachPortBroker object. Callers MUST acquire
+ // and release this lock around calls to AddPlaceholderForPid(),
+// InvalidatePid(), and FinalizePid().
+ base::Lock& GetLock() { return lock_; }
+
+ // Implement |base::PortProvider|.
+ mach_port_t TaskForPid(base::ProcessHandle process) const override;
+
+ private:
+ friend class MachPortBrokerTest;
+
+ // Message handler that is invoked on |dispatch_source_| when an
+ // incoming message needs to be received.
+ void HandleRequest();
+
+ // Updates the mapping for |pid| to include the given |mach_info|. Does
+ // nothing if PlaceholderForPid() has not already been called for the given
+ // |pid|. Callers MUST acquire the lock given by GetLock() before calling
+ // this method (and release the lock afterwards).
+ void FinalizePid(base::ProcessHandle pid, mach_port_t task_port);
+
+ // Name used to identify a particular port broker.
+ const std::string name_;
+
+ // The Mach port on which the server listens.
+ base::mac::ScopedMachReceiveRight server_port_;
+
+ // The dispatch source and queue on which Mach messages will be received.
+ std::unique_ptr<base::DispatchSourceMach> dispatch_source_;
+
+ // Stores mach info for every process in the broker.
+ typedef std::map<base::ProcessHandle, mach_port_t> MachMap;
+ MachMap mach_map_;
+
+ // Mutex that guards |mach_map_|.
+ mutable base::Lock lock_;
+
+ DISALLOW_COPY_AND_ASSIGN(MachPortBroker);
+};
+
+} // namespace base
+
+#endif // BASE_MAC_MACH_PORT_BROKER_H_
diff --git a/libchrome/base/mac/mach_port_broker.mm b/libchrome/base/mac/mach_port_broker.mm
new file mode 100644
index 0000000..bd47017
--- /dev/null
+++ b/libchrome/base/mac/mach_port_broker.mm
@@ -0,0 +1,189 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mach_port_broker.h"
+
+#include <bsm/libbsm.h>
+#include <servers/bootstrap.h>
+
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "base/mac/mach_logging.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+
+namespace {
+
+// Mach message structure used in the child as a sending message.
+struct MachPortBroker_ChildSendMsg {
+ mach_msg_header_t header;
+ mach_msg_body_t body;
+ mach_msg_port_descriptor_t child_task_port;
+};
+
+// Complement to the ChildSendMsg, this is used in the parent for receiving
+// a message. Contains a message trailer with audit information.
+struct MachPortBroker_ParentRecvMsg : public MachPortBroker_ChildSendMsg {
+ mach_msg_audit_trailer_t trailer;
+};
+
+} // namespace
+
+// static
+bool MachPortBroker::ChildSendTaskPortToParent(const std::string& name) {
+ // Look up the named MachPortBroker port that's been registered with the
+ // bootstrap server.
+ mach_port_t parent_port;
+ kern_return_t kr = bootstrap_look_up(bootstrap_port,
+ const_cast<char*>(GetMachPortName(name, true).c_str()), &parent_port);
+ if (kr != KERN_SUCCESS) {
+ BOOTSTRAP_LOG(ERROR, kr) << "bootstrap_look_up";
+ return false;
+ }
+ base::mac::ScopedMachSendRight scoped_right(parent_port);
+
+ // Create the check in message. This will copy a send right on this process'
+ // (the child's) task port and send it to the parent.
+ MachPortBroker_ChildSendMsg msg;
+ bzero(&msg, sizeof(msg));
+ msg.header.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND) |
+ MACH_MSGH_BITS_COMPLEX;
+ msg.header.msgh_remote_port = parent_port;
+ msg.header.msgh_size = sizeof(msg);
+ msg.body.msgh_descriptor_count = 1;
+ msg.child_task_port.name = mach_task_self();
+ msg.child_task_port.disposition = MACH_MSG_TYPE_PORT_SEND;
+ msg.child_task_port.type = MACH_MSG_PORT_DESCRIPTOR;
+
+ kr = mach_msg(&msg.header, MACH_SEND_MSG | MACH_SEND_TIMEOUT, sizeof(msg),
+ 0, MACH_PORT_NULL, 100 /*milliseconds*/, MACH_PORT_NULL);
+ if (kr != KERN_SUCCESS) {
+ MACH_LOG(ERROR, kr) << "mach_msg";
+ return false;
+ }
+
+ return true;
+}
+
+// static
+std::string MachPortBroker::GetMachPortName(const std::string& name,
+ bool is_child) {
+ // In child processes, use the parent's pid.
+ const pid_t pid = is_child ? getppid() : getpid();
+ return base::StringPrintf(
+ "%s.%s.%d", base::mac::BaseBundleID(), name.c_str(), pid);
+}
+
+mach_port_t MachPortBroker::TaskForPid(base::ProcessHandle pid) const {
+ base::AutoLock lock(lock_);
+ MachPortBroker::MachMap::const_iterator it = mach_map_.find(pid);
+ if (it == mach_map_.end())
+ return MACH_PORT_NULL;
+ return it->second;
+}
+
+MachPortBroker::MachPortBroker(const std::string& name) : name_(name) {}
+
+MachPortBroker::~MachPortBroker() {}
+
+bool MachPortBroker::Init() {
+ DCHECK(server_port_.get() == MACH_PORT_NULL);
+
+ // Check in with launchd and publish the service name.
+ mach_port_t port;
+ kern_return_t kr = bootstrap_check_in(
+ bootstrap_port, GetMachPortName(name_, false).c_str(), &port);
+ if (kr != KERN_SUCCESS) {
+ BOOTSTRAP_LOG(ERROR, kr) << "bootstrap_check_in";
+ return false;
+ }
+ server_port_.reset(port);
+
+ // Start the dispatch source.
+ std::string queue_name =
+ base::StringPrintf("%s.MachPortBroker", base::mac::BaseBundleID());
+ dispatch_source_.reset(new base::DispatchSourceMach(
+ queue_name.c_str(), server_port_.get(), ^{ HandleRequest(); }));
+ dispatch_source_->Resume();
+
+ return true;
+}
+
+void MachPortBroker::AddPlaceholderForPid(base::ProcessHandle pid) {
+ lock_.AssertAcquired();
+ DCHECK_EQ(0u, mach_map_.count(pid));
+ mach_map_[pid] = MACH_PORT_NULL;
+}
+
+void MachPortBroker::InvalidatePid(base::ProcessHandle pid) {
+ lock_.AssertAcquired();
+
+ MachMap::iterator mach_it = mach_map_.find(pid);
+ if (mach_it != mach_map_.end()) {
+ kern_return_t kr = mach_port_deallocate(mach_task_self(), mach_it->second);
+ MACH_LOG_IF(WARNING, kr != KERN_SUCCESS, kr) << "mach_port_deallocate";
+ mach_map_.erase(mach_it);
+ }
+}
+
+void MachPortBroker::HandleRequest() {
+ MachPortBroker_ParentRecvMsg msg;
+ bzero(&msg, sizeof(msg));
+ msg.header.msgh_size = sizeof(msg);
+ msg.header.msgh_local_port = server_port_.get();
+
+ const mach_msg_option_t options = MACH_RCV_MSG |
+ MACH_RCV_TRAILER_TYPE(MACH_RCV_TRAILER_AUDIT) |
+ MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT);
+
+ kern_return_t kr = mach_msg(&msg.header,
+ options,
+ 0,
+ sizeof(msg),
+ server_port_.get(),
+ MACH_MSG_TIMEOUT_NONE,
+ MACH_PORT_NULL);
+ if (kr != KERN_SUCCESS) {
+ MACH_LOG(ERROR, kr) << "mach_msg";
+ return;
+ }
+
+ // Use the kernel audit information to make sure this message is from
+ // a task that this process spawned. The kernel audit token contains the
+ // unspoofable pid of the task that sent the message.
+ //
+ // TODO(rsesek): In the 10.7 SDK, there's audit_token_to_pid().
+ pid_t child_pid;
+ audit_token_to_au32(msg.trailer.msgh_audit,
+ NULL, NULL, NULL, NULL, NULL, &child_pid, NULL, NULL);
+
+ mach_port_t child_task_port = msg.child_task_port.name;
+
+ // Take the lock and update the broker information.
+ {
+ base::AutoLock lock(lock_);
+ FinalizePid(child_pid, child_task_port);
+ }
+ NotifyObservers(child_pid);
+}
+
+void MachPortBroker::FinalizePid(base::ProcessHandle pid,
+ mach_port_t task_port) {
+ lock_.AssertAcquired();
+
+ MachMap::iterator it = mach_map_.find(pid);
+ if (it == mach_map_.end()) {
+ // Do nothing for unknown pids.
+ LOG(ERROR) << "Unknown process " << pid << " is sending Mach IPC messages!";
+ return;
+ }
+
+ DCHECK(it->second == MACH_PORT_NULL);
+ if (it->second == MACH_PORT_NULL)
+ it->second = task_port;
+}
+
+} // namespace base
diff --git a/libchrome/base/mac/mach_port_broker_unittest.cc b/libchrome/base/mac/mach_port_broker_unittest.cc
new file mode 100644
index 0000000..bff8eb6
--- /dev/null
+++ b/libchrome/base/mac/mach_port_broker_unittest.cc
@@ -0,0 +1,133 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mach_port_broker.h"
+
+#include "base/command_line.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/test_timeouts.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace base {
+
+namespace {
+const char kBootstrapPortName[] = "thisisatest";
+}
+
+class MachPortBrokerTest : public testing::Test,
+ public base::PortProvider::Observer {
+ public:
+ MachPortBrokerTest()
+ : broker_(kBootstrapPortName),
+ event_(base::WaitableEvent::ResetPolicy::MANUAL,
+ base::WaitableEvent::InitialState::NOT_SIGNALED),
+ received_process_(kNullProcessHandle) {
+ broker_.AddObserver(this);
+ }
+ ~MachPortBrokerTest() override {
+ broker_.RemoveObserver(this);
+ }
+
+ // Helper function to acquire/release locks and call |PlaceholderForPid()|.
+ void AddPlaceholderForPid(base::ProcessHandle pid) {
+ base::AutoLock lock(broker_.GetLock());
+ broker_.AddPlaceholderForPid(pid);
+ }
+
+ // Helper function to acquire/release locks and call |FinalizePid()|.
+ void FinalizePid(base::ProcessHandle pid,
+ mach_port_t task_port) {
+ base::AutoLock lock(broker_.GetLock());
+ broker_.FinalizePid(pid, task_port);
+ }
+
+ void WaitForTaskPort() {
+ event_.Wait();
+ }
+
+ // base::PortProvider::Observer:
+ void OnReceivedTaskPort(ProcessHandle process) override {
+ received_process_ = process;
+ event_.Signal();
+ }
+
+ protected:
+ MachPortBroker broker_;
+ WaitableEvent event_;
+ ProcessHandle received_process_;
+};
+
+TEST_F(MachPortBrokerTest, Locks) {
+ // Acquire and release the locks. Nothing bad should happen.
+ base::AutoLock lock(broker_.GetLock());
+}
+
+TEST_F(MachPortBrokerTest, AddPlaceholderAndFinalize) {
+ // Add a placeholder for PID 1.
+ AddPlaceholderForPid(1);
+ EXPECT_EQ(0u, broker_.TaskForPid(1));
+
+ // Finalize PID 1.
+ FinalizePid(1, 100u);
+ EXPECT_EQ(100u, broker_.TaskForPid(1));
+
+ // Should be no entry for PID 2.
+ EXPECT_EQ(0u, broker_.TaskForPid(2));
+}
+
+TEST_F(MachPortBrokerTest, FinalizeUnknownPid) {
+ // Finalizing an entry for an unknown pid should not add it to the map.
+ FinalizePid(1u, 100u);
+ EXPECT_EQ(0u, broker_.TaskForPid(1u));
+}
+
+MULTIPROCESS_TEST_MAIN(MachPortBrokerTestChild) {
+ CHECK(base::MachPortBroker::ChildSendTaskPortToParent(kBootstrapPortName));
+ return 0;
+}
+
+TEST_F(MachPortBrokerTest, ReceivePortFromChild) {
+ ASSERT_TRUE(broker_.Init());
+ CommandLine command_line(
+ base::GetMultiProcessTestChildBaseCommandLine());
+ broker_.GetLock().Acquire();
+ base::Process test_child_process = base::SpawnMultiProcessTestChild(
+ "MachPortBrokerTestChild", command_line, LaunchOptions());
+ broker_.AddPlaceholderForPid(test_child_process.Handle());
+ broker_.GetLock().Release();
+
+ WaitForTaskPort();
+ EXPECT_EQ(test_child_process.Handle(), received_process_);
+
+ int rv = -1;
+ ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+ TestTimeouts::action_timeout(), &rv));
+ EXPECT_EQ(0, rv);
+
+ EXPECT_NE(static_cast<mach_port_t>(MACH_PORT_NULL),
+ broker_.TaskForPid(test_child_process.Handle()));
+}
+
+TEST_F(MachPortBrokerTest, ReceivePortFromChildWithoutAdding) {
+ ASSERT_TRUE(broker_.Init());
+ CommandLine command_line(
+ base::GetMultiProcessTestChildBaseCommandLine());
+ broker_.GetLock().Acquire();
+ base::Process test_child_process = base::SpawnMultiProcessTestChild(
+ "MachPortBrokerTestChild", command_line, LaunchOptions());
+ broker_.GetLock().Release();
+
+ int rv = -1;
+ ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+ TestTimeouts::action_timeout(), &rv));
+ EXPECT_EQ(0, rv);
+
+ EXPECT_EQ(static_cast<mach_port_t>(MACH_PORT_NULL),
+ broker_.TaskForPid(test_child_process.Handle()));
+}
+
+} // namespace base
diff --git a/libchrome/base/mac/mach_port_util.cc b/libchrome/base/mac/mach_port_util.cc
new file mode 100644
index 0000000..0eee210
--- /dev/null
+++ b/libchrome/base/mac/mach_port_util.cc
@@ -0,0 +1,136 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mach_port_util.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+namespace {
+
+// Struct for sending a complex Mach message.
+struct MachSendComplexMessage {
+ mach_msg_header_t header;
+ mach_msg_body_t body;
+ mach_msg_port_descriptor_t data;
+};
+
+// Struct for receiving a complex message.
+struct MachReceiveComplexMessage {
+ mach_msg_header_t header;
+ mach_msg_body_t body;
+ mach_msg_port_descriptor_t data;
+ mach_msg_trailer_t trailer;
+};
+
+} // namespace
+
+kern_return_t SendMachPort(mach_port_t endpoint,
+ mach_port_t port_to_send,
+ int disposition) {
+ MachSendComplexMessage send_msg;
+ send_msg.header.msgh_bits =
+ MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0) | MACH_MSGH_BITS_COMPLEX;
+ send_msg.header.msgh_size = sizeof(send_msg);
+ send_msg.header.msgh_remote_port = endpoint;
+ send_msg.header.msgh_local_port = MACH_PORT_NULL;
+ send_msg.header.msgh_reserved = 0;
+ send_msg.header.msgh_id = 0;
+ send_msg.body.msgh_descriptor_count = 1;
+ send_msg.data.name = port_to_send;
+ send_msg.data.disposition = disposition;
+ send_msg.data.type = MACH_MSG_PORT_DESCRIPTOR;
+
+ kern_return_t kr =
+ mach_msg(&send_msg.header, MACH_SEND_MSG | MACH_SEND_TIMEOUT,
+ send_msg.header.msgh_size,
+ 0, // receive limit
+ MACH_PORT_NULL, // receive name
+ 0, // timeout
+ MACH_PORT_NULL); // notification port
+
+ if (kr != KERN_SUCCESS)
+ mach_port_deallocate(mach_task_self(), endpoint);
+
+ return kr;
+}
+
+base::mac::ScopedMachSendRight ReceiveMachPort(mach_port_t port_to_listen_on) {
+ MachReceiveComplexMessage recv_msg;
+ mach_msg_header_t* recv_hdr = &recv_msg.header;
+ recv_hdr->msgh_local_port = port_to_listen_on;
+ recv_hdr->msgh_size = sizeof(recv_msg);
+
+ kern_return_t kr =
+ mach_msg(recv_hdr, MACH_RCV_MSG | MACH_RCV_TIMEOUT, 0,
+ recv_hdr->msgh_size, port_to_listen_on, 0, MACH_PORT_NULL);
+ if (kr != KERN_SUCCESS)
+ return base::mac::ScopedMachSendRight(MACH_PORT_NULL);
+ if (recv_msg.header.msgh_id != 0)
+ return base::mac::ScopedMachSendRight(MACH_PORT_NULL);
+ return base::mac::ScopedMachSendRight(recv_msg.data.name);
+}
+
+mach_port_name_t CreateIntermediateMachPort(
+ mach_port_t task_port,
+ base::mac::ScopedMachSendRight port_to_insert,
+ MachCreateError* error_code) {
+ DCHECK_NE(mach_task_self(), task_port);
+ DCHECK_NE(static_cast<mach_port_name_t>(MACH_PORT_NULL), task_port);
+
+ // Make a port with receive rights in the destination task.
+ mach_port_name_t endpoint;
+ kern_return_t kr =
+ mach_port_allocate(task_port, MACH_PORT_RIGHT_RECEIVE, &endpoint);
+ if (kr != KERN_SUCCESS) {
+ if (error_code)
+ *error_code = MachCreateError::ERROR_MAKE_RECEIVE_PORT;
+ return MACH_PORT_NULL;
+ }
+
+ // Change its message queue limit so that it accepts one message.
+ mach_port_limits limits = {};
+ limits.mpl_qlimit = 1;
+ kr = mach_port_set_attributes(task_port, endpoint, MACH_PORT_LIMITS_INFO,
+ reinterpret_cast<mach_port_info_t>(&limits),
+ MACH_PORT_LIMITS_INFO_COUNT);
+ if (kr != KERN_SUCCESS) {
+ if (error_code)
+ *error_code = MachCreateError::ERROR_SET_ATTRIBUTES;
+ mach_port_deallocate(task_port, endpoint);
+ return MACH_PORT_NULL;
+ }
+
+ // Get a send right.
+ mach_port_t send_once_right;
+ mach_msg_type_name_t send_right_type;
+ kr =
+ mach_port_extract_right(task_port, endpoint, MACH_MSG_TYPE_MAKE_SEND_ONCE,
+ &send_once_right, &send_right_type);
+ if (kr != KERN_SUCCESS) {
+ if (error_code)
+ *error_code = MachCreateError::ERROR_EXTRACT_DEST_RIGHT;
+ mach_port_deallocate(task_port, endpoint);
+ return MACH_PORT_NULL;
+ }
+ DCHECK_EQ(static_cast<mach_msg_type_name_t>(MACH_MSG_TYPE_PORT_SEND_ONCE),
+ send_right_type);
+
+ // This call takes ownership of |send_once_right|.
+ kr = base::SendMachPort(
+ send_once_right, port_to_insert.get(), MACH_MSG_TYPE_COPY_SEND);
+ if (kr != KERN_SUCCESS) {
+ if (error_code)
+ *error_code = MachCreateError::ERROR_SEND_MACH_PORT;
+ mach_port_deallocate(task_port, endpoint);
+ return MACH_PORT_NULL;
+ }
+
+ // Endpoint is intentionally leaked into the destination task. An IPC must be
+ // sent to the destination task so that it can clean up this port.
+ return endpoint;
+}
+
+} // namespace base
diff --git a/libchrome/base/mac/mach_port_util.h b/libchrome/base/mac/mach_port_util.h
new file mode 100644
index 0000000..f7a7f32
--- /dev/null
+++ b/libchrome/base/mac/mach_port_util.h
@@ -0,0 +1,48 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_MACH_PORT_UTIL_H_
+#define BASE_MAC_MACH_PORT_UTIL_H_
+
+#include <mach/mach.h>
+
+#include "base/base_export.h"
+#include "base/mac/scoped_mach_port.h"
+
+namespace base {
+
+enum class MachCreateError {
+ ERROR_MAKE_RECEIVE_PORT,
+ ERROR_SET_ATTRIBUTES,
+ ERROR_EXTRACT_DEST_RIGHT,
+ ERROR_SEND_MACH_PORT,
+};
+
+// Sends a Mach port to |dest_port|. Assumes that |dest_port| is a send once
+// right. Takes ownership of |dest_port|.
+BASE_EXPORT kern_return_t SendMachPort(mach_port_t dest_port,
+ mach_port_t port_to_send,
+ int disposition);
+
+// Receives a Mach port from |port_to_listen_on|, which should have exactly one
+// queued message. Returns |MACH_PORT_NULL| on any error.
+BASE_EXPORT base::mac::ScopedMachSendRight ReceiveMachPort(
+ mach_port_t port_to_listen_on);
+
+// Creates an intermediate Mach port in |task_port| and sends |port_to_insert|
+// as a mach_msg to the intermediate Mach port.
+// |task_port| is the task port of another process.
+// |port_to_insert| must be a send right in the current task's name space.
+// Returns the intermediate port on success, and MACH_PORT_NULL on failure.
+// On failure, |error_code| is set if not null.
+// This method takes ownership of |port_to_insert|. On success, ownership is
+// passed to the intermediate Mach port.
+BASE_EXPORT mach_port_name_t CreateIntermediateMachPort(
+ mach_port_t task_port,
+ base::mac::ScopedMachSendRight port_to_insert,
+ MachCreateError* error_code);
+
+} // namespace base
+
+#endif // BASE_MAC_MACH_PORT_UTIL_H_
diff --git a/libchrome/base/mac/scoped_aedesc.h b/libchrome/base/mac/scoped_aedesc.h
new file mode 100644
index 0000000..7327092
--- /dev/null
+++ b/libchrome/base/mac/scoped_aedesc.h
@@ -0,0 +1,52 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_AEDESC_H_
+#define BASE_MAC_SCOPED_AEDESC_H_
+
+#import <CoreServices/CoreServices.h>
+
+#include "base/macros.h"
+
+namespace base {
+namespace mac {
+
+// The ScopedAEDesc is used to scope AppleEvent descriptors. On creation,
+// it will store a NULL descriptor. On destruction, it will dispose of the
+// descriptor.
+//
+// This class is parameterized for additional type safety checks. You can use
+// the generic AEDesc type by not providing a template parameter:
+// ScopedAEDesc<> desc;
+template <typename AEDescType = AEDesc>
+class ScopedAEDesc {
+ public:
+ ScopedAEDesc() {
+ AECreateDesc(typeNull, NULL, 0, &desc_);
+ }
+
+ ~ScopedAEDesc() {
+ AEDisposeDesc(&desc_);
+ }
+
+ // Used for in parameters.
+ operator const AEDescType*() {
+ return &desc_;
+ }
+
+ // Used for out parameters.
+ AEDescType* OutPointer() {
+ return &desc_;
+ }
+
+ private:
+ AEDescType desc_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedAEDesc);
+};
+
+} // namespace mac
+} // namespace base
+
+#endif // BASE_MAC_SCOPED_AEDESC_H_
diff --git a/libchrome/base/mac/scoped_authorizationref.h b/libchrome/base/mac/scoped_authorizationref.h
new file mode 100644
index 0000000..03cde86
--- /dev/null
+++ b/libchrome/base/mac/scoped_authorizationref.h
@@ -0,0 +1,82 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_AUTHORIZATIONREF_H_
+#define BASE_MAC_SCOPED_AUTHORIZATIONREF_H_
+
+#include <Security/Authorization.h>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+
+// ScopedAuthorizationRef maintains ownership of an AuthorizationRef. It is
+// patterned after the scoped_ptr interface.
+
+namespace base {
+namespace mac {
+
+class ScopedAuthorizationRef {
+ public:
+ explicit ScopedAuthorizationRef(AuthorizationRef authorization = NULL)
+ : authorization_(authorization) {
+ }
+
+ ~ScopedAuthorizationRef() {
+ if (authorization_) {
+ AuthorizationFree(authorization_, kAuthorizationFlagDestroyRights);
+ }
+ }
+
+ void reset(AuthorizationRef authorization = NULL) {
+ if (authorization_ != authorization) {
+ if (authorization_) {
+ AuthorizationFree(authorization_, kAuthorizationFlagDestroyRights);
+ }
+ authorization_ = authorization;
+ }
+ }
+
+ bool operator==(AuthorizationRef that) const {
+ return authorization_ == that;
+ }
+
+ bool operator!=(AuthorizationRef that) const {
+ return authorization_ != that;
+ }
+
+ operator AuthorizationRef() const {
+ return authorization_;
+ }
+
+ AuthorizationRef* get_pointer() { return &authorization_; }
+
+ AuthorizationRef get() const {
+ return authorization_;
+ }
+
+ void swap(ScopedAuthorizationRef& that) {
+ AuthorizationRef temp = that.authorization_;
+ that.authorization_ = authorization_;
+ authorization_ = temp;
+ }
+
+ // ScopedAuthorizationRef::release() is like std::unique_ptr<>::release. It is
+ // NOT a wrapper for AuthorizationFree(). To force a ScopedAuthorizationRef
+ // object to call AuthorizationFree(), use ScopedAuthorizationRef::reset().
+ AuthorizationRef release() WARN_UNUSED_RESULT {
+ AuthorizationRef temp = authorization_;
+ authorization_ = NULL;
+ return temp;
+ }
+
+ private:
+ AuthorizationRef authorization_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedAuthorizationRef);
+};
+
+} // namespace mac
+} // namespace base
+
+#endif // BASE_MAC_SCOPED_AUTHORIZATIONREF_H_
diff --git a/libchrome/base/mac/scoped_block.h b/libchrome/base/mac/scoped_block.h
new file mode 100644
index 0000000..8199677
--- /dev/null
+++ b/libchrome/base/mac/scoped_block.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_BLOCK_H_
+#define BASE_MAC_SCOPED_BLOCK_H_
+
+#include <Block.h>
+
+#include "base/mac/scoped_typeref.h"
+
+#if defined(__has_feature) && __has_feature(objc_arc)
+#define BASE_MAC_BRIDGE_CAST(TYPE, VALUE) (__bridge TYPE)(VALUE)
+#else
+#define BASE_MAC_BRIDGE_CAST(TYPE, VALUE) VALUE
+#endif
+
+namespace base {
+namespace mac {
+
+namespace internal {
+
+template <typename B>
+struct ScopedBlockTraits {
+ static B InvalidValue() { return nullptr; }
+ static B Retain(B block) {
+ return BASE_MAC_BRIDGE_CAST(
+ B, Block_copy(BASE_MAC_BRIDGE_CAST(const void*, block)));
+ }
+ static void Release(B block) {
+ Block_release(BASE_MAC_BRIDGE_CAST(const void*, block));
+ }
+};
+
+} // namespace internal
+
+// ScopedBlock<> is patterned after ScopedCFTypeRef<>, but uses Block_copy() and
+// Block_release() instead of CFRetain() and CFRelease().
+
+template <typename B>
+using ScopedBlock = ScopedTypeRef<B, internal::ScopedBlockTraits<B>>;
+
+} // namespace mac
+} // namespace base
+
+#undef BASE_MAC_BRIDGE_CAST
+
+#endif // BASE_MAC_SCOPED_BLOCK_H_
diff --git a/libchrome/base/mac/scoped_cffiledescriptorref.h b/libchrome/base/mac/scoped_cffiledescriptorref.h
new file mode 100644
index 0000000..923a159
--- /dev/null
+++ b/libchrome/base/mac/scoped_cffiledescriptorref.h
@@ -0,0 +1,39 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_CFFILEDESCRIPTORREF_H_
+#define BASE_MAC_SCOPED_CFFILEDESCRIPTORREF_H_
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "base/scoped_generic.h"
+
+namespace base {
+namespace mac {
+
+namespace internal {
+
+struct ScopedCFFileDescriptorRefTraits {
+ static CFFileDescriptorRef InvalidValue() { return nullptr; }
+ static void Free(CFFileDescriptorRef ref) {
+ CFFileDescriptorInvalidate(ref);
+ CFRelease(ref);
+ }
+};
+
+} // namespace internal
+
+// ScopedCFFileDescriptorRef is designed after ScopedCFTypeRef<>. On
+// destruction, it will invalidate the file descriptor.
+// ScopedCFFileDescriptorRef (unlike ScopedCFTypeRef<>) does not support RETAIN
+// semantics, copying, or assignment, as doing so would increase the chances
+// that a file descriptor is invalidated while still in use.
+using ScopedCFFileDescriptorRef =
+ ScopedGeneric<CFFileDescriptorRef,
+ internal::ScopedCFFileDescriptorRefTraits>;
+
+} // namespace mac
+} // namespace base
+
+#endif // BASE_MAC_SCOPED_CFFILEDESCRIPTORREF_H_
diff --git a/libchrome/base/mac/scoped_cftyperef.h b/libchrome/base/mac/scoped_cftyperef.h
new file mode 100644
index 0000000..ccbc5cf
--- /dev/null
+++ b/libchrome/base/mac/scoped_cftyperef.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_CFTYPEREF_H_
+#define BASE_MAC_SCOPED_CFTYPEREF_H_
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "base/mac/scoped_typeref.h"
+
+namespace base {
+
+// ScopedCFTypeRef<> is patterned after std::unique_ptr<>, but maintains
+// ownership of a CoreFoundation object: any object that can be represented
+// as a CFTypeRef. Style deviations here are solely for compatibility with
+// std::unique_ptr<>'s interface, with which everyone is already familiar.
+//
+// By default, ScopedCFTypeRef<> takes ownership of an object (in the
+// constructor or in reset()) by taking over the caller's existing ownership
+// claim. The caller must own the object it gives to ScopedCFTypeRef<>, and
+// relinquishes an ownership claim to that object. ScopedCFTypeRef<> does not
+// call CFRetain(). This behavior is parameterized by the |OwnershipPolicy|
+// enum. If the value |RETAIN| is passed (in the constructor or in reset()),
+// then ScopedCFTypeRef<> will call CFRetain() on the object, and the initial
+// ownership is not changed.
+
+namespace internal {
+
+template<typename CFT>
+struct ScopedCFTypeRefTraits {
+ static CFT InvalidValue() { return nullptr; }
+ static CFT Retain(CFT object) {
+ CFRetain(object);
+ return object;
+ }
+ static void Release(CFT object) {
+ CFRelease(object);
+ }
+};
+
+} // namespace internal
+
+template<typename CFT>
+using ScopedCFTypeRef =
+ ScopedTypeRef<CFT, internal::ScopedCFTypeRefTraits<CFT>>;
+
+} // namespace base
+
+#endif // BASE_MAC_SCOPED_CFTYPEREF_H_
diff --git a/libchrome/base/mac/scoped_dispatch_object.h b/libchrome/base/mac/scoped_dispatch_object.h
new file mode 100644
index 0000000..5f5d517
--- /dev/null
+++ b/libchrome/base/mac/scoped_dispatch_object.h
@@ -0,0 +1,36 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_DISPATCH_OBJECT_H_
+#define BASE_MAC_SCOPED_DISPATCH_OBJECT_H_
+
+#include <dispatch/dispatch.h>
+
+#include "base/mac/scoped_typeref.h"
+
+namespace base {
+
+namespace internal {
+
+template <typename T>
+struct ScopedDispatchObjectTraits {
+ static T InvalidValue() { return nullptr; }
+ static T Retain(T object) {
+ dispatch_retain(object);
+ return object;
+ }
+ static void Release(T object) {
+ dispatch_release(object);
+ }
+};
+
+} // namespace internal
+
+template <typename T>
+using ScopedDispatchObject =
+ ScopedTypeRef<T, internal::ScopedDispatchObjectTraits<T>>;
+
+} // namespace base
+
+#endif // BASE_MAC_SCOPED_DISPATCH_OBJECT_H_
diff --git a/libchrome/base/mac/scoped_ioobject.h b/libchrome/base/mac/scoped_ioobject.h
new file mode 100644
index 0000000..c948cb5
--- /dev/null
+++ b/libchrome/base/mac/scoped_ioobject.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_IOOBJECT_H_
+#define BASE_MAC_SCOPED_IOOBJECT_H_
+
+#include <IOKit/IOKitLib.h>
+
+#include "base/mac/scoped_typeref.h"
+
+namespace base {
+namespace mac {
+
+namespace internal {
+
+template <typename IOT>
+struct ScopedIOObjectTraits {
+ static IOT InvalidValue() { return IO_OBJECT_NULL; }
+ static IOT Retain(IOT iot) {
+ IOObjectRetain(iot);
+ return iot;
+ }
+ static void Release(IOT iot) { IOObjectRelease(iot); }
+};
+
+} // namespace internal
+
+// Just like ScopedCFTypeRef but for io_object_t and subclasses.
+template <typename IOT>
+using ScopedIOObject = ScopedTypeRef<IOT, internal::ScopedIOObjectTraits<IOT>>;
+
+} // namespace mac
+} // namespace base
+
+#endif // BASE_MAC_SCOPED_IOOBJECT_H_
diff --git a/libchrome/base/mac/scoped_ioplugininterface.h b/libchrome/base/mac/scoped_ioplugininterface.h
new file mode 100644
index 0000000..872da8e
--- /dev/null
+++ b/libchrome/base/mac/scoped_ioplugininterface.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_IOPLUGININTERFACE_H_
+#define BASE_MAC_SCOPED_IOPLUGININTERFACE_H_
+
+#include <IOKit/IOKitLib.h>
+
+#include "base/mac/scoped_typeref.h"
+
+namespace base {
+namespace mac {
+
+namespace internal {
+
+template <typename T>
+struct ScopedIOPluginInterfaceTraits {
+ static T InvalidValue() { return nullptr; }
+ static T Retain(T t) {
+ (*t)->AddRef(t);
+ return t;
+ }
+ static void Release(T t) { (*t)->Release(t); }
+};
+
+} // namespace internal
+
+// Just like ScopedCFTypeRef but for IOCFPlugInInterface and friends
+// (IOUSBInterfaceStruct and IOUSBDeviceStruct320 in particular).
+template <typename T>
+using ScopedIOPluginInterface =
+ ScopedTypeRef<T**, internal::ScopedIOPluginInterfaceTraits<T**>>;
+
+} // namespace mac
+} // namespace base
+
+#endif // BASE_MAC_SCOPED_IOPLUGININTERFACE_H_
diff --git a/libchrome/base/mac/scoped_launch_data.h b/libchrome/base/mac/scoped_launch_data.h
new file mode 100644
index 0000000..f4db330
--- /dev/null
+++ b/libchrome/base/mac/scoped_launch_data.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_LAUNCH_DATA_H_
+#define BASE_MAC_SCOPED_LAUNCH_DATA_H_
+
+#include <launch.h>
+
+#include "base/scoped_generic.h"
+
+namespace base {
+namespace mac {
+
+namespace internal {
+
+struct ScopedLaunchDataTraits {
+ static launch_data_t InvalidValue() { return nullptr; }
+ static void Free(launch_data_t ldt) { launch_data_free(ldt); }
+};
+
+} // namespace internal
+
+// Just like std::unique_ptr<> but for launch_data_t.
+using ScopedLaunchData =
+ ScopedGeneric<launch_data_t, internal::ScopedLaunchDataTraits>;
+
+} // namespace mac
+} // namespace base
+
+#endif // BASE_MAC_SCOPED_LAUNCH_DATA_H_
diff --git a/libchrome/base/mac/scoped_mach_port.cc b/libchrome/base/mac/scoped_mach_port.cc
new file mode 100644
index 0000000..13307f2
--- /dev/null
+++ b/libchrome/base/mac/scoped_mach_port.cc
@@ -0,0 +1,38 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/scoped_mach_port.h"
+
+#include "base/mac/mach_logging.h"
+
+namespace base {
+namespace mac {
+namespace internal {
+
+// static
+void SendRightTraits::Free(mach_port_t port) {
+ kern_return_t kr = mach_port_deallocate(mach_task_self(), port);
+ MACH_LOG_IF(ERROR, kr != KERN_SUCCESS, kr)
+ << "ScopedMachSendRight mach_port_deallocate";
+}
+
+// static
+void ReceiveRightTraits::Free(mach_port_t port) {
+ kern_return_t kr =
+ mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_RECEIVE, -1);
+ MACH_LOG_IF(ERROR, kr != KERN_SUCCESS, kr)
+ << "ScopedMachReceiveRight mach_port_mod_refs";
+}
+
+// static
+void PortSetTraits::Free(mach_port_t port) {
+ kern_return_t kr =
+ mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_PORT_SET, -1);
+ MACH_LOG_IF(ERROR, kr != KERN_SUCCESS, kr)
+ << "ScopedMachPortSet mach_port_mod_refs";
+}
+
+} // namespace internal
+} // namespace mac
+} // namespace base
diff --git a/libchrome/base/mac/scoped_mach_port.h b/libchrome/base/mac/scoped_mach_port.h
new file mode 100644
index 0000000..67fed6b
--- /dev/null
+++ b/libchrome/base/mac/scoped_mach_port.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_MACH_PORT_H_
+#define BASE_MAC_SCOPED_MACH_PORT_H_
+
+#include <mach/mach.h>
+
+#include "base/base_export.h"
+#include "base/scoped_generic.h"
+
+namespace base {
+namespace mac {
+
+namespace internal {
+
+struct BASE_EXPORT SendRightTraits {
+ static mach_port_t InvalidValue() {
+ return MACH_PORT_NULL;
+ }
+
+ BASE_EXPORT static void Free(mach_port_t port);
+};
+
+struct BASE_EXPORT ReceiveRightTraits {
+ static mach_port_t InvalidValue() {
+ return MACH_PORT_NULL;
+ }
+
+ BASE_EXPORT static void Free(mach_port_t port);
+};
+
+struct PortSetTraits {
+ static mach_port_t InvalidValue() {
+ return MACH_PORT_NULL;
+ }
+
+ BASE_EXPORT static void Free(mach_port_t port);
+};
+
+} // namespace internal
+
+// A scoper for handling a Mach port that names a send right. Send rights are
+// reference counted, and this takes ownership of the right on construction
+// and then removes a reference to the right on destruction. If the reference
+// is the last one on the right, the right is deallocated.
+using ScopedMachSendRight =
+ ScopedGeneric<mach_port_t, internal::SendRightTraits>;
+
+// A scoper for handling a Mach port's receive right. There is only one
+// receive right per port. This takes ownership of the receive right on
+// construction and then destroys the right on destruction, turning all
+// outstanding send rights into dead names.
+using ScopedMachReceiveRight =
+ ScopedGeneric<mach_port_t, internal::ReceiveRightTraits>;
+
+// A scoper for handling a Mach port set. A port set can have only one
+// reference. This takes ownership of that single reference on construction and
+// destroys the port set on destruction. Destroying a port set does not destroy
+// the receive rights that are members of the port set.
+using ScopedMachPortSet = ScopedGeneric<mach_port_t, internal::PortSetTraits>;
+
+} // namespace mac
+} // namespace base
+
+#endif // BASE_MAC_SCOPED_MACH_PORT_H_
diff --git a/libchrome/base/mac/scoped_mach_vm.cc b/libchrome/base/mac/scoped_mach_vm.cc
new file mode 100644
index 0000000..d52c77f
--- /dev/null
+++ b/libchrome/base/mac/scoped_mach_vm.cc
@@ -0,0 +1,33 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/scoped_mach_vm.h"
+
+namespace base {
+namespace mac {
+
+void ScopedMachVM::reset(vm_address_t address, vm_size_t size) {
+ DCHECK_EQ(address % PAGE_SIZE, 0u);
+ DCHECK_EQ(size % PAGE_SIZE, 0u);
+
+ if (size_) {
+ if (address_ < address) {
+ vm_deallocate(mach_task_self(),
+ address_,
+ std::min(size_, address - address_));
+ }
+ if (address_ + size_ > address + size) {
+ vm_address_t deallocate_start = std::max(address_, address + size);
+ vm_deallocate(mach_task_self(),
+ deallocate_start,
+ address_ + size_ - deallocate_start);
+ }
+ }
+
+ address_ = address;
+ size_ = size;
+}
+
+} // namespace mac
+} // namespace base
diff --git a/libchrome/base/mac/scoped_mach_vm.h b/libchrome/base/mac/scoped_mach_vm.h
new file mode 100644
index 0000000..58a13f6
--- /dev/null
+++ b/libchrome/base/mac/scoped_mach_vm.h
@@ -0,0 +1,93 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_MACH_VM_H_
+#define BASE_MAC_SCOPED_MACH_VM_H_
+
+#include <mach/mach.h>
+#include <stddef.h>
+
+#include <algorithm>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+
+// Use ScopedMachVM to supervise ownership of pages in the current process
+// through the Mach VM subsystem. Pages allocated with vm_allocate can be
+// released when exiting a scope with ScopedMachVM.
+//
+// The Mach VM subsystem operates on a page-by-page basis, and a single VM
+// allocation managed by a ScopedMachVM object may span multiple pages. As far
+// as Mach is concerned, allocated pages may be deallocated individually. This
+// is in contrast to higher-level allocators such as malloc, where the base
+// address of an allocation implies the size of an allocated block.
+// Consequently, it is not sufficient to just pass the base address of an
+// allocation to ScopedMachVM, it also needs to know the size of the
+// allocation. To avoid any confusion, both the base address and size must
+// be page-aligned.
+//
+// When dealing with Mach VM, base addresses will naturally be page-aligned,
+// but user-specified sizes may not be. If there's a concern that a size is
+// not page-aligned, use the mach_vm_round_page macro to correct it.
+//
+// Example:
+//
+// vm_address_t address = 0;
+// vm_size_t size = 12345; // This requested size is not page-aligned.
+// kern_return_t kr =
+// vm_allocate(mach_task_self(), &address, size, VM_FLAGS_ANYWHERE);
+// if (kr != KERN_SUCCESS) {
+// return false;
+// }
+// ScopedMachVM vm_owner(address, mach_vm_round_page(size));
+
+namespace base {
+namespace mac {
+
+class BASE_EXPORT ScopedMachVM {
+ public:
+ explicit ScopedMachVM(vm_address_t address = 0, vm_size_t size = 0)
+ : address_(address), size_(size) {
+ DCHECK_EQ(address % PAGE_SIZE, 0u);
+ DCHECK_EQ(size % PAGE_SIZE, 0u);
+ }
+
+ ~ScopedMachVM() {
+ if (size_) {
+ vm_deallocate(mach_task_self(), address_, size_);
+ }
+ }
+
+ void reset(vm_address_t address = 0, vm_size_t size = 0);
+
+ vm_address_t address() const {
+ return address_;
+ }
+
+ vm_size_t size() const {
+ return size_;
+ }
+
+ void swap(ScopedMachVM& that) {
+ std::swap(address_, that.address_);
+ std::swap(size_, that.size_);
+ }
+
+ void release() {
+ address_ = 0;
+ size_ = 0;
+ }
+
+ private:
+ vm_address_t address_;
+ vm_size_t size_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedMachVM);
+};
+
+} // namespace mac
+} // namespace base
+
+#endif // BASE_MAC_SCOPED_MACH_VM_H_
diff --git a/libchrome/base/mac/scoped_nsautorelease_pool.h b/libchrome/base/mac/scoped_nsautorelease_pool.h
new file mode 100644
index 0000000..4d15e6d
--- /dev/null
+++ b/libchrome/base/mac/scoped_nsautorelease_pool.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_NSAUTORELEASE_POOL_H_
+#define BASE_MAC_SCOPED_NSAUTORELEASE_POOL_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+#if defined(__OBJC__)
+@class NSAutoreleasePool;
+#else // __OBJC__
+class NSAutoreleasePool;
+#endif // __OBJC__
+
+namespace base {
+namespace mac {
+
+// ScopedNSAutoreleasePool allocates an NSAutoreleasePool when instantiated and
+// sends it a -drain message when destroyed. This allows an autorelease pool to
+// be maintained in ordinary C++ code without bringing in any direct Objective-C
+// dependency.
+
+class BASE_EXPORT ScopedNSAutoreleasePool {
+ public:
+ ScopedNSAutoreleasePool();
+ ~ScopedNSAutoreleasePool();
+
+ // Clear out the pool in case its position on the stack causes it to be
+ // alive for long periods of time (such as the entire length of the app).
+ // Only use this when you're certain the items currently in the pool are
+ // no longer needed.
+ void Recycle();
+ private:
+ NSAutoreleasePool* autorelease_pool_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ScopedNSAutoreleasePool);
+};
+
+} // namespace mac
+} // namespace base
+
+#endif // BASE_MAC_SCOPED_NSAUTORELEASE_POOL_H_
diff --git a/libchrome/base/mac/scoped_nsautorelease_pool.mm b/libchrome/base/mac/scoped_nsautorelease_pool.mm
new file mode 100644
index 0000000..e542ca8
--- /dev/null
+++ b/libchrome/base/mac/scoped_nsautorelease_pool.mm
@@ -0,0 +1,32 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/scoped_nsautorelease_pool.h"
+
+#import <Foundation/Foundation.h>
+
+#include "base/logging.h"
+
+namespace base {
+namespace mac {
+
+ScopedNSAutoreleasePool::ScopedNSAutoreleasePool()
+ : autorelease_pool_([[NSAutoreleasePool alloc] init]) {
+ DCHECK(autorelease_pool_);
+}
+
+ScopedNSAutoreleasePool::~ScopedNSAutoreleasePool() {
+ [autorelease_pool_ drain];
+}
+
+// Cycle the internal pool, allowing everything there to get cleaned up and
+// start anew.
+void ScopedNSAutoreleasePool::Recycle() {
+ [autorelease_pool_ drain];
+ autorelease_pool_ = [[NSAutoreleasePool alloc] init];
+ DCHECK(autorelease_pool_);
+}
+
+} // namespace mac
+} // namespace base
diff --git a/libchrome/base/mac/scoped_nsobject.h b/libchrome/base/mac/scoped_nsobject.h
new file mode 100644
index 0000000..cc54aa0
--- /dev/null
+++ b/libchrome/base/mac/scoped_nsobject.h
@@ -0,0 +1,239 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_NSOBJECT_H_
+#define BASE_MAC_SCOPED_NSOBJECT_H_
+
+#include <type_traits>
+
+// Include NSObject.h directly because Foundation.h pulls in many dependencies.
+// (Approx 100k lines of code versus 1.5k for NSObject.h). scoped_nsobject gets
+// singled out because it is most typically included from other header files.
+#import <Foundation/NSObject.h>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/mac/scoped_typeref.h"
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+@class NSAutoreleasePool;
+#endif
+
+namespace base {
+
+// scoped_nsobject<> is patterned after std::unique_ptr<>, but maintains
+// ownership of an NSObject subclass object. Style deviations here are solely
+// for compatibility with std::unique_ptr<>'s interface, with which everyone is
+// already familiar.
+//
+// scoped_nsobject<> takes ownership of an object (in the constructor or in
+// reset()) by taking over the caller's existing ownership claim. The caller
+// must own the object it gives to scoped_nsobject<>, and relinquishes an
+// ownership claim to that object. scoped_nsobject<> does not call -retain,
+// callers have to call this manually if appropriate.
+//
+// scoped_nsprotocol<> has the same behavior as scoped_nsobject, but can be used
+// with protocols.
+//
+// scoped_nsobject<> is not to be used for NSAutoreleasePools. For
+// NSAutoreleasePools use ScopedNSAutoreleasePool from
+// scoped_nsautorelease_pool.h instead.
+// We check for bad uses of scoped_nsobject and NSAutoreleasePool at compile
+// time with a template specialization (see below).
+//
+// If Automatic Reference Counting (aka ARC) is enabled then the ownership
+// policy is not controllable by the user as ARC makes it really difficult to
+// transfer ownership (the reference passed to scoped_nsobject constructor is
+// sunk by ARC and __attribute((ns_consumed)) appears to not work correctly
+// with Objective-C++ see https://llvm.org/bugs/show_bug.cgi?id=27887). Due to
+// that, the policy is always to |RETAIN| when using ARC.
+
+namespace internal {
+
+BASE_EXPORT id ScopedNSProtocolTraitsRetain(__unsafe_unretained id obj)
+ __attribute((ns_returns_not_retained));
+BASE_EXPORT id ScopedNSProtocolTraitsAutoRelease(__unsafe_unretained id obj)
+ __attribute((ns_returns_not_retained));
+BASE_EXPORT void ScopedNSProtocolTraitsRelease(__unsafe_unretained id obj);
+
+// Traits for ScopedTypeRef<>. As this class may be compiled from a file with
+// Automatic Reference Counting enabled or not, all methods have annotations to
+// enforce the same code generation in both cases (in particular, the Retain
+// method uses ns_returns_not_retained to prevent ARC from inserting a -release
+// call on the returned value, thus defeating the -retain).
+template <typename NST>
+struct ScopedNSProtocolTraits {
+ static NST InvalidValue() __attribute((ns_returns_not_retained)) {
+ return nil;
+ }
+ static NST Retain(__unsafe_unretained NST nst)
+ __attribute((ns_returns_not_retained)) {
+ return ScopedNSProtocolTraitsRetain(nst);
+ }
+ static void Release(__unsafe_unretained NST nst) {
+ ScopedNSProtocolTraitsRelease(nst);
+ }
+};
+
+} // namespace internal
+
+template <typename NST>
+class scoped_nsprotocol
+ : public ScopedTypeRef<NST, internal::ScopedNSProtocolTraits<NST>> {
+ public:
+ using Traits = internal::ScopedNSProtocolTraits<NST>;
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ explicit scoped_nsprotocol(
+ NST object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+ : ScopedTypeRef<NST, Traits>(object, policy) {}
+#else
+ explicit scoped_nsprotocol(NST object = Traits::InvalidValue())
+ : ScopedTypeRef<NST, Traits>(object, base::scoped_policy::RETAIN) {}
+#endif
+
+ scoped_nsprotocol(const scoped_nsprotocol<NST>& that)
+ : ScopedTypeRef<NST, Traits>(that) {}
+
+ template <typename NSR>
+ explicit scoped_nsprotocol(const scoped_nsprotocol<NSR>& that_as_subclass)
+ : ScopedTypeRef<NST, Traits>(that_as_subclass) {}
+
+ scoped_nsprotocol(scoped_nsprotocol<NST>&& that)
+ : ScopedTypeRef<NST, Traits>(that) {}
+
+ scoped_nsprotocol& operator=(const scoped_nsprotocol<NST>& that) {
+ ScopedTypeRef<NST, Traits>::operator=(that);
+ return *this;
+ }
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ void reset(NST object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy =
+ base::scoped_policy::ASSUME) {
+ ScopedTypeRef<NST, Traits>::reset(object, policy);
+ }
+#else
+ void reset(NST object = Traits::InvalidValue()) {
+ ScopedTypeRef<NST, Traits>::reset(object, base::scoped_policy::RETAIN);
+ }
+#endif
+
+ // Shift reference to the autorelease pool to be released later.
+ NST autorelease() __attribute((ns_returns_not_retained)) {
+ return internal::ScopedNSProtocolTraitsAutoRelease(this->release());
+ }
+};
+
+// Free functions
+template <class C>
+void swap(scoped_nsprotocol<C>& p1, scoped_nsprotocol<C>& p2) {
+ p1.swap(p2);
+}
+
+template <class C>
+bool operator==(C p1, const scoped_nsprotocol<C>& p2) {
+ return p1 == p2.get();
+}
+
+template <class C>
+bool operator!=(C p1, const scoped_nsprotocol<C>& p2) {
+ return p1 != p2.get();
+}
+
+template <typename NST>
+class scoped_nsobject : public scoped_nsprotocol<NST*> {
+ public:
+ using Traits = typename scoped_nsprotocol<NST*>::Traits;
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ explicit scoped_nsobject(
+ NST* object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+ : scoped_nsprotocol<NST*>(object, policy) {}
+#else
+ explicit scoped_nsobject(NST* object = Traits::InvalidValue())
+ : scoped_nsprotocol<NST*>(object) {}
+#endif
+
+ scoped_nsobject(const scoped_nsobject<NST>& that)
+ : scoped_nsprotocol<NST*>(that) {}
+
+ template <typename NSR>
+ explicit scoped_nsobject(const scoped_nsobject<NSR>& that_as_subclass)
+ : scoped_nsprotocol<NST*>(that_as_subclass) {}
+
+ scoped_nsobject(scoped_nsobject<NST>&& that)
+ : scoped_nsprotocol<NST*>(that) {}
+
+ scoped_nsobject& operator=(const scoped_nsobject<NST>& that) {
+ scoped_nsprotocol<NST*>::operator=(that);
+ return *this;
+ }
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ void reset(NST* object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy =
+ base::scoped_policy::ASSUME) {
+ scoped_nsprotocol<NST*>::reset(object, policy);
+ }
+#else
+ void reset(NST* object = Traits::InvalidValue()) {
+ scoped_nsprotocol<NST*>::reset(object);
+ }
+#endif
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ static_assert(std::is_same<NST, NSAutoreleasePool>::value == false,
+ "Use ScopedNSAutoreleasePool instead");
+#endif
+};
+
+// Specialization to make scoped_nsobject<id> work.
+template<>
+class scoped_nsobject<id> : public scoped_nsprotocol<id> {
+ public:
+ using Traits = typename scoped_nsprotocol<id>::Traits;
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ explicit scoped_nsobject(
+ id object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+ : scoped_nsprotocol<id>(object, policy) {}
+#else
+ explicit scoped_nsobject(id object = Traits::InvalidValue())
+ : scoped_nsprotocol<id>(object) {}
+#endif
+
+ scoped_nsobject(const scoped_nsobject<id>& that)
+ : scoped_nsprotocol<id>(that) {}
+
+ template <typename NSR>
+ explicit scoped_nsobject(const scoped_nsobject<NSR>& that_as_subclass)
+ : scoped_nsprotocol<id>(that_as_subclass) {}
+
+ scoped_nsobject(scoped_nsobject<id>&& that) : scoped_nsprotocol<id>(that) {}
+
+ scoped_nsobject& operator=(const scoped_nsobject<id>& that) {
+ scoped_nsprotocol<id>::operator=(that);
+ return *this;
+ }
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+ void reset(id object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy =
+ base::scoped_policy::ASSUME) {
+ scoped_nsprotocol<id>::reset(object, policy);
+ }
+#else
+ void reset(id object = Traits::InvalidValue()) {
+ scoped_nsprotocol<id>::reset(object);
+ }
+#endif
+};
+
+} // namespace base
+
+#endif // BASE_MAC_SCOPED_NSOBJECT_H_
diff --git a/libchrome/base/mac/scoped_typeref.h b/libchrome/base/mac/scoped_typeref.h
new file mode 100644
index 0000000..b8d8a14
--- /dev/null
+++ b/libchrome/base/mac/scoped_typeref.h
@@ -0,0 +1,139 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_TYPEREF_H_
+#define BASE_MAC_SCOPED_TYPEREF_H_
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/memory/scoped_policy.h"
+
+namespace base {
+
+// ScopedTypeRef<> is patterned after std::unique_ptr<>, but maintains ownership
+// of a reference to any type that is maintained by Retain and Release methods.
+//
+// The Traits structure must provide the Retain and Release methods for type T.
+// A default ScopedTypeRefTraits is used but not defined, and should be defined
+// for each type to use this interface. For example, an appropriate definition
+// of ScopedTypeRefTraits for CGLContextObj would be:
+//
+// template<>
+// struct ScopedTypeRefTraits<CGLContextObj> {
+// static CGLContextObj InvalidValue() { return nullptr; }
+// static CGLContextObj Retain(CGLContextObj object) {
+// CGLContextRetain(object);
+// return object;
+// }
+// static void Release(CGLContextObj object) { CGLContextRelease(object); }
+// };
+//
+// For the many types that have pass-by-pointer create functions, the function
+// InitializeInto() is provided to allow direct initialization and assumption
+// of ownership of the object. For example, continuing to use the above
+// CGLContextObj specialization:
+//
+// base::ScopedTypeRef<CGLContextObj> context;
+// CGLCreateContext(pixel_format, share_group, context.InitializeInto());
+//
+// For initialization with an existing object, the caller may specify whether
+// the ScopedTypeRef<> being initialized is assuming the caller's existing
+// ownership of the object (and should not call Retain in initialization) or if
+// it should not assume this ownership and must create its own (by calling
+// Retain in initialization). This behavior is based on the |policy| parameter,
+// with |ASSUME| for the former and |RETAIN| for the latter. The default policy
+// is to |ASSUME|.
+
+template<typename T>
+struct ScopedTypeRefTraits;
+
+template<typename T, typename Traits = ScopedTypeRefTraits<T>>
+class ScopedTypeRef {
+ public:
+ typedef T element_type;
+
+ explicit ScopedTypeRef(
+ __unsafe_unretained T object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+ : object_(object) {
+ if (object_ && policy == base::scoped_policy::RETAIN)
+ object_ = Traits::Retain(object_);
+ }
+
+ ScopedTypeRef(const ScopedTypeRef<T, Traits>& that)
+ : object_(that.object_) {
+ if (object_)
+ object_ = Traits::Retain(object_);
+ }
+
+ // This allows passing an object to a function that takes its superclass.
+ template <typename R, typename RTraits>
+ explicit ScopedTypeRef(const ScopedTypeRef<R, RTraits>& that_as_subclass)
+ : object_(that_as_subclass.get()) {
+ if (object_)
+ object_ = Traits::Retain(object_);
+ }
+
+ ScopedTypeRef(ScopedTypeRef<T, Traits>&& that) : object_(that.object_) {
+ that.object_ = Traits::InvalidValue();
+ }
+
+ ~ScopedTypeRef() {
+ if (object_)
+ Traits::Release(object_);
+ }
+
+ ScopedTypeRef& operator=(const ScopedTypeRef<T, Traits>& that) {
+ reset(that.get(), base::scoped_policy::RETAIN);
+ return *this;
+ }
+
+ // This is to be used only to take ownership of objects that are created
+ // by pass-by-pointer create functions. To enforce this, require that the
+ // object be reset to NULL before this may be used.
+ T* InitializeInto() WARN_UNUSED_RESULT {
+ DCHECK(!object_);
+ return &object_;
+ }
+
+ void reset(__unsafe_unretained T object = Traits::InvalidValue(),
+ base::scoped_policy::OwnershipPolicy policy =
+ base::scoped_policy::ASSUME) {
+ if (object && policy == base::scoped_policy::RETAIN)
+ object = Traits::Retain(object);
+ if (object_)
+ Traits::Release(object_);
+ object_ = object;
+ }
+
+ bool operator==(__unsafe_unretained T that) const { return object_ == that; }
+
+ bool operator!=(__unsafe_unretained T that) const { return object_ != that; }
+
+ operator T() const __attribute((ns_returns_not_retained)) { return object_; }
+
+ T get() const __attribute((ns_returns_not_retained)) { return object_; }
+
+ void swap(ScopedTypeRef& that) {
+ __unsafe_unretained T temp = that.object_;
+ that.object_ = object_;
+ object_ = temp;
+ }
+
+ // ScopedTypeRef<>::release() is like std::unique_ptr<>::release. It is NOT
+ // a wrapper for Release(). To force a ScopedTypeRef<> object to call
+ // Release(), use ScopedTypeRef<>::reset().
+ T release() __attribute((ns_returns_not_retained)) WARN_UNUSED_RESULT {
+ __unsafe_unretained T temp = object_;
+ object_ = Traits::InvalidValue();
+ return temp;
+ }
+
+ private:
+ __unsafe_unretained T object_;
+};
+
+} // namespace base
+
+#endif // BASE_MAC_SCOPED_TYPEREF_H_
diff --git a/libchrome/base/mac/sdk_forward_declarations.h b/libchrome/base/mac/sdk_forward_declarations.h
new file mode 100644
index 0000000..818a1d0
--- /dev/null
+++ b/libchrome/base/mac/sdk_forward_declarations.h
@@ -0,0 +1,532 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains forward declarations for items in later SDKs than the
+// default one with which Chromium is built (currently 10.6).
+// If you call any function from this header, be sure to check at runtime for
+// respondsToSelector: before calling these functions (else your code will crash
+// on older OS X versions that Chrome still supports).
+
+#ifndef BASE_MAC_SDK_FORWARD_DECLARATIONS_H_
+#define BASE_MAC_SDK_FORWARD_DECLARATIONS_H_
+
+#import <AppKit/AppKit.h>
+#import <CoreWLAN/CoreWLAN.h>
+#import <ImageCaptureCore/ImageCaptureCore.h>
+#import <IOBluetooth/IOBluetooth.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+
+// ----------------------------------------------------------------------------
+// Either define or forward declare classes only available in OSX 10.7+.
+// ----------------------------------------------------------------------------
+
+#if !defined(MAC_OS_X_VERSION_10_7) || \
+ MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7
+
+@interface CWChannel : NSObject
+@end
+
+@interface CBPeripheral : NSObject
+@end
+
+@interface CBCentralManager : NSObject
+@end
+
+@interface CBUUID : NSObject
+@end
+
+#else
+
+@class CWChannel;
+@class CBPeripheral;
+@class CBCentralManager;
+@class CBUUID;
+
+#endif // MAC_OS_X_VERSION_10_7
+
+#if !defined(MAC_OS_X_VERSION_10_8) || \
+ MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_8
+
+@interface NSUUID : NSObject
+@end
+
+#else
+
+@class NSUUID;
+
+#endif // MAC_OS_X_VERSION_10_8
+
+#if !defined(MAC_OS_X_VERSION_10_9) || \
+ MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_9
+
+// NSProgress is public API in 10.9, but a version of it exists and is usable
+// in 10.8.
+@interface NSProgress : NSObject
+@end
+
+@interface NSAppearance : NSObject
+@end
+
+#else
+
+@class NSProgress;
+@class NSAppearance;
+
+#endif // MAC_OS_X_VERSION_10_9
+
+// ----------------------------------------------------------------------------
+// Define typedefs, enums, and protocols not available in the version of the
+// OSX SDK being compiled against.
+// ----------------------------------------------------------------------------
+
+#if !defined(MAC_OS_X_VERSION_10_7) || \
+ MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7
+
+enum {
+ NSEventPhaseNone = 0, // event not associated with a phase.
+ NSEventPhaseBegan = 0x1 << 0,
+ NSEventPhaseStationary = 0x1 << 1,
+ NSEventPhaseChanged = 0x1 << 2,
+ NSEventPhaseEnded = 0x1 << 3,
+ NSEventPhaseCancelled = 0x1 << 4
+};
+typedef NSUInteger NSEventPhase;
+
+enum {
+ NSFullScreenWindowMask = 1 << 14,
+};
+
+enum {
+ NSApplicationPresentationFullScreen = 1 << 10,
+};
+
+enum {
+ NSWindowCollectionBehaviorFullScreenPrimary = 1 << 7,
+ NSWindowCollectionBehaviorFullScreenAuxiliary = 1 << 8,
+};
+
+enum {
+ NSEventSwipeTrackingLockDirection = 0x1 << 0,
+ NSEventSwipeTrackingClampGestureAmount = 0x1 << 1,
+};
+typedef NSUInteger NSEventSwipeTrackingOptions;
+
+enum {
+ NSWindowAnimationBehaviorDefault = 0,
+ NSWindowAnimationBehaviorNone = 2,
+ NSWindowAnimationBehaviorDocumentWindow = 3,
+ NSWindowAnimationBehaviorUtilityWindow = 4,
+ NSWindowAnimationBehaviorAlertPanel = 5
+};
+typedef NSInteger NSWindowAnimationBehavior;
+
+enum {
+ NSWindowDocumentVersionsButton = 6,
+ NSWindowFullScreenButton,
+};
+typedef NSUInteger NSWindowButton;
+
+enum CWChannelBand {
+ kCWChannelBandUnknown = 0,
+ kCWChannelBand2GHz = 1,
+ kCWChannelBand5GHz = 2,
+};
+
+enum {
+ kCWSecurityNone = 0,
+ kCWSecurityWEP = 1,
+ kCWSecurityWPAPersonal = 2,
+ kCWSecurityWPAPersonalMixed = 3,
+ kCWSecurityWPA2Personal = 4,
+ kCWSecurityPersonal = 5,
+ kCWSecurityDynamicWEP = 6,
+ kCWSecurityWPAEnterprise = 7,
+ kCWSecurityWPAEnterpriseMixed = 8,
+ kCWSecurityWPA2Enterprise = 9,
+ kCWSecurityEnterprise = 10,
+ kCWSecurityUnknown = NSIntegerMax,
+};
+
+typedef NSInteger CWSecurity;
+
+enum {
+ kBluetoothFeatureLESupportedController = (1 << 6L),
+};
+
+@protocol IOBluetoothDeviceInquiryDelegate
+- (void)deviceInquiryStarted:(IOBluetoothDeviceInquiry*)sender;
+- (void)deviceInquiryDeviceFound:(IOBluetoothDeviceInquiry*)sender
+ device:(IOBluetoothDevice*)device;
+- (void)deviceInquiryComplete:(IOBluetoothDeviceInquiry*)sender
+ error:(IOReturn)error
+ aborted:(BOOL)aborted;
+@end
+
+enum {
+ CBPeripheralStateDisconnected = 0,
+ CBPeripheralStateConnecting,
+ CBPeripheralStateConnected,
+};
+typedef NSInteger CBPeripheralState;
+
+enum {
+ CBCentralManagerStateUnknown = 0,
+ CBCentralManagerStateResetting,
+ CBCentralManagerStateUnsupported,
+ CBCentralManagerStateUnauthorized,
+ CBCentralManagerStatePoweredOff,
+ CBCentralManagerStatePoweredOn,
+};
+typedef NSInteger CBCentralManagerState;
+
+@protocol CBCentralManagerDelegate;
+
+@protocol CBCentralManagerDelegate<NSObject>
+- (void)centralManagerDidUpdateState:(CBCentralManager*)central;
+- (void)centralManager:(CBCentralManager*)central
+ didDiscoverPeripheral:(CBPeripheral*)peripheral
+ advertisementData:(NSDictionary*)advertisementData
+ RSSI:(NSNumber*)RSSI;
+@end
+
+#endif // MAC_OS_X_VERSION_10_7
+
+#if !defined(MAC_OS_X_VERSION_10_8) || \
+ MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_8
+
+enum { NSEventPhaseMayBegin = 0x1 << 5 };
+
+#endif // MAC_OS_X_VERSION_10_8
+
+#if !defined(MAC_OS_X_VERSION_10_9) || \
+ MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_9
+
+enum {
+ NSWindowOcclusionStateVisible = 1UL << 1,
+};
+typedef NSUInteger NSWindowOcclusionState;
+
+enum { NSWorkspaceLaunchWithErrorPresentation = 0x00000040 };
+
+#endif // MAC_OS_X_VERSION_10_9
+
+#if !defined(MAC_OS_X_VERSION_10_11) || \
+ MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_11
+
+enum {
+ NSPressureBehaviorUnknown = -1,
+ NSPressureBehaviorPrimaryDefault = 0,
+ NSPressureBehaviorPrimaryClick = 1,
+ NSPressureBehaviorPrimaryGeneric = 2,
+ NSPressureBehaviorPrimaryAccelerator = 3,
+ NSPressureBehaviorPrimaryDeepClick = 5,
+ NSPressureBehaviorPrimaryDeepDrag = 6
+};
+typedef NSInteger NSPressureBehavior;
+
+@interface NSPressureConfiguration : NSObject
+- (instancetype)initWithPressureBehavior:(NSPressureBehavior)pressureBehavior;
+@end
+
+#endif // MAC_OS_X_VERSION_10_11
+
+// ----------------------------------------------------------------------------
+// Define NSStrings only available in newer versions of the OSX SDK to force
+// them to be statically linked.
+// ----------------------------------------------------------------------------
+
+extern "C" {
+#if !defined(MAC_OS_X_VERSION_10_7) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
+BASE_EXPORT extern NSString* const NSWindowWillEnterFullScreenNotification;
+BASE_EXPORT extern NSString* const NSWindowWillExitFullScreenNotification;
+BASE_EXPORT extern NSString* const NSWindowDidEnterFullScreenNotification;
+BASE_EXPORT extern NSString* const NSWindowDidExitFullScreenNotification;
+BASE_EXPORT extern NSString* const
+ NSWindowDidChangeBackingPropertiesNotification;
+BASE_EXPORT extern NSString* const CBAdvertisementDataServiceDataKey;
+BASE_EXPORT extern NSString* const CBAdvertisementDataServiceUUIDsKey;
+#endif // MAC_OS_X_VERSION_10_7
+
+#if !defined(MAC_OS_X_VERSION_10_9) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
+BASE_EXPORT extern NSString* const NSWindowDidChangeOcclusionStateNotification;
+BASE_EXPORT extern NSString* const CBAdvertisementDataOverflowServiceUUIDsKey;
+BASE_EXPORT extern NSString* const CBAdvertisementDataIsConnectable;
+#endif // MAC_OS_X_VERSION_10_9
+
+#if !defined(MAC_OS_X_VERSION_10_10) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
+BASE_EXPORT extern NSString* const NSUserActivityTypeBrowsingWeb;
+BASE_EXPORT extern NSString* const NSAppearanceNameVibrantDark;
+BASE_EXPORT extern NSString* const NSAppearanceNameVibrantLight;
+#endif // MAC_OS_X_VERSION_10_10
+} // extern "C"
+
+// ----------------------------------------------------------------------------
+// If compiling against an older version of the OSX SDK, declare functions that
+// are available in newer versions of the OSX SDK. If compiling against a newer
+// version of the OSX SDK, redeclare those same functions to suppress
+// -Wpartial-availability warnings.
+// ----------------------------------------------------------------------------
+
+// Once Chrome no longer supports OSX 10.6, everything within this preprocessor
+// block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_7) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
+
+@interface NSEvent (LionSDK)
++ (BOOL)isSwipeTrackingFromScrollEventsEnabled;
+- (NSEventPhase)momentumPhase;
+- (NSEventPhase)phase;
+- (BOOL)hasPreciseScrollingDeltas;
+- (CGFloat)scrollingDeltaX;
+- (CGFloat)scrollingDeltaY;
+- (void)trackSwipeEventWithOptions:(NSEventSwipeTrackingOptions)options
+ dampenAmountThresholdMin:(CGFloat)minDampenThreshold
+ max:(CGFloat)maxDampenThreshold
+ usingHandler:(void (^)(CGFloat gestureAmount,
+ NSEventPhase phase,
+ BOOL isComplete,
+ BOOL* stop))trackingHandler;
+- (BOOL)isDirectionInvertedFromDevice;
+@end
+
+@interface NSApplication (LionSDK)
+- (void)disableRelaunchOnLogin;
+@end
+
+@interface CALayer (LionSDK)
+- (CGFloat)contentsScale;
+- (void)setContentsScale:(CGFloat)contentsScale;
+@end
+
+@interface NSScreen (LionSDK)
+- (CGFloat)backingScaleFactor;
+- (NSRect)convertRectToBacking:(NSRect)aRect;
+@end
+
+@interface NSWindow (LionSDK)
+- (CGFloat)backingScaleFactor;
+- (NSWindowAnimationBehavior)animationBehavior;
+- (void)setAnimationBehavior:(NSWindowAnimationBehavior)newAnimationBehavior;
+- (void)toggleFullScreen:(id)sender;
+- (void)setRestorable:(BOOL)flag;
+- (NSRect)convertRectFromScreen:(NSRect)aRect;
+- (NSRect)convertRectToScreen:(NSRect)aRect;
+@end
+
+@interface NSCursor (LionSDKDeclarations)
++ (NSCursor*)IBeamCursorForVerticalLayout;
+@end
+
+@interface NSAnimationContext (LionSDK)
++ (void)runAnimationGroup:(void (^)(NSAnimationContext* context))changes
+ completionHandler:(void (^)(void))completionHandler;
+@property(copy) void (^completionHandler)(void);
+@end
+
+@interface NSView (LionSDK)
+- (NSSize)convertSizeFromBacking:(NSSize)size;
+- (void)setWantsBestResolutionOpenGLSurface:(BOOL)flag;
+- (NSDraggingSession*)beginDraggingSessionWithItems:(NSArray*)items
+ event:(NSEvent*)event
+ source:
+ (id<NSDraggingSource>)source;
+@end
+
+@interface NSObject (ICCameraDeviceDelegateLionSDK)
+- (void)deviceDidBecomeReadyWithCompleteContentCatalog:(ICDevice*)device;
+- (void)didDownloadFile:(ICCameraFile*)file
+ error:(NSError*)error
+ options:(NSDictionary*)options
+ contextInfo:(void*)contextInfo;
+@end
+
+@interface CWInterface (LionSDK)
+- (BOOL)associateToNetwork:(CWNetwork*)network
+ password:(NSString*)password
+ error:(NSError**)error;
+- (NSSet*)scanForNetworksWithName:(NSString*)networkName error:(NSError**)error;
+@end
+
+@interface CWChannel (LionSDK)
+@property(readonly) CWChannelBand channelBand;
+@end
+
+@interface CWNetwork (LionSDK)
+@property(readonly) CWChannel* wlanChannel;
+@property(readonly) NSInteger rssiValue;
+- (BOOL)supportsSecurity:(CWSecurity)security;
+@end
+
+@interface IOBluetoothHostController (LionSDK)
+- (NSString*)nameAsString;
+- (BluetoothHCIPowerState)powerState;
+@end
+
+@interface IOBluetoothL2CAPChannel (LionSDK)
+@property(readonly) BluetoothL2CAPMTU outgoingMTU;
+@end
+
+@interface IOBluetoothDevice (LionSDK)
+- (NSString*)addressString;
+- (unsigned int)classOfDevice;
+- (BluetoothConnectionHandle)connectionHandle;
+- (BluetoothHCIRSSIValue)rawRSSI;
+- (NSArray*)services;
+- (IOReturn)performSDPQuery:(id)target uuids:(NSArray*)uuids;
+@end
+
+@interface CBPeripheral (LionSDK)
+@property(readonly, nonatomic) CFUUIDRef UUID;
+@property(retain, readonly) NSString* name;
+@property(readonly) BOOL isConnected;
+@end
+
+@interface CBCentralManager (LionSDK)
+@property(readonly) CBCentralManagerState state;
+- (id)initWithDelegate:(id<CBCentralManagerDelegate>)delegate
+ queue:(dispatch_queue_t)queue;
+- (void)scanForPeripheralsWithServices:(NSArray*)serviceUUIDs
+ options:(NSDictionary*)options;
+- (void)stopScan;
+@end
+
+@interface CBUUID (LionSDK)
+@property(nonatomic, readonly) NSData* data;
++ (CBUUID*)UUIDWithString:(NSString*)theString;
+@end
+
+BASE_EXPORT extern "C" void NSAccessibilityPostNotificationWithUserInfo(
+ id object,
+ NSString* notification,
+ NSDictionary* user_info);
+
+#endif // MAC_OS_X_VERSION_10_7
+
+// Once Chrome no longer supports OSX 10.7, everything within this preprocessor
+// block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_8) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_8
+
+@interface NSColor (MountainLionSDK)
+- (CGColorRef)CGColor;
+@end
+
+@interface NSUUID (MountainLionSDK)
+- (NSString*)UUIDString;
+@end
+
+@interface NSControl (MountainLionSDK)
+@property BOOL allowsExpansionToolTips;
+@end
+
+#endif // MAC_OS_X_VERSION_10_8
+
+// Once Chrome no longer supports OSX 10.8, everything within this preprocessor
+// block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_9) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
+
+@interface NSProgress (MavericksSDK)
+
+- (instancetype)initWithParent:(NSProgress*)parentProgressOrNil
+ userInfo:(NSDictionary*)userInfoOrNil;
+@property(copy) NSString* kind;
+
+@property int64_t totalUnitCount;
+@property int64_t completedUnitCount;
+
+@property(getter=isCancellable) BOOL cancellable;
+@property(getter=isPausable) BOOL pausable;
+@property(readonly, getter=isCancelled) BOOL cancelled;
+@property(readonly, getter=isPaused) BOOL paused;
+@property(copy) void (^cancellationHandler)(void);
+@property(copy) void (^pausingHandler)(void);
+- (void)cancel;
+- (void)pause;
+
+- (void)setUserInfoObject:(id)objectOrNil forKey:(NSString*)key;
+- (NSDictionary*)userInfo;
+
+@property(readonly, getter=isIndeterminate) BOOL indeterminate;
+@property(readonly) double fractionCompleted;
+
+- (void)publish;
+- (void)unpublish;
+
+@end
+
+@interface NSScreen (MavericksSDK)
++ (BOOL)screensHaveSeparateSpaces;
+@end
+
+@interface NSView (MavericksSDK)
+- (void)setCanDrawSubviewsIntoLayer:(BOOL)flag;
+- (void)setAppearance:(NSAppearance*)appearance;
+- (NSAppearance*)effectiveAppearance;
+@end
+
+@interface NSWindow (MavericksSDK)
+- (NSWindowOcclusionState)occlusionState;
+@end
+
+@interface NSAppearance (MavericksSDK)
++ (id<NSObject>)appearanceNamed:(NSString*)name;
+@end
+
+@interface CBPeripheral (MavericksSDK)
+@property(readonly, nonatomic) NSUUID* identifier;
+@end
+
+#endif // MAC_OS_X_VERSION_10_9
+
+// Once Chrome no longer supports OSX 10.9, everything within this preprocessor
+// block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_10) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
+
+@interface CBUUID (YosemiteSDK)
+- (NSString*)UUIDString;
+@end
+
+@interface NSViewController (YosemiteSDK)
+- (void)viewDidLoad;
+@end
+
+@interface NSWindow (YosemiteSDK)
+- (void)setTitlebarAppearsTransparent:(BOOL)flag;
+@end
+
+#endif // MAC_OS_X_VERSION_10_10
+
+// Once Chrome no longer supports OSX 10.10.2, everything within this
+// preprocessor block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_10_3) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10_3
+
+@interface NSEvent (YosemiteSDK)
+@property(readonly) NSInteger stage;
+@end
+
+@interface NSView (YosemiteSDK)
+- (void)setPressureConfiguration:(NSPressureConfiguration*)aConfiguration;
+@end
+
+#endif // MAC_OS_X_VERSION_10_10_3
+
+// ----------------------------------------------------------------------------
+// The symbol for kCWSSIDDidChangeNotification is available in the
+// CoreWLAN.framework for OSX versions 10.6 through 10.10. The symbol is not
+// declared in the OSX 10.9+ SDK, so when compiling against an OSX 10.9+ SDK,
+// declare the symbol.
+// ----------------------------------------------------------------------------
+#if defined(MAC_OS_X_VERSION_10_9) && \
+ MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_9
+BASE_EXPORT extern "C" NSString* const kCWSSIDDidChangeNotification;
+#endif
+#endif // BASE_MAC_SDK_FORWARD_DECLARATIONS_H_
diff --git a/libchrome/base/mac/sdk_forward_declarations.mm b/libchrome/base/mac/sdk_forward_declarations.mm
new file mode 100644
index 0000000..4e1d7ec
--- /dev/null
+++ b/libchrome/base/mac/sdk_forward_declarations.mm
@@ -0,0 +1,46 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/sdk_forward_declarations.h"
+
+#if !defined(MAC_OS_X_VERSION_10_7) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
+NSString* const NSWindowWillEnterFullScreenNotification =
+ @"NSWindowWillEnterFullScreenNotification";
+
+NSString* const NSWindowWillExitFullScreenNotification =
+ @"NSWindowWillExitFullScreenNotification";
+
+NSString* const NSWindowDidEnterFullScreenNotification =
+ @"NSWindowDidEnterFullScreenNotification";
+
+NSString* const NSWindowDidExitFullScreenNotification =
+ @"NSWindowDidExitFullScreenNotification";
+
+NSString* const NSWindowDidChangeBackingPropertiesNotification =
+ @"NSWindowDidChangeBackingPropertiesNotification";
+
+NSString* const CBAdvertisementDataServiceDataKey = @"kCBAdvDataServiceData";
+
+NSString* const CBAdvertisementDataServiceUUIDsKey = @"kCBAdvDataServiceUUIDs";
+#endif // MAC_OS_X_VERSION_10_7
+
+#if !defined(MAC_OS_X_VERSION_10_9) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
+NSString* const NSWindowDidChangeOcclusionStateNotification =
+ @"NSWindowDidChangeOcclusionStateNotification";
+
+NSString* const CBAdvertisementDataOverflowServiceUUIDsKey =
+ @"kCBAdvDataOverflowServiceUUIDs";
+
+NSString* const CBAdvertisementDataIsConnectable = @"kCBAdvDataIsConnectable";
+#endif // MAC_OS_X_VERSION_10_9
+
+#if !defined(MAC_OS_X_VERSION_10_10) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
+NSString* const NSUserActivityTypeBrowsingWeb =
+ @"NSUserActivityTypeBrowsingWeb";
+
+NSString* const NSAppearanceNameVibrantDark = @"NSAppearanceNameVibrantDark";
+#endif // MAC_OS_X_VERSION_10_10
diff --git a/libchrome/base/macros.h b/libchrome/base/macros.h
new file mode 100644
index 0000000..4c62300
--- /dev/null
+++ b/libchrome/base/macros.h
@@ -0,0 +1,105 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains macros and macro-like constructs (e.g., templates) that
+// are commonly used throughout Chromium source. (It may also contain things
+// that are closely related to things that are commonly used that belong in this
+// file.)
+
+#ifndef BASE_MACROS_H_
+#define BASE_MACROS_H_
+
+#include <stddef.h> // For size_t.
+
+#if defined(ANDROID)
+// Prefer Android's libbase definitions to our own.
+#include <android-base/macros.h>
+#endif // defined(ANDROID)
+
+// Put this in the declarations for a class to be uncopyable.
+#if !defined(DISALLOW_COPY)
+#define DISALLOW_COPY(TypeName) \
+ TypeName(const TypeName&) = delete
+#endif
+
+// Put this in the declarations for a class to be unassignable.
+#if !defined(DISALLOW_ASSIGN)
+#define DISALLOW_ASSIGN(TypeName) \
+ void operator=(const TypeName&) = delete
+#endif
+
+// A macro to disallow the copy constructor and operator= functions
+// This should be used in the private: declarations for a class
+// We define this macro conditionally as it may be defined by other libraries.
+#if !defined(DISALLOW_COPY_AND_ASSIGN)
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&) = delete; \
+ void operator=(const TypeName&) = delete
+#endif
+
+// A macro to disallow all the implicit constructors, namely the
+// default constructor, copy constructor and operator= functions.
+//
+// This should be used in the private: declarations for a class
+// that wants to prevent anyone from instantiating it. This is
+// especially useful for classes containing only static methods.
+#if !defined(DISALLOW_IMPLICIT_CONSTRUCTORS)
+#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+ TypeName() = delete; \
+ DISALLOW_COPY_AND_ASSIGN(TypeName)
+#endif
+
+// The arraysize(arr) macro returns the # of elements in an array arr. The
+// expression is a compile-time constant, and therefore can be used in defining
+// new arrays, for example. If you use arraysize on a pointer by mistake, you
+// will get a compile-time error. For the technical details, refer to
+// http://blogs.msdn.com/b/the1/archive/2004/05/07/128242.aspx.
+
+// This template function declaration is used in defining arraysize.
+// Note that the function doesn't need an implementation, as we only
+// use its type.
+#if !defined(arraysize)
+template <typename T, size_t N> char (&ArraySizeHelper(T (&array)[N]))[N];
+#define arraysize(array) (sizeof(ArraySizeHelper(array)))
+#endif
+
+// Used to explicitly mark the return value of a function as unused. If you are
+// really sure you don't want to do anything with the return value of a function
+// that has been marked WARN_UNUSED_RESULT, wrap it with this. Example:
+//
+// std::unique_ptr<MyType> my_var = ...;
+// if (TakeOwnership(my_var.get()) == SUCCESS)
+// ignore_result(my_var.release());
+//
+template<typename T>
+inline void ignore_result(const T&) {
+}
+
+// The following enum should be used only as a constructor argument to indicate
+// that the variable has static storage class, and that the constructor should
+// do nothing to its state. It indicates to the reader that it is legal to
+// declare a static instance of the class, provided the constructor is given
+// the base::LINKER_INITIALIZED argument. Normally, it is unsafe to declare a
+// static variable that has a constructor or a destructor because invocation
+// order is undefined. However, IF the type can be initialized by filling with
+// zeroes (which the loader does for static variables), AND the destructor also
+// does nothing to the storage, AND there are no virtual methods, then a
+// constructor declared as
+// explicit MyClass(base::LinkerInitialized x) {}
+// and invoked as
+// static MyClass my_variable_name(base::LINKER_INITIALIZED);
+namespace base {
+enum LinkerInitialized { LINKER_INITIALIZED };
+
+// Use these to declare and define a static local variable (static T;) so that
+// it is leaked so that its destructors are not called at exit. If you need
+// thread-safe initialization, use base/lazy_instance.h instead.
+#if !defined(CR_DEFINE_STATIC_LOCAL)
+#define CR_DEFINE_STATIC_LOCAL(type, name, arguments) \
+ static type& name = *new type arguments
+#endif
+
+}  // namespace base
+
+#endif // BASE_MACROS_H_
diff --git a/libchrome/base/md5.cc b/libchrome/base/md5.cc
new file mode 100644
index 0000000..72c774d
--- /dev/null
+++ b/libchrome/base/md5.cc
@@ -0,0 +1,299 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The original file was copied from sqlite, and was in the public domain.
+
+/*
+ * This code implements the MD5 message-digest algorithm.
+ * The algorithm is due to Ron Rivest. This code was
+ * written by Colin Plumb in 1993, no copyright is claimed.
+ * This code is in the public domain; do with it what you wish.
+ *
+ * Equivalent code is available from RSA Data Security, Inc.
+ * This code has been tested against that, and is equivalent,
+ * except that you don't need to include two pages of legalese
+ * with every copy.
+ *
+ * To compute the message digest of a chunk of bytes, declare an
+ * MD5Context structure, pass it to MD5Init, call MD5Update as
+ * needed on buffers full of bytes, and then call MD5Final, which
+ * will fill a supplied 16-byte array with the digest.
+ */
+
+#include "base/md5.h"
+
+#include <stddef.h>
+
+namespace {
+
+struct Context {
+ uint32_t buf[4];
+ uint32_t bits[2];
+ uint8_t in[64];
+};
+
+/*
+ * Note: this code is harmless on little-endian machines.
+ */
+void byteReverse(uint8_t* buf, unsigned longs) {
+ do {
+ uint32_t temp = static_cast<uint32_t>(
+ static_cast<unsigned>(buf[3]) << 8 |
+ buf[2]) << 16 |
+ (static_cast<unsigned>(buf[1]) << 8 | buf[0]);
+ *reinterpret_cast<uint32_t*>(buf) = temp;
+ buf += 4;
+ } while (--longs);
+}
+
+/* The four core functions - F1 is optimized somewhat */
+
+/* #define F1(x, y, z) (x & y | ~x & z) */
+#define F1(x, y, z) (z ^ (x & (y ^ z)))
+#define F2(x, y, z) F1(z, x, y)
+#define F3(x, y, z) (x ^ y ^ z)
+#define F4(x, y, z) (y ^ (x | ~z))
+
+/* This is the central step in the MD5 algorithm. */
+#define MD5STEP(f, w, x, y, z, data, s) \
+ (w += f(x, y, z) + data, w = w << s | w >> (32 - s), w += x)
+
+/*
+ * The core of the MD5 algorithm, this alters an existing MD5 hash to
+ * reflect the addition of 16 longwords of new data. MD5Update blocks
+ * the data and converts bytes into longwords for this routine.
+ */
+void MD5Transform(uint32_t buf[4], const uint32_t in[16]) {
+ uint32_t a, b, c, d;
+
+ a = buf[0];
+ b = buf[1];
+ c = buf[2];
+ d = buf[3];
+
+ MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
+ MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
+ MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
+ MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
+ MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
+ MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
+ MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
+ MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
+ MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
+ MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
+ MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
+ MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
+ MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
+ MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
+ MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
+ MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
+
+ MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
+ MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
+ MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
+ MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
+ MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
+ MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
+ MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
+ MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
+ MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
+ MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
+ MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
+ MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
+ MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
+ MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
+ MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
+ MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
+
+ MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
+ MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
+ MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
+ MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
+ MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
+ MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
+ MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
+ MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
+ MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
+ MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
+ MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
+ MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
+ MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
+ MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
+ MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
+ MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
+
+ MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
+ MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
+ MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
+ MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
+ MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
+ MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
+ MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
+ MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
+ MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
+ MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
+ MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
+ MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
+ MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
+ MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
+ MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
+ MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
+
+ buf[0] += a;
+ buf[1] += b;
+ buf[2] += c;
+ buf[3] += d;
+}
+
+} // namespace
+
+namespace base {
+
+/*
+ * Start MD5 accumulation. Set bit count to 0 and buffer to mysterious
+ * initialization constants.
+ */
+void MD5Init(MD5Context* context) {
+ struct Context* ctx = reinterpret_cast<struct Context*>(context);
+ ctx->buf[0] = 0x67452301;
+ ctx->buf[1] = 0xefcdab89;
+ ctx->buf[2] = 0x98badcfe;
+ ctx->buf[3] = 0x10325476;
+ ctx->bits[0] = 0;
+ ctx->bits[1] = 0;
+}
+
+/*
+ * Update context to reflect the concatenation of another buffer full
+ * of bytes.
+ */
+void MD5Update(MD5Context* context, const StringPiece& data) {
+ struct Context* ctx = reinterpret_cast<struct Context*>(context);
+ const uint8_t* buf = reinterpret_cast<const uint8_t*>(data.data());
+ size_t len = data.size();
+
+ /* Update bitcount */
+
+ uint32_t t = ctx->bits[0];
+ if ((ctx->bits[0] = t + (static_cast<uint32_t>(len) << 3)) < t)
+ ctx->bits[1]++; /* Carry from low to high */
+ ctx->bits[1] += static_cast<uint32_t>(len >> 29);
+
+  t = (t >> 3) & 0x3f; /* Bytes already buffered in ctx->in */
+
+ /* Handle any leading odd-sized chunks */
+
+ if (t) {
+ uint8_t* p = static_cast<uint8_t*>(ctx->in + t);
+
+ t = 64 - t;
+ if (len < t) {
+ memcpy(p, buf, len);
+ return;
+ }
+ memcpy(p, buf, t);
+ byteReverse(ctx->in, 16);
+ MD5Transform(ctx->buf, reinterpret_cast<uint32_t*>(ctx->in));
+ buf += t;
+ len -= t;
+ }
+
+ /* Process data in 64-byte chunks */
+
+ while (len >= 64) {
+ memcpy(ctx->in, buf, 64);
+ byteReverse(ctx->in, 16);
+ MD5Transform(ctx->buf, reinterpret_cast<uint32_t*>(ctx->in));
+ buf += 64;
+ len -= 64;
+ }
+
+ /* Handle any remaining bytes of data. */
+
+ memcpy(ctx->in, buf, len);
+}
+
+/*
+ * Final wrapup - pad to 64-byte boundary with the bit pattern
+ * 1 0* (64-bit count of bits processed, MSB-first)
+ */
+void MD5Final(MD5Digest* digest, MD5Context* context) {
+ struct Context* ctx = reinterpret_cast<struct Context*>(context);
+ unsigned count;
+ uint8_t* p;
+
+ /* Compute number of bytes mod 64 */
+ count = (ctx->bits[0] >> 3) & 0x3F;
+
+ /* Set the first char of padding to 0x80. This is safe since there is
+ always at least one byte free */
+ p = ctx->in + count;
+ *p++ = 0x80;
+
+ /* Bytes of padding needed to make 64 bytes */
+ count = 64 - 1 - count;
+
+ /* Pad out to 56 mod 64 */
+ if (count < 8) {
+ /* Two lots of padding: Pad the first block to 64 bytes */
+ memset(p, 0, count);
+ byteReverse(ctx->in, 16);
+ MD5Transform(ctx->buf, reinterpret_cast<uint32_t*>(ctx->in));
+
+ /* Now fill the next block with 56 bytes */
+ memset(ctx->in, 0, 56);
+ } else {
+ /* Pad block to 56 bytes */
+ memset(p, 0, count - 8);
+ }
+ byteReverse(ctx->in, 14);
+
+ /* Append length in bits and transform */
+ memcpy(&ctx->in[14 * sizeof(ctx->bits[0])], &ctx->bits[0],
+ sizeof(ctx->bits[0]));
+ memcpy(&ctx->in[15 * sizeof(ctx->bits[1])], &ctx->bits[1],
+ sizeof(ctx->bits[1]));
+
+ MD5Transform(ctx->buf, reinterpret_cast<uint32_t*>(ctx->in));
+ byteReverse(reinterpret_cast<uint8_t*>(ctx->buf), 4);
+ memcpy(digest->a, ctx->buf, 16);
+ memset(ctx, 0, sizeof(*ctx)); /* In case it's sensitive */
+}
+
+void MD5IntermediateFinal(MD5Digest* digest, const MD5Context* context) {
+ /* MD5Final mutates the MD5Context*. Make a copy for generating the
+ intermediate value. */
+ MD5Context context_copy;
+ memcpy(&context_copy, context, sizeof(context_copy));
+ MD5Final(digest, &context_copy);
+}
+
+std::string MD5DigestToBase16(const MD5Digest& digest) {
+ static char const zEncode[] = "0123456789abcdef";
+
+ std::string ret;
+ ret.resize(32);
+
+ for (int i = 0, j = 0; i < 16; i++, j += 2) {
+ uint8_t a = digest.a[i];
+ ret[j] = zEncode[(a >> 4) & 0xf];
+ ret[j + 1] = zEncode[a & 0xf];
+ }
+ return ret;
+}
+
+void MD5Sum(const void* data, size_t length, MD5Digest* digest) {
+ MD5Context ctx;
+ MD5Init(&ctx);
+ MD5Update(&ctx, StringPiece(reinterpret_cast<const char*>(data), length));
+ MD5Final(digest, &ctx);
+}
+
+std::string MD5String(const StringPiece& str) {
+ MD5Digest digest;
+ MD5Sum(str.data(), str.length(), &digest);
+ return MD5DigestToBase16(digest);
+}
+
+} // namespace base
diff --git a/libchrome/base/md5.h b/libchrome/base/md5.h
new file mode 100644
index 0000000..ef64178
--- /dev/null
+++ b/libchrome/base/md5.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MD5_H_
+#define BASE_MD5_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// MD5 stands for Message Digest algorithm 5.
+// MD5 is a robust hash function, designed for cryptography, but often used
+// for file checksums. The code is complex and slow, but has few
+// collisions.
+// See Also:
+// http://en.wikipedia.org/wiki/MD5
+
+// These functions perform MD5 operations. The simplest call is MD5Sum() to
+// generate the MD5 sum of the given data.
+//
+// You can also compute the MD5 sum of data incrementally by making multiple
+// calls to MD5Update():
+// MD5Context ctx; // intermediate MD5 data: do not use
+// MD5Init(&ctx);
+// MD5Update(&ctx, data1, length1);
+// MD5Update(&ctx, data2, length2);
+// ...
+//
+// MD5Digest digest; // the result of the computation
+// MD5Final(&digest, &ctx);
+//
+// You can call MD5DigestToBase16() to generate a string of the digest.
+
+// The output of an MD5 operation.
+struct MD5Digest {
+ uint8_t a[16];
+};
+
+// Used for storing intermediate data during an MD5 computation. Callers
+// should not access the data.
+typedef char MD5Context[88];
+
+// Initializes the given MD5 context structure for subsequent calls to
+// MD5Update().
+BASE_EXPORT void MD5Init(MD5Context* context);
+
+// For the given buffer of |data| as a StringPiece, updates the given MD5
+// context with the sum of the data. You can call this any number of times
+// during the computation, except that MD5Init() must have been called first.
+BASE_EXPORT void MD5Update(MD5Context* context, const StringPiece& data);
+
+// Finalizes the MD5 operation and fills the buffer with the digest.
+BASE_EXPORT void MD5Final(MD5Digest* digest, MD5Context* context);
+
+// MD5IntermediateFinal() generates a digest without finalizing the MD5
+// operation. Can be used to generate digests for the input seen thus far,
+// without affecting the digest generated for the entire input.
+BASE_EXPORT void MD5IntermediateFinal(MD5Digest* digest,
+ const MD5Context* context);
+
+// Converts a digest into human-readable hexadecimal.
+BASE_EXPORT std::string MD5DigestToBase16(const MD5Digest& digest);
+
+// Computes the MD5 sum of the given data buffer with the given length.
+// The given 'digest' structure will be filled with the result data.
+BASE_EXPORT void MD5Sum(const void* data, size_t length, MD5Digest* digest);
+
+// Returns the MD5 (in hexadecimal) of a string.
+BASE_EXPORT std::string MD5String(const StringPiece& str);
+
+} // namespace base
+
+#endif // BASE_MD5_H_
diff --git a/libchrome/base/md5_unittest.cc b/libchrome/base/md5_unittest.cc
new file mode 100644
index 0000000..b27efe9
--- /dev/null
+++ b/libchrome/base/md5_unittest.cc
@@ -0,0 +1,253 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/md5.h"
+
+#include <string.h>
+
+#include <memory>
+#include <string>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(MD5, DigestToBase16) {
+ MD5Digest digest;
+
+ int data[] = {
+ 0xd4, 0x1d, 0x8c, 0xd9,
+ 0x8f, 0x00, 0xb2, 0x04,
+ 0xe9, 0x80, 0x09, 0x98,
+ 0xec, 0xf8, 0x42, 0x7e
+ };
+
+ for (int i = 0; i < 16; ++i)
+ digest.a[i] = data[i] & 0xff;
+
+ std::string actual = MD5DigestToBase16(digest);
+ std::string expected = "d41d8cd98f00b204e9800998ecf8427e";
+
+ EXPECT_EQ(expected, actual);
+}
+
+// Verifies MD5Sum() over zero-length input yields the well-known MD5 of the
+// empty string (d41d8cd98f00b204e9800998ecf8427e, RFC 1321 A.5 test #1).
+TEST(MD5, MD5SumEmptyData) {
+  MD5Digest digest;
+  const char data[] = "";
+
+  MD5Sum(data, strlen(data), &digest);
+
+  int expected[] = {
+    0xd4, 0x1d, 0x8c, 0xd9,
+    0x8f, 0x00, 0xb2, 0x04,
+    0xe9, 0x80, 0x09, 0x98,
+    0xec, 0xf8, 0x42, 0x7e
+  };
+
+  for (int i = 0; i < 16; ++i)
+    EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
+
+TEST(MD5, MD5SumOneByteData) {
+ MD5Digest digest;
+ const char data[] = "a";
+
+ MD5Sum(data, strlen(data), &digest);
+
+ int expected[] = {
+ 0x0c, 0xc1, 0x75, 0xb9,
+ 0xc0, 0xf1, 0xb6, 0xa8,
+ 0x31, 0xc3, 0x99, 0xe2,
+ 0x69, 0x77, 0x26, 0x61
+ };
+
+ for (int i = 0; i < 16; ++i)
+ EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
+}
+
+TEST(MD5, MD5SumLongData) {
+ const int length = 10 * 1024 * 1024 + 1;
+ std::unique_ptr<char[]> data(new char[length]);
+
+ for (int i = 0; i < length; ++i)
+ data[i] = i & 0xFF;
+
+ MD5Digest digest;
+ MD5Sum(data.get(), length, &digest);
+
+ int expected[] = {
+ 0x90, 0xbd, 0x6a, 0xd9,
+ 0x0a, 0xce, 0xf5, 0xad,
+ 0xaa, 0x92, 0x20, 0x3e,
+ 0x21, 0xc7, 0xa1, 0x3e
+ };
+
+ for (int i = 0; i < 16; ++i)
+ EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
+}
+
+TEST(MD5, ContextWithEmptyData) {
+ MD5Context ctx;
+ MD5Init(&ctx);
+
+ MD5Digest digest;
+ MD5Final(&digest, &ctx);
+
+ int expected[] = {
+ 0xd4, 0x1d, 0x8c, 0xd9,
+ 0x8f, 0x00, 0xb2, 0x04,
+ 0xe9, 0x80, 0x09, 0x98,
+ 0xec, 0xf8, 0x42, 0x7e
+ };
+
+ for (int i = 0; i < 16; ++i)
+ EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
+}
+
+TEST(MD5, ContextWithLongData) {
+ MD5Context ctx;
+ MD5Init(&ctx);
+
+ const int length = 10 * 1024 * 1024 + 1;
+ std::unique_ptr<char[]> data(new char[length]);
+
+ for (int i = 0; i < length; ++i)
+ data[i] = i & 0xFF;
+
+ int total = 0;
+ while (total < length) {
+ int len = 4097; // intentionally not 2^k.
+ if (len > length - total)
+ len = length - total;
+
+ MD5Update(&ctx,
+ StringPiece(reinterpret_cast<char*>(data.get() + total), len));
+ total += len;
+ }
+
+ EXPECT_EQ(length, total);
+
+ MD5Digest digest;
+ MD5Final(&digest, &ctx);
+
+ int expected[] = {
+ 0x90, 0xbd, 0x6a, 0xd9,
+ 0x0a, 0xce, 0xf5, 0xad,
+ 0xaa, 0x92, 0x20, 0x3e,
+ 0x21, 0xc7, 0xa1, 0x3e
+ };
+
+ for (int i = 0; i < 16; ++i)
+ EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
+}
+
+// Example data from http://www.ietf.org/rfc/rfc1321.txt A.5 Test Suite
+TEST(MD5, MD5StringTestSuite1) {
+ std::string actual = MD5String("");
+ std::string expected = "d41d8cd98f00b204e9800998ecf8427e";
+ EXPECT_EQ(expected, actual);
+}
+
+TEST(MD5, MD5StringTestSuite2) {
+ std::string actual = MD5String("a");
+ std::string expected = "0cc175b9c0f1b6a831c399e269772661";
+ EXPECT_EQ(expected, actual);
+}
+
+TEST(MD5, MD5StringTestSuite3) {
+ std::string actual = MD5String("abc");
+ std::string expected = "900150983cd24fb0d6963f7d28e17f72";
+ EXPECT_EQ(expected, actual);
+}
+
+TEST(MD5, MD5StringTestSuite4) {
+ std::string actual = MD5String("message digest");
+ std::string expected = "f96b697d7cb7938d525a2f31aaf161d0";
+ EXPECT_EQ(expected, actual);
+}
+
+TEST(MD5, MD5StringTestSuite5) {
+ std::string actual = MD5String("abcdefghijklmnopqrstuvwxyz");
+ std::string expected = "c3fcd3d76192e4007dfb496cca67e13b";
+ EXPECT_EQ(expected, actual);
+}
+
+TEST(MD5, MD5StringTestSuite6) {
+ std::string actual = MD5String("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789");
+ std::string expected = "d174ab98d277d9f5a5611c2c9f419d9f";
+ EXPECT_EQ(expected, actual);
+}
+
+TEST(MD5, MD5StringTestSuite7) {
+ std::string actual = MD5String("12345678901234567890"
+ "12345678901234567890"
+ "12345678901234567890"
+ "12345678901234567890");
+ std::string expected = "57edf4a22be3c955ac49da2e2107b67a";
+ EXPECT_EQ(expected, actual);
+}
+
+TEST(MD5, ContextWithStringData) {
+ MD5Context ctx;
+ MD5Init(&ctx);
+
+ MD5Update(&ctx, "abc");
+
+ MD5Digest digest;
+ MD5Final(&digest, &ctx);
+
+ std::string actual = MD5DigestToBase16(digest);
+ std::string expected = "900150983cd24fb0d6963f7d28e17f72";
+
+ EXPECT_EQ(expected, actual);
+}
+
+// Test that a digest generated by MD5IntermediateFinal() gives the same results
+// as an independently-calculated digest, and also does not modify the context.
+TEST(MD5, IntermediateFinal) {
+ // Independent context over the header.
+ MD5Context check_header_context;
+ MD5Init(&check_header_context);
+
+ // Independent context over entire input.
+ MD5Context check_full_context;
+ MD5Init(&check_full_context);
+
+ // Context intermediate digest will be calculated from.
+ MD5Context context;
+ MD5Init(&context);
+
+ static const char kHeader[] = "header data";
+ static const char kBody[] = "payload data";
+
+ MD5Update(&context, kHeader);
+ MD5Update(&check_header_context, kHeader);
+ MD5Update(&check_full_context, kHeader);
+
+ MD5Digest check_header_digest;
+ MD5Final(&check_header_digest, &check_header_context);
+
+ MD5Digest header_digest;
+ MD5IntermediateFinal(&header_digest, &context);
+
+ MD5Update(&context, kBody);
+ MD5Update(&check_full_context, kBody);
+
+ MD5Digest check_full_digest;
+ MD5Final(&check_full_digest, &check_full_context);
+
+ MD5Digest digest;
+ MD5Final(&digest, &context);
+
+ // The header and full digest pairs are the same, and they aren't the same as
+ // each other.
+ EXPECT_TRUE(!memcmp(&header_digest, &check_header_digest,
+ sizeof(header_digest)));
+ EXPECT_TRUE(!memcmp(&digest, &check_full_digest, sizeof(digest)));
+ EXPECT_TRUE(memcmp(&digest, &header_digest, sizeof(digest)));
+}
+
+} // namespace base
diff --git a/libchrome/base/memory/OWNERS b/libchrome/base/memory/OWNERS
new file mode 100644
index 0000000..bcaf778
--- /dev/null
+++ b/libchrome/base/memory/OWNERS
@@ -0,0 +1,2 @@
+per-file *chromeos*=skuhne@chromium.org
+per-file *chromeos*=oshima@chromium.org
diff --git a/libchrome/base/memory/aligned_memory.cc b/libchrome/base/memory/aligned_memory.cc
new file mode 100644
index 0000000..526a495
--- /dev/null
+++ b/libchrome/base/memory/aligned_memory.cc
@@ -0,0 +1,47 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/aligned_memory.h"
+
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include <malloc.h>
+#endif
+
+namespace base {
+
+void* AlignedAlloc(size_t size, size_t alignment) {
+ DCHECK_GT(size, 0U);
+ DCHECK_EQ(alignment & (alignment - 1), 0U);
+ DCHECK_EQ(alignment % sizeof(void*), 0U);
+ void* ptr = NULL;
+#if defined(COMPILER_MSVC)
+ ptr = _aligned_malloc(size, alignment);
+// Android technically supports posix_memalign(), but does not expose it in
+// the current version of the library headers used by Chrome. Luckily,
+// memalign() on Android returns pointers which can safely be used with
+// free(), so we can use it instead. Issue filed to document this:
+// http://code.google.com/p/android/issues/detail?id=35391
+#elif defined(OS_ANDROID)
+ ptr = memalign(alignment, size);
+#else
+ if (posix_memalign(&ptr, alignment, size))
+ ptr = NULL;
+#endif
+ // Since aligned allocations may fail for non-memory related reasons, force a
+ // crash if we encounter a failed allocation; maintaining consistent behavior
+ // with a normal allocation failure in Chrome.
+ if (!ptr) {
+ DLOG(ERROR) << "If you crashed here, your aligned allocation is incorrect: "
+ << "size=" << size << ", alignment=" << alignment;
+ CHECK(false);
+ }
+ // Sanity check alignment just to be safe.
+ DCHECK_EQ(reinterpret_cast<uintptr_t>(ptr) & (alignment - 1), 0U);
+ return ptr;
+}
+
+} // namespace base
diff --git a/libchrome/base/memory/aligned_memory.h b/libchrome/base/memory/aligned_memory.h
new file mode 100644
index 0000000..d829011
--- /dev/null
+++ b/libchrome/base/memory/aligned_memory.h
@@ -0,0 +1,117 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// AlignedMemory is a POD type that gives you a portable way to specify static
+// or local stack data of a given alignment and size. For example, if you need
+// static storage for a class, but you want manual control over when the object
+// is constructed and destructed (you don't want static initialization and
+// destruction), use AlignedMemory:
+//
+// static AlignedMemory<sizeof(MyClass), ALIGNOF(MyClass)> my_class;
+//
+// // ... at runtime:
+// new(my_class.void_data()) MyClass();
+//
+// // ... use it:
+// MyClass* mc = my_class.data_as<MyClass>();
+//
+// // ... later, to destruct my_class:
+// my_class.data_as<MyClass>()->MyClass::~MyClass();
+//
+// Alternatively, a runtime sized aligned allocation can be created:
+//
+// float* my_array = static_cast<float*>(AlignedAlloc(size, alignment));
+//
+// // ... later, to release the memory:
+// AlignedFree(my_array);
+//
+// Or using unique_ptr:
+//
+// std::unique_ptr<float, AlignedFreeDeleter> my_array(
+// static_cast<float*>(AlignedAlloc(size, alignment)));
+
+#ifndef BASE_MEMORY_ALIGNED_MEMORY_H_
+#define BASE_MEMORY_ALIGNED_MEMORY_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+
+#if defined(COMPILER_MSVC)
+#include <malloc.h>
+#else
+#include <stdlib.h>
+#endif
+
+namespace base {
+
+// AlignedMemory is specialized for all supported alignments.
+// Make sure we get a compiler error if someone uses an unsupported alignment.
+template <size_t Size, size_t ByteAlignment>
+struct AlignedMemory {};
+
+#define BASE_DECL_ALIGNED_MEMORY(byte_alignment) \
+ template <size_t Size> \
+ class AlignedMemory<Size, byte_alignment> { \
+ public: \
+ ALIGNAS(byte_alignment) uint8_t data_[Size]; \
+ void* void_data() { return static_cast<void*>(data_); } \
+ const void* void_data() const { return static_cast<const void*>(data_); } \
+ template <typename Type> \
+ Type* data_as() { \
+ return static_cast<Type*>(void_data()); \
+ } \
+ template <typename Type> \
+ const Type* data_as() const { \
+ return static_cast<const Type*>(void_data()); \
+ } \
+ \
+ private: \
+ void* operator new(size_t); \
+ void operator delete(void*); \
+ }
+
+// Specialization for all alignments is required because MSVC (as of VS 2008)
+// does not understand ALIGNAS(ALIGNOF(Type)) or ALIGNAS(template_param).
+// Greater than 4096 alignment is not supported by some compilers, so 4096 is
+// the maximum specified here.
+BASE_DECL_ALIGNED_MEMORY(1);
+BASE_DECL_ALIGNED_MEMORY(2);
+BASE_DECL_ALIGNED_MEMORY(4);
+BASE_DECL_ALIGNED_MEMORY(8);
+BASE_DECL_ALIGNED_MEMORY(16);
+BASE_DECL_ALIGNED_MEMORY(32);
+BASE_DECL_ALIGNED_MEMORY(64);
+BASE_DECL_ALIGNED_MEMORY(128);
+BASE_DECL_ALIGNED_MEMORY(256);
+BASE_DECL_ALIGNED_MEMORY(512);
+BASE_DECL_ALIGNED_MEMORY(1024);
+BASE_DECL_ALIGNED_MEMORY(2048);
+BASE_DECL_ALIGNED_MEMORY(4096);
+
+#undef BASE_DECL_ALIGNED_MEMORY
+
+BASE_EXPORT void* AlignedAlloc(size_t size, size_t alignment);
+
+inline void AlignedFree(void* ptr) {
+#if defined(COMPILER_MSVC)
+ _aligned_free(ptr);
+#else
+ free(ptr);
+#endif
+}
+
+// Deleter for use with unique_ptr. E.g., use as
+// std::unique_ptr<Foo, base::AlignedFreeDeleter> foo;
+struct AlignedFreeDeleter {
+ inline void operator()(void* ptr) const {
+ AlignedFree(ptr);
+ }
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_ALIGNED_MEMORY_H_
diff --git a/libchrome/base/memory/aligned_memory_unittest.cc b/libchrome/base/memory/aligned_memory_unittest.cc
new file mode 100644
index 0000000..abe0cf3
--- /dev/null
+++ b/libchrome/base/memory/aligned_memory_unittest.cc
@@ -0,0 +1,103 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/aligned_memory.h"
+
+#include <memory>
+
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#define EXPECT_ALIGNED(ptr, align) \
+ EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
+
+namespace {
+
+using base::AlignedMemory;
+
+TEST(AlignedMemoryTest, StaticAlignment) {
+ static AlignedMemory<8, 8> raw8;
+ static AlignedMemory<8, 16> raw16;
+ static AlignedMemory<8, 256> raw256;
+ static AlignedMemory<8, 4096> raw4096;
+
+ EXPECT_EQ(8u, ALIGNOF(raw8));
+ EXPECT_EQ(16u, ALIGNOF(raw16));
+ EXPECT_EQ(256u, ALIGNOF(raw256));
+ EXPECT_EQ(4096u, ALIGNOF(raw4096));
+
+ EXPECT_ALIGNED(raw8.void_data(), 8);
+ EXPECT_ALIGNED(raw16.void_data(), 16);
+ EXPECT_ALIGNED(raw256.void_data(), 256);
+ EXPECT_ALIGNED(raw4096.void_data(), 4096);
+}
+
+TEST(AlignedMemoryTest, StackAlignment) {
+ AlignedMemory<8, 8> raw8;
+ AlignedMemory<8, 16> raw16;
+ AlignedMemory<8, 128> raw128;
+
+ EXPECT_EQ(8u, ALIGNOF(raw8));
+ EXPECT_EQ(16u, ALIGNOF(raw16));
+ EXPECT_EQ(128u, ALIGNOF(raw128));
+
+ EXPECT_ALIGNED(raw8.void_data(), 8);
+ EXPECT_ALIGNED(raw16.void_data(), 16);
+
+  // TODO(ios): __attribute__((aligned(X))) with X >= 128 does not work on
+ // the stack when building for arm64 on iOS, http://crbug.com/349003
+#if !(defined(OS_IOS) && defined(ARCH_CPU_ARM64))
+ EXPECT_ALIGNED(raw128.void_data(), 128);
+
+ // NaCl x86-64 compiler emits non-validating instructions for >128
+ // bytes alignment.
+ // http://www.chromium.org/nativeclient/design-documents/nacl-sfi-model-on-x86-64-systems
+ // TODO(hamaji): Ideally, NaCl compiler for x86-64 should workaround
+ // this limitation and this #if should be removed.
+ // https://code.google.com/p/nativeclient/issues/detail?id=3463
+#if !(defined(OS_NACL) && defined(ARCH_CPU_X86_64))
+ AlignedMemory<8, 256> raw256;
+ EXPECT_EQ(256u, ALIGNOF(raw256));
+ EXPECT_ALIGNED(raw256.void_data(), 256);
+
+ // TODO(ios): This test hits an armv7 bug in clang. crbug.com/138066
+#if !(defined(OS_IOS) && defined(ARCH_CPU_ARM_FAMILY))
+ AlignedMemory<8, 4096> raw4096;
+ EXPECT_EQ(4096u, ALIGNOF(raw4096));
+ EXPECT_ALIGNED(raw4096.void_data(), 4096);
+#endif // !(defined(OS_IOS) && defined(ARCH_CPU_ARM_FAMILY))
+#endif // !(defined(OS_NACL) && defined(ARCH_CPU_X86_64))
+#endif // !(defined(OS_IOS) && defined(ARCH_CPU_ARM64))
+}
+
+TEST(AlignedMemoryTest, DynamicAllocation) {
+ void* p = base::AlignedAlloc(8, 8);
+ EXPECT_TRUE(p);
+ EXPECT_ALIGNED(p, 8);
+ base::AlignedFree(p);
+
+ p = base::AlignedAlloc(8, 16);
+ EXPECT_TRUE(p);
+ EXPECT_ALIGNED(p, 16);
+ base::AlignedFree(p);
+
+ p = base::AlignedAlloc(8, 256);
+ EXPECT_TRUE(p);
+ EXPECT_ALIGNED(p, 256);
+ base::AlignedFree(p);
+
+ p = base::AlignedAlloc(8, 4096);
+ EXPECT_TRUE(p);
+ EXPECT_ALIGNED(p, 4096);
+ base::AlignedFree(p);
+}
+
+TEST(AlignedMemoryTest, ScopedDynamicAllocation) {
+ std::unique_ptr<float, base::AlignedFreeDeleter> p(
+ static_cast<float*>(base::AlignedAlloc(8, 8)));
+ EXPECT_TRUE(p.get());
+ EXPECT_ALIGNED(p.get(), 8);
+}
+
+} // namespace
diff --git a/libchrome/base/memory/free_deleter.h b/libchrome/base/memory/free_deleter.h
new file mode 100644
index 0000000..5604118
--- /dev/null
+++ b/libchrome/base/memory/free_deleter.h
@@ -0,0 +1,25 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_FREE_DELETER_H_
+#define BASE_MEMORY_FREE_DELETER_H_
+
+#include <stdlib.h>
+
+namespace base {
+
+// Function object which invokes 'free' on its parameter, which must be
+// a pointer. Can be used to store malloc-allocated pointers in std::unique_ptr:
+//
+// std::unique_ptr<int, base::FreeDeleter> foo_ptr(
+// static_cast<int*>(malloc(sizeof(int))));
+struct FreeDeleter {
+ inline void operator()(void* ptr) const {
+ free(ptr);
+ }
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_FREE_DELETER_H_
diff --git a/libchrome/base/memory/linked_ptr.h b/libchrome/base/memory/linked_ptr.h
new file mode 100644
index 0000000..649dc10
--- /dev/null
+++ b/libchrome/base/memory/linked_ptr.h
@@ -0,0 +1,179 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// A "smart" pointer type with reference tracking. Every pointer to a
+// particular object is kept on a circular linked list. When the last pointer
+// to an object is destroyed or reassigned, the object is deleted.
+//
+// Used properly, this deletes the object when the last reference goes away.
+// There are several caveats:
+// - Like all reference counting schemes, cycles lead to leaks.
+// - Each smart pointer is actually two pointers (8 bytes instead of 4).
+// - Every time a pointer is released, the entire list of pointers to that
+// object is traversed. This class is therefore NOT SUITABLE when there
+// will often be more than two or three pointers to a particular object.
+// - References are only tracked as long as linked_ptr<> objects are copied.
+// If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS
+// will happen (double deletion).
+//
+// Note: If you use an incomplete type with linked_ptr<>, the class
+// *containing* linked_ptr<> must have a constructor and destructor (even
+// if they do nothing!).
+//
+// Thread Safety:
+// A linked_ptr is NOT thread safe. Copying a linked_ptr object is
+// effectively a read-write operation.
+//
+// An alternative to linked_ptr is shared_ptr, which
+// - is also two pointers in size (8 bytes for 32 bit addresses)
+// - is thread safe for copying and deletion
+// - supports weak_ptrs
+
+#ifndef BASE_MEMORY_LINKED_PTR_H_
+#define BASE_MEMORY_LINKED_PTR_H_
+
+#include "base/logging.h" // for CHECK macros
+
+// This is used internally by all instances of linked_ptr<>. It needs to be
+// a non-template class because different types of linked_ptr<> can refer to
+// the same object (linked_ptr<Superclass>(obj) vs linked_ptr<Subclass>(obj)).
+// So, it needs to be possible for different types of linked_ptr to participate
+// in the same circular linked list, so we need a single class type here.
+//
+// DO NOT USE THIS CLASS DIRECTLY YOURSELF. Use linked_ptr<T>.
+class linked_ptr_internal {
+ public:
+ // Create a new circle that includes only this instance.
+ void join_new() {
+ next_ = this;
+ }
+
+ // Join an existing circle.
+ void join(linked_ptr_internal const* ptr) {
+ next_ = ptr->next_;
+ ptr->next_ = this;
+ }
+
+ // Leave whatever circle we're part of. Returns true iff we were the
+ // last member of the circle. Once this is done, you can join() another.
+ bool depart() {
+ if (next_ == this) return true;
+ linked_ptr_internal const* p = next_;
+ while (p->next_ != this) p = p->next_;
+ p->next_ = next_;
+ return false;
+ }
+
+ private:
+ mutable linked_ptr_internal const* next_;
+};
+
+// TODO(http://crbug.com/556939): DEPRECATED: Use scoped_ptr instead (now that
+// we have support for moveable types inside STL containers).
+template <typename T>
+class linked_ptr {
+ public:
+ typedef T element_type;
+
+ // Take over ownership of a raw pointer. This should happen as soon as
+ // possible after the object is created.
+ explicit linked_ptr(T* ptr = NULL) { capture(ptr); }
+ ~linked_ptr() { depart(); }
+
+ // Copy an existing linked_ptr<>, adding ourselves to the list of references.
+ template <typename U> linked_ptr(linked_ptr<U> const& ptr) { copy(&ptr); }
+
+ linked_ptr(linked_ptr const& ptr) {
+ DCHECK_NE(&ptr, this);
+ copy(&ptr);
+ }
+
+ // Assignment releases the old value and acquires the new.
+ template <typename U> linked_ptr& operator=(linked_ptr<U> const& ptr) {
+ depart();
+ copy(&ptr);
+ return *this;
+ }
+
+ linked_ptr& operator=(linked_ptr const& ptr) {
+ if (&ptr != this) {
+ depart();
+ copy(&ptr);
+ }
+ return *this;
+ }
+
+ // Smart pointer members.
+ void reset(T* ptr = NULL) {
+ depart();
+ capture(ptr);
+ }
+ T* get() const { return value_; }
+ T* operator->() const { return value_; }
+ T& operator*() const { return *value_; }
+ // Release ownership of the pointed object and returns it.
+ // Sole ownership by this linked_ptr object is required.
+ T* release() {
+ bool last = link_.depart();
+ CHECK(last);
+ T* v = value_;
+ value_ = NULL;
+ return v;
+ }
+
+ bool operator==(const T* p) const { return value_ == p; }
+ bool operator!=(const T* p) const { return value_ != p; }
+ template <typename U>
+ bool operator==(linked_ptr<U> const& ptr) const {
+ return value_ == ptr.get();
+ }
+ template <typename U>
+ bool operator!=(linked_ptr<U> const& ptr) const {
+ return value_ != ptr.get();
+ }
+
+ private:
+ template <typename U>
+ friend class linked_ptr;
+
+ T* value_;
+ linked_ptr_internal link_;
+
+ void depart() {
+ if (link_.depart()) delete value_;
+ }
+
+ void capture(T* ptr) {
+ value_ = ptr;
+ link_.join_new();
+ }
+
+ template <typename U> void copy(linked_ptr<U> const* ptr) {
+ value_ = ptr->get();
+ if (value_)
+ link_.join(&ptr->link_);
+ else
+ link_.join_new();
+ }
+};
+
+template<typename T> inline
+bool operator==(T* ptr, const linked_ptr<T>& x) {
+ return ptr == x.get();
+}
+
+template<typename T> inline
+bool operator!=(T* ptr, const linked_ptr<T>& x) {
+ return ptr != x.get();
+}
+
+// A function to convert T* into linked_ptr<T>
+// Doing e.g. make_linked_ptr(new FooBarBaz<type>(arg)) is a shorter notation
+// for linked_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))
+template <typename T>
+linked_ptr<T> make_linked_ptr(T* ptr) {
+ return linked_ptr<T>(ptr);
+}
+
+#endif // BASE_MEMORY_LINKED_PTR_H_
diff --git a/libchrome/base/memory/linked_ptr_unittest.cc b/libchrome/base/memory/linked_ptr_unittest.cc
new file mode 100644
index 0000000..f6bc410
--- /dev/null
+++ b/libchrome/base/memory/linked_ptr_unittest.cc
@@ -0,0 +1,108 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/memory/linked_ptr.h"
+#include "base/strings/stringprintf.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+int num = 0;
+
+std::string history;
+
+// Class which tracks allocation/deallocation
+struct A {
+ A(): mynum(num++) { history += base::StringPrintf("A%d ctor\n", mynum); }
+ virtual ~A() { history += base::StringPrintf("A%d dtor\n", mynum); }
+ virtual void Use() { history += base::StringPrintf("A%d use\n", mynum); }
+ int mynum;
+};
+
+// Subclass
+struct B: public A {
+ B() { history += base::StringPrintf("B%d ctor\n", mynum); }
+ ~B() override { history += base::StringPrintf("B%d dtor\n", mynum); }
+ void Use() override { history += base::StringPrintf("B%d use\n", mynum); }
+};
+
+} // namespace
+
+TEST(LinkedPtrTest, Test) {
+ {
+ linked_ptr<A> a0, a1, a2;
+ a0 = a0;
+ a1 = a2;
+ ASSERT_EQ(a0.get(), static_cast<A*>(NULL));
+ ASSERT_EQ(a1.get(), static_cast<A*>(NULL));
+ ASSERT_EQ(a2.get(), static_cast<A*>(NULL));
+ ASSERT_TRUE(a0 == NULL);
+ ASSERT_TRUE(a1 == NULL);
+ ASSERT_TRUE(a2 == NULL);
+
+ {
+ linked_ptr<A> a3(new A);
+ a0 = a3;
+ ASSERT_TRUE(a0 == a3);
+ ASSERT_TRUE(a0 != NULL);
+ ASSERT_TRUE(a0.get() == a3);
+ ASSERT_TRUE(a0 == a3.get());
+ linked_ptr<A> a4(a0);
+ a1 = a4;
+ linked_ptr<A> a5(new A);
+ ASSERT_TRUE(a5.get() != a3);
+ ASSERT_TRUE(a5 != a3.get());
+ a2 = a5;
+ linked_ptr<B> b0(new B);
+ linked_ptr<A> a6(b0);
+ ASSERT_TRUE(b0 == a6);
+ ASSERT_TRUE(a6 == b0);
+ ASSERT_TRUE(b0 != NULL);
+ a5 = b0;
+ a5 = b0;
+ a3->Use();
+ a4->Use();
+ a5->Use();
+ a6->Use();
+ b0->Use();
+ (*b0).Use();
+ b0.get()->Use();
+ }
+
+ a0->Use();
+ a1->Use();
+ a2->Use();
+
+ a1 = a2;
+ a2.reset(new A);
+ a0.reset();
+
+ linked_ptr<A> a7;
+ }
+
+ ASSERT_EQ(history,
+ "A0 ctor\n"
+ "A1 ctor\n"
+ "A2 ctor\n"
+ "B2 ctor\n"
+ "A0 use\n"
+ "A0 use\n"
+ "B2 use\n"
+ "B2 use\n"
+ "B2 use\n"
+ "B2 use\n"
+ "B2 use\n"
+ "B2 dtor\n"
+ "A2 dtor\n"
+ "A0 use\n"
+ "A0 use\n"
+ "A1 use\n"
+ "A3 ctor\n"
+ "A0 dtor\n"
+ "A3 dtor\n"
+ "A1 dtor\n"
+ );
+}
diff --git a/libchrome/base/memory/manual_constructor.h b/libchrome/base/memory/manual_constructor.h
new file mode 100644
index 0000000..f401f62
--- /dev/null
+++ b/libchrome/base/memory/manual_constructor.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ManualConstructor statically-allocates space in which to store some
+// object, but does not initialize it. You can then call the constructor
+// and destructor for the object yourself as you see fit. This is useful
+// for memory management optimizations, where you want to initialize and
+// destroy an object multiple times but only allocate it once.
+//
+// (When I say ManualConstructor statically allocates space, I mean that
+// the ManualConstructor object itself is forced to be the right size.)
+//
+// For example usage, check out base/containers/small_map.h.
+
+#ifndef BASE_MEMORY_MANUAL_CONSTRUCTOR_H_
+#define BASE_MEMORY_MANUAL_CONSTRUCTOR_H_
+
+#include <stddef.h>
+
+#include "base/compiler_specific.h"
+#include "base/memory/aligned_memory.h"
+
+namespace base {
+
+template <typename Type>
+class ManualConstructor {
+ public:
+ // No constructor or destructor because one of the most useful uses of
+ // this class is as part of a union, and members of a union cannot have
+ // constructors or destructors. And, anyway, the whole point of this
+ // class is to bypass these.
+
+ // Support users creating arrays of ManualConstructor<>s. This ensures that
+ // the array itself has the correct alignment.
+ static void* operator new[](size_t size) {
+ return AlignedAlloc(size, ALIGNOF(Type));
+ }
+ static void operator delete[](void* mem) {
+ AlignedFree(mem);
+ }
+
+ inline Type* get() {
+ return space_.template data_as<Type>();
+ }
+ inline const Type* get() const {
+ return space_.template data_as<Type>();
+ }
+
+ inline Type* operator->() { return get(); }
+ inline const Type* operator->() const { return get(); }
+
+ inline Type& operator*() { return *get(); }
+ inline const Type& operator*() const { return *get(); }
+
+ template <typename... Ts>
+ inline void Init(Ts&&... params) {
+ new(space_.void_data()) Type(std::forward<Ts>(params)...);
+ }
+
+ inline void InitFromMove(ManualConstructor<Type>&& o) {
+ Init(std::move(*o));
+ }
+
+ inline void Destroy() {
+ get()->~Type();
+ }
+
+ private:
+ AlignedMemory<sizeof(Type), ALIGNOF(Type)> space_;
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_MANUAL_CONSTRUCTOR_H_
diff --git a/libchrome/base/memory/ptr_util.h b/libchrome/base/memory/ptr_util.h
new file mode 100644
index 0000000..8747ac9
--- /dev/null
+++ b/libchrome/base/memory/ptr_util.h
@@ -0,0 +1,74 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_PTR_UTIL_H_
+#define BASE_MEMORY_PTR_UTIL_H_
+
+#include <memory>
+#include <utility>
+
+namespace base {
+
+// Helper to transfer ownership of a raw pointer to a std::unique_ptr<T>.
+// Note that std::unique_ptr<T> has very different semantics from
+// std::unique_ptr<T[]>: do not use this helper for array allocations.
+template <typename T>
+std::unique_ptr<T> WrapUnique(T* ptr) {
+ return std::unique_ptr<T>(ptr);
+}
+
+namespace internal {
+
+template <typename T>
+struct MakeUniqueResult {
+ using Scalar = std::unique_ptr<T>;
+};
+
+template <typename T>
+struct MakeUniqueResult<T[]> {
+ using Array = std::unique_ptr<T[]>;
+};
+
+template <typename T, size_t N>
+struct MakeUniqueResult<T[N]> {
+ using Invalid = void;
+};
+
+} // namespace internal
+
+// Helper to construct an object wrapped in a std::unique_ptr. This is an
+// implementation of C++14's std::make_unique that can be used in Chrome.
+//
+// MakeUnique<T>(args) should be preferred over WrapUnique(new T(args)): bare
+// calls to `new` should be treated with scrutiny.
+//
+// Usage:
+// // ptr is a std::unique_ptr<std::string>
+// auto ptr = MakeUnique<std::string>("hello world!");
+//
+// // arr is a std::unique_ptr<int[]>
+// auto arr = MakeUnique<int[]>(5);
+
+// Overload for non-array types. Arguments are forwarded to T's constructor.
+template <typename T, typename... Args>
+typename internal::MakeUniqueResult<T>::Scalar MakeUnique(Args&&... args) {
+ return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
+}
+
+// Overload for array types of unknown bound, e.g. T[]. The array is allocated
+// with `new T[n]()` and value-initialized: note that this is distinct from
+// `new T[n]`, which default-initializes.
+template <typename T>
+typename internal::MakeUniqueResult<T>::Array MakeUnique(size_t size) {
+ return std::unique_ptr<T>(new typename std::remove_extent<T>::type[size]());
+}
+
+// Overload to reject array types of known bound, e.g. T[n].
+template <typename T, typename... Args>
+typename internal::MakeUniqueResult<T>::Invalid MakeUnique(Args&&... args) =
+ delete;
+
+} // namespace base
+
+#endif // BASE_MEMORY_PTR_UTIL_H_
diff --git a/libchrome/base/memory/raw_scoped_refptr_mismatch_checker.h b/libchrome/base/memory/raw_scoped_refptr_mismatch_checker.h
new file mode 100644
index 0000000..5dbc183
--- /dev/null
+++ b/libchrome/base/memory/raw_scoped_refptr_mismatch_checker.h
@@ -0,0 +1,58 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
+#define BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
+
+#include <tuple>
+#include <type_traits>
+
+#include "base/memory/ref_counted.h"
+
+// It is dangerous to post a task with a T* argument where T is a subtype of
+// RefCounted(Base|ThreadSafeBase), since by the time the parameter is used, the
+// object may already have been deleted since it was not held with a
+// scoped_refptr. Example: http://crbug.com/27191
+// The following set of traits are designed to generate a compile error
+// whenever this antipattern is attempted.
+
+namespace base {
+
+// This is a base internal implementation file used by task.h and callback.h.
+// Not for public consumption, so we wrap it in namespace internal.
+namespace internal {
+
+template <typename T>
+struct NeedsScopedRefptrButGetsRawPtr {
+ enum {
+ // Human readable translation: you needed to be a scoped_refptr if you are a
+ // raw pointer type and are convertible to a RefCounted(Base|ThreadSafeBase)
+ // type.
+ value = (std::is_pointer<T>::value &&
+ (std::is_convertible<T, subtle::RefCountedBase*>::value ||
+ std::is_convertible<T, subtle::RefCountedThreadSafeBase*>::value))
+ };
+};
+
+template <typename Params>
+struct ParamsUseScopedRefptrCorrectly {
+ enum { value = 0 };
+};
+
+template <>
+struct ParamsUseScopedRefptrCorrectly<std::tuple<>> {
+ enum { value = 1 };
+};
+
+template <typename Head, typename... Tail>
+struct ParamsUseScopedRefptrCorrectly<std::tuple<Head, Tail...>> {
+ enum { value = !NeedsScopedRefptrButGetsRawPtr<Head>::value &&
+ ParamsUseScopedRefptrCorrectly<std::tuple<Tail...>>::value };
+};
+
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
diff --git a/libchrome/base/memory/ref_counted.cc b/libchrome/base/memory/ref_counted.cc
new file mode 100644
index 0000000..f5924d0
--- /dev/null
+++ b/libchrome/base/memory/ref_counted.cc
@@ -0,0 +1,53 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted.h"
+#include "base/threading/thread_collision_warner.h"
+
+namespace base {
+
+namespace subtle {
+
+bool RefCountedThreadSafeBase::HasOneRef() const {
+ return AtomicRefCountIsOne(
+ &const_cast<RefCountedThreadSafeBase*>(this)->ref_count_);
+}
+
+RefCountedThreadSafeBase::RefCountedThreadSafeBase() : ref_count_(0) {
+#ifndef NDEBUG
+ in_dtor_ = false;
+#endif
+}
+
+RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
+#ifndef NDEBUG
+ DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
+ "calling Release()";
+#endif
+}
+
+void RefCountedThreadSafeBase::AddRef() const {
+#ifndef NDEBUG
+ DCHECK(!in_dtor_);
+#endif
+ AtomicRefCountInc(&ref_count_);
+}
+
+bool RefCountedThreadSafeBase::Release() const {
+#ifndef NDEBUG
+ DCHECK(!in_dtor_);
+ DCHECK(!AtomicRefCountIsZero(&ref_count_));
+#endif
+ if (!AtomicRefCountDec(&ref_count_)) {
+#ifndef NDEBUG
+ in_dtor_ = true;
+#endif
+ return true;
+ }
+ return false;
+}
+
+} // namespace subtle
+
+} // namespace base
diff --git a/libchrome/base/memory/ref_counted.h b/libchrome/base/memory/ref_counted.h
new file mode 100644
index 0000000..b026d9a
--- /dev/null
+++ b/libchrome/base/memory/ref_counted.h
@@ -0,0 +1,462 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_REF_COUNTED_H_
+#define BASE_MEMORY_REF_COUNTED_H_
+
+#include <stddef.h>
+
+#include <cassert>
+#include <iosfwd>
+#include <type_traits>
+
+#include "base/atomic_ref_count.h"
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#ifndef NDEBUG
+#include "base/logging.h"
+#endif
+#include "base/threading/thread_collision_warner.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace subtle {
+
+class BASE_EXPORT RefCountedBase {
+ public:
+ bool HasOneRef() const { return ref_count_ == 1; }
+
+ protected:
+ RefCountedBase()
+ : ref_count_(0)
+ #ifndef NDEBUG
+ , in_dtor_(false)
+ #endif
+ {
+ }
+
+ ~RefCountedBase() {
+ #ifndef NDEBUG
+ DCHECK(in_dtor_) << "RefCounted object deleted without calling Release()";
+ #endif
+ }
+
+
+ void AddRef() const {
+ // TODO(maruel): Add back once it doesn't assert 500 times/sec.
+ // Current thread books the critical section "AddRelease"
+ // without release it.
+ // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
+ #ifndef NDEBUG
+ DCHECK(!in_dtor_);
+ #endif
+ ++ref_count_;
+ }
+
+ // Returns true if the object should self-delete.
+ bool Release() const {
+ // TODO(maruel): Add back once it doesn't assert 500 times/sec.
+ // Current thread books the critical section "AddRelease"
+ // without release it.
+ // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
+ #ifndef NDEBUG
+ DCHECK(!in_dtor_);
+ #endif
+ if (--ref_count_ == 0) {
+ #ifndef NDEBUG
+ in_dtor_ = true;
+ #endif
+ return true;
+ }
+ return false;
+ }
+
+ private:
+ mutable int ref_count_;
+#ifndef NDEBUG
+ mutable bool in_dtor_;
+#endif
+
+ DFAKE_MUTEX(add_release_);
+
+ DISALLOW_COPY_AND_ASSIGN(RefCountedBase);
+};
+
+class BASE_EXPORT RefCountedThreadSafeBase {
+ public:
+ bool HasOneRef() const;
+
+ protected:
+ RefCountedThreadSafeBase();
+ ~RefCountedThreadSafeBase();
+
+ void AddRef() const;
+
+ // Returns true if the object should self-delete.
+ bool Release() const;
+
+ private:
+ mutable AtomicRefCount ref_count_;
+#ifndef NDEBUG
+ mutable bool in_dtor_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafeBase);
+};
+
+} // namespace subtle
+
+//
+// A base class for reference counted classes. Otherwise, known as a cheap
+// knock-off of WebKit's RefCounted<T> class. To use this, just extend your
+// class from it like so:
+//
+// class MyFoo : public base::RefCounted<MyFoo> {
+// ...
+// private:
+// friend class base::RefCounted<MyFoo>;
+// ~MyFoo();
+// };
+//
+// You should always make your destructor non-public, to avoid any code deleting
+// the object accidentally while there are references to it.
+template <class T>
+class RefCounted : public subtle::RefCountedBase {
+ public:
+ RefCounted() {}
+
+ void AddRef() const {
+ subtle::RefCountedBase::AddRef();
+ }
+
+ void Release() const {
+ if (subtle::RefCountedBase::Release()) {
+ delete static_cast<const T*>(this);
+ }
+ }
+
+ protected:
+ ~RefCounted() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RefCounted<T>);
+};
+
+// Forward declaration.
+template <class T, typename Traits> class RefCountedThreadSafe;
+
+// Default traits for RefCountedThreadSafe<T>. Deletes the object when its ref
+// count reaches 0. Overload to delete it on a different thread etc.
+template<typename T>
+struct DefaultRefCountedThreadSafeTraits {
+ static void Destruct(const T* x) {
+ // Delete through RefCountedThreadSafe to make child classes only need to be
+ // friend with RefCountedThreadSafe instead of this struct, which is an
+ // implementation detail.
+ RefCountedThreadSafe<T,
+ DefaultRefCountedThreadSafeTraits>::DeleteInternal(x);
+ }
+};
+
+//
+// A thread-safe variant of RefCounted<T>
+//
+// class MyFoo : public base::RefCountedThreadSafe<MyFoo> {
+// ...
+// };
+//
+// If you're using the default trait, then you should add compile time
+// asserts that no one else is deleting your object. i.e.
+// private:
+// friend class base::RefCountedThreadSafe<MyFoo>;
+// ~MyFoo();
+template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T> >
+class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
+ public:
+ RefCountedThreadSafe() {}
+
+ void AddRef() const {
+ subtle::RefCountedThreadSafeBase::AddRef();
+ }
+
+ void Release() const {
+ if (subtle::RefCountedThreadSafeBase::Release()) {
+ Traits::Destruct(static_cast<const T*>(this));
+ }
+ }
+
+ protected:
+ ~RefCountedThreadSafe() {}
+
+ private:
+ friend struct DefaultRefCountedThreadSafeTraits<T>;
+ static void DeleteInternal(const T* x) { delete x; }
+
+ DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafe);
+};
+
+//
+// A thread-safe wrapper for some piece of data so we can place other
+// things in scoped_refptrs<>.
+//
+template<typename T>
+class RefCountedData
+ : public base::RefCountedThreadSafe< base::RefCountedData<T> > {
+ public:
+ RefCountedData() : data() {}
+ RefCountedData(const T& in_value) : data(in_value) {}
+
+ T data;
+
+ private:
+ friend class base::RefCountedThreadSafe<base::RefCountedData<T> >;
+ ~RefCountedData() {}
+};
+
+} // namespace base
+
+//
+// A smart pointer class for reference counted objects. Use this class instead
+// of calling AddRef and Release manually on a reference counted object to
+// avoid common memory leaks caused by forgetting to Release an object
+// reference. Sample usage:
+//
+// class MyFoo : public RefCounted<MyFoo> {
+// ...
+// };
+//
+// void some_function() {
+// scoped_refptr<MyFoo> foo = new MyFoo();
+// foo->Method(param);
+// // |foo| is released when this function returns
+// }
+//
+// void some_other_function() {
+// scoped_refptr<MyFoo> foo = new MyFoo();
+// ...
+// foo = NULL; // explicitly releases |foo|
+// ...
+// if (foo)
+// foo->Method(param);
+// }
+//
+// The above examples show how scoped_refptr<T> acts like a pointer to T.
+// Given two scoped_refptr<T> classes, it is also possible to exchange
+// references between the two objects, like so:
+//
+// {
+// scoped_refptr<MyFoo> a = new MyFoo();
+// scoped_refptr<MyFoo> b;
+//
+// b.swap(a);
+// // now, |b| references the MyFoo object, and |a| references NULL.
+// }
+//
+// To make both |a| and |b| in the above example reference the same MyFoo
+// object, simply use the assignment operator:
+//
+// {
+// scoped_refptr<MyFoo> a = new MyFoo();
+// scoped_refptr<MyFoo> b;
+//
+// b = a;
+// // now, |a| and |b| each own a reference to the same MyFoo object.
+// }
+//
+template <class T>
+class scoped_refptr {
+ public:
+ typedef T element_type;
+
+ scoped_refptr() : ptr_(NULL) {
+ }
+
+ scoped_refptr(T* p) : ptr_(p) {
+ if (ptr_)
+ AddRef(ptr_);
+ }
+
+ // Copy constructor.
+ scoped_refptr(const scoped_refptr<T>& r) : ptr_(r.ptr_) {
+ if (ptr_)
+ AddRef(ptr_);
+ }
+
+ // Copy conversion constructor.
+ template <typename U,
+ typename = typename std::enable_if<
+ std::is_convertible<U*, T*>::value>::type>
+ scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) {
+ if (ptr_)
+ AddRef(ptr_);
+ }
+
+ // Move constructor. This is required in addition to the conversion
+ // constructor below in order for clang to warn about pessimizing moves.
+ scoped_refptr(scoped_refptr&& r) : ptr_(r.get()) { r.ptr_ = nullptr; }
+
+ // Move conversion constructor.
+ template <typename U,
+ typename = typename std::enable_if<
+ std::is_convertible<U*, T*>::value>::type>
+ scoped_refptr(scoped_refptr<U>&& r) : ptr_(r.get()) {
+ r.ptr_ = nullptr;
+ }
+
+ ~scoped_refptr() {
+ if (ptr_)
+ Release(ptr_);
+ }
+
+ T* get() const { return ptr_; }
+
+ T& operator*() const {
+ assert(ptr_ != NULL);
+ return *ptr_;
+ }
+
+ T* operator->() const {
+ assert(ptr_ != NULL);
+ return ptr_;
+ }
+
+ scoped_refptr<T>& operator=(T* p) {
+ // AddRef first so that self assignment should work
+ if (p)
+ AddRef(p);
+ T* old_ptr = ptr_;
+ ptr_ = p;
+ if (old_ptr)
+ Release(old_ptr);
+ return *this;
+ }
+
+ scoped_refptr<T>& operator=(const scoped_refptr<T>& r) {
+ return *this = r.ptr_;
+ }
+
+ template <typename U>
+ scoped_refptr<T>& operator=(const scoped_refptr<U>& r) {
+ return *this = r.get();
+ }
+
+ scoped_refptr<T>& operator=(scoped_refptr<T>&& r) {
+ scoped_refptr<T>(std::move(r)).swap(*this);
+ return *this;
+ }
+
+ template <typename U>
+ scoped_refptr<T>& operator=(scoped_refptr<U>&& r) {
+ scoped_refptr<T>(std::move(r)).swap(*this);
+ return *this;
+ }
+
+ void swap(T** pp) {
+ T* p = ptr_;
+ ptr_ = *pp;
+ *pp = p;
+ }
+
+ void swap(scoped_refptr<T>& r) {
+ swap(&r.ptr_);
+ }
+
+ explicit operator bool() const { return ptr_ != nullptr; }
+
+ template <typename U>
+ bool operator==(const scoped_refptr<U>& rhs) const {
+ return ptr_ == rhs.get();
+ }
+
+ template <typename U>
+ bool operator!=(const scoped_refptr<U>& rhs) const {
+ return !operator==(rhs);
+ }
+
+ template <typename U>
+ bool operator<(const scoped_refptr<U>& rhs) const {
+ return ptr_ < rhs.get();
+ }
+
+ protected:
+ T* ptr_;
+
+ private:
+ // Friend required for move constructors that set r.ptr_ to null.
+ template <typename U>
+ friend class scoped_refptr;
+
+ // Non-inline helpers to allow:
+ // class Opaque;
+ // extern template class scoped_refptr<Opaque>;
+ // Otherwise the compiler will complain that Opaque is an incomplete type.
+ static void AddRef(T* ptr);
+ static void Release(T* ptr);
+};
+
+template <typename T>
+void scoped_refptr<T>::AddRef(T* ptr) {
+ ptr->AddRef();
+}
+
+template <typename T>
+void scoped_refptr<T>::Release(T* ptr) {
+ ptr->Release();
+}
+
+// Handy utility for creating a scoped_refptr<T> out of a T* explicitly without
+// having to retype all the template arguments
+template <typename T>
+scoped_refptr<T> make_scoped_refptr(T* t) {
+ return scoped_refptr<T>(t);
+}
+
+template <typename T, typename U>
+bool operator==(const scoped_refptr<T>& lhs, const U* rhs) {
+ return lhs.get() == rhs;
+}
+
+template <typename T, typename U>
+bool operator==(const T* lhs, const scoped_refptr<U>& rhs) {
+ return lhs == rhs.get();
+}
+
+template <typename T>
+bool operator==(const scoped_refptr<T>& lhs, std::nullptr_t) {
+ return !static_cast<bool>(lhs);
+}
+
+template <typename T>
+bool operator==(std::nullptr_t, const scoped_refptr<T>& rhs) {
+ return !static_cast<bool>(rhs);
+}
+
+template <typename T, typename U>
+bool operator!=(const scoped_refptr<T>& lhs, const U* rhs) {
+ return !operator==(lhs, rhs);
+}
+
+template <typename T, typename U>
+bool operator!=(const T* lhs, const scoped_refptr<U>& rhs) {
+ return !operator==(lhs, rhs);
+}
+
+template <typename T>
+bool operator!=(const scoped_refptr<T>& lhs, std::nullptr_t null) {
+ return !operator==(lhs, null);
+}
+
+template <typename T>
+bool operator!=(std::nullptr_t null, const scoped_refptr<T>& rhs) {
+ return !operator==(null, rhs);
+}
+
+template <typename T>
+std::ostream& operator<<(std::ostream& out, const scoped_refptr<T>& p) {
+ return out << p.get();
+}
+
+#endif // BASE_MEMORY_REF_COUNTED_H_
diff --git a/libchrome/base/memory/ref_counted_delete_on_message_loop.h b/libchrome/base/memory/ref_counted_delete_on_message_loop.h
new file mode 100644
index 0000000..de194e8
--- /dev/null
+++ b/libchrome/base/memory/ref_counted_delete_on_message_loop.h
@@ -0,0 +1,75 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_REF_COUNTED_DELETE_ON_MESSAGE_LOOP_H_
+#define BASE_MEMORY_REF_COUNTED_DELETE_ON_MESSAGE_LOOP_H_
+
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
+
+namespace base {
+
+// RefCountedDeleteOnMessageLoop is similar to RefCountedThreadSafe, and ensures
+// that the object will be deleted on a specified message loop.
+//
+// Sample usage:
+// class Foo : public RefCountedDeleteOnMessageLoop<Foo> {
+//
+// Foo(scoped_refptr<SingleThreadTaskRunner> loop)
+// : RefCountedDeleteOnMessageLoop<Foo>(std::move(loop)) {}
+// ...
+// private:
+// friend class RefCountedDeleteOnMessageLoop<Foo>;
+// friend class DeleteHelper<Foo>;
+//
+// ~Foo();
+// };
+
+// TODO(skyostil): Rename this to RefCountedDeleteOnTaskRunner.
+template <class T>
+class RefCountedDeleteOnMessageLoop : public subtle::RefCountedThreadSafeBase {
+ public:
+  // This constructor will accept a MessageLoopProxy object, but new code should
+ // prefer a SingleThreadTaskRunner. A SingleThreadTaskRunner for the
+ // MessageLoop on the current thread can be acquired by calling
+ // MessageLoop::current()->task_runner().
+ RefCountedDeleteOnMessageLoop(
+ scoped_refptr<SingleThreadTaskRunner> task_runner)
+ : task_runner_(std::move(task_runner)) {
+ DCHECK(task_runner_);
+ }
+
+ void AddRef() const {
+ subtle::RefCountedThreadSafeBase::AddRef();
+ }
+
+ void Release() const {
+ if (subtle::RefCountedThreadSafeBase::Release())
+ DestructOnMessageLoop();
+ }
+
+ protected:
+ friend class DeleteHelper<RefCountedDeleteOnMessageLoop>;
+ ~RefCountedDeleteOnMessageLoop() {}
+
+ void DestructOnMessageLoop() const {
+ const T* t = static_cast<const T*>(this);
+ if (task_runner_->BelongsToCurrentThread())
+ delete t;
+ else
+ task_runner_->DeleteSoon(FROM_HERE, t);
+ }
+
+ scoped_refptr<SingleThreadTaskRunner> task_runner_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RefCountedDeleteOnMessageLoop);
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_REF_COUNTED_DELETE_ON_MESSAGE_LOOP_H_
diff --git a/libchrome/base/memory/ref_counted_memory.cc b/libchrome/base/memory/ref_counted_memory.cc
new file mode 100644
index 0000000..26b78f3
--- /dev/null
+++ b/libchrome/base/memory/ref_counted_memory.cc
@@ -0,0 +1,81 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted_memory.h"
+
+#include <string.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+// Returns true if |other| is non-null and byte-for-byte equal to |this|.
+// The |size() == 0| guard avoids calling memcmp() with null pointers: both
+// concrete front() implementations return NULL for empty payloads, and
+// passing NULL to memcmp() is undefined behavior even with a zero length.
+bool RefCountedMemory::Equals(
+    const scoped_refptr<RefCountedMemory>& other) const {
+  return other.get() &&
+         size() == other->size() &&
+         (size() == 0 ||
+          memcmp(front(), other->front(), size()) == 0);
+}
+
+// Out-of-line definitions anchor the abstract interface's (virtual)
+// destructor in this translation unit.
+RefCountedMemory::RefCountedMemory() {}
+
+RefCountedMemory::~RefCountedMemory() {}
+
+// Non-owning view accessors: |data_| and |length_| are set once at
+// construction and never modified.
+const unsigned char* RefCountedStaticMemory::front() const {
+  return data_;
+}
+
+size_t RefCountedStaticMemory::size() const {
+  return length_;
+}
+
+RefCountedStaticMemory::~RefCountedStaticMemory() {}
+
+RefCountedBytes::RefCountedBytes() {}
+
+RefCountedBytes::RefCountedBytes(const std::vector<unsigned char>& initializer)
+    : data_(initializer) {
+}
+
+RefCountedBytes::RefCountedBytes(const unsigned char* p, size_t size)
+    : data_(p, p + size) {}
+
+// Takes ownership of |to_destroy|'s contents by swapping; the caller's
+// vector is left empty.
+scoped_refptr<RefCountedBytes> RefCountedBytes::TakeVector(
+    std::vector<unsigned char>* to_destroy) {
+  scoped_refptr<RefCountedBytes> bytes(new RefCountedBytes);
+  bytes->data_.swap(*to_destroy);
+  return bytes;
+}
+
+const unsigned char* RefCountedBytes::front() const {
+  // STL will assert if we do front() on an empty vector, but calling code
+  // expects a NULL.
+  return size() ? &data_.front() : NULL;
+}
+
+size_t RefCountedBytes::size() const {
+  return data_.size();
+}
+
+RefCountedBytes::~RefCountedBytes() {}
+
+RefCountedString::RefCountedString() {}
+
+RefCountedString::~RefCountedString() {}
+
+// static
+// Takes ownership of |to_destroy|'s contents by swapping; the caller's
+// string is left empty.
+scoped_refptr<RefCountedString> RefCountedString::TakeString(
+    std::string* to_destroy) {
+  scoped_refptr<RefCountedString> self(new RefCountedString);
+  to_destroy->swap(self->data_);
+  return self;
+}
+
+const unsigned char* RefCountedString::front() const {
+  // Match the RefCountedMemory contract: empty data yields NULL.
+  return data_.empty() ? NULL :
+      reinterpret_cast<const unsigned char*>(data_.data());
+}
+
+size_t RefCountedString::size() const {
+  return data_.size();
+}
+
+} // namespace base
diff --git a/libchrome/base/memory/ref_counted_memory.h b/libchrome/base/memory/ref_counted_memory.h
new file mode 100644
index 0000000..aa22c9e
--- /dev/null
+++ b/libchrome/base/memory/ref_counted_memory.h
@@ -0,0 +1,130 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_REF_COUNTED_MEMORY_H_
+#define BASE_MEMORY_REF_COUNTED_MEMORY_H_
+
+#include <stddef.h>
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+
+namespace base {
+
+// A generic interface to memory. This object is reference counted because one
+// of its two subclasses own the data they carry, and we need to have
+// heterogeneous containers of these two types of memory.
+class BASE_EXPORT RefCountedMemory
+    : public base::RefCountedThreadSafe<RefCountedMemory> {
+ public:
+  // Retrieves a pointer to the beginning of the data we point to. If the data
+  // is empty, this will return NULL.
+  virtual const unsigned char* front() const = 0;
+
+  // Size of the memory pointed to.
+  virtual size_t size() const = 0;
+
+  // Returns true if |other| is byte for byte equal.
+  bool Equals(const scoped_refptr<RefCountedMemory>& other) const;
+
+  // Handy method to simplify calling front() with a reinterpret_cast.
+  template<typename T> const T* front_as() const {
+    return reinterpret_cast<const T*>(front());
+  }
+
+ protected:
+  // Destruction only via RefCountedThreadSafe, so the destructor is
+  // non-public.
+  friend class base::RefCountedThreadSafe<RefCountedMemory>;
+  RefCountedMemory();
+  virtual ~RefCountedMemory();
+};
+
+// An implementation of RefCountedMemory, where the ref counting does not
+// matter.
+class BASE_EXPORT RefCountedStaticMemory : public RefCountedMemory {
+ public:
+  RefCountedStaticMemory()
+      : data_(NULL), length_(0) {}
+  // Zero-length input is normalized to a NULL |data_| so that front()
+  // honors the RefCountedMemory contract for empty data.
+  RefCountedStaticMemory(const void* data, size_t length)
+      : data_(static_cast<const unsigned char*>(length ? data : NULL)),
+        length_(length) {}
+
+  // Overridden from RefCountedMemory:
+  const unsigned char* front() const override;
+  size_t size() const override;
+
+ private:
+  ~RefCountedStaticMemory() override;
+
+  // Non-owning pointer; the caller must keep the memory alive (static data).
+  const unsigned char* data_;
+  size_t length_;
+
+  DISALLOW_COPY_AND_ASSIGN(RefCountedStaticMemory);
+};
+
+// An implementation of RefCountedMemory, where we own the data in a vector.
+class BASE_EXPORT RefCountedBytes : public RefCountedMemory {
+ public:
+  RefCountedBytes();
+
+  // Constructs a RefCountedBytes object by _copying_ from |initializer|.
+  explicit RefCountedBytes(const std::vector<unsigned char>& initializer);
+
+  // Constructs a RefCountedBytes object by copying |size| bytes from |p|.
+  RefCountedBytes(const unsigned char* p, size_t size);
+
+  // Constructs a RefCountedBytes object by performing a swap. (To non
+  // destructively build a RefCountedBytes, use the constructor that takes a
+  // vector.)
+  static scoped_refptr<RefCountedBytes> TakeVector(
+      std::vector<unsigned char>* to_destroy);
+
+  // Overridden from RefCountedMemory:
+  const unsigned char* front() const override;
+  size_t size() const override;
+
+  // Direct access to the owned byte vector (mutable overload allows callers
+  // to fill the buffer in place).
+  const std::vector<unsigned char>& data() const { return data_; }
+  std::vector<unsigned char>& data() { return data_; }
+
+ private:
+  ~RefCountedBytes() override;
+
+  std::vector<unsigned char> data_;
+
+  DISALLOW_COPY_AND_ASSIGN(RefCountedBytes);
+};
+
+// An implementation of RefCountedMemory, where the bytes are stored in an STL
+// string. Use this if your data naturally arrives in that format.
+class BASE_EXPORT RefCountedString : public RefCountedMemory {
+ public:
+  RefCountedString();
+
+  // Constructs a RefCountedString object by performing a swap. (To non
+  // destructively build a RefCountedString, use the default constructor and
+  // copy into object->data()).
+  static scoped_refptr<RefCountedString> TakeString(std::string* to_destroy);
+
+  // Overridden from RefCountedMemory:
+  const unsigned char* front() const override;
+  size_t size() const override;
+
+  // Direct access to the owned string.
+  const std::string& data() const { return data_; }
+  std::string& data() { return data_; }
+
+ private:
+  ~RefCountedString() override;
+
+  std::string data_;
+
+  DISALLOW_COPY_AND_ASSIGN(RefCountedString);
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_REF_COUNTED_MEMORY_H_
diff --git a/libchrome/base/memory/ref_counted_memory_unittest.cc b/libchrome/base/memory/ref_counted_memory_unittest.cc
new file mode 100644
index 0000000..bd2ed01
--- /dev/null
+++ b/libchrome/base/memory/ref_counted_memory_unittest.cc
@@ -0,0 +1,81 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted_memory.h"
+
+#include <stdint.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(RefCountedMemoryUnitTest, RefCountedStaticMemory) {
+  // Only the first 10 bytes of the literal are exposed, so the trailing
+  // "00" is deliberately excluded from the view.
+  scoped_refptr<RefCountedMemory> mem = new RefCountedStaticMemory(
+      "static mem00", 10);
+
+  EXPECT_EQ(10U, mem->size());
+  EXPECT_EQ("static mem", std::string(mem->front_as<char>(), mem->size()));
+}
+
+TEST(RefCountedMemoryUnitTest, RefCountedBytes) {
+  std::vector<uint8_t> data;
+  data.push_back(45);
+  data.push_back(99);
+  // TakeVector() swaps, leaving |data| empty.
+  scoped_refptr<RefCountedMemory> mem = RefCountedBytes::TakeVector(&data);
+
+  EXPECT_EQ(0U, data.size());
+
+  EXPECT_EQ(2U, mem->size());
+  EXPECT_EQ(45U, mem->front()[0]);
+  EXPECT_EQ(99U, mem->front()[1]);
+
+  // The copying constructor must remain valid after the source goes out of
+  // scope.
+  scoped_refptr<RefCountedMemory> mem2;
+  {
+    unsigned char data2[] = { 12, 11, 99 };
+    mem2 = new RefCountedBytes(data2, 3);
+  }
+  EXPECT_EQ(3U, mem2->size());
+  EXPECT_EQ(12U, mem2->front()[0]);
+  EXPECT_EQ(11U, mem2->front()[1]);
+  EXPECT_EQ(99U, mem2->front()[2]);
+}
+
+TEST(RefCountedMemoryUnitTest, RefCountedString) {
+  std::string s("destroy me");
+  // TakeString() swaps, leaving |s| empty.
+  scoped_refptr<RefCountedMemory> mem = RefCountedString::TakeString(&s);
+
+  EXPECT_EQ(0U, s.size());
+
+  EXPECT_EQ(10U, mem->size());
+  EXPECT_EQ('d', mem->front()[0]);
+  EXPECT_EQ('e', mem->front()[1]);
+}
+
+TEST(RefCountedMemoryUnitTest, Equals) {
+  // Equals() compares bytes, so equality must hold across different
+  // RefCountedMemory implementations.
+  std::string s1("same");
+  scoped_refptr<RefCountedMemory> mem1 = RefCountedString::TakeString(&s1);
+
+  std::vector<unsigned char> d2;
+  d2.push_back('s');
+  d2.push_back('a');
+  d2.push_back('m');
+  d2.push_back('e');
+  scoped_refptr<RefCountedMemory> mem2 = RefCountedBytes::TakeVector(&d2);
+
+  EXPECT_TRUE(mem1->Equals(mem2));
+
+  std::string s3("diff");
+  scoped_refptr<RefCountedMemory> mem3 = RefCountedString::TakeString(&s3);
+
+  EXPECT_FALSE(mem1->Equals(mem3));
+  EXPECT_FALSE(mem2->Equals(mem3));
+}
+
+TEST(RefCountedMemoryUnitTest, EqualsNull) {
+  std::string s("str");
+  scoped_refptr<RefCountedMemory> mem = RefCountedString::TakeString(&s);
+  EXPECT_FALSE(mem->Equals(NULL));
+}
+
+} // namespace base
diff --git a/libchrome/base/memory/ref_counted_unittest.cc b/libchrome/base/memory/ref_counted_unittest.cc
new file mode 100644
index 0000000..7c4e07a
--- /dev/null
+++ b/libchrome/base/memory/ref_counted_unittest.cc
@@ -0,0 +1,507 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted.h"
+
+#include "base/test/opaque_ref_counted.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+// Minimal ref-counted type; the destructor is non-public so only
+// base::RefCounted may delete instances.
+class SelfAssign : public base::RefCounted<SelfAssign> {
+ protected:
+  virtual ~SelfAssign() {}
+
+ private:
+  friend class base::RefCounted<SelfAssign>;
+};
+
+class Derived : public SelfAssign {
+ protected:
+  ~Derived() override {}
+
+ private:
+  friend class base::RefCounted<Derived>;
+};
+
+class CheckDerivedMemberAccess : public scoped_refptr<SelfAssign> {
+ public:
+  CheckDerivedMemberAccess() {
+    // This shouldn't compile if we don't have access to the member variable.
+    SelfAssign** pptr = &ptr_;
+    EXPECT_EQ(*pptr, ptr_);
+  }
+};
+
+// Holds a reference to itself; clearing |self_ptr_| drops the last
+// reference and destroys the object, recorded in |was_destroyed_|.
+class ScopedRefPtrToSelf : public base::RefCounted<ScopedRefPtrToSelf> {
+ public:
+  ScopedRefPtrToSelf() : self_ptr_(this) {}
+
+  static bool was_destroyed() { return was_destroyed_; }
+
+  static void reset_was_destroyed() { was_destroyed_ = false; }
+
+  scoped_refptr<ScopedRefPtrToSelf> self_ptr_;
+
+ private:
+  friend class base::RefCounted<ScopedRefPtrToSelf>;
+  ~ScopedRefPtrToSelf() { was_destroyed_ = true; }
+
+  static bool was_destroyed_;
+};
+
+bool ScopedRefPtrToSelf::was_destroyed_ = false;
+
+// Counts constructions and destructions in static counters so tests can
+// assert exactly when objects are created and freed.
+class ScopedRefPtrCountBase : public base::RefCounted<ScopedRefPtrCountBase> {
+ public:
+  ScopedRefPtrCountBase() { ++constructor_count_; }
+
+  static int constructor_count() { return constructor_count_; }
+
+  static int destructor_count() { return destructor_count_; }
+
+  static void reset_count() {
+    constructor_count_ = 0;
+    destructor_count_ = 0;
+  }
+
+ protected:
+  virtual ~ScopedRefPtrCountBase() { ++destructor_count_; }
+
+ private:
+  friend class base::RefCounted<ScopedRefPtrCountBase>;
+
+  static int constructor_count_;
+  static int destructor_count_;
+};
+
+int ScopedRefPtrCountBase::constructor_count_ = 0;
+int ScopedRefPtrCountBase::destructor_count_ = 0;
+
+// Like ScopedRefPtrCountBase but with its own counters, so tests can tell
+// base and derived lifetimes apart.
+class ScopedRefPtrCountDerived : public ScopedRefPtrCountBase {
+ public:
+  ScopedRefPtrCountDerived() { ++constructor_count_; }
+
+  static int constructor_count() { return constructor_count_; }
+
+  static int destructor_count() { return destructor_count_; }
+
+  static void reset_count() {
+    constructor_count_ = 0;
+    destructor_count_ = 0;
+  }
+
+ protected:
+  ~ScopedRefPtrCountDerived() override { ++destructor_count_; }
+
+ private:
+  friend class base::RefCounted<ScopedRefPtrCountDerived>;
+
+  static int constructor_count_;
+  static int destructor_count_;
+};
+
+int ScopedRefPtrCountDerived::constructor_count_ = 0;
+int ScopedRefPtrCountDerived::destructor_count_ = 0;
+
+class Other : public base::RefCounted<Other> {
+ private:
+  friend class base::RefCounted<Other>;
+
+  ~Other() {}
+};
+
+// Overload pair used to verify that overload resolution picks the right
+// scoped_refptr conversion for both copy and move arguments.
+scoped_refptr<Other> Overloaded(scoped_refptr<Other> other) {
+  return other;
+}
+
+scoped_refptr<SelfAssign> Overloaded(scoped_refptr<SelfAssign> self_assign) {
+  return self_assign;
+}
+
+
+} // end namespace
+
+TEST(RefCountedUnitTest, TestSelfAssignment) {
+  SelfAssign* p = new SelfAssign;
+  scoped_refptr<SelfAssign> var(p);
+  // Deliberate self-assignment: must not drop the only reference.
+  var = var;
+  EXPECT_EQ(var.get(), p);
+}
+
+TEST(RefCountedUnitTest, ScopedRefPtrMemberAccess) {
+  CheckDerivedMemberAccess check;
+}
+
+TEST(RefCountedUnitTest, ScopedRefPtrToSelfPointerAssignment) {
+  ScopedRefPtrToSelf::reset_was_destroyed();
+
+  ScopedRefPtrToSelf* check = new ScopedRefPtrToSelf();
+  EXPECT_FALSE(ScopedRefPtrToSelf::was_destroyed());
+  check->self_ptr_ = nullptr;
+  EXPECT_TRUE(ScopedRefPtrToSelf::was_destroyed());
+}
+
+TEST(RefCountedUnitTest, ScopedRefPtrToSelfMoveAssignment) {
+  ScopedRefPtrToSelf::reset_was_destroyed();
+
+  ScopedRefPtrToSelf* check = new ScopedRefPtrToSelf();
+  EXPECT_FALSE(ScopedRefPtrToSelf::was_destroyed());
+  // Releasing |check->self_ptr_| will delete |check|.
+  // The move assignment operator must assign |check->self_ptr_| first then
+  // release |check->self_ptr_|.
+  check->self_ptr_ = scoped_refptr<ScopedRefPtrToSelf>();
+  EXPECT_TRUE(ScopedRefPtrToSelf::was_destroyed());
+}
+
+TEST(RefCountedUnitTest, ScopedRefPtrToOpaque) {
+  // Verifies scoped_refptr works with a type whose definition is opaque here.
+  scoped_refptr<base::OpaqueRefCounted> p = base::MakeOpaqueRefCounted();
+  base::TestOpaqueRefCounted(p);
+
+  scoped_refptr<base::OpaqueRefCounted> q;
+  q = p;
+  base::TestOpaqueRefCounted(p);
+  base::TestOpaqueRefCounted(q);
+}
+
+TEST(RefCountedUnitTest, BooleanTesting) {
+  scoped_refptr<SelfAssign> ptr_to_an_instance = new SelfAssign;
+  EXPECT_TRUE(ptr_to_an_instance);
+  EXPECT_FALSE(!ptr_to_an_instance);
+
+  if (ptr_to_an_instance) {
+  } else {
+    ADD_FAILURE() << "Pointer to an instance should result in true.";
+  }
+
+  if (!ptr_to_an_instance) {  // check for operator!().
+    ADD_FAILURE() << "Pointer to an instance should result in !x being false.";
+  }
+
+  scoped_refptr<SelfAssign> null_ptr;
+  EXPECT_FALSE(null_ptr);
+  EXPECT_TRUE(!null_ptr);
+
+  if (null_ptr) {
+    ADD_FAILURE() << "Null pointer should result in false.";
+  }
+
+  if (!null_ptr) {  // check for operator!().
+  } else {
+    ADD_FAILURE() << "Null pointer should result in !x being true.";
+  }
+}
+
+TEST(RefCountedUnitTest, Equality) {
+  scoped_refptr<SelfAssign> p1(new SelfAssign);
+  scoped_refptr<SelfAssign> p2(new SelfAssign);
+
+  EXPECT_EQ(p1, p1);
+  EXPECT_EQ(p2, p2);
+
+  EXPECT_NE(p1, p2);
+  EXPECT_NE(p2, p1);
+}
+
+TEST(RefCountedUnitTest, NullptrEquality) {
+  scoped_refptr<SelfAssign> ptr_to_an_instance(new SelfAssign);
+  scoped_refptr<SelfAssign> ptr_to_nullptr;
+
+  EXPECT_NE(nullptr, ptr_to_an_instance);
+  EXPECT_NE(ptr_to_an_instance, nullptr);
+  EXPECT_EQ(nullptr, ptr_to_nullptr);
+  EXPECT_EQ(ptr_to_nullptr, nullptr);
+}
+
+TEST(RefCountedUnitTest, ConvertibleEquality) {
+  // Comparison across convertible pointee types (Derived vs. SelfAssign).
+  scoped_refptr<Derived> p1(new Derived);
+  scoped_refptr<SelfAssign> p2;
+
+  EXPECT_NE(p1, p2);
+  EXPECT_NE(p2, p1);
+
+  p2 = p1;
+
+  EXPECT_EQ(p1, p2);
+  EXPECT_EQ(p2, p1);
+}
+
+TEST(RefCountedUnitTest, MoveAssignment1) {
+ ScopedRefPtrCountBase::reset_count();
+
+ {
+ ScopedRefPtrCountBase *raw = new ScopedRefPtrCountBase();
+ scoped_refptr<ScopedRefPtrCountBase> p1(raw);
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+ {
+ scoped_refptr<ScopedRefPtrCountBase> p2;
+
+ p2 = std::move(p1);
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+ EXPECT_EQ(nullptr, p1.get());
+ EXPECT_EQ(raw, p2.get());
+
+ // p2 goes out of scope.
+ }
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+
+ // p1 goes out of scope.
+ }
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveAssignment2) {
+ ScopedRefPtrCountBase::reset_count();
+
+ {
+ ScopedRefPtrCountBase *raw = new ScopedRefPtrCountBase();
+ scoped_refptr<ScopedRefPtrCountBase> p1;
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+ {
+ scoped_refptr<ScopedRefPtrCountBase> p2(raw);
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+ p1 = std::move(p2);
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+ EXPECT_EQ(raw, p1.get());
+ EXPECT_EQ(nullptr, p2.get());
+
+ // p2 goes out of scope.
+ }
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+ // p1 goes out of scope.
+ }
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveAssignmentSameInstance1) {
+ ScopedRefPtrCountBase::reset_count();
+
+ {
+ ScopedRefPtrCountBase *raw = new ScopedRefPtrCountBase();
+ scoped_refptr<ScopedRefPtrCountBase> p1(raw);
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+ {
+ scoped_refptr<ScopedRefPtrCountBase> p2(p1);
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+ p1 = std::move(p2);
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+ EXPECT_EQ(raw, p1.get());
+ EXPECT_EQ(nullptr, p2.get());
+
+ // p2 goes out of scope.
+ }
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+ // p1 goes out of scope.
+ }
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveAssignmentSameInstance2) {
+ ScopedRefPtrCountBase::reset_count();
+
+ {
+ ScopedRefPtrCountBase *raw = new ScopedRefPtrCountBase();
+ scoped_refptr<ScopedRefPtrCountBase> p1(raw);
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+ {
+ scoped_refptr<ScopedRefPtrCountBase> p2(p1);
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+ p2 = std::move(p1);
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+ EXPECT_EQ(nullptr, p1.get());
+ EXPECT_EQ(raw, p2.get());
+
+ // p2 goes out of scope.
+ }
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+
+ // p1 goes out of scope.
+ }
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveAssignmentDifferentInstances) {
+ ScopedRefPtrCountBase::reset_count();
+
+ {
+ ScopedRefPtrCountBase *raw1 = new ScopedRefPtrCountBase();
+ scoped_refptr<ScopedRefPtrCountBase> p1(raw1);
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+ {
+ ScopedRefPtrCountBase *raw2 = new ScopedRefPtrCountBase();
+ scoped_refptr<ScopedRefPtrCountBase> p2(raw2);
+ EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+ p1 = std::move(p2);
+ EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+ EXPECT_EQ(raw2, p1.get());
+ EXPECT_EQ(nullptr, p2.get());
+
+ // p2 goes out of scope.
+ }
+ EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+
+ // p1 goes out of scope.
+ }
+ EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(2, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveAssignmentDerived) {
+ ScopedRefPtrCountBase::reset_count();
+ ScopedRefPtrCountDerived::reset_count();
+
+ {
+ ScopedRefPtrCountBase *raw1 = new ScopedRefPtrCountBase();
+ scoped_refptr<ScopedRefPtrCountBase> p1(raw1);
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountDerived::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+
+ {
+ ScopedRefPtrCountDerived *raw2 = new ScopedRefPtrCountDerived();
+ scoped_refptr<ScopedRefPtrCountDerived> p2(raw2);
+ EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+
+ p1 = std::move(p2);
+ EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+ EXPECT_EQ(raw2, p1.get());
+ EXPECT_EQ(nullptr, p2.get());
+
+ // p2 goes out of scope.
+ }
+ EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+
+ // p1 goes out of scope.
+ }
+ EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(2, ScopedRefPtrCountBase::destructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountDerived::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveConstructor) {
+ ScopedRefPtrCountBase::reset_count();
+
+ {
+ ScopedRefPtrCountBase *raw = new ScopedRefPtrCountBase();
+ scoped_refptr<ScopedRefPtrCountBase> p1(raw);
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+ {
+ scoped_refptr<ScopedRefPtrCountBase> p2(std::move(p1));
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+ EXPECT_EQ(nullptr, p1.get());
+ EXPECT_EQ(raw, p2.get());
+
+ // p2 goes out of scope.
+ }
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+
+ // p1 goes out of scope.
+ }
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveConstructorDerived) {
+ ScopedRefPtrCountBase::reset_count();
+ ScopedRefPtrCountDerived::reset_count();
+
+ {
+ ScopedRefPtrCountDerived *raw1 = new ScopedRefPtrCountDerived();
+ scoped_refptr<ScopedRefPtrCountDerived> p1(raw1);
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+
+ {
+ scoped_refptr<ScopedRefPtrCountBase> p2(std::move(p1));
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+ EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+ EXPECT_EQ(nullptr, p1.get());
+ EXPECT_EQ(raw1, p2.get());
+
+ // p2 goes out of scope.
+ }
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountDerived::destructor_count());
+
+ // p1 goes out of scope.
+ }
+ EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+ EXPECT_EQ(1, ScopedRefPtrCountDerived::destructor_count());
+}
+
+TEST(RefCountedUnitTest, TestOverloadResolutionCopy) {
+ scoped_refptr<Derived> derived(new Derived);
+ scoped_refptr<SelfAssign> expected(derived);
+ EXPECT_EQ(expected, Overloaded(derived));
+
+ scoped_refptr<Other> other(new Other);
+ EXPECT_EQ(other, Overloaded(other));
+}
+
+TEST(RefCountedUnitTest, TestOverloadResolutionMove) {
+ scoped_refptr<Derived> derived(new Derived);
+ scoped_refptr<SelfAssign> expected(derived);
+ EXPECT_EQ(expected, Overloaded(std::move(derived)));
+
+ scoped_refptr<Other> other(new Other);
+ scoped_refptr<Other> other2(other);
+ EXPECT_EQ(other2, Overloaded(std::move(other)));
+}
diff --git a/libchrome/base/memory/scoped_policy.h b/libchrome/base/memory/scoped_policy.h
new file mode 100644
index 0000000..5dbf204
--- /dev/null
+++ b/libchrome/base/memory/scoped_policy.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SCOPED_POLICY_H_
+#define BASE_MEMORY_SCOPED_POLICY_H_
+
+namespace base {
+namespace scoped_policy {
+
+// Defines the ownership policy for a scoped object.
+enum OwnershipPolicy {
+  // The scoped object takes ownership of an object by taking over an existing
+  // ownership claim.
+  ASSUME,
+
+  // The scoped object will retain the object and any initial ownership is
+  // not changed.
+  RETAIN
+};
+
+} // namespace scoped_policy
+} // namespace base
+
+#endif // BASE_MEMORY_SCOPED_POLICY_H_
diff --git a/libchrome/base/memory/scoped_vector.h b/libchrome/base/memory/scoped_vector.h
new file mode 100644
index 0000000..f3581ea
--- /dev/null
+++ b/libchrome/base/memory/scoped_vector.h
@@ -0,0 +1,147 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SCOPED_VECTOR_H_
+#define BASE_MEMORY_SCOPED_VECTOR_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/stl_util.h"
+
+// ScopedVector wraps a vector deleting the elements from its
+// destructor.
+//
+// TODO(http://crbug.com/554289): DEPRECATED: Use std::vector instead (now that
+// we have support for moveable types inside containers).
+template <class T>
+class ScopedVector {
+ public:
+  typedef typename std::vector<T*>::allocator_type allocator_type;
+  typedef typename std::vector<T*>::size_type size_type;
+  typedef typename std::vector<T*>::difference_type difference_type;
+  typedef typename std::vector<T*>::pointer pointer;
+  typedef typename std::vector<T*>::const_pointer const_pointer;
+  typedef typename std::vector<T*>::reference reference;
+  typedef typename std::vector<T*>::const_reference const_reference;
+  typedef typename std::vector<T*>::value_type value_type;
+  typedef typename std::vector<T*>::iterator iterator;
+  typedef typename std::vector<T*>::const_iterator const_iterator;
+  typedef typename std::vector<T*>::reverse_iterator reverse_iterator;
+  typedef typename std::vector<T*>::const_reverse_iterator
+      const_reverse_iterator;
+
+  ScopedVector() {}
+  // Deletes every owned element.
+  ~ScopedVector() { clear(); }
+  // Move: |*this| starts empty, so the swap leaves |other| empty and moves
+  // all ownership here.
+  ScopedVector(ScopedVector&& other) { swap(other); }
+
+  // Move-assignment by swap: our previous elements end up in |rhs| and are
+  // deleted when |rhs| is destroyed.
+  ScopedVector& operator=(ScopedVector&& rhs) {
+    swap(rhs);
+    return *this;
+  }
+
+  reference operator[](size_t index) { return v_[index]; }
+  const_reference operator[](size_t index) const { return v_[index]; }
+
+  bool empty() const { return v_.empty(); }
+  size_t size() const { return v_.size(); }
+
+  reverse_iterator rbegin() { return v_.rbegin(); }
+  const_reverse_iterator rbegin() const { return v_.rbegin(); }
+  reverse_iterator rend() { return v_.rend(); }
+  const_reverse_iterator rend() const { return v_.rend(); }
+
+  iterator begin() { return v_.begin(); }
+  const_iterator begin() const { return v_.begin(); }
+  iterator end() { return v_.end(); }
+  const_iterator end() const { return v_.end(); }
+
+  const_reference front() const { return v_.front(); }
+  reference front() { return v_.front(); }
+  const_reference back() const { return v_.back(); }
+  reference back() { return v_.back(); }
+
+  // Both overloads take ownership of |elem|.
+  void push_back(T* elem) { v_.push_back(elem); }
+  void push_back(std::unique_ptr<T> elem) { v_.push_back(elem.release()); }
+
+  // Deletes the last element before removing it.
+  void pop_back() {
+    DCHECK(!empty());
+    delete v_.back();
+    v_.pop_back();
+  }
+
+  std::vector<T*>& get() { return v_; }
+  const std::vector<T*>& get() const { return v_; }
+  void swap(std::vector<T*>& other) { v_.swap(other); }
+  void swap(ScopedVector<T>& other) { v_.swap(other.v_); }
+  // Transfers ownership of all elements to |out| without deleting them.
+  // NOTE(review): any pointers previously held by |out| are dropped (not
+  // deleted) by the trailing v_.clear() — callers must pass an empty vector.
+  void release(std::vector<T*>* out) {
+    out->swap(v_);
+    v_.clear();
+  }
+
+  void reserve(size_t capacity) { v_.reserve(capacity); }
+
+  // Resize, deleting elements in the disappearing range if we are shrinking.
+  void resize(size_t new_size) {
+    if (v_.size() > new_size)
+      STLDeleteContainerPointers(v_.begin() + new_size, v_.end());
+    v_.resize(new_size);
+  }
+
+  // Takes ownership of the pointers in [begin, end); prior contents are
+  // replaced without being deleted (mirrors std::vector::assign).
+  template<typename InputIterator>
+  void assign(InputIterator begin, InputIterator end) {
+    v_.assign(begin, end);
+  }
+
+  // Deletes every element, then empties the vector.
+  void clear() { STLDeleteElements(&v_); }
+
+  // Like |clear()|, but doesn't delete any elements.
+  void weak_clear() { v_.clear(); }
+
+  // Lets the ScopedVector take ownership of |x|.
+  iterator insert(iterator position, T* x) {
+    return v_.insert(position, x);
+  }
+
+  iterator insert(iterator position, std::unique_ptr<T> x) {
+    return v_.insert(position, x.release());
+  }
+
+  // Lets the ScopedVector take ownership of elements in [first,last).
+  template<typename InputIterator>
+  void insert(iterator position, InputIterator first, InputIterator last) {
+    v_.insert(position, first, last);
+  }
+
+  iterator erase(iterator position) {
+    delete *position;
+    return v_.erase(position);
+  }
+
+  iterator erase(iterator first, iterator last) {
+    STLDeleteContainerPointers(first, last);
+    return v_.erase(first, last);
+  }
+
+  // Like |erase()|, but doesn't delete the element at |position|.
+  iterator weak_erase(iterator position) {
+    return v_.erase(position);
+  }
+
+  // Like |erase()|, but doesn't delete the elements in [first, last).
+  iterator weak_erase(iterator first, iterator last) {
+    return v_.erase(first, last);
+  }
+
+ private:
+  std::vector<T*> v_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedVector);
+};
+
+#endif // BASE_MEMORY_SCOPED_VECTOR_H_
diff --git a/libchrome/base/memory/scoped_vector_unittest.cc b/libchrome/base/memory/scoped_vector_unittest.cc
new file mode 100644
index 0000000..ea3dcdc
--- /dev/null
+++ b/libchrome/base/memory/scoped_vector_unittest.cc
@@ -0,0 +1,338 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_vector.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+// The LifeCycleObject notifies its Observer upon construction & destruction.
+class LifeCycleObject {
+ public:
+ class Observer {
+ public:
+ virtual void OnLifeCycleConstruct(LifeCycleObject* o) = 0;
+ virtual void OnLifeCycleDestroy(LifeCycleObject* o) = 0;
+
+ protected:
+ virtual ~Observer() {}
+ };
+
+ ~LifeCycleObject() {
+ if (observer_)
+ observer_->OnLifeCycleDestroy(this);
+ }
+
+ private:
+ friend class LifeCycleWatcher;
+
+ explicit LifeCycleObject(Observer* observer)
+ : observer_(observer) {
+ observer_->OnLifeCycleConstruct(this);
+ }
+
+ void DisconnectObserver() {
+ observer_ = nullptr;
+ }
+
+ Observer* observer_;
+
+ DISALLOW_COPY_AND_ASSIGN(LifeCycleObject);
+};
+
+// The life cycle states we care about for the purposes of testing ScopedVector
+// against objects.
+enum LifeCycleState {
+ LC_INITIAL,
+ LC_CONSTRUCTED,
+ LC_DESTROYED,
+};
+
+// Because we wish to watch the life cycle of an object being constructed and
+// destroyed, and further wish to test expectations against the state of that
+// object, we cannot save state in that object itself. Instead, we use this
+// pairing of the watcher, which observes the object and notifies of
+// construction & destruction. Since we also may be testing assumptions about
+// things not getting freed, this class also acts like a scoping object and
+// deletes the |constructed_life_cycle_object_|, if any, when the
+// LifeCycleWatcher is destroyed. To keep this simple, the only expected state
+// changes are:
+// INITIAL -> CONSTRUCTED -> DESTROYED.
+// Anything more complicated than that should start another test.
+class LifeCycleWatcher : public LifeCycleObject::Observer {
+ public:
+ LifeCycleWatcher() : life_cycle_state_(LC_INITIAL) {}
+ ~LifeCycleWatcher() override {
+ // Stop watching the watched object. Without this, the object's destructor
+ // will call into OnLifeCycleDestroy when destructed, which happens after
+ // this destructor has finished running.
+ if (constructed_life_cycle_object_)
+ constructed_life_cycle_object_->DisconnectObserver();
+ }
+
+ // Assert INITIAL -> CONSTRUCTED and no LifeCycleObject associated with this
+ // LifeCycleWatcher.
+ void OnLifeCycleConstruct(LifeCycleObject* object) override {
+ ASSERT_EQ(LC_INITIAL, life_cycle_state_);
+ ASSERT_EQ(NULL, constructed_life_cycle_object_.get());
+ life_cycle_state_ = LC_CONSTRUCTED;
+ constructed_life_cycle_object_.reset(object);
+ }
+
+ // Assert CONSTRUCTED -> DESTROYED and the |object| being destroyed is the
+ // same one we saw constructed.
+ void OnLifeCycleDestroy(LifeCycleObject* object) override {
+ ASSERT_EQ(LC_CONSTRUCTED, life_cycle_state_);
+ LifeCycleObject* constructed_life_cycle_object =
+ constructed_life_cycle_object_.release();
+ ASSERT_EQ(constructed_life_cycle_object, object);
+ life_cycle_state_ = LC_DESTROYED;
+ }
+
+ LifeCycleState life_cycle_state() const { return life_cycle_state_; }
+
+ // Factory method for creating a new LifeCycleObject tied to this
+ // LifeCycleWatcher.
+ LifeCycleObject* NewLifeCycleObject() {
+ return new LifeCycleObject(this);
+ }
+
+ // Returns true iff |object| is the same object that this watcher is tracking.
+ bool IsWatching(LifeCycleObject* object) const {
+ return object == constructed_life_cycle_object_.get();
+ }
+
+ private:
+ LifeCycleState life_cycle_state_;
+ std::unique_ptr<LifeCycleObject> constructed_life_cycle_object_;
+
+ DISALLOW_COPY_AND_ASSIGN(LifeCycleWatcher);
+};
+
+TEST(ScopedVectorTest, LifeCycleWatcher) {
+ LifeCycleWatcher watcher;
+ EXPECT_EQ(LC_INITIAL, watcher.life_cycle_state());
+ LifeCycleObject* object = watcher.NewLifeCycleObject();
+ EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
+ delete object;
+ EXPECT_EQ(LC_DESTROYED, watcher.life_cycle_state());
+}
+
+TEST(ScopedVectorTest, PopBack) {
+ LifeCycleWatcher watcher;
+ EXPECT_EQ(LC_INITIAL, watcher.life_cycle_state());
+ ScopedVector<LifeCycleObject> scoped_vector;
+ scoped_vector.push_back(watcher.NewLifeCycleObject());
+ EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
+ EXPECT_TRUE(watcher.IsWatching(scoped_vector.back()));
+ scoped_vector.pop_back();
+ EXPECT_EQ(LC_DESTROYED, watcher.life_cycle_state());
+ EXPECT_TRUE(scoped_vector.empty());
+}
+
+TEST(ScopedVectorTest, Clear) {
+ LifeCycleWatcher watcher;
+ EXPECT_EQ(LC_INITIAL, watcher.life_cycle_state());
+ ScopedVector<LifeCycleObject> scoped_vector;
+ scoped_vector.push_back(watcher.NewLifeCycleObject());
+ EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
+ EXPECT_TRUE(watcher.IsWatching(scoped_vector.back()));
+ scoped_vector.clear();
+ EXPECT_EQ(LC_DESTROYED, watcher.life_cycle_state());
+ EXPECT_TRUE(scoped_vector.empty());
+}
+
+TEST(ScopedVectorTest, WeakClear) {
+ LifeCycleWatcher watcher;
+ EXPECT_EQ(LC_INITIAL, watcher.life_cycle_state());
+ ScopedVector<LifeCycleObject> scoped_vector;
+ scoped_vector.push_back(watcher.NewLifeCycleObject());
+ EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
+ EXPECT_TRUE(watcher.IsWatching(scoped_vector.back()));
+ scoped_vector.weak_clear();
+ EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
+ EXPECT_TRUE(scoped_vector.empty());
+}
+
+TEST(ScopedVectorTest, ResizeShrink) {
+ LifeCycleWatcher first_watcher;
+ EXPECT_EQ(LC_INITIAL, first_watcher.life_cycle_state());
+ LifeCycleWatcher second_watcher;
+ EXPECT_EQ(LC_INITIAL, second_watcher.life_cycle_state());
+ ScopedVector<LifeCycleObject> scoped_vector;
+
+ scoped_vector.push_back(first_watcher.NewLifeCycleObject());
+ EXPECT_EQ(LC_CONSTRUCTED, first_watcher.life_cycle_state());
+ EXPECT_EQ(LC_INITIAL, second_watcher.life_cycle_state());
+ EXPECT_TRUE(first_watcher.IsWatching(scoped_vector[0]));
+ EXPECT_FALSE(second_watcher.IsWatching(scoped_vector[0]));
+
+ scoped_vector.push_back(second_watcher.NewLifeCycleObject());
+ EXPECT_EQ(LC_CONSTRUCTED, first_watcher.life_cycle_state());
+ EXPECT_EQ(LC_CONSTRUCTED, second_watcher.life_cycle_state());
+ EXPECT_FALSE(first_watcher.IsWatching(scoped_vector[1]));
+ EXPECT_TRUE(second_watcher.IsWatching(scoped_vector[1]));
+
+ // Test that shrinking a vector deletes elements in the disappearing range.
+ scoped_vector.resize(1);
+ EXPECT_EQ(LC_CONSTRUCTED, first_watcher.life_cycle_state());
+ EXPECT_EQ(LC_DESTROYED, second_watcher.life_cycle_state());
+ EXPECT_EQ(1u, scoped_vector.size());
+ EXPECT_TRUE(first_watcher.IsWatching(scoped_vector[0]));
+}
+
+TEST(ScopedVectorTest, ResizeGrow) {
+ LifeCycleWatcher watcher;
+ EXPECT_EQ(LC_INITIAL, watcher.life_cycle_state());
+ ScopedVector<LifeCycleObject> scoped_vector;
+ scoped_vector.push_back(watcher.NewLifeCycleObject());
+ EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
+ EXPECT_TRUE(watcher.IsWatching(scoped_vector.back()));
+
+ scoped_vector.resize(5);
+ EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
+ ASSERT_EQ(5u, scoped_vector.size());
+ EXPECT_TRUE(watcher.IsWatching(scoped_vector[0]));
+ EXPECT_FALSE(watcher.IsWatching(scoped_vector[1]));
+ EXPECT_FALSE(watcher.IsWatching(scoped_vector[2]));
+ EXPECT_FALSE(watcher.IsWatching(scoped_vector[3]));
+ EXPECT_FALSE(watcher.IsWatching(scoped_vector[4]));
+}
+
+TEST(ScopedVectorTest, Scope) {
+ LifeCycleWatcher watcher;
+ EXPECT_EQ(LC_INITIAL, watcher.life_cycle_state());
+ {
+ ScopedVector<LifeCycleObject> scoped_vector;
+ scoped_vector.push_back(watcher.NewLifeCycleObject());
+ EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
+ EXPECT_TRUE(watcher.IsWatching(scoped_vector.back()));
+ }
+ EXPECT_EQ(LC_DESTROYED, watcher.life_cycle_state());
+}
+
+TEST(ScopedVectorTest, MoveConstruct) {
+ LifeCycleWatcher watcher;
+ EXPECT_EQ(LC_INITIAL, watcher.life_cycle_state());
+ {
+ ScopedVector<LifeCycleObject> scoped_vector;
+ scoped_vector.push_back(watcher.NewLifeCycleObject());
+ EXPECT_FALSE(scoped_vector.empty());
+ EXPECT_TRUE(watcher.IsWatching(scoped_vector.back()));
+
+ ScopedVector<LifeCycleObject> scoped_vector_copy(std::move(scoped_vector));
+ EXPECT_TRUE(scoped_vector.empty());
+ EXPECT_FALSE(scoped_vector_copy.empty());
+ EXPECT_TRUE(watcher.IsWatching(scoped_vector_copy.back()));
+
+ EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
+ }
+ EXPECT_EQ(LC_DESTROYED, watcher.life_cycle_state());
+}
+
+TEST(ScopedVectorTest, MoveAssign) {
+ LifeCycleWatcher watcher;
+ EXPECT_EQ(LC_INITIAL, watcher.life_cycle_state());
+ {
+ ScopedVector<LifeCycleObject> scoped_vector;
+ scoped_vector.push_back(watcher.NewLifeCycleObject());
+ ScopedVector<LifeCycleObject> scoped_vector_assign;
+ EXPECT_FALSE(scoped_vector.empty());
+ EXPECT_TRUE(watcher.IsWatching(scoped_vector.back()));
+
+ scoped_vector_assign = std::move(scoped_vector);
+ EXPECT_TRUE(scoped_vector.empty());
+ EXPECT_FALSE(scoped_vector_assign.empty());
+ EXPECT_TRUE(watcher.IsWatching(scoped_vector_assign.back()));
+
+ EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
+ }
+ EXPECT_EQ(LC_DESTROYED, watcher.life_cycle_state());
+}
+
+class DeleteCounter {
+ public:
+ explicit DeleteCounter(int* deletes)
+ : deletes_(deletes) {
+ }
+
+ ~DeleteCounter() {
+ (*deletes_)++;
+ }
+
+ void VoidMethod0() {}
+
+ private:
+ int* const deletes_;
+
+ DISALLOW_COPY_AND_ASSIGN(DeleteCounter);
+};
+
+template <typename T>
+ScopedVector<T> PassThru(ScopedVector<T> scoper) {
+ return scoper;
+}
+
+TEST(ScopedVectorTest, Passed) {
+ int deletes = 0;
+ ScopedVector<DeleteCounter> deleter_vector;
+ deleter_vector.push_back(new DeleteCounter(&deletes));
+ EXPECT_EQ(0, deletes);
+ base::Callback<ScopedVector<DeleteCounter>(void)> callback =
+ base::Bind(&PassThru<DeleteCounter>, base::Passed(&deleter_vector));
+ EXPECT_EQ(0, deletes);
+ ScopedVector<DeleteCounter> result = callback.Run();
+ EXPECT_EQ(0, deletes);
+ result.clear();
+ EXPECT_EQ(1, deletes);
+};
+
+TEST(ScopedVectorTest, InsertRange) {
+ LifeCycleWatcher watchers[5];
+
+ std::vector<LifeCycleObject*> vec;
+ for(LifeCycleWatcher* it = watchers; it != watchers + arraysize(watchers);
+ ++it) {
+ EXPECT_EQ(LC_INITIAL, it->life_cycle_state());
+ vec.push_back(it->NewLifeCycleObject());
+ EXPECT_EQ(LC_CONSTRUCTED, it->life_cycle_state());
+ }
+ // Start scope for ScopedVector.
+ {
+ ScopedVector<LifeCycleObject> scoped_vector;
+ scoped_vector.insert(scoped_vector.end(), vec.begin() + 1, vec.begin() + 3);
+ for(LifeCycleWatcher* it = watchers; it != watchers + arraysize(watchers);
+ ++it)
+ EXPECT_EQ(LC_CONSTRUCTED, it->life_cycle_state());
+ }
+ for(LifeCycleWatcher* it = watchers; it != watchers + 1; ++it)
+ EXPECT_EQ(LC_CONSTRUCTED, it->life_cycle_state());
+ for(LifeCycleWatcher* it = watchers + 1; it != watchers + 3; ++it)
+ EXPECT_EQ(LC_DESTROYED, it->life_cycle_state());
+ for(LifeCycleWatcher* it = watchers + 3; it != watchers + arraysize(watchers);
+ ++it)
+ EXPECT_EQ(LC_CONSTRUCTED, it->life_cycle_state());
+}
+
+// Assertions for push_back(scoped_ptr).
+TEST(ScopedVectorTest, PushBackScopedPtr) {
+ int delete_counter = 0;
+ std::unique_ptr<DeleteCounter> elem(new DeleteCounter(&delete_counter));
+ EXPECT_EQ(0, delete_counter);
+ {
+ ScopedVector<DeleteCounter> v;
+ v.push_back(std::move(elem));
+ EXPECT_EQ(0, delete_counter);
+ }
+ EXPECT_EQ(1, delete_counter);
+}
+
+} // namespace
diff --git a/libchrome/base/memory/shared_memory.h b/libchrome/base/memory/shared_memory.h
new file mode 100644
index 0000000..e1c9fa7
--- /dev/null
+++ b/libchrome/base/memory/shared_memory.h
@@ -0,0 +1,287 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_H_
+#define BASE_MEMORY_SHARED_MEMORY_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory_handle.h"
+#include "base/process/process_handle.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+#include <stdio.h>
+#include <sys/types.h>
+#include <semaphore.h>
+#include "base/file_descriptor_posix.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#endif
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#endif
+
+namespace base {
+
+class FilePath;
+
+// Options for creating a shared memory object.
+struct BASE_EXPORT SharedMemoryCreateOptions {
+ SharedMemoryCreateOptions();
+
+#if !(defined(OS_MACOSX) && !defined(OS_IOS))
+ // DEPRECATED (crbug.com/345734):
+ // If NULL, the object is anonymous. This pointer is owned by the caller
+ // and must live through the call to Create().
+ const std::string* name_deprecated;
+
+ // DEPRECATED (crbug.com/345734):
+ // If true, and the shared memory already exists, Create() will open the
+ // existing shared memory and ignore the size parameter. If false,
+ // shared memory must not exist. This flag is meaningless unless
+ // name_deprecated is non-NULL.
+ bool open_existing_deprecated;
+#endif // !(defined(OS_MACOSX) && !defined(OS_IOS))
+
+ // Size of the shared memory object to be created.
+ // When opening an existing object, this has no effect.
+ size_t size;
+
+ // If true, mappings might need to be made executable later.
+ bool executable;
+
+ // If true, the file can be shared read-only to a process.
+ bool share_read_only;
+};
+
+// Platform abstraction for shared memory. Provides a C++ wrapper
+// around the OS primitive for a memory mapped file.
+class BASE_EXPORT SharedMemory {
+ public:
+ SharedMemory();
+
+#if defined(OS_WIN)
+ // Similar to the default constructor, except that this allows for
+ // calling LockDeprecated() to acquire the named mutex before either Create or
+ // Open are called on Windows.
+ explicit SharedMemory(const std::wstring& name);
+#endif
+
+ // Create a new SharedMemory object from an existing, open
+ // shared memory file.
+ //
+ // WARNING: This does not reduce the OS-level permissions on the handle; it
+ // only affects how the SharedMemory will be mmapped. Use
+ // ShareReadOnlyToProcess to drop permissions. TODO(jln,jyasskin): DCHECK
+ // that |read_only| matches the permissions of the handle.
+ SharedMemory(const SharedMemoryHandle& handle, bool read_only);
+
+ // Closes any open files.
+ ~SharedMemory();
+
+  // Return true iff the given handle is valid (i.e. not the distinguished
+ // invalid value; NULL for a HANDLE and -1 for a file descriptor)
+ static bool IsHandleValid(const SharedMemoryHandle& handle);
+
+ // Returns invalid handle (see comment above for exact definition).
+ static SharedMemoryHandle NULLHandle();
+
+ // Closes a shared memory handle.
+ static void CloseHandle(const SharedMemoryHandle& handle);
+
+ // Returns the maximum number of handles that can be open at once per process.
+ static size_t GetHandleLimit();
+
+  // Duplicates the underlying OS primitive. Returns NULLHandle() on failure.
+ // The caller is responsible for destroying the duplicated OS primitive.
+ static SharedMemoryHandle DuplicateHandle(const SharedMemoryHandle& handle);
+
+#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
+ // This method requires that the SharedMemoryHandle is backed by a POSIX fd.
+ static int GetFdFromSharedMemoryHandle(const SharedMemoryHandle& handle);
+#endif
+
+#if defined(OS_POSIX) && !defined(OS_ANDROID)
+ // Gets the size of the shared memory region referred to by |handle|.
+ // Returns false on a failure to determine the size. On success, populates the
+ // output variable |size|.
+ static bool GetSizeFromSharedMemoryHandle(const SharedMemoryHandle& handle,
+ size_t* size);
+#endif // defined(OS_POSIX) && !defined(OS_ANDROID)
+
+ // Creates a shared memory object as described by the options struct.
+ // Returns true on success and false on failure.
+ bool Create(const SharedMemoryCreateOptions& options);
+
+ // Creates and maps an anonymous shared memory segment of size size.
+ // Returns true on success and false on failure.
+ bool CreateAndMapAnonymous(size_t size);
+
+ // Creates an anonymous shared memory segment of size size.
+ // Returns true on success and false on failure.
+ bool CreateAnonymous(size_t size) {
+ SharedMemoryCreateOptions options;
+ options.size = size;
+ return Create(options);
+ }
+
+#if !defined(OS_MACOSX) || defined(OS_IOS)
+ // DEPRECATED (crbug.com/345734):
+ // Creates or opens a shared memory segment based on a name.
+ // If open_existing is true, and the shared memory already exists,
+ // opens the existing shared memory and ignores the size parameter.
+ // If open_existing is false, shared memory must not exist.
+ // size is the size of the block to be created.
+ // Returns true on success, false on failure.
+ bool CreateNamedDeprecated(
+ const std::string& name, bool open_existing, size_t size) {
+ SharedMemoryCreateOptions options;
+ options.name_deprecated = &name;
+ options.open_existing_deprecated = open_existing;
+ options.size = size;
+ return Create(options);
+ }
+
+ // Deletes resources associated with a shared memory segment based on name.
+ // Not all platforms require this call.
+ bool Delete(const std::string& name);
+
+ // Opens a shared memory segment based on a name.
+ // If read_only is true, opens for read-only access.
+ // Returns true on success, false on failure.
+ bool Open(const std::string& name, bool read_only);
+#endif // !defined(OS_MACOSX) || defined(OS_IOS)
+
+ // Maps the shared memory into the caller's address space.
+ // Returns true on success, false otherwise. The memory address
+ // is accessed via the memory() accessor. The mapped address is guaranteed to
+ // have an alignment of at least MAP_MINIMUM_ALIGNMENT. This method will fail
+ // if this object is currently mapped.
+ bool Map(size_t bytes) {
+ return MapAt(0, bytes);
+ }
+
+  // Same as above, but with |offset| to specify from the beginning of the
+  // shared memory block to map.
+  // |offset| must be aligned to the value of |SysInfo::VMAllocationGranularity()|.
+ bool MapAt(off_t offset, size_t bytes);
+ enum { MAP_MINIMUM_ALIGNMENT = 32 };
+
+ // Unmaps the shared memory from the caller's address space.
+ // Returns true if successful; returns false on error or if the
+ // memory is not mapped.
+ bool Unmap();
+
+ // The size requested when the map is first created.
+ size_t requested_size() const { return requested_size_; }
+
+ // The actual size of the mapped memory (may be larger than requested).
+ size_t mapped_size() const { return mapped_size_; }
+
+ // Gets a pointer to the opened memory space if it has been
+ // Mapped via Map(). Returns NULL if it is not mapped.
+ void* memory() const { return memory_; }
+
+ // Returns the underlying OS handle for this segment.
+ // Use of this handle for anything other than an opaque
+ // identifier is not portable.
+ SharedMemoryHandle handle() const;
+
+ // Closes the open shared memory segment. The memory will remain mapped if
+ // it was previously mapped.
+ // It is safe to call Close repeatedly.
+ void Close();
+
+ // Shares the shared memory to another process. Attempts to create a
+ // platform-specific new_handle which can be used in a remote process to read
+ // the shared memory file. new_handle is an output parameter to receive the
+ // handle for use in the remote process.
+ //
+ // |*this| must have been initialized using one of the Create*() or Open()
+ // methods with share_read_only=true. If it was constructed from a
+ // SharedMemoryHandle, this call will CHECK-fail.
+ //
+ // Returns true on success, false otherwise.
+ bool ShareReadOnlyToProcess(ProcessHandle process,
+ SharedMemoryHandle* new_handle) {
+ return ShareToProcessCommon(process, new_handle, false, SHARE_READONLY);
+ }
+
+ // Logically equivalent to:
+ // bool ok = ShareReadOnlyToProcess(process, new_handle);
+ // Close();
+ // return ok;
+ // Note that the memory is unmapped by calling this method, regardless of the
+ // return value.
+ bool GiveReadOnlyToProcess(ProcessHandle process,
+ SharedMemoryHandle* new_handle) {
+ return ShareToProcessCommon(process, new_handle, true, SHARE_READONLY);
+ }
+
+ // Shares the shared memory to another process. Attempts
+ // to create a platform-specific new_handle which can be
+ // used in a remote process to access the shared memory
+ // file. new_handle is an output parameter to receive
+ // the handle for use in the remote process.
+ // Returns true on success, false otherwise.
+ bool ShareToProcess(ProcessHandle process,
+ SharedMemoryHandle* new_handle) {
+ return ShareToProcessCommon(process, new_handle, false, SHARE_CURRENT_MODE);
+ }
+
+ // Logically equivalent to:
+ // bool ok = ShareToProcess(process, new_handle);
+ // Close();
+ // return ok;
+ // Note that the memory is unmapped by calling this method, regardless of the
+ // return value.
+ bool GiveToProcess(ProcessHandle process,
+ SharedMemoryHandle* new_handle) {
+ return ShareToProcessCommon(process, new_handle, true, SHARE_CURRENT_MODE);
+ }
+
+ private:
+#if defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_ANDROID) && \
+ !(defined(OS_MACOSX) && !defined(OS_IOS))
+ bool PrepareMapFile(ScopedFILE fp, ScopedFD readonly);
+ bool FilePathForMemoryName(const std::string& mem_name, FilePath* path);
+#endif
+ enum ShareMode {
+ SHARE_READONLY,
+ SHARE_CURRENT_MODE,
+ };
+ bool ShareToProcessCommon(ProcessHandle process,
+ SharedMemoryHandle* new_handle,
+ bool close_self,
+ ShareMode);
+
+#if defined(OS_WIN)
+  // If true, indicates this came from an external source so needs extra checks
+ // before being mapped.
+ bool external_section_;
+ std::wstring name_;
+ win::ScopedHandle mapped_file_;
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+ // The OS primitive that backs the shared memory region.
+ SharedMemoryHandle shm_;
+#elif defined(OS_POSIX)
+ int mapped_file_;
+ int readonly_mapped_file_;
+#endif
+ size_t mapped_size_;
+ void* memory_;
+ bool read_only_;
+ size_t requested_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedMemory);
+};
+} // namespace base
+
+#endif // BASE_MEMORY_SHARED_MEMORY_H_
diff --git a/libchrome/base/memory/shared_memory_android.cc b/libchrome/base/memory/shared_memory_android.cc
new file mode 100644
index 0000000..5ac6776
--- /dev/null
+++ b/libchrome/base/memory/shared_memory_android.cc
@@ -0,0 +1,72 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <stddef.h>
+#include <sys/mman.h>
+
+#include "base/logging.h"
+
+#if defined(__ANDROID__)
+#include <cutils/ashmem.h>
+#else
+#include "third_party/ashmem/ashmem.h"
+#endif
+
+namespace base {
+
+// For Android, we use ashmem to implement SharedMemory. ashmem_create_region
+// will automatically pin the region. We never explicitly call pin/unpin. When
+// all the file descriptors from different processes associated with the region
+// are closed, the memory buffer will go away.
+
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+ DCHECK_EQ(-1, mapped_file_ );
+
+ if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
+ return false;
+
+ // "name" is just a label in ashmem. It is visible in /proc/pid/maps.
+ mapped_file_ = ashmem_create_region(
+ options.name_deprecated == NULL ? "" : options.name_deprecated->c_str(),
+ options.size);
+ if (-1 == mapped_file_) {
+ DLOG(ERROR) << "Shared memory creation failed";
+ return false;
+ }
+
+ int err = ashmem_set_prot_region(mapped_file_,
+ PROT_READ | PROT_WRITE | PROT_EXEC);
+ if (err < 0) {
+ DLOG(ERROR) << "Error " << err << " when setting protection of ashmem";
+ return false;
+ }
+
+ // Android doesn't appear to have a way to drop write access on an ashmem
+ // segment for a single descriptor. http://crbug.com/320865
+ readonly_mapped_file_ = dup(mapped_file_);
+ if (-1 == readonly_mapped_file_) {
+ DPLOG(ERROR) << "dup() failed";
+ return false;
+ }
+
+ requested_size_ = options.size;
+
+ return true;
+}
+
+bool SharedMemory::Delete(const std::string&) {
+ // Like on Windows, this is intentionally returning true as ashmem will
+  // automatically release the resource when all FDs on it are closed.
+ return true;
+}
+
+bool SharedMemory::Open(const std::string&, bool /*read_only*/) {
+ // ashmem doesn't support name mapping
+ NOTIMPLEMENTED();
+ return false;
+}
+
+} // namespace base
diff --git a/libchrome/base/memory/shared_memory_handle.h b/libchrome/base/memory/shared_memory_handle.h
new file mode 100644
index 0000000..8eff26b
--- /dev/null
+++ b/libchrome/base/memory/shared_memory_handle.h
@@ -0,0 +1,164 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
+#define BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
+
+#include <stddef.h>
+
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include "base/process/process_handle.h"
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach.h>
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#elif defined(OS_POSIX)
+#include <sys/types.h>
+#include "base/file_descriptor_posix.h"
+#endif
+
+namespace base {
+
+class Pickle;
+
+// SharedMemoryHandle is a platform specific type which represents
+// the underlying OS handle to a shared memory segment.
+#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
+typedef FileDescriptor SharedMemoryHandle;
+#elif defined(OS_WIN)
+class BASE_EXPORT SharedMemoryHandle {
+ public:
+ // The default constructor returns an invalid SharedMemoryHandle.
+ SharedMemoryHandle();
+ SharedMemoryHandle(HANDLE h, base::ProcessId pid);
+
+ // Standard copy constructor. The new instance shares the underlying OS
+ // primitives.
+ SharedMemoryHandle(const SharedMemoryHandle& handle);
+
+ // Standard assignment operator. The updated instance shares the underlying
+ // OS primitives.
+ SharedMemoryHandle& operator=(const SharedMemoryHandle& handle);
+
+ // Comparison operators.
+ bool operator==(const SharedMemoryHandle& handle) const;
+ bool operator!=(const SharedMemoryHandle& handle) const;
+
+ // Closes the underlying OS resources.
+ void Close() const;
+
+ // Whether the underlying OS primitive is valid.
+ bool IsValid() const;
+
+ // Whether |pid_| is the same as the current process's id.
+ bool BelongsToCurrentProcess() const;
+
+ // Whether handle_ needs to be duplicated into the destination process when
+ // an instance of this class is passed over a Chrome IPC channel.
+ bool NeedsBrokering() const;
+
+ void SetOwnershipPassesToIPC(bool ownership_passes);
+ bool OwnershipPassesToIPC() const;
+
+ HANDLE GetHandle() const;
+ base::ProcessId GetPID() const;
+
+ private:
+ HANDLE handle_;
+
+ // The process in which |handle_| is valid and can be used. If |handle_| is
+ // invalid, this will be kNullProcessId.
+ base::ProcessId pid_;
+
+ // Whether passing this object as a parameter to an IPC message passes
+ // ownership of |handle_| to the IPC stack. This is meant to mimic the
+ // behavior of the |auto_close| parameter of FileDescriptor. This member only
+ // affects attachment-brokered SharedMemoryHandles.
+ // Defaults to |false|.
+ bool ownership_passes_to_ipc_;
+};
+#else
+class BASE_EXPORT SharedMemoryHandle {
+ public:
+ // The default constructor returns an invalid SharedMemoryHandle.
+ SharedMemoryHandle();
+
+ // Makes a Mach-based SharedMemoryHandle of the given size. On error,
+ // subsequent calls to IsValid() return false.
+ explicit SharedMemoryHandle(mach_vm_size_t size);
+
+ // Makes a Mach-based SharedMemoryHandle from |memory_object|, a named entry
+ // in the task with process id |pid|. The memory region has size |size|.
+ SharedMemoryHandle(mach_port_t memory_object,
+ mach_vm_size_t size,
+ base::ProcessId pid);
+
+ // Standard copy constructor. The new instance shares the underlying OS
+ // primitives.
+ SharedMemoryHandle(const SharedMemoryHandle& handle);
+
+ // Standard assignment operator. The updated instance shares the underlying
+ // OS primitives.
+ SharedMemoryHandle& operator=(const SharedMemoryHandle& handle);
+
+ // Duplicates the underlying OS resources.
+ SharedMemoryHandle Duplicate() const;
+
+ // Comparison operators.
+ bool operator==(const SharedMemoryHandle& handle) const;
+ bool operator!=(const SharedMemoryHandle& handle) const;
+
+ // Whether the underlying OS primitive is valid. Once the SharedMemoryHandle
+ // is backed by a valid OS primitive, it becomes immutable.
+ bool IsValid() const;
+
+ // Exposed so that the SharedMemoryHandle can be transported between
+ // processes.
+ mach_port_t GetMemoryObject() const;
+
+ // Returns false on a failure to determine the size. On success, populates the
+  // output variable |size|. Returns false if the handle is invalid.
+ bool GetSize(size_t* size) const;
+
+ // The SharedMemoryHandle must be valid.
+ // Returns whether the SharedMemoryHandle was successfully mapped into memory.
+ // On success, |memory| is an output variable that contains the start of the
+ // mapped memory.
+ bool MapAt(off_t offset, size_t bytes, void** memory, bool read_only);
+
+ // Closes the underlying OS primitive.
+ void Close() const;
+
+ void SetOwnershipPassesToIPC(bool ownership_passes);
+ bool OwnershipPassesToIPC() const;
+
+ private:
+ // Shared code between copy constructor and operator=.
+ void CopyRelevantData(const SharedMemoryHandle& handle);
+
+ mach_port_t memory_object_ = MACH_PORT_NULL;
+
+ // The size of the shared memory region when |type_| is MACH. Only
+ // relevant if |memory_object_| is not |MACH_PORT_NULL|.
+ mach_vm_size_t size_ = 0;
+
+ // The pid of the process in which |memory_object_| is usable. Only
+ // relevant if |memory_object_| is not |MACH_PORT_NULL|.
+ base::ProcessId pid_ = 0;
+
+ // Whether passing this object as a parameter to an IPC message passes
+ // ownership of |memory_object_| to the IPC stack. This is meant to mimic
+ // the behavior of the |auto_close| parameter of FileDescriptor.
+ // Defaults to |false|.
+ bool ownership_passes_to_ipc_ = false;
+};
+#endif
+
+} // namespace base
+
+#endif // BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
diff --git a/libchrome/base/memory/shared_memory_handle_mac.cc b/libchrome/base/memory/shared_memory_handle_mac.cc
new file mode 100644
index 0000000..ad470be
--- /dev/null
+++ b/libchrome/base/memory/shared_memory_handle_mac.cc
@@ -0,0 +1,146 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_handle.h"
+
+#include <mach/mach_vm.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "base/mac/mac_util.h"
+#include "base/posix/eintr_wrapper.h"
+
+namespace base {
+
+SharedMemoryHandle::SharedMemoryHandle() {}
+
+SharedMemoryHandle::SharedMemoryHandle(mach_vm_size_t size) {
+ mach_port_t named_right;
+ kern_return_t kr = mach_make_memory_entry_64(
+ mach_task_self(),
+ &size,
+ 0, // Address.
+ MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
+ &named_right,
+ MACH_PORT_NULL); // Parent handle.
+ if (kr != KERN_SUCCESS) {
+ memory_object_ = MACH_PORT_NULL;
+ return;
+ }
+
+ memory_object_ = named_right;
+ size_ = size;
+ pid_ = GetCurrentProcId();
+ ownership_passes_to_ipc_ = false;
+}
+
+SharedMemoryHandle::SharedMemoryHandle(mach_port_t memory_object,
+ mach_vm_size_t size,
+ base::ProcessId pid)
+ : memory_object_(memory_object),
+ size_(size),
+ pid_(pid),
+ ownership_passes_to_ipc_(false) {}
+
+SharedMemoryHandle::SharedMemoryHandle(const SharedMemoryHandle& handle) {
+ CopyRelevantData(handle);
+}
+
+SharedMemoryHandle& SharedMemoryHandle::operator=(
+ const SharedMemoryHandle& handle) {
+ if (this == &handle)
+ return *this;
+
+ CopyRelevantData(handle);
+ return *this;
+}
+
+SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
+ if (!IsValid())
+ return SharedMemoryHandle(MACH_PORT_NULL, 0, 0);
+
+ // Increment the ref count.
+ kern_return_t kr = mach_port_mod_refs(mach_task_self(), memory_object_,
+ MACH_PORT_RIGHT_SEND, 1);
+ DCHECK_EQ(kr, KERN_SUCCESS);
+ SharedMemoryHandle handle(*this);
+ handle.SetOwnershipPassesToIPC(true);
+ return handle;
+}
+
+bool SharedMemoryHandle::operator==(const SharedMemoryHandle& handle) const {
+ if (!IsValid() && !handle.IsValid())
+ return true;
+
+ return memory_object_ == handle.memory_object_ && size_ == handle.size_ &&
+ pid_ == handle.pid_;
+}
+
+bool SharedMemoryHandle::operator!=(const SharedMemoryHandle& handle) const {
+ return !(*this == handle);
+}
+
+bool SharedMemoryHandle::IsValid() const {
+ return memory_object_ != MACH_PORT_NULL;
+}
+
+mach_port_t SharedMemoryHandle::GetMemoryObject() const {
+ return memory_object_;
+}
+
+bool SharedMemoryHandle::GetSize(size_t* size) const {
+ if (!IsValid()) {
+ *size = 0;
+ return true;
+ }
+
+ *size = size_;
+ return true;
+}
+
+bool SharedMemoryHandle::MapAt(off_t offset,
+ size_t bytes,
+ void** memory,
+ bool read_only) {
+ DCHECK(IsValid());
+ DCHECK_EQ(pid_, GetCurrentProcId());
+ kern_return_t kr = mach_vm_map(
+ mach_task_self(),
+ reinterpret_cast<mach_vm_address_t*>(memory), // Output parameter
+ bytes,
+ 0, // Alignment mask
+ VM_FLAGS_ANYWHERE, memory_object_, offset,
+ FALSE, // Copy
+ VM_PROT_READ | (read_only ? 0 : VM_PROT_WRITE), // Current protection
+ VM_PROT_WRITE | VM_PROT_READ | VM_PROT_IS_MASK, // Maximum protection
+ VM_INHERIT_NONE);
+ return kr == KERN_SUCCESS;
+}
+
+void SharedMemoryHandle::Close() const {
+ if (!IsValid())
+ return;
+
+ kern_return_t kr = mach_port_deallocate(mach_task_self(), memory_object_);
+ if (kr != KERN_SUCCESS)
+ DPLOG(ERROR) << "Error deallocating mach port: " << kr;
+}
+
+void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
+ ownership_passes_to_ipc_ = ownership_passes;
+}
+
+bool SharedMemoryHandle::OwnershipPassesToIPC() const {
+ return ownership_passes_to_ipc_;
+}
+
+void SharedMemoryHandle::CopyRelevantData(const SharedMemoryHandle& handle) {
+ memory_object_ = handle.memory_object_;
+ size_ = handle.size_;
+ pid_ = handle.pid_;
+ ownership_passes_to_ipc_ = handle.ownership_passes_to_ipc_;
+}
+
+} // namespace base
diff --git a/libchrome/base/memory/shared_memory_mac.cc b/libchrome/base/memory/shared_memory_mac.cc
new file mode 100644
index 0000000..d15c632
--- /dev/null
+++ b/libchrome/base/memory/shared_memory_mac.cc
@@ -0,0 +1,218 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <mach/mach_vm.h>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "base/mac/mac_util.h"
+#include "base/mac/scoped_mach_vm.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/process/process_metrics.h"
+#include "base/profiler/scoped_tracker.h"
+#include "base/scoped_generic.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+// Returns whether the operation succeeded.
+// |new_handle| is an output variable, populated on success. The caller takes
+// ownership of the underlying memory object.
+// |handle| is the handle to copy.
+// If |handle| is already mapped, |mapped_addr| is its mapped location.
+// Otherwise, |mapped_addr| should be |nullptr|.
+bool MakeMachSharedMemoryHandleReadOnly(SharedMemoryHandle* new_handle,
+ SharedMemoryHandle handle,
+ void* mapped_addr) {
+ if (!handle.IsValid())
+ return false;
+
+ size_t size;
+ CHECK(handle.GetSize(&size));
+
+ // Map if necessary.
+ void* temp_addr = mapped_addr;
+ base::mac::ScopedMachVM scoper;
+ if (!temp_addr) {
+ // Intentionally lower current prot and max prot to |VM_PROT_READ|.
+ kern_return_t kr = mach_vm_map(
+ mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&temp_addr),
+ size, 0, VM_FLAGS_ANYWHERE, handle.GetMemoryObject(), 0, FALSE,
+ VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
+ if (kr != KERN_SUCCESS)
+ return false;
+ scoper.reset(reinterpret_cast<vm_address_t>(temp_addr),
+ mach_vm_round_page(size));
+ }
+
+ // Make new memory object.
+ mach_port_t named_right;
+ kern_return_t kr = mach_make_memory_entry_64(
+ mach_task_self(), reinterpret_cast<memory_object_size_t*>(&size),
+ reinterpret_cast<memory_object_offset_t>(temp_addr), VM_PROT_READ,
+ &named_right, MACH_PORT_NULL);
+ if (kr != KERN_SUCCESS)
+ return false;
+
+ *new_handle = SharedMemoryHandle(named_right, size, base::GetCurrentProcId());
+ return true;
+}
+
+} // namespace
+
+SharedMemoryCreateOptions::SharedMemoryCreateOptions()
+ : size(0),
+ executable(false),
+ share_read_only(false) {}
+
+SharedMemory::SharedMemory()
+ : mapped_size_(0), memory_(NULL), read_only_(false), requested_size_(0) {}
+
+SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
+ : shm_(handle),
+ mapped_size_(0),
+ memory_(NULL),
+ read_only_(read_only),
+ requested_size_(0) {}
+
+SharedMemory::~SharedMemory() {
+ Unmap();
+ Close();
+}
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+ return handle.IsValid();
+}
+
+// static
+SharedMemoryHandle SharedMemory::NULLHandle() {
+ return SharedMemoryHandle();
+}
+
+// static
+void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
+ handle.Close();
+}
+
+// static
+size_t SharedMemory::GetHandleLimit() {
+ // This should be effectively unlimited on OS X.
+ return 10000;
+}
+
+// static
+SharedMemoryHandle SharedMemory::DuplicateHandle(
+ const SharedMemoryHandle& handle) {
+ return handle.Duplicate();
+}
+
+bool SharedMemory::CreateAndMapAnonymous(size_t size) {
+ return CreateAnonymous(size) && Map(size);
+}
+
+// static
+bool SharedMemory::GetSizeFromSharedMemoryHandle(
+ const SharedMemoryHandle& handle,
+ size_t* size) {
+ return handle.GetSize(size);
+}
+
+// Chromium mostly only uses the unique/private shmem as specified by
+// "name == L"". The exception is in the StatsTable.
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+ // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+ // is fixed.
+ tracked_objects::ScopedTracker tracking_profile1(
+ FROM_HERE_WITH_EXPLICIT_FUNCTION(
+ "466437 SharedMemory::Create::Start"));
+ DCHECK(!shm_.IsValid());
+ if (options.size == 0) return false;
+
+ if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
+ return false;
+
+ shm_ = SharedMemoryHandle(options.size);
+ requested_size_ = options.size;
+ return shm_.IsValid();
+}
+
+bool SharedMemory::MapAt(off_t offset, size_t bytes) {
+ if (!shm_.IsValid())
+ return false;
+ if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
+ return false;
+ if (memory_)
+ return false;
+
+ bool success = shm_.MapAt(offset, bytes, &memory_, read_only_);
+ if (success) {
+ mapped_size_ = bytes;
+ DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
+ (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+ } else {
+ memory_ = NULL;
+ }
+
+ return success;
+}
+
+bool SharedMemory::Unmap() {
+ if (memory_ == NULL)
+ return false;
+
+ mach_vm_deallocate(mach_task_self(),
+ reinterpret_cast<mach_vm_address_t>(memory_),
+ mapped_size_);
+ memory_ = NULL;
+ mapped_size_ = 0;
+ return true;
+}
+
+SharedMemoryHandle SharedMemory::handle() const {
+ return shm_;
+}
+
+void SharedMemory::Close() {
+ shm_.Close();
+ shm_ = SharedMemoryHandle();
+}
+
+bool SharedMemory::ShareToProcessCommon(ProcessHandle /*process*/,
+ SharedMemoryHandle* new_handle,
+ bool close_self,
+ ShareMode share_mode) {
+ DCHECK(shm_.IsValid());
+
+ bool success = false;
+ switch (share_mode) {
+ case SHARE_CURRENT_MODE:
+ *new_handle = shm_.Duplicate();
+ success = true;
+ break;
+ case SHARE_READONLY:
+ success = MakeMachSharedMemoryHandleReadOnly(new_handle, shm_, memory_);
+ break;
+ }
+
+ if (success)
+ new_handle->SetOwnershipPassesToIPC(true);
+
+ if (close_self) {
+ Unmap();
+ Close();
+ }
+
+ return success;
+}
+
+} // namespace base
diff --git a/libchrome/base/memory/shared_memory_mac_unittest.cc b/libchrome/base/memory/shared_memory_mac_unittest.cc
new file mode 100644
index 0000000..c7d20ec
--- /dev/null
+++ b/libchrome/base/memory/shared_memory_mac_unittest.cc
@@ -0,0 +1,459 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#include <servers/bootstrap.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/command_line.h"
+#include "base/mac/mac_util.h"
+#include "base/mac/mach_logging.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory.h"
+#include "base/process/process_handle.h"
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/test_timeouts.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace base {
+
+namespace {
+
+// Gets the current and maximum protection levels of the memory region.
+// Returns whether the operation was successful.
+// |current| and |max| are output variables only populated on success.
+bool GetProtections(void* address, size_t size, int* current, int* max) {
+ vm_region_info_t region_info;
+ mach_vm_address_t mem_address = reinterpret_cast<mach_vm_address_t>(address);
+ mach_vm_size_t mem_size = size;
+ vm_region_basic_info_64 basic_info;
+
+ region_info = reinterpret_cast<vm_region_recurse_info_t>(&basic_info);
+ vm_region_flavor_t flavor = VM_REGION_BASIC_INFO_64;
+ memory_object_name_t memory_object;
+ mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
+
+ kern_return_t kr =
+ mach_vm_region(mach_task_self(), &mem_address, &mem_size, flavor,
+ region_info, &count, &memory_object);
+ if (kr != KERN_SUCCESS) {
+ MACH_LOG(ERROR, kr) << "Failed to get region info.";
+ return false;
+ }
+
+ *current = basic_info.protection;
+ *max = basic_info.max_protection;
+ return true;
+}
+
+// Creates a new SharedMemory with the given |size|, filled with 'a'.
+std::unique_ptr<SharedMemory> CreateSharedMemory(int size) {
+ SharedMemoryHandle shm(size);
+ if (!shm.IsValid()) {
+ LOG(ERROR) << "Failed to make SharedMemoryHandle";
+ return nullptr;
+ }
+ std::unique_ptr<SharedMemory> shared_memory(new SharedMemory(shm, false));
+ shared_memory->Map(size);
+ memset(shared_memory->memory(), 'a', size);
+ return shared_memory;
+}
+
+static const std::string g_service_switch_name = "service_name";
+
+// Structs used to pass a mach port from client to server.
+struct MachSendPortMessage {
+ mach_msg_header_t header;
+ mach_msg_body_t body;
+ mach_msg_port_descriptor_t data;
+};
+struct MachReceivePortMessage {
+ mach_msg_header_t header;
+ mach_msg_body_t body;
+ mach_msg_port_descriptor_t data;
+ mach_msg_trailer_t trailer;
+};
+
+// Makes the current process into a Mach Server with the given |service_name|.
+mach_port_t BecomeMachServer(const char* service_name) {
+ mach_port_t port;
+ kern_return_t kr = bootstrap_check_in(bootstrap_port, service_name, &port);
+ MACH_CHECK(kr == KERN_SUCCESS, kr) << "BecomeMachServer";
+ return port;
+}
+
+// Returns the mach port for the Mach Server with the given |service_name|.
+mach_port_t LookupServer(const char* service_name) {
+ mach_port_t server_port;
+ kern_return_t kr =
+ bootstrap_look_up(bootstrap_port, service_name, &server_port);
+ MACH_CHECK(kr == KERN_SUCCESS, kr) << "LookupServer";
+ return server_port;
+}
+
+mach_port_t MakeReceivingPort() {
+ mach_port_t client_port;
+ kern_return_t kr =
+ mach_port_allocate(mach_task_self(), // our task is acquiring
+ MACH_PORT_RIGHT_RECEIVE, // a new receive right
+ &client_port); // with this name
+ MACH_CHECK(kr == KERN_SUCCESS, kr) << "MakeReceivingPort";
+ return client_port;
+}
+
+// Blocks until a mach message is sent to |server_port|. This mach message
+// must contain a mach port. Returns that mach port.
+mach_port_t ReceiveMachPort(mach_port_t port_to_listen_on) {
+ MachReceivePortMessage recv_msg;
+ mach_msg_header_t* recv_hdr = &(recv_msg.header);
+ recv_hdr->msgh_local_port = port_to_listen_on;
+ recv_hdr->msgh_size = sizeof(recv_msg);
+ kern_return_t kr =
+ mach_msg(recv_hdr, // message buffer
+ MACH_RCV_MSG, // option indicating service
+ 0, // send size
+ recv_hdr->msgh_size, // size of header + body
+ port_to_listen_on, // receive name
+ MACH_MSG_TIMEOUT_NONE, // no timeout, wait forever
+ MACH_PORT_NULL); // no notification port
+ MACH_CHECK(kr == KERN_SUCCESS, kr) << "ReceiveMachPort";
+ mach_port_t other_task_port = recv_msg.data.name;
+ return other_task_port;
+}
+
+// Passes a copy of the send right of |port_to_send| to |receiving_port|.
+void SendMachPort(mach_port_t receiving_port,
+ mach_port_t port_to_send,
+ int disposition) {
+ MachSendPortMessage send_msg;
+ mach_msg_header_t* send_hdr;
+ send_hdr = &(send_msg.header);
+ send_hdr->msgh_bits =
+ MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0) | MACH_MSGH_BITS_COMPLEX;
+ send_hdr->msgh_size = sizeof(send_msg);
+ send_hdr->msgh_remote_port = receiving_port;
+ send_hdr->msgh_local_port = MACH_PORT_NULL;
+ send_hdr->msgh_reserved = 0;
+ send_hdr->msgh_id = 0;
+ send_msg.body.msgh_descriptor_count = 1;
+ send_msg.data.name = port_to_send;
+ send_msg.data.disposition = disposition;
+ send_msg.data.type = MACH_MSG_PORT_DESCRIPTOR;
+ int kr = mach_msg(send_hdr, // message buffer
+ MACH_SEND_MSG, // option indicating send
+ send_hdr->msgh_size, // size of header + body
+ 0, // receive limit
+ MACH_PORT_NULL, // receive name
+ MACH_MSG_TIMEOUT_NONE, // no timeout, wait forever
+ MACH_PORT_NULL); // no notification port
+ MACH_CHECK(kr == KERN_SUCCESS, kr) << "SendMachPort";
+}
+
+std::string CreateRandomServiceName() {
+ return StringPrintf("SharedMemoryMacMultiProcessTest.%llu", RandUint64());
+}
+
+// Sets up the mach communication ports with the server. Returns a port to which
+// the server will send mach objects.
+mach_port_t CommonChildProcessSetUp() {
+ CommandLine cmd_line = *CommandLine::ForCurrentProcess();
+ std::string service_name =
+ cmd_line.GetSwitchValueASCII(g_service_switch_name);
+ mac::ScopedMachSendRight server_port(LookupServer(service_name.c_str()));
+ mach_port_t client_port = MakeReceivingPort();
+
+ // Send the port that this process is listening on to the server.
+ SendMachPort(server_port.get(), client_port, MACH_MSG_TYPE_MAKE_SEND);
+ return client_port;
+}
+
+// The number of active names in the current task's port name space.
+mach_msg_type_number_t GetActiveNameCount() {
+ mach_port_name_array_t name_array;
+ mach_msg_type_number_t names_count;
+ mach_port_type_array_t type_array;
+ mach_msg_type_number_t types_count;
+ kern_return_t kr = mach_port_names(mach_task_self(), &name_array,
+ &names_count, &type_array, &types_count);
+ MACH_CHECK(kr == KERN_SUCCESS, kr) << "GetActiveNameCount";
+ return names_count;
+}
+
+} // namespace
+
+class SharedMemoryMacMultiProcessTest : public MultiProcessTest {
+ public:
+ SharedMemoryMacMultiProcessTest() {}
+
+ CommandLine MakeCmdLine(const std::string& procname) override {
+ CommandLine command_line = MultiProcessTest::MakeCmdLine(procname);
+ // Pass the service name to the child process.
+ command_line.AppendSwitchASCII(g_service_switch_name, service_name_);
+ return command_line;
+ }
+
+ void SetUpChild(const std::string& name) {
+ // Make a random service name so that this test doesn't conflict with other
+ // similar tests.
+ service_name_ = CreateRandomServiceName();
+ server_port_.reset(BecomeMachServer(service_name_.c_str()));
+ child_process_ = SpawnChild(name);
+ client_port_.reset(ReceiveMachPort(server_port_.get()));
+ }
+
+ static const int s_memory_size = 99999;
+
+ protected:
+ std::string service_name_;
+
+ // A port on which the main process listens for mach messages from the child
+ // process.
+ mac::ScopedMachReceiveRight server_port_;
+
+ // A port on which the child process listens for mach messages from the main
+ // process.
+ mac::ScopedMachSendRight client_port_;
+
+ base::Process child_process_;
+ DISALLOW_COPY_AND_ASSIGN(SharedMemoryMacMultiProcessTest);
+};
+
+// Tests that content written to shared memory in the server process can be read
+// by the child process.
+TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemory) {
+ SetUpChild("MachBasedSharedMemoryClient");
+
+ std::unique_ptr<SharedMemory> shared_memory(
+ CreateSharedMemory(s_memory_size));
+
+ // Send the underlying memory object to the client process.
+ SendMachPort(client_port_.get(), shared_memory->handle().GetMemoryObject(),
+ MACH_MSG_TYPE_COPY_SEND);
+ int rv = -1;
+ ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+ TestTimeouts::action_timeout(), &rv));
+ EXPECT_EQ(0, rv);
+}
+
+MULTIPROCESS_TEST_MAIN(MachBasedSharedMemoryClient) {
+ mac::ScopedMachReceiveRight client_port(CommonChildProcessSetUp());
+ // The next mach port should be for a memory object.
+ mach_port_t memory_object = ReceiveMachPort(client_port.get());
+ SharedMemoryHandle shm(memory_object,
+ SharedMemoryMacMultiProcessTest::s_memory_size,
+ GetCurrentProcId());
+ SharedMemory shared_memory(shm, false);
+ shared_memory.Map(SharedMemoryMacMultiProcessTest::s_memory_size);
+ const char* start = static_cast<const char*>(shared_memory.memory());
+ for (int i = 0; i < SharedMemoryMacMultiProcessTest::s_memory_size; ++i) {
+ DCHECK_EQ(start[i], 'a');
+ }
+ return 0;
+}
+
+// Tests that mapping shared memory with an offset works correctly.
+TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemoryWithOffset) {
+ SetUpChild("MachBasedSharedMemoryWithOffsetClient");
+
+ SharedMemoryHandle shm(s_memory_size);
+ ASSERT_TRUE(shm.IsValid());
+ SharedMemory shared_memory(shm, false);
+ shared_memory.Map(s_memory_size);
+
+ size_t page_size = SysInfo::VMAllocationGranularity();
+ char* start = static_cast<char*>(shared_memory.memory());
+ memset(start, 'a', page_size);
+ memset(start + page_size, 'b', page_size);
+ memset(start + 2 * page_size, 'c', page_size);
+
+ // Send the underlying memory object to the client process.
+ SendMachPort(
+ client_port_.get(), shm.GetMemoryObject(), MACH_MSG_TYPE_COPY_SEND);
+ int rv = -1;
+ ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+ TestTimeouts::action_timeout(), &rv));
+ EXPECT_EQ(0, rv);
+}
+
+MULTIPROCESS_TEST_MAIN(MachBasedSharedMemoryWithOffsetClient) {
+ mac::ScopedMachReceiveRight client_port(CommonChildProcessSetUp());
+ // The next mach port should be for a memory object.
+ mach_port_t memory_object = ReceiveMachPort(client_port.get());
+ SharedMemoryHandle shm(memory_object,
+ SharedMemoryMacMultiProcessTest::s_memory_size,
+ GetCurrentProcId());
+ SharedMemory shared_memory(shm, false);
+ size_t page_size = SysInfo::VMAllocationGranularity();
+ shared_memory.MapAt(page_size, 2 * page_size);
+ const char* start = static_cast<const char*>(shared_memory.memory());
+ for (size_t i = 0; i < page_size; ++i) {
+ DCHECK_EQ(start[i], 'b');
+ }
+ for (size_t i = page_size; i < 2 * page_size; ++i) {
+ DCHECK_EQ(start[i], 'c');
+ }
+ return 0;
+}
+
+// Tests that duplication and closing has the right effect on Mach reference
+// counts.
+TEST_F(SharedMemoryMacMultiProcessTest, MachDuplicateAndClose) {
+ mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+ // Making a new SharedMemoryHandle increments the name count.
+ SharedMemoryHandle shm(s_memory_size);
+ ASSERT_TRUE(shm.IsValid());
+ EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+ // Duplicating the SharedMemoryHandle increments the ref count, but doesn't
+ // make a new name.
+ shm.Duplicate();
+ EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+ // Closing the SharedMemoryHandle decrements the ref count. The first time has
+ // no effect.
+ shm.Close();
+ EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+ // Closing the SharedMemoryHandle decrements the ref count. The second time
+ // destroys the port.
+ shm.Close();
+ EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that Mach shared memory can be mapped and unmapped.
+TEST_F(SharedMemoryMacMultiProcessTest, MachUnmapMap) {
+ mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+ std::unique_ptr<SharedMemory> shared_memory =
+ CreateSharedMemory(s_memory_size);
+ ASSERT_TRUE(shared_memory->Unmap());
+ ASSERT_TRUE(shared_memory->Map(s_memory_size));
+ shared_memory.reset();
+ EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that passing a SharedMemoryHandle to a SharedMemory object also passes
+// ownership, and that destroying the SharedMemory closes the SharedMemoryHandle
+// as well.
+TEST_F(SharedMemoryMacMultiProcessTest, MachSharedMemoryTakesOwnership) {
+ mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+ // Making a new SharedMemoryHandle increments the name count.
+ SharedMemoryHandle shm(s_memory_size);
+ ASSERT_TRUE(shm.IsValid());
+ EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+ // Name count doesn't change when mapping the memory.
+ std::unique_ptr<SharedMemory> shared_memory(new SharedMemory(shm, false));
+ shared_memory->Map(s_memory_size);
+ EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+ // Destroying the SharedMemory object frees the resource.
+ shared_memory.reset();
+ EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that the read-only flag works.
+TEST_F(SharedMemoryMacMultiProcessTest, MachReadOnly) {
+ std::unique_ptr<SharedMemory> shared_memory(
+ CreateSharedMemory(s_memory_size));
+
+ SharedMemoryHandle shm2 = shared_memory->handle().Duplicate();
+ ASSERT_TRUE(shm2.IsValid());
+ SharedMemory shared_memory2(shm2, true);
+ shared_memory2.Map(s_memory_size);
+ ASSERT_DEATH(memset(shared_memory2.memory(), 'b', s_memory_size), "");
+}
+
+// Tests that the method ShareToProcess() works.
+TEST_F(SharedMemoryMacMultiProcessTest, MachShareToProcess) {
+ mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+ {
+ std::unique_ptr<SharedMemory> shared_memory(
+ CreateSharedMemory(s_memory_size));
+
+ SharedMemoryHandle shm2;
+ ASSERT_TRUE(shared_memory->ShareToProcess(GetCurrentProcId(), &shm2));
+ ASSERT_TRUE(shm2.IsValid());
+ SharedMemory shared_memory2(shm2, true);
+ shared_memory2.Map(s_memory_size);
+
+ ASSERT_EQ(0, memcmp(shared_memory->memory(), shared_memory2.memory(),
+ s_memory_size));
+ }
+
+ EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that the method ShareReadOnlyToProcess() creates a memory object that
+// is read only.
+TEST_F(SharedMemoryMacMultiProcessTest, MachShareToProcessReadonly) {
+ std::unique_ptr<SharedMemory> shared_memory(
+ CreateSharedMemory(s_memory_size));
+
+ // Check the protection levels.
+ int current_prot, max_prot;
+ ASSERT_TRUE(GetProtections(shared_memory->memory(),
+ shared_memory->mapped_size(), &current_prot,
+ &max_prot));
+ ASSERT_EQ(VM_PROT_READ | VM_PROT_WRITE, current_prot);
+ ASSERT_EQ(VM_PROT_READ | VM_PROT_WRITE, max_prot);
+
+ // Make a new memory object.
+ SharedMemoryHandle shm2;
+ ASSERT_TRUE(shared_memory->ShareReadOnlyToProcess(GetCurrentProcId(), &shm2));
+ ASSERT_TRUE(shm2.IsValid());
+
+ // Mapping with |readonly| set to |false| should fail.
+ SharedMemory shared_memory2(shm2, false);
+ shared_memory2.Map(s_memory_size);
+ ASSERT_EQ(nullptr, shared_memory2.memory());
+
+ // Now trying mapping with |readonly| set to |true|.
+ SharedMemory shared_memory3(shm2.Duplicate(), true);
+ shared_memory3.Map(s_memory_size);
+ ASSERT_NE(nullptr, shared_memory3.memory());
+
+ // Check the protection levels.
+ ASSERT_TRUE(GetProtections(shared_memory3.memory(),
+ shared_memory3.mapped_size(), &current_prot,
+ &max_prot));
+ ASSERT_EQ(VM_PROT_READ, current_prot);
+ ASSERT_EQ(VM_PROT_READ, max_prot);
+
+ // The memory should still be readonly, since the underlying memory object
+ // is readonly.
+ ASSERT_DEATH(memset(shared_memory2.memory(), 'b', s_memory_size), "");
+}
+
+// Tests that the method ShareReadOnlyToProcess() doesn't leak.
+TEST_F(SharedMemoryMacMultiProcessTest, MachShareToProcessReadonlyLeak) {
+ mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+ {
+ std::unique_ptr<SharedMemory> shared_memory(
+ CreateSharedMemory(s_memory_size));
+
+ SharedMemoryHandle shm2;
+ ASSERT_TRUE(
+ shared_memory->ShareReadOnlyToProcess(GetCurrentProcId(), &shm2));
+ ASSERT_TRUE(shm2.IsValid());
+
+ // Intentionally map with |readonly| set to |false|.
+ SharedMemory shared_memory2(shm2, false);
+ shared_memory2.Map(s_memory_size);
+ }
+
+ EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+} // namespace base
diff --git a/libchrome/base/memory/shared_memory_posix.cc b/libchrome/base/memory/shared_memory_posix.cc
new file mode 100644
index 0000000..7e94223
--- /dev/null
+++ b/libchrome/base/memory/shared_memory_posix.cc
@@ -0,0 +1,505 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/posix/safe_strerror.h"
+#include "base/process/process_metrics.h"
+#include "base/profiler/scoped_tracker.h"
+#include "base/scoped_generic.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include "base/os_compat_android.h"
+#include "third_party/ashmem/ashmem.h"
+#elif defined(__ANDROID__)
+#include <cutils/ashmem.h>
+#endif
+
+namespace base {
+
+namespace {
+
+struct ScopedPathUnlinkerTraits {
+ static FilePath* InvalidValue() { return nullptr; }
+
+ static void Free(FilePath* path) {
+ // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+ // is fixed.
+ tracked_objects::ScopedTracker tracking_profile(
+ FROM_HERE_WITH_EXPLICIT_FUNCTION(
+ "466437 SharedMemory::Create::Unlink"));
+ if (unlink(path->value().c_str()))
+ PLOG(WARNING) << "unlink";
+ }
+};
+
+// Unlinks the FilePath when the object is destroyed.
+typedef ScopedGeneric<FilePath*, ScopedPathUnlinkerTraits> ScopedPathUnlinker;
+
+#if !defined(OS_ANDROID) && !defined(__ANDROID__)
+// Makes a temporary file, fdopens it, and then unlinks it. |fp| is populated
+// with the fdopened FILE. |readonly_fd| is populated with the opened fd if
+// options.share_read_only is true. |path| is populated with the location of
+// the file before it was unlinked.
+// Returns false if there's an unhandled failure.
+bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
+ ScopedFILE* fp,
+ ScopedFD* readonly_fd,
+ FilePath* path) {
+ // It doesn't make sense to have an open-existing private piece of shmem
+ DCHECK(!options.open_existing_deprecated);
+ // Q: Why not use the shm_open() etc. APIs?
+ // A: Because they're limited to 4mb on OS X. FFFFFFFUUUUUUUUUUU
+ FilePath directory;
+ ScopedPathUnlinker path_unlinker;
+ if (GetShmemTempDir(options.executable, &directory)) {
+ // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+ // is fixed.
+ tracked_objects::ScopedTracker tracking_profile(
+ FROM_HERE_WITH_EXPLICIT_FUNCTION(
+ "466437 SharedMemory::Create::OpenTemporaryFile"));
+ fp->reset(base::CreateAndOpenTemporaryFileInDir(directory, path));
+
+ // Deleting the file prevents anyone else from mapping it in (making it
+ // private), and prevents the need for cleanup (once the last fd is
+ // closed, it is truly freed).
+ if (*fp)
+ path_unlinker.reset(path);
+ }
+
+ if (*fp) {
+ if (options.share_read_only) {
+ // TODO(erikchen): Remove ScopedTracker below once
+ // http://crbug.com/466437 is fixed.
+ tracked_objects::ScopedTracker tracking_profile(
+ FROM_HERE_WITH_EXPLICIT_FUNCTION(
+ "466437 SharedMemory::Create::OpenReadonly"));
+ // Also open as readonly so that we can ShareReadOnlyToProcess.
+ readonly_fd->reset(HANDLE_EINTR(open(path->value().c_str(), O_RDONLY)));
+ if (!readonly_fd->is_valid()) {
+ DPLOG(ERROR) << "open(\"" << path->value() << "\", O_RDONLY) failed";
+ fp->reset();
+ return false;
+ }
+ }
+ }
+ return true;
+}
+#endif // !defined(OS_ANDROID) && !defined(__ANDROID__)
+}
+
+SharedMemoryCreateOptions::SharedMemoryCreateOptions()
+ : name_deprecated(nullptr),
+ open_existing_deprecated(false),
+ size(0),
+ executable(false),
+ share_read_only(false) {}
+
+SharedMemory::SharedMemory()
+ : mapped_file_(-1),
+ readonly_mapped_file_(-1),
+ mapped_size_(0),
+ memory_(NULL),
+ read_only_(false),
+ requested_size_(0) {
+}
+
+SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
+ : mapped_file_(handle.fd),
+ readonly_mapped_file_(-1),
+ mapped_size_(0),
+ memory_(NULL),
+ read_only_(read_only),
+ requested_size_(0) {
+}
+
+SharedMemory::~SharedMemory() {
+ Unmap();
+ Close();
+}
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+ return handle.fd >= 0;
+}
+
+// static
+SharedMemoryHandle SharedMemory::NULLHandle() {
+ return SharedMemoryHandle();
+}
+
+// static
+void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
+ DCHECK_GE(handle.fd, 0);
+ if (IGNORE_EINTR(close(handle.fd)) < 0)
+ DPLOG(ERROR) << "close";
+}
+
+// static
+size_t SharedMemory::GetHandleLimit() {
+ return base::GetMaxFds();
+}
+
+// static
+SharedMemoryHandle SharedMemory::DuplicateHandle(
+ const SharedMemoryHandle& handle) {
+ int duped_handle = HANDLE_EINTR(dup(handle.fd));
+ if (duped_handle < 0)
+ return base::SharedMemory::NULLHandle();
+ return base::FileDescriptor(duped_handle, true);
+}
+
+// static
+int SharedMemory::GetFdFromSharedMemoryHandle(
+ const SharedMemoryHandle& handle) {
+ return handle.fd;
+}
+
+bool SharedMemory::CreateAndMapAnonymous(size_t size) {
+ return CreateAnonymous(size) && Map(size);
+}
+
+#if !defined(OS_ANDROID) && !defined(__ANDROID__)
+// static
+bool SharedMemory::GetSizeFromSharedMemoryHandle(
+ const SharedMemoryHandle& handle,
+ size_t* size) {
+ struct stat st;
+ if (fstat(handle.fd, &st) != 0)
+ return false;
+ if (st.st_size < 0)
+ return false;
+ *size = st.st_size;
+ return true;
+}
+
+// Chromium mostly only uses the unique/private shmem as specified by
+// "name == L"". The exception is in the StatsTable.
+// TODO(jrg): there is no way to "clean up" all unused named shmem if
+// we restart from a crash. (That isn't a new problem, but it is a problem.)
+// In case we want to delete it later, it may be useful to save the value
+// of mem_filename after FilePathForMemoryName().
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+ // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+ // is fixed.
+ tracked_objects::ScopedTracker tracking_profile1(
+ FROM_HERE_WITH_EXPLICIT_FUNCTION(
+ "466437 SharedMemory::Create::Start"));
+ DCHECK_EQ(-1, mapped_file_);
+ if (options.size == 0) return false;
+
+ if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
+ return false;
+
+ // This function theoretically can block on the disk, but realistically
+ // the temporary files we create will just go into the buffer cache
+ // and be deleted before they ever make it out to disk.
+ base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+ ScopedFILE fp;
+ bool fix_size = true;
+ ScopedFD readonly_fd;
+
+ FilePath path;
+ if (options.name_deprecated == NULL || options.name_deprecated->empty()) {
+ bool result =
+ CreateAnonymousSharedMemory(options, &fp, &readonly_fd, &path);
+ if (!result)
+ return false;
+ } else {
+ if (!FilePathForMemoryName(*options.name_deprecated, &path))
+ return false;
+
+ // Make sure that the file is opened without any permission
+ // to other users on the system.
+ const mode_t kOwnerOnly = S_IRUSR | S_IWUSR;
+
+ // First, try to create the file.
+ int fd = HANDLE_EINTR(
+ open(path.value().c_str(), O_RDWR | O_CREAT | O_EXCL, kOwnerOnly));
+ if (fd == -1 && options.open_existing_deprecated) {
+ // If this doesn't work, try and open an existing file in append mode.
+ // Opening an existing file in a world writable directory has two main
+ // security implications:
+ // - Attackers could plant a file under their control, so ownership of
+ // the file is checked below.
+ // - Attackers could plant a symbolic link so that an unexpected file
+ // is opened, so O_NOFOLLOW is passed to open().
+ fd = HANDLE_EINTR(
+ open(path.value().c_str(), O_RDWR | O_APPEND | O_NOFOLLOW));
+
+ // Check that the current user owns the file.
+ // If uid != euid, then a more complex permission model is used and this
+ // API is not appropriate.
+ const uid_t real_uid = getuid();
+ const uid_t effective_uid = geteuid();
+ struct stat sb;
+ if (fd >= 0 &&
+ (fstat(fd, &sb) != 0 || sb.st_uid != real_uid ||
+ sb.st_uid != effective_uid)) {
+ LOG(ERROR) <<
+ "Invalid owner when opening existing shared memory file.";
+ close(fd);
+ return false;
+ }
+
+ // An existing file was opened, so its size should not be fixed.
+ fix_size = false;
+ }
+
+ if (options.share_read_only) {
+ // Also open as readonly so that we can ShareReadOnlyToProcess.
+ readonly_fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
+ if (!readonly_fd.is_valid()) {
+ DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
+ close(fd);
+ fd = -1;
+ return false;
+ }
+ }
+ if (fd >= 0) {
+ // "a+" is always appropriate: if it's a new file, a+ is similar to w+.
+ fp.reset(fdopen(fd, "a+"));
+ }
+ }
+ if (fp && fix_size) {
+ // Get current size.
+ struct stat stat;
+ if (fstat(fileno(fp.get()), &stat) != 0)
+ return false;
+ const size_t current_size = stat.st_size;
+ if (current_size != options.size) {
+ if (HANDLE_EINTR(ftruncate(fileno(fp.get()), options.size)) != 0)
+ return false;
+ }
+ requested_size_ = options.size;
+ }
+ if (fp == nullptr) {
+ PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
+ FilePath dir = path.DirName();
+ if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
+ PLOG(ERROR) << "Unable to access(W_OK|X_OK) " << dir.value();
+ if (dir.value() == "/dev/shm") {
+ LOG(FATAL) << "This is frequently caused by incorrect permissions on "
+ << "/dev/shm. Try 'sudo chmod 1777 /dev/shm' to fix.";
+ }
+ }
+ return false;
+ }
+
+ return PrepareMapFile(std::move(fp), std::move(readonly_fd));
+}
+
+// Our current implementation of shmem is with mmap()ing of files.
+// These files need to be deleted explicitly.
+// In practice this call is only needed for unit tests.
+bool SharedMemory::Delete(const std::string& name) {
+ FilePath path;
+ if (!FilePathForMemoryName(name, &path))
+ return false;
+
+ if (PathExists(path))
+ return base::DeleteFile(path, false);
+
+ // Doesn't exist, so success.
+ return true;
+}
+
+bool SharedMemory::Open(const std::string& name, bool read_only) {
+ FilePath path;
+ if (!FilePathForMemoryName(name, &path))
+ return false;
+
+ read_only_ = read_only;
+
+ const char *mode = read_only ? "r" : "r+";
+ ScopedFILE fp(base::OpenFile(path, mode));
+ ScopedFD readonly_fd(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
+ if (!readonly_fd.is_valid()) {
+ DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
+ return false;
+ }
+ return PrepareMapFile(std::move(fp), std::move(readonly_fd));
+}
+#endif // !defined(OS_ANDROID) && !defined(__ANDROID__)
+
+bool SharedMemory::MapAt(off_t offset, size_t bytes) {
+ if (mapped_file_ == -1)
+ return false;
+
+ if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
+ return false;
+
+ if (memory_)
+ return false;
+
+#if defined(OS_ANDROID) || defined(__ANDROID__)
+ // On Android, Map can be called with a size and offset of zero to use the
+ // ashmem-determined size.
+ if (bytes == 0) {
+ DCHECK_EQ(0, offset);
+ int ashmem_bytes = ashmem_get_size_region(mapped_file_);
+ if (ashmem_bytes < 0)
+ return false;
+ bytes = ashmem_bytes;
+ }
+#endif
+
+ memory_ = mmap(NULL, bytes, PROT_READ | (read_only_ ? 0 : PROT_WRITE),
+ MAP_SHARED, mapped_file_, offset);
+
+ bool mmap_succeeded = memory_ != (void*)-1 && memory_ != NULL;
+ if (mmap_succeeded) {
+ mapped_size_ = bytes;
+ DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
+ (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+ } else {
+ memory_ = NULL;
+ }
+
+ return mmap_succeeded;
+}
+
+bool SharedMemory::Unmap() {
+ if (memory_ == NULL)
+ return false;
+
+ munmap(memory_, mapped_size_);
+ memory_ = NULL;
+ mapped_size_ = 0;
+ return true;
+}
+
+SharedMemoryHandle SharedMemory::handle() const {
+ return FileDescriptor(mapped_file_, false);
+}
+
+void SharedMemory::Close() {
+ if (mapped_file_ > 0) {
+ if (IGNORE_EINTR(close(mapped_file_)) < 0)
+ PLOG(ERROR) << "close";
+ mapped_file_ = -1;
+ }
+ if (readonly_mapped_file_ > 0) {
+ if (IGNORE_EINTR(close(readonly_mapped_file_)) < 0)
+ PLOG(ERROR) << "close";
+ readonly_mapped_file_ = -1;
+ }
+}
+
+#if !defined(OS_ANDROID) && !defined(__ANDROID__)
+bool SharedMemory::PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd) {
+ DCHECK_EQ(-1, mapped_file_);
+ DCHECK_EQ(-1, readonly_mapped_file_);
+ if (fp == nullptr)
+ return false;
+
+ // This function theoretically can block on the disk, but realistically
+ // the temporary files we create will just go into the buffer cache
+ // and be deleted before they ever make it out to disk.
+ base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+ struct stat st = {};
+ if (fstat(fileno(fp.get()), &st))
+ NOTREACHED();
+ if (readonly_fd.is_valid()) {
+ struct stat readonly_st = {};
+ if (fstat(readonly_fd.get(), &readonly_st))
+ NOTREACHED();
+ if (st.st_dev != readonly_st.st_dev || st.st_ino != readonly_st.st_ino) {
+ LOG(ERROR) << "writable and read-only inodes don't match; bailing";
+ return false;
+ }
+ }
+
+ mapped_file_ = HANDLE_EINTR(dup(fileno(fp.get())));
+ if (mapped_file_ == -1) {
+ if (errno == EMFILE) {
+ LOG(WARNING) << "Shared memory creation failed; out of file descriptors";
+ return false;
+ } else {
+ NOTREACHED() << "Call to dup failed, errno=" << errno;
+ }
+ }
+ readonly_mapped_file_ = readonly_fd.release();
+
+ return true;
+}
+
+// For the given shmem named |mem_name|, return a filename to mmap()
+ // (and possibly create). Modifies |path|. Returns false on
+ // error, or true if we are happy.
+bool SharedMemory::FilePathForMemoryName(const std::string& mem_name,
+ FilePath* path) {
+ // mem_name will be used for a filename; make sure it doesn't
+ // contain anything which will confuse us.
+ DCHECK_EQ(std::string::npos, mem_name.find('/'));
+ DCHECK_EQ(std::string::npos, mem_name.find('\0'));
+
+ FilePath temp_dir;
+ if (!GetShmemTempDir(false, &temp_dir))
+ return false;
+
+#if defined(GOOGLE_CHROME_BUILD)
+ std::string name_base = std::string("com.google.Chrome");
+#else
+ std::string name_base = std::string("org.chromium.Chromium");
+#endif
+ *path = temp_dir.AppendASCII(name_base + ".shmem." + mem_name);
+ return true;
+}
+#endif // !defined(OS_ANDROID) && !defined(__ANDROID__)
+
+bool SharedMemory::ShareToProcessCommon(ProcessHandle,
+ SharedMemoryHandle* new_handle,
+ bool close_self,
+ ShareMode share_mode) {
+ int handle_to_dup = -1;
+ switch(share_mode) {
+ case SHARE_CURRENT_MODE:
+ handle_to_dup = mapped_file_;
+ break;
+ case SHARE_READONLY:
+ // We could imagine re-opening the file from /dev/fd, but that can't make
+ // it readonly on Mac: https://codereview.chromium.org/27265002/#msg10
+ CHECK_GE(readonly_mapped_file_, 0);
+ handle_to_dup = readonly_mapped_file_;
+ break;
+ }
+
+ const int new_fd = HANDLE_EINTR(dup(handle_to_dup));
+ if (new_fd < 0) {
+ if (close_self) {
+ Unmap();
+ Close();
+ }
+ DPLOG(ERROR) << "dup() failed.";
+ return false;
+ }
+
+ new_handle->fd = new_fd;
+ new_handle->auto_close = true;
+
+ if (close_self) {
+ Unmap();
+ Close();
+ }
+
+ return true;
+}
+
+} // namespace base
diff --git a/libchrome/base/memory/shared_memory_unittest.cc b/libchrome/base/memory/shared_memory_unittest.cc
new file mode 100644
index 0000000..f29865c
--- /dev/null
+++ b/libchrome/base/memory/shared_memory_unittest.cc
@@ -0,0 +1,697 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/atomicops.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory_handle.h"
+#include "base/process/kill.h"
+#include "base/rand_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/sys_info.h"
+#include "base/test/multiprocess_test.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+#if defined(OS_POSIX)
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#endif
+
+namespace base {
+
+namespace {
+
+#if !defined(OS_MACOSX)
+// Each thread will open the shared memory. Each thread will take a different 4
+// byte int pointer, and keep changing it, with some small pauses in between.
+// Verify that each thread's value in the shared memory is always correct.
+class MultipleThreadMain : public PlatformThread::Delegate {
+ public:
+ explicit MultipleThreadMain(int16_t id) : id_(id) {}
+ ~MultipleThreadMain() override {}
+
+ static void CleanUp() {
+ SharedMemory memory;
+ memory.Delete(s_test_name_);
+ }
+
+ // PlatformThread::Delegate interface.
+ void ThreadMain() override {
+ const uint32_t kDataSize = 1024;
+ SharedMemory memory;
+ bool rv = memory.CreateNamedDeprecated(s_test_name_, true, kDataSize);
+ EXPECT_TRUE(rv);
+ rv = memory.Map(kDataSize);
+ EXPECT_TRUE(rv);
+ int* ptr = static_cast<int*>(memory.memory()) + id_;
+ EXPECT_EQ(0, *ptr);
+
+ for (int idx = 0; idx < 100; idx++) {
+ *ptr = idx;
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
+ EXPECT_EQ(*ptr, idx);
+ }
+ // Reset back to 0 for the next test that uses the same name.
+ *ptr = 0;
+
+ memory.Close();
+ }
+
+ private:
+ int16_t id_;
+
+ static const char s_test_name_[];
+
+ DISALLOW_COPY_AND_ASSIGN(MultipleThreadMain);
+};
+
+const char MultipleThreadMain::s_test_name_[] =
+ "SharedMemoryOpenThreadTest";
+#endif // !defined(OS_MACOSX)
+
+} // namespace
+
+// Android/Mac doesn't support SharedMemory::Open/Delete/
+// CreateNamedDeprecated(openExisting=true)
+#if !defined(OS_ANDROID) && !defined(OS_MACOSX)
+TEST(SharedMemoryTest, OpenClose) {
+ const uint32_t kDataSize = 1024;
+ std::string test_name = "SharedMemoryOpenCloseTest";
+
+ // Open two handles to a memory segment, confirm that they are mapped
+ // separately yet point to the same space.
+ SharedMemory memory1;
+ bool rv = memory1.Delete(test_name);
+ EXPECT_TRUE(rv);
+ rv = memory1.Delete(test_name);
+ EXPECT_TRUE(rv);
+ rv = memory1.Open(test_name, false);
+ EXPECT_FALSE(rv);
+ rv = memory1.CreateNamedDeprecated(test_name, false, kDataSize);
+ EXPECT_TRUE(rv);
+ rv = memory1.Map(kDataSize);
+ EXPECT_TRUE(rv);
+ SharedMemory memory2;
+ rv = memory2.Open(test_name, false);
+ EXPECT_TRUE(rv);
+ rv = memory2.Map(kDataSize);
+ EXPECT_TRUE(rv);
+ EXPECT_NE(memory1.memory(), memory2.memory()); // Compare the pointers.
+
+ // Make sure we don't segfault. (it actually happened!)
+ ASSERT_NE(memory1.memory(), static_cast<void*>(NULL));
+ ASSERT_NE(memory2.memory(), static_cast<void*>(NULL));
+
+ // Write data to the first memory segment, verify contents of second.
+ memset(memory1.memory(), '1', kDataSize);
+ EXPECT_EQ(memcmp(memory1.memory(), memory2.memory(), kDataSize), 0);
+
+ // Close the first memory segment, and verify the second has the right data.
+ memory1.Close();
+ char* start_ptr = static_cast<char*>(memory2.memory());
+ char* end_ptr = start_ptr + kDataSize;
+ for (char* ptr = start_ptr; ptr < end_ptr; ptr++)
+ EXPECT_EQ(*ptr, '1');
+
+ // Close the second memory segment.
+ memory2.Close();
+
+ rv = memory1.Delete(test_name);
+ EXPECT_TRUE(rv);
+ rv = memory2.Delete(test_name);
+ EXPECT_TRUE(rv);
+}
+
+TEST(SharedMemoryTest, OpenExclusive) {
+ const uint32_t kDataSize = 1024;
+ const uint32_t kDataSize2 = 2048;
+ std::ostringstream test_name_stream;
+ test_name_stream << "SharedMemoryOpenExclusiveTest."
+ << Time::Now().ToDoubleT();
+ std::string test_name = test_name_stream.str();
+
+ // Open two handles to a memory segment and check that
+ // open_existing_deprecated works as expected.
+ SharedMemory memory1;
+ bool rv = memory1.CreateNamedDeprecated(test_name, false, kDataSize);
+ EXPECT_TRUE(rv);
+
+ // Memory1 knows its size because it created it.
+ EXPECT_EQ(memory1.requested_size(), kDataSize);
+
+ rv = memory1.Map(kDataSize);
+ EXPECT_TRUE(rv);
+
+ // The mapped memory1 must be at least the size we asked for.
+ EXPECT_GE(memory1.mapped_size(), kDataSize);
+
+ // The mapped memory1 shouldn't exceed rounding for allocation granularity.
+ EXPECT_LT(memory1.mapped_size(),
+ kDataSize + SysInfo::VMAllocationGranularity());
+
+ memset(memory1.memory(), 'G', kDataSize);
+
+ SharedMemory memory2;
+ // Should not be able to create if openExisting is false.
+ rv = memory2.CreateNamedDeprecated(test_name, false, kDataSize2);
+ EXPECT_FALSE(rv);
+
+ // Should be able to create with openExisting true.
+ rv = memory2.CreateNamedDeprecated(test_name, true, kDataSize2);
+ EXPECT_TRUE(rv);
+
+ // Memory2 shouldn't know the size because we didn't create it.
+ EXPECT_EQ(memory2.requested_size(), 0U);
+
+ // We should be able to map the original size.
+ rv = memory2.Map(kDataSize);
+ EXPECT_TRUE(rv);
+
+ // The mapped memory2 must be at least the size of the original.
+ EXPECT_GE(memory2.mapped_size(), kDataSize);
+
+ // The mapped memory2 shouldn't exceed rounding for allocation granularity.
+ EXPECT_LT(memory2.mapped_size(),
+ kDataSize2 + SysInfo::VMAllocationGranularity());
+
+ // Verify that opening memory2 didn't truncate or delete memory 1.
+ char* start_ptr = static_cast<char*>(memory2.memory());
+ char* end_ptr = start_ptr + kDataSize;
+ for (char* ptr = start_ptr; ptr < end_ptr; ptr++) {
+ EXPECT_EQ(*ptr, 'G');
+ }
+
+ memory1.Close();
+ memory2.Close();
+
+ rv = memory1.Delete(test_name);
+ EXPECT_TRUE(rv);
+}
+#endif // !defined(OS_ANDROID) && !defined(OS_MACOSX)
+
+ // Check that memory is still mapped after it's closed.
+TEST(SharedMemoryTest, CloseNoUnmap) {
+ const size_t kDataSize = 4096;
+
+ SharedMemory memory;
+ ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
+ char* ptr = static_cast<char*>(memory.memory());
+ ASSERT_NE(ptr, static_cast<void*>(NULL));
+ memset(ptr, 'G', kDataSize);
+
+ memory.Close();
+
+ EXPECT_EQ(ptr, memory.memory());
+ EXPECT_EQ(SharedMemory::NULLHandle(), memory.handle());
+
+ for (size_t i = 0; i < kDataSize; i++) {
+ EXPECT_EQ('G', ptr[i]);
+ }
+
+ memory.Unmap();
+ EXPECT_EQ(nullptr, memory.memory());
+}
+
+#if !defined(OS_MACOSX)
+// Create a set of N threads to each open a shared memory segment and write to
+// it. Verify that they are always reading/writing consistent data.
+TEST(SharedMemoryTest, MultipleThreads) {
+ const int kNumThreads = 5;
+
+ MultipleThreadMain::CleanUp();
+ // On POSIX we have a problem when 2 threads try to create the shmem
+ // (a file) at exactly the same time, since create both creates the
+ // file and zerofills it. We solve the problem for this unit test
+ // (make it not flaky) by starting with 1 thread, then
+ // intentionally don't clean up its shmem before running with
+ // kNumThreads.
+
+ int threadcounts[] = { 1, kNumThreads };
+ for (size_t i = 0; i < arraysize(threadcounts); i++) {
+ int numthreads = threadcounts[i];
+ std::unique_ptr<PlatformThreadHandle[]> thread_handles;
+ std::unique_ptr<MultipleThreadMain* []> thread_delegates;
+
+ thread_handles.reset(new PlatformThreadHandle[numthreads]);
+ thread_delegates.reset(new MultipleThreadMain*[numthreads]);
+
+ // Spawn the threads.
+ for (int16_t index = 0; index < numthreads; index++) {
+ PlatformThreadHandle pth;
+ thread_delegates[index] = new MultipleThreadMain(index);
+ EXPECT_TRUE(PlatformThread::Create(0, thread_delegates[index], &pth));
+ thread_handles[index] = pth;
+ }
+
+ // Wait for the threads to finish.
+ for (int index = 0; index < numthreads; index++) {
+ PlatformThread::Join(thread_handles[index]);
+ delete thread_delegates[index];
+ }
+ }
+ MultipleThreadMain::CleanUp();
+}
+#endif
+
+// Allocate private (unique) shared memory with an empty string for a
+// name. Make sure several of them don't point to the same thing as
+// we might expect if the names are equal.
+TEST(SharedMemoryTest, AnonymousPrivate) {
+ int i, j;
+ int count = 4;
+ bool rv;
+ const uint32_t kDataSize = 8192;
+
+ std::unique_ptr<SharedMemory[]> memories(new SharedMemory[count]);
+ std::unique_ptr<int* []> pointers(new int*[count]);
+ ASSERT_TRUE(memories.get());
+ ASSERT_TRUE(pointers.get());
+
+ for (i = 0; i < count; i++) {
+ rv = memories[i].CreateAndMapAnonymous(kDataSize);
+ EXPECT_TRUE(rv);
+ int* ptr = static_cast<int*>(memories[i].memory());
+ EXPECT_TRUE(ptr);
+ pointers[i] = ptr;
+ }
+
+ for (i = 0; i < count; i++) {
+ // zero out the first int in each except for i; for that one, make it 100.
+ for (j = 0; j < count; j++) {
+ if (i == j)
+ pointers[j][0] = 100;
+ else
+ pointers[j][0] = 0;
+ }
+ // make sure there is no bleeding of the 100 into the other pointers
+ for (j = 0; j < count; j++) {
+ if (i == j)
+ EXPECT_EQ(100, pointers[j][0]);
+ else
+ EXPECT_EQ(0, pointers[j][0]);
+ }
+ }
+
+ for (int i = 0; i < count; i++) {
+ memories[i].Close();
+ }
+}
+
+// The Mach functionality is tested in shared_memory_mac_unittest.cc.
+#if !(defined(OS_MACOSX) && !defined(OS_IOS))
+TEST(SharedMemoryTest, ShareReadOnly) {
+ StringPiece contents = "Hello World";
+
+ SharedMemory writable_shmem;
+ SharedMemoryCreateOptions options;
+ options.size = contents.size();
+ options.share_read_only = true;
+ ASSERT_TRUE(writable_shmem.Create(options));
+ ASSERT_TRUE(writable_shmem.Map(options.size));
+ memcpy(writable_shmem.memory(), contents.data(), contents.size());
+ EXPECT_TRUE(writable_shmem.Unmap());
+
+ SharedMemoryHandle readonly_handle;
+ ASSERT_TRUE(writable_shmem.ShareReadOnlyToProcess(GetCurrentProcessHandle(),
+ &readonly_handle));
+ SharedMemory readonly_shmem(readonly_handle, /*readonly=*/true);
+
+ ASSERT_TRUE(readonly_shmem.Map(contents.size()));
+ EXPECT_EQ(contents,
+ StringPiece(static_cast<const char*>(readonly_shmem.memory()),
+ contents.size()));
+ EXPECT_TRUE(readonly_shmem.Unmap());
+
+ // Make sure the writable instance is still writable.
+ ASSERT_TRUE(writable_shmem.Map(contents.size()));
+ StringPiece new_contents = "Goodbye";
+ memcpy(writable_shmem.memory(), new_contents.data(), new_contents.size());
+ EXPECT_EQ(new_contents,
+ StringPiece(static_cast<const char*>(writable_shmem.memory()),
+ new_contents.size()));
+
+ // We'd like to check that if we send the read-only segment to another
+ // process, then that other process can't reopen it read/write. (Since that
+ // would be a security hole.) Setting up multiple processes is hard in a
+ // unittest, so this test checks that the *current* process can't reopen the
+ // segment read/write. I think the test here is stronger than we actually
+ // care about, but there's a remote possibility that sending a file over a
+ // pipe would transform it into read/write.
+ SharedMemoryHandle handle = readonly_shmem.handle();
+
+#if defined(OS_ANDROID)
+ // The "read-only" handle is still writable on Android:
+ // http://crbug.com/320865
+ (void)handle;
+#elif defined(OS_POSIX)
+ int handle_fd = SharedMemory::GetFdFromSharedMemoryHandle(handle);
+ EXPECT_EQ(O_RDONLY, fcntl(handle_fd, F_GETFL) & O_ACCMODE)
+ << "The descriptor itself should be read-only.";
+
+ errno = 0;
+ void* writable = mmap(NULL, contents.size(), PROT_READ | PROT_WRITE,
+ MAP_SHARED, handle_fd, 0);
+ int mmap_errno = errno;
+ EXPECT_EQ(MAP_FAILED, writable)
+ << "It shouldn't be possible to re-mmap the descriptor writable.";
+ EXPECT_EQ(EACCES, mmap_errno) << strerror(mmap_errno);
+ if (writable != MAP_FAILED)
+ EXPECT_EQ(0, munmap(writable, readonly_shmem.mapped_size()));
+
+#elif defined(OS_WIN)
+ EXPECT_EQ(NULL, MapViewOfFile(handle.GetHandle(), FILE_MAP_WRITE, 0, 0, 0))
+ << "Shouldn't be able to map memory writable.";
+
+ HANDLE temp_handle;
+ BOOL rv = ::DuplicateHandle(GetCurrentProcess(), handle.GetHandle(),
+ GetCurrentProcess(), &temp_handle,
+ FILE_MAP_ALL_ACCESS, false, 0);
+ EXPECT_EQ(FALSE, rv)
+ << "Shouldn't be able to duplicate the handle into a writable one.";
+ if (rv)
+ win::ScopedHandle writable_handle(temp_handle);
+ rv = ::DuplicateHandle(GetCurrentProcess(), handle.GetHandle(),
+ GetCurrentProcess(), &temp_handle, FILE_MAP_READ,
+ false, 0);
+ EXPECT_EQ(TRUE, rv)
+ << "Should be able to duplicate the handle into a readable one.";
+ if (rv)
+ win::ScopedHandle writable_handle(temp_handle);
+#else
+#error Unexpected platform; write a test that tries to make 'handle' writable.
+#endif // defined(OS_POSIX) || defined(OS_WIN)
+}
+#endif // !(defined(OS_MACOSX) && !defined(OS_IOS))
+
+TEST(SharedMemoryTest, ShareToSelf) {
+ StringPiece contents = "Hello World";
+
+ SharedMemory shmem;
+ ASSERT_TRUE(shmem.CreateAndMapAnonymous(contents.size()));
+ memcpy(shmem.memory(), contents.data(), contents.size());
+ EXPECT_TRUE(shmem.Unmap());
+
+ SharedMemoryHandle shared_handle;
+ ASSERT_TRUE(shmem.ShareToProcess(GetCurrentProcessHandle(), &shared_handle));
+#if defined(OS_WIN)
+ ASSERT_TRUE(shared_handle.OwnershipPassesToIPC());
+#endif
+ SharedMemory shared(shared_handle, /*readonly=*/false);
+
+ ASSERT_TRUE(shared.Map(contents.size()));
+ EXPECT_EQ(
+ contents,
+ StringPiece(static_cast<const char*>(shared.memory()), contents.size()));
+
+ shared_handle = SharedMemoryHandle();
+ ASSERT_TRUE(shmem.ShareToProcess(GetCurrentProcessHandle(), &shared_handle));
+#if defined(OS_WIN)
+ ASSERT_TRUE(shared_handle.OwnershipPassesToIPC());
+#endif
+ SharedMemory readonly(shared_handle, /*readonly=*/true);
+
+ ASSERT_TRUE(readonly.Map(contents.size()));
+ EXPECT_EQ(contents,
+ StringPiece(static_cast<const char*>(readonly.memory()),
+ contents.size()));
+}
+
+TEST(SharedMemoryTest, MapAt) {
+ ASSERT_TRUE(SysInfo::VMAllocationGranularity() >= sizeof(uint32_t));
+ const size_t kCount = SysInfo::VMAllocationGranularity();
+ const size_t kDataSize = kCount * sizeof(uint32_t);
+
+ SharedMemory memory;
+ ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
+ uint32_t* ptr = static_cast<uint32_t*>(memory.memory());
+ ASSERT_NE(ptr, static_cast<void*>(NULL));
+
+ for (size_t i = 0; i < kCount; ++i) {
+ ptr[i] = i;
+ }
+
+ memory.Unmap();
+
+ off_t offset = SysInfo::VMAllocationGranularity();
+ ASSERT_TRUE(memory.MapAt(offset, kDataSize - offset));
+ offset /= sizeof(uint32_t);
+ ptr = static_cast<uint32_t*>(memory.memory());
+ ASSERT_NE(ptr, static_cast<void*>(NULL));
+ for (size_t i = offset; i < kCount; ++i) {
+ EXPECT_EQ(ptr[i - offset], i);
+ }
+}
+
+TEST(SharedMemoryTest, MapTwice) {
+ const uint32_t kDataSize = 1024;
+ SharedMemory memory;
+ bool rv = memory.CreateAndMapAnonymous(kDataSize);
+ EXPECT_TRUE(rv);
+
+ void* old_address = memory.memory();
+
+ rv = memory.Map(kDataSize);
+ EXPECT_FALSE(rv);
+ EXPECT_EQ(old_address, memory.memory());
+}
+
+#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
+// This test is not applicable for iOS (crbug.com/399384).
+#if !defined(OS_IOS)
+// Create a shared memory object, mmap it, and mprotect it to PROT_EXEC.
+TEST(SharedMemoryTest, AnonymousExecutable) {
+ const uint32_t kTestSize = 1 << 16;
+
+ SharedMemory shared_memory;
+ SharedMemoryCreateOptions options;
+ options.size = kTestSize;
+ options.executable = true;
+
+ EXPECT_TRUE(shared_memory.Create(options));
+ EXPECT_TRUE(shared_memory.Map(shared_memory.requested_size()));
+
+ EXPECT_EQ(0, mprotect(shared_memory.memory(), shared_memory.requested_size(),
+ PROT_READ | PROT_EXEC));
+}
+#endif // !defined(OS_IOS)
+
+// Android supports a different permission model than POSIX for its "ashmem"
+// shared memory implementation. So the tests about file permissions are not
+// included on Android.
+#if !defined(OS_ANDROID)
+
+// Set a umask and restore the old mask on destruction.
+class ScopedUmaskSetter {
+ public:
+ explicit ScopedUmaskSetter(mode_t target_mask) {
+ old_umask_ = umask(target_mask);
+ }
+ ~ScopedUmaskSetter() { umask(old_umask_); }
+ private:
+ mode_t old_umask_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedUmaskSetter);
+};
+
+// Create a shared memory object, check its permissions.
+// The umask below is permissive (masks only group/other *write*), so any
+// group/other restriction observed on the fd comes from SharedMemory itself,
+// not from the umask.
+TEST(SharedMemoryTest, FilePermissionsAnonymous) {
+  const uint32_t kTestSize = 1 << 8;
+
+  SharedMemory shared_memory;
+  SharedMemoryCreateOptions options;
+  options.size = kTestSize;
+  // Set a permissive file mode creation mask.
+  ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
+
+  EXPECT_TRUE(shared_memory.Create(options));
+
+  int shm_fd =
+      SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
+  struct stat shm_stat;
+  EXPECT_EQ(0, fstat(shm_fd, &shm_stat));
+  // Neither the group, nor others should be able to read the shared memory
+  // file.
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXO);
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXG);
+}
+
+// Create a shared memory object, check its permissions.
+// NOTE(review): despite the test name, the body visible here creates an
+// anonymous region and is identical to FilePermissionsAnonymous — presumably
+// a named-creation option was dropped in an upstream change; verify intent
+// against upstream Chromium history.
+TEST(SharedMemoryTest, FilePermissionsNamed) {
+  const uint32_t kTestSize = 1 << 8;
+
+  SharedMemory shared_memory;
+  SharedMemoryCreateOptions options;
+  options.size = kTestSize;
+
+  // Set a permissive file mode creation mask (masks only group/other write).
+  ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
+
+  EXPECT_TRUE(shared_memory.Create(options));
+
+  int fd = SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
+  struct stat shm_stat;
+  EXPECT_EQ(0, fstat(fd, &shm_stat));
+  // Neither the group, nor others should have been able to open the shared
+  // memory file while its name existed.
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXO);
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXG);
+}
+#endif // !defined(OS_ANDROID)
+
+#endif // defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
+
+// Map() will return addresses which are aligned to the platform page size, this
+// varies from platform to platform though. Since we'd like to advertise a
+// minimum alignment that callers can count on, test for it here.
+TEST(SharedMemoryTest, MapMinimumAlignment) {
+  static const int kDataSize = 8192;
+
+  SharedMemory shared_memory;
+  ASSERT_TRUE(shared_memory.CreateAndMapAnonymous(kDataSize));
+  // All address bits below MAP_MINIMUM_ALIGNMENT must be zero.
+  EXPECT_EQ(0U, reinterpret_cast<uintptr_t>(
+      shared_memory.memory()) & (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+  shared_memory.Close();
+}
+
+#if defined(OS_WIN)
+// Verifies that Map() rejects unsafe sections: a SEC_IMAGE (PE image) section
+// whether opened by name or adopted from a handle, and any section handle
+// lacking SECTION_QUERY rights (since its safety cannot be verified).
+TEST(SharedMemoryTest, UnsafeImageSection) {
+  const char kTestSectionName[] = "UnsafeImageSection";
+  wchar_t path[MAX_PATH];
+  EXPECT_GT(::GetModuleFileName(nullptr, path, arraysize(path)), 0U);
+
+  // Map the current executable image to save us creating a new PE file on disk.
+  base::win::ScopedHandle file_handle(::CreateFile(
+      path, GENERIC_READ, FILE_SHARE_READ, nullptr, OPEN_EXISTING, 0, nullptr));
+  EXPECT_TRUE(file_handle.IsValid());
+  base::win::ScopedHandle section_handle(
+      ::CreateFileMappingA(file_handle.Get(), nullptr,
+                           PAGE_READONLY | SEC_IMAGE, 0, 0, kTestSectionName));
+  EXPECT_TRUE(section_handle.IsValid());
+
+  // Check direct opening by name, from handle and duplicated from handle.
+  SharedMemory shared_memory_open;
+  EXPECT_TRUE(shared_memory_open.Open(kTestSectionName, true));
+  EXPECT_FALSE(shared_memory_open.Map(1));
+  EXPECT_EQ(nullptr, shared_memory_open.memory());
+
+  // Adopt the raw section handle; Map() must still refuse the image section.
+  SharedMemory shared_memory_handle_local(
+      SharedMemoryHandle(section_handle.Take(), ::GetCurrentProcessId()), true);
+  EXPECT_FALSE(shared_memory_handle_local.Map(1));
+  EXPECT_EQ(nullptr, shared_memory_handle_local.memory());
+
+  // Check that a handle without SECTION_QUERY also can't be mapped as it can't
+  // be checked.
+  SharedMemory shared_memory_handle_dummy;
+  SharedMemoryCreateOptions options;
+  options.size = 0x1000;
+  EXPECT_TRUE(shared_memory_handle_dummy.Create(options));
+  HANDLE handle_no_query;
+  EXPECT_TRUE(::DuplicateHandle(
+      ::GetCurrentProcess(), shared_memory_handle_dummy.handle().GetHandle(),
+      ::GetCurrentProcess(), &handle_no_query, FILE_MAP_READ, FALSE, 0));
+  SharedMemory shared_memory_handle_no_query(
+      SharedMemoryHandle(handle_no_query, ::GetCurrentProcessId()), true);
+  EXPECT_FALSE(shared_memory_handle_no_query.Map(1));
+  EXPECT_EQ(nullptr, shared_memory_handle_no_query.memory());
+}
+#endif // defined(OS_WIN)
+
+// iOS does not allow multiple processes.
+// Android ashmem does not support named shared memory.
+// Mac SharedMemory does not support named shared memory. crbug.com/345734
+#if !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX)
+// On POSIX it is especially important we test shmem across processes,
+// not just across threads. But the test is enabled on all platforms.
+class SharedMemoryProcessTest : public MultiProcessTest {
+ public:
+  // Removes the named region so state from a previous (possibly crashed) run
+  // cannot leak into this one.
+  static void CleanUp() {
+    SharedMemory memory;
+    memory.Delete(s_test_name_);
+  }
+
+  // Child-process body: opens/creates the named region, maps it, and
+  // atomically increments the first word. Returns the failure count, which
+  // becomes the child's exit code (0 == success).
+  static int TaskTestMain() {
+    int errors = 0;
+    SharedMemory memory;
+    bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
+    EXPECT_TRUE(rv);
+    if (rv != true)
+      errors++;
+    rv = memory.Map(s_data_size_);
+    EXPECT_TRUE(rv);
+    if (rv != true)
+      errors++;
+    int* ptr = static_cast<int*>(memory.memory());
+
+    // This runs concurrently in multiple processes. Writes need to be atomic.
+    subtle::Barrier_AtomicIncrement(ptr, 1);
+    memory.Close();
+    return errors;
+  }
+
+  // Name of the shared region and its size, shared by parent and children.
+  static const char s_test_name_[];
+  static const uint32_t s_data_size_;
+};
+
+const char SharedMemoryProcessTest::s_test_name_[] = "MPMem";
+const uint32_t SharedMemoryProcessTest::s_data_size_ = 1024;
+
+// Parent side of the multi-process test: creates and zeroes a named shared
+// region, spawns |kNumTasks| children that each atomically increment the
+// first word, then asserts the final count equals |kNumTasks|.
+TEST_F(SharedMemoryProcessTest, SharedMemoryAcrossProcesses) {
+  const int kNumTasks = 5;
+
+  // Clear any region left behind by a previous run.
+  SharedMemoryProcessTest::CleanUp();
+
+  // Create a shared memory region. Set the first word to 0.
+  SharedMemory memory;
+  bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
+  ASSERT_TRUE(rv);
+  rv = memory.Map(s_data_size_);
+  ASSERT_TRUE(rv);
+  int* ptr = static_cast<int*>(memory.memory());
+  *ptr = 0;
+
+  // Start |kNumTasks| processes, each of which atomically increments the first
+  // word by 1.
+  Process processes[kNumTasks];
+  for (int index = 0; index < kNumTasks; ++index) {
+    processes[index] = SpawnChild("SharedMemoryTestMain");
+    ASSERT_TRUE(processes[index].IsValid());
+  }
+
+  // Check that each process exited correctly.
+  int exit_code = 0;
+  for (int index = 0; index < kNumTasks; ++index) {
+    EXPECT_TRUE(processes[index].WaitForExit(&exit_code));
+    EXPECT_EQ(0, exit_code);
+  }
+
+  // Check that the shared memory region reflects |kNumTasks| increments.
+  ASSERT_EQ(kNumTasks, *ptr);
+
+  memory.Close();
+  SharedMemoryProcessTest::CleanUp();
+}
+
+// Child-process entry point spawned by SharedMemoryAcrossProcesses; the
+// process exit code is TaskTestMain()'s error count (0 == success).
+MULTIPROCESS_TEST_MAIN(SharedMemoryTestMain) {
+  return SharedMemoryProcessTest::TaskTestMain();
+}
+#endif // !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX)
+
+} // namespace base
diff --git a/libchrome/base/memory/singleton.cc b/libchrome/base/memory/singleton.cc
new file mode 100644
index 0000000..f68ecaa
--- /dev/null
+++ b/libchrome/base/memory/singleton.cc
@@ -0,0 +1,34 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/singleton.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+namespace internal {
+
+// Spin-waits until another thread finishes creating the singleton instance.
+// Precondition: |*instance| is non-zero (kBeingCreatedMarker or a valid
+// pointer). Returns the published value once it is no longer the creation
+// marker. Called by Singleton::get() when it loses the creation race.
+subtle::AtomicWord WaitForInstance(subtle::AtomicWord* instance) {
+  // Handle the race. Another thread beat us and either:
+  // - Has the object in BeingCreated state
+  // - Already has the object created...
+  // We know value != NULL. It could be kBeingCreatedMarker, or a valid ptr.
+  // Unless your constructor can be very time consuming, it is very unlikely
+  // to hit this race. When it does, we just spin and yield the thread until
+  // the object has been created.
+  subtle::AtomicWord value;
+  while (true) {
+    // The load has acquire memory ordering as the thread which reads the
+    // instance pointer must acquire visibility over the associated data.
+    // The pairing Release_Store operation is in Singleton::get().
+    value = subtle::Acquire_Load(instance);
+    if (value != kBeingCreatedMarker)
+      break;
+    // Yield rather than busy-spin so the creating thread can make progress.
+    PlatformThread::YieldCurrentThread();
+  }
+  return value;
+}
+
+} // namespace internal
+} // namespace base
+
diff --git a/libchrome/base/memory/singleton.h b/libchrome/base/memory/singleton.h
new file mode 100644
index 0000000..79e4441
--- /dev/null
+++ b/libchrome/base/memory/singleton.h
@@ -0,0 +1,284 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// PLEASE READ: Do you really need a singleton?
+//
+// Singletons make it hard to determine the lifetime of an object, which can
+// lead to buggy code and spurious crashes.
+//
+// Instead of adding another singleton into the mix, try to identify either:
+// a) An existing singleton that can manage your object's lifetime
+// b) Locations where you can deterministically create the object and pass
+// into other objects
+//
+// If you absolutely need a singleton, please keep them as trivial as possible
+// and ideally a leaf dependency. Singletons get problematic when they attempt
+// to do too much in their destructor or have circular dependencies.
+
+#ifndef BASE_MEMORY_SINGLETON_H_
+#define BASE_MEMORY_SINGLETON_H_
+
+#include "base/at_exit.h"
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/aligned_memory.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+namespace internal {
+
+// Our AtomicWord doubles as a spinlock, where a value of
+// kBeingCreatedMarker means the spinlock is being held for creation.
+static const subtle::AtomicWord kBeingCreatedMarker = 1;
+
+// We pull out some of the functionality into a non-templated function, so that
+// we can implement the more complicated pieces out of line in the .cc file.
+BASE_EXPORT subtle::AtomicWord WaitForInstance(subtle::AtomicWord* instance);
+
+class DeleteTraceLogForTesting;
+
+} // namespace internal
+
+
+// Default traits for Singleton<Type>. Calls operator new and operator delete on
+// the object. Registers automatic deletion at process exit.
+// Overload if you need arguments or another memory allocation function.
+template<typename Type>
+struct DefaultSingletonTraits {
+  // Allocates the object.
+  static Type* New() {
+    // The parenthesis is very important here; it forces POD type
+    // initialization.
+    return new Type();
+  }
+
+  // Destroys the object. Must mirror New(); traits overriding one should
+  // usually override both.
+  static void Delete(Type* x) {
+    delete x;
+  }
+
+  // Set to true to automatically register deletion of the object on process
+  // exit. See below for the required call that makes this happen.
+  static const bool kRegisterAtExit = true;
+
+#ifndef NDEBUG
+  // Set to false to disallow access on a non-joinable thread. This is
+  // different from kRegisterAtExit because StaticMemorySingletonTraits allows
+  // access on non-joinable threads, and gracefully handles this.
+  static const bool kAllowedToAccessOnNonjoinableThread = false;
+#endif
+};
+
+
+// Alternate traits for use with the Singleton<Type>. Identical to
+// DefaultSingletonTraits except that the Singleton will not be cleaned up
+// at exit: kRegisterAtExit is false, and (in debug builds) access from
+// non-joinable threads is permitted since no teardown ever runs.
+template<typename Type>
+struct LeakySingletonTraits : public DefaultSingletonTraits<Type> {
+  static const bool kRegisterAtExit = false;
+#ifndef NDEBUG
+  static const bool kAllowedToAccessOnNonjoinableThread = true;
+#endif
+};
+
+
+// Alternate traits for use with the Singleton<Type>. Allocates memory
+// for the singleton instance from a static buffer. The singleton will
+// be cleaned up at exit, but can't be revived after destruction unless
+// the Resurrect() method is called.
+//
+// This is useful for a certain category of things, notably logging and
+// tracing, where the singleton instance is of a type carefully constructed to
+// be safe to access post-destruction.
+// In logging and tracing you'll typically get stray calls at odd times, like
+// during static destruction, thread teardown and the like, and there's a
+// termination race on the heap-based singleton - e.g. if one thread calls
+// get(), but then another thread initiates AtExit processing, the first thread
+// may call into an object residing in unallocated memory. If the instance is
+// allocated from the data segment, then this is survivable.
+//
+// The destructor is to deallocate system resources, in this case to unregister
+// a callback the system will invoke when logging levels change. Note that
+// this is also used in e.g. Chrome Frame, where you have to allow for the
+// possibility of loading briefly into someone else's process space, and
+// so leaking is not an option, as that would sabotage the state of your host
+// process once you've unloaded.
+template <typename Type>
+struct StaticMemorySingletonTraits {
+  // WARNING: User has to deal with get() in the singleton class
+  // this is traits for returning NULL.
+  static Type* New() {
+    // Only constructs once and returns pointer; otherwise returns NULL.
+    // The exchange flips dead_ to 1, so every caller after the first (and any
+    // caller after Delete(), until Resurrect()) observes a non-zero previous
+    // value and gets NULL.
+    if (subtle::NoBarrier_AtomicExchange(&dead_, 1))
+      return NULL;
+
+    // Placement-new into the static aligned buffer; no heap allocation.
+    return new(buffer_.void_data()) Type();
+  }
+
+  // Destroys the instance in place; the static buffer itself is never freed,
+  // which is what makes post-destruction access survivable (see file comment).
+  static void Delete(Type* p) {
+    if (p != NULL)
+      p->Type::~Type();
+  }
+
+  static const bool kRegisterAtExit = true;
+  static const bool kAllowedToAccessOnNonjoinableThread = true;
+
+  // Exposed for unittesting. Clears dead_ so a subsequent New() constructs a
+  // fresh instance in the same static buffer.
+  static void Resurrect() { subtle::NoBarrier_Store(&dead_, 0); }
+
+ private:
+  // Static storage for the instance, aligned for Type.
+  static AlignedMemory<sizeof(Type), ALIGNOF(Type)> buffer_;
+  // Signal the object was already deleted, so it is not revived.
+  static subtle::Atomic32 dead_;
+};
+
+template <typename Type>
+AlignedMemory<sizeof(Type), ALIGNOF(Type)>
+    StaticMemorySingletonTraits<Type>::buffer_;
+template <typename Type>
+subtle::Atomic32 StaticMemorySingletonTraits<Type>::dead_ = 0;
+
+// The Singleton<Type, Traits, DifferentiatingType> class manages a single
+// instance of Type which will be created on first use and will be destroyed at
+// normal process exit). The Trait::Delete function will not be called on
+// abnormal process exit.
+//
+// DifferentiatingType is used as a key to differentiate two different
+// singletons having the same memory allocation functions but serving a
+// different purpose. This is mainly used for Locks serving different purposes.
+//
+// Example usage:
+//
+// In your header:
+// template <typename T> struct DefaultSingletonTraits;
+// class FooClass {
+// public:
+// static FooClass* GetInstance(); <-- See comment below on this.
+// void Bar() { ... }
+// private:
+// FooClass() { ... }
+// friend struct DefaultSingletonTraits<FooClass>;
+//
+// DISALLOW_COPY_AND_ASSIGN(FooClass);
+// };
+//
+// In your source file:
+// #include "base/memory/singleton.h"
+// FooClass* FooClass::GetInstance() {
+// return Singleton<FooClass>::get();
+// }
+//
+// And to call methods on FooClass:
+// FooClass::GetInstance()->Bar();
+//
+// NOTE: The method accessing Singleton<T>::get() has to be named as GetInstance
+// and it is important that FooClass::GetInstance() is not inlined in the
+// header. This makes sure that when source files from multiple targets include
+// this header they don't end up with different copies of the inlined code
+// creating multiple copies of the singleton.
+//
+// Singleton<> has no non-static members and doesn't need to actually be
+// instantiated.
+//
+// This class is itself thread-safe. The underlying Type must of course be
+// thread-safe if you want to use it concurrently. Two parameters may be tuned
+// depending on the user's requirements.
+//
+// Glossary:
+// RAE = kRegisterAtExit
+//
+// On every platform, if Traits::RAE is true, the singleton will be destroyed at
+// process exit. More precisely it uses AtExitManager which requires an
+// object of this type to be instantiated. AtExitManager mimics the semantics
+// of atexit() such as LIFO order but under Windows is safer to call. For more
+// information see at_exit.h.
+//
+// If Traits::RAE is false, the singleton will not be freed at process exit,
+// thus the singleton will be leaked if it is ever accessed. Traits::RAE
+// shouldn't be false unless absolutely necessary. Remember that the heap where
+// the object is allocated may be destroyed by the CRT anyway.
+//
+// Caveats:
+// (a) Every call to get(), operator->() and operator*() incurs some overhead
+// (16ns on my P4/2.8GHz) to check whether the object has already been
+// initialized. You may wish to cache the result of get(); it will not
+// change.
+//
+// (b) Your factory function must never throw an exception. This class is not
+// exception-safe.
+//
+
+template <typename Type,
+          typename Traits = DefaultSingletonTraits<Type>,
+          typename DifferentiatingType = Type>
+class Singleton {
+ private:
+  // Classes using the Singleton<T> pattern should declare a GetInstance()
+  // method and call Singleton::get() from within that.
+  friend Type* Type::GetInstance();
+
+  // Allow TraceLog tests to test tracing after OnExit.
+  friend class internal::DeleteTraceLogForTesting;
+
+  // This class is safe to be constructed and copy-constructed since it has no
+  // member.
+
+  // Return a pointer to the one true instance of the class.
+  // Lock-free creation: instance_ transitions 0 -> kBeingCreatedMarker ->
+  // pointer, and only the thread that wins the compare-and-swap runs
+  // Traits::New(); losers spin in WaitForInstance().
+  static Type* get() {
+#ifndef NDEBUG
+    // Avoid making TLS lookup on release builds.
+    if (!Traits::kAllowedToAccessOnNonjoinableThread)
+      ThreadRestrictions::AssertSingletonAllowed();
+#endif
+
+    // The load has acquire memory ordering as the thread which reads the
+    // instance_ pointer must acquire visibility over the singleton data.
+    subtle::AtomicWord value = subtle::Acquire_Load(&instance_);
+    if (value != 0 && value != internal::kBeingCreatedMarker) {
+      // Fast path: already created.
+      return reinterpret_cast<Type*>(value);
+    }
+
+    // Object isn't created yet, maybe we will get to create it, let's try...
+    if (subtle::Acquire_CompareAndSwap(&instance_, 0,
+                                       internal::kBeingCreatedMarker) == 0) {
+      // instance_ was NULL and is now kBeingCreatedMarker. Only one thread
+      // will ever get here. Threads might be spinning on us, and they will
+      // stop right after we do this store.
+      Type* newval = Traits::New();
+
+      // Releases the visibility over instance_ to the readers.
+      subtle::Release_Store(&instance_,
+                            reinterpret_cast<subtle::AtomicWord>(newval));
+
+      // Note: StaticMemorySingletonTraits::New() may return NULL, in which
+      // case no AtExit callback is registered.
+      if (newval != NULL && Traits::kRegisterAtExit)
+        AtExitManager::RegisterCallback(OnExit, NULL);
+
+      return newval;
+    }
+
+    // We hit a race. Wait for the other thread to complete it.
+    value = internal::WaitForInstance(&instance_);
+
+    return reinterpret_cast<Type*>(value);
+  }
+
+  // Adapter function for use with AtExit(). This should be called single
+  // threaded, so don't use atomic operations.
+  // Calling OnExit while singleton is in use by other threads is a mistake.
+  static void OnExit(void* /*unused*/) {
+    // AtExit should only ever be registered after the singleton instance was
+    // created. We should only ever get here with a valid instance_ pointer.
+    Traits::Delete(reinterpret_cast<Type*>(subtle::NoBarrier_Load(&instance_)));
+    instance_ = 0;
+  }
+
+  // 0 when unset, kBeingCreatedMarker while Traits::New() runs, otherwise
+  // the instance pointer.
+  static subtle::AtomicWord instance_;
+};
+
+template <typename Type, typename Traits, typename DifferentiatingType>
+subtle::AtomicWord Singleton<Type, Traits, DifferentiatingType>::instance_ = 0;
+
+} // namespace base
+
+#endif // BASE_MEMORY_SINGLETON_H_
diff --git a/libchrome/base/memory/singleton_objc.h b/libchrome/base/memory/singleton_objc.h
new file mode 100644
index 0000000..6df3f77
--- /dev/null
+++ b/libchrome/base/memory/singleton_objc.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Support for using the Singleton<T> pattern with Objective-C objects. A
+// SingletonObjC is the same as a Singleton, except the default traits are
+// appropriate for Objective-C objects. A typical Objective-C object of type
+// NSExampleType can be maintained as a singleton and accessed with:
+//
+// NSExampleType* exampleSingleton = SingletonObjC<NSExampleType>::get();
+//
+// The first time this is used, it will create exampleSingleton as the result
+// of [[NSExampleType alloc] init]. Subsequent calls will return the same
+// NSExampleType* object. The object will be released by calling
+// -[NSExampleType release] when Singleton's atexit routines run
+// (see singleton.h).
+//
+// For Objective-C objects initialized through means other than the
+// no-parameter -init selector, DefaultSingletonObjCTraits may be extended
+// as needed:
+//
+// struct FooSingletonTraits : public DefaultSingletonObjCTraits<Foo> {
+// static Foo* New() {
+// return [[Foo alloc] initWithName:@"selecty"];
+// }
+// };
+// ...
+// Foo* widgetSingleton = SingletonObjC<Foo, FooSingletonTraits>::get();
+
+#ifndef BASE_MEMORY_SINGLETON_OBJC_H_
+#define BASE_MEMORY_SINGLETON_OBJC_H_
+
+#import <Foundation/Foundation.h>
+#include "base/memory/singleton.h"
+
+// Singleton traits usable to manage traditional Objective-C objects, which
+// are instantiated by sending |alloc| and |init| messages, and are deallocated
+// in a memory-managed environment when their retain counts drop to 0 by
+// sending |release| messages.
+template<typename Type>
+struct DefaultSingletonObjCTraits : public DefaultSingletonTraits<Type> {
+  // Creates the singleton via the no-parameter -init; extend these traits (as
+  // in the FooSingletonTraits example above) for other initializers.
+  static Type* New() {
+    return [[Type alloc] init];
+  }
+
+  // Balances New()'s +alloc with a -release when the singleton is destroyed.
+  static void Delete(Type* object) {
+    [object release];
+  }
+};
+
+// Exactly like Singleton, but with DefaultSingletonObjCTraits as the
+// default trait class. This makes it straightforward for Objective-C++ code
+// to hold Objective-C objects as singletons.
+template<typename Type,
+         typename Traits = DefaultSingletonObjCTraits<Type>,
+         typename DifferentiatingType = Type>
+class SingletonObjC : public Singleton<Type, Traits, DifferentiatingType> {
+};
+
+#endif // BASE_MEMORY_SINGLETON_OBJC_H_
diff --git a/libchrome/base/memory/singleton_unittest.cc b/libchrome/base/memory/singleton_unittest.cc
new file mode 100644
index 0000000..a15145c
--- /dev/null
+++ b/libchrome/base/memory/singleton_unittest.cc
@@ -0,0 +1,293 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+
+#include "base/at_exit.h"
+#include "base/memory/singleton.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+// Compile-time sanity check: default traits must opt in to AtExit teardown.
+static_assert(DefaultSingletonTraits<int>::kRegisterAtExit == true,
+              "object must be deleted on process exit");
+
+typedef void (*CallbackFunc)();
+
+// Minimal singleton used to verify POD-member initialization and identity.
+class IntSingleton {
+ public:
+  static IntSingleton* GetInstance() {
+    return Singleton<IntSingleton>::get();
+  }
+
+  // Deliberately not initialized here; the default trait's `new Type()`
+  // value-initializes it to 0.
+  int value_;
+};
+
+// Singleton whose custom trait seeds value_ with 5 at creation.
+class Init5Singleton {
+ public:
+  struct Trait;
+
+  static Init5Singleton* GetInstance() {
+    return Singleton<Init5Singleton, Trait>::get();
+  }
+
+  int value_;
+};
+
+struct Init5Singleton::Trait : public DefaultSingletonTraits<Init5Singleton> {
+  // Overrides New() so the instance starts with value_ == 5.
+  static Init5Singleton* New() {
+    Init5Singleton* instance = new Init5Singleton();
+    instance->value_ = 5;
+    return instance;
+  }
+};
+
+int* SingletonInt() {
+  return &IntSingleton::GetInstance()->value_;
+}
+
+int* SingletonInt5() {
+  return &Init5Singleton::GetInstance()->value_;
+}
+
+// Trait that fires the instance's callback_ (if set) immediately before
+// deletion, letting tests observe when a singleton is torn down.
+template <typename Type>
+struct CallbackTrait : public DefaultSingletonTraits<Type> {
+  static void Delete(Type* instance) {
+    if (instance->callback_)
+      (instance->callback_)();
+    DefaultSingletonTraits<Type>::Delete(instance);
+  }
+};
+
+// Base for the callback-singleton variants below; holds the callback slot.
+class CallbackSingleton {
+ public:
+  CallbackSingleton() : callback_(NULL) { }
+  CallbackFunc callback_;
+};
+
+// Heap-allocated callback singleton deleted at exit (default trait behavior).
+class CallbackSingletonWithNoLeakTrait : public CallbackSingleton {
+ public:
+  struct Trait : public CallbackTrait<CallbackSingletonWithNoLeakTrait> { };
+
+  CallbackSingletonWithNoLeakTrait() : CallbackSingleton() { }
+
+  static CallbackSingletonWithNoLeakTrait* GetInstance() {
+    return Singleton<CallbackSingletonWithNoLeakTrait, Trait>::get();
+  }
+};
+
+// Callback singleton that opts out of AtExit deletion (kRegisterAtExit is
+// false); its Delete trait only runs when invoked explicitly.
+class CallbackSingletonWithLeakTrait : public CallbackSingleton {
+ public:
+  struct Trait : public CallbackTrait<CallbackSingletonWithLeakTrait> {
+    static const bool kRegisterAtExit = false;
+  };
+
+  CallbackSingletonWithLeakTrait() : CallbackSingleton() { }
+
+  static CallbackSingletonWithLeakTrait* GetInstance() {
+    return Singleton<CallbackSingletonWithLeakTrait, Trait>::get();
+  }
+};
+
+// Callback singleton placed in static storage via StaticMemorySingletonTraits.
+class CallbackSingletonWithStaticTrait : public CallbackSingleton {
+ public:
+  struct Trait;
+
+  CallbackSingletonWithStaticTrait() : CallbackSingleton() { }
+
+  static CallbackSingletonWithStaticTrait* GetInstance() {
+    return Singleton<CallbackSingletonWithStaticTrait, Trait>::get();
+  }
+};
+
+struct CallbackSingletonWithStaticTrait::Trait
+    : public StaticMemorySingletonTraits<CallbackSingletonWithStaticTrait> {
+  // Fires the instance's callback (if set) before the in-place destruction.
+  static void Delete(CallbackSingletonWithStaticTrait* instance) {
+    if (instance->callback_)
+      (instance->callback_)();
+    StaticMemorySingletonTraits<CallbackSingletonWithStaticTrait>::Delete(
+        instance);
+  }
+};
+
+// Wraps a Type with a specific alignment requirement in a statically
+// allocated singleton, used to check that StaticMemorySingletonTraits'
+// buffer honors ALIGNOF(Type).
+template <class Type>
+class AlignedTestSingleton {
+ public:
+  AlignedTestSingleton() {}
+  ~AlignedTestSingleton() {}
+  static AlignedTestSingleton* GetInstance() {
+    return Singleton<AlignedTestSingleton,
+                     StaticMemorySingletonTraits<AlignedTestSingleton>>::get();
+  }
+
+  Type type_;
+};
+
+
+// Registers |CallOnQuit| to fire when the non-leaky singleton is deleted at
+// exit.
+void SingletonNoLeak(CallbackFunc CallOnQuit) {
+  CallbackSingletonWithNoLeakTrait::GetInstance()->callback_ = CallOnQuit;
+}
+
+// Registers |CallOnQuit| on the leaky singleton (never deleted automatically).
+void SingletonLeak(CallbackFunc CallOnQuit) {
+  CallbackSingletonWithLeakTrait::GetInstance()->callback_ = CallOnQuit;
+}
+
+CallbackFunc* GetLeakySingleton() {
+  return &CallbackSingletonWithLeakTrait::GetInstance()->callback_;
+}
+
+// Explicitly destroys the leaky singleton, which AtExit never touches.
+void DeleteLeakySingleton() {
+  DefaultSingletonTraits<CallbackSingletonWithLeakTrait>::Delete(
+      CallbackSingletonWithLeakTrait::GetInstance());
+}
+
+// Registers |CallOnQuit| on the statically allocated singleton.
+void SingletonStatic(CallbackFunc CallOnQuit) {
+  CallbackSingletonWithStaticTrait::GetInstance()->callback_ = CallOnQuit;
+}
+
+CallbackFunc* GetStaticSingleton() {
+  return &CallbackSingletonWithStaticTrait::GetInstance()->callback_;
+}
+
+
+// Fixture recording, via static flags, which singleton teardown callbacks
+// fired when a ShadowingAtExitManager ran its callbacks.
+class SingletonTest : public testing::Test {
+ public:
+  SingletonTest() {}
+
+  void SetUp() override {
+    // Reset the observation flags before each test.
+    non_leak_called_ = false;
+    leaky_called_ = false;
+    static_called_ = false;
+  }
+
+ protected:
+  // Expects the non-leaky and static singletons were torn down while the
+  // leaky one survived; resets the flags afterwards.
+  void VerifiesCallbacks() {
+    EXPECT_TRUE(non_leak_called_);
+    EXPECT_FALSE(leaky_called_);
+    EXPECT_TRUE(static_called_);
+    non_leak_called_ = false;
+    leaky_called_ = false;
+    static_called_ = false;
+  }
+
+  // Expects that no teardown callback fired; resets the flags afterwards.
+  void VerifiesCallbacksNotCalled() {
+    EXPECT_FALSE(non_leak_called_);
+    EXPECT_FALSE(leaky_called_);
+    EXPECT_FALSE(static_called_);
+    non_leak_called_ = false;
+    leaky_called_ = false;
+    static_called_ = false;
+  }
+
+  static void CallbackNoLeak() {
+    non_leak_called_ = true;
+  }
+
+  static void CallbackLeak() {
+    leaky_called_ = true;
+  }
+
+  static void CallbackStatic() {
+    static_called_ = true;
+  }
+
+ private:
+  static bool non_leak_called_;
+  static bool leaky_called_;
+  static bool static_called_;
+};
+
+bool SingletonTest::non_leak_called_ = false;
+bool SingletonTest::leaky_called_ = false;
+bool SingletonTest::static_called_ = false;
+
+// End-to-end exercise of singleton creation, AtExit teardown, leak semantics,
+// and resurrection of the statically allocated singleton.
+TEST_F(SingletonTest, Basic) {
+  int* singleton_int;
+  int* singleton_int_5;
+  CallbackFunc* leaky_singleton;
+  CallbackFunc* static_singleton;
+
+  {
+    ShadowingAtExitManager sem;
+    {
+      singleton_int = SingletonInt();
+    }
+    // Ensure POD type initialization.
+    EXPECT_EQ(*singleton_int, 0);
+    *singleton_int = 1;
+
+    // get() must hand back the same instance, with the mutation visible.
+    EXPECT_EQ(singleton_int, SingletonInt());
+    EXPECT_EQ(*singleton_int, 1);
+
+    {
+      singleton_int_5 = SingletonInt5();
+    }
+    // Is default initialized to 5.
+    EXPECT_EQ(*singleton_int_5, 5);
+
+    SingletonNoLeak(&CallbackNoLeak);
+    SingletonLeak(&CallbackLeak);
+    SingletonStatic(&CallbackStatic);
+    static_singleton = GetStaticSingleton();
+    leaky_singleton = GetLeakySingleton();
+    EXPECT_TRUE(leaky_singleton);
+  }
+  // Leaving the scope ran the shadowing AtExit manager's callbacks.
+
+  // Verify that only the expected callback has been called.
+  VerifiesCallbacks();
+  // Delete the leaky singleton.
+  DeleteLeakySingleton();
+
+  // The static singleton can't be acquired post-atexit.
+  EXPECT_EQ(NULL, GetStaticSingleton());
+
+  {
+    ShadowingAtExitManager sem;
+    // Verify that the variables were reset.
+    {
+      singleton_int = SingletonInt();
+      EXPECT_EQ(*singleton_int, 0);
+    }
+    {
+      singleton_int_5 = SingletonInt5();
+      EXPECT_EQ(*singleton_int_5, 5);
+    }
+    {
+      // Resurrect the static singleton, and assert that it
+      // still points to the same (static) memory.
+      CallbackSingletonWithStaticTrait::Trait::Resurrect();
+      EXPECT_EQ(GetStaticSingleton(), static_singleton);
+    }
+  }
+  // The leaky singleton shouldn't leak since SingletonLeak has not been called.
+  VerifiesCallbacksNotCalled();
+}
+
+// Asserts |ptr| is aligned on a (power-of-two) |align| byte boundary.
+#define EXPECT_ALIGNED(ptr, align) \
+    EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
+
+TEST_F(SingletonTest, Alignment) {
+  using base::AlignedMemory;
+
+  // Create some static singletons with increasing sizes and alignment
+  // requirements. By ordering this way, the linker will need to do some work to
+  // ensure proper alignment of the static data.
+  AlignedTestSingleton<int32_t>* align4 =
+      AlignedTestSingleton<int32_t>::GetInstance();
+  AlignedTestSingleton<AlignedMemory<32, 32> >* align32 =
+      AlignedTestSingleton<AlignedMemory<32, 32> >::GetInstance();
+  AlignedTestSingleton<AlignedMemory<128, 128> >* align128 =
+      AlignedTestSingleton<AlignedMemory<128, 128> >::GetInstance();
+  AlignedTestSingleton<AlignedMemory<4096, 4096> >* align4096 =
+      AlignedTestSingleton<AlignedMemory<4096, 4096> >::GetInstance();
+
+  EXPECT_ALIGNED(align4, 4);
+  EXPECT_ALIGNED(align32, 32);
+  EXPECT_ALIGNED(align128, 128);
+  EXPECT_ALIGNED(align4096, 4096);
+}
+
+} // namespace
+} // namespace base
diff --git a/libchrome/base/memory/weak_ptr.cc b/libchrome/base/memory/weak_ptr.cc
new file mode 100644
index 0000000..4e77b04
--- /dev/null
+++ b/libchrome/base/memory/weak_ptr.cc
@@ -0,0 +1,81 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/weak_ptr.h"
+
+namespace base {
+namespace internal {
+
+WeakReference::Flag::Flag() : is_valid_(true) {
+  // Flags only become bound when checked for validity, or invalidated,
+  // so that we can check that later validity/invalidation operations on
+  // the same Flag take place on the same sequenced thread.
+  sequence_checker_.DetachFromSequence();
+}
+
+// Marks every WeakPtr sharing this flag as invalid.
+void WeakReference::Flag::Invalidate() {
+  // The flag being invalidated with a single ref implies that there are no
+  // weak pointers in existence. Allow deletion on other thread in this case.
+  DCHECK(sequence_checker_.CalledOnValidSequencedThread() || HasOneRef())
+      << "WeakPtrs must be invalidated on the same sequenced thread.";
+  is_valid_ = false;
+}
+
+// Returns true until Invalidate() has been called; binds the flag to the
+// calling sequence on first use (see constructor comment).
+bool WeakReference::Flag::IsValid() const {
+  DCHECK(sequence_checker_.CalledOnValidSequencedThread())
+      << "WeakPtrs must be checked on the same sequenced thread.";
+  return is_valid_;
+}
+
+WeakReference::Flag::~Flag() {
+}
+
+// A default-constructed WeakReference has no flag and is never valid.
+WeakReference::WeakReference() {
+}
+
+WeakReference::WeakReference(const Flag* flag) : flag_(flag) {
+}
+
+WeakReference::~WeakReference() {
+}
+
+WeakReference::WeakReference(WeakReference&& other) = default;
+
+WeakReference::WeakReference(const WeakReference& other) = default;
+
+// Valid only when bound to a flag that has not been invalidated.
+bool WeakReference::is_valid() const { return flag_.get() && flag_->IsValid(); }
+
+WeakReferenceOwner::WeakReferenceOwner() {
+}
+
+// Invalidates all outstanding WeakReferences when the owner goes away.
+WeakReferenceOwner::~WeakReferenceOwner() {
+  Invalidate();
+}
+
+WeakReference WeakReferenceOwner::GetRef() const {
+  // If we hold the last reference to the Flag then create a new one.
+  if (!HasRefs())
+    flag_ = new WeakReference::Flag();
+
+  return WeakReference(flag_.get());
+}
+
+void WeakReferenceOwner::Invalidate() {
+  // Mark the current flag invalid and drop our reference; the next GetRef()
+  // will mint a fresh Flag.
+  if (flag_.get()) {
+    flag_->Invalidate();
+    flag_ = NULL;
+  }
+}
+
+// WeakPtrBase carries the WeakReference shared by every WeakPtr<T>.
+WeakPtrBase::WeakPtrBase() {
+}
+
+WeakPtrBase::~WeakPtrBase() {
+}
+
+WeakPtrBase::WeakPtrBase(const WeakReference& ref) : ref_(ref) {
+}
+
+} // namespace internal
+} // namespace base
diff --git a/libchrome/base/memory/weak_ptr.h b/libchrome/base/memory/weak_ptr.h
new file mode 100644
index 0000000..3544439
--- /dev/null
+++ b/libchrome/base/memory/weak_ptr.h
@@ -0,0 +1,361 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Weak pointers are pointers to an object that do not affect its lifetime,
+// and which may be invalidated (i.e. reset to nullptr) by the object, or its
+// owner, at any time, most commonly when the object is about to be deleted.
+
+// Weak pointers are useful when an object needs to be accessed safely by one
+// or more objects other than its owner, and those callers can cope with the
+// object vanishing and e.g. tasks posted to it being silently dropped.
+// Reference-counting such an object would complicate the ownership graph and
+// make it harder to reason about the object's lifetime.
+
+// EXAMPLE:
+//
+// class Controller {
+// public:
+// Controller() : weak_factory_(this) {}
+// void SpawnWorker() { Worker::StartNew(weak_factory_.GetWeakPtr()); }
+// void WorkComplete(const Result& result) { ... }
+// private:
+// // Member variables should appear before the WeakPtrFactory, to ensure
+//   // that any WeakPtrs to Controller are invalidated before its member
+//   // variables' destructors are executed, rendering them invalid.
+// WeakPtrFactory<Controller> weak_factory_;
+// };
+//
+// class Worker {
+// public:
+// static void StartNew(const WeakPtr<Controller>& controller) {
+// Worker* worker = new Worker(controller);
+// // Kick off asynchronous processing...
+// }
+// private:
+// Worker(const WeakPtr<Controller>& controller)
+// : controller_(controller) {}
+// void DidCompleteAsynchronousProcessing(const Result& result) {
+// if (controller_)
+// controller_->WorkComplete(result);
+// }
+// WeakPtr<Controller> controller_;
+// };
+//
+// With this implementation a caller may use SpawnWorker() to dispatch multiple
+// Workers and subsequently delete the Controller, without waiting for all
+// Workers to have completed.
+
+// ------------------------- IMPORTANT: Thread-safety -------------------------
+
+// Weak pointers may be passed safely between threads, but must always be
+// dereferenced and invalidated on the same SequencedTaskRunner otherwise
+// checking the pointer would be racey.
+//
+// To ensure correct use, the first time a WeakPtr issued by a WeakPtrFactory
+// is dereferenced, the factory and its WeakPtrs become bound to the calling
+// thread or current SequencedWorkerPool token, and cannot be dereferenced or
+// invalidated on any other task runner. Bound WeakPtrs can still be handed
+// off to other task runners, e.g. to use to post tasks back to object on the
+// bound sequence.
+//
+// If all WeakPtr objects are destroyed or invalidated then the factory is
+// unbound from the SequencedTaskRunner/Thread. The WeakPtrFactory may then be
+// destroyed, or new WeakPtr objects may be used, from a different sequence.
+//
+// Thus, at least one WeakPtr object must exist and have been dereferenced on
+// the correct thread to ensure that other WeakPtr objects will only be
+// used on the desired thread.
+
+#ifndef BASE_MEMORY_WEAK_PTR_H_
+#define BASE_MEMORY_WEAK_PTR_H_
+
+#include <cstddef>
+#include <type_traits>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequence_checker.h"
+
+namespace base {
+
+template <typename T> class SupportsWeakPtr;
+template <typename T> class WeakPtr;
+
+namespace internal {
+// These classes are part of the WeakPtr implementation.
+// DO NOT USE THESE CLASSES DIRECTLY YOURSELF.
+
+class BASE_EXPORT WeakReference {
+ public:
+ // Although Flag is bound to a specific SequencedTaskRunner, it may be
+ // deleted from another via base::WeakPtr::~WeakPtr().
+ class BASE_EXPORT Flag : public RefCountedThreadSafe<Flag> {
+ public:
+ Flag();
+
+ void Invalidate();
+ bool IsValid() const;
+
+ private:
+ friend class base::RefCountedThreadSafe<Flag>;
+
+ ~Flag();
+
+ SequenceChecker sequence_checker_;
+ bool is_valid_;
+ };
+
+ WeakReference();
+ explicit WeakReference(const Flag* flag);
+ ~WeakReference();
+
+ WeakReference(WeakReference&& other);
+ WeakReference(const WeakReference& other);
+ WeakReference& operator=(WeakReference&& other) = default;
+ WeakReference& operator=(const WeakReference& other) = default;
+
+ bool is_valid() const;
+
+ private:
+ scoped_refptr<const Flag> flag_;
+};
+
+class BASE_EXPORT WeakReferenceOwner {
+ public:
+ WeakReferenceOwner();
+ ~WeakReferenceOwner();
+
+ WeakReference GetRef() const;
+
+ bool HasRefs() const {
+ return flag_.get() && !flag_->HasOneRef();
+ }
+
+ void Invalidate();
+
+ private:
+ mutable scoped_refptr<WeakReference::Flag> flag_;
+};
+
+// This class simplifies the implementation of WeakPtr's type conversion
+// constructor by avoiding the need for a public accessor for ref_. A
+// WeakPtr<T> cannot access the private members of WeakPtr<U>, so this
+// base class gives us a way to access ref_ in a protected fashion.
+class BASE_EXPORT WeakPtrBase {
+ public:
+ WeakPtrBase();
+ ~WeakPtrBase();
+
+ WeakPtrBase(const WeakPtrBase& other) = default;
+ WeakPtrBase(WeakPtrBase&& other) = default;
+ WeakPtrBase& operator=(const WeakPtrBase& other) = default;
+ WeakPtrBase& operator=(WeakPtrBase&& other) = default;
+
+ protected:
+ explicit WeakPtrBase(const WeakReference& ref);
+
+ WeakReference ref_;
+};
+
+// This class provides a common implementation of common functions that would
+// otherwise get instantiated separately for each distinct instantiation of
+// SupportsWeakPtr<>.
+class SupportsWeakPtrBase {
+ public:
+ // A safe static downcast of a WeakPtr<Base> to WeakPtr<Derived>. This
+  // conversion will only compile if there exists a Base which inherits
+ // from SupportsWeakPtr<Base>. See base::AsWeakPtr() below for a helper
+ // function that makes calling this easier.
+ template<typename Derived>
+ static WeakPtr<Derived> StaticAsWeakPtr(Derived* t) {
+ static_assert(
+ std::is_base_of<internal::SupportsWeakPtrBase, Derived>::value,
+ "AsWeakPtr argument must inherit from SupportsWeakPtr");
+ return AsWeakPtrImpl<Derived>(t, *t);
+ }
+
+ private:
+ // This template function uses type inference to find a Base of Derived
+ // which is an instance of SupportsWeakPtr<Base>. We can then safely
+ // static_cast the Base* to a Derived*.
+ template <typename Derived, typename Base>
+ static WeakPtr<Derived> AsWeakPtrImpl(
+ Derived* t, const SupportsWeakPtr<Base>&) {
+ WeakPtr<Base> ptr = t->Base::AsWeakPtr();
+ return WeakPtr<Derived>(ptr.ref_, static_cast<Derived*>(ptr.ptr_));
+ }
+};
+
+} // namespace internal
+
+template <typename T> class WeakPtrFactory;
+
+// The WeakPtr class holds a weak reference to |T*|.
+//
+// This class is designed to be used like a normal pointer. You should always
+// null-test an object of this class before using it or invoking a method that
+// may result in the underlying object being destroyed.
+//
+// EXAMPLE:
+//
+// class Foo { ... };
+// WeakPtr<Foo> foo;
+// if (foo)
+// foo->method();
+//
+template <typename T>
+class WeakPtr : public internal::WeakPtrBase {
+ public:
+ WeakPtr() : ptr_(nullptr) {}
+
+ WeakPtr(std::nullptr_t) : ptr_(nullptr) {}
+
+ // Allow conversion from U to T provided U "is a" T. Note that this
+ // is separate from the (implicit) copy and move constructors.
+ template <typename U>
+ WeakPtr(const WeakPtr<U>& other) : WeakPtrBase(other), ptr_(other.ptr_) {
+ }
+ template <typename U>
+ WeakPtr(WeakPtr<U>&& other)
+ : WeakPtrBase(std::move(other)), ptr_(other.ptr_) {}
+
+ T* get() const { return ref_.is_valid() ? ptr_ : nullptr; }
+
+ T& operator*() const {
+ DCHECK(get() != nullptr);
+ return *get();
+ }
+ T* operator->() const {
+ DCHECK(get() != nullptr);
+ return get();
+ }
+
+ void reset() {
+ ref_ = internal::WeakReference();
+ ptr_ = nullptr;
+ }
+
+ // Allow conditionals to test validity, e.g. if (weak_ptr) {...};
+ explicit operator bool() const { return get() != nullptr; }
+
+ private:
+ friend class internal::SupportsWeakPtrBase;
+ template <typename U> friend class WeakPtr;
+ friend class SupportsWeakPtr<T>;
+ friend class WeakPtrFactory<T>;
+
+ WeakPtr(const internal::WeakReference& ref, T* ptr)
+ : WeakPtrBase(ref),
+ ptr_(ptr) {
+ }
+
+ // This pointer is only valid when ref_.is_valid() is true. Otherwise, its
+ // value is undefined (as opposed to nullptr).
+ T* ptr_;
+};
+
+// Allow callers to compare WeakPtrs against nullptr to test validity.
+template <class T>
+bool operator!=(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+ return !(weak_ptr == nullptr);
+}
+template <class T>
+bool operator!=(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+ return weak_ptr != nullptr;
+}
+template <class T>
+bool operator==(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+ return weak_ptr.get() == nullptr;
+}
+template <class T>
+bool operator==(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+ return weak_ptr == nullptr;
+}
+
+// A class may be composed of a WeakPtrFactory and thereby
+// control how it exposes weak pointers to itself. This is helpful if you only
+// need weak pointers within the implementation of a class. This class is also
+// useful when working with primitive types. For example, you could have a
+// WeakPtrFactory<bool> that is used to pass around a weak reference to a bool.
+template <class T>
+class WeakPtrFactory {
+ public:
+ explicit WeakPtrFactory(T* ptr) : ptr_(ptr) {
+ }
+
+ ~WeakPtrFactory() { ptr_ = nullptr; }
+
+ WeakPtr<T> GetWeakPtr() {
+ DCHECK(ptr_);
+ return WeakPtr<T>(weak_reference_owner_.GetRef(), ptr_);
+ }
+
+ // Call this method to invalidate all existing weak pointers.
+ void InvalidateWeakPtrs() {
+ DCHECK(ptr_);
+ weak_reference_owner_.Invalidate();
+ }
+
+ // Call this method to determine if any weak pointers exist.
+ bool HasWeakPtrs() const {
+ DCHECK(ptr_);
+ return weak_reference_owner_.HasRefs();
+ }
+
+ private:
+ internal::WeakReferenceOwner weak_reference_owner_;
+ T* ptr_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(WeakPtrFactory);
+};
+
+// A class may extend from SupportsWeakPtr to let others take weak pointers to
+// it. This avoids the class itself implementing boilerplate to dispense weak
+// pointers. However, since SupportsWeakPtr's destructor won't invalidate
+// weak pointers to the class until after the derived class' members have been
+// destroyed, its use can lead to subtle use-after-destroy issues.
+template <class T>
+class SupportsWeakPtr : public internal::SupportsWeakPtrBase {
+ public:
+ SupportsWeakPtr() {}
+
+ WeakPtr<T> AsWeakPtr() {
+ return WeakPtr<T>(weak_reference_owner_.GetRef(), static_cast<T*>(this));
+ }
+
+ protected:
+ ~SupportsWeakPtr() {}
+
+ private:
+ internal::WeakReferenceOwner weak_reference_owner_;
+ DISALLOW_COPY_AND_ASSIGN(SupportsWeakPtr);
+};
+
+// Helper function that uses type deduction to safely return a WeakPtr<Derived>
+// when Derived doesn't directly extend SupportsWeakPtr<Derived>, instead it
+// extends a Base that extends SupportsWeakPtr<Base>.
+//
+// EXAMPLE:
+//   class Base : public base::SupportsWeakPtr<Base> {};
+// class Derived : public Base {};
+//
+// Derived derived;
+// base::WeakPtr<Derived> ptr = base::AsWeakPtr(&derived);
+//
+// Note that the following doesn't work (invalid type conversion) since
+// Derived::AsWeakPtr() is WeakPtr<Base> SupportsWeakPtr<Base>::AsWeakPtr(),
+// and there's no way to safely cast WeakPtr<Base> to WeakPtr<Derived> at
+// the caller.
+//
+// base::WeakPtr<Derived> ptr = derived.AsWeakPtr(); // Fails.
+
+template <typename Derived>
+WeakPtr<Derived> AsWeakPtr(Derived* t) {
+ return internal::SupportsWeakPtrBase::StaticAsWeakPtr<Derived>(t);
+}
+
+} // namespace base
+
+#endif // BASE_MEMORY_WEAK_PTR_H_
diff --git a/libchrome/base/memory/weak_ptr_unittest.cc b/libchrome/base/memory/weak_ptr_unittest.cc
new file mode 100644
index 0000000..ebcf33c
--- /dev/null
+++ b/libchrome/base/memory/weak_ptr_unittest.cc
@@ -0,0 +1,681 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/weak_ptr.h"
+
+#include <memory>
+#include <string>
+
+#include "base/bind.h"
+#include "base/debug/leak_annotations.h"
+#include "base/location.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+WeakPtr<int> PassThru(WeakPtr<int> ptr) {
+ return ptr;
+}
+
+template <class T>
+class OffThreadObjectCreator {
+ public:
+ static T* NewObject() {
+ T* result;
+ {
+ Thread creator_thread("creator_thread");
+ creator_thread.Start();
+ creator_thread.task_runner()->PostTask(
+ FROM_HERE, base::Bind(OffThreadObjectCreator::CreateObject, &result));
+ }
+ DCHECK(result); // We synchronized on thread destruction above.
+ return result;
+ }
+ private:
+ static void CreateObject(T** result) {
+ *result = new T;
+ }
+};
+
+struct Base {
+ std::string member;
+};
+struct Derived : public Base {};
+
+struct TargetBase {};
+struct Target : public TargetBase, public SupportsWeakPtr<Target> {
+ virtual ~Target() {}
+};
+struct DerivedTarget : public Target {};
+struct Arrow {
+ WeakPtr<Target> target;
+};
+struct TargetWithFactory : public Target {
+ TargetWithFactory() : factory(this) {}
+ WeakPtrFactory<Target> factory;
+};
+
+// Helper class to create and destroy weak pointer copies
+// and delete objects on a background thread.
+class BackgroundThread : public Thread {
+ public:
+ BackgroundThread() : Thread("owner_thread") {}
+
+ ~BackgroundThread() override { Stop(); }
+
+ void CreateArrowFromTarget(Arrow** arrow, Target* target) {
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ task_runner()->PostTask(
+ FROM_HERE, base::Bind(&BackgroundThread::DoCreateArrowFromTarget, arrow,
+ target, &completion));
+ completion.Wait();
+ }
+
+ void CreateArrowFromArrow(Arrow** arrow, const Arrow* other) {
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ task_runner()->PostTask(
+ FROM_HERE, base::Bind(&BackgroundThread::DoCreateArrowFromArrow, arrow,
+ other, &completion));
+ completion.Wait();
+ }
+
+ void DeleteTarget(Target* object) {
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ task_runner()->PostTask(
+ FROM_HERE,
+ base::Bind(&BackgroundThread::DoDeleteTarget, object, &completion));
+ completion.Wait();
+ }
+
+ void CopyAndAssignArrow(Arrow* object) {
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ task_runner()->PostTask(
+ FROM_HERE, base::Bind(&BackgroundThread::DoCopyAndAssignArrow, object,
+ &completion));
+ completion.Wait();
+ }
+
+ void CopyAndAssignArrowBase(Arrow* object) {
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ task_runner()->PostTask(
+ FROM_HERE, base::Bind(&BackgroundThread::DoCopyAndAssignArrowBase,
+ object, &completion));
+ completion.Wait();
+ }
+
+ void DeleteArrow(Arrow* object) {
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ task_runner()->PostTask(
+ FROM_HERE,
+ base::Bind(&BackgroundThread::DoDeleteArrow, object, &completion));
+ completion.Wait();
+ }
+
+ Target* DeRef(const Arrow* arrow) {
+ WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ Target* result = nullptr;
+ task_runner()->PostTask(FROM_HERE, base::Bind(&BackgroundThread::DoDeRef,
+ arrow, &result, &completion));
+ completion.Wait();
+ return result;
+ }
+
+ protected:
+ static void DoCreateArrowFromArrow(Arrow** arrow,
+ const Arrow* other,
+ WaitableEvent* completion) {
+ *arrow = new Arrow;
+ **arrow = *other;
+ completion->Signal();
+ }
+
+ static void DoCreateArrowFromTarget(Arrow** arrow,
+ Target* target,
+ WaitableEvent* completion) {
+ *arrow = new Arrow;
+ (*arrow)->target = target->AsWeakPtr();
+ completion->Signal();
+ }
+
+ static void DoDeRef(const Arrow* arrow,
+ Target** result,
+ WaitableEvent* completion) {
+ *result = arrow->target.get();
+ completion->Signal();
+ }
+
+ static void DoDeleteTarget(Target* object, WaitableEvent* completion) {
+ delete object;
+ completion->Signal();
+ }
+
+ static void DoCopyAndAssignArrow(Arrow* object, WaitableEvent* completion) {
+ // Copy constructor.
+ Arrow a = *object;
+ // Assignment operator.
+ *object = a;
+ completion->Signal();
+ }
+
+ static void DoCopyAndAssignArrowBase(
+ Arrow* object,
+ WaitableEvent* completion) {
+ // Copy constructor.
+ WeakPtr<TargetBase> b = object->target;
+ // Assignment operator.
+ WeakPtr<TargetBase> c;
+ c = object->target;
+ completion->Signal();
+ }
+
+ static void DoDeleteArrow(Arrow* object, WaitableEvent* completion) {
+ delete object;
+ completion->Signal();
+ }
+};
+
+} // namespace
+
+TEST(WeakPtrFactoryTest, Basic) {
+ int data;
+ WeakPtrFactory<int> factory(&data);
+ WeakPtr<int> ptr = factory.GetWeakPtr();
+ EXPECT_EQ(&data, ptr.get());
+}
+
+TEST(WeakPtrFactoryTest, Comparison) {
+ int data;
+ WeakPtrFactory<int> factory(&data);
+ WeakPtr<int> ptr = factory.GetWeakPtr();
+ WeakPtr<int> ptr2 = ptr;
+ EXPECT_EQ(ptr.get(), ptr2.get());
+}
+
+TEST(WeakPtrFactoryTest, Move) {
+ int data;
+ WeakPtrFactory<int> factory(&data);
+ WeakPtr<int> ptr = factory.GetWeakPtr();
+ WeakPtr<int> ptr2 = factory.GetWeakPtr();
+ WeakPtr<int> ptr3 = std::move(ptr2);
+ EXPECT_NE(ptr.get(), ptr2.get());
+ EXPECT_EQ(ptr.get(), ptr3.get());
+}
+
+TEST(WeakPtrFactoryTest, OutOfScope) {
+ WeakPtr<int> ptr;
+ EXPECT_EQ(nullptr, ptr.get());
+ {
+ int data;
+ WeakPtrFactory<int> factory(&data);
+ ptr = factory.GetWeakPtr();
+ }
+ EXPECT_EQ(nullptr, ptr.get());
+}
+
+TEST(WeakPtrFactoryTest, Multiple) {
+ WeakPtr<int> a, b;
+ {
+ int data;
+ WeakPtrFactory<int> factory(&data);
+ a = factory.GetWeakPtr();
+ b = factory.GetWeakPtr();
+ EXPECT_EQ(&data, a.get());
+ EXPECT_EQ(&data, b.get());
+ }
+ EXPECT_EQ(nullptr, a.get());
+ EXPECT_EQ(nullptr, b.get());
+}
+
+TEST(WeakPtrFactoryTest, MultipleStaged) {
+ WeakPtr<int> a;
+ {
+ int data;
+ WeakPtrFactory<int> factory(&data);
+ a = factory.GetWeakPtr();
+ {
+ WeakPtr<int> b = factory.GetWeakPtr();
+ }
+ EXPECT_NE(nullptr, a.get());
+ }
+ EXPECT_EQ(nullptr, a.get());
+}
+
+TEST(WeakPtrFactoryTest, Dereference) {
+ Base data;
+ data.member = "123456";
+ WeakPtrFactory<Base> factory(&data);
+ WeakPtr<Base> ptr = factory.GetWeakPtr();
+ EXPECT_EQ(&data, ptr.get());
+ EXPECT_EQ(data.member, (*ptr).member);
+ EXPECT_EQ(data.member, ptr->member);
+}
+
+TEST(WeakPtrFactoryTest, UpCast) {
+ Derived data;
+ WeakPtrFactory<Derived> factory(&data);
+ WeakPtr<Base> ptr = factory.GetWeakPtr();
+ ptr = factory.GetWeakPtr();
+ EXPECT_EQ(ptr.get(), &data);
+}
+
+TEST(WeakPtrTest, ConstructFromNullptr) {
+ WeakPtr<int> ptr = PassThru(nullptr);
+ EXPECT_EQ(nullptr, ptr.get());
+}
+
+TEST(WeakPtrTest, SupportsWeakPtr) {
+ Target target;
+ WeakPtr<Target> ptr = target.AsWeakPtr();
+ EXPECT_EQ(&target, ptr.get());
+}
+
+TEST(WeakPtrTest, DerivedTarget) {
+ DerivedTarget target;
+ WeakPtr<DerivedTarget> ptr = AsWeakPtr(&target);
+ EXPECT_EQ(&target, ptr.get());
+}
+
+TEST(WeakPtrFactoryTest, BooleanTesting) {
+ int data;
+ WeakPtrFactory<int> factory(&data);
+
+ WeakPtr<int> ptr_to_an_instance = factory.GetWeakPtr();
+ EXPECT_TRUE(ptr_to_an_instance);
+ EXPECT_FALSE(!ptr_to_an_instance);
+
+ if (ptr_to_an_instance) {
+ } else {
+ ADD_FAILURE() << "Pointer to an instance should result in true.";
+ }
+
+ if (!ptr_to_an_instance) { // check for operator!().
+ ADD_FAILURE() << "Pointer to an instance should result in !x being false.";
+ }
+
+ WeakPtr<int> null_ptr;
+ EXPECT_FALSE(null_ptr);
+ EXPECT_TRUE(!null_ptr);
+
+ if (null_ptr) {
+ ADD_FAILURE() << "Null pointer should result in false.";
+ }
+
+ if (!null_ptr) { // check for operator!().
+ } else {
+ ADD_FAILURE() << "Null pointer should result in !x being true.";
+ }
+}
+
+TEST(WeakPtrFactoryTest, ComparisonToNull) {
+ int data;
+ WeakPtrFactory<int> factory(&data);
+
+ WeakPtr<int> ptr_to_an_instance = factory.GetWeakPtr();
+ EXPECT_NE(nullptr, ptr_to_an_instance);
+ EXPECT_NE(ptr_to_an_instance, nullptr);
+
+ WeakPtr<int> null_ptr;
+ EXPECT_EQ(null_ptr, nullptr);
+ EXPECT_EQ(nullptr, null_ptr);
+}
+
+TEST(WeakPtrTest, InvalidateWeakPtrs) {
+ int data;
+ WeakPtrFactory<int> factory(&data);
+ WeakPtr<int> ptr = factory.GetWeakPtr();
+ EXPECT_EQ(&data, ptr.get());
+ EXPECT_TRUE(factory.HasWeakPtrs());
+ factory.InvalidateWeakPtrs();
+ EXPECT_EQ(nullptr, ptr.get());
+ EXPECT_FALSE(factory.HasWeakPtrs());
+
+ // Test that the factory can create new weak pointers after a
+ // InvalidateWeakPtrs call, and they remain valid until the next
+ // InvalidateWeakPtrs call.
+ WeakPtr<int> ptr2 = factory.GetWeakPtr();
+ EXPECT_EQ(&data, ptr2.get());
+ EXPECT_TRUE(factory.HasWeakPtrs());
+ factory.InvalidateWeakPtrs();
+ EXPECT_EQ(nullptr, ptr2.get());
+ EXPECT_FALSE(factory.HasWeakPtrs());
+}
+
+TEST(WeakPtrTest, HasWeakPtrs) {
+ int data;
+ WeakPtrFactory<int> factory(&data);
+ {
+ WeakPtr<int> ptr = factory.GetWeakPtr();
+ EXPECT_TRUE(factory.HasWeakPtrs());
+ }
+ EXPECT_FALSE(factory.HasWeakPtrs());
+}
+
+TEST(WeakPtrTest, ObjectAndWeakPtrOnDifferentThreads) {
+ // Test that it is OK to create an object that supports WeakPtr on one thread,
+ // but use it on another. This tests that we do not trip runtime checks that
+ // ensure that a WeakPtr is not used by multiple threads.
+ std::unique_ptr<Target> target(OffThreadObjectCreator<Target>::NewObject());
+ WeakPtr<Target> weak_ptr = target->AsWeakPtr();
+ EXPECT_EQ(target.get(), weak_ptr.get());
+}
+
+TEST(WeakPtrTest, WeakPtrInitiateAndUseOnDifferentThreads) {
+ // Test that it is OK to create an object that has a WeakPtr member on one
+ // thread, but use it on another. This tests that we do not trip runtime
+ // checks that ensure that a WeakPtr is not used by multiple threads.
+ std::unique_ptr<Arrow> arrow(OffThreadObjectCreator<Arrow>::NewObject());
+ Target target;
+ arrow->target = target.AsWeakPtr();
+ EXPECT_EQ(&target, arrow->target.get());
+}
+
+TEST(WeakPtrTest, MoveOwnershipImplicitly) {
+ // Move object ownership to another thread by releasing all weak pointers
+ // on the original thread first, and then establish WeakPtr on a different
+ // thread.
+ BackgroundThread background;
+ background.Start();
+
+ Target* target = new Target();
+ {
+ WeakPtr<Target> weak_ptr = target->AsWeakPtr();
+ // Main thread deletes the WeakPtr, then the thread ownership of the
+ // object can be implicitly moved.
+ }
+ Arrow* arrow;
+
+ // Background thread creates WeakPtr(and implicitly owns the object).
+ background.CreateArrowFromTarget(&arrow, target);
+ EXPECT_EQ(background.DeRef(arrow), target);
+
+ {
+    // Main thread creates another WeakPtr, but this does not trigger implicit
+ // thread ownership move.
+ Arrow arrow;
+ arrow.target = target->AsWeakPtr();
+
+ // The new WeakPtr is owned by background thread.
+ EXPECT_EQ(target, background.DeRef(&arrow));
+ }
+
+ // Target can only be deleted on background thread.
+ background.DeleteTarget(target);
+ background.DeleteArrow(arrow);
+}
+
+TEST(WeakPtrTest, MoveOwnershipOfUnreferencedObject) {
+ BackgroundThread background;
+ background.Start();
+
+ Arrow* arrow;
+ {
+ Target target;
+ // Background thread creates WeakPtr.
+ background.CreateArrowFromTarget(&arrow, &target);
+
+ // Bind to background thread.
+ EXPECT_EQ(&target, background.DeRef(arrow));
+
+ // Release the only WeakPtr.
+ arrow->target.reset();
+
+ // Now we should be able to create a new reference from this thread.
+ arrow->target = target.AsWeakPtr();
+
+ // Re-bind to main thread.
+ EXPECT_EQ(&target, arrow->target.get());
+
+ // And the main thread can now delete the target.
+ }
+
+ delete arrow;
+}
+
+TEST(WeakPtrTest, MoveOwnershipAfterInvalidate) {
+ BackgroundThread background;
+ background.Start();
+
+ Arrow arrow;
+ std::unique_ptr<TargetWithFactory> target(new TargetWithFactory);
+
+ // Bind to main thread.
+ arrow.target = target->factory.GetWeakPtr();
+ EXPECT_EQ(target.get(), arrow.target.get());
+
+ target->factory.InvalidateWeakPtrs();
+ EXPECT_EQ(nullptr, arrow.target.get());
+
+ arrow.target = target->factory.GetWeakPtr();
+ // Re-bind to background thread.
+ EXPECT_EQ(target.get(), background.DeRef(&arrow));
+
+ // And the background thread can now delete the target.
+ background.DeleteTarget(target.release());
+}
+
+TEST(WeakPtrTest, MainThreadRefOutlivesBackgroundThreadRef) {
+ // Originating thread has a WeakPtr that outlives others.
+ // - Main thread creates a WeakPtr
+ // - Background thread creates a WeakPtr copy from the one in main thread
+ // - Destruct the WeakPtr on background thread
+ // - Destruct the WeakPtr on main thread
+ BackgroundThread background;
+ background.Start();
+
+ Target target;
+ Arrow arrow;
+ arrow.target = target.AsWeakPtr();
+
+ Arrow* arrow_copy;
+ background.CreateArrowFromArrow(&arrow_copy, &arrow);
+ EXPECT_EQ(arrow_copy->target.get(), &target);
+ background.DeleteArrow(arrow_copy);
+}
+
+TEST(WeakPtrTest, BackgroundThreadRefOutlivesMainThreadRef) {
+ // Originating thread drops all references before another thread.
+ // - Main thread creates a WeakPtr and passes copy to background thread
+ // - Destruct the pointer on main thread
+ // - Destruct the pointer on background thread
+ BackgroundThread background;
+ background.Start();
+
+ Target target;
+ Arrow* arrow_copy;
+ {
+ Arrow arrow;
+ arrow.target = target.AsWeakPtr();
+ background.CreateArrowFromArrow(&arrow_copy, &arrow);
+ }
+ EXPECT_EQ(arrow_copy->target.get(), &target);
+ background.DeleteArrow(arrow_copy);
+}
+
+TEST(WeakPtrTest, OwnerThreadDeletesObject) {
+  // Originating thread invalidates WeakPtrs while they are held by another thread.
+ // - Main thread creates WeakPtr and passes Copy to background thread
+ // - Object gets destroyed on main thread
+ // (invalidates WeakPtr on background thread)
+ // - WeakPtr gets destroyed on Thread B
+ BackgroundThread background;
+ background.Start();
+ Arrow* arrow_copy;
+ {
+ Target target;
+ Arrow arrow;
+ arrow.target = target.AsWeakPtr();
+ background.CreateArrowFromArrow(&arrow_copy, &arrow);
+ }
+ EXPECT_EQ(nullptr, arrow_copy->target.get());
+ background.DeleteArrow(arrow_copy);
+}
+
+TEST(WeakPtrTest, NonOwnerThreadCanCopyAndAssignWeakPtr) {
+ // Main thread creates a Target object.
+ Target target;
+ // Main thread creates an arrow referencing the Target.
+ Arrow *arrow = new Arrow();
+ arrow->target = target.AsWeakPtr();
+
+ // Background can copy and assign arrow (as well as the WeakPtr inside).
+ BackgroundThread background;
+ background.Start();
+ background.CopyAndAssignArrow(arrow);
+ background.DeleteArrow(arrow);
+}
+
+TEST(WeakPtrTest, NonOwnerThreadCanCopyAndAssignWeakPtrBase) {
+ // Main thread creates a Target object.
+ Target target;
+ // Main thread creates an arrow referencing the Target.
+ Arrow *arrow = new Arrow();
+ arrow->target = target.AsWeakPtr();
+
+ // Background can copy and assign arrow's WeakPtr to a base class WeakPtr.
+ BackgroundThread background;
+ background.Start();
+ background.CopyAndAssignArrowBase(arrow);
+ background.DeleteArrow(arrow);
+}
+
+TEST(WeakPtrTest, NonOwnerThreadCanDeleteWeakPtr) {
+ // Main thread creates a Target object.
+ Target target;
+ // Main thread creates an arrow referencing the Target.
+ Arrow* arrow = new Arrow();
+ arrow->target = target.AsWeakPtr();
+
+ // Background can delete arrow (as well as the WeakPtr inside).
+ BackgroundThread background;
+ background.Start();
+ background.DeleteArrow(arrow);
+}
+
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+
+TEST(WeakPtrDeathTest, WeakPtrCopyDoesNotChangeThreadBinding) {
+ // The default style "fast" does not support multi-threaded tests
+ // (introduces deadlock on Linux).
+ ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+
+ BackgroundThread background;
+ background.Start();
+
+ // Main thread creates a Target object.
+ Target target;
+ // Main thread creates an arrow referencing the Target.
+ Arrow arrow;
+ arrow.target = target.AsWeakPtr();
+
+ // Background copies the WeakPtr.
+ Arrow* arrow_copy;
+ background.CreateArrowFromArrow(&arrow_copy, &arrow);
+
+ // The copy is still bound to main thread so I can deref.
+ EXPECT_EQ(arrow.target.get(), arrow_copy->target.get());
+
+ // Although background thread created the copy, it can not deref the copied
+ // WeakPtr.
+ ASSERT_DEATH(background.DeRef(arrow_copy), "");
+
+ background.DeleteArrow(arrow_copy);
+}
+
+TEST(WeakPtrDeathTest, NonOwnerThreadDereferencesWeakPtrAfterReference) {
+ // The default style "fast" does not support multi-threaded tests
+ // (introduces deadlock on Linux).
+ ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+
+ // Main thread creates a Target object.
+ Target target;
+
+ // Main thread creates an arrow referencing the Target (so target's
+ // thread ownership can not be implicitly moved).
+ Arrow arrow;
+ arrow.target = target.AsWeakPtr();
+ arrow.target.get();
+
+ // Background thread tries to deref target, which violates thread ownership.
+ BackgroundThread background;
+ background.Start();
+ ASSERT_DEATH(background.DeRef(&arrow), "");
+}
+
+TEST(WeakPtrDeathTest, NonOwnerThreadDeletesWeakPtrAfterReference) {
+ // The default style "fast" does not support multi-threaded tests
+ // (introduces deadlock on Linux).
+ ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+
+ std::unique_ptr<Target> target(new Target());
+
+ // Main thread creates an arrow referencing the Target.
+ Arrow arrow;
+ arrow.target = target->AsWeakPtr();
+
+ // Background thread tries to deref target, binding it to the thread.
+ BackgroundThread background;
+ background.Start();
+ background.DeRef(&arrow);
+
+ // Main thread deletes Target, violating thread binding.
+ ASSERT_DEATH(target.reset(), "");
+
+ // |target.reset()| died so |target| still holds the object, so we
+ // must pass it to the background thread to teardown.
+ background.DeleteTarget(target.release());
+}
+
+TEST(WeakPtrDeathTest, NonOwnerThreadDeletesObjectAfterReference) {
+ // The default style "fast" does not support multi-threaded tests
+ // (introduces deadlock on Linux).
+ ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+
+ std::unique_ptr<Target> target(new Target());
+
+ // Main thread creates an arrow referencing the Target, and references it, so
+ // that it becomes bound to the thread.
+ Arrow arrow;
+ arrow.target = target->AsWeakPtr();
+ arrow.target.get();
+
+  // Background thread tries to delete target, violating thread binding.
+ BackgroundThread background;
+ background.Start();
+ ASSERT_DEATH(background.DeleteTarget(target.release()), "");
+}
+
+TEST(WeakPtrDeathTest, NonOwnerThreadReferencesObjectAfterDeletion) {
+ // The default style "fast" does not support multi-threaded tests
+ // (introduces deadlock on Linux).
+ ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+
+ std::unique_ptr<Target> target(new Target());
+
+ // Main thread creates an arrow referencing the Target.
+ Arrow arrow;
+ arrow.target = target->AsWeakPtr();
+
+ // Background thread tries to delete target, binding the object to the thread.
+ BackgroundThread background;
+ background.Start();
+ background.DeleteTarget(target.release());
+
+ // Main thread attempts to dereference the target, violating thread binding.
+ ASSERT_DEATH(arrow.target.get(), "");
+}
+
+#endif
+
+} // namespace base
diff --git a/libchrome/base/memory/weak_ptr_unittest.nc b/libchrome/base/memory/weak_ptr_unittest.nc
new file mode 100644
index 0000000..9b1226b
--- /dev/null
+++ b/libchrome/base/memory/weak_ptr_unittest.nc
@@ -0,0 +1,141 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
+
+#include "base/memory/weak_ptr.h"
+
+namespace base {
+
+struct Producer : SupportsWeakPtr<Producer> {};  // Weakly-referenceable base.
+struct DerivedProducer : Producer {};            // Inherits weak-ptr support.
+struct OtherDerivedProducer : Producer {};       // Sibling of DerivedProducer.
+struct MultiplyDerivedProducer : Producer,       // Two SupportsWeakPtr bases:
+                                 SupportsWeakPtr<MultiplyDerivedProducer> {};
+struct Unrelated {};                             // No weak-ptr support.
+struct DerivedUnrelated : Unrelated {};          // Still no weak-ptr support.
+
+#if defined(NCTEST_AUTO_DOWNCAST) // [r"fatal error: cannot initialize a member subobject of type 'base::DerivedProducer \*' with an lvalue of type 'base::Producer \*const'"]
+
+void WontCompile() {
+ Producer f;
+ WeakPtr<Producer> ptr = f.AsWeakPtr();
+ WeakPtr<DerivedProducer> derived_ptr = ptr;
+}
+
+#elif defined(NCTEST_STATIC_DOWNCAST) // [r"fatal error: cannot initialize a member subobject of type 'base::DerivedProducer \*' with an lvalue of type 'base::Producer \*const'"]
+
+void WontCompile() {
+ Producer f;
+ WeakPtr<Producer> ptr = f.AsWeakPtr();
+ WeakPtr<DerivedProducer> derived_ptr =
+ static_cast<WeakPtr<DerivedProducer> >(ptr);
+}
+
+#elif defined(NCTEST_AUTO_REF_DOWNCAST) // [r"fatal error: non-const lvalue reference to type 'WeakPtr<base::DerivedProducer>' cannot bind to a value of unrelated type 'WeakPtr<base::Producer>'"]
+
+void WontCompile() {
+ Producer f;
+ WeakPtr<Producer> ptr = f.AsWeakPtr();
+ WeakPtr<DerivedProducer>& derived_ptr = ptr;
+}
+
+#elif defined(NCTEST_STATIC_REF_DOWNCAST) // [r"fatal error: non-const lvalue reference to type 'WeakPtr<base::DerivedProducer>' cannot bind to a value of unrelated type 'WeakPtr<base::Producer>'"]
+
+void WontCompile() {
+ Producer f;
+ WeakPtr<Producer> ptr = f.AsWeakPtr();
+ WeakPtr<DerivedProducer>& derived_ptr =
+ static_cast<WeakPtr<DerivedProducer>&>(ptr);
+}
+
+#elif defined(NCTEST_STATIC_ASWEAKPTR_DOWNCAST) // [r"no matching function"]
+
+void WontCompile() {
+ Producer f;
+ WeakPtr<DerivedProducer> ptr =
+ SupportsWeakPtr<Producer>::StaticAsWeakPtr<DerivedProducer>(&f);
+}
+
+#elif defined(NCTEST_UNSAFE_HELPER_DOWNCAST) // [r"fatal error: cannot initialize a member subobject of type 'base::DerivedProducer \*' with an lvalue of type 'base::Producer \*'"]
+
+void WontCompile() {
+ Producer f;
+ WeakPtr<DerivedProducer> ptr = AsWeakPtr(&f);
+}
+
+#elif defined(NCTEST_UNSAFE_INSTANTIATED_HELPER_DOWNCAST) // [r"no matching function"]
+
+void WontCompile() {
+ Producer f;
+ WeakPtr<DerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);
+}
+
+#elif defined(NCTEST_UNSAFE_WRONG_INSANTIATED_HELPER_DOWNCAST) // [r"fatal error: cannot initialize a member subobject of type 'base::DerivedProducer \*' with an lvalue of type 'base::Producer \*'"]
+
+void WontCompile() {
+ Producer f;
+ WeakPtr<DerivedProducer> ptr = AsWeakPtr<Producer>(&f);
+}
+
+#elif defined(NCTEST_UNSAFE_HELPER_CAST) // [r"fatal error: cannot initialize a member subobject of type 'base::OtherDerivedProducer \*' with an lvalue of type 'base::DerivedProducer \*'"]
+
+void WontCompile() {
+ DerivedProducer f;
+ WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr(&f);
+}
+
+#elif defined(NCTEST_UNSAFE_INSTANTIATED_HELPER_SIDECAST) // [r"fatal error: no matching function for call to 'AsWeakPtr'"]
+
+void WontCompile() {
+ DerivedProducer f;
+ WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<OtherDerivedProducer>(&f);
+}
+
+#elif defined(NCTEST_UNSAFE_WRONG_INSTANTIATED_HELPER_SIDECAST) // [r"fatal error: cannot initialize a member subobject of type 'base::OtherDerivedProducer \*' with an lvalue of type 'base::DerivedProducer \*'"]
+
+void WontCompile() {
+ DerivedProducer f;
+ WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);
+}
+
+#elif defined(NCTEST_UNRELATED_HELPER) // [r"fatal error: cannot initialize a member subobject of type 'base::Unrelated \*' with an lvalue of type 'base::DerivedProducer \*'"]
+
+void WontCompile() {
+ DerivedProducer f;
+ WeakPtr<Unrelated> ptr = AsWeakPtr(&f);
+}
+
+#elif defined(NCTEST_UNRELATED_INSTANTIATED_HELPER) // [r"no matching function"]
+
+void WontCompile() {
+ DerivedProducer f;
+ WeakPtr<Unrelated> ptr = AsWeakPtr<Unrelated>(&f);
+}
+
+#elif defined(NCTEST_COMPLETELY_UNRELATED_HELPER) // [r"fatal error: static_assert failed \"AsWeakPtr argument must inherit from SupportsWeakPtr\""]
+
+void WontCompile() {
+ Unrelated f;
+ WeakPtr<Unrelated> ptr = AsWeakPtr(&f);
+}
+
+#elif defined(NCTEST_DERIVED_COMPLETELY_UNRELATED_HELPER) // [r"fatal error: static_assert failed \"AsWeakPtr argument must inherit from SupportsWeakPtr\""]
+
+void WontCompile() {
+ DerivedUnrelated f;
+ WeakPtr<Unrelated> ptr = AsWeakPtr(&f);
+}
+
+#elif defined(NCTEST_AMBIGUOUS_ANCESTORS) // [r"fatal error: use of undeclared identifier 'AsWeakPtrImpl'"]
+
+void WontCompile() {
+ MultiplyDerivedProducer f;
+ WeakPtr<MultiplyDerivedProducer> ptr = AsWeakPtr(&f);
+}
+
+#endif
+
+}
diff --git a/libchrome/base/message_loop/incoming_task_queue.cc b/libchrome/base/message_loop/incoming_task_queue.cc
new file mode 100644
index 0000000..bca1d52
--- /dev/null
+++ b/libchrome/base/message_loop/incoming_task_queue.cc
@@ -0,0 +1,200 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/incoming_task_queue.h"
+
+#include <limits>
+
+#include "base/location.h"
+#include "base/message_loop/message_loop.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+// Delays larger than this are often bogus, and a warning should be emitted in
+// debug builds to warn developers. http://crbug.com/450045
+const int kTaskDelayWarningThresholdInSeconds =
+ 14 * 24 * 60 * 60; // 14 days.
+#endif
+
+// Returns true if MessagePump::ScheduleWork() must be called one
+// time for every task that is added to the MessageLoop incoming queue.
+bool AlwaysNotifyPump(MessageLoop::Type type) {
+#if defined(OS_ANDROID)
+  // The Android UI message loop needs to get notified each time a task is
+  // added to the incoming queue, not only when the queue transitions from
+  // empty to non-empty.
+  return type == MessageLoop::TYPE_UI || type == MessageLoop::TYPE_JAVA;
+#else
+  (void)type;  // Avoid an unused-parameter warning.
+  return false;
+#endif
+}
+
+TimeTicks CalculateDelayedRuntime(TimeDelta delay) {  // Absolute run time for |delay|.
+  TimeTicks delayed_run_time;  // Stays null (is_null()) for immediate tasks.
+  if (delay > TimeDelta())
+    delayed_run_time = TimeTicks::Now() + delay;
+  else
+    DCHECK_EQ(delay.InMilliseconds(), 0) << "delay should not be negative";  // Debug-only check.
+  return delayed_run_time;
+}
+
+} // namespace
+
+IncomingTaskQueue::IncomingTaskQueue(MessageLoop* message_loop)
+    : high_res_task_count_(0),
+      message_loop_(message_loop),
+      next_sequence_num_(0),
+      message_loop_scheduled_(false),
+      always_schedule_work_(AlwaysNotifyPump(message_loop_->type())),  // Pump-type dependent.
+      is_ready_for_scheduling_(false) {
+}
+
+bool IncomingTaskQueue::AddToIncomingQueue(
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    TimeDelta delay,
+    bool nestable) {
+  DLOG_IF(WARNING,  // Debug-only warning: super-long delays are usually bugs.
+          delay.InSeconds() > kTaskDelayWarningThresholdInSeconds)
+      << "Requesting super-long task delay period of " << delay.InSeconds()
+      << " seconds from here: " << from_here.ToString();
+
+  PendingTask pending_task(
+      from_here, task, CalculateDelayedRuntime(delay), nestable);
+#if defined(OS_WIN)
+  // We consider the task needs a high resolution timer if the delay is
+  // more than 0 and less than 32ms. This caps the relative error to
+  // less than 50% : a 33ms wait can wake at 48ms since the default
+  // resolution on Windows is between 10 and 15ms.
+  if (delay > TimeDelta() &&
+      delay.InMilliseconds() < (2 * Time::kMinLowResolutionThresholdMs)) {
+    pending_task.is_high_res = true;
+  }
+#endif
+  return PostPendingTask(&pending_task);  // May reset |pending_task.task|.
+}
+
+bool IncomingTaskQueue::HasHighResolutionTasks() {
+  AutoLock lock(incoming_queue_lock_);  // |high_res_task_count_| is lock-guarded.
+  return high_res_task_count_ > 0;
+}
+
+bool IncomingTaskQueue::IsIdleForTesting() {
+  AutoLock lock(incoming_queue_lock_);
+  return incoming_queue_.empty();  // Only the incoming queue is inspected.
+}
+
+int IncomingTaskQueue::ReloadWorkQueue(TaskQueue* work_queue) {
+  // Make sure no tasks are lost.
+  DCHECK(work_queue->empty());
+
+  // Acquire all we can from the inter-thread queue with one lock acquisition.
+  AutoLock lock(incoming_queue_lock_);
+  if (incoming_queue_.empty()) {
+    // If the loop attempts to reload but there are no tasks in the incoming
+    // queue, that means it will go to sleep waiting for more work. If the
+    // incoming queue becomes nonempty we need to schedule it again.
+    message_loop_scheduled_ = false;
+  } else {
+    incoming_queue_.swap(*work_queue);  // O(1) transfer of all pending tasks.
+  }
+  // Reset the count of high resolution tasks since our queue is now empty.
+  int high_res_tasks = high_res_task_count_;
+  high_res_task_count_ = 0;
+  return high_res_tasks;
+}
+
+void IncomingTaskQueue::WillDestroyCurrentMessageLoop() {
+  base::subtle::AutoWriteLock lock(message_loop_lock_);  // Excludes in-flight posters.
+  message_loop_ = NULL;
+}
+
+void IncomingTaskQueue::StartScheduling() {
+  bool schedule_work;
+  {
+    AutoLock lock(incoming_queue_lock_);
+    DCHECK(!is_ready_for_scheduling_);
+    DCHECK(!message_loop_scheduled_);
+    is_ready_for_scheduling_ = true;
+    schedule_work = !incoming_queue_.empty();  // Tasks posted before binding.
+  }
+  if (schedule_work) {
+    DCHECK(message_loop_);
+    // Don't need to lock |message_loop_lock_| here because this function is
+    // called by MessageLoop on its thread.
+    message_loop_->ScheduleWork();
+  }
+}
+
+IncomingTaskQueue::~IncomingTaskQueue() {
+  // Verify that WillDestroyCurrentMessageLoop() has been called.
+  DCHECK(!message_loop_);  // Must already be disconnected from the loop.
+}
+
+bool IncomingTaskQueue::PostPendingTask(PendingTask* pending_task) {
+  // Warning: Don't try to short-circuit, and handle this thread's tasks more
+  // directly, as it could starve handling of foreign threads. Put every task
+  // into this queue.
+
+  // Ensures |message_loop_| isn't destroyed while running.
+  base::subtle::AutoReadLock hold_message_loop(message_loop_lock_);
+
+  if (!message_loop_) {
+    pending_task->task.Reset();  // Drop the task; the loop is gone.
+    return false;
+  }
+
+  bool schedule_work = false;
+  {
+    AutoLock hold(incoming_queue_lock_);
+
+#if defined(OS_WIN)
+    if (pending_task->is_high_res)
+      ++high_res_task_count_;
+#endif
+
+    // Initialize the sequence number. The sequence number is used for delayed
+    // tasks (to facilitate FIFO sorting when two tasks have the same
+    // delayed_run_time value) and for identifying the task in about:tracing.
+    pending_task->sequence_num = next_sequence_num_++;
+
+    message_loop_->task_annotator()->DidQueueTask("MessageLoop::PostTask",
+                                                  *pending_task);
+
+    bool was_empty = incoming_queue_.empty();
+    incoming_queue_.push(std::move(*pending_task));
+
+    if (is_ready_for_scheduling_ &&
+        (always_schedule_work_ || (!message_loop_scheduled_ && was_empty))) {
+      schedule_work = true;
+      // After we've scheduled the message loop, we do not need to do so again
+      // until we know it has processed all of the work in our queue and is
+      // waiting for more work again. The message loop will always attempt to
+      // reload from the incoming queue before waiting again so we clear this
+      // flag in ReloadWorkQueue().
+      message_loop_scheduled_ = true;
+    }
+  }
+
+  // Wake up the message loop and schedule work. This is done outside
+  // |incoming_queue_lock_| because signaling the message loop may cause this
+  // thread to be switched. If |incoming_queue_lock_| is held, any other thread
+  // that wants to post a task will be blocked until this thread switches back
+  // in and releases |incoming_queue_lock_|.
+  if (schedule_work)
+    message_loop_->ScheduleWork();  // Safe: the read lock is still held.
+
+  return true;
+}
+
+} // namespace internal
+} // namespace base
diff --git a/libchrome/base/message_loop/incoming_task_queue.h b/libchrome/base/message_loop/incoming_task_queue.h
new file mode 100644
index 0000000..aff71d2
--- /dev/null
+++ b/libchrome/base/message_loop/incoming_task_queue.h
@@ -0,0 +1,115 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_INCOMING_TASK_QUEUE_H_
+#define BASE_MESSAGE_LOOP_INCOMING_TASK_QUEUE_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/pending_task.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/read_write_lock.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class MessageLoop;
+class WaitableEvent;
+
+namespace internal {
+
+// Implements a queue of tasks posted to the message loop running on the current
+// thread. This class takes care of synchronizing posting tasks from different
+// threads and together with MessageLoop ensures clean shutdown.
+class BASE_EXPORT IncomingTaskQueue
+    : public RefCountedThreadSafe<IncomingTaskQueue> {
+ public:
+  explicit IncomingTaskQueue(MessageLoop* message_loop);
+
+  // Appends a task to the incoming queue. Posting of all tasks is routed
+  // through AddToIncomingQueue() to make sure that posting a task is properly
+  // synchronized between different threads.
+  //
+  // Returns true if the task was successfully added to the queue, otherwise
+  // returns false. In all cases, the ownership of |task| is transferred to the
+  // called method.
+  bool AddToIncomingQueue(const tracked_objects::Location& from_here,
+                          const Closure& task,
+                          TimeDelta delay,
+                          bool nestable);
+
+  // Returns true if the queue contains tasks that require higher than default
+  // timer resolution. Currently only needed for Windows.
+  bool HasHighResolutionTasks();
+
+  // Returns true if the message loop is "idle". Provided for testing.
+  bool IsIdleForTesting();
+
+  // Loads tasks from the |incoming_queue_| into |*work_queue|. Must be called
+  // from the thread that is running the loop. Returns the number of tasks that
+  // require high resolution timers.
+  int ReloadWorkQueue(TaskQueue* work_queue);
+
+  // Disconnects |this| from the parent message loop.
+  void WillDestroyCurrentMessageLoop();
+
+  // This should be called when the message loop becomes ready for
+  // scheduling work.
+  void StartScheduling();
+
+ private:
+  friend class RefCountedThreadSafe<IncomingTaskQueue>;
+  virtual ~IncomingTaskQueue();
+
+  // Adds a task to |incoming_queue_|. The caller retains ownership of
+  // |pending_task|, but this function will reset the value of
+  // |pending_task->task|. This is needed to ensure that the posting call stack
+  // does not retain |pending_task->task| beyond this function call.
+  bool PostPendingTask(PendingTask* pending_task);
+
+  // Wakes up the message loop and schedules work.
+  void ScheduleWork();  // NOTE(review): no definition in the .cc -- stale declaration? Confirm.
+
+  // Number of tasks that require high resolution timing. This value is kept
+  // so that ReloadWorkQueue() completes in constant time.
+  int high_res_task_count_;
+
+  // The lock that protects access to the members of this class, except
+  // |message_loop_|.
+  base::Lock incoming_queue_lock_;
+
+  // Lock that protects |message_loop_| to prevent it from being deleted while a
+  // task is being posted.
+  base::subtle::ReadWriteLock message_loop_lock_;
+
+  // An incoming queue of tasks that are acquired under a mutex for processing
+  // on this instance's thread. These tasks have not yet been pushed to
+  // |message_loop_|.
+  TaskQueue incoming_queue_;
+
+  // Points to the message loop that owns |this|.
+  MessageLoop* message_loop_;
+
+  // The next sequence number to use for delayed tasks.
+  int next_sequence_num_;
+
+  // True if our message loop has already been scheduled and does not need to be
+  // scheduled again until an empty reload occurs.
+  bool message_loop_scheduled_;
+
+  // True if we always need to call ScheduleWork when receiving a new task, even
+  // if the incoming queue was not empty.
+  const bool always_schedule_work_;
+
+  // False until StartScheduling() is called.
+  bool is_ready_for_scheduling_;
+
+  DISALLOW_COPY_AND_ASSIGN(IncomingTaskQueue);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_MESSAGE_LOOP_INCOMING_TASK_QUEUE_H_
diff --git a/libchrome/base/message_loop/message_loop.cc b/libchrome/base/message_loop/message_loop.cc
new file mode 100644
index 0000000..54369a9
--- /dev/null
+++ b/libchrome/base/message_loop/message_loop.cc
@@ -0,0 +1,778 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop.h"
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_pump_default.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/run_loop.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "base/threading/thread_local.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+#include "base/tracked_objects.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include "base/message_loop/message_pump_mac.h"
+#endif
+#if defined(OS_POSIX) && !defined(OS_IOS)
+#include "base/message_loop/message_pump_libevent.h"
+#endif
+#if defined(OS_ANDROID)
+#include "base/message_loop/message_pump_android.h"
+#endif
+#if defined(USE_GLIB)
+#include "base/message_loop/message_pump_glib.h"
+#endif
+
+namespace base {
+
+namespace {
+
+// A lazily created thread local storage for quick access to a thread's message
+// loop, if one exists. This should be safe and free of static constructors.
+LazyInstance<base::ThreadLocalPointer<MessageLoop> >::Leaky lazy_tls_ptr =
+ LAZY_INSTANCE_INITIALIZER;
+
+// Logical events for Histogram profiling. Run with --message-loop-histogrammer
+// to get an accounting of messages and actions taken on each thread.
+const int kTaskRunEvent = 0x1;
+#if !defined(OS_NACL)
+const int kTimerEvent = 0x2;
+
+// Provide range of message IDs for use in histogramming and debug display.
+const int kLeastNonZeroMessageId = 1;
+const int kMaxMessageId = 1099;
+const int kNumberOfDistinctMessagesDisplayed = 1100;
+
+// Provide a macro that takes an expression (such as a constant, or macro
+// constant) and creates a pair to initialize an array of pairs. In this case,
+// our pair consists of the expressions value, and the "stringized" version
+// of the expression (i.e., the expression put in quotes). For example, if
+// we have:
+// #define FOO 2
+// #define BAR 5
+// then the following:
+// VALUE_TO_NUMBER_AND_NAME(FOO + BAR)
+// will expand to:
+// {7, "FOO + BAR"}
+// We use the resulting array as an argument to our histogram, which reads the
+// number as a bucket identifier, and proceeds to use the corresponding name
+// in the pair (i.e., the quoted string) when printing out a histogram.
+#define VALUE_TO_NUMBER_AND_NAME(name) {name, #name},
+
+const LinearHistogram::DescriptionPair event_descriptions_[] = {
+ // Provide some pretty print capability in our histogram for our internal
+ // messages.
+
+ // A few events we handle (kindred to messages), and used to profile actions.
+ VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent)
+ VALUE_TO_NUMBER_AND_NAME(kTimerEvent)
+
+ {-1, NULL} // The list must be null-terminated, per API to histogram.
+};
+#endif // !defined(OS_NACL)
+
+bool enable_histogrammer_ = false;
+
+MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = NULL;
+
+#if defined(OS_IOS)
+typedef MessagePumpIOSForIO MessagePumpForIO;
+#elif defined(OS_NACL_SFI)
+typedef MessagePumpDefault MessagePumpForIO;
+#elif defined(OS_POSIX)
+typedef MessagePumpLibevent MessagePumpForIO;
+#endif
+
+#if !defined(OS_NACL_SFI)
+MessagePumpForIO* ToPumpIO(MessagePump* pump) {
+  return static_cast<MessagePumpForIO*>(pump);  // Caller guarantees the type.
+}
+#endif  // !defined(OS_NACL_SFI)
+
+std::unique_ptr<MessagePump> ReturnPump(std::unique_ptr<MessagePump> pump) {
+  return pump;  // Identity; lets a pump be passed through Bind()/Passed().
+}
+
+} // namespace
+
+//------------------------------------------------------------------------------
+
+MessageLoop::TaskObserver::TaskObserver() {
+}
+
+MessageLoop::TaskObserver::~TaskObserver() {
+}
+
+MessageLoop::DestructionObserver::~DestructionObserver() {
+}
+
+MessageLoop::NestingObserver::~NestingObserver() {}  // Trivial; defined out of line.
+
+//------------------------------------------------------------------------------
+
+MessageLoop::MessageLoop(Type type)
+    : MessageLoop(type, MessagePumpFactoryCallback()) {
+  BindToCurrentThread();  // Unlike CreateUnbound(), binds immediately.
+}
+
+MessageLoop::MessageLoop(std::unique_ptr<MessagePump> pump)
+    : MessageLoop(TYPE_CUSTOM, Bind(&ReturnPump, Passed(&pump))) {
+  BindToCurrentThread();
+}
+
+MessageLoop::~MessageLoop() {
+  // If |pump_| is non-null, this message loop has been bound and should be the
+  // current one on this thread. Otherwise, this loop is being destructed before
+  // it was bound to a thread, so a different message loop (or no loop at all)
+  // may be current.
+  DCHECK((pump_ && current() == this) || (!pump_ && current() != this));
+
+  // iOS just attaches to the loop, it doesn't Run it.
+  // TODO(stuartmorgan): Consider wiring up a Detach().
+#if !defined(OS_IOS)
+  DCHECK(!run_loop_);
+#endif
+
+#if defined(OS_WIN)
+  if (in_high_res_mode_)
+    Time::ActivateHighResolutionTimer(false);
+#endif
+  // Clean up any unprocessed tasks, but take care: deleting a task could
+  // result in the addition of more tasks (e.g., via DeleteSoon). We set a
+  // limit on the number of times we will allow a deleted task to generate more
+  // tasks. Normally, we should only pass through this loop once or twice. If
+  // we end up hitting the loop limit, then it is probably due to one task that
+  // is being stubborn. Inspect the queues to see who is left.
+  bool did_work;  // Always assigned: the loop body runs at least once.
+  for (int i = 0; i < 100; ++i) {
+    DeletePendingTasks();
+    ReloadWorkQueue();
+    // If we end up with empty queues, then break out of the loop.
+    did_work = DeletePendingTasks();
+    if (!did_work)
+      break;
+  }
+  DCHECK(!did_work);
+
+  // Let interested parties have one last shot at accessing this.
+  FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
+                    WillDestroyCurrentMessageLoop());
+
+  thread_task_runner_handle_.reset();
+
+  // Tell the incoming queue that we are dying.
+  incoming_task_queue_->WillDestroyCurrentMessageLoop();
+  incoming_task_queue_ = NULL;
+  unbound_task_runner_ = NULL;
+  task_runner_ = NULL;
+
+  // OK, now make it so that no one can find us.
+  if (current() == this)
+    lazy_tls_ptr.Pointer()->Set(nullptr);
+}
+
+// static
+MessageLoop* MessageLoop::current() {
+  // TODO(darin): sadly, we cannot enable this yet since people call us even
+  // when they have no intention of using us.
+  // DCHECK(loop) << "Ouch, did you forget to initialize me?";
+  return lazy_tls_ptr.Pointer()->Get();  // May be null on unbound threads.
+}
+
+// static
+void MessageLoop::EnableHistogrammer(bool enable) {
+  enable_histogrammer_ = enable;  // Process-wide flag.
+}
+
+// static
+bool MessageLoop::InitMessagePumpForUIFactory(MessagePumpFactory* factory) {
+  if (message_pump_for_ui_factory_)
+    return false;  // First registered factory wins.
+
+  message_pump_for_ui_factory_ = factory;
+  return true;
+}
+
+// static
+std::unique_ptr<MessagePump> MessageLoop::CreateMessagePumpForType(Type type) {
+// TODO(rvargas): Get rid of the OS guards.
+#if defined(USE_GLIB) && !defined(OS_NACL)
+  typedef MessagePumpGlib MessagePumpForUI;
+#elif defined(OS_LINUX) && !defined(OS_NACL)
+  typedef MessagePumpLibevent MessagePumpForUI;
+#endif
+
+#if defined(OS_IOS) || defined(OS_MACOSX)
+#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>(MessagePumpMac::Create())
+#elif defined(OS_NACL)
+// Currently NaCl doesn't have a UI MessageLoop.
+// TODO(abarth): Figure out if we need this.
+#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>()
+#else
+#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>(new MessagePumpForUI())
+#endif
+
+#if defined(OS_MACOSX)
+  // Use an OS native runloop on Mac to support timer coalescing.
+#define MESSAGE_PUMP_DEFAULT \
+  std::unique_ptr<MessagePump>(new MessagePumpCFRunLoop())
+#else
+#define MESSAGE_PUMP_DEFAULT \
+  std::unique_ptr<MessagePump>(new MessagePumpDefault())
+#endif
+
+  if (type == MessageLoop::TYPE_UI) {
+    if (message_pump_for_ui_factory_)
+      return message_pump_for_ui_factory_();  // Embedder-provided pump.
+    return MESSAGE_PUMP_UI;
+  }
+  if (type == MessageLoop::TYPE_IO)
+    return std::unique_ptr<MessagePump>(new MessagePumpForIO());
+
+#if defined(OS_ANDROID)
+  if (type == MessageLoop::TYPE_JAVA)
+    return std::unique_ptr<MessagePump>(new MessagePumpForUI());
+#endif
+
+  DCHECK_EQ(MessageLoop::TYPE_DEFAULT, type);  // TYPE_CUSTOM uses |pump_factory_| instead.
+  return MESSAGE_PUMP_DEFAULT;
+}
+
+void MessageLoop::AddDestructionObserver(
+    DestructionObserver* destruction_observer) {
+  DCHECK_EQ(this, current());  // Observer lists are only touched on this thread.
+  destruction_observers_.AddObserver(destruction_observer);
+}
+
+void MessageLoop::RemoveDestructionObserver(
+    DestructionObserver* destruction_observer) {
+  DCHECK_EQ(this, current());
+  destruction_observers_.RemoveObserver(destruction_observer);
+}
+
+void MessageLoop::AddNestingObserver(NestingObserver* observer) {
+  DCHECK_EQ(this, current());
+  nesting_observers_.AddObserver(observer);
+}
+
+void MessageLoop::RemoveNestingObserver(NestingObserver* observer) {
+  DCHECK_EQ(this, current());
+  nesting_observers_.RemoveObserver(observer);
+}
+
+void MessageLoop::PostTask(
+    const tracked_objects::Location& from_here,
+    const Closure& task) {
+  task_runner_->PostTask(from_here, task);  // Delegates to the bound task runner.
+}
+
+void MessageLoop::PostDelayedTask(
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    TimeDelta delay) {
+  task_runner_->PostDelayedTask(from_here, task, delay);
+}
+
+void MessageLoop::Run() {
+  DCHECK(pump_);  // The loop must be bound to a thread first.
+  RunLoop run_loop;
+  run_loop.Run();
+}
+
+void MessageLoop::RunUntilIdle() {
+  DCHECK(pump_);
+  RunLoop run_loop;
+  run_loop.RunUntilIdle();
+}
+
+void MessageLoop::QuitWhenIdle() {
+  DCHECK_EQ(this, current());
+  if (run_loop_) {
+    run_loop_->QuitWhenIdle();  // Quit is deferred until no work remains.
+  } else {
+    NOTREACHED() << "Must be inside Run to call QuitWhenIdle";
+  }
+}
+
+void MessageLoop::QuitNow() {
+  DCHECK_EQ(this, current());
+  if (run_loop_) {
+    pump_->Quit();  // Quits immediately, even with work still queued.
+  } else {
+    NOTREACHED() << "Must be inside Run to call Quit";
+  }
+}
+
+bool MessageLoop::IsType(Type type) const {
+  return type_ == type;
+}
+
+static void QuitCurrentWhenIdle() {
+  MessageLoop::current()->QuitWhenIdle();  // Applies to the calling thread's loop.
+}
+
+// static
+Closure MessageLoop::QuitWhenIdleClosure() {
+  return Bind(&QuitCurrentWhenIdle);
+}
+
+void MessageLoop::SetNestableTasksAllowed(bool allowed) {
+  if (allowed) {
+    // Kick the native pump just in case we enter an OS-driven nested message
+    // loop.
+    pump_->ScheduleWork();
+  }
+  nestable_tasks_allowed_ = allowed;
+}
+
+bool MessageLoop::NestableTasksAllowed() const {
+  return nestable_tasks_allowed_;
+}
+
+bool MessageLoop::IsNested() {
+  return run_loop_->run_depth_ > 1;  // NOTE(review): assumes Run() is active (|run_loop_| non-null).
+}
+
+void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
+  DCHECK_EQ(this, current());  // |task_observers_| is only used on this thread.
+  task_observers_.AddObserver(task_observer);
+}
+
+void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
+  DCHECK_EQ(this, current());
+  task_observers_.RemoveObserver(task_observer);
+}
+
+bool MessageLoop::is_running() const {
+  DCHECK_EQ(this, current());
+  return run_loop_ != NULL;
+}
+
+bool MessageLoop::HasHighResolutionTasks() {
+  return incoming_task_queue_->HasHighResolutionTasks();
+}
+
+bool MessageLoop::IsIdleForTesting() {
+  // We only check the incoming queue, since we don't want to lock the work
+  // queue.
+  return incoming_task_queue_->IsIdleForTesting();
+}
+
+//------------------------------------------------------------------------------
+
+// static
+std::unique_ptr<MessageLoop> MessageLoop::CreateUnbound(
+    Type type,
+    MessagePumpFactoryCallback pump_factory) {
+  return WrapUnique(new MessageLoop(type, pump_factory));  // Caller binds later.
+}
+
+MessageLoop::MessageLoop(Type type, MessagePumpFactoryCallback pump_factory)
+    : type_(type),
+#if defined(OS_WIN)
+      pending_high_res_tasks_(0),
+      in_high_res_mode_(false),
+#endif
+      nestable_tasks_allowed_(true),
+      pump_factory_(pump_factory),
+      message_histogram_(NULL),
+      run_loop_(NULL),
+      incoming_task_queue_(new internal::IncomingTaskQueue(this)),
+      unbound_task_runner_(
+          new internal::MessageLoopTaskRunner(incoming_task_queue_)),
+      task_runner_(unbound_task_runner_),
+      thread_id_(kInvalidThreadId) {
+  // If type is TYPE_CUSTOM non-null pump_factory must be given.
+  DCHECK(type_ != TYPE_CUSTOM || !pump_factory_.is_null());
+}
+
+void MessageLoop::BindToCurrentThread() {
+  DCHECK(!pump_);  // Binding may happen only once.
+  if (!pump_factory_.is_null())
+    pump_ = pump_factory_.Run();
+  else
+    pump_ = CreateMessagePumpForType(type_);
+
+  DCHECK(!current()) << "should only have one message loop per thread";
+  lazy_tls_ptr.Pointer()->Set(this);
+
+  incoming_task_queue_->StartScheduling();
+  unbound_task_runner_->BindToCurrentThread();
+  unbound_task_runner_ = nullptr;
+  SetThreadTaskRunnerHandle();
+  {
+    // Save the current thread's ID for potential use by other threads
+    // later from GetThreadName().
+    thread_id_ = PlatformThread::CurrentId();
+    subtle::MemoryBarrier();  // Publish |thread_id_| to other threads.
+  }
+}
+
+std::string MessageLoop::GetThreadName() const {
+  if (thread_id_ == kInvalidThreadId) {
+    // |thread_id_| may already have been initialized but this thread might not
+    // have received the update yet.
+    subtle::MemoryBarrier();  // Pairs with the barrier in BindToCurrentThread().
+    DCHECK_NE(kInvalidThreadId, thread_id_);
+  }
+  return ThreadIdNameManager::GetInstance()->GetName(thread_id_);
+}
+
+void MessageLoop::SetTaskRunner(
+    scoped_refptr<SingleThreadTaskRunner> task_runner) {
+  DCHECK_EQ(this, current());
+  DCHECK(task_runner->BelongsToCurrentThread());
+  DCHECK(!unbound_task_runner_);  // Only valid after the loop has been bound.
+  task_runner_ = std::move(task_runner);
+  SetThreadTaskRunnerHandle();
+}
+
+void MessageLoop::SetThreadTaskRunnerHandle() {
+  DCHECK_EQ(this, current());
+  // Clear the previous thread task runner first, because only one can exist at
+  // a time.
+  thread_task_runner_handle_.reset();
+  thread_task_runner_handle_.reset(new ThreadTaskRunnerHandle(task_runner_));
+}
+
+void MessageLoop::RunHandler() {
+  DCHECK_EQ(this, current());
+  StartHistogrammer();
+  pump_->Run(this);  // Runs the pump until Quit().
+}
+
+bool MessageLoop::ProcessNextDelayedNonNestableTask() {
+  if (run_loop_->run_depth_ != 1)
+    return false;  // Deferred tasks only run at the outermost run level.
+
+  if (deferred_non_nestable_work_queue_.empty())
+    return false;
+
+  PendingTask pending_task =
+      std::move(deferred_non_nestable_work_queue_.front());
+  deferred_non_nestable_work_queue_.pop();
+
+  RunTask(pending_task);
+  return true;
+}
+
+void MessageLoop::RunTask(const PendingTask& pending_task) {
+  DCHECK(nestable_tasks_allowed_);
+
+#if defined(OS_WIN)
+  if (pending_task.is_high_res) {
+    pending_high_res_tasks_--;
+    CHECK_GE(pending_high_res_tasks_, 0);
+  }
+#endif
+
+  // Execute the task and assume the worst: It is probably not reentrant.
+  nestable_tasks_allowed_ = false;
+
+  HistogramEvent(kTaskRunEvent);
+
+  TRACE_TASK_EXECUTION("MessageLoop::RunTask", pending_task);
+
+  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
+                    WillProcessTask(pending_task));
+  task_annotator_.RunTask("MessageLoop::PostTask", pending_task);
+  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
+                    DidProcessTask(pending_task));
+
+  nestable_tasks_allowed_ = true;  // Restore; the task has completed.
+}
+
+bool MessageLoop::DeferOrRunPendingTask(PendingTask pending_task) {
+  if (pending_task.nestable || run_loop_->run_depth_ == 1) {
+    RunTask(pending_task);
+    // Show that we ran a task (Note: a new one might arrive as a
+    // consequence!).
+    return true;
+  }
+
+  // We couldn't run the task now because we're in a nested message loop
+  // and the task isn't nestable.
+  deferred_non_nestable_work_queue_.push(std::move(pending_task));  // Replayed later.
+  return false;
+}
+
+// Transfers ownership of |pending_task| into |delayed_work_queue_|, which
+// keeps tasks ordered by their delayed run time.
+void MessageLoop::AddToDelayedWorkQueue(PendingTask pending_task) {
+  // Move to the delayed work queue.
+  delayed_work_queue_.push(std::move(pending_task));
+}
+
+// Destroys every not-yet-run task without running it. Returns true if any
+// queue held work. Tasks are destroyed, not run, so their bound state is
+// simply dropped.
+bool MessageLoop::DeletePendingTasks() {
+  bool did_work = !work_queue_.empty();
+  while (!work_queue_.empty()) {
+    PendingTask pending_task = std::move(work_queue_.front());
+    work_queue_.pop();
+    if (!pending_task.delayed_run_time.is_null()) {
+      // We want to delete delayed tasks in the same order in which they would
+      // normally be deleted in case of any funny dependencies between delayed
+      // tasks.
+      AddToDelayedWorkQueue(std::move(pending_task));
+    }
+  }
+  did_work |= !deferred_non_nestable_work_queue_.empty();
+  while (!deferred_non_nestable_work_queue_.empty()) {
+    deferred_non_nestable_work_queue_.pop();
+  }
+  did_work |= !delayed_work_queue_.empty();
+
+  // Historically, we always delete the task regardless of valgrind status. It's
+  // not completely clear why we want to leak them in the loops above. This
+  // code is replicating legacy behavior, and should not be considered
+  // absolutely "correct" behavior. See TODO above about deleting all tasks
+  // when it's safe.
+  while (!delayed_work_queue_.empty()) {
+    delayed_work_queue_.pop();
+  }
+  return did_work;
+}
+
+// Pulls any tasks posted from other threads (via |incoming_task_queue_|)
+// into the thread-local |work_queue_|, but only when the local queue has
+// drained, to amortize the cross-thread locking cost.
+void MessageLoop::ReloadWorkQueue() {
+  // We can improve performance of our loading tasks from the incoming queue to
+  // |*work_queue| by waiting until the last minute (|*work_queue| is empty) to
+  // load. That reduces the number of locks-per-task significantly when our
+  // queues get large.
+  if (work_queue_.empty()) {
+#if defined(OS_WIN)
+    // ReloadWorkQueue() returns the number of high resolution tasks that
+    // were just loaded; RunTask() decrements the counter per task.
+    pending_high_res_tasks_ +=
+        incoming_task_queue_->ReloadWorkQueue(&work_queue_);
+#else
+    incoming_task_queue_->ReloadWorkQueue(&work_queue_);
+#endif
+  }
+}
+
+// Wakes the pump so it will call DoWork(). Safe to call from any thread;
+// callers are responsible for any needed synchronization (see header).
+void MessageLoop::ScheduleWork() {
+  pump_->ScheduleWork();
+}
+
+#if defined(OS_WIN)
+// Diagnostic-only passthrough to the Windows pump; see the note in the
+// header (crbug.com/596190) — this may reset an auto-reset wake-up event.
+bool MessageLoop::MessagePumpWasSignaled() {
+  return pump_->WasSignaled();
+}
+#endif
+
+//------------------------------------------------------------------------------
+// Method and data for histogramming events and actions taken by each instance
+// on each thread.
+
+// Lazily creates the per-thread event histogram, but only if histogramming
+// was enabled via EnableHistogrammer() and the StatisticsRecorder is up.
+void MessageLoop::StartHistogrammer() {
+#if !defined(OS_NACL)  // NaCl build has no metrics code.
+  if (enable_histogrammer_ && !message_histogram_
+      && StatisticsRecorder::IsActive()) {
+    std::string thread_name = GetThreadName();
+    // Histogramming requires the loop to already be bound to a named thread.
+    DCHECK(!thread_name.empty());
+    message_histogram_ = LinearHistogram::FactoryGetWithRangeDescription(
+        "MsgLoop:" + thread_name, kLeastNonZeroMessageId, kMaxMessageId,
+        kNumberOfDistinctMessagesDisplayed,
+        HistogramBase::kHexRangePrintingFlag, event_descriptions_);
+  }
+#endif
+}
+
+// Records |event| in the per-thread histogram. No-op when histogramming is
+// disabled (|message_histogram_| null) or on NaCl.
+void MessageLoop::HistogramEvent(int event) {
+#if !defined(OS_NACL)
+  if (message_histogram_)
+    message_histogram_->Add(event);
+#endif
+}
+
+// Informs registered NestingObservers that a nested message loop is about
+// to start processing tasks.
+void MessageLoop::NotifyBeginNestedLoop() {
+  FOR_EACH_OBSERVER(NestingObserver, nesting_observers_,
+                    OnBeginNestedMessageLoop());
+}
+
+// MessagePump::Delegate override. Runs at most one immediate task from the
+// work queue; delayed tasks encountered along the way are routed to
+// |delayed_work_queue_| instead. Returns true iff a task was run.
+bool MessageLoop::DoWork() {
+  if (!nestable_tasks_allowed_) {
+    // Task can't be executed right now.
+    return false;
+  }
+
+  for (;;) {
+    ReloadWorkQueue();
+    if (work_queue_.empty())
+      break;
+
+    // Execute oldest task.
+    do {
+      PendingTask pending_task = std::move(work_queue_.front());
+      work_queue_.pop();
+      if (!pending_task.delayed_run_time.is_null()) {
+        // Delayed task: file it for DoDelayedWork() rather than running it.
+        // Capture the identifying fields before the move consumes the task.
+        int sequence_num = pending_task.sequence_num;
+        TimeTicks delayed_run_time = pending_task.delayed_run_time;
+        AddToDelayedWorkQueue(std::move(pending_task));
+        // If we changed the topmost task, then it is time to reschedule.
+        if (delayed_work_queue_.top().sequence_num == sequence_num)
+          pump_->ScheduleDelayedWork(delayed_run_time);
+      } else {
+        if (DeferOrRunPendingTask(std::move(pending_task)))
+          return true;
+      }
+    } while (!work_queue_.empty());
+  }
+
+  // Nothing happened.
+  return false;
+}
+
+// MessagePump::Delegate override. Runs (or defers) at most one due delayed
+// task and reports the run time of the next one via
+// |*next_delayed_work_time| (null TimeTicks when there is none).
+bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
+  if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) {
+    recent_time_ = *next_delayed_work_time = TimeTicks();
+    return false;
+  }
+
+  // When we "fall behind", there will be a lot of tasks in the delayed work
+  // queue that are ready to run.  To increase efficiency when we fall behind,
+  // we will only call Time::Now() intermittently, and then process all tasks
+  // that are ready to run before calling it again.  As a result, the more we
+  // fall behind (and have a lot of ready-to-run delayed tasks), the more
+  // efficient we'll be at handling the tasks.
+
+  TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time;
+  if (next_run_time > recent_time_) {
+    recent_time_ = TimeTicks::Now();  // Get a better view of Now();
+    if (next_run_time > recent_time_) {
+      // Still not due: just report when to wake up again.
+      *next_delayed_work_time = next_run_time;
+      return false;
+    }
+  }
+
+  // priority_queue::top() returns a const reference, so a const_cast is
+  // needed to move (rather than copy) the task out; this is safe because the
+  // element is popped immediately afterwards.
+  PendingTask pending_task =
+      std::move(const_cast<PendingTask&>(delayed_work_queue_.top()));
+  delayed_work_queue_.pop();
+
+  if (!delayed_work_queue_.empty())
+    *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;
+
+  return DeferOrRunPendingTask(std::move(pending_task));
+}
+
+// MessagePump::Delegate override, called when the pump has no immediate or
+// due delayed work. Runs a deferred non-nestable task if possible, honors a
+// pending quit-when-idle, and (on Windows) toggles the high resolution
+// timer. Returns true iff a task was run.
+bool MessageLoop::DoIdleWork() {
+  if (ProcessNextDelayedNonNestableTask())
+    return true;
+
+  if (run_loop_->quit_when_idle_received_)
+    pump_->Quit();
+
+  // When we return we will do a kernel wait for more tasks.
+#if defined(OS_WIN)
+  // On Windows we activate the high resolution timer so that the wait
+  // _if_ triggered by the timer happens with good resolution. If we don't
+  // do this the default resolution is 15ms which might not be acceptable
+  // for some tasks.
+  bool high_res = pending_high_res_tasks_ > 0;
+  if (high_res != in_high_res_mode_) {
+    in_high_res_mode_ = high_res;
+    Time::ActivateHighResolutionTimer(in_high_res_mode_);
+  }
+#endif
+  return false;
+}
+
+// Backend for the DeleteSoon() template: posts a non-nestable task that
+// invokes |deleter| on |object|. May be called from any thread.
+void MessageLoop::DeleteSoonInternal(const tracked_objects::Location& from_here,
+                                     void(*deleter)(const void*),
+                                     const void* object) {
+  task_runner()->PostNonNestableTask(from_here, Bind(deleter, object));
+}
+
+// Backend for the ReleaseSoon() template: posts a non-nestable task that
+// invokes |releaser| on |object|. May be called from any thread.
+void MessageLoop::ReleaseSoonInternal(
+    const tracked_objects::Location& from_here,
+    void(*releaser)(const void*),
+    const void* object) {
+  task_runner()->PostNonNestableTask(from_here, Bind(releaser, object));
+}
+
+#if !defined(OS_NACL)
+//------------------------------------------------------------------------------
+// MessageLoopForUI
+
+// Constructs a TYPE_UI loop around the caller-supplied |pump|; the factory
+// callback simply hands the pump through to the MessageLoop base.
+MessageLoopForUI::MessageLoopForUI(std::unique_ptr<MessagePump> pump)
+    : MessageLoop(TYPE_UI, Bind(&ReturnPump, Passed(&pump))) {}
+
+#if defined(OS_ANDROID)
+// Android-only: attaches the native pump to the Java-driven UI loop instead
+// of running it directly (see header comment on Start()).
+void MessageLoopForUI::Start() {
+  // No Histogram support for UI message loop as it is managed by Java side
+  static_cast<MessagePumpForUI*>(pump_.get())->Start(this);
+}
+#endif
+
+#if defined(OS_IOS)
+// iOS-only: connects this loop to the UI thread's CFRunLoop (see header).
+void MessageLoopForUI::Attach() {
+  static_cast<MessagePumpUIApplication*>(pump_.get())->Attach(this);
+}
+#endif
+
+#if defined(USE_OZONE) || (defined(USE_X11) && !defined(USE_GLIB))
+// Forwards file-descriptor watching to the libevent pump; only available
+// when the UI pump is libevent-based (ozone, or X11 without glib).
+bool MessageLoopForUI::WatchFileDescriptor(
+    int fd,
+    bool persistent,
+    MessagePumpLibevent::Mode mode,
+    MessagePumpLibevent::FileDescriptorWatcher *controller,
+    MessagePumpLibevent::Watcher *delegate) {
+  return static_cast<MessagePumpLibevent*>(pump_.get())->WatchFileDescriptor(
+      fd,
+      persistent,
+      mode,
+      controller,
+      delegate);
+}
+#endif
+
+#endif // !defined(OS_NACL)
+
+//------------------------------------------------------------------------------
+// MessageLoopForIO
+
+// Constructs a TYPE_IO loop; the base class creates the default IO pump.
+MessageLoopForIO::MessageLoopForIO() : MessageLoop(TYPE_IO) {}
+
+#if !defined(OS_NACL_SFI)
+
+#if defined(OS_WIN)
+// Windows-only passthrough to the IO pump; see MessagePumpForIO.
+void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
+  ToPumpIO(pump_.get())->RegisterIOHandler(file, handler);
+}
+
+// Windows-only passthrough to the IO pump; see MessagePumpForIO.
+bool MessageLoopForIO::RegisterJobObject(HANDLE job, IOHandler* handler) {
+  return ToPumpIO(pump_.get())->RegisterJobObject(job, handler);
+}
+
+// Windows-only passthrough to the IO pump; see MessagePumpForIO.
+bool MessageLoopForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
+  return ToPumpIO(pump_.get())->WaitForIOCompletion(timeout, filter);
+}
+#elif defined(OS_POSIX)
+// POSIX passthrough: delegates descriptor watching to the platform IO pump
+// (libevent, or the iOS variant) via ToPumpIO().
+bool MessageLoopForIO::WatchFileDescriptor(int fd,
+                                           bool persistent,
+                                           Mode mode,
+                                           FileDescriptorWatcher* controller,
+                                           Watcher* delegate) {
+  return ToPumpIO(pump_.get())->WatchFileDescriptor(
+      fd,
+      persistent,
+      mode,
+      controller,
+      delegate);
+}
+#endif
+
+#endif // !defined(OS_NACL_SFI)
+
+} // namespace base
diff --git a/libchrome/base/message_loop/message_loop.h b/libchrome/base/message_loop/message_loop.h
new file mode 100644
index 0000000..ac522cf
--- /dev/null
+++ b/libchrome/base/message_loop/message_loop.h
@@ -0,0 +1,700 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_LOOP_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_LOOP_H_
+
+#include <memory>
+#include <queue>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/callback_forward.h"
+#include "base/debug/task_annotator.h"
+#include "base/gtest_prod_util.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/incoming_task_queue.h"
+#include "base/message_loop/message_loop_task_runner.h"
+#include "base/message_loop/message_pump.h"
+#include "base/message_loop/timer_slack.h"
+#include "base/observer_list.h"
+#include "base/pending_task.h"
+#include "base/sequenced_task_runner_helpers.h"
+#include "base/synchronization/lock.h"
+#include "base/time/time.h"
+#include "base/tracking_info.h"
+#include "build/build_config.h"
+
+// TODO(sky): these includes should not be necessary. Nuke them.
+#if defined(OS_WIN)
+#include "base/message_loop/message_pump_win.h"
+#elif defined(OS_IOS)
+#include "base/message_loop/message_pump_io_ios.h"
+#elif defined(OS_POSIX)
+#include "base/message_loop/message_pump_libevent.h"
+#endif
+
+namespace base {
+
+class HistogramBase;
+class RunLoop;
+class ThreadTaskRunnerHandle;
+class WaitableEvent;
+
+// A MessageLoop is used to process events for a particular thread. There is
+// at most one MessageLoop instance per thread.
+//
+// Events include at a minimum Task instances submitted to PostTask and its
+// variants. Depending on the type of message pump used by the MessageLoop
+// other events such as UI messages may be processed. On Windows APC calls (as
+// time permits) and signals sent to a registered set of HANDLEs may also be
+// processed.
+//
+// NOTE: Unless otherwise specified, a MessageLoop's methods may only be called
+// on the thread where the MessageLoop's Run method executes.
+//
+// NOTE: MessageLoop has task reentrancy protection. This means that if a
+// task is being processed, a second task cannot start until the first task is
+// finished. Reentrancy can happen when processing a task, and an inner
+// message pump is created. That inner pump then processes native messages
+// which could implicitly start an inner task. Inner message pumps are created
+// with dialogs (DialogBox), common dialogs (GetOpenFileName), OLE functions
+// (DoDragDrop), printer functions (StartDoc) and *many* others.
+//
+// Sample workaround when inner task processing is needed:
+// HRESULT hr;
+// {
+// MessageLoop::ScopedNestableTaskAllower allow(MessageLoop::current());
+// hr = DoDragDrop(...); // Implicitly runs a modal message loop.
+// }
+// // Process |hr| (the result returned by DoDragDrop()).
+//
+// Please be SURE your task is reentrant (nestable) and all global variables
+// are stable and accessible before calling SetNestableTasksAllowed(true).
+//
+class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
+ public:
+  // A MessageLoop has a particular type, which indicates the set of
+  // asynchronous events it may process in addition to tasks and timers.
+  //
+  // TYPE_DEFAULT
+  //   This type of ML only supports tasks and timers.
+  //
+  // TYPE_UI
+  //   This type of ML also supports native UI events (e.g., Windows messages).
+  //   See also MessageLoopForUI.
+  //
+  // TYPE_IO
+  //   This type of ML also supports asynchronous IO.  See also
+  //   MessageLoopForIO.
+  //
+  // TYPE_JAVA
+  //   This type of ML is backed by a Java message handler which is responsible
+  //   for running the tasks added to the ML. This is only for use on Android.
+  //   TYPE_JAVA behaves in essence like TYPE_UI, except during construction
+  //   where it does not use the main thread specific pump factory.
+  //
+  // TYPE_CUSTOM
+  //   MessagePump was supplied to constructor.
+  //
+  enum Type {
+    TYPE_DEFAULT,
+    TYPE_UI,
+    TYPE_CUSTOM,
+    TYPE_IO,
+#if defined(OS_ANDROID)
+    TYPE_JAVA,
+#endif  // defined(OS_ANDROID)
+  };
+
+  // Normally, it is not necessary to instantiate a MessageLoop.  Instead, it
+  // is typical to make use of the current thread's MessageLoop instance.
+  explicit MessageLoop(Type type = TYPE_DEFAULT);
+  // Creates a TYPE_CUSTOM MessageLoop with the supplied MessagePump, which must
+  // be non-NULL.
+  explicit MessageLoop(std::unique_ptr<MessagePump> pump);
+
+  ~MessageLoop() override;
+
+  // Returns the MessageLoop object for the current thread, or null if none.
+  static MessageLoop* current();
+
+  // Globally enables per-loop event histogramming (takes effect for loops
+  // that start running afterwards).
+  static void EnableHistogrammer(bool enable_histogrammer);
+
+  typedef std::unique_ptr<MessagePump>(MessagePumpFactory)();
+  // Uses the given base::MessagePumpForUIFactory to override the default
+  // MessagePump implementation for 'TYPE_UI'. Returns true if the factory
+  // was successfully registered.
+  static bool InitMessagePumpForUIFactory(MessagePumpFactory* factory);
+
+  // Creates the default MessagePump based on |type|. Caller owns return
+  // value.
+  static std::unique_ptr<MessagePump> CreateMessagePumpForType(Type type);
+
+  // A DestructionObserver is notified when the current MessageLoop is being
+  // destroyed.  These observers are notified prior to MessageLoop::current()
+  // being changed to return NULL.  This gives interested parties the chance to
+  // do final cleanup that depends on the MessageLoop.
+  //
+  // NOTE: Any tasks posted to the MessageLoop during this notification will
+  // not be run.  Instead, they will be deleted.
+  //
+  class BASE_EXPORT DestructionObserver {
+   public:
+    virtual void WillDestroyCurrentMessageLoop() = 0;
+
+   protected:
+    virtual ~DestructionObserver();
+  };
+
+  // Add a DestructionObserver, which will start receiving notifications
+  // immediately.
+  void AddDestructionObserver(DestructionObserver* destruction_observer);
+
+  // Remove a DestructionObserver.  It is safe to call this method while a
+  // DestructionObserver is receiving a notification callback.
+  void RemoveDestructionObserver(DestructionObserver* destruction_observer);
+
+  // A NestingObserver is notified when a nested message loop begins. The
+  // observers are notified before the first task is processed.
+  class BASE_EXPORT NestingObserver {
+   public:
+    virtual void OnBeginNestedMessageLoop() = 0;
+
+   protected:
+    virtual ~NestingObserver();
+  };
+
+  void AddNestingObserver(NestingObserver* observer);
+  void RemoveNestingObserver(NestingObserver* observer);
+
+  // NOTE: Deprecated; prefer task_runner() and the TaskRunner interfaces.
+  // TODO(skyostil): Remove these functions (crbug.com/465354).
+  //
+  // The "PostTask" family of methods call the task's Run method asynchronously
+  // from within a message loop at some point in the future.
+  //
+  // With the PostTask variant, tasks are invoked in FIFO order, inter-mixed
+  // with normal UI or IO event processing.  With the PostDelayedTask variant,
+  // tasks are called after at least approximately 'delay_ms' have elapsed.
+  //
+  // The NonNestable variants work similarly except that they promise never to
+  // dispatch the task from a nested invocation of MessageLoop::Run.  Instead,
+  // such tasks get deferred until the top-most MessageLoop::Run is executing.
+  //
+  // The MessageLoop takes ownership of the Task, and deletes it after it has
+  // been Run().
+  //
+  // PostTask(from_here, task) is equivalent to
+  // PostDelayedTask(from_here, task, 0).
+  //
+  // NOTE: These methods may be called on any thread.  The Task will be invoked
+  // on the thread that executes MessageLoop::Run().
+  void PostTask(const tracked_objects::Location& from_here,
+                const Closure& task);
+
+  void PostDelayedTask(const tracked_objects::Location& from_here,
+                       const Closure& task,
+                       TimeDelta delay);
+
+  // A variant on PostTask that deletes the given object.  This is useful
+  // if the object needs to live until the next run of the MessageLoop (for
+  // example, deleting a RenderProcessHost from within an IPC callback is not
+  // good).
+  //
+  // NOTE: This method may be called on any thread.  The object will be deleted
+  // on the thread that executes MessageLoop::Run().
+  template <class T>
+  void DeleteSoon(const tracked_objects::Location& from_here, const T* object) {
+    base::subtle::DeleteHelperInternal<T, void>::DeleteViaSequencedTaskRunner(
+        this, from_here, object);
+  }
+
+  // A variant on PostTask that releases the given reference counted object
+  // (by calling its Release method).  This is useful if the object needs to
+  // live until the next run of the MessageLoop, or if the object needs to be
+  // released on a particular thread.
+  //
+  // A common pattern is to manually increment the object's reference count
+  // (AddRef), clear the pointer, then issue a ReleaseSoon.  The reference count
+  // is incremented manually to ensure clearing the pointer does not trigger a
+  // delete and to account for the upcoming decrement (ReleaseSoon).  For
+  // example:
+  //
+  // scoped_refptr<Foo> foo = ...
+  // foo->AddRef();
+  // Foo* raw_foo = foo.get();
+  // foo = NULL;
+  // message_loop->ReleaseSoon(raw_foo);
+  //
+  // NOTE: This method may be called on any thread.  The object will be
+  // released (and thus possibly deleted) on the thread that executes
+  // MessageLoop::Run().  If this is not the same as the thread that calls
+  // ReleaseSoon(FROM_HERE, raw_foo), then T MUST inherit from
+  // RefCountedThreadSafe<T>!
+  template <class T>
+  void ReleaseSoon(const tracked_objects::Location& from_here,
+                   const T* object) {
+    base::subtle::ReleaseHelperInternal<T, void>::ReleaseViaSequencedTaskRunner(
+        this, from_here, object);
+  }
+
+  // Deprecated: use RunLoop instead.
+  // Run the message loop.
+  void Run();
+
+  // Deprecated: use RunLoop instead.
+  // Process all pending tasks, windows messages, etc., but don't wait/sleep.
+  // Return as soon as all items that can be run are taken care of.
+  void RunUntilIdle();
+
+  // Deprecated: use RunLoop instead.
+  //
+  // Signals the Run method to return when it becomes idle. It will continue to
+  // process pending messages and future messages as long as they are enqueued.
+  // Warning: if the MessageLoop remains busy, it may never quit. Only use this
+  // Quit method when looping procedures (such as web pages) have been shut
+  // down.
+  //
+  // This method may only be called on the same thread that called Run, and Run
+  // must still be on the call stack.
+  //
+  // Use QuitClosure variants if you need to Quit another thread's MessageLoop,
+  // but note that doing so is fairly dangerous if the target thread makes
+  // nested calls to MessageLoop::Run.  The problem being that you won't know
+  // which nested run loop you are quitting, so be careful!
+  void QuitWhenIdle();
+
+  // Deprecated: use RunLoop instead.
+  //
+  // This method is a variant of Quit, that does not wait for pending messages
+  // to be processed before returning from Run.
+  void QuitNow();
+
+  // Deprecated: use RunLoop instead.
+  // Construct a Closure that will call QuitWhenIdle(). Useful to schedule an
+  // arbitrary MessageLoop to QuitWhenIdle.
+  static Closure QuitWhenIdleClosure();
+
+  // Set the timer slack for this message loop.
+  void SetTimerSlack(TimerSlack timer_slack) {
+    pump_->SetTimerSlack(timer_slack);
+  }
+
+  // Returns true if this loop is |type|. This allows subclasses (especially
+  // those in tests) to specialize how they are identified.
+  virtual bool IsType(Type type) const;
+
+  // Returns the type passed to the constructor.
+  Type type() const { return type_; }
+
+  // Returns the name of the thread this message loop is bound to.
+  // This function is only valid when this message loop is running and
+  // BindToCurrentThread has already been called.
+  std::string GetThreadName() const;
+
+  // Gets the TaskRunner associated with this message loop.
+  const scoped_refptr<SingleThreadTaskRunner>& task_runner() {
+    return task_runner_;
+  }
+
+  // Sets a new TaskRunner for this message loop. The message loop must already
+  // have been bound to a thread prior to this call, and the task runner must
+  // belong to that thread. Note that changing the task runner will also affect
+  // the ThreadTaskRunnerHandle for the target thread. Must be called on the
+  // thread to which the message loop is bound.
+  void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner);
+
+  // Enables or disables the recursive task processing. This happens in the case
+  // of recursive message loops. Some unwanted message loops may occur when
+  // using common controls or printer functions. By default, recursive task
+  // processing is disabled.
+  //
+  // Please use |ScopedNestableTaskAllower| instead of calling these methods
+  // directly.  In general, nestable message loops are to be avoided.  They are
+  // dangerous and difficult to get right, so please use with extreme caution.
+  //
+  // The specific case where tasks get queued is:
+  // - The thread is running a message loop.
+  // - It receives a task #1 and executes it.
+  // - The task #1 implicitly starts a message loop, like a MessageBox in the
+  //   unit test. This can also be StartDoc or GetSaveFileName.
+  // - The thread receives a task #2 before or while in this second message
+  //   loop.
+  // - With NestableTasksAllowed set to true, the task #2 will run right away.
+  //   Otherwise, it will get executed right after task #1 completes at "thread
+  //   message loop level".
+  void SetNestableTasksAllowed(bool allowed);
+  bool NestableTasksAllowed() const;
+
+  // Enables nestable tasks on |loop| while in scope.
+  class ScopedNestableTaskAllower {
+   public:
+    explicit ScopedNestableTaskAllower(MessageLoop* loop)
+        : loop_(loop),
+          old_state_(loop_->NestableTasksAllowed()) {
+      loop_->SetNestableTasksAllowed(true);
+    }
+    ~ScopedNestableTaskAllower() {
+      // Restore whatever setting was in effect when the allower was created.
+      loop_->SetNestableTasksAllowed(old_state_);
+    }
+
+   private:
+    MessageLoop* loop_;
+    bool old_state_;
+  };
+
+  // Returns true if we are currently running a nested message loop.
+  bool IsNested();
+
+  // A TaskObserver is an object that receives task notifications from the
+  // MessageLoop.
+  //
+  // NOTE: A TaskObserver implementation should be extremely fast!
+  class BASE_EXPORT TaskObserver {
+   public:
+    TaskObserver();
+
+    // This method is called before processing a task.
+    virtual void WillProcessTask(const PendingTask& pending_task) = 0;
+
+    // This method is called after processing a task.
+    virtual void DidProcessTask(const PendingTask& pending_task) = 0;
+
+   protected:
+    virtual ~TaskObserver();
+  };
+
+  // These functions can only be called on the same thread that |this| is
+  // running on.
+  void AddTaskObserver(TaskObserver* task_observer);
+  void RemoveTaskObserver(TaskObserver* task_observer);
+
+  // Can only be called from the thread that owns the MessageLoop.
+  bool is_running() const;
+
+  // Returns true if the message loop has high resolution timers enabled.
+  // Provided for testing.
+  bool HasHighResolutionTasks();
+
+  // Returns true if the message loop is "idle". Provided for testing.
+  bool IsIdleForTesting();
+
+  // Returns the TaskAnnotator which is used to add debug information to posted
+  // tasks.
+  debug::TaskAnnotator* task_annotator() { return &task_annotator_; }
+
+  // Runs the specified PendingTask.
+  void RunTask(const PendingTask& pending_task);
+
+#if defined(OS_WIN)
+  // TODO (stanisc): crbug.com/596190: Remove this after the signaling issue
+  // has been investigated.
+  // This should be used for diagnostic only. If message pump wake-up mechanism
+  // is based on auto-reset event this call would reset the event to unset
+  // state.
+  bool MessagePumpWasSignaled();
+#endif
+
+  //----------------------------------------------------------------------------
+ protected:
+  std::unique_ptr<MessagePump> pump_;
+
+  using MessagePumpFactoryCallback = Callback<std::unique_ptr<MessagePump>()>;
+
+  // Common protected constructor. Other constructors delegate the
+  // initialization to this constructor.
+  // A subclass can invoke this constructor to create a message_loop of a
+  // specific type with a custom loop. The implementation does not call
+  // BindToCurrentThread. If this constructor is invoked directly by a subclass,
+  // then the subclass must subsequently bind the message loop.
+  MessageLoop(Type type, MessagePumpFactoryCallback pump_factory);
+
+  // Configure various members and bind this message loop to the current thread.
+  void BindToCurrentThread();
+
+ private:
+  friend class RunLoop;
+  friend class internal::IncomingTaskQueue;
+  friend class ScheduleWorkTest;
+  friend class Thread;
+  FRIEND_TEST_ALL_PREFIXES(MessageLoopTest, DeleteUnboundLoop);
+
+  // Creates a MessageLoop without binding to a thread.
+  // If |type| is TYPE_CUSTOM non-null |pump_factory| must be also given
+  // to create a message pump for this message loop.  Otherwise a default
+  // message pump for the |type| is created.
+  //
+  // It is valid to call this to create a new message loop on one thread,
+  // and then pass it to the thread where the message loop actually runs.
+  // The message loop's BindToCurrentThread() method must be called on the
+  // thread the message loop runs on, before calling Run().
+  // Before BindToCurrentThread() is called, only Post*Task() functions can
+  // be called on the message loop.
+  static std::unique_ptr<MessageLoop> CreateUnbound(
+      Type type,
+      MessagePumpFactoryCallback pump_factory);
+
+  // Sets the ThreadTaskRunnerHandle for the current thread to point to the
+  // task runner for this message loop.
+  void SetThreadTaskRunnerHandle();
+
+  // Invokes the actual run loop using the message pump.
+  void RunHandler();
+
+  // Called to process any delayed non-nestable tasks.
+  bool ProcessNextDelayedNonNestableTask();
+
+  // Calls RunTask or queues the pending_task on the deferred task list if it
+  // cannot be run right now.  Returns true if the task was run.
+  bool DeferOrRunPendingTask(PendingTask pending_task);
+
+  // Adds the pending task to delayed_work_queue_.
+  void AddToDelayedWorkQueue(PendingTask pending_task);
+
+  // Delete tasks that haven't run yet without running them.  Used in the
+  // destructor to make sure all the task's destructors get called.  Returns
+  // true if some work was done.
+  bool DeletePendingTasks();
+
+  // Loads tasks from the incoming queue to |work_queue_| if the latter is
+  // empty.
+  void ReloadWorkQueue();
+
+  // Wakes up the message pump. Can be called on any thread. The caller is
+  // responsible for synchronizing ScheduleWork() calls.
+  void ScheduleWork();
+
+  // Start recording histogram info about events and action IF it was enabled
+  // and IF the statistics recorder can accept a registration of our histogram.
+  void StartHistogrammer();
+
+  // Add occurrence of event to our histogram, so that we can see what is being
+  // done in a specific MessageLoop instance (i.e., specific thread).
+  // If message_histogram_ is NULL, this is a no-op.
+  void HistogramEvent(int event);
+
+  // Notify observers that a nested message loop is starting.
+  void NotifyBeginNestedLoop();
+
+  // MessagePump::Delegate methods:
+  bool DoWork() override;
+  bool DoDelayedWork(TimeTicks* next_delayed_work_time) override;
+  bool DoIdleWork() override;
+
+  const Type type_;
+
+  // A list of tasks that need to be processed by this instance.  Note that
+  // this queue is only accessed (push/pop) by our current thread.
+  TaskQueue work_queue_;
+
+#if defined(OS_WIN)
+  // How many high resolution tasks are in the pending task queue. This value
+  // increases by N every time we call ReloadWorkQueue() and decreases by 1
+  // every time we call RunTask() if the task needs a high resolution timer.
+  int pending_high_res_tasks_;
+  // Tracks if we have requested high resolution timers. Its only use is to
+  // turn off the high resolution timer upon loop destruction.
+  bool in_high_res_mode_;
+#endif
+
+  // Contains delayed tasks, sorted by their 'delayed_run_time' property.
+  DelayedTaskQueue delayed_work_queue_;
+
+  // A recent snapshot of Time::Now(), used to check delayed_work_queue_.
+  TimeTicks recent_time_;
+
+  // A queue of non-nestable tasks that we had to defer because when it came
+  // time to execute them we were in a nested message loop.  They will execute
+  // once we're out of nested message loops.
+  TaskQueue deferred_non_nestable_work_queue_;
+
+  ObserverList<DestructionObserver> destruction_observers_;
+
+  ObserverList<NestingObserver> nesting_observers_;
+
+  // A recursion block that prevents accidentally running additional tasks when
+  // inside a (accidentally induced?) nested message pump.
+  bool nestable_tasks_allowed_;
+
+  // pump_factory_.Run() is called to create a message pump for this loop
+  // if type_ is TYPE_CUSTOM and pump_ is null.
+  MessagePumpFactoryCallback pump_factory_;
+
+  // A profiling histogram showing the counts of various messages and events.
+  HistogramBase* message_histogram_;
+
+  // NOTE(review): set by the friend class RunLoop while this loop is running;
+  // null otherwise — confirm against run_loop.cc.
+  RunLoop* run_loop_;
+
+  ObserverList<TaskObserver> task_observers_;
+
+  debug::TaskAnnotator task_annotator_;
+
+  scoped_refptr<internal::IncomingTaskQueue> incoming_task_queue_;
+
+  // A task runner which we haven't bound to a thread yet.
+  scoped_refptr<internal::MessageLoopTaskRunner> unbound_task_runner_;
+
+  // The task runner associated with this message loop.
+  scoped_refptr<SingleThreadTaskRunner> task_runner_;
+  std::unique_ptr<ThreadTaskRunnerHandle> thread_task_runner_handle_;
+
+  // Id of the thread this message loop is bound to.
+  PlatformThreadId thread_id_;
+
+  template <class T, class R> friend class base::subtle::DeleteHelperInternal;
+  template <class T, class R> friend class base::subtle::ReleaseHelperInternal;
+
+  void DeleteSoonInternal(const tracked_objects::Location& from_here,
+                          void(*deleter)(const void*),
+                          const void* object);
+  void ReleaseSoonInternal(const tracked_objects::Location& from_here,
+                           void(*releaser)(const void*),
+                           const void* object);
+
+  DISALLOW_COPY_AND_ASSIGN(MessageLoop);
+};
+
+#if !defined(OS_NACL)
+
+//-----------------------------------------------------------------------------
+// MessageLoopForUI extends MessageLoop with methods that are particular to a
+// MessageLoop instantiated with TYPE_UI.
+//
+// This class is typically used like so:
+// MessageLoopForUI::current()->...call some method...
+//
+class BASE_EXPORT MessageLoopForUI : public MessageLoop {
+ public:
+  MessageLoopForUI() : MessageLoop(TYPE_UI) {
+  }
+
+  explicit MessageLoopForUI(std::unique_ptr<MessagePump> pump);
+
+  // Returns the MessageLoopForUI of the current thread.
+  static MessageLoopForUI* current() {
+    MessageLoop* loop = MessageLoop::current();
+    DCHECK(loop);
+    DCHECK(loop->IsType(MessageLoop::TYPE_UI));
+    return static_cast<MessageLoopForUI*>(loop);
+  }
+
+  // Returns true if the current thread has a MessageLoop and it is TYPE_UI.
+  static bool IsCurrent() {
+    MessageLoop* loop = MessageLoop::current();
+    return loop && loop->IsType(MessageLoop::TYPE_UI);
+  }
+
+#if defined(OS_IOS)
+  // On iOS, the main message loop cannot be Run().  Instead call Attach(),
+  // which connects this MessageLoop to the UI thread's CFRunLoop and allows
+  // PostTask() to work.
+  void Attach();
+#endif
+
+#if defined(OS_ANDROID)
+  // On Android, the UI message loop is handled by Java side. So Run() should
+  // never be called. Instead use Start(), which will forward all the native UI
+  // events to the Java message loop.
+  void Start();
+#endif
+
+#if defined(USE_OZONE) || (defined(USE_X11) && !defined(USE_GLIB))
+  // Please see MessagePumpLibevent for definition.
+  bool WatchFileDescriptor(
+      int fd,
+      bool persistent,
+      MessagePumpLibevent::Mode mode,
+      MessagePumpLibevent::FileDescriptorWatcher* controller,
+      MessagePumpLibevent::Watcher* delegate);
+#endif
+};
+
+// Do not add any member variables to MessageLoopForUI!  This is important b/c
+// MessageLoopForUI is often allocated via MessageLoop(TYPE_UI).  Any extra
+// data that you need should be stored on the MessageLoop's pump_ instance.
+static_assert(sizeof(MessageLoop) == sizeof(MessageLoopForUI),
+              "MessageLoopForUI should not have extra member variables");
+
+#endif // !defined(OS_NACL)
+
+//-----------------------------------------------------------------------------
+// MessageLoopForIO extends MessageLoop with methods that are particular to a
+// MessageLoop instantiated with TYPE_IO.
+//
+// This class is typically used like so:
+// MessageLoopForIO::current()->...call some method...
+//
+class BASE_EXPORT MessageLoopForIO : public MessageLoop {
+ public:
+  MessageLoopForIO();
+
+  // Returns the MessageLoopForIO of the current thread.
+  static MessageLoopForIO* current() {
+    MessageLoop* loop = MessageLoop::current();
+    DCHECK(loop) << "Can't call MessageLoopForIO::current() when no message "
+                    "loop was created for this thread. Use "
+                    " MessageLoop::current() or MessageLoopForIO::IsCurrent().";
+    DCHECK_EQ(MessageLoop::TYPE_IO, loop->type());
+    return static_cast<MessageLoopForIO*>(loop);
+  }
+
+  // Returns true if the current thread has a MessageLoop and it is TYPE_IO.
+  static bool IsCurrent() {
+    MessageLoop* loop = MessageLoop::current();
+    return loop && loop->type() == MessageLoop::TYPE_IO;
+  }
+
+#if !defined(OS_NACL_SFI)
+
+  // Platform-specific aliases for the IO pump's watcher types, so callers can
+  // write portable code against MessageLoopForIO::Watcher etc.
+#if defined(OS_WIN)
+  typedef MessagePumpForIO::IOHandler IOHandler;
+  typedef MessagePumpForIO::IOContext IOContext;
+#elif defined(OS_IOS)
+  typedef MessagePumpIOSForIO::Watcher Watcher;
+  typedef MessagePumpIOSForIO::FileDescriptorWatcher
+      FileDescriptorWatcher;
+
+  enum Mode {
+    WATCH_READ = MessagePumpIOSForIO::WATCH_READ,
+    WATCH_WRITE = MessagePumpIOSForIO::WATCH_WRITE,
+    WATCH_READ_WRITE = MessagePumpIOSForIO::WATCH_READ_WRITE
+  };
+#elif defined(OS_POSIX)
+  typedef MessagePumpLibevent::Watcher Watcher;
+  typedef MessagePumpLibevent::FileDescriptorWatcher
+      FileDescriptorWatcher;
+
+  enum Mode {
+    WATCH_READ = MessagePumpLibevent::WATCH_READ,
+    WATCH_WRITE = MessagePumpLibevent::WATCH_WRITE,
+    WATCH_READ_WRITE = MessagePumpLibevent::WATCH_READ_WRITE
+  };
+#endif
+
+#if defined(OS_WIN)
+  // Please see MessagePumpWin for definitions of these methods.
+  void RegisterIOHandler(HANDLE file, IOHandler* handler);
+  bool RegisterJobObject(HANDLE job, IOHandler* handler);
+  bool WaitForIOCompletion(DWORD timeout, IOHandler* filter);
+#elif defined(OS_POSIX)
+  // Please see MessagePumpIOSForIO/MessagePumpLibevent for definition.
+  bool WatchFileDescriptor(int fd,
+                           bool persistent,
+                           Mode mode,
+                           FileDescriptorWatcher* controller,
+                           Watcher* delegate);
+#endif  // defined(OS_WIN) || defined(OS_POSIX)
+#endif  // !defined(OS_NACL_SFI)
+};
+
+// Do not add any member variables to MessageLoopForIO!  This is important b/c
+// MessageLoopForIO is often allocated via MessageLoop(TYPE_IO).  Any extra
+// data that you need should be stored on the MessageLoop's pump_ instance.
+static_assert(sizeof(MessageLoop) == sizeof(MessageLoopForIO),
+              "MessageLoopForIO should not have extra member variables");
+
+} // namespace base
+
+#endif // BASE_MESSAGE_LOOP_MESSAGE_LOOP_H_
diff --git a/libchrome/base/message_loop/message_loop_task_runner.cc b/libchrome/base/message_loop/message_loop_task_runner.cc
new file mode 100644
index 0000000..c9b5ffe
--- /dev/null
+++ b/libchrome/base/message_loop/message_loop_task_runner.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop_task_runner.h"
+
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/message_loop/incoming_task_queue.h"
+
+namespace base {
+namespace internal {
+
+MessageLoopTaskRunner::MessageLoopTaskRunner(
+ scoped_refptr<IncomingTaskQueue> incoming_queue)
+ : incoming_queue_(incoming_queue), valid_thread_id_(kInvalidThreadId) {
+}
+
+void MessageLoopTaskRunner::BindToCurrentThread() {
+ AutoLock lock(valid_thread_id_lock_);
+ DCHECK_EQ(kInvalidThreadId, valid_thread_id_);
+ valid_thread_id_ = PlatformThread::CurrentId();
+}
+
+bool MessageLoopTaskRunner::PostDelayedTask(
+ const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay) {
+ DCHECK(!task.is_null()) << from_here.ToString();
+ return incoming_queue_->AddToIncomingQueue(from_here, task, delay, true);
+}
+
+bool MessageLoopTaskRunner::PostNonNestableDelayedTask(
+ const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay) {
+ DCHECK(!task.is_null()) << from_here.ToString();
+ return incoming_queue_->AddToIncomingQueue(from_here, task, delay, false);
+}
+
+bool MessageLoopTaskRunner::RunsTasksOnCurrentThread() const {
+ AutoLock lock(valid_thread_id_lock_);
+ return valid_thread_id_ == PlatformThread::CurrentId();
+}
+
+MessageLoopTaskRunner::~MessageLoopTaskRunner() {
+}
+
+} // namespace internal
+
+} // namespace base
diff --git a/libchrome/base/message_loop/message_loop_task_runner.h b/libchrome/base/message_loop/message_loop_task_runner.h
new file mode 100644
index 0000000..5e70b12
--- /dev/null
+++ b/libchrome/base/message_loop/message_loop_task_runner.h
@@ -0,0 +1,59 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_LOOP_TASK_RUNNER_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_LOOP_TASK_RUNNER_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/pending_task.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+namespace internal {
+
+class IncomingTaskQueue;
+
+// A stock implementation of SingleThreadTaskRunner that is created and managed
+// by a MessageLoop. For now a MessageLoopTaskRunner can only be created as
+// part of a MessageLoop.
+class BASE_EXPORT MessageLoopTaskRunner : public SingleThreadTaskRunner {
+ public:
+ explicit MessageLoopTaskRunner(
+ scoped_refptr<IncomingTaskQueue> incoming_queue);
+
+ // Initialize this message loop task runner on the current thread.
+ void BindToCurrentThread();
+
+ // SingleThreadTaskRunner implementation
+ bool PostDelayedTask(const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay) override;
+ bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay) override;
+ bool RunsTasksOnCurrentThread() const override;
+
+ private:
+ friend class RefCountedThreadSafe<MessageLoopTaskRunner>;
+ ~MessageLoopTaskRunner() override;
+
+ // The incoming queue receiving all posted tasks.
+ scoped_refptr<IncomingTaskQueue> incoming_queue_;
+
+ // ID of the thread |this| was created on. Could be accessed on multiple
+ // threads, protected by |valid_thread_id_lock_|.
+ PlatformThreadId valid_thread_id_;
+ mutable Lock valid_thread_id_lock_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessageLoopTaskRunner);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_MESSAGE_LOOP_MESSAGE_LOOP_TASK_RUNNER_H_
diff --git a/libchrome/base/message_loop/message_loop_task_runner_unittest.cc b/libchrome/base/message_loop/message_loop_task_runner_unittest.cc
new file mode 100644
index 0000000..cabd250
--- /dev/null
+++ b/libchrome/base/message_loop/message_loop_task_runner_unittest.cc
@@ -0,0 +1,363 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop_task_runner.h"
+
+#include <memory>
+
+#include "base/atomic_sequence_num.h"
+#include "base/bind.h"
+#include "base/debug/leak_annotations.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_task_runner.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+namespace base {
+
+class MessageLoopTaskRunnerTest : public testing::Test {
+ public:
+ MessageLoopTaskRunnerTest()
+ : current_loop_(new MessageLoop()),
+ task_thread_("task_thread"),
+ thread_sync_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+ void DeleteCurrentMessageLoop() { current_loop_.reset(); }
+
+ protected:
+ void SetUp() override {
+ // Use SetUp() instead of the constructor to avoid posting a task to a
+ // partially constructed object.
+ task_thread_.Start();
+
+ // Allow us to pause the |task_thread_|'s MessageLoop.
+ task_thread_.message_loop()->task_runner()->PostTask(
+ FROM_HERE, Bind(&MessageLoopTaskRunnerTest::BlockTaskThreadHelper,
+ Unretained(this)));
+ }
+
+ void TearDown() override {
+ // Make sure the |task_thread_| is not blocked, and stop the thread
+ // fully before destruction because its tasks may still depend on the
+ // |thread_sync_| event.
+ thread_sync_.Signal();
+ task_thread_.Stop();
+ DeleteCurrentMessageLoop();
+ }
+
+ // Make LoopRecorder threadsafe so that there is defined behavior even if a
+ // threading mistake sneaks into the PostTaskAndReplyRelay implementation.
+ class LoopRecorder : public RefCountedThreadSafe<LoopRecorder> {
+ public:
+ LoopRecorder(MessageLoop** run_on,
+ MessageLoop** deleted_on,
+ int* destruct_order)
+ : run_on_(run_on),
+ deleted_on_(deleted_on),
+ destruct_order_(destruct_order) {}
+
+ void RecordRun() { *run_on_ = MessageLoop::current(); }
+
+ private:
+ friend class RefCountedThreadSafe<LoopRecorder>;
+ ~LoopRecorder() {
+ *deleted_on_ = MessageLoop::current();
+ *destruct_order_ = g_order.GetNext();
+ }
+
+ MessageLoop** run_on_;
+ MessageLoop** deleted_on_;
+ int* destruct_order_;
+ };
+
+ static void RecordLoop(scoped_refptr<LoopRecorder> recorder) {
+ recorder->RecordRun();
+ }
+
+ static void RecordLoopAndQuit(scoped_refptr<LoopRecorder> recorder) {
+ recorder->RecordRun();
+ MessageLoop::current()->QuitWhenIdle();
+ }
+
+ void UnblockTaskThread() { thread_sync_.Signal(); }
+
+ void BlockTaskThreadHelper() { thread_sync_.Wait(); }
+
+ static StaticAtomicSequenceNumber g_order;
+
+ std::unique_ptr<MessageLoop> current_loop_;
+ Thread task_thread_;
+
+ private:
+ base::WaitableEvent thread_sync_;
+};
+
+StaticAtomicSequenceNumber MessageLoopTaskRunnerTest::g_order;
+
+TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_Basic) {
+ MessageLoop* task_run_on = NULL;
+ MessageLoop* task_deleted_on = NULL;
+ int task_delete_order = -1;
+ MessageLoop* reply_run_on = NULL;
+ MessageLoop* reply_deleted_on = NULL;
+ int reply_delete_order = -1;
+
+ scoped_refptr<LoopRecorder> task_recoder =
+ new LoopRecorder(&task_run_on, &task_deleted_on, &task_delete_order);
+ scoped_refptr<LoopRecorder> reply_recoder =
+ new LoopRecorder(&reply_run_on, &reply_deleted_on, &reply_delete_order);
+
+ ASSERT_TRUE(task_thread_.task_runner()->PostTaskAndReply(
+ FROM_HERE, Bind(&RecordLoop, task_recoder),
+ Bind(&RecordLoopAndQuit, reply_recoder)));
+
+ // Die if base::Bind doesn't retain a reference to the recorders.
+ task_recoder = NULL;
+ reply_recoder = NULL;
+ ASSERT_FALSE(task_deleted_on);
+ ASSERT_FALSE(reply_deleted_on);
+
+ UnblockTaskThread();
+ current_loop_->Run();
+
+ EXPECT_EQ(task_thread_.message_loop(), task_run_on);
+ EXPECT_EQ(current_loop_.get(), task_deleted_on);
+ EXPECT_EQ(current_loop_.get(), reply_run_on);
+ EXPECT_EQ(current_loop_.get(), reply_deleted_on);
+ EXPECT_LT(task_delete_order, reply_delete_order);
+}
+
+TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReplyOnDeletedThreadDoesNotLeak) {
+ MessageLoop* task_run_on = NULL;
+ MessageLoop* task_deleted_on = NULL;
+ int task_delete_order = -1;
+ MessageLoop* reply_run_on = NULL;
+ MessageLoop* reply_deleted_on = NULL;
+ int reply_delete_order = -1;
+
+ scoped_refptr<LoopRecorder> task_recoder =
+ new LoopRecorder(&task_run_on, &task_deleted_on, &task_delete_order);
+ scoped_refptr<LoopRecorder> reply_recoder =
+ new LoopRecorder(&reply_run_on, &reply_deleted_on, &reply_delete_order);
+
+ // Grab a task runner to a dead MessageLoop.
+ scoped_refptr<SingleThreadTaskRunner> task_runner =
+ task_thread_.task_runner();
+ UnblockTaskThread();
+ task_thread_.Stop();
+
+ ASSERT_FALSE(
+ task_runner->PostTaskAndReply(FROM_HERE, Bind(&RecordLoop, task_recoder),
+ Bind(&RecordLoopAndQuit, reply_recoder)));
+
+ // The relay should have properly deleted its resources leaving us as the only
+ // reference.
+ EXPECT_EQ(task_delete_order, reply_delete_order);
+ ASSERT_TRUE(task_recoder->HasOneRef());
+ ASSERT_TRUE(reply_recoder->HasOneRef());
+
+ // Nothing should have run though.
+ EXPECT_FALSE(task_run_on);
+ EXPECT_FALSE(reply_run_on);
+}
+
+TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_SameLoop) {
+ MessageLoop* task_run_on = NULL;
+ MessageLoop* task_deleted_on = NULL;
+ int task_delete_order = -1;
+ MessageLoop* reply_run_on = NULL;
+ MessageLoop* reply_deleted_on = NULL;
+ int reply_delete_order = -1;
+
+ scoped_refptr<LoopRecorder> task_recoder =
+ new LoopRecorder(&task_run_on, &task_deleted_on, &task_delete_order);
+ scoped_refptr<LoopRecorder> reply_recoder =
+ new LoopRecorder(&reply_run_on, &reply_deleted_on, &reply_delete_order);
+
+ // Enqueue the relay.
+ ASSERT_TRUE(current_loop_->task_runner()->PostTaskAndReply(
+ FROM_HERE, Bind(&RecordLoop, task_recoder),
+ Bind(&RecordLoopAndQuit, reply_recoder)));
+
+ // Die if base::Bind doesn't retain a reference to the recorders.
+ task_recoder = NULL;
+ reply_recoder = NULL;
+ ASSERT_FALSE(task_deleted_on);
+ ASSERT_FALSE(reply_deleted_on);
+
+ current_loop_->Run();
+
+ EXPECT_EQ(current_loop_.get(), task_run_on);
+ EXPECT_EQ(current_loop_.get(), task_deleted_on);
+ EXPECT_EQ(current_loop_.get(), reply_run_on);
+ EXPECT_EQ(current_loop_.get(), reply_deleted_on);
+ EXPECT_LT(task_delete_order, reply_delete_order);
+}
+
+TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_DeadReplyLoopDoesNotDelete) {
+ // Annotate the scope as having memory leaks to suppress heapchecker reports.
+ ANNOTATE_SCOPED_MEMORY_LEAK;
+ MessageLoop* task_run_on = NULL;
+ MessageLoop* task_deleted_on = NULL;
+ int task_delete_order = -1;
+ MessageLoop* reply_run_on = NULL;
+ MessageLoop* reply_deleted_on = NULL;
+ int reply_delete_order = -1;
+
+ scoped_refptr<LoopRecorder> task_recoder =
+ new LoopRecorder(&task_run_on, &task_deleted_on, &task_delete_order);
+ scoped_refptr<LoopRecorder> reply_recoder =
+ new LoopRecorder(&reply_run_on, &reply_deleted_on, &reply_delete_order);
+
+ // Enqueue the relay.
+ task_thread_.task_runner()->PostTaskAndReply(
+ FROM_HERE, Bind(&RecordLoop, task_recoder),
+ Bind(&RecordLoopAndQuit, reply_recoder));
+
+ // Die if base::Bind doesn't retain a reference to the recorders.
+ task_recoder = NULL;
+ reply_recoder = NULL;
+ ASSERT_FALSE(task_deleted_on);
+ ASSERT_FALSE(reply_deleted_on);
+
+ UnblockTaskThread();
+
+ // Mercilessly whack the current loop before |reply| gets to run.
+ current_loop_.reset();
+
+ // This should ensure the relay has been run. We need to record the
+ // MessageLoop pointer before stopping the thread because Thread::Stop() will
+ // NULL out its own pointer.
+ MessageLoop* task_loop = task_thread_.message_loop();
+ task_thread_.Stop();
+
+ EXPECT_EQ(task_loop, task_run_on);
+ ASSERT_FALSE(task_deleted_on);
+ EXPECT_FALSE(reply_run_on);
+ ASSERT_FALSE(reply_deleted_on);
+ EXPECT_EQ(task_delete_order, reply_delete_order);
+
+ // The PostTaskAndReplyRelay is leaked here. Even if we had a reference to
+ // it, we cannot just delete it because PostTaskAndReplyRelay's destructor
+ // checks that MessageLoop::current() is the same as when the
+ // PostTaskAndReplyRelay object was constructed. However, this loop must have
+ // already been deleted in order to perform this test. See
+ // http://crbug.com/86301.
+}
+
+class MessageLoopTaskRunnerThreadingTest : public testing::Test {
+ public:
+ void Release() const {
+ AssertOnIOThread();
+ Quit();
+ }
+
+ void Quit() const {
+ loop_.task_runner()->PostTask(FROM_HERE,
+ MessageLoop::QuitWhenIdleClosure());
+ }
+
+ void AssertOnIOThread() const {
+ ASSERT_TRUE(io_thread_->task_runner()->BelongsToCurrentThread());
+ ASSERT_EQ(io_thread_->task_runner(), ThreadTaskRunnerHandle::Get());
+ }
+
+ void AssertOnFileThread() const {
+ ASSERT_TRUE(file_thread_->task_runner()->BelongsToCurrentThread());
+ ASSERT_EQ(file_thread_->task_runner(), ThreadTaskRunnerHandle::Get());
+ }
+
+ protected:
+ void SetUp() override {
+ io_thread_.reset(new Thread("MessageLoopTaskRunnerThreadingTest_IO"));
+ file_thread_.reset(new Thread("MessageLoopTaskRunnerThreadingTest_File"));
+ io_thread_->Start();
+ file_thread_->Start();
+ }
+
+ void TearDown() override {
+ io_thread_->Stop();
+ file_thread_->Stop();
+ }
+
+ static void BasicFunction(MessageLoopTaskRunnerThreadingTest* test) {
+ test->AssertOnFileThread();
+ test->Quit();
+ }
+
+ static void AssertNotRun() { FAIL() << "Callback Should not get executed."; }
+
+ class DeletedOnFile {
+ public:
+ explicit DeletedOnFile(MessageLoopTaskRunnerThreadingTest* test)
+ : test_(test) {}
+
+ ~DeletedOnFile() {
+ test_->AssertOnFileThread();
+ test_->Quit();
+ }
+
+ private:
+ MessageLoopTaskRunnerThreadingTest* test_;
+ };
+
+ std::unique_ptr<Thread> io_thread_;
+ std::unique_ptr<Thread> file_thread_;
+
+ private:
+ mutable MessageLoop loop_;
+};
+
+TEST_F(MessageLoopTaskRunnerThreadingTest, Release) {
+ EXPECT_TRUE(io_thread_->task_runner()->ReleaseSoon(FROM_HERE, this));
+ RunLoop().Run();
+}
+
+TEST_F(MessageLoopTaskRunnerThreadingTest, Delete) {
+ DeletedOnFile* deleted_on_file = new DeletedOnFile(this);
+ EXPECT_TRUE(
+ file_thread_->task_runner()->DeleteSoon(FROM_HERE, deleted_on_file));
+ RunLoop().Run();
+}
+
+TEST_F(MessageLoopTaskRunnerThreadingTest, PostTask) {
+ EXPECT_TRUE(file_thread_->task_runner()->PostTask(
+ FROM_HERE, Bind(&MessageLoopTaskRunnerThreadingTest::BasicFunction,
+ Unretained(this))));
+ RunLoop().Run();
+}
+
+TEST_F(MessageLoopTaskRunnerThreadingTest, PostTaskAfterThreadExits) {
+ std::unique_ptr<Thread> test_thread(
+ new Thread("MessageLoopTaskRunnerThreadingTest_Dummy"));
+ test_thread->Start();
+ scoped_refptr<SingleThreadTaskRunner> task_runner =
+ test_thread->task_runner();
+ test_thread->Stop();
+
+ bool ret = task_runner->PostTask(
+ FROM_HERE, Bind(&MessageLoopTaskRunnerThreadingTest::AssertNotRun));
+ EXPECT_FALSE(ret);
+}
+
+TEST_F(MessageLoopTaskRunnerThreadingTest, PostTaskAfterThreadIsDeleted) {
+ scoped_refptr<SingleThreadTaskRunner> task_runner;
+ {
+ std::unique_ptr<Thread> test_thread(
+ new Thread("MessageLoopTaskRunnerThreadingTest_Dummy"));
+ test_thread->Start();
+ task_runner = test_thread->task_runner();
+ }
+ bool ret = task_runner->PostTask(
+ FROM_HERE, Bind(&MessageLoopTaskRunnerThreadingTest::AssertNotRun));
+ EXPECT_FALSE(ret);
+}
+
+} // namespace base
diff --git a/libchrome/base/message_loop/message_loop_test.cc b/libchrome/base/message_loop/message_loop_test.cc
new file mode 100644
index 0000000..1ab946f
--- /dev/null
+++ b/libchrome/base/message_loop/message_loop_test.cc
@@ -0,0 +1,1055 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop_test.h"
+
+#include <stddef.h>
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+
+namespace base {
+namespace test {
+
+namespace {
+
+class Foo : public RefCounted<Foo> {
+ public:
+ Foo() : test_count_(0) {
+ }
+
+ void Test0() {
+ ++test_count_;
+ }
+
+ void Test1ConstRef(const std::string& a) {
+ ++test_count_;
+ result_.append(a);
+ }
+
+ void Test1Ptr(std::string* a) {
+ ++test_count_;
+ result_.append(*a);
+ }
+
+ void Test1Int(int a) {
+ test_count_ += a;
+ }
+
+ void Test2Ptr(std::string* a, std::string* b) {
+ ++test_count_;
+ result_.append(*a);
+ result_.append(*b);
+ }
+
+ void Test2Mixed(const std::string& a, std::string* b) {
+ ++test_count_;
+ result_.append(a);
+ result_.append(*b);
+ }
+
+ int test_count() const { return test_count_; }
+ const std::string& result() const { return result_; }
+
+ private:
+ friend class RefCounted<Foo>;
+
+ ~Foo() {}
+
+ int test_count_;
+ std::string result_;
+
+ DISALLOW_COPY_AND_ASSIGN(Foo);
+};
+
+// This function runs slowly to simulate a large amount of work being done.
+void SlowFunc(TimeDelta pause, int* quit_counter) {
+ PlatformThread::Sleep(pause);
+ if (--(*quit_counter) == 0)
+ MessageLoop::current()->QuitWhenIdle();
+}
+
+// This function records the time when Run was called in a Time object, which is
+// useful for building a variety of MessageLoop tests.
+// TODO(sky): remove?
+void RecordRunTimeFunc(Time* run_time, int* quit_counter) {
+ *run_time = Time::Now();
+
+ // Cause our Run function to take some time to execute. As a result we can
+ // count on subsequent RecordRunTimeFunc()s running at a future time,
+ // without worrying about the resolution of our system clock being an issue.
+ SlowFunc(TimeDelta::FromMilliseconds(10), quit_counter);
+}
+
+} // namespace
+
+void RunTest_PostTask(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+ // Add tests to message loop
+ scoped_refptr<Foo> foo(new Foo());
+ std::string a("a"), b("b"), c("c"), d("d");
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&Foo::Test0, foo.get()));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&Foo::Test1ConstRef, foo.get(), a));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&Foo::Test1Ptr, foo.get(), &b));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&Foo::Test1Int, foo.get(), 100));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&Foo::Test2Ptr, foo.get(), &a, &c));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&Foo::Test2Mixed, foo.get(), a, &d));
+ // After all tests, post a message that will shut down the message loop
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE,
+ Bind(&MessageLoop::QuitWhenIdle, Unretained(MessageLoop::current())));
+
+ // Now kick things off
+ RunLoop().Run();
+
+ EXPECT_EQ(foo->test_count(), 105);
+ EXPECT_EQ(foo->result(), "abacad");
+}
+
+void RunTest_PostDelayedTask_Basic(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ // Test that PostDelayedTask results in a delayed task.
+
+ const TimeDelta kDelay = TimeDelta::FromMilliseconds(100);
+
+ int num_tasks = 1;
+ Time run_time;
+
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time, &num_tasks), kDelay);
+
+ Time time_before_run = Time::Now();
+ RunLoop().Run();
+ Time time_after_run = Time::Now();
+
+ EXPECT_EQ(0, num_tasks);
+ EXPECT_LT(kDelay, time_after_run - time_before_run);
+}
+
+void RunTest_PostDelayedTask_InDelayOrder(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ // Test that two tasks with different delays run in the right order.
+ int num_tasks = 2;
+ Time run_time1, run_time2;
+
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks),
+ TimeDelta::FromMilliseconds(200));
+ // If we get a large pause in execution (due to a context switch) here, this
+ // test could fail.
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
+ TimeDelta::FromMilliseconds(10));
+
+ RunLoop().Run();
+ EXPECT_EQ(0, num_tasks);
+
+ EXPECT_TRUE(run_time2 < run_time1);
+}
+
+void RunTest_PostDelayedTask_InPostOrder(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ // Test that two tasks with the same delay run in the order in which they
+ // were posted.
+ //
+ // NOTE: This is actually an approximate test since the API only takes a
+ // "delay" parameter, so we are not exactly simulating two tasks that get
+ // posted at the exact same time. It would be nice if the API allowed us to
+ // specify the desired run time.
+
+ const TimeDelta kDelay = TimeDelta::FromMilliseconds(100);
+
+ int num_tasks = 2;
+ Time run_time1, run_time2;
+
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks), kDelay);
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks), kDelay);
+
+ RunLoop().Run();
+ EXPECT_EQ(0, num_tasks);
+
+ EXPECT_TRUE(run_time1 < run_time2);
+}
+
+void RunTest_PostDelayedTask_InPostOrder_2(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ // Test that a delayed task still runs after a normal tasks even if the
+ // normal tasks take a long time to run.
+
+ const TimeDelta kPause = TimeDelta::FromMilliseconds(50);
+
+ int num_tasks = 2;
+ Time run_time;
+
+ loop.task_runner()->PostTask(FROM_HERE, Bind(&SlowFunc, kPause, &num_tasks));
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time, &num_tasks),
+ TimeDelta::FromMilliseconds(10));
+
+ Time time_before_run = Time::Now();
+ RunLoop().Run();
+ Time time_after_run = Time::Now();
+
+ EXPECT_EQ(0, num_tasks);
+
+ EXPECT_LT(kPause, time_after_run - time_before_run);
+}
+
+void RunTest_PostDelayedTask_InPostOrder_3(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ // Test that a delayed task still runs after a pile of normal tasks. The key
+ // difference between this test and the previous one is that here we return
+ // to the MessageLoop a lot so we give the MessageLoop plenty of opportunities
+ // to maybe run the delayed task. It should know not to do so until the
+ // delayed task's delay has passed.
+
+ int num_tasks = 11;
+ Time run_time1, run_time2;
+
+ // Clutter the ML with tasks.
+ for (int i = 1; i < num_tasks; ++i)
+ loop.task_runner()->PostTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks));
+
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
+ TimeDelta::FromMilliseconds(1));
+
+ RunLoop().Run();
+ EXPECT_EQ(0, num_tasks);
+
+ EXPECT_TRUE(run_time2 > run_time1);
+}
+
+void RunTest_PostDelayedTask_SharedTimer(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ // Test that the interval of the timer, used to run the next delayed task, is
+ // set to a value corresponding to when the next delayed task should run.
+
+ // By setting num_tasks to 1, we ensure that the first task to run causes the
+ // run loop to exit.
+ int num_tasks = 1;
+ Time run_time1, run_time2;
+
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks),
+ TimeDelta::FromSeconds(1000));
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
+ TimeDelta::FromMilliseconds(10));
+
+ Time start_time = Time::Now();
+
+ RunLoop().Run();
+ EXPECT_EQ(0, num_tasks);
+
+ // Ensure that we ran in far less time than the slower timer.
+ TimeDelta total_time = Time::Now() - start_time;
+ EXPECT_GT(5000, total_time.InMilliseconds());
+
+ // In case both timers somehow run at nearly the same time, sleep a little
+ // and then run all pending to force them both to have run. This is just
+ // encouraging flakiness if there is any.
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+ RunLoop().RunUntilIdle();
+
+ EXPECT_TRUE(run_time1.is_null());
+ EXPECT_FALSE(run_time2.is_null());
+}
+
+// This is used to inject a test point for recording the destructor calls for
+// Closure objects sent to MessageLoop::PostTask(). It is awkward usage since we
+// are trying to hook the actual destruction, which is not a common operation.
+class RecordDeletionProbe : public RefCounted<RecordDeletionProbe> {
+ public:
+ RecordDeletionProbe(RecordDeletionProbe* post_on_delete, bool* was_deleted)
+ : post_on_delete_(post_on_delete), was_deleted_(was_deleted) {
+ }
+ void Run() {}
+
+ private:
+ friend class RefCounted<RecordDeletionProbe>;
+
+ ~RecordDeletionProbe() {
+ *was_deleted_ = true;
+ if (post_on_delete_.get())
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&RecordDeletionProbe::Run, post_on_delete_.get()));
+ }
+
+ scoped_refptr<RecordDeletionProbe> post_on_delete_;
+ bool* was_deleted_;
+};
+
+void RunTest_EnsureDeletion(MessagePumpFactory factory) {
+ bool a_was_deleted = false;
+ bool b_was_deleted = false;
+ {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+ loop.task_runner()->PostTask(
+ FROM_HERE, Bind(&RecordDeletionProbe::Run,
+ new RecordDeletionProbe(NULL, &a_was_deleted)));
+ // TODO(ajwong): Do we really need 1000ms here?
+ loop.task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&RecordDeletionProbe::Run,
+ new RecordDeletionProbe(NULL, &b_was_deleted)),
+ TimeDelta::FromMilliseconds(1000));
+ }
+ EXPECT_TRUE(a_was_deleted);
+ EXPECT_TRUE(b_was_deleted);
+}
+
+void RunTest_EnsureDeletion_Chain(MessagePumpFactory factory) {
+ bool a_was_deleted = false;
+ bool b_was_deleted = false;
+ bool c_was_deleted = false;
+ {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+ // The scoped_refptr for each of the below is held either by the chained
+ // RecordDeletionProbe, or the bound RecordDeletionProbe::Run() callback.
+ RecordDeletionProbe* a = new RecordDeletionProbe(NULL, &a_was_deleted);
+ RecordDeletionProbe* b = new RecordDeletionProbe(a, &b_was_deleted);
+ RecordDeletionProbe* c = new RecordDeletionProbe(b, &c_was_deleted);
+ loop.task_runner()->PostTask(FROM_HERE, Bind(&RecordDeletionProbe::Run, c));
+ }
+ EXPECT_TRUE(a_was_deleted);
+ EXPECT_TRUE(b_was_deleted);
+ EXPECT_TRUE(c_was_deleted);
+}
+
+void NestingFunc(int* depth) {
+ if (*depth > 0) {
+ *depth -= 1;
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&NestingFunc, depth));
+
+ MessageLoop::current()->SetNestableTasksAllowed(true);
+ RunLoop().Run();
+ }
+ MessageLoop::current()->QuitWhenIdle();
+}
+
+void RunTest_Nesting(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ int depth = 100;
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&NestingFunc, &depth));
+ RunLoop().Run();
+ EXPECT_EQ(depth, 0);
+}
+
+// A NestingObserver that tracks the number of nested message loop starts it
+// has seen.
+class TestNestingObserver : public MessageLoop::NestingObserver {
+ public:
+ TestNestingObserver() {}
+ ~TestNestingObserver() override {}
+
+ int begin_nested_loop_count() const { return begin_nested_loop_count_; }
+
+ // MessageLoop::NestingObserver:
+ void OnBeginNestedMessageLoop() override { begin_nested_loop_count_++; }
+
+ private:
+ int begin_nested_loop_count_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(TestNestingObserver);
+};
+
+void ExpectOneBeginNestedLoop(TestNestingObserver* observer) {
+ EXPECT_EQ(1, observer->begin_nested_loop_count());
+}
+
+// Starts a nested message loop.
+void RunNestedLoop(TestNestingObserver* observer,
+ const Closure& quit_outer_loop) {
+ // The nested loop hasn't started yet.
+ EXPECT_EQ(0, observer->begin_nested_loop_count());
+
+ MessageLoop::ScopedNestableTaskAllower allow(MessageLoop::current());
+ RunLoop nested_loop;
+ // Verify that by the time the first task is run the observer has seen the
+ // message loop begin.
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&ExpectOneBeginNestedLoop, observer));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ nested_loop.QuitClosure());
+ nested_loop.Run();
+
+ // Quitting message loops doesn't change the begin count.
+ EXPECT_EQ(1, observer->begin_nested_loop_count());
+
+ quit_outer_loop.Run();
+}
+
+// Tests that a NestingObserver is notified when a nested message loop begins.
+void RunTest_NestingObserver(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop outer_loop(std::move(pump));
+
+ // Observe the outer loop for nested message loops beginning.
+ TestNestingObserver nesting_observer;
+ outer_loop.AddNestingObserver(&nesting_observer);
+
+ // Post a task that runs a nested message loop.
+ outer_loop.task_runner()->PostTask(FROM_HERE,
+ Bind(&RunNestedLoop, &nesting_observer,
+ outer_loop.QuitWhenIdleClosure()));
+ RunLoop().Run();
+
+ outer_loop.RemoveNestingObserver(&nesting_observer);
+}
+
+enum TaskType {
+ MESSAGEBOX,
+ ENDDIALOG,
+ RECURSIVE,
+ TIMEDMESSAGELOOP,
+ QUITMESSAGELOOP,
+ ORDERED,
+ PUMPS,
+ SLEEP,
+ RUNS,
+};
+
+struct TaskItem {
+ TaskItem(TaskType t, int c, bool s)
+ : type(t),
+ cookie(c),
+ start(s) {
+ }
+
+ TaskType type;
+ int cookie;
+ bool start;
+
+ bool operator == (const TaskItem& other) const {
+ return type == other.type && cookie == other.cookie && start == other.start;
+ }
+};
+
+std::ostream& operator <<(std::ostream& os, TaskType type) {
+ switch (type) {
+ case MESSAGEBOX: os << "MESSAGEBOX"; break;
+ case ENDDIALOG: os << "ENDDIALOG"; break;
+ case RECURSIVE: os << "RECURSIVE"; break;
+ case TIMEDMESSAGELOOP: os << "TIMEDMESSAGELOOP"; break;
+ case QUITMESSAGELOOP: os << "QUITMESSAGELOOP"; break;
+ case ORDERED: os << "ORDERED"; break;
+ case PUMPS: os << "PUMPS"; break;
+ case SLEEP: os << "SLEEP"; break;
+ default:
+ NOTREACHED();
+ os << "Unknown TaskType";
+ break;
+ }
+ return os;
+}
+
+std::ostream& operator <<(std::ostream& os, const TaskItem& item) {
+ if (item.start)
+ return os << item.type << " " << item.cookie << " starts";
+ else
+ return os << item.type << " " << item.cookie << " ends";
+}
+
+class TaskList {
+ public:
+ void RecordStart(TaskType type, int cookie) {
+ TaskItem item(type, cookie, true);
+ DVLOG(1) << item;
+ task_list_.push_back(item);
+ }
+
+ void RecordEnd(TaskType type, int cookie) {
+ TaskItem item(type, cookie, false);
+ DVLOG(1) << item;
+ task_list_.push_back(item);
+ }
+
+ size_t Size() {
+ return task_list_.size();
+ }
+
+ TaskItem Get(int n) {
+ return task_list_[n];
+ }
+
+ private:
+ std::vector<TaskItem> task_list_;
+};
+
+void RecursiveFunc(TaskList* order, int cookie, int depth,
+ bool is_reentrant) {
+ order->RecordStart(RECURSIVE, cookie);
+ if (depth > 0) {
+ if (is_reentrant)
+ MessageLoop::current()->SetNestableTasksAllowed(true);
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE,
+ Bind(&RecursiveFunc, order, cookie, depth - 1, is_reentrant));
+ }
+ order->RecordEnd(RECURSIVE, cookie);
+}
+
+void QuitFunc(TaskList* order, int cookie) {
+ order->RecordStart(QUITMESSAGELOOP, cookie);
+ MessageLoop::current()->QuitWhenIdle();
+ order->RecordEnd(QUITMESSAGELOOP, cookie);
+}
+void RunTest_RecursiveDenial1(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
+ TaskList order;
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&RecursiveFunc, &order, 1, 2, false));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&RecursiveFunc, &order, 2, 2, false));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&QuitFunc, &order, 3));
+
+ RunLoop().Run();
+
+ // FIFO order.
+ ASSERT_EQ(14U, order.Size());
+ EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(2), TaskItem(RECURSIVE, 2, true));
+ EXPECT_EQ(order.Get(3), TaskItem(RECURSIVE, 2, false));
+ EXPECT_EQ(order.Get(4), TaskItem(QUITMESSAGELOOP, 3, true));
+ EXPECT_EQ(order.Get(5), TaskItem(QUITMESSAGELOOP, 3, false));
+ EXPECT_EQ(order.Get(6), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(7), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(8), TaskItem(RECURSIVE, 2, true));
+ EXPECT_EQ(order.Get(9), TaskItem(RECURSIVE, 2, false));
+ EXPECT_EQ(order.Get(10), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(11), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 2, true));
+ EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 2, false));
+}
+
+void RecursiveSlowFunc(TaskList* order, int cookie, int depth,
+ bool is_reentrant) {
+ RecursiveFunc(order, cookie, depth, is_reentrant);
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(10));
+}
+
+void OrderedFunc(TaskList* order, int cookie) {
+ order->RecordStart(ORDERED, cookie);
+ order->RecordEnd(ORDERED, cookie);
+}
+
+void RunTest_RecursiveDenial3(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
+ TaskList order;
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&RecursiveSlowFunc, &order, 1, 2, false));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&RecursiveSlowFunc, &order, 2, 2, false));
+ MessageLoop::current()->task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 3), TimeDelta::FromMilliseconds(5));
+ MessageLoop::current()->task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&QuitFunc, &order, 4), TimeDelta::FromMilliseconds(5));
+
+ RunLoop().Run();
+
+ // FIFO order.
+ ASSERT_EQ(16U, order.Size());
+ EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(2), TaskItem(RECURSIVE, 2, true));
+ EXPECT_EQ(order.Get(3), TaskItem(RECURSIVE, 2, false));
+ EXPECT_EQ(order.Get(4), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(5), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(6), TaskItem(ORDERED, 3, true));
+ EXPECT_EQ(order.Get(7), TaskItem(ORDERED, 3, false));
+ EXPECT_EQ(order.Get(8), TaskItem(RECURSIVE, 2, true));
+ EXPECT_EQ(order.Get(9), TaskItem(RECURSIVE, 2, false));
+ EXPECT_EQ(order.Get(10), TaskItem(QUITMESSAGELOOP, 4, true));
+ EXPECT_EQ(order.Get(11), TaskItem(QUITMESSAGELOOP, 4, false));
+ EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(14), TaskItem(RECURSIVE, 2, true));
+ EXPECT_EQ(order.Get(15), TaskItem(RECURSIVE, 2, false));
+}
+
+void RunTest_RecursiveSupport1(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ TaskList order;
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&RecursiveFunc, &order, 1, 2, true));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&RecursiveFunc, &order, 2, 2, true));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&QuitFunc, &order, 3));
+
+ RunLoop().Run();
+
+ // FIFO order.
+ ASSERT_EQ(14U, order.Size());
+ EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(2), TaskItem(RECURSIVE, 2, true));
+ EXPECT_EQ(order.Get(3), TaskItem(RECURSIVE, 2, false));
+ EXPECT_EQ(order.Get(4), TaskItem(QUITMESSAGELOOP, 3, true));
+ EXPECT_EQ(order.Get(5), TaskItem(QUITMESSAGELOOP, 3, false));
+ EXPECT_EQ(order.Get(6), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(7), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(8), TaskItem(RECURSIVE, 2, true));
+ EXPECT_EQ(order.Get(9), TaskItem(RECURSIVE, 2, false));
+ EXPECT_EQ(order.Get(10), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(11), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 2, true));
+ EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 2, false));
+}
+
+// Tests that non nestable tasks run in FIFO if there are no nested loops.
+void RunTest_NonNestableWithNoNesting(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ TaskList order;
+
+ MessageLoop::current()->task_runner()->PostNonNestableTask(
+ FROM_HERE,
+ Bind(&OrderedFunc, &order, 1));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE,
+ Bind(&OrderedFunc, &order, 2));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&QuitFunc, &order, 3));
+ RunLoop().Run();
+
+ // FIFO order.
+ ASSERT_EQ(6U, order.Size());
+ EXPECT_EQ(order.Get(0), TaskItem(ORDERED, 1, true));
+ EXPECT_EQ(order.Get(1), TaskItem(ORDERED, 1, false));
+ EXPECT_EQ(order.Get(2), TaskItem(ORDERED, 2, true));
+ EXPECT_EQ(order.Get(3), TaskItem(ORDERED, 2, false));
+ EXPECT_EQ(order.Get(4), TaskItem(QUITMESSAGELOOP, 3, true));
+ EXPECT_EQ(order.Get(5), TaskItem(QUITMESSAGELOOP, 3, false));
+}
+
+void FuncThatPumps(TaskList* order, int cookie) {
+ order->RecordStart(PUMPS, cookie);
+ {
+ MessageLoop::ScopedNestableTaskAllower allow(MessageLoop::current());
+ RunLoop().RunUntilIdle();
+ }
+ order->RecordEnd(PUMPS, cookie);
+}
+
+void SleepFunc(TaskList* order, int cookie, TimeDelta delay) {
+ order->RecordStart(SLEEP, cookie);
+ PlatformThread::Sleep(delay);
+ order->RecordEnd(SLEEP, cookie);
+}
+
+// Tests that non-nestable tasks are deferred while a nested loop is running.
+void RunTest_NonNestableInNestedLoop(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ TaskList order;
+
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE,
+ Bind(&FuncThatPumps, &order, 1));
+ MessageLoop::current()->task_runner()->PostNonNestableTask(
+ FROM_HERE,
+ Bind(&OrderedFunc, &order, 2));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE,
+ Bind(&OrderedFunc, &order, 3));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE,
+ Bind(&SleepFunc, &order, 4, TimeDelta::FromMilliseconds(50)));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE,
+ Bind(&OrderedFunc, &order, 5));
+ MessageLoop::current()->task_runner()->PostNonNestableTask(
+ FROM_HERE,
+ Bind(&QuitFunc, &order, 6));
+
+ RunLoop().Run();
+
+ // FIFO order.
+ ASSERT_EQ(12U, order.Size());
+ EXPECT_EQ(order.Get(0), TaskItem(PUMPS, 1, true));
+ EXPECT_EQ(order.Get(1), TaskItem(ORDERED, 3, true));
+ EXPECT_EQ(order.Get(2), TaskItem(ORDERED, 3, false));
+ EXPECT_EQ(order.Get(3), TaskItem(SLEEP, 4, true));
+ EXPECT_EQ(order.Get(4), TaskItem(SLEEP, 4, false));
+ EXPECT_EQ(order.Get(5), TaskItem(ORDERED, 5, true));
+ EXPECT_EQ(order.Get(6), TaskItem(ORDERED, 5, false));
+ EXPECT_EQ(order.Get(7), TaskItem(PUMPS, 1, false));
+ EXPECT_EQ(order.Get(8), TaskItem(ORDERED, 2, true));
+ EXPECT_EQ(order.Get(9), TaskItem(ORDERED, 2, false));
+ EXPECT_EQ(order.Get(10), TaskItem(QUITMESSAGELOOP, 6, true));
+ EXPECT_EQ(order.Get(11), TaskItem(QUITMESSAGELOOP, 6, false));
+}
+
+void FuncThatRuns(TaskList* order, int cookie, RunLoop* run_loop) {
+ order->RecordStart(RUNS, cookie);
+ {
+ MessageLoop::ScopedNestableTaskAllower allow(MessageLoop::current());
+ run_loop->Run();
+ }
+ order->RecordEnd(RUNS, cookie);
+}
+
+void FuncThatQuitsNow() {
+ MessageLoop::current()->QuitNow();
+}
+// Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
+void RunTest_QuitNow(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ TaskList order;
+
+ RunLoop run_loop;
+
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 2));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&FuncThatQuitsNow));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 3));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&FuncThatQuitsNow));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 4)); // never runs
+
+ RunLoop().Run();
+
+ ASSERT_EQ(6U, order.Size());
+ int task_index = 0;
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 3, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 3, false));
+ EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
+void RunTest_RunLoopQuitTop(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ TaskList order;
+
+ RunLoop outer_run_loop;
+ RunLoop nested_run_loop;
+
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 2));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, nested_run_loop.QuitClosure());
+
+ outer_run_loop.Run();
+
+ ASSERT_EQ(4U, order.Size());
+ int task_index = 0;
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+ EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
+void RunTest_RunLoopQuitNested(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ TaskList order;
+
+ RunLoop outer_run_loop;
+ RunLoop nested_run_loop;
+
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, nested_run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 2));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
+
+ outer_run_loop.Run();
+
+ ASSERT_EQ(4U, order.Size());
+ int task_index = 0;
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
+ EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
+void RunTest_RunLoopQuitBogus(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ TaskList order;
+
+ RunLoop outer_run_loop;
+ RunLoop nested_run_loop;
+ RunLoop bogus_run_loop;
+
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ bogus_run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 2));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, nested_run_loop.QuitClosure());
+
+ outer_run_loop.Run();
+
+ ASSERT_EQ(4U, order.Size());
+ int task_index = 0;
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+ EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
+void RunTest_RunLoopQuitDeep(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ TaskList order;
+
+ RunLoop outer_run_loop;
+ RunLoop nested_loop1;
+ RunLoop nested_loop2;
+ RunLoop nested_loop3;
+ RunLoop nested_loop4;
+
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_loop1)));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 2, Unretained(&nested_loop2)));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 3, Unretained(&nested_loop3)));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 4, Unretained(&nested_loop4)));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 5));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 6));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ nested_loop1.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 7));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ nested_loop2.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 8));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ nested_loop3.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 9));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ nested_loop4.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 10));
+
+ outer_run_loop.Run();
+
+ ASSERT_EQ(18U, order.Size());
+ int task_index = 0;
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 2, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 3, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 4, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 5, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 5, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 6, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 6, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 7, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 7, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 8, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 8, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 9, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 9, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 4, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 3, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 2, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+ EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests that RunLoop::Quit called before Run makes Run return immediately.
+void RunTest_RunLoopQuitOrderBefore(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ TaskList order;
+
+ RunLoop run_loop;
+
+ run_loop.Quit();
+
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 1)); // never runs
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatQuitsNow)); // never runs
+
+ run_loop.Run();
+
+ ASSERT_EQ(0U, order.Size());
+}
+
+// Tests that a posted RunLoop::Quit stops Run at that point in the queue.
+void RunTest_RunLoopQuitOrderDuring(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ TaskList order;
+
+ RunLoop run_loop;
+
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 1));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ run_loop.QuitClosure());
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 2)); // never runs
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatQuitsNow)); // never runs
+
+ run_loop.Run();
+
+ ASSERT_EQ(2U, order.Size());
+ int task_index = 0;
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 1, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 1, false));
+ EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests that RunLoop::Quit after its Run has exited has no effect.
+void RunTest_RunLoopQuitOrderAfter(MessagePumpFactory factory) {
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+
+ TaskList order;
+
+ RunLoop run_loop;
+
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 2));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&FuncThatQuitsNow));
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 3));
+ MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, run_loop.QuitClosure()); // has no effect
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&OrderedFunc, &order, 4));
+ MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+ Bind(&FuncThatQuitsNow));
+
+ RunLoop outer_run_loop;
+ outer_run_loop.Run();
+
+ ASSERT_EQ(8U, order.Size());
+ int task_index = 0;
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 3, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 3, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 4, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 4, false));
+ EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+void PostNTasksThenQuit(int posts_remaining) {
+ if (posts_remaining > 1) {
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&PostNTasksThenQuit, posts_remaining - 1));
+ } else {
+ MessageLoop::current()->QuitWhenIdle();
+ }
+}
+
+// There was a bug in the MessagePumpGLib where posting tasks recursively
+// caused the message loop to hang, due to the buffer of the internal pipe
+// becoming full. Test all MessageLoop types to ensure this issue does not
+// exist in other MessagePumps.
+//
+// On Linux, the pipe buffer size is 64KiB by default. The bug caused one
+// byte accumulated in the pipe per two posts, so we should repeat 128K
+// times to reproduce the bug.
+void RunTest_RecursivePosts(MessagePumpFactory factory) {
+ const int kNumTimes = 1 << 17;
+ std::unique_ptr<MessagePump> pump(factory());
+ MessageLoop loop(std::move(pump));
+ loop.task_runner()->PostTask(FROM_HERE, Bind(&PostNTasksThenQuit, kNumTimes));
+ RunLoop().Run();
+}
+
+} // namespace test
+} // namespace base
diff --git a/libchrome/base/message_loop/message_loop_test.h b/libchrome/base/message_loop/message_loop_test.h
new file mode 100644
index 0000000..b7ae28e
--- /dev/null
+++ b/libchrome/base/message_loop/message_loop_test.h
@@ -0,0 +1,130 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_LOOP_TEST_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_LOOP_TEST_H_
+
+#include "base/message_loop/message_loop.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// This file consists of tests meant to exercise the combination of MessageLoop
+// and MessagePump. To use these define the macro RUN_MESSAGE_LOOP_TESTS using
+// an ID appropriate for your MessagePump, e.g.
+// RUN_MESSAGE_LOOP_TESTS(UI, factory). Factory is a function called to create
+// the MessagePump.
+namespace base {
+namespace test {
+
+typedef MessageLoop::MessagePumpFactory MessagePumpFactory;
+
+void RunTest_PostTask(MessagePumpFactory factory);
+void RunTest_PostDelayedTask_Basic(MessagePumpFactory factory);
+void RunTest_PostDelayedTask_InDelayOrder(MessagePumpFactory factory);
+void RunTest_PostDelayedTask_InPostOrder(MessagePumpFactory factory);
+void RunTest_PostDelayedTask_InPostOrder_2(MessagePumpFactory factory);
+void RunTest_PostDelayedTask_InPostOrder_3(MessagePumpFactory factory);
+void RunTest_PostDelayedTask_SharedTimer(MessagePumpFactory factory);
+void RunTest_EnsureDeletion(MessagePumpFactory factory);
+void RunTest_EnsureDeletion_Chain(MessagePumpFactory factory);
+void RunTest_Nesting(MessagePumpFactory factory);
+void RunTest_NestingObserver(MessagePumpFactory factory);
+void RunTest_RecursiveDenial1(MessagePumpFactory factory);
+void RunTest_RecursiveDenial3(MessagePumpFactory factory);
+void RunTest_RecursiveSupport1(MessagePumpFactory factory);
+void RunTest_NonNestableWithNoNesting(MessagePumpFactory factory);
+void RunTest_NonNestableInNestedLoop(MessagePumpFactory factory);
+void RunTest_QuitNow(MessagePumpFactory factory);
+void RunTest_RunLoopQuitTop(MessagePumpFactory factory);
+void RunTest_RunLoopQuitNested(MessagePumpFactory factory);
+void RunTest_RunLoopQuitBogus(MessagePumpFactory factory);
+void RunTest_RunLoopQuitDeep(MessagePumpFactory factory);
+void RunTest_RunLoopQuitOrderBefore(MessagePumpFactory factory);
+void RunTest_RunLoopQuitOrderDuring(MessagePumpFactory factory);
+void RunTest_RunLoopQuitOrderAfter(MessagePumpFactory factory);
+void RunTest_RecursivePosts(MessagePumpFactory factory);
+
+} // namespace test
+} // namespace base
+
+#define RUN_MESSAGE_LOOP_TESTS(id, factory) \
+ TEST(MessageLoopTestType##id, PostTask) { \
+ base::test::RunTest_PostTask(factory); \
+ } \
+ TEST(MessageLoopTestType##id, PostDelayedTask_Basic) { \
+ base::test::RunTest_PostDelayedTask_Basic(factory); \
+ } \
+ TEST(MessageLoopTestType##id, PostDelayedTask_InDelayOrder) { \
+ base::test::RunTest_PostDelayedTask_InDelayOrder(factory); \
+ } \
+ TEST(MessageLoopTestType##id, PostDelayedTask_InPostOrder) { \
+ base::test::RunTest_PostDelayedTask_InPostOrder(factory); \
+ } \
+ TEST(MessageLoopTestType##id, PostDelayedTask_InPostOrder_2) { \
+ base::test::RunTest_PostDelayedTask_InPostOrder_2(factory); \
+ } \
+ TEST(MessageLoopTestType##id, PostDelayedTask_InPostOrder_3) { \
+ base::test::RunTest_PostDelayedTask_InPostOrder_3(factory); \
+ } \
+ TEST(MessageLoopTestType##id, PostDelayedTask_SharedTimer) { \
+ base::test::RunTest_PostDelayedTask_SharedTimer(factory); \
+ } \
+ /* TODO(darin): MessageLoop does not support deleting all tasks in the */ \
+ /* destructor. */ \
+ /* Fails, http://crbug.com/50272. */ \
+ TEST(MessageLoopTestType##id, DISABLED_EnsureDeletion) { \
+ base::test::RunTest_EnsureDeletion(factory); \
+ } \
+ /* TODO(darin): MessageLoop does not support deleting all tasks in the */ \
+ /* destructor. */ \
+ /* Fails, http://crbug.com/50272. */ \
+ TEST(MessageLoopTestType##id, DISABLED_EnsureDeletion_Chain) { \
+ base::test::RunTest_EnsureDeletion_Chain(factory); \
+ } \
+ TEST(MessageLoopTestType##id, Nesting) { \
+ base::test::RunTest_Nesting(factory); \
+ } \
+ TEST(MessageLoopTestType##id, RecursiveDenial1) { \
+ base::test::RunTest_RecursiveDenial1(factory); \
+ } \
+ TEST(MessageLoopTestType##id, RecursiveDenial3) { \
+ base::test::RunTest_RecursiveDenial3(factory); \
+ } \
+ TEST(MessageLoopTestType##id, RecursiveSupport1) { \
+ base::test::RunTest_RecursiveSupport1(factory); \
+ } \
+ TEST(MessageLoopTestType##id, NonNestableWithNoNesting) { \
+ base::test::RunTest_NonNestableWithNoNesting(factory); \
+ } \
+ TEST(MessageLoopTestType##id, NonNestableDelayedInNestedLoop) { \
+ base::test::RunTest_NonNestableInNestedLoop(factory); \
+ } \
+ TEST(MessageLoopTestType##id, QuitNow) { \
+ base::test::RunTest_QuitNow(factory); \
+ } \
+ TEST(MessageLoopTestType##id, RunLoopQuitTop) { \
+ base::test::RunTest_RunLoopQuitTop(factory); \
+ } \
+ TEST(MessageLoopTestType##id, RunLoopQuitNested) { \
+ base::test::RunTest_RunLoopQuitNested(factory); \
+ } \
+ TEST(MessageLoopTestType##id, RunLoopQuitBogus) { \
+ base::test::RunTest_RunLoopQuitBogus(factory); \
+ } \
+ TEST(MessageLoopTestType##id, RunLoopQuitDeep) { \
+ base::test::RunTest_RunLoopQuitDeep(factory); \
+ } \
+ TEST(MessageLoopTestType##id, RunLoopQuitOrderBefore) { \
+ base::test::RunTest_RunLoopQuitOrderBefore(factory); \
+ } \
+ TEST(MessageLoopTestType##id, RunLoopQuitOrderDuring) { \
+ base::test::RunTest_RunLoopQuitOrderDuring(factory); \
+ } \
+ TEST(MessageLoopTestType##id, RunLoopQuitOrderAfter) { \
+ base::test::RunTest_RunLoopQuitOrderAfter(factory); \
+ } \
+ TEST(MessageLoopTestType##id, RecursivePosts) { \
+ base::test::RunTest_RecursivePosts(factory); \
+ } \
+
+#endif // BASE_MESSAGE_LOOP_MESSAGE_LOOP_TEST_H_
diff --git a/libchrome/base/message_loop/message_loop_unittest.cc b/libchrome/base/message_loop/message_loop_unittest.cc
new file mode 100644
index 0000000..52337e3
--- /dev/null
+++ b/libchrome/base/message_loop/message_loop_unittest.cc
@@ -0,0 +1,997 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_test.h"
+#include "base/pending_task.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/test_simple_task_runner.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include "base/message_loop/message_pump_win.h"
+#include "base/process/memory.h"
+#include "base/strings/string16.h"
+#include "base/win/current_module.h"
+#include "base/win/scoped_handle.h"
+#endif
+
+namespace base {
+
+// TODO(darin): Platform-specific MessageLoop tests should be grouped together
+// to avoid chopping this file up with so many #ifdefs.
+
+namespace {
+
+std::unique_ptr<MessagePump> TypeDefaultMessagePumpFactory() {
+ return MessageLoop::CreateMessagePumpForType(MessageLoop::TYPE_DEFAULT);
+}
+
+std::unique_ptr<MessagePump> TypeIOMessagePumpFactory() {
+ return MessageLoop::CreateMessagePumpForType(MessageLoop::TYPE_IO);
+}
+
+std::unique_ptr<MessagePump> TypeUIMessagePumpFactory() {
+ return MessageLoop::CreateMessagePumpForType(MessageLoop::TYPE_UI);
+}
+
+class Foo : public RefCounted<Foo> {
+ public:
+ Foo() : test_count_(0) {
+ }
+
+ void Test1ConstRef(const std::string& a) {
+ ++test_count_;
+ result_.append(a);
+ }
+
+ int test_count() const { return test_count_; }
+ const std::string& result() const { return result_; }
+
+ private:
+ friend class RefCounted<Foo>;
+
+ ~Foo() {}
+
+ int test_count_;
+ std::string result_;
+};
+
+#if defined(OS_WIN)
+
+// This function runs slowly to simulate a large amount of work being done.
+static void SlowFunc(TimeDelta pause, int* quit_counter) {
+ PlatformThread::Sleep(pause);
+ if (--(*quit_counter) == 0)
+ MessageLoop::current()->QuitWhenIdle();
+}
+
+// This function records the time when Run was called in a Time object, which is
+// useful for building a variety of MessageLoop tests.
+static void RecordRunTimeFunc(Time* run_time, int* quit_counter) {
+ *run_time = Time::Now();
+
+ // Cause our Run function to take some time to execute. As a result we can
+ // count on subsequent RecordRunTimeFunc()s running at a future time,
+  // without worrying about the resolution of our system clock being an issue.
+ SlowFunc(TimeDelta::FromMilliseconds(10), quit_counter);
+}
+
+void SubPumpFunc() {
+ MessageLoop::current()->SetNestableTasksAllowed(true);
+ MSG msg;
+ while (GetMessage(&msg, NULL, 0, 0)) {
+ TranslateMessage(&msg);
+ DispatchMessage(&msg);
+ }
+ MessageLoop::current()->QuitWhenIdle();
+}
+
+void RunTest_PostDelayedTask_SharedTimer_SubPump() {
+ MessageLoop loop(MessageLoop::TYPE_UI);
+
+ // Test that the interval of the timer, used to run the next delayed task, is
+ // set to a value corresponding to when the next delayed task should run.
+
+ // By setting num_tasks to 1, we ensure that the first task to run causes the
+ // run loop to exit.
+ int num_tasks = 1;
+ Time run_time;
+
+ loop.PostTask(FROM_HERE, Bind(&SubPumpFunc));
+
+ // This very delayed task should never run.
+ loop.PostDelayedTask(
+ FROM_HERE,
+ Bind(&RecordRunTimeFunc, &run_time, &num_tasks),
+ TimeDelta::FromSeconds(1000));
+
+ // This slightly delayed task should run from within SubPumpFunc.
+ loop.PostDelayedTask(
+ FROM_HERE,
+ Bind(&PostQuitMessage, 0),
+ TimeDelta::FromMilliseconds(10));
+
+ Time start_time = Time::Now();
+
+ loop.Run();
+ EXPECT_EQ(1, num_tasks);
+
+ // Ensure that we ran in far less time than the slower timer.
+ TimeDelta total_time = Time::Now() - start_time;
+ EXPECT_GT(5000, total_time.InMilliseconds());
+
+ // In case both timers somehow run at nearly the same time, sleep a little
+ // and then run all pending to force them both to have run. This is just
+ // encouraging flakiness if there is any.
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+ RunLoop().RunUntilIdle();
+
+ EXPECT_TRUE(run_time.is_null());
+}
+
+const wchar_t kMessageBoxTitle[] = L"MessageLoop Unit Test";
+
+enum TaskType {
+ MESSAGEBOX,
+ ENDDIALOG,
+ RECURSIVE,
+ TIMEDMESSAGELOOP,
+ QUITMESSAGELOOP,
+ ORDERED,
+ PUMPS,
+ SLEEP,
+ RUNS,
+};
+
+// Saves the order in which the tasks executed.
+struct TaskItem {
+ TaskItem(TaskType t, int c, bool s)
+ : type(t),
+ cookie(c),
+ start(s) {
+ }
+
+ TaskType type;
+ int cookie;
+ bool start;
+
+ bool operator == (const TaskItem& other) const {
+ return type == other.type && cookie == other.cookie && start == other.start;
+ }
+};
+
+std::ostream& operator <<(std::ostream& os, TaskType type) {
+ switch (type) {
+ case MESSAGEBOX: os << "MESSAGEBOX"; break;
+ case ENDDIALOG: os << "ENDDIALOG"; break;
+ case RECURSIVE: os << "RECURSIVE"; break;
+ case TIMEDMESSAGELOOP: os << "TIMEDMESSAGELOOP"; break;
+ case QUITMESSAGELOOP: os << "QUITMESSAGELOOP"; break;
+ case ORDERED: os << "ORDERED"; break;
+ case PUMPS: os << "PUMPS"; break;
+ case SLEEP: os << "SLEEP"; break;
+ default:
+ NOTREACHED();
+ os << "Unknown TaskType";
+ break;
+ }
+ return os;
+}
+
+std::ostream& operator <<(std::ostream& os, const TaskItem& item) {
+ if (item.start)
+ return os << item.type << " " << item.cookie << " starts";
+ else
+ return os << item.type << " " << item.cookie << " ends";
+}
+
+class TaskList {
+ public:
+ void RecordStart(TaskType type, int cookie) {
+ TaskItem item(type, cookie, true);
+ DVLOG(1) << item;
+ task_list_.push_back(item);
+ }
+
+ void RecordEnd(TaskType type, int cookie) {
+ TaskItem item(type, cookie, false);
+ DVLOG(1) << item;
+ task_list_.push_back(item);
+ }
+
+ size_t Size() {
+ return task_list_.size();
+ }
+
+ TaskItem Get(int n) {
+ return task_list_[n];
+ }
+
+ private:
+ std::vector<TaskItem> task_list_;
+};
+
+// MessageLoop implicitly starts a "modal message loop". Modal dialog boxes,
+// common controls (like OpenFile) and StartDoc printing function can cause
+// implicit message loops.
+void MessageBoxFunc(TaskList* order, int cookie, bool is_reentrant) {
+ order->RecordStart(MESSAGEBOX, cookie);
+ if (is_reentrant)
+ MessageLoop::current()->SetNestableTasksAllowed(true);
+ MessageBox(NULL, L"Please wait...", kMessageBoxTitle, MB_OK);
+ order->RecordEnd(MESSAGEBOX, cookie);
+}
+
+// Will end the MessageBox.
+void EndDialogFunc(TaskList* order, int cookie) {
+ order->RecordStart(ENDDIALOG, cookie);
+ HWND window = GetActiveWindow();
+ if (window != NULL) {
+ EXPECT_NE(EndDialog(window, IDCONTINUE), 0);
+    // Cheap way to signal that the window wasn't found if RecordEnd() isn't
+ // called.
+ order->RecordEnd(ENDDIALOG, cookie);
+ }
+}
+
+void RecursiveFunc(TaskList* order, int cookie, int depth,
+ bool is_reentrant) {
+ order->RecordStart(RECURSIVE, cookie);
+ if (depth > 0) {
+ if (is_reentrant)
+ MessageLoop::current()->SetNestableTasksAllowed(true);
+ MessageLoop::current()->PostTask(
+ FROM_HERE,
+ Bind(&RecursiveFunc, order, cookie, depth - 1, is_reentrant));
+ }
+ order->RecordEnd(RECURSIVE, cookie);
+}
+
+void QuitFunc(TaskList* order, int cookie) {
+ order->RecordStart(QUITMESSAGELOOP, cookie);
+ MessageLoop::current()->QuitWhenIdle();
+ order->RecordEnd(QUITMESSAGELOOP, cookie);
+}
+
+void RecursiveFuncWin(MessageLoop* target,
+ HANDLE event,
+ bool expect_window,
+ TaskList* order,
+ bool is_reentrant) {
+ target->PostTask(FROM_HERE,
+ Bind(&RecursiveFunc, order, 1, 2, is_reentrant));
+ target->PostTask(FROM_HERE,
+ Bind(&MessageBoxFunc, order, 2, is_reentrant));
+ target->PostTask(FROM_HERE,
+ Bind(&RecursiveFunc, order, 3, 2, is_reentrant));
+ // The trick here is that for recursive task processing, this task will be
+  // run _inside_ the MessageBox message loop, dismissing the MessageBox
+ // without a chance.
+ // For non-recursive task processing, this will be executed _after_ the
+ // MessageBox will have been dismissed by the code below, where
+ // expect_window_ is true.
+ target->PostTask(FROM_HERE,
+ Bind(&EndDialogFunc, order, 4));
+ target->PostTask(FROM_HERE,
+ Bind(&QuitFunc, order, 5));
+
+  // Ensure that every task is sent before starting to run the main thread
+ // message loop.
+ ASSERT_TRUE(SetEvent(event));
+
+ // Poll for the MessageBox. Don't do this at home! At the speed we do it,
+ // you will never realize one MessageBox was shown.
+ for (; expect_window;) {
+ HWND window = FindWindow(L"#32770", kMessageBoxTitle);
+ if (window) {
+ // Dismiss it.
+ for (;;) {
+ HWND button = FindWindowEx(window, NULL, L"Button", NULL);
+ if (button != NULL) {
+ EXPECT_EQ(0, SendMessage(button, WM_LBUTTONDOWN, 0, 0));
+ EXPECT_EQ(0, SendMessage(button, WM_LBUTTONUP, 0, 0));
+ break;
+ }
+ }
+ break;
+ }
+ }
+}
+
+// TODO(darin): These tests need to be ported since they test critical
+// message loop functionality.
+
+// A side effect of this test is the generation of a beep. Sorry.
+void RunTest_RecursiveDenial2(MessageLoop::Type message_loop_type) {
+ MessageLoop loop(message_loop_type);
+
+ Thread worker("RecursiveDenial2_worker");
+ Thread::Options options;
+ options.message_loop_type = message_loop_type;
+ ASSERT_EQ(true, worker.StartWithOptions(options));
+ TaskList order;
+ win::ScopedHandle event(CreateEvent(NULL, FALSE, FALSE, NULL));
+ worker.message_loop()->PostTask(FROM_HERE,
+ Bind(&RecursiveFuncWin,
+ MessageLoop::current(),
+ event.Get(),
+ true,
+ &order,
+ false));
+ // Let the other thread execute.
+ WaitForSingleObject(event.Get(), INFINITE);
+ MessageLoop::current()->Run();
+
+ ASSERT_EQ(17u, order.Size());
+ EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(2), TaskItem(MESSAGEBOX, 2, true));
+ EXPECT_EQ(order.Get(3), TaskItem(MESSAGEBOX, 2, false));
+ EXPECT_EQ(order.Get(4), TaskItem(RECURSIVE, 3, true));
+ EXPECT_EQ(order.Get(5), TaskItem(RECURSIVE, 3, false));
+ // When EndDialogFunc is processed, the window is already dismissed, hence no
+ // "end" entry.
+ EXPECT_EQ(order.Get(6), TaskItem(ENDDIALOG, 4, true));
+ EXPECT_EQ(order.Get(7), TaskItem(QUITMESSAGELOOP, 5, true));
+ EXPECT_EQ(order.Get(8), TaskItem(QUITMESSAGELOOP, 5, false));
+ EXPECT_EQ(order.Get(9), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(10), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(11), TaskItem(RECURSIVE, 3, true));
+ EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 3, false));
+ EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(14), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(15), TaskItem(RECURSIVE, 3, true));
+ EXPECT_EQ(order.Get(16), TaskItem(RECURSIVE, 3, false));
+}
+
+// A side effect of this test is the generation of a beep. Sorry. This test also
+// needs to process windows messages on the current thread.
+void RunTest_RecursiveSupport2(MessageLoop::Type message_loop_type) {
+ MessageLoop loop(message_loop_type);
+
+ Thread worker("RecursiveSupport2_worker");
+ Thread::Options options;
+ options.message_loop_type = message_loop_type;
+ ASSERT_EQ(true, worker.StartWithOptions(options));
+ TaskList order;
+ win::ScopedHandle event(CreateEvent(NULL, FALSE, FALSE, NULL));
+ worker.message_loop()->PostTask(FROM_HERE,
+ Bind(&RecursiveFuncWin,
+ MessageLoop::current(),
+ event.Get(),
+ false,
+ &order,
+ true));
+ // Let the other thread execute.
+ WaitForSingleObject(event.Get(), INFINITE);
+ MessageLoop::current()->Run();
+
+ ASSERT_EQ(18u, order.Size());
+ EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(2), TaskItem(MESSAGEBOX, 2, true));
+ // Note that this executes in the MessageBox modal loop.
+ EXPECT_EQ(order.Get(3), TaskItem(RECURSIVE, 3, true));
+ EXPECT_EQ(order.Get(4), TaskItem(RECURSIVE, 3, false));
+ EXPECT_EQ(order.Get(5), TaskItem(ENDDIALOG, 4, true));
+ EXPECT_EQ(order.Get(6), TaskItem(ENDDIALOG, 4, false));
+ EXPECT_EQ(order.Get(7), TaskItem(MESSAGEBOX, 2, false));
+ /* The order can subtly change here. The reason is that when RecursiveFunc(1)
+ is called in the main thread, if it is faster than getting to the
+ PostTask(FROM_HERE, Bind(&QuitFunc) execution, the order of task
+ execution can change. We don't care anyway that the order isn't correct.
+ EXPECT_EQ(order.Get(8), TaskItem(QUITMESSAGELOOP, 5, true));
+ EXPECT_EQ(order.Get(9), TaskItem(QUITMESSAGELOOP, 5, false));
+ EXPECT_EQ(order.Get(10), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(11), TaskItem(RECURSIVE, 1, false));
+ */
+ EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 3, true));
+ EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 3, false));
+ EXPECT_EQ(order.Get(14), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(15), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(16), TaskItem(RECURSIVE, 3, true));
+ EXPECT_EQ(order.Get(17), TaskItem(RECURSIVE, 3, false));
+}
+
+#endif // defined(OS_WIN)
+
+void PostNTasksThenQuit(int posts_remaining) {
+ if (posts_remaining > 1) {
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE, Bind(&PostNTasksThenQuit, posts_remaining - 1));
+ } else {
+ MessageLoop::current()->QuitWhenIdle();
+ }
+}
+
+#if defined(OS_WIN)
+
+class TestIOHandler : public MessageLoopForIO::IOHandler {
+ public:
+ TestIOHandler(const wchar_t* name, HANDLE signal, bool wait);
+
+ void OnIOCompleted(MessageLoopForIO::IOContext* context,
+ DWORD bytes_transfered,
+ DWORD error) override;
+
+ void Init();
+ void WaitForIO();
+ OVERLAPPED* context() { return &context_.overlapped; }
+ DWORD size() { return sizeof(buffer_); }
+
+ private:
+ char buffer_[48];
+ MessageLoopForIO::IOContext context_;
+ HANDLE signal_;
+ win::ScopedHandle file_;
+ bool wait_;
+};
+
+TestIOHandler::TestIOHandler(const wchar_t* name, HANDLE signal, bool wait)
+ : signal_(signal), wait_(wait) {
+ memset(buffer_, 0, sizeof(buffer_));
+
+ file_.Set(CreateFile(name, GENERIC_READ, 0, NULL, OPEN_EXISTING,
+ FILE_FLAG_OVERLAPPED, NULL));
+ EXPECT_TRUE(file_.IsValid());
+}
+
+void TestIOHandler::Init() {
+ MessageLoopForIO::current()->RegisterIOHandler(file_.Get(), this);
+
+ DWORD read;
+ EXPECT_FALSE(ReadFile(file_.Get(), buffer_, size(), &read, context()));
+ EXPECT_EQ(static_cast<DWORD>(ERROR_IO_PENDING), GetLastError());
+ if (wait_)
+ WaitForIO();
+}
+
+void TestIOHandler::OnIOCompleted(MessageLoopForIO::IOContext* context,
+ DWORD bytes_transfered, DWORD error) {
+ ASSERT_TRUE(context == &context_);
+ ASSERT_TRUE(SetEvent(signal_));
+}
+
+void TestIOHandler::WaitForIO() {
+ EXPECT_TRUE(MessageLoopForIO::current()->WaitForIOCompletion(300, this));
+ EXPECT_TRUE(MessageLoopForIO::current()->WaitForIOCompletion(400, this));
+}
+
+void RunTest_IOHandler() {
+ win::ScopedHandle callback_called(CreateEvent(NULL, TRUE, FALSE, NULL));
+ ASSERT_TRUE(callback_called.IsValid());
+
+ const wchar_t* kPipeName = L"\\\\.\\pipe\\iohandler_pipe";
+ win::ScopedHandle server(
+ CreateNamedPipe(kPipeName, PIPE_ACCESS_OUTBOUND, 0, 1, 0, 0, 0, NULL));
+ ASSERT_TRUE(server.IsValid());
+
+ Thread thread("IOHandler test");
+ Thread::Options options;
+ options.message_loop_type = MessageLoop::TYPE_IO;
+ ASSERT_TRUE(thread.StartWithOptions(options));
+
+ MessageLoop* thread_loop = thread.message_loop();
+ ASSERT_TRUE(NULL != thread_loop);
+
+ TestIOHandler handler(kPipeName, callback_called.Get(), false);
+ thread_loop->PostTask(FROM_HERE, Bind(&TestIOHandler::Init,
+ Unretained(&handler)));
+ // Make sure the thread runs and sleeps for lack of work.
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+
+ const char buffer[] = "Hello there!";
+ DWORD written;
+ EXPECT_TRUE(WriteFile(server.Get(), buffer, sizeof(buffer), &written, NULL));
+
+ DWORD result = WaitForSingleObject(callback_called.Get(), 1000);
+ EXPECT_EQ(WAIT_OBJECT_0, result);
+
+ thread.Stop();
+}
+
+void RunTest_WaitForIO() {
+ win::ScopedHandle callback1_called(
+ CreateEvent(NULL, TRUE, FALSE, NULL));
+ win::ScopedHandle callback2_called(
+ CreateEvent(NULL, TRUE, FALSE, NULL));
+ ASSERT_TRUE(callback1_called.IsValid());
+ ASSERT_TRUE(callback2_called.IsValid());
+
+ const wchar_t* kPipeName1 = L"\\\\.\\pipe\\iohandler_pipe1";
+ const wchar_t* kPipeName2 = L"\\\\.\\pipe\\iohandler_pipe2";
+ win::ScopedHandle server1(
+ CreateNamedPipe(kPipeName1, PIPE_ACCESS_OUTBOUND, 0, 1, 0, 0, 0, NULL));
+ win::ScopedHandle server2(
+ CreateNamedPipe(kPipeName2, PIPE_ACCESS_OUTBOUND, 0, 1, 0, 0, 0, NULL));
+ ASSERT_TRUE(server1.IsValid());
+ ASSERT_TRUE(server2.IsValid());
+
+ Thread thread("IOHandler test");
+ Thread::Options options;
+ options.message_loop_type = MessageLoop::TYPE_IO;
+ ASSERT_TRUE(thread.StartWithOptions(options));
+
+ MessageLoop* thread_loop = thread.message_loop();
+ ASSERT_TRUE(NULL != thread_loop);
+
+ TestIOHandler handler1(kPipeName1, callback1_called.Get(), false);
+ TestIOHandler handler2(kPipeName2, callback2_called.Get(), true);
+ thread_loop->PostTask(FROM_HERE, Bind(&TestIOHandler::Init,
+ Unretained(&handler1)));
+ // TODO(ajwong): Do we really need such long Sleeps in this function?
+ // Make sure the thread runs and sleeps for lack of work.
+ TimeDelta delay = TimeDelta::FromMilliseconds(100);
+ PlatformThread::Sleep(delay);
+ thread_loop->PostTask(FROM_HERE, Bind(&TestIOHandler::Init,
+ Unretained(&handler2)));
+ PlatformThread::Sleep(delay);
+
+ // At this time handler1 is waiting to be called, and the thread is waiting
+ // on the Init method of handler2, filtering only handler2 callbacks.
+
+ const char buffer[] = "Hello there!";
+ DWORD written;
+ EXPECT_TRUE(WriteFile(server1.Get(), buffer, sizeof(buffer), &written, NULL));
+ PlatformThread::Sleep(2 * delay);
+ EXPECT_EQ(static_cast<DWORD>(WAIT_TIMEOUT),
+ WaitForSingleObject(callback1_called.Get(), 0))
+ << "handler1 has not been called";
+
+ EXPECT_TRUE(WriteFile(server2.Get(), buffer, sizeof(buffer), &written, NULL));
+
+ HANDLE objects[2] = { callback1_called.Get(), callback2_called.Get() };
+ DWORD result = WaitForMultipleObjects(2, objects, TRUE, 1000);
+ EXPECT_EQ(WAIT_OBJECT_0, result);
+
+ thread.Stop();
+}
+
+#endif // defined(OS_WIN)
+
+} // namespace
+
+//-----------------------------------------------------------------------------
+// Each test is run against each type of MessageLoop. That way we are sure
+// that message loops work properly in all configurations. Of course, in some
+// cases, a unit test may only be for a particular type of loop.
+
+RUN_MESSAGE_LOOP_TESTS(Default, &TypeDefaultMessagePumpFactory);
+RUN_MESSAGE_LOOP_TESTS(UI, &TypeUIMessagePumpFactory);
+RUN_MESSAGE_LOOP_TESTS(IO, &TypeIOMessagePumpFactory);
+
+#if defined(OS_WIN)
+// Additional set of tests for GPU version of UI message loop.
+RUN_MESSAGE_LOOP_TESTS(GPU, &MessagePumpForGpu::CreateMessagePumpForGpu);
+
+TEST(MessageLoopTest, PostDelayedTask_SharedTimer_SubPump) {
+ RunTest_PostDelayedTask_SharedTimer_SubPump();
+}
+
+// This test occasionally hangs. See http://crbug.com/44567.
+TEST(MessageLoopTest, DISABLED_RecursiveDenial2) {
+ RunTest_RecursiveDenial2(MessageLoop::TYPE_DEFAULT);
+ RunTest_RecursiveDenial2(MessageLoop::TYPE_UI);
+ RunTest_RecursiveDenial2(MessageLoop::TYPE_IO);
+}
+
+TEST(MessageLoopTest, RecursiveSupport2) {
+ // This test requires a UI loop.
+ RunTest_RecursiveSupport2(MessageLoop::TYPE_UI);
+}
+#endif // defined(OS_WIN)
+
+class DummyTaskObserver : public MessageLoop::TaskObserver {
+ public:
+ explicit DummyTaskObserver(int num_tasks)
+ : num_tasks_started_(0),
+ num_tasks_processed_(0),
+ num_tasks_(num_tasks) {}
+
+ ~DummyTaskObserver() override {}
+
+ void WillProcessTask(const PendingTask& pending_task) override {
+ num_tasks_started_++;
+ EXPECT_LE(num_tasks_started_, num_tasks_);
+ EXPECT_EQ(num_tasks_started_, num_tasks_processed_ + 1);
+ }
+
+ void DidProcessTask(const PendingTask& pending_task) override {
+ num_tasks_processed_++;
+ EXPECT_LE(num_tasks_started_, num_tasks_);
+ EXPECT_EQ(num_tasks_started_, num_tasks_processed_);
+ }
+
+ int num_tasks_started() const { return num_tasks_started_; }
+ int num_tasks_processed() const { return num_tasks_processed_; }
+
+ private:
+ int num_tasks_started_;
+ int num_tasks_processed_;
+ const int num_tasks_;
+
+ DISALLOW_COPY_AND_ASSIGN(DummyTaskObserver);
+};
+
+TEST(MessageLoopTest, TaskObserver) {
+ const int kNumPosts = 6;
+ DummyTaskObserver observer(kNumPosts);
+
+ MessageLoop loop;
+ loop.AddTaskObserver(&observer);
+ loop.task_runner()->PostTask(FROM_HERE, Bind(&PostNTasksThenQuit, kNumPosts));
+ RunLoop().Run();
+ loop.RemoveTaskObserver(&observer);
+
+ EXPECT_EQ(kNumPosts, observer.num_tasks_started());
+ EXPECT_EQ(kNumPosts, observer.num_tasks_processed());
+}
+
+#if defined(OS_WIN)
+TEST(MessageLoopTest, IOHandler) {
+ RunTest_IOHandler();
+}
+
+TEST(MessageLoopTest, WaitForIO) {
+ RunTest_WaitForIO();
+}
+
+TEST(MessageLoopTest, HighResolutionTimer) {
+ MessageLoop loop;
+ Time::EnableHighResolutionTimer(true);
+
+ const TimeDelta kFastTimer = TimeDelta::FromMilliseconds(5);
+ const TimeDelta kSlowTimer = TimeDelta::FromMilliseconds(100);
+
+ EXPECT_FALSE(loop.HasHighResolutionTasks());
+ // Post a fast task to enable the high resolution timers.
+ loop.PostDelayedTask(FROM_HERE, Bind(&PostNTasksThenQuit, 1),
+ kFastTimer);
+ EXPECT_TRUE(loop.HasHighResolutionTasks());
+ loop.Run();
+ EXPECT_FALSE(loop.HasHighResolutionTasks());
+ EXPECT_FALSE(Time::IsHighResolutionTimerInUse());
+ // Check that a slow task does not trigger the high resolution logic.
+ loop.PostDelayedTask(FROM_HERE, Bind(&PostNTasksThenQuit, 1),
+ kSlowTimer);
+ EXPECT_FALSE(loop.HasHighResolutionTasks());
+ loop.Run();
+ EXPECT_FALSE(loop.HasHighResolutionTasks());
+ Time::EnableHighResolutionTimer(false);
+}
+
+#endif // defined(OS_WIN)
+
+#if defined(OS_POSIX) && !defined(OS_NACL)
+
+namespace {
+
+class QuitDelegate : public MessageLoopForIO::Watcher {
+ public:
+ void OnFileCanWriteWithoutBlocking(int fd) override {
+ MessageLoop::current()->QuitWhenIdle();
+ }
+ void OnFileCanReadWithoutBlocking(int fd) override {
+ MessageLoop::current()->QuitWhenIdle();
+ }
+};
+
+TEST(MessageLoopTest, FileDescriptorWatcherOutlivesMessageLoop) {
+ // Simulate a MessageLoop that dies before an FileDescriptorWatcher.
+ // This could happen when people use the Singleton pattern or atexit.
+
+ // Create a file descriptor. Doesn't need to be readable or writable,
+ // as we don't need to actually get any notifications.
+ // pipe() is just the easiest way to do it.
+ int pipefds[2];
+ int err = pipe(pipefds);
+ ASSERT_EQ(0, err);
+ int fd = pipefds[1];
+ {
+ // Arrange for controller to live longer than message loop.
+ MessageLoopForIO::FileDescriptorWatcher controller;
+ {
+ MessageLoopForIO message_loop;
+
+ QuitDelegate delegate;
+ message_loop.WatchFileDescriptor(fd,
+ true, MessageLoopForIO::WATCH_WRITE, &controller, &delegate);
+ // and don't run the message loop, just destroy it.
+ }
+ }
+ if (IGNORE_EINTR(close(pipefds[0])) < 0)
+ PLOG(ERROR) << "close";
+ if (IGNORE_EINTR(close(pipefds[1])) < 0)
+ PLOG(ERROR) << "close";
+}
+
+TEST(MessageLoopTest, FileDescriptorWatcherDoubleStop) {
+ // Verify that it's ok to call StopWatchingFileDescriptor().
+ // (Errors only showed up in valgrind.)
+ int pipefds[2];
+ int err = pipe(pipefds);
+ ASSERT_EQ(0, err);
+ int fd = pipefds[1];
+ {
+ // Arrange for message loop to live longer than controller.
+ MessageLoopForIO message_loop;
+ {
+ MessageLoopForIO::FileDescriptorWatcher controller;
+
+ QuitDelegate delegate;
+ message_loop.WatchFileDescriptor(fd,
+ true, MessageLoopForIO::WATCH_WRITE, &controller, &delegate);
+ controller.StopWatchingFileDescriptor();
+ }
+ }
+ if (IGNORE_EINTR(close(pipefds[0])) < 0)
+ PLOG(ERROR) << "close";
+ if (IGNORE_EINTR(close(pipefds[1])) < 0)
+ PLOG(ERROR) << "close";
+}
+
+} // namespace
+
+#endif // defined(OS_POSIX) && !defined(OS_NACL)
+
+namespace {
+// Inject a test point for recording the destructor calls for Closure objects
+// sent to MessageLoop::PostTask(). It is awkward usage since we are trying to
+// hook the actual destruction, which is not a common operation.
+class DestructionObserverProbe :
+ public RefCounted<DestructionObserverProbe> {
+ public:
+ DestructionObserverProbe(bool* task_destroyed,
+ bool* destruction_observer_called)
+ : task_destroyed_(task_destroyed),
+ destruction_observer_called_(destruction_observer_called) {
+ }
+ virtual void Run() {
+ // This task should never run.
+ ADD_FAILURE();
+ }
+ private:
+ friend class RefCounted<DestructionObserverProbe>;
+
+ virtual ~DestructionObserverProbe() {
+ EXPECT_FALSE(*destruction_observer_called_);
+ *task_destroyed_ = true;
+ }
+
+ bool* task_destroyed_;
+ bool* destruction_observer_called_;
+};
+
+class MLDestructionObserver : public MessageLoop::DestructionObserver {
+ public:
+ MLDestructionObserver(bool* task_destroyed, bool* destruction_observer_called)
+ : task_destroyed_(task_destroyed),
+ destruction_observer_called_(destruction_observer_called),
+ task_destroyed_before_message_loop_(false) {
+ }
+ void WillDestroyCurrentMessageLoop() override {
+ task_destroyed_before_message_loop_ = *task_destroyed_;
+ *destruction_observer_called_ = true;
+ }
+ bool task_destroyed_before_message_loop() const {
+ return task_destroyed_before_message_loop_;
+ }
+ private:
+ bool* task_destroyed_;
+ bool* destruction_observer_called_;
+ bool task_destroyed_before_message_loop_;
+};
+
+} // namespace
+
+TEST(MessageLoopTest, DestructionObserverTest) {
+ // Verify that the destruction observer gets called at the very end (after
+ // all the pending tasks have been destroyed).
+ MessageLoop* loop = new MessageLoop;
+ const TimeDelta kDelay = TimeDelta::FromMilliseconds(100);
+
+ bool task_destroyed = false;
+ bool destruction_observer_called = false;
+
+ MLDestructionObserver observer(&task_destroyed, &destruction_observer_called);
+ loop->AddDestructionObserver(&observer);
+ loop->task_runner()->PostDelayedTask(
+ FROM_HERE, Bind(&DestructionObserverProbe::Run,
+ new DestructionObserverProbe(
+ &task_destroyed, &destruction_observer_called)),
+ kDelay);
+ delete loop;
+ EXPECT_TRUE(observer.task_destroyed_before_message_loop());
+ // The task should have been destroyed when we deleted the loop.
+ EXPECT_TRUE(task_destroyed);
+ EXPECT_TRUE(destruction_observer_called);
+}
+
+
+// Verify that MessageLoop sets ThreadMainTaskRunner::current() and it
+// posts tasks on that message loop.
+TEST(MessageLoopTest, ThreadMainTaskRunner) {
+ MessageLoop loop;
+
+ scoped_refptr<Foo> foo(new Foo());
+ std::string a("a");
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(
+ &Foo::Test1ConstRef, foo.get(), a));
+
+ // Post quit task;
+ MessageLoop::current()->task_runner()->PostTask(
+ FROM_HERE,
+ Bind(&MessageLoop::QuitWhenIdle, Unretained(MessageLoop::current())));
+
+ // Now kick things off
+ RunLoop().Run();
+
+ EXPECT_EQ(foo->test_count(), 1);
+ EXPECT_EQ(foo->result(), "a");
+}
+
+TEST(MessageLoopTest, IsType) {
+ MessageLoop loop(MessageLoop::TYPE_UI);
+ EXPECT_TRUE(loop.IsType(MessageLoop::TYPE_UI));
+ EXPECT_FALSE(loop.IsType(MessageLoop::TYPE_IO));
+ EXPECT_FALSE(loop.IsType(MessageLoop::TYPE_DEFAULT));
+}
+
+#if defined(OS_WIN)
+void EmptyFunction() {}
+
+void PostMultipleTasks() {
+ MessageLoop::current()->PostTask(FROM_HERE, base::Bind(&EmptyFunction));
+ MessageLoop::current()->PostTask(FROM_HERE, base::Bind(&EmptyFunction));
+}
+
+static const int kSignalMsg = WM_USER + 2;
+
+void PostWindowsMessage(HWND message_hwnd) {
+ PostMessage(message_hwnd, kSignalMsg, 0, 2);
+}
+
+void EndTest(bool* did_run, HWND hwnd) {
+ *did_run = true;
+ PostMessage(hwnd, WM_CLOSE, 0, 0);
+}
+
+int kMyMessageFilterCode = 0x5002;
+
+LRESULT CALLBACK TestWndProcThunk(HWND hwnd, UINT message,
+ WPARAM wparam, LPARAM lparam) {
+ if (message == WM_CLOSE)
+ EXPECT_TRUE(DestroyWindow(hwnd));
+ if (message != kSignalMsg)
+ return DefWindowProc(hwnd, message, wparam, lparam);
+
+ switch (lparam) {
+ case 1:
+ // First, we post a task that will post multiple no-op tasks to make sure
+ // that the pump's incoming task queue does not become empty during the
+ // test.
+ MessageLoop::current()->PostTask(FROM_HERE, base::Bind(&PostMultipleTasks));
+ // Next, we post a task that posts a windows message to trigger the second
+ // stage of the test.
+ MessageLoop::current()->PostTask(FROM_HERE,
+ base::Bind(&PostWindowsMessage, hwnd));
+ break;
+ case 2:
+ // Since we're about to enter a modal loop, tell the message loop that we
+ // intend to nest tasks.
+ MessageLoop::current()->SetNestableTasksAllowed(true);
+ bool did_run = false;
+ MessageLoop::current()->PostTask(FROM_HERE,
+ base::Bind(&EndTest, &did_run, hwnd));
+ // Run a nested windows-style message loop and verify that our task runs. If
+ // it doesn't, then we'll loop here until the test times out.
+ MSG msg;
+ while (GetMessage(&msg, 0, 0, 0)) {
+ if (!CallMsgFilter(&msg, kMyMessageFilterCode))
+ DispatchMessage(&msg);
+ // If this message is a WM_CLOSE, explicitly exit the modal loop. Posting
+ // a WM_QUIT should handle this, but unfortunately MessagePumpWin eats
+ // WM_QUIT messages even when running inside a modal loop.
+ if (msg.message == WM_CLOSE)
+ break;
+ }
+ EXPECT_TRUE(did_run);
+ MessageLoop::current()->QuitWhenIdle();
+ break;
+ }
+ return 0;
+}
+
+TEST(MessageLoopTest, AlwaysHaveUserMessageWhenNesting) {
+ MessageLoop loop(MessageLoop::TYPE_UI);
+ HINSTANCE instance = CURRENT_MODULE();
+ WNDCLASSEX wc = {0};
+ wc.cbSize = sizeof(wc);
+ wc.lpfnWndProc = TestWndProcThunk;
+ wc.hInstance = instance;
+ wc.lpszClassName = L"MessageLoopTest_HWND";
+ ATOM atom = RegisterClassEx(&wc);
+ ASSERT_TRUE(atom);
+
+ HWND message_hwnd = CreateWindow(MAKEINTATOM(atom), 0, 0, 0, 0, 0, 0,
+ HWND_MESSAGE, 0, instance, 0);
+ ASSERT_TRUE(message_hwnd) << GetLastError();
+
+ ASSERT_TRUE(PostMessage(message_hwnd, kSignalMsg, 0, 1));
+
+ loop.Run();
+
+ ASSERT_TRUE(UnregisterClass(MAKEINTATOM(atom), instance));
+}
+#endif // defined(OS_WIN)
+
+TEST(MessageLoopTest, SetTaskRunner) {
+ MessageLoop loop;
+ scoped_refptr<SingleThreadTaskRunner> new_runner(new TestSimpleTaskRunner());
+
+ loop.SetTaskRunner(new_runner);
+ EXPECT_EQ(new_runner, loop.task_runner());
+ EXPECT_EQ(new_runner, ThreadTaskRunnerHandle::Get());
+}
+
+TEST(MessageLoopTest, OriginalRunnerWorks) {
+ MessageLoop loop;
+ scoped_refptr<SingleThreadTaskRunner> new_runner(new TestSimpleTaskRunner());
+ scoped_refptr<SingleThreadTaskRunner> original_runner(loop.task_runner());
+ loop.SetTaskRunner(new_runner);
+
+ scoped_refptr<Foo> foo(new Foo());
+ original_runner->PostTask(FROM_HERE,
+ Bind(&Foo::Test1ConstRef, foo.get(), "a"));
+ RunLoop().RunUntilIdle();
+ EXPECT_EQ(1, foo->test_count());
+}
+
+TEST(MessageLoopTest, DeleteUnboundLoop) {
+ // It should be possible to delete an unbound message loop on a thread which
+ // already has another active loop. This happens when thread creation fails.
+ MessageLoop loop;
+ std::unique_ptr<MessageLoop> unbound_loop(MessageLoop::CreateUnbound(
+ MessageLoop::TYPE_DEFAULT, MessageLoop::MessagePumpFactoryCallback()));
+ unbound_loop.reset();
+ EXPECT_EQ(&loop, MessageLoop::current());
+ EXPECT_EQ(loop.task_runner(), ThreadTaskRunnerHandle::Get());
+}
+
+TEST(MessageLoopTest, ThreadName) {
+ {
+ std::string kThreadName("foo");
+ MessageLoop loop;
+ PlatformThread::SetName(kThreadName);
+ EXPECT_EQ(kThreadName, loop.GetThreadName());
+ }
+
+ {
+ std::string kThreadName("bar");
+ base::Thread thread(kThreadName);
+ ASSERT_TRUE(thread.StartAndWaitForTesting());
+ EXPECT_EQ(kThreadName, thread.message_loop()->GetThreadName());
+ }
+}
+
+} // namespace base
diff --git a/libchrome/base/message_loop/message_pump.cc b/libchrome/base/message_loop/message_pump.cc
new file mode 100644
index 0000000..2f740f2
--- /dev/null
+++ b/libchrome/base/message_loop/message_pump.cc
@@ -0,0 +1,25 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump.h"
+
+namespace base {
+
+MessagePump::MessagePump() {
+}
+
+MessagePump::~MessagePump() {
+}
+
+// Default no-op: pumps that cannot coalesce timers simply ignore the slack
+// hint.
+void MessagePump::SetTimerSlack(TimerSlack) {
+}
+
+#if defined(OS_WIN)
+// Diagnostic-only hook (see the header); the base implementation is not
+// expected to be called, so it asserts and reports "not signaled".
+// NOTE(review): NOTREACHED() is declared in base/logging.h, which this file
+// does not include directly -- presumably pulled in transitively; verify.
+bool MessagePump::WasSignaled() {
+  NOTREACHED();
+  return false;
+}
+#endif
+
+} // namespace base
diff --git a/libchrome/base/message_loop/message_pump.h b/libchrome/base/message_loop/message_pump.h
new file mode 100644
index 0000000..af8ed41
--- /dev/null
+++ b/libchrome/base/message_loop/message_pump.h
@@ -0,0 +1,140 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_H_
+
+#include "base/base_export.h"
+#include "base/message_loop/timer_slack.h"
+#include "base/threading/non_thread_safe.h"
+
+namespace base {
+
+class TimeTicks;
+
+// Abstract interface for a message pump.  Concrete pumps implement
+// Run()/Quit() plus the cross-thread ScheduleWork() wakeup, and call back
+// into a Delegate to execute posted, delayed, and idle work.
+class BASE_EXPORT MessagePump : public NonThreadSafe {
+ public:
+  // Please see the comments above the Run method for an illustration of how
+  // these delegate methods are used.
+  class BASE_EXPORT Delegate {
+   public:
+    virtual ~Delegate() {}
+
+    // Called from within Run in response to ScheduleWork or when the message
+    // pump would otherwise call DoDelayedWork. Returns true to indicate that
+    // work was done. DoDelayedWork will still be called if DoWork returns
+    // true, but DoIdleWork will not.
+    virtual bool DoWork() = 0;
+
+    // Called from within Run in response to ScheduleDelayedWork or when the
+    // message pump would otherwise sleep waiting for more work. Returns true
+    // to indicate that delayed work was done. DoIdleWork will not be called
+    // if DoDelayedWork returns true. Upon return |next_delayed_work_time|
+    // indicates the time when DoDelayedWork should be called again. If
+    // |next_delayed_work_time| is null (per Time::is_null), then the queue of
+    // future delayed work (timer events) is currently empty, and no additional
+    // calls to this function need to be scheduled.
+    virtual bool DoDelayedWork(TimeTicks* next_delayed_work_time) = 0;
+
+    // Called from within Run just before the message pump goes to sleep.
+    // Returns true to indicate that idle work was done. Returning false means
+    // the pump will now wait.
+    virtual bool DoIdleWork() = 0;
+  };
+
+  MessagePump();
+  virtual ~MessagePump();
+
+  // The Run method is called to enter the message pump's run loop.
+  //
+  // Within the method, the message pump is responsible for processing native
+  // messages as well as for giving cycles to the delegate periodically. The
+  // message pump should take care to mix delegate callbacks with native
+  // message processing so neither type of event starves the other of cycles.
+  //
+  // The anatomy of a typical run loop:
+  //
+  //   for (;;) {
+  //     bool did_work = DoInternalWork();
+  //     if (should_quit_)
+  //       break;
+  //
+  //     did_work |= delegate_->DoWork();
+  //     if (should_quit_)
+  //       break;
+  //
+  //     TimeTicks next_time;
+  //     did_work |= delegate_->DoDelayedWork(&next_time);
+  //     if (should_quit_)
+  //       break;
+  //
+  //     if (did_work)
+  //       continue;
+  //
+  //     did_work = delegate_->DoIdleWork();
+  //     if (should_quit_)
+  //       break;
+  //
+  //     if (did_work)
+  //       continue;
+  //
+  //     WaitForWork();
+  //   }
+  //
+  // Here, DoInternalWork is some private method of the message pump that is
+  // responsible for dispatching the next UI message or notifying the next IO
+  // completion (for example).  WaitForWork is a private method that simply
+  // blocks until there is more work of any type to do.
+  //
+  // Notice that the run loop cycles between calling DoInternalWork, DoWork,
+  // and DoDelayedWork methods.  This helps ensure that none of these work
+  // queues starve the others.  This is important for message pumps that are
+  // used to drive animations, for example.
+  //
+  // Notice also that after each callout to foreign code, the run loop checks
+  // to see if it should quit.  The Quit method is responsible for setting this
+  // flag.  No further work is done once the quit flag is set.
+  //
+  // NOTE: Care must be taken to handle Run being called again from within any
+  // of the callouts to foreign code.  Native message pumps may also need to
+  // deal with other native message pumps being run outside their control
+  // (e.g., the MessageBox API on Windows pumps UI messages!).  To be specific,
+  // the callouts (DoWork and DoDelayedWork) MUST still be provided even in
+  // nested sub-loops that are "seemingly" outside the control of this message
+  // pump.  DoWork in particular must never be starved for time slices unless
+  // it returns false (meaning it has run out of things to do).
+  //
+  virtual void Run(Delegate* delegate) = 0;
+
+  // Quit immediately from the most recently entered run loop.  This method may
+  // only be used on the thread that called Run.
+  virtual void Quit() = 0;
+
+  // Schedule a DoWork callback to happen reasonably soon.  Does nothing if a
+  // DoWork callback is already scheduled.  This method may be called from any
+  // thread.  Once this call is made, DoWork should not be "starved" at least
+  // until it returns a value of false.
+  virtual void ScheduleWork() = 0;
+
+  // Schedule a DoDelayedWork callback to happen at the specified time,
+  // cancelling any pending DoDelayedWork callback.  This method may only be
+  // used on the thread that called Run.
+  virtual void ScheduleDelayedWork(const TimeTicks& delayed_work_time) = 0;
+
+  // Sets the timer slack to the specified value.
+  virtual void SetTimerSlack(TimerSlack timer_slack);
+
+#if defined(OS_WIN)
+  // TODO (stanisc): crbug.com/596190: Remove this after the signaling issue
+  // has been investigated.
+  // This should be used for diagnostic only. If message pump wake-up mechanism
+  // is based on auto-reset event this call would reset the event to unset
+  // state.
+  virtual bool WasSignaled();
+#endif
+};
+
+} // namespace base
+
+#endif // BASE_MESSAGE_LOOP_MESSAGE_PUMP_H_
diff --git a/libchrome/base/message_loop/message_pump_default.cc b/libchrome/base/message_loop/message_pump_default.cc
new file mode 100644
index 0000000..3449aec
--- /dev/null
+++ b/libchrome/base/message_loop/message_pump_default.cc
@@ -0,0 +1,115 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump_default.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#endif
+
+namespace base {
+
+// The wakeup event is auto-reset and starts unsignaled, so each Signal()
+// releases exactly one Wait() in Run() with no manual Reset() needed.
+MessagePumpDefault::MessagePumpDefault()
+    : keep_running_(true),
+      event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+             WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+MessagePumpDefault::~MessagePumpDefault() {
+}
+
+// Drives the canonical pump loop documented in message_pump.h: DoWork,
+// DoDelayedWork, then DoIdleWork; if nothing was done, sleep on |event_|
+// until ScheduleWork() signals it or the delayed-work deadline arrives.
+void MessagePumpDefault::Run(Delegate* delegate) {
+  DCHECK(keep_running_) << "Quit must have been called outside of Run!";
+
+  for (;;) {
+#if defined(OS_MACOSX)
+    mac::ScopedNSAutoreleasePool autorelease_pool;
+#endif
+
+    bool did_work = delegate->DoWork();
+    if (!keep_running_)
+      break;
+
+    did_work |= delegate->DoDelayedWork(&delayed_work_time_);
+    if (!keep_running_)
+      break;
+
+    if (did_work)
+      continue;
+
+    did_work = delegate->DoIdleWork();
+    if (!keep_running_)
+      break;
+
+    if (did_work)
+      continue;
+
+    // Nothing to do: this thread is about to block, which is normally
+    // disallowed; explicitly permit it for the pump's own sleep.
+    ThreadRestrictions::ScopedAllowWait allow_wait;
+    if (delayed_work_time_.is_null()) {
+      // No delayed work pending: sleep until ScheduleWork() signals us.
+      event_.Wait();
+    } else {
+      TimeDelta delay = delayed_work_time_ - TimeTicks::Now();
+      if (delay > TimeDelta()) {
+#if defined(OS_WIN)
+        // TODO(stanisc): crbug.com/623223: Consider moving the OS_WIN specific
+        // logic into TimedWait implementation in waitable_event_win.cc.
+
+        // crbug.com/487724: on Windows, waiting for less than 1 ms results in
+        // returning from TimedWait promptly and spinning
+        // MessagePumpDefault::Run loop for up to 1 ms - until it is time to
+        // run a delayed task. |min_delay| is the minimum possible wait to
+        // avoid the spinning.
+        constexpr TimeDelta min_delay = TimeDelta::FromMilliseconds(1);
+        do {
+          delay = std::max(delay, min_delay);
+          if (event_.TimedWait(delay))
+            break;
+
+          // TimedWait can time out earlier than the specified |delay| on
+          // Windows. It doesn't make sense to run the outer loop in that case
+          // because there isn't going to be any new work. It is less overhead
+          // to just go back to wait.
+          // In practice this inner wait loop might have up to 3 iterations.
+          delay = delayed_work_time_ - TimeTicks::Now();
+        } while (delay > TimeDelta());
+#else
+        event_.TimedWait(delay);
+#endif
+      } else {
+        // It looks like delayed_work_time_ indicates a time in the past, so we
+        // need to call DoDelayedWork now.
+        delayed_work_time_ = TimeTicks();
+      }
+    }
+    // Since event_ is auto-reset, we don't need to do anything special here
+    // other than service each delegate method.
+  }
+
+  // Re-arm the flag so a subsequent Run() on this pump works after Quit().
+  keep_running_ = true;
+}
+
+// Requests exit from the innermost Run(); checked after each delegate
+// callout.  Must be called on the thread running Run().
+void MessagePumpDefault::Quit() {
+  keep_running_ = false;
+}
+
+void MessagePumpDefault::ScheduleWork() {
+  // Since this can be called on any thread, we need to ensure that our Run
+  // loop wakes up.
+  event_.Signal();
+}
+
+void MessagePumpDefault::ScheduleDelayedWork(
+    const TimeTicks& delayed_work_time) {
+  // We know that we can't be blocked on Wait right now since this method can
+  // only be called on the same thread as Run, so we only need to update our
+  // record of how long to sleep when we do sleep.
+  delayed_work_time_ = delayed_work_time;
+}
+
+} // namespace base
diff --git a/libchrome/base/message_loop/message_pump_default.h b/libchrome/base/message_loop/message_pump_default.h
new file mode 100644
index 0000000..4cd7cd1
--- /dev/null
+++ b/libchrome/base/message_loop/message_pump_default.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_DEFAULT_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_DEFAULT_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/message_loop/message_pump.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// MessagePump implementation with no native event source: it simply sleeps
+// on a WaitableEvent until ScheduleWork() signals it or the next
+// delayed-work time arrives.
+class BASE_EXPORT MessagePumpDefault : public MessagePump {
+ public:
+  MessagePumpDefault();
+  ~MessagePumpDefault() override;
+
+  // MessagePump methods:
+  void Run(Delegate* delegate) override;
+  void Quit() override;
+  void ScheduleWork() override;
+  void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
+
+ private:
+  // This flag is set to false when Run should return.
+  bool keep_running_;
+
+  // Used to sleep until there is more work to do.
+  WaitableEvent event_;
+
+  // The time at which we should call DoDelayedWork.
+  TimeTicks delayed_work_time_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpDefault);
+};
+
+} // namespace base
+
+#endif // BASE_MESSAGE_LOOP_MESSAGE_PUMP_DEFAULT_H_
diff --git a/libchrome/base/message_loop/message_pump_glib.cc b/libchrome/base/message_loop/message_pump_glib.cc
new file mode 100644
index 0000000..fd23745
--- /dev/null
+++ b/libchrome/base/message_loop/message_pump_glib.cc
@@ -0,0 +1,363 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump_glib.h"
+
+#include <fcntl.h>
+#include <math.h>
+
+#include <glib.h>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+namespace {
+
+// Return a timeout suitable for the glib loop, -1 to block forever,
+// 0 to return right away, or a timeout in milliseconds from now.
+// Return a timeout suitable for the glib loop, -1 to block forever,
+// 0 to return right away, or a timeout in milliseconds from now.
+int GetTimeIntervalMilliseconds(const TimeTicks& from) {
+  // A null TimeTicks means "no delayed work scheduled": block indefinitely.
+  if (from.is_null())
+    return -1;
+
+  // Be careful here. TimeDelta has a precision of microseconds, but we want a
+  // value in milliseconds. If there are 5.5ms left, should the delay be 5 or
+  // 6? It should be 6 to avoid executing delayed work too early.
+  int delay = static_cast<int>(
+      ceil((from - TimeTicks::Now()).InMillisecondsF()));
+
+  // If this value is negative, then we need to run delayed work soon.
+  return delay < 0 ? 0 : delay;
+}
+
+// A brief refresher on GLib:
+// GLib sources have four callbacks: Prepare, Check, Dispatch and Finalize.
+// On each iteration of the GLib pump, it calls each source's Prepare function.
+// This function should return TRUE if it wants GLib to call its Dispatch, and
+// FALSE otherwise. It can also set a timeout in this case for the next time
+// Prepare should be called again (it may be called sooner).
+// After the Prepare calls, GLib does a poll to check for events from the
+// system. File descriptors can be attached to the sources. The poll may block
+// if none of the Prepare calls returned TRUE. It will block indefinitely, or
+// by the minimum time returned by a source in Prepare.
+// After the poll, GLib calls Check for each source that returned FALSE
+// from Prepare. The return value of Check has the same meaning as for Prepare,
+// making Check a second chance to tell GLib we are ready for Dispatch.
+// Finally, GLib calls Dispatch for each source that is ready. If Dispatch
+// returns FALSE, GLib will destroy the source. Dispatch calls may be recursive
+// (i.e., you can call Run from them), but Prepare and Check cannot.
+// Finalize is called when the source is destroyed.
+// NOTE: It is common for subsystems to want to process pending events while
+// doing intensive work, for example the flash plugin. They usually use the
+// following pattern (recommended by the GTK docs):
+// while (gtk_events_pending()) {
+// gtk_main_iteration();
+// }
+//
+// gtk_events_pending just calls g_main_context_pending, which does the
+// following:
+// - Call prepare on all the sources.
+// - Do the poll with a timeout of 0 (not blocking).
+// - Call check on all the sources.
+// - *Does not* call dispatch on the sources.
+// - Return true if any of prepare() or check() returned true.
+//
+// gtk_main_iteration just calls g_main_context_iteration, which does the whole
+// thing, respecting the timeout for the poll (and block, although it is
+// expected not to if gtk_events_pending returned true), and call dispatch.
+//
+// Thus it is important to only return true from prepare or check if we
+// actually have events or work to do. We also need to make sure we keep
+// internal state consistent so that if prepare/check return true when called
+// from gtk_events_pending, they will still return true when called right
+// after, from gtk_main_iteration.
+//
+// For the GLib pump we try to follow the Windows UI pump model:
+// - Whenever we receive a wakeup event or the timer for delayed work expires,
+// we run DoWork and/or DoDelayedWork. That part will also run in the other
+// event pumps.
+// - We also run DoWork, DoDelayedWork, and possibly DoIdleWork in the main
+// loop, around event handling.
+
+// GSource subclass that carries a back-pointer to the owning pump so the
+// C-style glib callbacks below can forward into it.
+struct WorkSource : public GSource {
+  MessagePumpGlib* pump;
+};
+
+// glib "prepare" callback: forwards to MessagePumpGlib::HandlePrepare for
+// the poll timeout.
+gboolean WorkSourcePrepare(GSource* source,
+                           gint* timeout_ms) {
+  *timeout_ms = static_cast<WorkSource*>(source)->pump->HandlePrepare();
+  // We always return FALSE, so that our timeout is honored.  If we were
+  // to return TRUE, the timeout would be considered to be 0 and the poll
+  // would never block.  Once the poll is finished, Check will be called.
+  return FALSE;
+}
+
+// glib "check" callback: forwards to MessagePumpGlib::HandleCheck.
+gboolean WorkSourceCheck(GSource* source) {
+  // Only return TRUE if Dispatch should be called.
+  return static_cast<WorkSource*>(source)->pump->HandleCheck();
+}
+
+// glib "dispatch" callback: forwards to MessagePumpGlib::HandleDispatch.
+gboolean WorkSourceDispatch(GSource* source,
+                            GSourceFunc unused_func,
+                            gpointer unused_data) {
+
+  static_cast<WorkSource*>(source)->pump->HandleDispatch();
+  // Always return TRUE so our source stays registered.
+  return TRUE;
+}
+
+// I wish these could be const, but g_source_new wants non-const.
+GSourceFuncs WorkSourceFuncs = {
+  WorkSourcePrepare,
+  WorkSourceCheck,
+  WorkSourceDispatch,
+  NULL
+};
+
+// The following is used to make sure we only run the MessagePumpGlib on one
+// thread. X only has one message pump so we can only have one UI loop per
+// process.
+#ifndef NDEBUG
+
+// Tracks the most recent pump that has been run.
+struct ThreadInfo {
+  // The pump.
+  MessagePumpGlib* pump;
+
+  // ID of the thread the pump was run on.
+  PlatformThreadId thread_id;
+};
+
+// Used for accessing |thread_info|.
+static LazyInstance<Lock>::Leaky thread_info_lock = LAZY_INSTANCE_INITIALIZER;
+
+// If non-NULL it means a MessagePumpGlib exists and has been Run. This is
+// destroyed when the MessagePump is destroyed.
+ThreadInfo* thread_info = NULL;
+
+// Records |pump| and the current thread id on first use, then DCHECKs that
+// every subsequent Run happens on that same thread.
+void CheckThread(MessagePumpGlib* pump) {
+  AutoLock auto_lock(thread_info_lock.Get());
+  if (!thread_info) {
+    thread_info = new ThreadInfo;
+    thread_info->pump = pump;
+    thread_info->thread_id = PlatformThread::CurrentId();
+  }
+  DCHECK(thread_info->thread_id == PlatformThread::CurrentId()) <<
+      "Running MessagePumpGlib on two different threads; "
+      "this is unsupported by GLib!";
+}
+
+// Clears the recorded thread info when the pump that created it is destroyed.
+void PumpDestroyed(MessagePumpGlib* pump) {
+  AutoLock auto_lock(thread_info_lock.Get());
+  if (thread_info && thread_info->pump == pump) {
+    delete thread_info;
+    thread_info = NULL;
+  }
+}
+
+#endif
+
+} // namespace
+
+// Per-Run() state; a fresh instance lives on the stack of each (possibly
+// nested) Run() invocation, with |state_| pointing at the innermost one.
+struct MessagePumpGlib::RunState {
+  Delegate* delegate;
+
+  // Used to flag that the current Run() invocation should return ASAP.
+  bool should_quit;
+
+  // Used to count how many Run() invocations are on the stack.
+  int run_depth;
+
+  // This keeps the state of whether the pump got signaled that there was new
+  // work to be done. Since we eat the message on the wake up pipe as soon as
+  // we get it, we keep that state here to stay consistent.
+  bool has_work;
+};
+
+// Sets up the wakeup pipe and registers the work GSource on the default
+// glib context, which is polled by Run() / g_main_context_iteration.
+MessagePumpGlib::MessagePumpGlib()
+    : state_(NULL),
+      context_(g_main_context_default()),
+      wakeup_gpollfd_(new GPollFD) {
+  // Create our wakeup pipe, which is used to flag when work was scheduled.
+  int fds[2];
+  int ret = pipe(fds);
+  DCHECK_EQ(ret, 0);
+  (void)ret;  // Prevent warning in release mode.
+
+  wakeup_pipe_read_ = fds[0];
+  wakeup_pipe_write_ = fds[1];
+  // Poll the read end for readability so the glib poll wakes on ScheduleWork.
+  wakeup_gpollfd_->fd = wakeup_pipe_read_;
+  wakeup_gpollfd_->events = G_IO_IN;
+
+  work_source_ = g_source_new(&WorkSourceFuncs, sizeof(WorkSource));
+  static_cast<WorkSource*>(work_source_)->pump = this;
+  g_source_add_poll(work_source_, wakeup_gpollfd_.get());
+  // Use a low priority so that we let other events in the queue go first.
+  g_source_set_priority(work_source_, G_PRIORITY_DEFAULT_IDLE);
+  // This is needed to allow Run calls inside Dispatch.
+  g_source_set_can_recurse(work_source_, TRUE);
+  g_source_attach(work_source_, context_);
+}
+
+// Tears down the GSource (destroy detaches it from the context before the
+// final unref) and closes both ends of the wakeup pipe.
+MessagePumpGlib::~MessagePumpGlib() {
+#ifndef NDEBUG
+  PumpDestroyed(this);
+#endif
+  g_source_destroy(work_source_);
+  g_source_unref(work_source_);
+  close(wakeup_pipe_read_);
+  close(wakeup_pipe_write_);
+}
+
+// Return the timeout we want passed to poll.
+int MessagePumpGlib::HandlePrepare() {
+ // We know we have work, but we haven't called HandleDispatch yet. Don't let
+ // the pump block so that we can do some processing.
+ if (state_ && // state_ may be null during tests.
+ state_->has_work)
+ return 0;
+
+ // We don't think we have work to do, but make sure not to block
+ // longer than the next time we need to run delayed work.
+ return GetTimeIntervalMilliseconds(delayed_work_time_);
+}
+
+// Called after the glib poll; returns true iff HandleDispatch should run.
+// Drains the wakeup pipe and converts its bytes into the |has_work| flag.
+bool MessagePumpGlib::HandleCheck() {
+  if (!state_)  // state_ may be null during tests.
+    return false;
+
+  // We usually have a single message on the wakeup pipe, since we are only
+  // signaled when the queue went from empty to non-empty, but there can be
+  // two messages if a task posted a task, hence we read at most two bytes.
+  // The glib poll will tell us whether there was data, so this read
+  // shouldn't block.
+  if (wakeup_gpollfd_->revents & G_IO_IN) {
+    char msg[2];
+    const int num_bytes = HANDLE_EINTR(read(wakeup_pipe_read_, msg, 2));
+    if (num_bytes < 1) {
+      NOTREACHED() << "Error reading from the wakeup pipe.";
+    }
+    // Every byte written by ScheduleWork() is '!'.
+    DCHECK((num_bytes == 1 && msg[0] == '!') ||
+           (num_bytes == 2 && msg[0] == '!' && msg[1] == '!'));
+    // Since we ate the message, we need to record that we have more work,
+    // because HandleCheck() may be called without HandleDispatch being called
+    // afterwards.
+    state_->has_work = true;
+  }
+
+  if (state_->has_work)
+    return true;
+
+  if (GetTimeIntervalMilliseconds(delayed_work_time_) == 0) {
+    // The timer has expired. That condition will stay true until we process
+    // that delayed work, so we don't need to record this differently.
+    return true;
+  }
+
+  return false;
+}
+
+// Runs one unit of delegate work from within the glib dispatch phase, then
+// services delayed work unless the delegate asked to quit.
+void MessagePumpGlib::HandleDispatch() {
+  state_->has_work = false;
+  if (state_->delegate->DoWork()) {
+    // NOTE: on Windows at this point we would call ScheduleWork (see
+    // MessagePumpGlib::HandleWorkMessage in message_pump_win.cc). But here,
+    // instead of posting a message on the wakeup pipe, we can avoid the
+    // syscalls and just signal that we have more work.
+    state_->has_work = true;
+  }
+
+  if (state_->should_quit)
+    return;
+
+  state_->delegate->DoDelayedWork(&delayed_work_time_);
+}
+
+// Runs the pump by iterating the default glib context ourselves, saving and
+// restoring |state_| so Run() may nest (e.g. from within Dispatch).
+void MessagePumpGlib::Run(Delegate* delegate) {
+#ifndef NDEBUG
+  CheckThread(this);
+#endif
+
+  RunState state;
+  state.delegate = delegate;
+  state.should_quit = false;
+  state.run_depth = state_ ? state_->run_depth + 1 : 1;
+  state.has_work = false;
+
+  RunState* previous_state = state_;
+  state_ = &state;
+
+  // We really only do a single task for each iteration of the loop.  If we
+  // have done something, assume there is likely something more to do.  This
+  // will mean that we don't block on the message pump until there was nothing
+  // more to do.  We also set this to true to make sure not to block on the
+  // first iteration of the loop, so RunUntilIdle() works correctly.
+  bool more_work_is_plausible = true;
+
+  // We run our own loop instead of using g_main_loop_quit in one of the
+  // callbacks.  This is so we only quit our own loops, and we don't quit
+  // nested loops run by others.  TODO(deanm): Is this what we want?
+  for (;;) {
+    // Don't block if we think we have more work to do.
+    bool block = !more_work_is_plausible;
+
+    more_work_is_plausible = g_main_context_iteration(context_, block);
+    if (state_->should_quit)
+      break;
+
+    more_work_is_plausible |= state_->delegate->DoWork();
+    if (state_->should_quit)
+      break;
+
+    more_work_is_plausible |=
+        state_->delegate->DoDelayedWork(&delayed_work_time_);
+    if (state_->should_quit)
+      break;
+
+    if (more_work_is_plausible)
+      continue;
+
+    // Nothing pending: give idle work a chance before blocking in the next
+    // g_main_context_iteration.
+    more_work_is_plausible = state_->delegate->DoIdleWork();
+    if (state_->should_quit)
+      break;
+  }
+
+  // Restore the outer Run()'s state (NULL if this was the outermost Run).
+  state_ = previous_state;
+}
+
+// Flags the innermost Run() invocation to return; must be called while a
+// Run() is on the stack.
+void MessagePumpGlib::Quit() {
+  if (state_) {
+    state_->should_quit = true;
+  } else {
+    NOTREACHED() << "Quit called outside Run!";
+  }
+}
+
+void MessagePumpGlib::ScheduleWork() {
+  // This can be called on any thread, so we don't want to touch any state
+  // variables as we would then need locks all over.  This ensures that if
+  // we are sleeping in a poll that we will wake up.
+  char msg = '!';
+  if (HANDLE_EINTR(write(wakeup_pipe_write_, &msg, 1)) != 1) {
+    NOTREACHED() << "Could not write to the UI message loop wakeup pipe!";
+  }
+}
+
+void MessagePumpGlib::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
+  // We need to wake up the loop in case the poll timeout needs to be
+  // adjusted.  This will cause us to try to do work, but that's OK.
+  delayed_work_time_ = delayed_work_time;
+  ScheduleWork();
+}
+
+// Accessor for the quit flag of the innermost Run(); CHECKs that a Run() is
+// active.
+bool MessagePumpGlib::ShouldQuit() const {
+  CHECK(state_);
+  return state_->should_quit;
+}
+
+} // namespace base
diff --git a/libchrome/base/message_loop/message_pump_glib.h b/libchrome/base/message_loop/message_pump_glib.h
new file mode 100644
index 0000000..a2b54d8
--- /dev/null
+++ b/libchrome/base/message_loop/message_pump_glib.h
@@ -0,0 +1,80 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_GLIB_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_GLIB_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/message_loop/message_pump.h"
+#include "base/observer_list.h"
+#include "base/time/time.h"
+
+typedef struct _GMainContext GMainContext;
+typedef struct _GPollFD GPollFD;
+typedef struct _GSource GSource;
+
+namespace base {
+
+// This class implements a base MessagePump needed for TYPE_UI MessageLoops on
+// platforms using GLib.
+// This class implements a base MessagePump needed for TYPE_UI MessageLoops on
+// platforms using GLib.
+class BASE_EXPORT MessagePumpGlib : public MessagePump {
+ public:
+  MessagePumpGlib();
+  ~MessagePumpGlib() override;
+
+  // Internal methods used for processing the pump callbacks.  They are
+  // public for simplicity but should not be used directly.  HandlePrepare
+  // is called during the prepare step of glib, and returns a timeout that
+  // will be passed to the poll.  HandleCheck is called after the poll
+  // has completed, and returns whether or not HandleDispatch should be called.
+  // HandleDispatch is called if HandleCheck returned true.
+  int HandlePrepare();
+  bool HandleCheck();
+  void HandleDispatch();
+
+  // Overridden from MessagePump:
+  void Run(Delegate* delegate) override;
+  void Quit() override;
+  void ScheduleWork() override;
+  void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
+
+ private:
+  // Returns the should_quit flag of the innermost active Run().
+  bool ShouldQuit() const;
+
+  // We may make recursive calls to Run, so we save state that needs to be
+  // separate between them in this structure type.
+  struct RunState;
+
+  // Innermost Run()'s state; NULL when no Run() is active.
+  RunState* state_;
+
+  // This is a GLib structure that we can add event sources to.  We use the
+  // default GLib context, which is the one to which all GTK events are
+  // dispatched.
+  GMainContext* context_;
+
+  // This is the time when we need to do delayed work.
+  TimeTicks delayed_work_time_;
+
+  // The work source.  It is shared by all calls to Run and destroyed when
+  // the message pump is destroyed.
+  GSource* work_source_;
+
+  // We use a wakeup pipe to make sure we'll get out of the glib polling phase
+  // when another thread has scheduled us to do some work.  There is a glib
+  // mechanism g_main_context_wakeup, but this won't guarantee that our event's
+  // Dispatch() will be called.
+  int wakeup_pipe_read_;
+  int wakeup_pipe_write_;
+  // Use a std::unique_ptr to avoid needing the definition of GPollFD in the
+  // header.
+  std::unique_ptr<GPollFD> wakeup_gpollfd_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpGlib);
+};
+
+} // namespace base
+
+#endif // BASE_MESSAGE_LOOP_MESSAGE_PUMP_GLIB_H_
diff --git a/libchrome/base/message_loop/message_pump_glib_unittest.cc b/libchrome/base/message_loop/message_pump_glib_unittest.cc
new file mode 100644
index 0000000..7ddd4f0
--- /dev/null
+++ b/libchrome/base/message_loop/message_pump_glib_unittest.cc
@@ -0,0 +1,534 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump_glib.h"
+
+#include <glib.h>
+#include <math.h>
+
+#include <algorithm>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+// Injects synthetic "events" into the default GLib context. When an event is
+// "handled" it can run a closure. This is intended to mock gtk events (the
+// corresponding GLib source runs at the same priority).
+class EventInjector {
+ public:
+  EventInjector() : processed_events_(0) {
+    source_ = static_cast<Source*>(g_source_new(&SourceFuncs, sizeof(Source)));
+    source_->injector = this;
+    g_source_attach(source_, NULL);
+    g_source_set_can_recurse(source_, TRUE);
+  }
+
+  ~EventInjector() {
+    g_source_destroy(source_);
+    g_source_unref(source_);
+  }
+
+  int HandlePrepare() {
+    // Nothing queued: return -1 so glib blocks indefinitely in poll().
+    if (events_.empty())
+      return -1;
+    TimeDelta delta = events_[0].time - Time::NowFromSystemTime();
+    return std::max(0, static_cast<int>(ceil(delta.InMillisecondsF())));
+  }
+
+  bool HandleCheck() {
+    if (events_.empty())
+      return false;
+    return events_[0].time <= Time::NowFromSystemTime();
+  }
+
+  void HandleDispatch() {
+    if (events_.empty())
+      return;
+    Event event = events_[0];
+    events_.erase(events_.begin());
+    ++processed_events_;
+    if (!event.callback.is_null())
+      event.callback.Run();
+    else if (!event.task.is_null())
+      event.task.Run();
+  }
+
+  // Enqueues an event that runs |callback| when handled. |delay_ms| is
+  // relative to the last queued event if any, or to Now() otherwise.
+  void AddEvent(int delay_ms, const Closure& callback) {
+    AddEventHelper(delay_ms, callback, Closure());
+  }
+
+  void AddDummyEvent(int delay_ms) {
+    AddEventHelper(delay_ms, Closure(), Closure());
+  }
+
+  void AddEventAsTask(int delay_ms, const Closure& task) {
+    AddEventHelper(delay_ms, Closure(), task);
+  }
+
+  void Reset() {
+    processed_events_ = 0;
+    events_.clear();
+  }
+
+  int processed_events() const { return processed_events_; }
+
+ private:
+  struct Event {
+    Time time;
+    Closure callback;
+    Closure task;
+  };
+
+  struct Source : public GSource {
+    EventInjector* injector;
+  };
+
+  void AddEventHelper(
+      int delay_ms, const Closure& callback, const Closure& task) {
+    Time last_time;
+    if (!events_.empty())
+      last_time = (events_.end()-1)->time;
+    else
+      last_time = Time::NowFromSystemTime();
+
+    Time future = last_time + TimeDelta::FromMilliseconds(delay_ms);
+    EventInjector::Event event = {future, callback, task};
+    events_.push_back(event);
+  }
+
+  static gboolean Prepare(GSource* source, gint* timeout_ms) {
+    *timeout_ms = static_cast<Source*>(source)->injector->HandlePrepare();
+    return FALSE;
+  }
+
+  static gboolean Check(GSource* source) {
+    return static_cast<Source*>(source)->injector->HandleCheck();
+  }
+
+  static gboolean Dispatch(GSource* source,
+                           GSourceFunc unused_func,
+                           gpointer unused_data) {
+    static_cast<Source*>(source)->injector->HandleDispatch();
+    return TRUE;
+  }
+
+  Source* source_;
+  std::vector<Event> events_;
+  int processed_events_;
+  static GSourceFuncs SourceFuncs;
+  DISALLOW_COPY_AND_ASSIGN(EventInjector);
+};
+
+GSourceFuncs EventInjector::SourceFuncs = {
+  EventInjector::Prepare,
+  EventInjector::Check,
+  EventInjector::Dispatch,
+  NULL
+};
+
+void IncrementInt(int *value) {
+  ++*value;
+}
+
+// Asserts that |injector| has processed exactly |count| events so far.
+void ExpectProcessedEvents(EventInjector* injector, int count) {
+  EXPECT_EQ(injector->processed_events(), count);
+}
+
+// Posts |task| to the MessageLoop that is current when this runs.
+void PostMessageLoopTask(const tracked_objects::Location& from_here,
+                         const Closure& task) {
+  MessageLoop::current()->PostTask(from_here, task);
+}
+
+// Test fixture: owns a TYPE_UI MessageLoop and an EventInjector per test.
+class MessagePumpGLibTest : public testing::Test {
+ public:
+  MessagePumpGLibTest() : loop_(NULL), injector_(NULL) { }
+
+  // Overridden from testing::Test:
+  void SetUp() override {
+    loop_ = new MessageLoop(MessageLoop::TYPE_UI);
+    injector_ = new EventInjector();
+  }
+  void TearDown() override {
+    delete injector_;
+    injector_ = NULL;
+    delete loop_;
+    loop_ = NULL;
+  }
+
+  MessageLoop* loop() const { return loop_; }
+  EventInjector* injector() const { return injector_; }
+
+ private:
+  MessageLoop* loop_;
+  EventInjector* injector_;
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpGLibTest);
+};
+
+} // namespace
+
+TEST_F(MessagePumpGLibTest, TestQuit) {
+  // Checks that Quit works and that the basic infrastructure is working.
+
+  // No work queued: RunUntilIdle should return without handling any event.
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(0, injector()->processed_events());
+
+  injector()->Reset();
+  // Quit from an injected event.
+  injector()->AddEvent(0, MessageLoop::QuitWhenIdleClosure());
+  loop()->Run();
+  EXPECT_EQ(1, injector()->processed_events());
+}
+
+TEST_F(MessagePumpGLibTest, TestEventTaskInterleave) {
+  // Checks that a task posted by an event is executed before the next event
+  // when the posted-task queue is otherwise empty.
+  // MessageLoop doesn't make strong guarantees that it is the case, but the
+  // current implementation ensures it and the tests below rely on it.
+  // If changes cause this test to fail, it is reasonable to change it, but
+  // TestWorkWhileWaitingForEvents and TestEventsWhileWaitingForWork have to be
+  // changed accordingly, otherwise they can become flaky.
+  injector()->AddEventAsTask(0, Bind(&DoNothing));
+  Closure check_task =
+      Bind(&ExpectProcessedEvents, Unretained(injector()), 2);
+  Closure posted_task =
+      Bind(&PostMessageLoopTask, FROM_HERE, check_task);
+  injector()->AddEventAsTask(0, posted_task);
+  injector()->AddEventAsTask(0, Bind(&DoNothing));
+  injector()->AddEvent(0, MessageLoop::QuitWhenIdleClosure());
+  loop()->Run();
+  EXPECT_EQ(4, injector()->processed_events());
+
+  injector()->Reset();
+  injector()->AddEventAsTask(0, Bind(&DoNothing));
+  check_task =
+      Bind(&ExpectProcessedEvents, Unretained(injector()), 2);
+  posted_task = Bind(&PostMessageLoopTask, FROM_HERE, check_task);
+  injector()->AddEventAsTask(0, posted_task);
+  injector()->AddEventAsTask(10, Bind(&DoNothing));
+  injector()->AddEvent(0, MessageLoop::QuitWhenIdleClosure());
+  loop()->Run();
+  EXPECT_EQ(4, injector()->processed_events());
+}
+
+TEST_F(MessagePumpGLibTest, TestWorkWhileWaitingForEvents) {
+  int task_count = 0;
+  // Tests that posted tasks are processed while the pump waits for events.
+  // The event queue is empty at first.
+  for (int i = 0; i < 10; ++i) {
+    loop()->PostTask(FROM_HERE, Bind(&IncrementInt, &task_count));
+  }
+  // After all the previous tasks have executed, enqueue an event that will
+  // quit.
+  loop()->PostTask(
+      FROM_HERE,
+      Bind(&EventInjector::AddEvent, Unretained(injector()), 0,
+           MessageLoop::QuitWhenIdleClosure()));
+  loop()->Run();
+  ASSERT_EQ(10, task_count);
+  EXPECT_EQ(1, injector()->processed_events());
+
+  // Tests that delayed tasks are processed while waiting for new events.
+  injector()->Reset();
+  task_count = 0;
+  for (int i = 0; i < 10; ++i) {
+    loop()->PostDelayedTask(
+        FROM_HERE,
+        Bind(&IncrementInt, &task_count),
+        TimeDelta::FromMilliseconds(10*i));
+  }
+  // After all the previous tasks have executed, enqueue an event that will
+  // quit.
+  // This relies on the fact that delayed tasks are executed in delay order.
+  // That is verified in message_loop_unittest.cc.
+  loop()->PostDelayedTask(
+      FROM_HERE,
+      Bind(&EventInjector::AddEvent, Unretained(injector()), 10,
+           MessageLoop::QuitWhenIdleClosure()),
+      TimeDelta::FromMilliseconds(150));
+  loop()->Run();
+  ASSERT_EQ(10, task_count);
+  EXPECT_EQ(1, injector()->processed_events());
+}
+
+TEST_F(MessagePumpGLibTest, TestEventsWhileWaitingForWork) {
+  // Tests that events are processed while the pump waits for posted work.
+  // The posted-task queue is empty at first.
+  for (int i = 0; i < 10; ++i) {
+    injector()->AddDummyEvent(0);
+  }
+  // After all the events have been processed, post a task that will check that
+  // the events have been processed (note: the task executes after the event
+  // that posted it has been handled, so we expect 11 at that point).
+  Closure check_task =
+      Bind(&ExpectProcessedEvents, Unretained(injector()), 11);
+  Closure posted_task =
+      Bind(&PostMessageLoopTask, FROM_HERE, check_task);
+  injector()->AddEventAsTask(10, posted_task);
+
+  // And then quit (relies on the condition tested by TestEventTaskInterleave).
+  injector()->AddEvent(10, MessageLoop::QuitWhenIdleClosure());
+  loop()->Run();
+
+  EXPECT_EQ(12, injector()->processed_events());
+}
+
+namespace {
+
+// Helper for the concurrent events / posted tasks test below. It quits the
+// main loop once enough tasks and events have been processed, while making
+// sure there is always work to do and events in the queue until then.
+class ConcurrentHelper : public RefCounted<ConcurrentHelper> {
+ public:
+  explicit ConcurrentHelper(EventInjector* injector)
+      : injector_(injector),
+        event_count_(kStartingEventCount),
+        task_count_(kStartingTaskCount) {
+  }
+
+  void FromTask() {
+    if (task_count_ > 0) {
+      --task_count_;
+    }
+    if (task_count_ == 0 && event_count_ == 0) {
+      MessageLoop::current()->QuitWhenIdle();
+    } else {
+      MessageLoop::current()->PostTask(
+          FROM_HERE, Bind(&ConcurrentHelper::FromTask, this));
+    }
+  }
+
+  void FromEvent() {
+    if (event_count_ > 0) {
+      --event_count_;
+    }
+    if (task_count_ == 0 && event_count_ == 0) {
+      MessageLoop::current()->QuitWhenIdle();
+    } else {
+      injector_->AddEventAsTask(
+          0, Bind(&ConcurrentHelper::FromEvent, this));
+    }
+  }
+
+  int event_count() const { return event_count_; }
+  int task_count() const { return task_count_; }
+
+ private:
+  friend class RefCounted<ConcurrentHelper>;
+
+  ~ConcurrentHelper() {}
+
+  static const int kStartingEventCount = 20;
+  static const int kStartingTaskCount = 20;
+
+  EventInjector* injector_;
+  int event_count_;
+  int task_count_;
+};
+
+} // namespace
+
+TEST_F(MessagePumpGLibTest, TestConcurrentEventPostedTask) {
+  // Tests that posted tasks don't starve events, nor the opposite.
+  // We use the helper class above. We keep both event and posted task queues
+  // full, the helper verifies that both tasks and events get processed.
+  // If that is not the case, either event_count_ or task_count_ will not get
+  // to 0, and MessageLoop::QuitWhenIdle() will never be called.
+  scoped_refptr<ConcurrentHelper> helper = new ConcurrentHelper(injector());
+
+  // Add 2 events to the queue so it is never empty (an event is removed from
+  // the queue before it is processed).
+  injector()->AddEventAsTask(
+      0, Bind(&ConcurrentHelper::FromEvent, helper.get()));
+  injector()->AddEventAsTask(
+      0, Bind(&ConcurrentHelper::FromEvent, helper.get()));
+
+  // Similarly post 2 tasks.
+  loop()->PostTask(
+      FROM_HERE, Bind(&ConcurrentHelper::FromTask, helper.get()));
+  loop()->PostTask(
+      FROM_HERE, Bind(&ConcurrentHelper::FromTask, helper.get()));
+
+  loop()->Run();
+  EXPECT_EQ(0, helper->event_count());
+  EXPECT_EQ(0, helper->task_count());
+}
+
+namespace {
+
+void AddEventsAndDrainGLib(EventInjector* injector) {
+  // Add a couple of dummy events.
+  injector->AddDummyEvent(0);
+  injector->AddDummyEvent(0);
+  // Then add an event that will quit the main loop.
+  injector->AddEvent(0, MessageLoop::QuitWhenIdleClosure());
+
+  // Post a couple of dummy tasks.
+  MessageLoop::current()->PostTask(FROM_HERE, Bind(&DoNothing));
+  MessageLoop::current()->PostTask(FROM_HERE, Bind(&DoNothing));
+
+  // Drain everything via raw GLib iterations instead of MessageLoop::Run.
+  while (g_main_context_pending(NULL)) {
+    g_main_context_iteration(NULL, FALSE);
+  }
+}
+
+} // namespace
+
+TEST_F(MessagePumpGLibTest, TestDrainingGLib) {
+  // Tests that draining events directly through GLib (not MessageLoop) works.
+  loop()->PostTask(
+      FROM_HERE,
+      Bind(&AddEventsAndDrainGLib, Unretained(injector())));
+  loop()->Run();
+
+  EXPECT_EQ(3, injector()->processed_events());
+}
+
+namespace {
+
+// Helper class that runs the default GLib context until Quit() is called.
+class GLibLoopRunner : public RefCounted<GLibLoopRunner> {
+ public:
+  GLibLoopRunner() : quit_(false) { }
+
+  void RunGLib() {
+    while (!quit_) {
+      g_main_context_iteration(NULL, TRUE);
+    }
+  }
+
+  void RunLoop() {
+    while (!quit_) {
+      g_main_context_iteration(NULL, TRUE);
+    }
+  }
+
+  void Quit() {
+    quit_ = true;
+  }
+
+  void Reset() {
+    quit_ = false;
+  }
+
+ private:
+  friend class RefCounted<GLibLoopRunner>;
+
+  ~GLibLoopRunner() {}
+
+  bool quit_;
+};
+
+void TestGLibLoopInternal(EventInjector* injector) {
+  // Allow tasks to be processed from 'native' event loops.
+  MessageLoop::current()->SetNestableTasksAllowed(true);
+  scoped_refptr<GLibLoopRunner> runner = new GLibLoopRunner();
+
+  int task_count = 0;
+  // Add a couple of dummy events.
+  injector->AddDummyEvent(0);
+  injector->AddDummyEvent(0);
+  // Post a couple of dummy tasks.
+  MessageLoop::current()->PostTask(
+      FROM_HERE, Bind(&IncrementInt, &task_count));
+  MessageLoop::current()->PostTask(
+      FROM_HERE, Bind(&IncrementInt, &task_count));
+  // Delayed events.
+  injector->AddDummyEvent(10);
+  injector->AddDummyEvent(10);
+  // Delayed work.
+  MessageLoop::current()->PostDelayedTask(
+      FROM_HERE,
+      Bind(&IncrementInt, &task_count),
+      TimeDelta::FromMilliseconds(30));
+  MessageLoop::current()->PostDelayedTask(
+      FROM_HERE,
+      Bind(&GLibLoopRunner::Quit, runner.get()),
+      TimeDelta::FromMilliseconds(40));
+
+  // Run a nested, straight GLib message loop until Quit() fires above.
+  runner->RunGLib();
+
+  ASSERT_EQ(3, task_count);
+  EXPECT_EQ(4, injector->processed_events());
+  MessageLoop::current()->QuitWhenIdle();
+}
+
+void TestGtkLoopInternal(EventInjector* injector) {
+  // Allow tasks to be processed from 'native' event loops.
+  MessageLoop::current()->SetNestableTasksAllowed(true);
+  scoped_refptr<GLibLoopRunner> runner = new GLibLoopRunner();
+
+  int task_count = 0;
+  // Add a couple of dummy events.
+  injector->AddDummyEvent(0);
+  injector->AddDummyEvent(0);
+  // Post a couple of dummy tasks.
+  MessageLoop::current()->PostTask(
+      FROM_HERE, Bind(&IncrementInt, &task_count));
+  MessageLoop::current()->PostTask(
+      FROM_HERE, Bind(&IncrementInt, &task_count));
+  // Delayed events.
+  injector->AddDummyEvent(10);
+  injector->AddDummyEvent(10);
+  // Delayed work.
+  MessageLoop::current()->PostDelayedTask(
+      FROM_HERE,
+      Bind(&IncrementInt, &task_count),
+      TimeDelta::FromMilliseconds(30));
+  MessageLoop::current()->PostDelayedTask(
+      FROM_HERE,
+      Bind(&GLibLoopRunner::Quit, runner.get()),
+      TimeDelta::FromMilliseconds(40));
+
+  // Run a nested, Gtk-style message loop until Quit() fires above.
+  runner->RunLoop();
+
+  ASSERT_EQ(3, task_count);
+  EXPECT_EQ(4, injector->processed_events());
+  MessageLoop::current()->QuitWhenIdle();
+}
+
+} // namespace
+
+TEST_F(MessagePumpGLibTest, TestGLibLoop) {
+  // Tests that events and posted tasks are correctly executed if the message
+  // loop is not run by MessageLoop::Run() but by a straight GLib loop.
+  // Note that in this case we don't make strong guarantees about fair
+  // interleaving between events and posted tasks.
+  loop()->PostTask(
+      FROM_HERE,
+      Bind(&TestGLibLoopInternal, Unretained(injector())));
+  loop()->Run();
+}
+
+TEST_F(MessagePumpGLibTest, TestGtkLoop) {
+  // Tests that events and posted tasks are correctly executed if the message
+  // loop is not run by MessageLoop::Run() but by a straight Gtk loop.
+  // Note that in this case we don't make strong guarantees about fair
+  // interleaving between events and posted tasks.
+  loop()->PostTask(
+      FROM_HERE,
+      Bind(&TestGtkLoopInternal, Unretained(injector())));
+  loop()->Run();
+}
+
+} // namespace base
diff --git a/libchrome/base/message_loop/message_pump_libevent.cc b/libchrome/base/message_loop/message_pump_libevent.cc
new file mode 100644
index 0000000..5aa5567
--- /dev/null
+++ b/libchrome/base/message_loop/message_pump_libevent.cc
@@ -0,0 +1,365 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_pump_libevent.h"
+
+#include <errno.h>
+#include <unistd.h>
+
+#include <memory>
+
+#include "base/auto_reset.h"
+#include "base/compiler_specific.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/observer_list.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/third_party/libevent/event.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#endif
+
+// Lifecycle of struct event
+// Libevent uses two main data structures:
+// struct event_base (of which there is one per message pump), and
+// struct event (of which there is roughly one per socket).
+// The socket's struct event is created in
+// MessagePumpLibevent::WatchFileDescriptor(),
+// is owned by the FileDescriptorWatcher, and is destroyed in
+// StopWatchingFileDescriptor().
+// It is moved into and out of lists in struct event_base by
+// the libevent functions event_add() and event_del().
+//
+// TODO(dkegel):
+// At the moment bad things happen if a FileDescriptorWatcher
+// is active after its MessagePumpLibevent has been destroyed.
+// See MessageLoopTest.FileDescriptorWatcherOutlivesMessageLoop
+// Not clear yet whether that situation occurs in practice,
+// but if it does, we need to fix it.
+
+namespace base {
+
+MessagePumpLibevent::FileDescriptorWatcher::FileDescriptorWatcher()
+    : event_(NULL),
+      pump_(NULL),
+      watcher_(NULL),
+      was_destroyed_(NULL) {
+}
+
+MessagePumpLibevent::FileDescriptorWatcher::~FileDescriptorWatcher() {
+  if (event_) {
+    StopWatchingFileDescriptor();
+  }
+  if (was_destroyed_) {
+    DCHECK(!*was_destroyed_);
+    *was_destroyed_ = true;
+  }
+}
+
+bool MessagePumpLibevent::FileDescriptorWatcher::StopWatchingFileDescriptor() {
+  event* e = ReleaseEvent();
+  if (e == NULL)
+    return true;
+
+  // event_del() is a no-op if the event isn't currently added/active.
+  int rv = event_del(e);
+  delete e;
+  pump_ = NULL;
+  watcher_ = NULL;
+  return (rv == 0);
+}
+
+void MessagePumpLibevent::FileDescriptorWatcher::Init(event *e) {
+  DCHECK(e);
+  DCHECK(!event_);
+
+  event_ = e;
+}
+
+event *MessagePumpLibevent::FileDescriptorWatcher::ReleaseEvent() {
+  struct event *e = event_;
+  event_ = NULL;
+  return e;
+}
+
+void MessagePumpLibevent::FileDescriptorWatcher::OnFileCanReadWithoutBlocking(
+    int fd,
+    MessagePumpLibevent*) {
+  // OnFileCanWriteWithoutBlocking() is dispatched first and may have stopped
+  // watching (clearing |watcher_|); a null watcher here is expected.
+  if (!watcher_)
+    return;
+  watcher_->OnFileCanReadWithoutBlocking(fd);
+}
+
+void MessagePumpLibevent::FileDescriptorWatcher::OnFileCanWriteWithoutBlocking(
+    int fd,
+    MessagePumpLibevent*) {
+  DCHECK(watcher_);
+  watcher_->OnFileCanWriteWithoutBlocking(fd);
+}
+
+MessagePumpLibevent::MessagePumpLibevent()
+    : keep_running_(true),
+      in_run_(false),
+      processed_io_events_(false),
+      event_base_(event_base_new()),
+      wakeup_pipe_in_(-1),
+      wakeup_pipe_out_(-1) {
+  if (!Init())
+    NOTREACHED();
+}
+
+MessagePumpLibevent::~MessagePumpLibevent() {
+  DCHECK(wakeup_event_);
+  DCHECK(event_base_);
+  event_del(wakeup_event_);
+  delete wakeup_event_;
+  if (wakeup_pipe_in_ >= 0) {
+    if (IGNORE_EINTR(close(wakeup_pipe_in_)) < 0)
+      DPLOG(ERROR) << "close";
+  }
+  if (wakeup_pipe_out_ >= 0) {
+    if (IGNORE_EINTR(close(wakeup_pipe_out_)) < 0)
+      DPLOG(ERROR) << "close";
+  }
+  event_base_free(event_base_);
+}
+
+bool MessagePumpLibevent::WatchFileDescriptor(int fd,
+                                              bool persistent,
+                                              int mode,
+                                              FileDescriptorWatcher *controller,
+                                              Watcher *delegate) {
+  DCHECK_GE(fd, 0);
+  DCHECK(controller);
+  DCHECK(delegate);
+  DCHECK(mode == WATCH_READ || mode == WATCH_WRITE || mode == WATCH_READ_WRITE);
+  // WatchFileDescriptor must be called on the pump thread. It is not
+  // threadsafe, and your watcher may never be registered otherwise.
+  DCHECK(watch_file_descriptor_caller_checker_.CalledOnValidThread());
+
+  int event_mask = persistent ? EV_PERSIST : 0;
+  if (mode & WATCH_READ) {
+    event_mask |= EV_READ;
+  }
+  if (mode & WATCH_WRITE) {
+    event_mask |= EV_WRITE;
+  }
+
+  std::unique_ptr<event> evt(controller->ReleaseEvent());
+  if (evt.get() == NULL) {
+    // No event attached yet; allocate one (ownership goes to |controller|).
+    evt.reset(new event);
+  } else {
+    // Make sure we don't pick up any funky internal libevent masks.
+    int old_interest_mask = evt.get()->ev_events &
+        (EV_READ | EV_WRITE | EV_PERSIST);
+
+    // Combine old/new event masks.
+    event_mask |= old_interest_mask;
+
+    // Must disarm the event before we can reuse it.
+    event_del(evt.get());
+
+    // It's illegal to use this function to listen on 2 separate fds with the
+    // same |controller|.
+    if (EVENT_FD(evt.get()) != fd) {
+      NOTREACHED() << "FDs don't match" << EVENT_FD(evt.get()) << "!=" << fd;
+      return false;
+    }
+  }
+
+  // Set current interest mask and message pump for this event.
+  event_set(evt.get(), fd, event_mask, OnLibeventNotification, controller);
+
+  // Tell libevent which message pump this socket will belong to when we add it.
+  if (event_base_set(event_base_, evt.get())) {
+    return false;
+  }
+
+  // Add this socket to the list of monitored sockets.
+  if (event_add(evt.get(), NULL)) {
+    return false;
+  }
+
+  // Transfer ownership of evt to controller.
+  controller->Init(evt.release());
+
+  controller->set_watcher(delegate);
+  controller->set_pump(this);
+
+  return true;
+}
+
+// Timer callback used by Run(): breaks libevent out of its inner loop.
+static void timer_callback(int /*fd*/, short /*events*/, void* context) {
+  event_base_loopbreak((struct event_base *)context);
+}
+
+// Reentrant! Nested Run() calls save/restore state via the AutoResets below.
+void MessagePumpLibevent::Run(Delegate* delegate) {
+  AutoReset<bool> auto_reset_keep_running(&keep_running_, true);
+  AutoReset<bool> auto_reset_in_run(&in_run_, true);
+
+  // event_base_loopexit() + EVLOOP_ONCE is leaky, see http://crbug.com/25641.
+  // Instead, make our own timer and reuse it on each call to event_base_loop().
+  std::unique_ptr<event> timer_event(new event);
+
+  for (;;) {
+#if defined(OS_MACOSX)
+    mac::ScopedNSAutoreleasePool autorelease_pool;
+#endif
+
+    bool did_work = delegate->DoWork();
+    if (!keep_running_)
+      break;
+
+    event_base_loop(event_base_, EVLOOP_NONBLOCK);
+    did_work |= processed_io_events_;
+    processed_io_events_ = false;
+    if (!keep_running_)
+      break;
+
+    did_work |= delegate->DoDelayedWork(&delayed_work_time_);
+    if (!keep_running_)
+      break;
+
+    if (did_work)
+      continue;
+
+    did_work = delegate->DoIdleWork();
+    if (!keep_running_)
+      break;
+
+    if (did_work)
+      continue;
+
+    // EVLOOP_ONCE tells libevent to only block once,
+    // but to service all pending events when it wakes up.
+    if (delayed_work_time_.is_null()) {
+      event_base_loop(event_base_, EVLOOP_ONCE);
+    } else {
+      TimeDelta delay = delayed_work_time_ - TimeTicks::Now();
+      if (delay > TimeDelta()) {
+        struct timeval poll_tv;
+        poll_tv.tv_sec = delay.InSeconds();
+        poll_tv.tv_usec = delay.InMicroseconds() % Time::kMicrosecondsPerSecond;
+        event_set(timer_event.get(), -1, 0, timer_callback, event_base_);
+        event_base_set(event_base_, timer_event.get());
+        event_add(timer_event.get(), &poll_tv);
+        event_base_loop(event_base_, EVLOOP_ONCE);
+        event_del(timer_event.get());
+      } else {
+        // It looks like delayed_work_time_ indicates a time in the past, so we
+        // need to call DoDelayedWork now.
+        delayed_work_time_ = TimeTicks();
+      }
+    }
+
+    if (!keep_running_)
+      break;
+  }
+}
+
+void MessagePumpLibevent::Quit() {
+  DCHECK(in_run_) << "Quit was called outside of Run!";
+  // Tell both libevent and Run that they should break out of their loops.
+  keep_running_ = false;
+  ScheduleWork();
+}
+
+void MessagePumpLibevent::ScheduleWork() {
+  // Threadsafe wake-up: one byte on the pipe makes OnWakeup break the loop.
+  char buf = 0;
+  int nwrite = HANDLE_EINTR(write(wakeup_pipe_in_, &buf, 1));
+  DCHECK(nwrite == 1 || errno == EAGAIN)
+      << "[nwrite:" << nwrite << "] [errno:" << errno << "]";
+}
+
+void MessagePumpLibevent::ScheduleDelayedWork(
+    const TimeTicks& delayed_work_time) {
+  // This can only be called on the thread that runs Run(), so we cannot be
+  // blocked in poll right now; just record when the delayed work is due and
+  // the next pump iteration will compute the sleep from it.
+  delayed_work_time_ = delayed_work_time;
+}
+
+bool MessagePumpLibevent::Init() {
+  int fds[2];
+  if (pipe(fds)) {
+    DLOG(ERROR) << "pipe() failed, errno: " << errno;
+    return false;
+  }
+  if (!SetNonBlocking(fds[0])) {
+    DLOG(ERROR) << "SetNonBlocking for pipe fd[0] failed, errno: " << errno;
+    return false;
+  }
+  if (!SetNonBlocking(fds[1])) {
+    DLOG(ERROR) << "SetNonBlocking for pipe fd[1] failed, errno: " << errno;
+    return false;
+  }
+  wakeup_pipe_out_ = fds[0];
+  wakeup_pipe_in_ = fds[1];
+
+  wakeup_event_ = new event;
+  event_set(wakeup_event_, wakeup_pipe_out_, EV_READ | EV_PERSIST,
+            OnWakeup, this);
+  event_base_set(event_base_, wakeup_event_);
+
+  if (event_add(wakeup_event_, 0))
+    return false;
+  return true;
+}
+
+// static
+void MessagePumpLibevent::OnLibeventNotification(int fd,
+                                                 short flags,
+                                                 void* context) {
+  FileDescriptorWatcher* controller =
+      static_cast<FileDescriptorWatcher*>(context);
+  DCHECK(controller);
+  TRACE_EVENT1("toplevel", "MessagePumpLibevent::OnLibeventNotification",
+               "fd", fd);
+
+  MessagePumpLibevent* pump = controller->pump();
+  pump->processed_io_events_ = true;
+
+  if ((flags & (EV_READ | EV_WRITE)) == (EV_READ | EV_WRITE)) {
+    // Both callbacks will run; the write callback may destroy |controller|,
+    // so track destruction through |was_destroyed_| before the read callback.
+    bool controller_was_destroyed = false;
+    controller->was_destroyed_ = &controller_was_destroyed;
+    controller->OnFileCanWriteWithoutBlocking(fd, pump);
+    if (!controller_was_destroyed)
+      controller->OnFileCanReadWithoutBlocking(fd, pump);
+    if (!controller_was_destroyed)
+      controller->was_destroyed_ = nullptr;
+  } else if (flags & EV_WRITE) {
+    controller->OnFileCanWriteWithoutBlocking(fd, pump);
+  } else if (flags & EV_READ) {
+    controller->OnFileCanReadWithoutBlocking(fd, pump);
+  }
+}
+
+// Called when the byte written by ScheduleWork() arrives on the wakeup pipe.
+// static
+void MessagePumpLibevent::OnWakeup(int socket, short /*flags*/, void* context) {
+  MessagePumpLibevent* that = static_cast<MessagePumpLibevent*>(context);
+  DCHECK(that->wakeup_pipe_out_ == socket);
+
+  // Remove and discard the wakeup byte.
+  char buf;
+  int nread = HANDLE_EINTR(read(socket, &buf, 1));
+  DCHECK_EQ(nread, 1);
+  that->processed_io_events_ = true;
+  // Tell libevent to break out of inner loop.
+  event_base_loopbreak(that->event_base_);
+}
+
+} // namespace base
diff --git a/libchrome/base/message_loop/message_pump_libevent.h b/libchrome/base/message_loop/message_pump_libevent.h
new file mode 100644
index 0000000..76f882f
--- /dev/null
+++ b/libchrome/base/message_loop/message_pump_libevent.h
@@ -0,0 +1,158 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_LIBEVENT_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_LIBEVENT_H_
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/message_loop/message_pump.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
+
+// Declare structs we need from libevent.h rather than including it
+struct event_base;
+struct event;
+
+namespace base {
+
+// MessagePump that monitors sockets via libevent and issues I/O callbacks.
+// TODO(dkegel): add support for background file IO somehow
+class BASE_EXPORT MessagePumpLibevent : public MessagePump {
+ public:
+  // Used with WatchFileDescriptor to asynchronously monitor the I/O readiness
+  // of a file descriptor.
+  class Watcher {
+   public:
+    // Called from MessageLoop::Run when an FD can be read from/written to
+    // without blocking
+    virtual void OnFileCanReadWithoutBlocking(int fd) = 0;
+    virtual void OnFileCanWriteWithoutBlocking(int fd) = 0;
+
+   protected:
+    virtual ~Watcher() {}
+  };
+
+  // Object returned by WatchFileDescriptor to manage further watching.
+  class FileDescriptorWatcher {
+   public:
+    FileDescriptorWatcher();
+    ~FileDescriptorWatcher();  // Implicitly calls StopWatchingFileDescriptor.
+
+    // NOTE: These methods aren't called StartWatching()/StopWatching() to
+    // avoid confusion with the win32 ObjectWatcher class.
+
+    // Stop watching the FD, always safe to call.  No-op if there's nothing
+    // to do.
+    bool StopWatchingFileDescriptor();
+
+   private:
+    friend class MessagePumpLibevent;
+    friend class MessagePumpLibeventTest;
+
+    // Called by MessagePumpLibevent, ownership of |e| is transferred to this
+    // object.
+    void Init(event* e);
+
+    // Used by MessagePumpLibevent to take ownership of event_.
+    event* ReleaseEvent();
+
+    void set_pump(MessagePumpLibevent* pump) { pump_ = pump; }
+    MessagePumpLibevent* pump() const { return pump_; }
+
+    void set_watcher(Watcher* watcher) { watcher_ = watcher; }
+
+    void OnFileCanReadWithoutBlocking(int fd, MessagePumpLibevent* pump);
+    void OnFileCanWriteWithoutBlocking(int fd, MessagePumpLibevent* pump);
+
+    event* event_;
+    MessagePumpLibevent* pump_;
+    Watcher* watcher_;
+    // If this pointer is non-NULL, the pointee is set to true in the
+    // destructor.
+    bool* was_destroyed_;
+
+    DISALLOW_COPY_AND_ASSIGN(FileDescriptorWatcher);
+  };
+
+  enum Mode {
+    WATCH_READ = 1 << 0,
+    WATCH_WRITE = 1 << 1,
+    WATCH_READ_WRITE = WATCH_READ | WATCH_WRITE
+  };
+
+  MessagePumpLibevent();
+  ~MessagePumpLibevent() override;
+
+  // Have the current thread's message loop watch for a situation in which
+  // reading/writing to the FD can be performed without blocking.
+  // Callers must provide a preallocated FileDescriptorWatcher object which
+  // can later be used to manage the lifetime of this event.
+  // If a FileDescriptorWatcher is passed in which is already attached to
+  // an event, then the effect is cumulative i.e. after the call |controller|
+  // will watch both the previous event and the new one.
+  // If an error occurs while calling this method in a cumulative fashion, the
+  // event previously attached to |controller| is aborted.
+  // Returns true on success.
+  // Must be called on the same thread the message_pump is running on.
+  // TODO(dkegel): switch to edge-triggered readiness notification
+  bool WatchFileDescriptor(int fd,
+                           bool persistent,
+                           int mode,
+                           FileDescriptorWatcher *controller,
+                           Watcher *delegate);
+
+  // MessagePump methods:
+  void Run(Delegate* delegate) override;
+  void Quit() override;
+  void ScheduleWork() override;
+  void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
+
+ private:
+  friend class MessagePumpLibeventTest;
+
+  void WillProcessIOEvent();
+  void DidProcessIOEvent();
+
+  // Risky part of constructor.  Returns true on success.
+  bool Init();
+
+  // Called by libevent to tell us a registered FD can be read/written to.
+  static void OnLibeventNotification(int fd, short flags,
+                                     void* context);
+
+  // Unix pipe used to implement ScheduleWork()
+  // ... callback; called by libevent inside Run() when pipe is ready to read
+  static void OnWakeup(int socket, short flags, void* context);
+
+  // This flag is set to false when Run should return.
+  bool keep_running_;
+
+  // This flag is set when inside Run.
+  bool in_run_;
+
+  // This flag is set if libevent has processed I/O events.
+  bool processed_io_events_;
+
+  // The time at which we should call DoDelayedWork.
+  TimeTicks delayed_work_time_;
+
+  // Libevent dispatcher.  Watches all sockets registered with it, and sends
+  // readiness callbacks when a socket is ready for I/O.
+  event_base* event_base_;
+
+  // ... write end; ScheduleWork() writes a single byte to it
+  int wakeup_pipe_in_;
+  // ... read end; OnWakeup reads it and then breaks Run() out of its sleep
+  int wakeup_pipe_out_;
+  // ... libevent wrapper for read end
+  event* wakeup_event_;
+
+  ThreadChecker watch_file_descriptor_caller_checker_;
+  DISALLOW_COPY_AND_ASSIGN(MessagePumpLibevent);
+};
+
+} // namespace base
+
+#endif // BASE_MESSAGE_LOOP_MESSAGE_PUMP_LIBEVENT_H_
diff --git a/libchrome/base/message_loop/message_pump_mac.h b/libchrome/base/message_loop/message_pump_mac.h
new file mode 100644
index 0000000..14b8377
--- /dev/null
+++ b/libchrome/base/message_loop/message_pump_mac.h
@@ -0,0 +1,354 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The basis for all native run loops on the Mac is the CFRunLoop. It can be
+// used directly, it can be used as the driving force behind the similar
+// Foundation NSRunLoop, and it can be used to implement higher-level event
+// loops such as the NSApplication event loop.
+//
+// This file introduces a basic CFRunLoop-based implementation of the
+// MessagePump interface called CFRunLoopBase. CFRunLoopBase contains all
+// of the machinery necessary to dispatch events to a delegate, but does not
+// implement the specific run loop. Concrete subclasses must provide their
+// own DoRun and Quit implementations.
+//
+// A concrete subclass that just runs a CFRunLoop loop is provided in
+// MessagePumpCFRunLoop. For an NSRunLoop, the similar MessagePumpNSRunLoop
+// is provided.
+//
+// For the application's event loop, an implementation based on AppKit's
+// NSApplication event system is provided in MessagePumpNSApplication.
+//
+// Typically, MessagePumpNSApplication only makes sense on a Cocoa
+// application's main thread. If a CFRunLoop-based message pump is needed on
+// any other thread, one of the other concrete subclasses is preferable.
+// MessagePumpMac::Create is defined, which returns a new NSApplication-based
+// or NSRunLoop-based MessagePump subclass depending on which thread it is
+// called on.
+
+#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_MAC_H_
+#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_MAC_H_
+
+#include "base/message_loop/message_pump.h"
+
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/timer_slack.h"
+#include "build/build_config.h"
+
+#if defined(__OBJC__)
+#if defined(OS_IOS)
+#import <Foundation/Foundation.h>
+#else
+#import <AppKit/AppKit.h>
+
+// Clients must subclass NSApplication and implement this protocol if they use
+// MessagePumpMac.
+@protocol CrAppProtocol
+// Must return true if -[NSApplication sendEvent:] is currently on the stack.
+// See the comment for |CreateAutoreleasePool()| in the cc file for why this is
+// necessary.
+- (BOOL)isHandlingSendEvent;
+@end
+#endif // !defined(OS_IOS)
+#endif // defined(__OBJC__)
+
+namespace base {
+
+class RunLoop;
+class TimeTicks;
+
+// AutoreleasePoolType is a proxy type for autorelease pools. Its definition
+// depends on the translation unit (TU) in which this header appears. In pure
+// C++ TUs, it is defined as a forward C++ class declaration (that is never
+// defined), because autorelease pools are an Objective-C concept. In Automatic
+// Reference Counting (ARC) Objective-C TUs, it is similarly defined as a
+// forward C++ class declaration, because clang will not allow the type
+// "NSAutoreleasePool" in such TUs. Finally, in Manual Retain Release (MRR)
+// Objective-C TUs, it is a type alias for NSAutoreleasePool. In all cases, a
+// method that takes or returns an NSAutoreleasePool* can use
+// AutoreleasePoolType* instead.
+#if !defined(__OBJC__) || __has_feature(objc_arc)
+class AutoreleasePoolType;
+#else // !defined(__OBJC__) || __has_feature(objc_arc)
+typedef NSAutoreleasePool AutoreleasePoolType;
+#endif // !defined(__OBJC__) || __has_feature(objc_arc)
+
+class MessagePumpCFRunLoopBase : public MessagePump {
+ // Needs access to CreateAutoreleasePool.
+ friend class MessagePumpScopedAutoreleasePool;
+ public:
+ MessagePumpCFRunLoopBase();
+ ~MessagePumpCFRunLoopBase() override;
+
+ // Subclasses should implement the work they need to do in MessagePump::Run
+ // in the DoRun method. MessagePumpCFRunLoopBase::Run calls DoRun directly.
+ // This arrangement is used because MessagePumpCFRunLoopBase needs to set
+ // up and tear down things before and after the "meat" of DoRun.
+ void Run(Delegate* delegate) override;
+ virtual void DoRun(Delegate* delegate) = 0;
+
+ void ScheduleWork() override;
+ void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
+ void SetTimerSlack(TimerSlack timer_slack) override;
+
+ protected:
+ // Accessors for private data members to be used by subclasses.
+ CFRunLoopRef run_loop() const { return run_loop_; }
+ int nesting_level() const { return nesting_level_; }
+ int run_nesting_level() const { return run_nesting_level_; }
+
+ // Sets this pump's delegate. Signals the appropriate sources if
+ // |delegateless_work_| is true. |delegate| can be NULL.
+ void SetDelegate(Delegate* delegate);
+
+ // Return an autorelease pool to wrap around any work being performed.
+ // In some cases, CreateAutoreleasePool may return nil intentionally to
+  // prevent an autorelease pool from being created, allowing any
+ // objects autoreleased by work to fall into the current autorelease pool.
+ virtual AutoreleasePoolType* CreateAutoreleasePool();
+
+ private:
+ // Timer callback scheduled by ScheduleDelayedWork. This does not do any
+ // work, but it signals work_source_ so that delayed work can be performed
+ // within the appropriate priority constraints.
+ static void RunDelayedWorkTimer(CFRunLoopTimerRef timer, void* info);
+
+ // Perform highest-priority work. This is associated with work_source_
+ // signalled by ScheduleWork or RunDelayedWorkTimer. The static method calls
+ // the instance method; the instance method returns true if it resignalled
+ // work_source_ to be called again from the loop.
+ static void RunWorkSource(void* info);
+ bool RunWork();
+
+ // Perform idle-priority work. This is normally called by PreWaitObserver,
+ // but is also associated with idle_work_source_. When this function
+ // actually does perform idle work, it will resignal that source. The
+ // static method calls the instance method; the instance method returns
+ // true if idle work was done.
+ static void RunIdleWorkSource(void* info);
+ bool RunIdleWork();
+
+ // Perform work that may have been deferred because it was not runnable
+ // within a nested run loop. This is associated with
+ // nesting_deferred_work_source_ and is signalled by
+ // MaybeScheduleNestingDeferredWork when returning from a nested loop,
+ // so that an outer loop will be able to perform the necessary tasks if it
+ // permits nestable tasks.
+ static void RunNestingDeferredWorkSource(void* info);
+ bool RunNestingDeferredWork();
+
+ // Schedules possible nesting-deferred work to be processed before the run
+ // loop goes to sleep, exits, or begins processing sources at the top of its
+ // loop. If this function detects that a nested loop had run since the
+ // previous attempt to schedule nesting-deferred work, it will schedule a
+ // call to RunNestingDeferredWorkSource.
+ void MaybeScheduleNestingDeferredWork();
+
+ // Observer callback responsible for performing idle-priority work, before
+ // the run loop goes to sleep. Associated with idle_work_observer_.
+ static void PreWaitObserver(CFRunLoopObserverRef observer,
+ CFRunLoopActivity activity, void* info);
+
+ // Observer callback called before the run loop processes any sources.
+ // Associated with pre_source_observer_.
+ static void PreSourceObserver(CFRunLoopObserverRef observer,
+ CFRunLoopActivity activity, void* info);
+
+ // Observer callback called when the run loop starts and stops, at the
+ // beginning and end of calls to CFRunLoopRun. This is used to maintain
+ // nesting_level_. Associated with enter_exit_observer_.
+ static void EnterExitObserver(CFRunLoopObserverRef observer,
+ CFRunLoopActivity activity, void* info);
+
+ // Called by EnterExitObserver after performing maintenance on nesting_level_.
+ // This allows subclasses an opportunity to perform additional processing on
+ // the basis of run loops starting and stopping.
+ virtual void EnterExitRunLoop(CFRunLoopActivity activity);
+
+ // The thread's run loop.
+ CFRunLoopRef run_loop_;
+
+ // The timer, sources, and observers are described above alongside their
+ // callbacks.
+ CFRunLoopTimerRef delayed_work_timer_;
+ CFRunLoopSourceRef work_source_;
+ CFRunLoopSourceRef idle_work_source_;
+ CFRunLoopSourceRef nesting_deferred_work_source_;
+ CFRunLoopObserverRef pre_wait_observer_;
+ CFRunLoopObserverRef pre_source_observer_;
+ CFRunLoopObserverRef enter_exit_observer_;
+
+ // (weak) Delegate passed as an argument to the innermost Run call.
+ Delegate* delegate_;
+
+ // The time that delayed_work_timer_ is scheduled to fire. This is tracked
+ // independently of CFRunLoopTimerGetNextFireDate(delayed_work_timer_)
+ // to be able to reset the timer properly after waking from system sleep.
+ // See PowerStateNotification.
+ CFAbsoluteTime delayed_work_fire_time_;
+
+ base::TimerSlack timer_slack_;
+
+ // The recursion depth of the currently-executing CFRunLoopRun loop on the
+ // run loop's thread. 0 if no run loops are running inside of whatever scope
+ // the object was created in.
+ int nesting_level_;
+
+ // The recursion depth (calculated in the same way as nesting_level_) of the
+ // innermost executing CFRunLoopRun loop started by a call to Run.
+ int run_nesting_level_;
+
+ // The deepest (numerically highest) recursion depth encountered since the
+ // most recent attempt to run nesting-deferred work.
+ int deepest_nesting_level_;
+
+ // "Delegateless" work flags are set when work is ready to be performed but
+ // must wait until a delegate is available to process it. This can happen
+ // when a MessagePumpCFRunLoopBase is instantiated and work arrives without
+ // any call to Run on the stack. The Run method will check for delegateless
+ // work on entry and redispatch it as needed once a delegate is available.
+ bool delegateless_work_;
+ bool delegateless_idle_work_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessagePumpCFRunLoopBase);
+};
+
+class BASE_EXPORT MessagePumpCFRunLoop : public MessagePumpCFRunLoopBase {
+ public:
+ MessagePumpCFRunLoop();
+ ~MessagePumpCFRunLoop() override;
+
+ void DoRun(Delegate* delegate) override;
+ void Quit() override;
+
+ private:
+ void EnterExitRunLoop(CFRunLoopActivity activity) override;
+
+ // True if Quit is called to stop the innermost MessagePump
+ // (innermost_quittable_) but some other CFRunLoopRun loop (nesting_level_)
+ // is running inside the MessagePump's innermost Run call.
+ bool quit_pending_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessagePumpCFRunLoop);
+};
+
+class BASE_EXPORT MessagePumpNSRunLoop : public MessagePumpCFRunLoopBase {
+ public:
+ MessagePumpNSRunLoop();
+ ~MessagePumpNSRunLoop() override;
+
+ void DoRun(Delegate* delegate) override;
+ void Quit() override;
+
+ private:
+ // A source that doesn't do anything but provide something signalable
+ // attached to the run loop. This source will be signalled when Quit
+ // is called, to cause the loop to wake up so that it can stop.
+ CFRunLoopSourceRef quit_source_;
+
+ // False after Quit is called.
+ bool keep_running_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessagePumpNSRunLoop);
+};
+
+#if defined(OS_IOS)
+// This is a fake message pump. It attaches sources to the main thread's
+// CFRunLoop, so PostTask() will work, but it is unable to drive the loop
+// directly, so calling Run() or Quit() are errors.
+class MessagePumpUIApplication : public MessagePumpCFRunLoopBase {
+ public:
+ MessagePumpUIApplication();
+ ~MessagePumpUIApplication() override;
+ void DoRun(Delegate* delegate) override;
+ void Quit() override;
+
+ // This message pump can not spin the main message loop directly. Instead,
+ // call |Attach()| to set up a delegate. It is an error to call |Run()|.
+ virtual void Attach(Delegate* delegate);
+
+ private:
+ RunLoop* run_loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessagePumpUIApplication);
+};
+
+#else
+
+class MessagePumpNSApplication : public MessagePumpCFRunLoopBase {
+ public:
+ MessagePumpNSApplication();
+ ~MessagePumpNSApplication() override;
+
+ void DoRun(Delegate* delegate) override;
+ void Quit() override;
+
+ private:
+ // False after Quit is called.
+ bool keep_running_;
+
+ // True if DoRun is managing its own run loop as opposed to letting
+ // -[NSApplication run] handle it. The outermost run loop in the application
+ // is managed by -[NSApplication run], inner run loops are handled by a loop
+ // in DoRun.
+ bool running_own_loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessagePumpNSApplication);
+};
+
+class MessagePumpCrApplication : public MessagePumpNSApplication {
+ public:
+ MessagePumpCrApplication();
+ ~MessagePumpCrApplication() override;
+
+ protected:
+ // Returns nil if NSApp is currently in the middle of calling
+ // -sendEvent. Requires NSApp implementing CrAppProtocol.
+ AutoreleasePoolType* CreateAutoreleasePool() override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MessagePumpCrApplication);
+};
+#endif // !defined(OS_IOS)
+
+class BASE_EXPORT MessagePumpMac {
+ public:
+ // If not on the main thread, returns a new instance of
+ // MessagePumpNSRunLoop.
+ //
+ // On the main thread, if NSApp exists and conforms to
+  // CrAppProtocol, creates an instance of MessagePumpCrApplication.
+ //
+ // Otherwise creates an instance of MessagePumpNSApplication using a
+ // default NSApplication.
+ static MessagePump* Create();
+
+#if !defined(OS_IOS)
+ // If a pump is created before the required CrAppProtocol is
+ // created, the wrong MessagePump subclass could be used.
+ // UsingCrApp() returns false if the message pump was created before
+ // NSApp was initialized, or if NSApp does not implement
+ // CrAppProtocol. NSApp must be initialized before calling.
+ static bool UsingCrApp();
+
+ // Wrapper to query -[NSApp isHandlingSendEvent] from C++ code.
+ // Requires NSApp to implement CrAppProtocol.
+ static bool IsHandlingSendEvent();
+#endif // !defined(OS_IOS)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MessagePumpMac);
+};
+
+// Tasks posted to the message loop are posted under this mode, as well
+// as kCFRunLoopCommonModes.
+extern const CFStringRef BASE_EXPORT kMessageLoopExclusiveRunLoopMode;
+
+} // namespace base
+
+#endif // BASE_MESSAGE_LOOP_MESSAGE_PUMP_MAC_H_
diff --git a/libchrome/base/message_loop/message_pump_mac.mm b/libchrome/base/message_loop/message_pump_mac.mm
new file mode 100644
index 0000000..95d1c5f
--- /dev/null
+++ b/libchrome/base/message_loop/message_pump_mac.mm
@@ -0,0 +1,782 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "base/message_loop/message_pump_mac.h"
+
+#include <dlfcn.h>
+#import <Foundation/Foundation.h>
+
+#include <limits>
+
+#include "base/logging.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/message_loop/timer_slack.h"
+#include "base/run_loop.h"
+#include "base/time/time.h"
+
+#if !defined(OS_IOS)
+#import <AppKit/AppKit.h>
+#endif // !defined(OS_IOS)
+
+namespace base {
+
+namespace {
+
+void CFRunLoopAddSourceToAllModes(CFRunLoopRef rl, CFRunLoopSourceRef source) {
+ CFRunLoopAddSource(rl, source, kCFRunLoopCommonModes);
+ CFRunLoopAddSource(rl, source, kMessageLoopExclusiveRunLoopMode);
+}
+
+void CFRunLoopRemoveSourceFromAllModes(CFRunLoopRef rl,
+ CFRunLoopSourceRef source) {
+ CFRunLoopRemoveSource(rl, source, kCFRunLoopCommonModes);
+ CFRunLoopRemoveSource(rl, source, kMessageLoopExclusiveRunLoopMode);
+}
+
+void CFRunLoopAddTimerToAllModes(CFRunLoopRef rl, CFRunLoopTimerRef timer) {
+ CFRunLoopAddTimer(rl, timer, kCFRunLoopCommonModes);
+ CFRunLoopAddTimer(rl, timer, kMessageLoopExclusiveRunLoopMode);
+}
+
+void CFRunLoopRemoveTimerFromAllModes(CFRunLoopRef rl,
+ CFRunLoopTimerRef timer) {
+ CFRunLoopRemoveTimer(rl, timer, kCFRunLoopCommonModes);
+ CFRunLoopRemoveTimer(rl, timer, kMessageLoopExclusiveRunLoopMode);
+}
+
+void CFRunLoopAddObserverToAllModes(CFRunLoopRef rl,
+ CFRunLoopObserverRef observer) {
+ CFRunLoopAddObserver(rl, observer, kCFRunLoopCommonModes);
+ CFRunLoopAddObserver(rl, observer, kMessageLoopExclusiveRunLoopMode);
+}
+
+void CFRunLoopRemoveObserverFromAllModes(CFRunLoopRef rl,
+ CFRunLoopObserverRef observer) {
+ CFRunLoopRemoveObserver(rl, observer, kCFRunLoopCommonModes);
+ CFRunLoopRemoveObserver(rl, observer, kMessageLoopExclusiveRunLoopMode);
+}
+
+void NoOp(void* /* info */) {
+}
+
+const CFTimeInterval kCFTimeIntervalMax =
+ std::numeric_limits<CFTimeInterval>::max();
+
+#if !defined(OS_IOS)
+// Set to true if MessagePumpMac::Create() is called before NSApp is
+// initialized. Only accessed from the main thread.
+bool g_not_using_cr_app = false;
+#endif
+
+// Call through to CFRunLoopTimerSetTolerance(), which is only available on
+// OS X 10.9.
+void SetTimerTolerance(CFRunLoopTimerRef timer, CFTimeInterval tolerance) {
+ typedef void (*CFRunLoopTimerSetTolerancePtr)(CFRunLoopTimerRef timer,
+ CFTimeInterval tolerance);
+
+ static CFRunLoopTimerSetTolerancePtr settimertolerance_function_ptr;
+
+ static dispatch_once_t get_timer_tolerance_function_ptr_once;
+ dispatch_once(&get_timer_tolerance_function_ptr_once, ^{
+ NSBundle* bundle =[NSBundle
+ bundleWithPath:@"/System/Library/Frameworks/CoreFoundation.framework"];
+ const char* path = [[bundle executablePath] fileSystemRepresentation];
+ CHECK(path);
+ void* library_handle = dlopen(path, RTLD_LAZY | RTLD_LOCAL);
+ CHECK(library_handle) << dlerror();
+ settimertolerance_function_ptr =
+ reinterpret_cast<CFRunLoopTimerSetTolerancePtr>(
+ dlsym(library_handle, "CFRunLoopTimerSetTolerance"));
+
+ dlclose(library_handle);
+ });
+
+ if (settimertolerance_function_ptr)
+ settimertolerance_function_ptr(timer, tolerance);
+}
+
+} // namespace
+
+// static
+const CFStringRef kMessageLoopExclusiveRunLoopMode =
+ CFSTR("kMessageLoopExclusiveRunLoopMode");
+
+// A scoper for autorelease pools created from message pump run loops.
+// Avoids dirtying up the ScopedNSAutoreleasePool interface for the rare
+// case where an autorelease pool needs to be passed in.
+class MessagePumpScopedAutoreleasePool {
+ public:
+ explicit MessagePumpScopedAutoreleasePool(MessagePumpCFRunLoopBase* pump) :
+ pool_(pump->CreateAutoreleasePool()) {
+ }
+ ~MessagePumpScopedAutoreleasePool() {
+ [pool_ drain];
+ }
+
+ private:
+ NSAutoreleasePool* pool_;
+ DISALLOW_COPY_AND_ASSIGN(MessagePumpScopedAutoreleasePool);
+};
+
+// Must be called on the run loop thread.
+MessagePumpCFRunLoopBase::MessagePumpCFRunLoopBase()
+ : delegate_(NULL),
+ delayed_work_fire_time_(kCFTimeIntervalMax),
+ timer_slack_(base::TIMER_SLACK_NONE),
+ nesting_level_(0),
+ run_nesting_level_(0),
+ deepest_nesting_level_(0),
+ delegateless_work_(false),
+ delegateless_idle_work_(false) {
+ run_loop_ = CFRunLoopGetCurrent();
+ CFRetain(run_loop_);
+
+ // Set a repeating timer with a preposterous firing time and interval. The
+ // timer will effectively never fire as-is. The firing time will be adjusted
+ // as needed when ScheduleDelayedWork is called.
+ CFRunLoopTimerContext timer_context = CFRunLoopTimerContext();
+ timer_context.info = this;
+ delayed_work_timer_ = CFRunLoopTimerCreate(NULL, // allocator
+ kCFTimeIntervalMax, // fire time
+ kCFTimeIntervalMax, // interval
+ 0, // flags
+ 0, // priority
+ RunDelayedWorkTimer,
+ &timer_context);
+ CFRunLoopAddTimerToAllModes(run_loop_, delayed_work_timer_);
+
+ CFRunLoopSourceContext source_context = CFRunLoopSourceContext();
+ source_context.info = this;
+ source_context.perform = RunWorkSource;
+ work_source_ = CFRunLoopSourceCreate(NULL, // allocator
+ 1, // priority
+ &source_context);
+ CFRunLoopAddSourceToAllModes(run_loop_, work_source_);
+
+ source_context.perform = RunIdleWorkSource;
+ idle_work_source_ = CFRunLoopSourceCreate(NULL, // allocator
+ 2, // priority
+ &source_context);
+ CFRunLoopAddSourceToAllModes(run_loop_, idle_work_source_);
+
+ source_context.perform = RunNestingDeferredWorkSource;
+ nesting_deferred_work_source_ = CFRunLoopSourceCreate(NULL, // allocator
+ 0, // priority
+ &source_context);
+ CFRunLoopAddSourceToAllModes(run_loop_, nesting_deferred_work_source_);
+
+ CFRunLoopObserverContext observer_context = CFRunLoopObserverContext();
+ observer_context.info = this;
+ pre_wait_observer_ = CFRunLoopObserverCreate(NULL, // allocator
+ kCFRunLoopBeforeWaiting,
+ true, // repeat
+ 0, // priority
+ PreWaitObserver,
+ &observer_context);
+ CFRunLoopAddObserverToAllModes(run_loop_, pre_wait_observer_);
+
+ pre_source_observer_ = CFRunLoopObserverCreate(NULL, // allocator
+ kCFRunLoopBeforeSources,
+ true, // repeat
+ 0, // priority
+ PreSourceObserver,
+ &observer_context);
+ CFRunLoopAddObserverToAllModes(run_loop_, pre_source_observer_);
+
+ enter_exit_observer_ = CFRunLoopObserverCreate(NULL, // allocator
+ kCFRunLoopEntry |
+ kCFRunLoopExit,
+ true, // repeat
+ 0, // priority
+ EnterExitObserver,
+ &observer_context);
+ CFRunLoopAddObserverToAllModes(run_loop_, enter_exit_observer_);
+}
+
+// Ideally called on the run loop thread. If other run loops were running
+// lower on the run loop thread's stack when this object was created, the
+// same number of run loops must be running when this object is destroyed.
+MessagePumpCFRunLoopBase::~MessagePumpCFRunLoopBase() {
+ CFRunLoopRemoveObserverFromAllModes(run_loop_, enter_exit_observer_);
+ CFRelease(enter_exit_observer_);
+
+ CFRunLoopRemoveObserverFromAllModes(run_loop_, pre_source_observer_);
+ CFRelease(pre_source_observer_);
+
+ CFRunLoopRemoveObserverFromAllModes(run_loop_, pre_wait_observer_);
+ CFRelease(pre_wait_observer_);
+
+ CFRunLoopRemoveSourceFromAllModes(run_loop_, nesting_deferred_work_source_);
+ CFRelease(nesting_deferred_work_source_);
+
+ CFRunLoopRemoveSourceFromAllModes(run_loop_, idle_work_source_);
+ CFRelease(idle_work_source_);
+
+ CFRunLoopRemoveSourceFromAllModes(run_loop_, work_source_);
+ CFRelease(work_source_);
+
+ CFRunLoopRemoveTimerFromAllModes(run_loop_, delayed_work_timer_);
+ CFRelease(delayed_work_timer_);
+
+ CFRelease(run_loop_);
+}
+
+// Must be called on the run loop thread.
+void MessagePumpCFRunLoopBase::Run(Delegate* delegate) {
+ // nesting_level_ will be incremented in EnterExitRunLoop, so set
+ // run_nesting_level_ accordingly.
+ int last_run_nesting_level = run_nesting_level_;
+ run_nesting_level_ = nesting_level_ + 1;
+
+ Delegate* last_delegate = delegate_;
+ SetDelegate(delegate);
+
+ DoRun(delegate);
+
+ // Restore the previous state of the object.
+ SetDelegate(last_delegate);
+ run_nesting_level_ = last_run_nesting_level;
+}
+
+void MessagePumpCFRunLoopBase::SetDelegate(Delegate* delegate) {
+ delegate_ = delegate;
+
+ if (delegate) {
+ // If any work showed up but could not be dispatched for want of a
+ // delegate, set it up for dispatch again now that a delegate is
+ // available.
+ if (delegateless_work_) {
+ CFRunLoopSourceSignal(work_source_);
+ delegateless_work_ = false;
+ }
+ if (delegateless_idle_work_) {
+ CFRunLoopSourceSignal(idle_work_source_);
+ delegateless_idle_work_ = false;
+ }
+ }
+}
+
+// May be called on any thread.
+void MessagePumpCFRunLoopBase::ScheduleWork() {
+ CFRunLoopSourceSignal(work_source_);
+ CFRunLoopWakeUp(run_loop_);
+}
+
+// Must be called on the run loop thread.
+void MessagePumpCFRunLoopBase::ScheduleDelayedWork(
+ const TimeTicks& delayed_work_time) {
+ TimeDelta delta = delayed_work_time - TimeTicks::Now();
+ delayed_work_fire_time_ = CFAbsoluteTimeGetCurrent() + delta.InSecondsF();
+ CFRunLoopTimerSetNextFireDate(delayed_work_timer_, delayed_work_fire_time_);
+ if (timer_slack_ == TIMER_SLACK_MAXIMUM) {
+ SetTimerTolerance(delayed_work_timer_, delta.InSecondsF() * 0.5);
+ } else {
+ SetTimerTolerance(delayed_work_timer_, 0);
+ }
+}
+
+void MessagePumpCFRunLoopBase::SetTimerSlack(TimerSlack timer_slack) {
+ timer_slack_ = timer_slack;
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::RunDelayedWorkTimer(
+ CFRunLoopTimerRef /* timer */,
+ void* info) {
+ MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+
+ // The timer won't fire again until it's reset.
+ self->delayed_work_fire_time_ = kCFTimeIntervalMax;
+
+ // CFRunLoopTimers fire outside of the priority scheme for CFRunLoopSources.
+ // In order to establish the proper priority in which work and delayed work
+ // are processed one for one, the timer used to schedule delayed work must
+ // signal a CFRunLoopSource used to dispatch both work and delayed work.
+ CFRunLoopSourceSignal(self->work_source_);
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::RunWorkSource(void* info) {
+ MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+ self->RunWork();
+}
+
+// Called by MessagePumpCFRunLoopBase::RunWorkSource.
+bool MessagePumpCFRunLoopBase::RunWork() {
+ if (!delegate_) {
+ // This point can be reached with a NULL delegate_ if Run is not on the
+ // stack but foreign code is spinning the CFRunLoop. Arrange to come back
+ // here when a delegate is available.
+ delegateless_work_ = true;
+ return false;
+ }
+
+ // The NSApplication-based run loop only drains the autorelease pool at each
+ // UI event (NSEvent). The autorelease pool is not drained for each
+ // CFRunLoopSource target that's run. Use a local pool for any autoreleased
+ // objects if the app is not currently handling a UI event to ensure they're
+ // released promptly even in the absence of UI events.
+ MessagePumpScopedAutoreleasePool autorelease_pool(this);
+
+ // Call DoWork and DoDelayedWork once, and if something was done, arrange to
+ // come back here again as long as the loop is still running.
+ bool did_work = delegate_->DoWork();
+ bool resignal_work_source = did_work;
+
+ TimeTicks next_time;
+ delegate_->DoDelayedWork(&next_time);
+ if (!did_work) {
+ // Determine whether there's more delayed work, and if so, if it needs to
+ // be done at some point in the future or if it's already time to do it.
+ // Only do these checks if did_work is false. If did_work is true, this
+ // function, and therefore any additional delayed work, will get another
+ // chance to run before the loop goes to sleep.
+ bool more_delayed_work = !next_time.is_null();
+ if (more_delayed_work) {
+ TimeDelta delay = next_time - TimeTicks::Now();
+ if (delay > TimeDelta()) {
+ // There's more delayed work to be done in the future.
+ ScheduleDelayedWork(next_time);
+ } else {
+ // There's more delayed work to be done, and its time is in the past.
+ // Arrange to come back here directly as long as the loop is still
+ // running.
+ resignal_work_source = true;
+ }
+ }
+ }
+
+ if (resignal_work_source) {
+ CFRunLoopSourceSignal(work_source_);
+ }
+
+ return resignal_work_source;
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::RunIdleWorkSource(void* info) {
+ MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+ self->RunIdleWork();
+}
+
+// Called by MessagePumpCFRunLoopBase::RunIdleWorkSource.
+bool MessagePumpCFRunLoopBase::RunIdleWork() {
+ if (!delegate_) {
+ // This point can be reached with a NULL delegate_ if Run is not on the
+ // stack but foreign code is spinning the CFRunLoop. Arrange to come back
+ // here when a delegate is available.
+ delegateless_idle_work_ = true;
+ return false;
+ }
+
+ // The NSApplication-based run loop only drains the autorelease pool at each
+ // UI event (NSEvent). The autorelease pool is not drained for each
+ // CFRunLoopSource target that's run. Use a local pool for any autoreleased
+ // objects if the app is not currently handling a UI event to ensure they're
+ // released promptly even in the absence of UI events.
+ MessagePumpScopedAutoreleasePool autorelease_pool(this);
+
+ // Call DoIdleWork once, and if something was done, arrange to come back here
+ // again as long as the loop is still running.
+ bool did_work = delegate_->DoIdleWork();
+ if (did_work) {
+ CFRunLoopSourceSignal(idle_work_source_);
+ }
+
+ return did_work;
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::RunNestingDeferredWorkSource(void* info) {
+ MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+ self->RunNestingDeferredWork();
+}
+
+// Called by MessagePumpCFRunLoopBase::RunNestingDeferredWorkSource.
+bool MessagePumpCFRunLoopBase::RunNestingDeferredWork() {
+ if (!delegate_) {
+ // This point can be reached with a NULL delegate_ if Run is not on the
+ // stack but foreign code is spinning the CFRunLoop. There's no sense in
+ // attempting to do any work or signalling the work sources because
+ // without a delegate, work is not possible.
+ return false;
+ }
+
+ // Immediately try work in priority order.
+ if (!RunWork()) {
+ if (!RunIdleWork()) {
+ return false;
+ }
+ } else {
+ // Work was done. Arrange for the loop to try non-nestable idle work on
+ // a subsequent pass.
+ CFRunLoopSourceSignal(idle_work_source_);
+ }
+
+ return true;
+}
+
+// Called before the run loop goes to sleep or exits, or processes sources.
+void MessagePumpCFRunLoopBase::MaybeScheduleNestingDeferredWork() {
+ // deepest_nesting_level_ is set as run loops are entered. If the deepest
+ // level encountered is deeper than the current level, a nested loop
+ // (relative to the current level) ran since the last time nesting-deferred
+ // work was scheduled. When that situation is encountered, schedule
+ // nesting-deferred work in case any work was deferred because nested work
+ // was disallowed.
+ if (deepest_nesting_level_ > nesting_level_) {
+ deepest_nesting_level_ = nesting_level_;
+ CFRunLoopSourceSignal(nesting_deferred_work_source_);
+ }
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::PreWaitObserver(
+ CFRunLoopObserverRef /* observer */,
+ CFRunLoopActivity /* activity */,
+ void* info) {
+ MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+
+ // Attempt to do some idle work before going to sleep.
+ self->RunIdleWork();
+
+ // The run loop is about to go to sleep. If any of the work done since it
+ // started or woke up resulted in a nested run loop running,
+ // nesting-deferred work may have accumulated. Schedule it for processing
+ // if appropriate.
+ self->MaybeScheduleNestingDeferredWork();
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::PreSourceObserver(
+ CFRunLoopObserverRef /* observer */,
+ CFRunLoopActivity /* activity */,
+ void* info) {
+ MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+
+ // The run loop has reached the top of the loop and is about to begin
+ // processing sources. If the last iteration of the loop at this nesting
+ // level did not sleep or exit, nesting-deferred work may have accumulated
+ // if a nested loop ran. Schedule nesting-deferred work for processing if
+ // appropriate.
+ self->MaybeScheduleNestingDeferredWork();
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::EnterExitObserver(
+ CFRunLoopObserverRef /* observer */,
+ CFRunLoopActivity activity,
+ void* info) {
+ MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+
+ switch (activity) {
+ case kCFRunLoopEntry:
+ ++self->nesting_level_;
+ if (self->nesting_level_ > self->deepest_nesting_level_) {
+ self->deepest_nesting_level_ = self->nesting_level_;
+ }
+ break;
+
+ case kCFRunLoopExit:
+ // Not all run loops go to sleep. If a run loop is stopped before it
+ // goes to sleep due to a CFRunLoopStop call, or if the timeout passed
+ // to CFRunLoopRunInMode expires, the run loop may proceed directly from
+ // handling sources to exiting without any sleep. This most commonly
+ // occurs when CFRunLoopRunInMode is passed a timeout of 0, causing it
+ // to make a single pass through the loop and exit without sleep. Some
+ // native loops use CFRunLoop in this way. Because PreWaitObserver will
+      // not be called in these cases, MaybeScheduleNestingDeferredWork needs
+ // to be called here, as the run loop exits.
+ //
+ // MaybeScheduleNestingDeferredWork consults self->nesting_level_
+ // to determine whether to schedule nesting-deferred work. It expects
+ // the nesting level to be set to the depth of the loop that is going
+ // to sleep or exiting. It must be called before decrementing the
+ // value so that the value still corresponds to the level of the exiting
+ // loop.
+ self->MaybeScheduleNestingDeferredWork();
+ --self->nesting_level_;
+ break;
+
+ default:
+ break;
+ }
+
+ self->EnterExitRunLoop(activity);
+}
+
+// Called by MessagePumpCFRunLoopBase::EnterExitRunLoop. The default
+// implementation is a no-op.
+void MessagePumpCFRunLoopBase::EnterExitRunLoop(
+ CFRunLoopActivity /* activity */) {
+}
+
+// Base version returns a standard NSAutoreleasePool.
+// Subclasses may override; MessagePumpCrApplication (below) returns nil while
+// AppKit is handling a -sendEvent: to avoid prematurely draining objects.
+AutoreleasePoolType* MessagePumpCFRunLoopBase::CreateAutoreleasePool() {
+  return [[NSAutoreleasePool alloc] init];
+}
+
+// quit_pending_ records a Quit() request that could not be serviced
+// immediately because an unmanaged inner loop was running (see Quit below).
+MessagePumpCFRunLoop::MessagePumpCFRunLoop()
+    : quit_pending_(false) {
+}
+
+MessagePumpCFRunLoop::~MessagePumpCFRunLoop() {}
+
+// Called by MessagePumpCFRunLoopBase::DoRun. If other CFRunLoopRun loops were
+// running lower on the run loop thread's stack when this object was created,
+// the same number of CFRunLoopRun loops must be running for the outermost call
+// to Run. Run/DoRun are reentrant after that point.
+void MessagePumpCFRunLoop::DoRun(Delegate* /* delegate */) {
+  // This is completely identical to calling CFRunLoopRun(), except autorelease
+  // pool management is introduced.
+  int result;
+  do {
+    // A fresh pool per pass; released (or skipped) per CreateAutoreleasePool.
+    MessagePumpScopedAutoreleasePool autorelease_pool(this);
+    result = CFRunLoopRunInMode(kCFRunLoopDefaultMode,
+                                kCFTimeIntervalMax,
+                                false);
+  } while (result != kCFRunLoopRunStopped && result != kCFRunLoopRunFinished);
+}
+
+// Must be called on the run loop thread.
+void MessagePumpCFRunLoop::Quit() {
+  // Stop the innermost run loop managed by this MessagePumpCFRunLoop object.
+  if (nesting_level() == run_nesting_level()) {
+    // This object is running the innermost loop, just stop it.
+    CFRunLoopStop(run_loop());
+  } else {
+    // There's another loop running inside the loop managed by this object.
+    // In other words, someone else called CFRunLoopRunInMode on the same
+    // thread, deeper on the stack than the deepest Run call. Don't preempt
+    // other run loops, just mark this object to quit the innermost Run as
+    // soon as the other inner loops not managed by Run are done.
+    quit_pending_ = true;
+  }
+}
+
+// Called by MessagePumpCFRunLoopBase::EnterExitObserver.
+// Services a Quit() that was deferred because unmanaged inner loops were
+// running at the time Quit() was called.
+void MessagePumpCFRunLoop::EnterExitRunLoop(CFRunLoopActivity activity) {
+  if (activity == kCFRunLoopExit &&
+      nesting_level() == run_nesting_level() &&
+      quit_pending_) {
+    // Quit was called while loops other than those managed by this object
+    // were running further inside a run loop managed by this object. Now
+    // that all unmanaged inner run loops are gone, stop the loop running
+    // just inside Run.
+    CFRunLoopStop(run_loop());
+    quit_pending_ = false;
+  }
+}
+
+// Installs |quit_source_|, a manually-signalled CFRunLoopSource whose only
+// purpose is to wake the loop so DoRun can observe keep_running_ == false.
+MessagePumpNSRunLoop::MessagePumpNSRunLoop()
+    : keep_running_(true) {
+  CFRunLoopSourceContext source_context = CFRunLoopSourceContext();
+  source_context.perform = NoOp;
+  quit_source_ = CFRunLoopSourceCreate(NULL,  // allocator
+                                       0,     // priority
+                                       &source_context);
+  CFRunLoopAddSourceToAllModes(run_loop(), quit_source_);
+}
+
+MessagePumpNSRunLoop::~MessagePumpNSRunLoop() {
+  CFRunLoopRemoveSourceFromAllModes(run_loop(), quit_source_);
+  CFRelease(quit_source_);
+}
+
+void MessagePumpNSRunLoop::DoRun(Delegate* /* delegate */) {
+  while (keep_running_) {
+    // NSRunLoop manages autorelease pools itself.
+    [[NSRunLoop currentRunLoop] runMode:NSDefaultRunLoopMode
+                             beforeDate:[NSDate distantFuture]];
+  }
+
+  // Reset so a subsequent Run on this pump starts in the running state.
+  keep_running_ = true;
+}
+
+void MessagePumpNSRunLoop::Quit() {
+  keep_running_ = false;
+  // Signal and wake the loop so the runMode: call above returns promptly.
+  CFRunLoopSourceSignal(quit_source_);
+  CFRunLoopWakeUp(run_loop());
+}
+
+#if defined(OS_IOS)
+MessagePumpUIApplication::MessagePumpUIApplication()
+    : run_loop_(NULL) {
+}
+
+MessagePumpUIApplication::~MessagePumpUIApplication() {}
+
+// On iOS the application object drives the outermost run loop, so this pump
+// is only ever attached to an existing loop; Run/Quit must never be invoked.
+void MessagePumpUIApplication::DoRun(Delegate* delegate) {
+  NOTREACHED();
+}
+
+void MessagePumpUIApplication::Quit() {
+  NOTREACHED();
+}
+
+// Hooks this pump up to the already-running application loop. May only be
+// called once (enforced by the DCHECK on run_loop_).
+void MessagePumpUIApplication::Attach(Delegate* delegate) {
+  DCHECK(!run_loop_);
+  run_loop_ = new RunLoop();
+  CHECK(run_loop_->BeforeRun());
+  SetDelegate(delegate);
+}
+
+#else
+
+MessagePumpNSApplication::MessagePumpNSApplication()
+    : keep_running_(true),
+      running_own_loop_(false) {
+}
+
+MessagePumpNSApplication::~MessagePumpNSApplication() {}
+
+void MessagePumpNSApplication::DoRun(Delegate* /* delegate */) {
+  // Save running_own_loop_ so nested DoRun invocations restore it on exit.
+  bool last_running_own_loop_ = running_own_loop_;
+
+  // NSApp must be initialized by calling:
+  // [{some class which implements CrAppProtocol} sharedApplication]
+  // Most likely candidates are CrApplication or BrowserCrApplication.
+  // These can be initialized from C++ code by calling
+  // RegisterCrApp() or RegisterBrowserCrApp().
+  CHECK(NSApp);
+
+  if (![NSApp isRunning]) {
+    running_own_loop_ = false;
+    // NSApplication manages autorelease pools itself when run this way.
+    [NSApp run];
+  } else {
+    // NSApp is already running (e.g. a nested invocation), so pump events
+    // manually instead of calling -run again.
+    running_own_loop_ = true;
+    NSDate* distant_future = [NSDate distantFuture];
+    while (keep_running_) {
+      MessagePumpScopedAutoreleasePool autorelease_pool(this);
+      NSEvent* event = [NSApp nextEventMatchingMask:NSAnyEventMask
+                                          untilDate:distant_future
+                                             inMode:NSDefaultRunLoopMode
+                                            dequeue:YES];
+      if (event) {
+        [NSApp sendEvent:event];
+      }
+    }
+    // Reset for a possible subsequent Run on this pump.
+    keep_running_ = true;
+  }
+
+  running_own_loop_ = last_running_own_loop_;
+}
+
+void MessagePumpNSApplication::Quit() {
+  if (!running_own_loop_) {
+    // DoRun used [NSApp run]; ask AppKit to stop it.
+    [[NSApplication sharedApplication] stop:nil];
+  } else {
+    // DoRun is pumping events manually; flag its loop to exit.
+    keep_running_ = false;
+  }
+
+  // Send a fake event to wake the loop up.
+  [NSApp postEvent:[NSEvent otherEventWithType:NSApplicationDefined
+                                      location:NSZeroPoint
+                                 modifierFlags:0
+                                     timestamp:0
+                                  windowNumber:0
+                                       context:NULL
+                                       subtype:0
+                                         data1:0
+                                         data2:0]
+           atStart:NO];
+}
+
+MessagePumpCrApplication::MessagePumpCrApplication() {
+}
+
+MessagePumpCrApplication::~MessagePumpCrApplication() {
+}
+
+// Prevents an autorelease pool from being created if the app is in the midst of
+// handling a UI event because various parts of AppKit depend on objects that
+// are created while handling a UI event to be autoreleased in the event loop.
+// An example of this is NSWindowController. When a window with a window
+// controller is closed it goes through a stack like this:
+// (Several stack frames elided for clarity)
+//
+// #0 [NSWindowController autorelease]
+// #1 DoAClose
+// #2 MessagePumpCFRunLoopBase::DoWork()
+// #3 [NSRunLoop run]
+// #4 [NSButton performClick:]
+// #5 [NSWindow sendEvent:]
+// #6 [NSApp sendEvent:]
+// #7 [NSApp run]
+//
+// -performClick: spins a nested run loop. If the pool created in DoWork was a
+// standard NSAutoreleasePool, it would release the objects that were
+// autoreleased into it once DoWork released it. This would cause the window
+// controller, which autoreleased itself in frame #0, to release itself, and
+// possibly free itself. Unfortunately this window controller controls the
+// window in frame #5. When the stack is unwound to frame #5, the window would
+// no longer exists and crashes may occur. Apple gets around this by never
+// releasing the pool it creates in frame #4, and letting frame #7 clean it up
+// when it cleans up the pool that wraps frame #7. When an autorelease pool is
+// released it releases all other pools that were created after it on the
+// autorelease pool stack.
+//
+// CrApplication is responsible for setting handlingSendEvent to true just
+// before it sends the event through the event handling mechanism, and
+// returning it to its previous value once the event has been sent.
+AutoreleasePoolType* MessagePumpCrApplication::CreateAutoreleasePool() {
+  // Returning nil suppresses pool creation for this pass (see comment above).
+  if (MessagePumpMac::IsHandlingSendEvent())
+    return nil;
+  return MessagePumpNSApplication::CreateAutoreleasePool();
+}
+
+// static
+// Reports whether the main-thread pump was built on a CrAppProtocol-conforming
+// NSApplication. Only meaningful after the pump has been created.
+bool MessagePumpMac::UsingCrApp() {
+  DCHECK([NSThread isMainThread]);
+
+  // If NSApp is still not initialized, then the subclass used cannot
+  // be determined.
+  DCHECK(NSApp);
+
+  // The pump was created using MessagePumpNSApplication.
+  if (g_not_using_cr_app)
+    return false;
+
+  return [NSApp conformsToProtocol:@protocol(CrAppProtocol)];
+}
+
+// static
+// Queries the CrAppProtocol-conforming NSApp; only valid when such an app is
+// installed (enforced by the DCHECK).
+bool MessagePumpMac::IsHandlingSendEvent() {
+  DCHECK([NSApp conformsToProtocol:@protocol(CrAppProtocol)]);
+  NSObject<CrAppProtocol>* app = static_cast<NSObject<CrAppProtocol>*>(NSApp);
+  return [app isHandlingSendEvent];
+}
+#endif  // !defined(OS_IOS)
+
+// static
+// Factory: picks the pump implementation appropriate for the current thread
+// and platform. Non-main threads always get MessagePumpNSRunLoop.
+MessagePump* MessagePumpMac::Create() {
+  if ([NSThread isMainThread]) {
+#if defined(OS_IOS)
+    return new MessagePumpUIApplication;
+#else
+    if ([NSApp conformsToProtocol:@protocol(CrAppProtocol)])
+      return new MessagePumpCrApplication;
+
+    // The main-thread MessagePump implementations REQUIRE an NSApp.
+    // Executables which have specific requirements for their
+    // NSApplication subclass should initialize appropriately before
+    // creating an event loop.
+    [NSApplication sharedApplication];
+    g_not_using_cr_app = true;
+    return new MessagePumpNSApplication;
+#endif
+  }
+
+  return new MessagePumpNSRunLoop;
+}
+
+} // namespace base
diff --git a/libchrome/base/message_loop/timer_slack.h b/libchrome/base/message_loop/timer_slack.h
new file mode 100644
index 0000000..1ad6ca9
--- /dev/null
+++ b/libchrome/base/message_loop/timer_slack.h
@@ -0,0 +1,22 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_TIMER_SLACK_H_
+#define BASE_MESSAGE_LOOP_TIMER_SLACK_H_
+
+namespace base {
+
+// Amount of timer slack to use for delayed timers. Increasing timer slack
+// allows the OS to coalesce timers more effectively.
+// NOTE(review): presumably consumed by message pump / message loop timer
+// scheduling — confirm at call sites before repurposing these values.
+enum TimerSlack {
+  // Lowest value for timer slack allowed by OS.
+  TIMER_SLACK_NONE,
+
+  // Maximal value for timer slack allowed by OS.
+  TIMER_SLACK_MAXIMUM
+};
+
+} // namespace base
+
+#endif // BASE_MESSAGE_LOOP_TIMER_SLACK_H_
diff --git a/libchrome/base/metrics/OWNERS b/libchrome/base/metrics/OWNERS
new file mode 100644
index 0000000..feb8271
--- /dev/null
+++ b/libchrome/base/metrics/OWNERS
@@ -0,0 +1,2 @@
+asvitkine@chromium.org
+isherman@chromium.org
diff --git a/libchrome/base/metrics/bucket_ranges.cc b/libchrome/base/metrics/bucket_ranges.cc
new file mode 100644
index 0000000..084cdd3
--- /dev/null
+++ b/libchrome/base/metrics/bucket_ranges.cc
@@ -0,0 +1,147 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/bucket_ranges.h"
+
+#include <cmath>
+
+#include "base/logging.h"
+
+namespace base {
+
+// Static table of checksums for all possible 8 bit bytes.
+// Generated from the reversed CRC-32 polynomial 0xedb88320 (see the comment
+// on Crc32 below); Crc32TableTest in bucket_ranges_unittest.cc verifies it.
+const uint32_t kCrcTable[256] = {
+    0x0, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x76dc419L,
+    0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0xedb8832L, 0x79dcb8a4L,
+    0xe0d5e91eL, 0x97d2d988L, 0x9b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
+    0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
+    0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
+    0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
+    0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
+    0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
+    0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
+    0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
+    0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
+    0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
+    0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
+    0x1db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x6b6b51fL,
+    0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0xf00f934L, 0x9609a88eL,
+    0xe10e9818L, 0x7f6a0dbbL, 0x86d3d2dL, 0x91646c97L, 0xe6635c01L,
+    0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
+    0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
+    0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
+    0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
+    0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
+    0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
+    0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
+    0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
+    0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
+    0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
+    0x3b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x4db2615L,
+    0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0xd6d6a3eL, 0x7a6a5aa8L,
+    0xe40ecf0bL, 0x9309ff9dL, 0xa00ae27L, 0x7d079eb1L, 0xf00f9344L,
+    0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
+    0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
+    0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
+    0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
+    0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
+    0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
+    0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
+    0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
+    0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
+    0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
+    0x26d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x5005713L,
+    0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0xcb61b38L, 0x92d28e9bL,
+    0xe5d5be0dL, 0x7cdcefb7L, 0xbdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
+    0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
+    0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
+    0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
+    0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
+    0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
+    0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
+    0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
+    0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
+    0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
+    0x2d02ef8dL,
+};
+
+// We generate the CRC-32 using the low order bits to select whether to XOR in
+// the reversed polynomial 0xedb88320L. This is nice and simple, and allows us
+// to keep the quotient in a uint32_t. Since we're not concerned about the
+// nature of corruptions (i.e., we don't care about bit sequencing, since we are
+// handling memory changes, which are more grotesque) so we don't bother to get
+// the CRC correct for big-endian vs little-endian calculations. All we need is
+// a nice hash, that tends to depend on all the bits of the sample, with very
+// little chance of changes in one place impacting changes in another place.
+//
+// Folds |value| into the running checksum |sum| and returns the new checksum.
+static uint32_t Crc32(uint32_t sum, HistogramBase::Sample value) {
+  // TODO(jar): Switch to false and watch stats.
+  const bool kUseRealCrc = true;
+
+  if (kUseRealCrc) {
+    // Standard table-driven CRC-32 update: one kCrcTable lookup per byte of
+    // the sample, reinterpreted through the union.
+    union {
+      HistogramBase::Sample range;
+      unsigned char bytes[sizeof(HistogramBase::Sample)];
+    } converter;
+    converter.range = value;
+    for (size_t i = 0; i < sizeof(converter); ++i)
+      sum = kCrcTable[(sum & 0xff) ^ converter.bytes[i]] ^ (sum >> 8);
+  } else {
+    // Use hash techniques provided in ReallyFastHash, except we don't care
+    // about "avalanching" (which would worsen the hash, and add collisions),
+    // and we don't care about edge cases since we have an even number of bytes.
+    union {
+      HistogramBase::Sample range;
+      uint16_t ints[sizeof(HistogramBase::Sample) / 2];
+    } converter;
+    DCHECK_EQ(sizeof(HistogramBase::Sample), sizeof(converter));
+    converter.range = value;
+    sum += converter.ints[0];
+    sum = (sum << 16) ^ sum ^ (static_cast<uint32_t>(converter.ints[1]) << 11);
+    sum += sum >> 11;
+  }
+  return sum;
+}
+
+// All |num_ranges| entries start at 0; the stored checksum starts at 0 and is
+// only meaningful after ResetChecksum() (or set_checksum()) is called.
+BucketRanges::BucketRanges(size_t num_ranges)
+    : ranges_(num_ranges, 0),
+      checksum_(0) {}
+
+BucketRanges::~BucketRanges() {}
+
+// Sets entry |i|; negative values are rejected (CHECK), out-of-range indices
+// are caught in debug builds only (DCHECK).
+void BucketRanges::set_range(size_t i, HistogramBase::Sample value) {
+  DCHECK_LT(i, ranges_.size());
+  CHECK_GE(value, 0);
+  ranges_[i] = value;
+}
+
+// Computes (without storing) the checksum over all entries, seeded with the
+// range count so that ranges of different sizes hash differently.
+uint32_t BucketRanges::CalculateChecksum() const {
+  // Seed checksum.
+  uint32_t checksum = static_cast<uint32_t>(ranges_.size());
+
+  for (size_t index = 0; index < ranges_.size(); ++index)
+    checksum = Crc32(checksum, ranges_[index]);
+  return checksum;
+}
+
+bool BucketRanges::HasValidChecksum() const {
+  return CalculateChecksum() == checksum_;
+}
+
+void BucketRanges::ResetChecksum() {
+  checksum_ = CalculateChecksum();
+}
+
+bool BucketRanges::Equals(const BucketRanges* other) const {
+  // Checksum and size are compared first as cheap rejections before the
+  // element-by-element scan.
+  if (checksum_ != other->checksum_)
+    return false;
+  if (ranges_.size() != other->ranges_.size())
+    return false;
+  for (size_t index = 0; index < ranges_.size(); ++index) {
+    if (ranges_[index] != other->ranges_[index])
+      return false;
+  }
+  return true;
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/bucket_ranges.h b/libchrome/base/metrics/bucket_ranges.h
new file mode 100644
index 0000000..c356195
--- /dev/null
+++ b/libchrome/base/metrics/bucket_ranges.h
@@ -0,0 +1,83 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// BucketRanges stores the vector of ranges that delimit what samples are
+// tallied in the corresponding buckets of a histogram. Histograms that have
+// same ranges for all their corresponding buckets should share the same
+// BucketRanges object.
+//
+// E.g. A 5 buckets LinearHistogram with 1 as minimal value and 4 as maximal
+// value will need a BucketRanges with 6 ranges:
+// 0, 1, 2, 3, 4, INT_MAX
+//
+// TODO(kaiwang): Currently we keep all negative values in 0~1 bucket. Consider
+// changing 0 to INT_MIN.
+
+#ifndef BASE_METRICS_BUCKET_RANGES_H_
+#define BASE_METRICS_BUCKET_RANGES_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include <limits.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+
+namespace base {
+
+class BASE_EXPORT BucketRanges {
+ public:
+  typedef std::vector<HistogramBase::Sample> Ranges;
+
+  explicit BucketRanges(size_t num_ranges);
+  ~BucketRanges();
+
+  size_t size() const { return ranges_.size(); }
+  // No bounds check in release builds; callers must pass i < size().
+  HistogramBase::Sample range(size_t i) const { return ranges_[i]; }
+  void set_range(size_t i, HistogramBase::Sample value);
+  uint32_t checksum() const { return checksum_; }
+  void set_checksum(uint32_t checksum) { checksum_ = checksum; }
+
+  // A bucket is defined by a consecutive pair of entries in |ranges|, so there
+  // is one fewer bucket than there are ranges. For example, if |ranges| is
+  // [0, 1, 3, 7, INT_MAX], then the buckets in this histogram are
+  // [0, 1), [1, 3), [3, 7), and [7, INT_MAX).
+  size_t bucket_count() const { return ranges_.size() - 1; }
+
+  // Checksum methods to verify whether the ranges are corrupted (e.g. bad
+  // memory access).
+  uint32_t CalculateChecksum() const;
+  bool HasValidChecksum() const;
+  void ResetChecksum();
+
+  // Return true iff |other| object has same ranges_ as |this| object's ranges_.
+  bool Equals(const BucketRanges* other) const;
+
+ private:
+  // A monotonically increasing list of values which determine which bucket to
+  // put a sample into. For each index, show the smallest sample that can be
+  // added to the corresponding bucket.
+  Ranges ranges_;
+
+  // Checksum for the contents of ranges_. Used to detect random over-writes
+  // of our data, and to quickly see if some other BucketRanges instance is
+  // possibly Equal() to this instance.
+  // TODO(kaiwang): Consider changing this to uint64_t. Because we see a lot of
+  // noise on UMA dashboard.
+  uint32_t checksum_;
+
+  DISALLOW_COPY_AND_ASSIGN(BucketRanges);
+};
+
+//////////////////////////////////////////////////////////////////////////////
+// Expose only for test.
+BASE_EXPORT extern const uint32_t kCrcTable[256];
+
+} // namespace base
+
+#endif // BASE_METRICS_BUCKET_RANGES_H_
diff --git a/libchrome/base/metrics/bucket_ranges_unittest.cc b/libchrome/base/metrics/bucket_ranges_unittest.cc
new file mode 100644
index 0000000..481054c
--- /dev/null
+++ b/libchrome/base/metrics/bucket_ranges_unittest.cc
@@ -0,0 +1,94 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/bucket_ranges.h"
+
+#include <stdint.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+TEST(BucketRangesTest, NormalSetup) {
+  BucketRanges ranges(5);
+  ASSERT_EQ(5u, ranges.size());
+  // 5 range boundaries delimit 4 buckets.
+  ASSERT_EQ(4u, ranges.bucket_count());
+
+  // Construction zero-initializes every entry and the checksum.
+  for (int i = 0; i < 5; ++i) {
+    EXPECT_EQ(0, ranges.range(i));
+  }
+  EXPECT_EQ(0u, ranges.checksum());
+
+  ranges.set_range(3, 100);
+  EXPECT_EQ(100, ranges.range(3));
+}
+
+TEST(BucketRangesTest, Equals) {
+  // Compare empty ranges.
+  BucketRanges ranges1(3);
+  BucketRanges ranges2(3);
+  BucketRanges ranges3(5);
+
+  EXPECT_TRUE(ranges1.Equals(&ranges2));
+  EXPECT_FALSE(ranges1.Equals(&ranges3));
+  EXPECT_FALSE(ranges2.Equals(&ranges3));
+
+  // Compare full filled ranges.
+  ranges1.set_range(0, 0);
+  ranges1.set_range(1, 1);
+  ranges1.set_range(2, 2);
+  ranges1.set_checksum(100);
+  ranges2.set_range(0, 0);
+  ranges2.set_range(1, 1);
+  ranges2.set_range(2, 2);
+  ranges2.set_checksum(100);
+
+  EXPECT_TRUE(ranges1.Equals(&ranges2));
+
+  // Checksum does not match.
+  ranges1.set_checksum(99);
+  EXPECT_FALSE(ranges1.Equals(&ranges2));
+  ranges1.set_checksum(100);
+
+  // Range does not match.
+  ranges1.set_range(1, 3);
+  EXPECT_FALSE(ranges1.Equals(&ranges2));
+}
+
+TEST(BucketRangesTest, Checksum) {
+  BucketRanges ranges(3);
+  ranges.set_range(0, 0);
+  ranges.set_range(1, 1);
+  ranges.set_range(2, 2);
+
+  // Golden values pin the current Crc32-based checksum; if the checksum
+  // algorithm changes, these constants must be regenerated.
+  ranges.ResetChecksum();
+  EXPECT_EQ(289217253u, ranges.checksum());
+
+  // Mutating a range after ResetChecksum invalidates the stored checksum.
+  ranges.set_range(2, 3);
+  EXPECT_FALSE(ranges.HasValidChecksum());
+
+  ranges.ResetChecksum();
+  EXPECT_EQ(2843835776u, ranges.checksum());
+  EXPECT_TRUE(ranges.HasValidChecksum());
+}
+
+// Table was generated similarly to sample code for CRC-32 given on:
+// http://www.w3.org/TR/PNG/#D-CRCAppendix.
+// Recomputes each table entry bit-by-bit from the reversed polynomial and
+// checks it against the precomputed kCrcTable.
+TEST(BucketRangesTest, Crc32TableTest) {
+  for (int i = 0; i < 256; ++i) {
+    uint32_t checksum = i;
+    for (int j = 0; j < 8; ++j) {
+      const uint32_t kReversedPolynomial = 0xedb88320L;
+      if (checksum & 1)
+        checksum = kReversedPolynomial ^ (checksum >> 1);
+      else
+        checksum >>= 1;
+    }
+    EXPECT_EQ(kCrcTable[i], checksum);
+  }
+}
+
+} // namespace
+} // namespace base
diff --git a/libchrome/base/metrics/field_trial.cc b/libchrome/base/metrics/field_trial.cc
new file mode 100644
index 0000000..600b94e
--- /dev/null
+++ b/libchrome/base/metrics/field_trial.cc
@@ -0,0 +1,658 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/field_trial.h"
+
+#include <algorithm>
+
+#include "base/build_time.h"
+#include "base/logging.h"
+#include "base/rand_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+
+namespace base {
+
+namespace {
+
+// Define a separator character to use when creating a persistent form of an
+// instance. This is intended for use as a command line argument, passed to a
+// second process to mimic our state (i.e., provide the same group name).
+const char kPersistentStringSeparator = '/';  // Currently a slash.
+
+// Define a marker character to be used as a prefix to a trial name on the
+// command line which forces its activation.
+// Together these two constants define the grammar consumed by
+// ParseFieldTrialsString() below.
+const char kActivationMarker = '*';
+
+// Creates a time value based on |year|, |month| and |day_of_month| parameters,
+// interpreted in local time at midnight. Argument ranges are only validated
+// in debug builds (DCHECKs).
+Time CreateTimeFromParams(int year, int month, int day_of_month) {
+  DCHECK_GT(year, 1970);
+  DCHECK_GT(month, 0);
+  DCHECK_LT(month, 13);
+  DCHECK_GT(day_of_month, 0);
+  DCHECK_LT(day_of_month, 32);
+
+  Time::Exploded exploded;
+  exploded.year = year;
+  exploded.month = month;
+  exploded.day_of_week = 0;  // Should be unused.
+  exploded.day_of_month = day_of_month;
+  exploded.hour = 0;
+  exploded.minute = 0;
+  exploded.second = 0;
+  exploded.millisecond = 0;
+  Time out_time;
+  if (!Time::FromLocalExploded(exploded, &out_time)) {
+    // TODO(maksims): implement failure handling.
+    // We might just return |out_time|, which is Time(0).
+    NOTIMPLEMENTED();
+  }
+
+  return out_time;
+}
+
+// Returns the boundary value for comparing against the FieldTrial's added
+// groups for a given |divisor| (total probability) and |entropy_value|.
+FieldTrial::Probability GetGroupBoundaryValue(
+    FieldTrial::Probability divisor,
+    double entropy_value) {
+  // Add a tiny epsilon value to get consistent results when converting floating
+  // points to int. Without it, boundary values have inconsistent results, e.g.:
+  //
+  //   static_cast<FieldTrial::Probability>(100 * 0.56) == 56
+  //   static_cast<FieldTrial::Probability>(100 * 0.57) == 56
+  //   static_cast<FieldTrial::Probability>(100 * 0.58) == 57
+  //   static_cast<FieldTrial::Probability>(100 * 0.59) == 59
+  const double kEpsilon = 1e-8;
+  const FieldTrial::Probability result =
+      static_cast<FieldTrial::Probability>(divisor * entropy_value + kEpsilon);
+  // Ensure that adding the epsilon still results in a value < |divisor|.
+  return std::min(result, divisor - 1);
+}
+
+// Parses the --force-fieldtrials string |trials_string| into |entries|.
+// Expected grammar per the parsing below: a sequence of
+// [*]<trial_name>/<group_name>/ pairs, where a leading '*' (kActivationMarker)
+// marks the trial as activated and '/' is kPersistentStringSeparator.
+// Returns true if the string was parsed correctly. On failure, the |entries|
+// array may end up being partially filled.
+bool ParseFieldTrialsString(const std::string& trials_string,
+                            std::vector<FieldTrial::State>* entries) {
+  const StringPiece trials_string_piece(trials_string);
+
+  size_t next_item = 0;
+  while (next_item < trials_string.length()) {
+    // Empty trial names are rejected.
+    size_t name_end = trials_string.find(kPersistentStringSeparator, next_item);
+    if (name_end == trials_string.npos || next_item == name_end)
+      return false;
+    size_t group_name_end =
+        trials_string.find(kPersistentStringSeparator, name_end + 1);
+    // Empty group names are rejected; a missing trailing separator is allowed.
+    if (name_end + 1 == group_name_end)
+      return false;
+    if (group_name_end == trials_string.npos)
+      group_name_end = trials_string.length();
+
+    FieldTrial::State entry;
+    // Verify if the trial should be activated or not.
+    if (trials_string[next_item] == kActivationMarker) {
+      // Name cannot be only the indicator.
+      if (name_end - next_item == 1)
+        return false;
+      next_item++;
+      entry.activated = true;
+    }
+    entry.trial_name =
+        trials_string_piece.substr(next_item, name_end - next_item);
+    entry.group_name =
+        trials_string_piece.substr(name_end + 1, group_name_end - name_end - 1);
+    next_item = group_name_end + 1;
+
+    entries->push_back(entry);
+  }
+  return true;
+}
+
+} // namespace
+
+// statics
+const int FieldTrial::kNotFinalized = -1;
+const int FieldTrial::kDefaultGroupNumber = 0;
+bool FieldTrial::enable_benchmarking_ = false;
+
+// Set from build time in the FieldTrialList constructor; 0 until then.
+int FieldTrialList::kNoExpirationYear = 0;
+
+//------------------------------------------------------------------------------
+// FieldTrial methods and members.
+
+FieldTrial::EntropyProvider::~EntropyProvider() {
+}
+
+FieldTrial::State::State() : activated(false) {}
+
+FieldTrial::State::State(const State& other) = default;
+
+FieldTrial::State::~State() {}
+
+// Disables the trial. Must happen before the group is reported (DCHECK);
+// a trial that already finalized into a non-default group is forced back to
+// the default group.
+void FieldTrial::Disable() {
+  DCHECK(!group_reported_);
+  enable_field_trial_ = false;
+
+  // In case we are disabled after initialization, we need to switch
+  // the trial to the default group.
+  if (group_ != kNotFinalized) {
+    // Only reset when not already the default group, because in case we were
+    // forced to the default group, the group number may not be
+    // kDefaultGroupNumber, so we should keep it as is.
+    if (group_name_ != default_group_name_)
+      SetGroupChoice(default_group_name_, kDefaultGroupNumber);
+  }
+}
+
+// Adds a group with the given probability weight and returns its group
+// number. The group whose cumulative probability first exceeds the
+// pre-computed random boundary (|random_|) becomes the chosen group.
+int FieldTrial::AppendGroup(const std::string& name,
+                            Probability group_probability) {
+  // When the group choice was previously forced, we only need to return the
+  // the id of the chosen group, and anything can be returned for the others.
+  if (forced_) {
+    DCHECK(!group_name_.empty());
+    if (name == group_name_) {
+      // Note that while |group_| may be equal to |kDefaultGroupNumber| on the
+      // forced trial, it will not have the same value as the default group
+      // number returned from the non-forced |FactoryGetFieldTrial()| call,
+      // which takes care to ensure that this does not happen.
+      return group_;
+    }
+    DCHECK_NE(next_group_number_, group_);
+    // We still return different numbers each time, in case some caller need
+    // them to be different.
+    return next_group_number_++;
+  }
+
+  DCHECK_LE(group_probability, divisor_);
+  DCHECK_GE(group_probability, 0);
+
+  // Benchmarking mode and disabled trials zero out every group's weight so
+  // the default group always wins.
+  if (enable_benchmarking_ || !enable_field_trial_)
+    group_probability = 0;
+
+  accumulated_group_probability_ += group_probability;
+
+  DCHECK_LE(accumulated_group_probability_, divisor_);
+  if (group_ == kNotFinalized && accumulated_group_probability_ > random_) {
+    // This is the group that crossed the random line, so we do the assignment.
+    SetGroupChoice(name, next_group_number_);
+  }
+  return next_group_number_++;
+}
+
+// Finalizes (if needed), notifies observers, and returns the group number.
+int FieldTrial::group() {
+  FinalizeGroupChoice();
+  if (trial_registered_)
+    FieldTrialList::NotifyFieldTrialGroupSelection(this);
+  return group_;
+}
+
+const std::string& FieldTrial::group_name() {
+  // Call |group()| to ensure group gets assigned and observers are notified.
+  group();
+  DCHECK(!group_name_.empty());
+  return group_name_;
+}
+
+// Like group_name(), but finalizes without notifying observers, so the trial
+// is not reported as active.
+const std::string& FieldTrial::GetGroupNameWithoutActivation() {
+  FinalizeGroupChoice();
+  return group_name_;
+}
+
+void FieldTrial::SetForced() {
+  // We might have been forced before (e.g., by CreateFieldTrial) and it's
+  // first come first served, e.g., command line switch has precedence.
+  if (forced_)
+    return;
+
+  // And we must finalize the group choice before we mark ourselves as forced.
+  FinalizeGroupChoice();
+  forced_ = true;
+}
+
+// static
+// Must be called before any trials exist (DCHECK on the trial count).
+void FieldTrial::EnableBenchmarking() {
+  DCHECK_EQ(0u, FieldTrialList::GetFieldTrialCount());
+  enable_benchmarking_ = true;
+}
+
+// static
+// Creates an unregistered trial (trial_registered_ stays false), for
+// simulating group assignment without affecting global state.
+FieldTrial* FieldTrial::CreateSimulatedFieldTrial(
+    const std::string& trial_name,
+    Probability total_probability,
+    const std::string& default_group_name,
+    double entropy_value) {
+  return new FieldTrial(trial_name, total_probability, default_group_name,
+                        entropy_value);
+}
+
+// |random_| is fixed at construction from |entropy_value|; group selection in
+// AppendGroup is deterministic given the same entropy.
+FieldTrial::FieldTrial(const std::string& trial_name,
+                       const Probability total_probability,
+                       const std::string& default_group_name,
+                       double entropy_value)
+    : trial_name_(trial_name),
+      divisor_(total_probability),
+      default_group_name_(default_group_name),
+      random_(GetGroupBoundaryValue(total_probability, entropy_value)),
+      accumulated_group_probability_(0),
+      next_group_number_(kDefaultGroupNumber + 1),
+      group_(kNotFinalized),
+      enable_field_trial_(true),
+      forced_(false),
+      group_reported_(false),
+      trial_registered_(false) {
+  DCHECK_GT(total_probability, 0);
+  DCHECK(!trial_name_.empty());
+  DCHECK(!default_group_name_.empty());
+}
+
+FieldTrial::~FieldTrial() {}
+
+void FieldTrial::SetTrialRegistered() {
+ DCHECK_EQ(kNotFinalized, group_);
+ DCHECK(!trial_registered_);
+ trial_registered_ = true;
+}
+
+void FieldTrial::SetGroupChoice(const std::string& group_name, int number) {
+ group_ = number;
+ if (group_name.empty())
+ StringAppendF(&group_name_, "%d", group_);
+ else
+ group_name_ = group_name;
+ DVLOG(1) << "Field trial: " << trial_name_ << " Group choice:" << group_name_;
+}
+
+void FieldTrial::FinalizeGroupChoice() {
+ if (group_ != kNotFinalized)
+ return;
+ accumulated_group_probability_ = divisor_;
+ // Here it's OK to use |kDefaultGroupNumber| since we can't be forced and not
+ // finalized.
+ DCHECK(!forced_);
+ SetGroupChoice(default_group_name_, kDefaultGroupNumber);
+}
+
+bool FieldTrial::GetActiveGroup(ActiveGroup* active_group) const {
+  // A trial is "active" only once its group has been reported to observers
+  // and the trial has not been disabled; otherwise |active_group| is left
+  // untouched.
+  if (group_reported_ && enable_field_trial_) {
+    DCHECK_NE(group_, kNotFinalized);
+    active_group->trial_name = trial_name_;
+    active_group->group_name = group_name_;
+    return true;
+  }
+  return false;
+}
+
+bool FieldTrial::GetState(State* field_trial_state) {
+  if (!enable_field_trial_)
+    return false;
+  // Unlike GetActiveGroup(), this forces a group choice so that trials whose
+  // group has never been queried still report a definite state.
+  FinalizeGroupChoice();
+  field_trial_state->trial_name = trial_name_;
+  field_trial_state->group_name = group_name_;
+  field_trial_state->activated = group_reported_;
+  return true;
+}
+
+//------------------------------------------------------------------------------
+// FieldTrialList methods and members.
+
+// static
+// The singleton instance: set in the constructor, cleared in the destructor.
+FieldTrialList* FieldTrialList::global_ = NULL;
+
+// static
+// Latched by Register()/GetEntropyProviderForOneTimeRandomization() when they
+// are called before a FieldTrialList exists; DCHECKed in the constructor.
+bool FieldTrialList::used_without_global_ = false;
+
+// Declared protected in the header, so callers cannot delete observers
+// through a FieldTrialList::Observer pointer.
+FieldTrialList::Observer::~Observer() {
+}
+
+FieldTrialList::FieldTrialList(
+    const FieldTrial::EntropyProvider* entropy_provider)
+    : entropy_provider_(entropy_provider),
+      observer_list_(new ObserverListThreadSafe<FieldTrialList::Observer>(
+          ObserverListBase<FieldTrialList::Observer>::NOTIFY_EXISTING_ONLY)) {
+  // Exactly one instance may exist, and no FieldTrial API that needs the
+  // singleton may have been used before it was created.
+  DCHECK(!global_);
+  DCHECK(!used_without_global_);
+  global_ = this;
+
+  // Compute the year guaranteed not to be expired for FactoryGetFieldTrial():
+  // 730 days (two years) past the build date, exploded in local time.
+  Time two_years_from_build_time = GetBuildTime() + TimeDelta::FromDays(730);
+  Time::Exploded exploded;
+  two_years_from_build_time.LocalExplode(&exploded);
+  kNoExpirationYear = exploded.year;
+}
+
+FieldTrialList::~FieldTrialList() {
+  AutoLock auto_lock(lock_);
+  // Drop the reference that Register() took on each trial. Erase by iterator:
+  // erasing by |it->first| would re-run the map lookup for a node we already
+  // hold, doing redundant O(log n) work per trial.
+  while (!registered_.empty()) {
+    RegistrationMap::iterator it = registered_.begin();
+    it->second->Release();
+    registered_.erase(it);
+  }
+  DCHECK_EQ(this, global_);
+  global_ = NULL;
+}
+
+// static
+FieldTrial* FieldTrialList::FactoryGetFieldTrial(
+    const std::string& trial_name,
+    FieldTrial::Probability total_probability,
+    const std::string& default_group_name,
+    const int year,
+    const int month,
+    const int day_of_month,
+    FieldTrial::RandomizationType randomization_type,
+    int* default_group_number) {
+  // Convenience wrapper: no custom randomization seed (0) and no override
+  // entropy provider (NULL).
+  return FactoryGetFieldTrialWithRandomizationSeed(
+      trial_name, total_probability, default_group_name, year, month,
+      day_of_month, randomization_type, 0, default_group_number, NULL);
+}
+
+// static
+FieldTrial* FieldTrialList::FactoryGetFieldTrialWithRandomizationSeed(
+    const std::string& trial_name,
+    FieldTrial::Probability total_probability,
+    const std::string& default_group_name,
+    const int year,
+    const int month,
+    const int day_of_month,
+    FieldTrial::RandomizationType randomization_type,
+    uint32_t randomization_seed,
+    int* default_group_number,
+    const FieldTrial::EntropyProvider* override_entropy_provider) {
+  if (default_group_number)
+    *default_group_number = FieldTrial::kDefaultGroupNumber;
+  // Check if the field trial has already been created in some other way.
+  FieldTrial* existing_trial = Find(trial_name);
+  if (existing_trial) {
+    // Only a forced trial (see CreateFieldTrial()) may legitimately pre-exist
+    // a factory call for the same name.
+    CHECK(existing_trial->forced_);
+    // If the default group name differs between the existing forced trial
+    // and this trial, then use a different value for the default group number.
+    if (default_group_number &&
+        default_group_name != existing_trial->default_group_name()) {
+      // If the new default group number corresponds to the group that was
+      // chosen for the forced trial (which has been finalized when it was
+      // forced), then set the default group number to that.
+      if (default_group_name == existing_trial->group_name_internal()) {
+        *default_group_number = existing_trial->group_;
+      } else {
+        // Otherwise, use |kNonConflictingGroupNumber| (-2) for the default
+        // group number, so that it does not conflict with the |AppendGroup()|
+        // result for the chosen group.
+        const int kNonConflictingGroupNumber = -2;
+        static_assert(
+            kNonConflictingGroupNumber != FieldTrial::kDefaultGroupNumber,
+            "The 'non-conflicting' group number conflicts");
+        static_assert(kNonConflictingGroupNumber != FieldTrial::kNotFinalized,
+                      "The 'non-conflicting' group number conflicts");
+        *default_group_number = kNonConflictingGroupNumber;
+      }
+    }
+    return existing_trial;
+  }
+
+  double entropy_value;
+  if (randomization_type == FieldTrial::ONE_TIME_RANDOMIZED) {
+    // If an override entropy provider is given, use it.
+    const FieldTrial::EntropyProvider* entropy_provider =
+        override_entropy_provider ? override_entropy_provider
+                                  : GetEntropyProviderForOneTimeRandomization();
+    CHECK(entropy_provider);
+    entropy_value = entropy_provider->GetEntropyForTrial(trial_name,
+                                                         randomization_seed);
+  } else {
+    // Session-randomized trials roll the dice fresh on every startup; a
+    // custom seed is meaningless for them.
+    DCHECK_EQ(FieldTrial::SESSION_RANDOMIZED, randomization_type);
+    DCHECK_EQ(0U, randomization_seed);
+    entropy_value = RandDouble();
+  }
+
+  FieldTrial* field_trial = new FieldTrial(trial_name, total_probability,
+                                           default_group_name, entropy_value);
+  // A trial whose expiration date predates the build time starts disabled,
+  // i.e. it always selects the default group (see FieldTrial::Disable()).
+  if (GetBuildTime() > CreateTimeFromParams(year, month, day_of_month))
+    field_trial->Disable();
+  FieldTrialList::Register(field_trial);
+  return field_trial;
+}
+
+// static
+FieldTrial* FieldTrialList::Find(const std::string& trial_name) {
+  // NULL when no FieldTrialList singleton exists yet.
+  if (!global_)
+    return NULL;
+  AutoLock auto_lock(global_->lock_);
+  return global_->PreLockedFind(trial_name);
+}
+
+// static
+int FieldTrialList::FindValue(const std::string& trial_name) {
+  FieldTrial* field_trial = Find(trial_name);
+  // Note: group() finalizes and activates the trial if it exists.
+  if (field_trial)
+    return field_trial->group();
+  return FieldTrial::kNotFinalized;
+}
+
+// static
+std::string FieldTrialList::FindFullName(const std::string& trial_name) {
+  // group_name() finalizes and activates the trial if it exists; an unknown
+  // trial yields the empty string.
+  FieldTrial* trial = Find(trial_name);
+  return trial ? trial->group_name() : std::string();
+}
+
+// static
+bool FieldTrialList::TrialExists(const std::string& trial_name) {
+  // Mere registration; the trial need not have a finalized or reported group.
+  return Find(trial_name) != NULL;
+}
+
+// static
+bool FieldTrialList::IsTrialActive(const std::string& trial_name) {
+  FieldTrial* field_trial = Find(trial_name);
+  FieldTrial::ActiveGroup active_group;
+  // "Active" means registered, enabled, and group already reported (see
+  // FieldTrial::GetActiveGroup()).
+  return field_trial && field_trial->GetActiveGroup(&active_group);
+}
+
+// static
+void FieldTrialList::StatesToString(std::string* output) {
+  // Serialize only the activated (externally observed, enabled) trials as
+  // alternating trial/group names, each followed by the separator.
+  FieldTrial::ActiveGroups active_groups;
+  GetActiveFieldTrialGroups(&active_groups);
+  for (const FieldTrial::ActiveGroup& group : active_groups) {
+    // Names must not contain the separator, or the string cannot be parsed
+    // back by CreateTrialsFromString().
+    DCHECK_EQ(std::string::npos,
+              group.trial_name.find(kPersistentStringSeparator));
+    DCHECK_EQ(std::string::npos,
+              group.group_name.find(kPersistentStringSeparator));
+    output->append(group.trial_name);
+    output->append(1, kPersistentStringSeparator);
+    output->append(group.group_name);
+    output->append(1, kPersistentStringSeparator);
+  }
+}
+
+// static
+void FieldTrialList::AllStatesToString(std::string* output) {
+  if (!global_)
+    return;
+  AutoLock auto_lock(global_->lock_);
+
+  // Serializes every enabled trial, activated or not; activated trials get
+  // |kActivationMarker| prepended to their name. Contrast StatesToString(),
+  // which emits only activated trials.
+  for (const auto& registered : global_->registered_) {
+    FieldTrial::State trial;
+    if (!registered.second->GetState(&trial))
+      continue;
+    // Names must not contain the separator, or the string cannot be parsed
+    // back by CreateTrialsFromString().
+    DCHECK_EQ(std::string::npos,
+              trial.trial_name.find(kPersistentStringSeparator));
+    DCHECK_EQ(std::string::npos,
+              trial.group_name.find(kPersistentStringSeparator));
+    if (trial.activated)
+      output->append(1, kActivationMarker);
+    trial.trial_name.AppendToString(output);
+    output->append(1, kPersistentStringSeparator);
+    trial.group_name.AppendToString(output);
+    output->append(1, kPersistentStringSeparator);
+  }
+}
+
+// static
+void FieldTrialList::GetActiveFieldTrialGroups(
+    FieldTrial::ActiveGroups* active_groups) {
+  // Snapshot, under the lock, every registered trial whose group has been
+  // reported and which is not disabled.
+  DCHECK(active_groups->empty());
+  if (!global_)
+    return;
+  AutoLock auto_lock(global_->lock_);
+
+  for (const auto& registration : global_->registered_) {
+    FieldTrial::ActiveGroup active_group;
+    if (registration.second->GetActiveGroup(&active_group))
+      active_groups->push_back(active_group);
+  }
+}
+
+// static
+void FieldTrialList::GetActiveFieldTrialGroupsFromString(
+    const std::string& trials_string,
+    FieldTrial::ActiveGroups* active_groups) {
+  // Parse the serialized form, then keep only the entries that carried the
+  // activation marker.
+  std::vector<FieldTrial::State> entries;
+  if (!ParseFieldTrialsString(trials_string, &entries))
+    return;
+
+  for (const auto& entry : entries) {
+    if (!entry.activated)
+      continue;
+    FieldTrial::ActiveGroup group;
+    group.trial_name = entry.trial_name.as_string();
+    group.group_name = entry.group_name.as_string();
+    active_groups->push_back(group);
+  }
+}
+
+// static
+bool FieldTrialList::CreateTrialsFromString(
+    const std::string& trials_string,
+    const std::set<std::string>& ignored_trial_names) {
+  DCHECK(global_);
+  // An empty string (or, in release builds, a missing singleton) counts as
+  // success.
+  if (trials_string.empty() || !global_)
+    return true;
+
+  std::vector<FieldTrial::State> entries;
+  if (!ParseFieldTrialsString(trials_string, &entries))
+    return false;
+
+  for (const auto& entry : entries) {
+    const std::string trial_name = entry.trial_name.as_string();
+    const std::string group_name = entry.group_name.as_string();
+
+    if (ContainsKey(ignored_trial_names, trial_name))
+      continue;
+
+    // Fails (returns NULL) if a same-named trial exists with a different
+    // finalized group.
+    FieldTrial* trial = CreateFieldTrial(trial_name, group_name);
+    if (!trial)
+      return false;
+    if (entry.activated) {
+      // Call |group()| to mark the trial as "used" and notify observers, if
+      // any. This is useful to ensure that field trials created in child
+      // processes are properly reported in crash reports.
+      trial->group();
+    }
+  }
+  return true;
+}
+
+// static
+FieldTrial* FieldTrialList::CreateFieldTrial(
+    const std::string& name,
+    const std::string& group_name) {
+  DCHECK(global_);
+  // (The former DCHECK_GE(name.size(), 0u) checks were tautologies --
+  // size() is unsigned -- and have been removed.)
+  // Gracefully reject empty names, and a missing singleton in release builds.
+  if (name.empty() || group_name.empty() || !global_)
+    return NULL;
+
+  FieldTrial* field_trial = FieldTrialList::Find(name);
+  if (field_trial) {
+    // In single process mode, or when we force them from the command line,
+    // we may have already created the field trial.
+    if (field_trial->group_name_internal() != group_name)
+      return NULL;
+    return field_trial;
+  }
+  // Probability is irrelevant here: the trial is forced directly into
+  // |group_name|, so 100%/entropy 0 are placeholders.
+  const int kTotalProbability = 100;
+  field_trial = new FieldTrial(name, kTotalProbability, group_name, 0);
+  FieldTrialList::Register(field_trial);
+  // Force the trial, which will also finalize the group choice.
+  field_trial->SetForced();
+  return field_trial;
+}
+
+// static
+void FieldTrialList::AddObserver(Observer* observer) {
+  // Silently ignored when no FieldTrialList singleton exists.
+  if (!global_)
+    return;
+  global_->observer_list_->AddObserver(observer);
+}
+
+// static
+void FieldTrialList::RemoveObserver(Observer* observer) {
+  // Silently ignored when no FieldTrialList singleton exists.
+  if (!global_)
+    return;
+  global_->observer_list_->RemoveObserver(observer);
+}
+
+// static
+void FieldTrialList::NotifyFieldTrialGroupSelection(FieldTrial* field_trial) {
+  if (!global_)
+    return;
+
+  {
+    // Only the first finalization is reported. The lock is deliberately
+    // released before notifying, so observer callbacks never run under
+    // |lock_|.
+    AutoLock auto_lock(global_->lock_);
+    if (field_trial->group_reported_)
+      return;
+    field_trial->group_reported_ = true;
+  }
+
+  // Disabled trials are marked as reported above but never announced.
+  if (!field_trial->enable_field_trial_)
+    return;
+
+  global_->observer_list_->Notify(
+      FROM_HERE, &FieldTrialList::Observer::OnFieldTrialGroupFinalized,
+      field_trial->trial_name(), field_trial->group_name_internal());
+}
+
+// static
+size_t FieldTrialList::GetFieldTrialCount() {
+  // Zero when no FieldTrialList singleton exists.
+  if (!global_)
+    return 0;
+  AutoLock auto_lock(global_->lock_);
+  return global_->registered_.size();
+}
+
+// static
+const FieldTrial::EntropyProvider*
+    FieldTrialList::GetEntropyProviderForOneTimeRandomization() {
+  // NULL both when no singleton exists (recorded as misuse) and when the
+  // list was constructed without an entropy provider.
+  if (!global_) {
+    used_without_global_ = true;
+    return NULL;
+  }
+
+  return global_->entropy_provider_.get();
+}
+
+// Looks |name| up in |registered_|; caller must already hold |lock_|.
+FieldTrial* FieldTrialList::PreLockedFind(const std::string& name) {
+  RegistrationMap::iterator it = registered_.find(name);
+  return it == registered_.end() ? NULL : it->second;
+}
+
+// static
+void FieldTrialList::Register(FieldTrial* trial) {
+  if (!global_) {
+    // Record the misuse; a FieldTrialList constructed later will DCHECK.
+    used_without_global_ = true;
+    return;
+  }
+  AutoLock auto_lock(global_->lock_);
+  // Double registration of the same trial name is a fatal error.
+  CHECK(!global_->PreLockedFind(trial->trial_name())) << trial->trial_name();
+  // The list holds a reference for its lifetime; released in ~FieldTrialList.
+  trial->AddRef();
+  trial->SetTrialRegistered();
+  global_->registered_[trial->trial_name()] = trial;
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/field_trial.h b/libchrome/base/metrics/field_trial.h
new file mode 100644
index 0000000..28a4606
--- /dev/null
+++ b/libchrome/base/metrics/field_trial.h
@@ -0,0 +1,531 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// FieldTrial is a class for handling details of statistical experiments
+// performed by actual users in the field (i.e., in a shipped or beta product).
+// All code is called exclusively on the UI thread currently.
+//
+// The simplest example is an experiment to see whether one of two options
+// produces "better" results across our user population. In that scenario, UMA
+// data is uploaded to aggregate the test results, and this FieldTrial class
+// manages the state of each such experiment (state == which option was
+// pseudo-randomly selected).
+//
+// States are typically generated randomly, either based on a one time
+// randomization (which will yield the same results, in terms of selecting
+// the client for a field trial or not, for every run of the program on a
+// given machine), or by a session randomization (generated each time the
+// application starts up, but held constant during the duration of the
+// process).
+
+//------------------------------------------------------------------------------
+// Example: Suppose we have an experiment involving memory, such as determining
+// the impact of some pruning algorithm.
+// We assume that we already have a histogram of memory usage, such as:
+
+// UMA_HISTOGRAM_COUNTS("Memory.RendererTotal", count);
+
+// Somewhere in main thread initialization code, we'd probably define an
+// instance of a FieldTrial, with code such as:
+
+// // FieldTrials are reference counted, and persist automagically until
+// // process teardown, courtesy of their automatic registration in
+// // FieldTrialList.
+// // Note: This field trial will run in Chrome instances compiled through
+// // 8 July, 2015, and after that all instances will be in "StandardMem".
+// scoped_refptr<base::FieldTrial> trial(
+// base::FieldTrialList::FactoryGetFieldTrial(
+// "MemoryExperiment", 1000, "StandardMem", 2015, 7, 8,
+// base::FieldTrial::ONE_TIME_RANDOMIZED, NULL));
+//
+// const int high_mem_group =
+// trial->AppendGroup("HighMem", 20); // 2% in HighMem group.
+// const int low_mem_group =
+// trial->AppendGroup("LowMem", 20); // 2% in LowMem group.
+// // Take action depending on which group we randomly land in.
+// if (trial->group() == high_mem_group)
+// SetPruningAlgorithm(kType1); // Sample setting of browser state.
+// else if (trial->group() == low_mem_group)
+// SetPruningAlgorithm(kType2); // Sample alternate setting.
+
+//------------------------------------------------------------------------------
+
+#ifndef BASE_METRICS_FIELD_TRIAL_H_
+#define BASE_METRICS_FIELD_TRIAL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/observer_list_threadsafe.h"
+#include "base/strings/string_piece.h"
+#include "base/synchronization/lock.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class FieldTrialList;
+
+class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
+ public:
+  typedef int Probability;  // Probability type for being selected in a trial.
+
+  // Specifies the persistence of the field trial group choice.
+  enum RandomizationType {
+    // One time randomized trials will persist the group choice between
+    // restarts, which is recommended for most trials, especially those that
+    // change user visible behavior.
+    ONE_TIME_RANDOMIZED,
+    // Session randomized trials will roll the dice to select a group on every
+    // process restart.
+    SESSION_RANDOMIZED,
+  };
+
+  // EntropyProvider is an interface for providing entropy for one-time
+  // randomized (persistent) field trials.
+  class BASE_EXPORT EntropyProvider {
+   public:
+    virtual ~EntropyProvider();
+
+    // Returns a double in the range of [0, 1) to be used for the dice roll for
+    // the specified field trial. If |randomization_seed| is not 0, it will be
+    // used in preference to |trial_name| for generating the entropy by entropy
+    // providers that support it. A given instance should always return the same
+    // value given the same input |trial_name| and |randomization_seed| values.
+    virtual double GetEntropyForTrial(const std::string& trial_name,
+                                      uint32_t randomization_seed) const = 0;
+  };
+
+  // A pair representing a Field Trial and its selected group.
+  struct ActiveGroup {
+    std::string trial_name;
+    std::string group_name;
+  };
+
+  // A triplet representing a FieldTrial, its selected group and whether it's
+  // active. The StringPieces are views; they must not outlive the trial they
+  // were obtained from.
+  struct BASE_EXPORT State {
+    StringPiece trial_name;
+    StringPiece group_name;
+    bool activated;
+
+    State();
+    State(const State& other);
+    ~State();
+  };
+
+  typedef std::vector<ActiveGroup> ActiveGroups;
+
+  // A return value to indicate that a given instance has not yet had a group
+  // assignment (and hence is not yet participating in the trial).
+  static const int kNotFinalized;
+
+  // Disables this trial, meaning it always determines the default group
+  // has been selected. May be called immediately after construction, or
+  // at any time after initialization (should not be interleaved with
+  // AppendGroup calls). Once disabled, there is no way to re-enable a
+  // trial.
+  // TODO(mad): http://code.google.com/p/chromium/issues/detail?id=121446
+  // This doesn't properly reset to Default when a group was forced.
+  void Disable();
+
+  // Establish the name and probability of the next group in this trial.
+  // Sometimes, based on construction randomization, this call may cause the
+  // provided group to be *THE* group selected for use in this instance.
+  // The return value is the group number of the new group.
+  int AppendGroup(const std::string& name, Probability group_probability);
+
+  // Return the name of the FieldTrial (excluding the group name).
+  const std::string& trial_name() const { return trial_name_; }
+
+  // Return the randomly selected group number that was assigned, and notify
+  // any/all observers that this finalized group number has presumably been used
+  // (queried), and will never change. Note that this will force an instance to
+  // participate, and make it illegal to attempt to probabilistically add any
+  // other groups to the trial.
+  int group();
+
+  // If the group's name is empty, a string version containing the group number
+  // is used as the group name. This causes a winner to be chosen if none was.
+  const std::string& group_name();
+
+  // Finalizes the group choice and returns the chosen group, but does not mark
+  // the trial as active - so its state will not be reported until group_name()
+  // or similar is called.
+  const std::string& GetGroupNameWithoutActivation();
+
+  // Set the field trial as forced, meaning that it was set up earlier than
+  // the hard coded registration of the field trial to override it.
+  // This allows the code that was hard coded to register the field trial to
+  // still succeed even though the field trial has already been registered.
+  // This must be called after appending all the groups, since we will make
+  // the group choice here. Note that this is a NOOP for already forced trials.
+  // And, as the rest of the FieldTrial code, this is not thread safe and must
+  // be done from the UI thread.
+  void SetForced();
+
+  // Enable benchmarking sets field trials to a common setting.
+  static void EnableBenchmarking();
+
+  // Creates a FieldTrial object with the specified parameters, to be used for
+  // simulation of group assignment without actually affecting global field
+  // trial state in the running process. Group assignment will be done based on
+  // |entropy_value|, which must have a range of [0, 1).
+  //
+  // Note: Using this function will not register the field trial globally in the
+  // running process - for that, use FieldTrialList::FactoryGetFieldTrial().
+  //
+  // The ownership of the returned FieldTrial is transferred to the caller which
+  // is responsible for deref'ing it (e.g. by using scoped_refptr<FieldTrial>).
+  static FieldTrial* CreateSimulatedFieldTrial(
+      const std::string& trial_name,
+      Probability total_probability,
+      const std::string& default_group_name,
+      double entropy_value);
+
+ private:
+  // Allow tests to access our innards for testing purposes.
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, Registration);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, AbsoluteProbabilities);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, RemainingProbability);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, FiftyFiftyProbability);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, MiddleProbabilities);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, OneWinner);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, DisableProbability);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, ActiveGroups);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, AllGroups);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, ActiveGroupsNotFinalized);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, Save);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SaveAll);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, DuplicateRestore);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedTurnFeatureOff);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedTurnFeatureOn);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedChangeDefault_Default);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedChangeDefault_NonDefault);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, FloatBoundariesGiveEqualGroupSizes);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, DoesNotSurpassTotalProbability);
+
+  friend class base::FieldTrialList;
+
+  friend class RefCounted<FieldTrial>;
+
+  // This is the group number of the 'default' group when a choice wasn't forced
+  // by a call to FieldTrialList::CreateFieldTrial. It is kept private so that
+  // consumers don't use it by mistake in cases where the group was forced.
+  static const int kDefaultGroupNumber;
+
+  // Creates a field trial with the specified parameters. Group assignment will
+  // be done based on |entropy_value|, which must have a range of [0, 1).
+  FieldTrial(const std::string& trial_name,
+             Probability total_probability,
+             const std::string& default_group_name,
+             double entropy_value);
+  virtual ~FieldTrial();
+
+  // Return the default group name of the FieldTrial.
+  std::string default_group_name() const { return default_group_name_; }
+
+  // Marks this trial as having been registered with the FieldTrialList. Must be
+  // called no more than once and before any |group()| calls have occurred.
+  void SetTrialRegistered();
+
+  // Sets the chosen group name and number.
+  void SetGroupChoice(const std::string& group_name, int number);
+
+  // Ensures that a group is chosen, if it hasn't yet been. The field trial
+  // might yet be disabled, so this call will *not* notify observers of the
+  // status.
+  void FinalizeGroupChoice();
+
+  // Returns the trial name and selected group name for this field trial via
+  // the output parameter |active_group|, but only if the group has already
+  // been chosen and has been externally observed via |group()| and the trial
+  // has not been disabled. In that case, true is returned and |active_group|
+  // is filled in; otherwise, the result is false and |active_group| is left
+  // untouched.
+  bool GetActiveGroup(ActiveGroup* active_group) const;
+
+  // Returns the trial name and selected group name for this field trial via
+  // the output parameter |field_trial_state|, but only if the trial has not
+  // been disabled. In that case, true is returned and |field_trial_state| is
+  // filled in; otherwise, the result is false and |field_trial_state| is left
+  // untouched.
+  bool GetState(State* field_trial_state);
+
+  // Returns the group_name. A winner need not have been chosen.
+  std::string group_name_internal() const { return group_name_; }
+
+  // The name of the field trial, as can be found via the FieldTrialList.
+  const std::string trial_name_;
+
+  // The maximum sum of all probabilities supplied, which corresponds to 100%.
+  // This is the scaling factor used to adjust supplied probabilities.
+  const Probability divisor_;
+
+  // The name of the default group.
+  const std::string default_group_name_;
+
+  // The randomly selected probability that is used to select a group (or have
+  // the instance not participate). It is the product of divisor_ and a random
+  // number between [0, 1).
+  Probability random_;
+
+  // Sum of the probabilities of all appended groups.
+  Probability accumulated_group_probability_;
+
+  // The number that will be returned by the next AppendGroup() call.
+  int next_group_number_;
+
+  // The pseudo-randomly assigned group number.
+  // This is kNotFinalized if no group has been assigned.
+  int group_;
+
+  // A textual name for the randomly selected group. Valid after |group()|
+  // has been called.
+  std::string group_name_;
+
+  // When enable_field_trial_ is false, field trial reverts to the 'default'
+  // group.
+  bool enable_field_trial_;
+
+  // When forced_ is true, we return the chosen group from AppendGroup when
+  // appropriate.
+  bool forced_;
+
+  // Specifies whether the group choice has been reported to observers.
+  bool group_reported_;
+
+  // Whether this trial is registered with the global FieldTrialList and thus
+  // should notify it when its group is queried.
+  bool trial_registered_;
+
+  // When benchmarking is enabled, field trials all revert to the 'default'
+  // group.
+  static bool enable_benchmarking_;
+
+  DISALLOW_COPY_AND_ASSIGN(FieldTrial);
+};
+
+//------------------------------------------------------------------------------
+// Class with a list of all active field trials. A trial is active if it has
+// been registered, which includes evaluating its state based on its probability.
+// Only one instance of this class exists.
+class BASE_EXPORT FieldTrialList {
+ public:
+  // Year that is guaranteed to not be expired when instantiating a field trial
+  // via |FactoryGetFieldTrial()|. Set to two years from the build date.
+  static int kNoExpirationYear;
+
+  // Observer is notified when a FieldTrial's group is selected.
+  class BASE_EXPORT Observer {
+   public:
+    // Notify observers when a FieldTrial's group is selected.
+    virtual void OnFieldTrialGroupFinalized(const std::string& trial_name,
+                                            const std::string& group_name) = 0;
+
+   protected:
+    virtual ~Observer();
+  };
+
+  // This singleton holds the global list of registered FieldTrials.
+  //
+  // To support one-time randomized field trials, specify a non-NULL
+  // |entropy_provider| which should be a source of uniformly distributed
+  // entropy values. Takes ownership of |entropy_provider|. If one time
+  // randomization is not desired, pass in NULL for |entropy_provider|.
+  explicit FieldTrialList(const FieldTrial::EntropyProvider* entropy_provider);
+
+  // Destructor Release()'s references to all registered FieldTrial instances.
+  ~FieldTrialList();
+
+  // Get a FieldTrial instance from the factory.
+  //
+  // |name| is used to register the instance with the FieldTrialList class,
+  // and can be used to find the trial (only one trial can be present for each
+  // name). |default_group_name| is the name of the default group which will
+  // be chosen if none of the subsequent appended groups get to be chosen.
+  // |default_group_number| can receive the group number of the default group as
+  // AppendGroup returns the number of the subsequent groups. |trial_name| and
+  // |default_group_name| may not be empty but |default_group_number| can be
+  // NULL if the value is not needed.
+  //
+  // Group probabilities that are later supplied must sum to less than or equal
+  // to the |total_probability|. Arguments |year|, |month| and |day_of_month|
+  // specify the expiration time. If the build time is after the expiration time
+  // then the field trial reverts to the 'default' group.
+  //
+  // Use this static method to get a startup-randomized FieldTrial or a
+  // previously created forced FieldTrial.
+  static FieldTrial* FactoryGetFieldTrial(
+      const std::string& trial_name,
+      FieldTrial::Probability total_probability,
+      const std::string& default_group_name,
+      const int year,
+      const int month,
+      const int day_of_month,
+      FieldTrial::RandomizationType randomization_type,
+      int* default_group_number);
+
+  // Same as FactoryGetFieldTrial(), but allows specifying a custom seed to be
+  // used on one-time randomized field trials (instead of a hash of the trial
+  // name, which is used otherwise or if |randomization_seed| has value 0). The
+  // |randomization_seed| value (other than 0) should never be the same for two
+  // trials, else this would result in correlated group assignments. Note:
+  // Using a custom randomization seed is only supported by the
+  // PermutedEntropyProvider (which is used when UMA is not enabled). If
+  // |override_entropy_provider| is not null, then it will be used for
+  // randomization instead of the provider given when the FieldTrialList was
+  // instantiated.
+  static FieldTrial* FactoryGetFieldTrialWithRandomizationSeed(
+      const std::string& trial_name,
+      FieldTrial::Probability total_probability,
+      const std::string& default_group_name,
+      const int year,
+      const int month,
+      const int day_of_month,
+      FieldTrial::RandomizationType randomization_type,
+      uint32_t randomization_seed,
+      int* default_group_number,
+      const FieldTrial::EntropyProvider* override_entropy_provider);
+
+  // The Find() method can be used to test to see if a named trial was already
+  // registered, or to retrieve a pointer to it from the global map.
+  static FieldTrial* Find(const std::string& trial_name);
+
+  // Returns the group number chosen for the named trial, or
+  // FieldTrial::kNotFinalized if the trial does not exist.
+  static int FindValue(const std::string& trial_name);
+
+  // Returns the group name chosen for the named trial, or the empty string if
+  // the trial does not exist. The first call of this function on a given field
+  // trial will mark it as active, so that its state will be reported with usage
+  // metrics, crashes, etc.
+  static std::string FindFullName(const std::string& trial_name);
+
+  // Returns true if the named trial has been registered.
+  static bool TrialExists(const std::string& trial_name);
+
+  // Returns true if the named trial exists and has been activated.
+  static bool IsTrialActive(const std::string& trial_name);
+
+  // Creates a persistent representation of active FieldTrial instances for
+  // resurrection in another process. This allows randomization to be done in
+  // one process, and secondary processes can be synchronized on the result.
+  // The resulting string contains the name and group name pairs of all
+  // registered FieldTrials for which the group has been chosen and externally
+  // observed (via |group()|) and which have not been disabled, with "/" used
+  // to separate all names and to terminate the string. This string is parsed
+  // by |CreateTrialsFromString()|.
+  static void StatesToString(std::string* output);
+
+  // Creates a persistent representation of all FieldTrial instances for
+  // resurrection in another process. This allows randomization to be done in
+  // one process, and secondary processes can be synchronized on the result.
+  // The resulting string contains the name and group name pairs of all
+  // registered FieldTrials which have not been disabled, with "/" used
+  // to separate all names and to terminate the string. All activated trials
+  // have their name prefixed with "*". This string is parsed by
+  // |CreateTrialsFromString()|.
+  static void AllStatesToString(std::string* output);
+
+  // Fills in the supplied vector |active_groups| (which must be empty when
+  // called) with a snapshot of all registered FieldTrials for which the group
+  // has been chosen and externally observed (via |group()|) and which have
+  // not been disabled.
+  static void GetActiveFieldTrialGroups(
+      FieldTrial::ActiveGroups* active_groups);
+
+  // Returns the field trials that are marked active in |trials_string|.
+  static void GetActiveFieldTrialGroupsFromString(
+      const std::string& trials_string,
+      FieldTrial::ActiveGroups* active_groups);
+
+  // Use a state string (re: StatesToString()) to augment the current list of
+  // field trials to include the supplied trials, and using a 100% probability
+  // for each trial, force them to have the same group string. This is commonly
+  // used in a non-browser process, to carry randomly selected state in a
+  // browser process into this non-browser process, but could also be invoked
+  // through a command line argument to the browser process. Created field
+  // trials will be marked "used" for the purposes of active trial reporting
+  // if they are prefixed with |kActivationMarker|. Trial names in
+  // |ignored_trial_names| are ignored when parsing |trials_string|.
+  static bool CreateTrialsFromString(
+      const std::string& trials_string,
+      const std::set<std::string>& ignored_trial_names);
+
+  // Create a FieldTrial with the given |name| and using 100% probability for
+  // the FieldTrial, force FieldTrial to have the same group string as
+  // |group_name|. This is commonly used in a non-browser process, to carry
+  // randomly selected state in a browser process into this non-browser process.
+  // It returns NULL if there is a FieldTrial that is already registered with
+  // the same |name| but has different finalized group string (|group_name|).
+  static FieldTrial* CreateFieldTrial(const std::string& name,
+                                      const std::string& group_name);
+
+  // Add an observer to be notified when a field trial is irrevocably committed
+  // to being part of some specific field_group (and hence the group_name is
+  // also finalized for that field_trial).
+  static void AddObserver(Observer* observer);
+
+  // Remove an observer.
+  static void RemoveObserver(Observer* observer);
+
+  // Notify all observers that a group has been finalized for |field_trial|.
+  static void NotifyFieldTrialGroupSelection(FieldTrial* field_trial);
+
+  // Return the number of active field trials.
+  static size_t GetFieldTrialCount();
+
+ private:
+  // A map from FieldTrial names to the actual instances.
+  typedef std::map<std::string, FieldTrial*> RegistrationMap;
+
+  // If one-time randomization is enabled, returns a weak pointer to the
+  // corresponding EntropyProvider. Otherwise, returns NULL.
+  static const FieldTrial::EntropyProvider*
+      GetEntropyProviderForOneTimeRandomization();
+
+  // Helper function should be called only while holding lock_.
+  FieldTrial* PreLockedFind(const std::string& name);
+
+  // Register() stores a pointer to the given trial in a global map.
+  // This method also AddRef's the indicated trial.
+  // This should always be called after creating a new FieldTrial instance.
+  static void Register(FieldTrial* trial);
+
+  static FieldTrialList* global_;  // The singleton of this class.
+
+  // This will tell us if there is an attempt to register a field
+  // trial or check if one-time randomization is enabled without
+  // creating the FieldTrialList. This is not an error, unless a
+  // FieldTrialList is created after that.
+  static bool used_without_global_;
+
+  // Lock for access to registered_.
+  base::Lock lock_;
+  RegistrationMap registered_;
+
+  // NOTE(review): no uses of |seen_states_| are visible in this file or the
+  // accompanying .cc chunk -- confirm its intended purpose before relying on
+  // it.
+  std::map<std::string, std::string> seen_states_;
+
+  // Entropy provider to be used for one-time randomized field trials. If NULL,
+  // one-time randomization is not supported.
+  std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider_;
+
+  // List of observers to be notified when a group is selected for a FieldTrial.
+  scoped_refptr<ObserverListThreadSafe<Observer> > observer_list_;
+
+  DISALLOW_COPY_AND_ASSIGN(FieldTrialList);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_FIELD_TRIAL_H_
diff --git a/libchrome/base/metrics/field_trial_unittest.cc b/libchrome/base/metrics/field_trial_unittest.cc
new file mode 100644
index 0000000..00f351f
--- /dev/null
+++ b/libchrome/base/metrics/field_trial_unittest.cc
@@ -0,0 +1,1134 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/field_trial.h"
+
+#include <stddef.h>
+
+#include "base/build_time.h"
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/rand_util.h"
+#include "base/run_loop.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/stringprintf.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// Default group name used by several tests.
+const char kDefaultGroupName[] = "DefaultGroup";
+
+// Call FieldTrialList::FactoryGetFieldTrial() with a future expiry date.
+scoped_refptr<base::FieldTrial> CreateFieldTrial(
+ const std::string& trial_name,
+ int total_probability,
+ const std::string& default_group_name,
+ int* default_group_number) {
+ return FieldTrialList::FactoryGetFieldTrial(
+ trial_name, total_probability, default_group_name,
+ base::FieldTrialList::kNoExpirationYear, 1, 1,
+ base::FieldTrial::SESSION_RANDOMIZED, default_group_number);
+}
+
+int OneYearBeforeBuildTime() {
+ Time one_year_before_build_time = GetBuildTime() - TimeDelta::FromDays(365);
+ Time::Exploded exploded;
+ one_year_before_build_time.LocalExplode(&exploded);
+ return exploded.year;
+}
+
+// FieldTrialList::Observer implementation for testing.
+class TestFieldTrialObserver : public FieldTrialList::Observer {
+ public:
+ TestFieldTrialObserver() {
+ FieldTrialList::AddObserver(this);
+ }
+
+ ~TestFieldTrialObserver() override { FieldTrialList::RemoveObserver(this); }
+
+ void OnFieldTrialGroupFinalized(const std::string& trial,
+ const std::string& group) override {
+ trial_name_ = trial;
+ group_name_ = group;
+ }
+
+ const std::string& trial_name() const { return trial_name_; }
+ const std::string& group_name() const { return group_name_; }
+
+ private:
+ std::string trial_name_;
+ std::string group_name_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestFieldTrialObserver);
+};
+
+} // namespace
+
+class FieldTrialTest : public testing::Test {
+ public:
+ FieldTrialTest() : trial_list_(NULL) {}
+
+ private:
+ MessageLoop message_loop_;
+ FieldTrialList trial_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(FieldTrialTest);
+};
+
+// Test registration, and also check that destructors are called for trials
+// (and that Valgrind doesn't catch us leaking).
+TEST_F(FieldTrialTest, Registration) {
+ const char name1[] = "name 1 test";
+ const char name2[] = "name 2 test";
+ EXPECT_FALSE(FieldTrialList::Find(name1));
+ EXPECT_FALSE(FieldTrialList::Find(name2));
+
+ scoped_refptr<FieldTrial> trial1 =
+ CreateFieldTrial(name1, 10, "default name 1 test", NULL);
+ EXPECT_EQ(FieldTrial::kNotFinalized, trial1->group_);
+ EXPECT_EQ(name1, trial1->trial_name());
+ EXPECT_EQ("", trial1->group_name_internal());
+
+ trial1->AppendGroup(std::string(), 7);
+
+ EXPECT_EQ(trial1.get(), FieldTrialList::Find(name1));
+ EXPECT_FALSE(FieldTrialList::Find(name2));
+
+ scoped_refptr<FieldTrial> trial2 =
+ CreateFieldTrial(name2, 10, "default name 2 test", NULL);
+ EXPECT_EQ(FieldTrial::kNotFinalized, trial2->group_);
+ EXPECT_EQ(name2, trial2->trial_name());
+ EXPECT_EQ("", trial2->group_name_internal());
+
+ trial2->AppendGroup("a first group", 7);
+
+ EXPECT_EQ(trial1.get(), FieldTrialList::Find(name1));
+ EXPECT_EQ(trial2.get(), FieldTrialList::Find(name2));
+ // Note: FieldTrialList should delete the objects at shutdown.
+}
+
+TEST_F(FieldTrialTest, AbsoluteProbabilities) {
+ char always_true[] = " always true";
+ char default_always_true[] = " default always true";
+ char always_false[] = " always false";
+ char default_always_false[] = " default always false";
+ for (int i = 1; i < 250; ++i) {
+ // Try lots of names, by changing the first character of the name.
+ char c = static_cast<char>(i);
+ always_true[0] = c;
+ default_always_true[0] = c;
+ always_false[0] = c;
+ default_always_false[0] = c;
+
+ scoped_refptr<FieldTrial> trial_true =
+ CreateFieldTrial(always_true, 10, default_always_true, NULL);
+ const std::string winner = "TheWinner";
+ int winner_group = trial_true->AppendGroup(winner, 10);
+
+ EXPECT_EQ(winner_group, trial_true->group());
+ EXPECT_EQ(winner, trial_true->group_name());
+
+ scoped_refptr<FieldTrial> trial_false =
+ CreateFieldTrial(always_false, 10, default_always_false, NULL);
+ int loser_group = trial_false->AppendGroup("ALoser", 0);
+
+ EXPECT_NE(loser_group, trial_false->group());
+ }
+}
+
+TEST_F(FieldTrialTest, RemainingProbability) {
+ // First create a test that hasn't had a winner yet.
+ const std::string winner = "Winner";
+ const std::string loser = "Loser";
+ scoped_refptr<FieldTrial> trial;
+ int counter = 0;
+ int default_group_number = -1;
+ do {
+ std::string name = StringPrintf("trial%d", ++counter);
+ trial = CreateFieldTrial(name, 10, winner, &default_group_number);
+ trial->AppendGroup(loser, 5); // 50% chance of not being chosen.
+ // If a group is not assigned, group_ will be kNotFinalized.
+ } while (trial->group_ != FieldTrial::kNotFinalized);
+
+ // And that 'default' group (winner) should always win.
+ EXPECT_EQ(default_group_number, trial->group());
+
+ // And that winner should ALWAYS win.
+ EXPECT_EQ(winner, trial->group_name());
+}
+
+TEST_F(FieldTrialTest, FiftyFiftyProbability) {
+ // Check that even with small divisors, we have the proper probabilities, and
+ // all outcomes are possible. Since this is a 50-50 test, it should get both
+ // outcomes in a few tries, but we'll try no more than 100 times (and be flaky
+ // with probability around 1 in 2^99).
+ bool first_winner = false;
+ bool second_winner = false;
+ int counter = 0;
+ do {
+ std::string name = base::StringPrintf("FiftyFifty%d", ++counter);
+ std::string default_group_name = base::StringPrintf("Default FiftyFifty%d",
+ ++counter);
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial(name, 2, default_group_name, NULL);
+ trial->AppendGroup("first", 1); // 50% chance of being chosen.
+    // If group_ is kNotFinalized, then a group assignment hasn't been done.
+ if (trial->group_ != FieldTrial::kNotFinalized) {
+ first_winner = true;
+ continue;
+ }
+ trial->AppendGroup("second", 1); // Always chosen at this point.
+ EXPECT_NE(FieldTrial::kNotFinalized, trial->group());
+ second_winner = true;
+ } while ((!second_winner || !first_winner) && counter < 100);
+ EXPECT_TRUE(second_winner);
+ EXPECT_TRUE(first_winner);
+}
+
+TEST_F(FieldTrialTest, MiddleProbabilities) {
+ char name[] = " same name";
+ char default_group_name[] = " default same name";
+ bool false_event_seen = false;
+ bool true_event_seen = false;
+ for (int i = 1; i < 250; ++i) {
+ char c = static_cast<char>(i);
+ name[0] = c;
+ default_group_name[0] = c;
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial(name, 10, default_group_name, NULL);
+ int might_win = trial->AppendGroup("MightWin", 5);
+
+ if (trial->group() == might_win) {
+ true_event_seen = true;
+ } else {
+ false_event_seen = true;
+ }
+ if (false_event_seen && true_event_seen)
+ return; // Successful test!!!
+ }
+ // Very surprising to get here. Probability should be around 1 in 2 ** 250.
+ // One of the following will fail.
+ EXPECT_TRUE(false_event_seen);
+ EXPECT_TRUE(true_event_seen);
+}
+
+TEST_F(FieldTrialTest, OneWinner) {
+ char name[] = "Some name";
+ char default_group_name[] = "Default some name";
+ int group_count(10);
+
+ int default_group_number = -1;
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial(name, group_count, default_group_name, NULL);
+ int winner_index(-2);
+ std::string winner_name;
+
+ for (int i = 1; i <= group_count; ++i) {
+ int might_win = trial->AppendGroup(std::string(), 1);
+
+ // Because we keep appending groups, we want to see if the last group that
+ // was added has been assigned or not.
+ if (trial->group_ == might_win) {
+ EXPECT_EQ(-2, winner_index);
+ winner_index = might_win;
+ StringAppendF(&winner_name, "%d", might_win);
+ EXPECT_EQ(winner_name, trial->group_name());
+ }
+ }
+ EXPECT_GE(winner_index, 0);
+ // Since all groups cover the total probability, we should not have
+ // chosen the default group.
+ EXPECT_NE(trial->group(), default_group_number);
+ EXPECT_EQ(trial->group(), winner_index);
+ EXPECT_EQ(trial->group_name(), winner_name);
+}
+
+TEST_F(FieldTrialTest, DisableProbability) {
+ const std::string default_group_name = "Default group";
+ const std::string loser = "Loser";
+ const std::string name = "Trial";
+
+  // Create a field trial that has expired.
+ int default_group_number = -1;
+ FieldTrial* trial = FieldTrialList::FactoryGetFieldTrial(
+ name, 1000000000, default_group_name, OneYearBeforeBuildTime(), 1, 1,
+ FieldTrial::SESSION_RANDOMIZED,
+ &default_group_number);
+ trial->AppendGroup(loser, 999999999); // 99.9999999% chance of being chosen.
+
+ // Because trial has expired, we should always be in the default group.
+ EXPECT_EQ(default_group_number, trial->group());
+
+ // And that default_group_name should ALWAYS win.
+ EXPECT_EQ(default_group_name, trial->group_name());
+}
+
+TEST_F(FieldTrialTest, ActiveGroups) {
+ std::string no_group("No Group");
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial(no_group, 10, "Default", NULL);
+
+ // There is no winner yet, so no NameGroupId should be returned.
+ FieldTrial::ActiveGroup active_group;
+ EXPECT_FALSE(trial->GetActiveGroup(&active_group));
+
+ // Create a single winning group.
+ std::string one_winner("One Winner");
+ trial = CreateFieldTrial(one_winner, 10, "Default", NULL);
+ std::string winner("Winner");
+ trial->AppendGroup(winner, 10);
+ EXPECT_FALSE(trial->GetActiveGroup(&active_group));
+ // Finalize the group selection by accessing the selected group.
+ trial->group();
+ EXPECT_TRUE(trial->GetActiveGroup(&active_group));
+ EXPECT_EQ(one_winner, active_group.trial_name);
+ EXPECT_EQ(winner, active_group.group_name);
+
+ std::string multi_group("MultiGroup");
+ scoped_refptr<FieldTrial> multi_group_trial =
+ CreateFieldTrial(multi_group, 9, "Default", NULL);
+
+ multi_group_trial->AppendGroup("Me", 3);
+ multi_group_trial->AppendGroup("You", 3);
+ multi_group_trial->AppendGroup("Them", 3);
+ EXPECT_FALSE(multi_group_trial->GetActiveGroup(&active_group));
+ // Finalize the group selection by accessing the selected group.
+ multi_group_trial->group();
+ EXPECT_TRUE(multi_group_trial->GetActiveGroup(&active_group));
+ EXPECT_EQ(multi_group, active_group.trial_name);
+ EXPECT_EQ(multi_group_trial->group_name(), active_group.group_name);
+
+ // Now check if the list is built properly...
+ FieldTrial::ActiveGroups active_groups;
+ FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+ EXPECT_EQ(2U, active_groups.size());
+ for (size_t i = 0; i < active_groups.size(); ++i) {
+ // Order is not guaranteed, so check all values.
+ EXPECT_NE(no_group, active_groups[i].trial_name);
+ EXPECT_TRUE(one_winner != active_groups[i].trial_name ||
+ winner == active_groups[i].group_name);
+ EXPECT_TRUE(multi_group != active_groups[i].trial_name ||
+ multi_group_trial->group_name() == active_groups[i].group_name);
+ }
+}
+
+TEST_F(FieldTrialTest, GetActiveFieldTrialGroupsFromString) {
+ FieldTrial::ActiveGroups active_groups;
+ FieldTrialList::GetActiveFieldTrialGroupsFromString("*A/X/B/Y/*C/Z",
+ &active_groups);
+ ASSERT_EQ(2U, active_groups.size());
+ EXPECT_EQ("A", active_groups[0].trial_name);
+ EXPECT_EQ("X", active_groups[0].group_name);
+ EXPECT_EQ("C", active_groups[1].trial_name);
+ EXPECT_EQ("Z", active_groups[1].group_name);
+}
+
+TEST_F(FieldTrialTest, AllGroups) {
+ FieldTrial::State field_trial_state;
+ std::string one_winner("One Winner");
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial(one_winner, 10, "Default", NULL);
+ std::string winner("Winner");
+ trial->AppendGroup(winner, 10);
+ EXPECT_TRUE(trial->GetState(&field_trial_state));
+ EXPECT_EQ(one_winner, field_trial_state.trial_name);
+ EXPECT_EQ(winner, field_trial_state.group_name);
+ trial->group();
+ EXPECT_TRUE(trial->GetState(&field_trial_state));
+ EXPECT_EQ(one_winner, field_trial_state.trial_name);
+ EXPECT_EQ(winner, field_trial_state.group_name);
+
+ std::string multi_group("MultiGroup");
+ scoped_refptr<FieldTrial> multi_group_trial =
+ CreateFieldTrial(multi_group, 9, "Default", NULL);
+
+ multi_group_trial->AppendGroup("Me", 3);
+ multi_group_trial->AppendGroup("You", 3);
+ multi_group_trial->AppendGroup("Them", 3);
+ EXPECT_TRUE(multi_group_trial->GetState(&field_trial_state));
+ // Finalize the group selection by accessing the selected group.
+ multi_group_trial->group();
+ EXPECT_TRUE(multi_group_trial->GetState(&field_trial_state));
+ EXPECT_EQ(multi_group, field_trial_state.trial_name);
+ EXPECT_EQ(multi_group_trial->group_name(), field_trial_state.group_name);
+}
+
+TEST_F(FieldTrialTest, ActiveGroupsNotFinalized) {
+ const char kTrialName[] = "TestTrial";
+ const char kSecondaryGroupName[] = "SecondaryGroup";
+
+ int default_group = -1;
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+ const int secondary_group = trial->AppendGroup(kSecondaryGroupName, 50);
+
+ // Before |group()| is called, |GetActiveGroup()| should return false.
+ FieldTrial::ActiveGroup active_group;
+ EXPECT_FALSE(trial->GetActiveGroup(&active_group));
+
+ // |GetActiveFieldTrialGroups()| should also not include the trial.
+ FieldTrial::ActiveGroups active_groups;
+ FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+ EXPECT_TRUE(active_groups.empty());
+
+ // After |group()| has been called, both APIs should succeed.
+ const int chosen_group = trial->group();
+ EXPECT_TRUE(chosen_group == default_group || chosen_group == secondary_group);
+
+ EXPECT_TRUE(trial->GetActiveGroup(&active_group));
+ EXPECT_EQ(kTrialName, active_group.trial_name);
+ if (chosen_group == default_group)
+ EXPECT_EQ(kDefaultGroupName, active_group.group_name);
+ else
+ EXPECT_EQ(kSecondaryGroupName, active_group.group_name);
+
+ FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+ ASSERT_EQ(1U, active_groups.size());
+ EXPECT_EQ(kTrialName, active_groups[0].trial_name);
+ EXPECT_EQ(active_group.group_name, active_groups[0].group_name);
+}
+
+TEST_F(FieldTrialTest, GetGroupNameWithoutActivation) {
+ const char kTrialName[] = "TestTrial";
+ const char kSecondaryGroupName[] = "SecondaryGroup";
+
+ int default_group = -1;
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+ trial->AppendGroup(kSecondaryGroupName, 50);
+
+ // The trial should start inactive.
+ EXPECT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+
+ // Calling |GetGroupNameWithoutActivation()| should not activate the trial.
+ std::string group_name = trial->GetGroupNameWithoutActivation();
+ EXPECT_FALSE(group_name.empty());
+ EXPECT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+
+ // Calling |group_name()| should activate it and return the same group name.
+ EXPECT_EQ(group_name, trial->group_name());
+ EXPECT_TRUE(FieldTrialList::IsTrialActive(kTrialName));
+}
+
+TEST_F(FieldTrialTest, Save) {
+ std::string save_string;
+
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial("Some name", 10, "Default some name", NULL);
+ // There is no winner yet, so no textual group name is associated with trial.
+ // In this case, the trial should not be included.
+ EXPECT_EQ("", trial->group_name_internal());
+ FieldTrialList::StatesToString(&save_string);
+ EXPECT_EQ("", save_string);
+ save_string.clear();
+
+ // Create a winning group.
+ trial->AppendGroup("Winner", 10);
+ // Finalize the group selection by accessing the selected group.
+ trial->group();
+ FieldTrialList::StatesToString(&save_string);
+ EXPECT_EQ("Some name/Winner/", save_string);
+ save_string.clear();
+
+ // Create a second trial and winning group.
+ scoped_refptr<FieldTrial> trial2 =
+ CreateFieldTrial("xxx", 10, "Default xxx", NULL);
+ trial2->AppendGroup("yyyy", 10);
+ // Finalize the group selection by accessing the selected group.
+ trial2->group();
+
+ FieldTrialList::StatesToString(&save_string);
+ // We assume names are alphabetized... though this is not critical.
+ EXPECT_EQ("Some name/Winner/xxx/yyyy/", save_string);
+ save_string.clear();
+
+ // Create a third trial with only the default group.
+ scoped_refptr<FieldTrial> trial3 =
+ CreateFieldTrial("zzz", 10, "default", NULL);
+ // Finalize the group selection by accessing the selected group.
+ trial3->group();
+
+ FieldTrialList::StatesToString(&save_string);
+ EXPECT_EQ("Some name/Winner/xxx/yyyy/zzz/default/", save_string);
+}
+
+TEST_F(FieldTrialTest, SaveAll) {
+ std::string save_string;
+
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial("Some name", 10, "Default some name", nullptr);
+ EXPECT_EQ("", trial->group_name_internal());
+ FieldTrialList::AllStatesToString(&save_string);
+ EXPECT_EQ("Some name/Default some name/", save_string);
+ // Getting all states should have finalized the trial.
+ EXPECT_EQ("Default some name", trial->group_name_internal());
+ save_string.clear();
+
+ // Create a winning group.
+ trial = CreateFieldTrial("trial2", 10, "Default some name", nullptr);
+ trial->AppendGroup("Winner", 10);
+ // Finalize the group selection by accessing the selected group.
+ trial->group();
+ FieldTrialList::AllStatesToString(&save_string);
+ EXPECT_EQ("Some name/Default some name/*trial2/Winner/", save_string);
+ save_string.clear();
+
+ // Create a second trial and winning group.
+ scoped_refptr<FieldTrial> trial2 =
+ CreateFieldTrial("xxx", 10, "Default xxx", nullptr);
+ trial2->AppendGroup("yyyy", 10);
+ // Finalize the group selection by accessing the selected group.
+ trial2->group();
+
+ FieldTrialList::AllStatesToString(&save_string);
+ // We assume names are alphabetized... though this is not critical.
+ EXPECT_EQ("Some name/Default some name/*trial2/Winner/*xxx/yyyy/",
+ save_string);
+ save_string.clear();
+
+ // Create a third trial with only the default group.
+ scoped_refptr<FieldTrial> trial3 =
+ CreateFieldTrial("zzz", 10, "default", NULL);
+
+ FieldTrialList::AllStatesToString(&save_string);
+ EXPECT_EQ("Some name/Default some name/*trial2/Winner/*xxx/yyyy/zzz/default/",
+ save_string);
+}
+
+TEST_F(FieldTrialTest, Restore) {
+ ASSERT_FALSE(FieldTrialList::TrialExists("Some_name"));
+ ASSERT_FALSE(FieldTrialList::TrialExists("xxx"));
+
+ FieldTrialList::CreateTrialsFromString("Some_name/Winner/xxx/yyyy/",
+ std::set<std::string>());
+
+ FieldTrial* trial = FieldTrialList::Find("Some_name");
+ ASSERT_NE(static_cast<FieldTrial*>(NULL), trial);
+ EXPECT_EQ("Winner", trial->group_name());
+ EXPECT_EQ("Some_name", trial->trial_name());
+
+ trial = FieldTrialList::Find("xxx");
+ ASSERT_NE(static_cast<FieldTrial*>(NULL), trial);
+ EXPECT_EQ("yyyy", trial->group_name());
+ EXPECT_EQ("xxx", trial->trial_name());
+}
+
+TEST_F(FieldTrialTest, RestoreNotEndingWithSlash) {
+ EXPECT_TRUE(FieldTrialList::CreateTrialsFromString("tname/gname",
+ std::set<std::string>()));
+
+ FieldTrial* trial = FieldTrialList::Find("tname");
+ ASSERT_NE(static_cast<FieldTrial*>(NULL), trial);
+ EXPECT_EQ("gname", trial->group_name());
+ EXPECT_EQ("tname", trial->trial_name());
+}
+
+TEST_F(FieldTrialTest, BogusRestore) {
+ EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("MissingSlash",
+ std::set<std::string>()));
+ EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("MissingGroupName/",
+ std::set<std::string>()));
+ EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("noname, only group/",
+ std::set<std::string>()));
+ EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("/emptyname",
+ std::set<std::string>()));
+ EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("*/emptyname",
+ std::set<std::string>()));
+}
+
+TEST_F(FieldTrialTest, DuplicateRestore) {
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial("Some name", 10, "Default", NULL);
+ trial->AppendGroup("Winner", 10);
+ // Finalize the group selection by accessing the selected group.
+ trial->group();
+ std::string save_string;
+ FieldTrialList::StatesToString(&save_string);
+ EXPECT_EQ("Some name/Winner/", save_string);
+
+ // It is OK if we redundantly specify a winner.
+ EXPECT_TRUE(FieldTrialList::CreateTrialsFromString(save_string,
+ std::set<std::string>()));
+
+ // But it is an error to try to change to a different winner.
+ EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("Some name/Loser/",
+ std::set<std::string>()));
+}
+
+TEST_F(FieldTrialTest, CreateTrialsFromStringNotActive) {
+ ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
+ ASSERT_FALSE(FieldTrialList::TrialExists("Xyz"));
+ ASSERT_TRUE(FieldTrialList::CreateTrialsFromString("Abc/def/Xyz/zyx/",
+ std::set<std::string>()));
+
+ FieldTrial::ActiveGroups active_groups;
+ FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+ ASSERT_TRUE(active_groups.empty());
+
+ // Check that the values still get returned and querying them activates them.
+ EXPECT_EQ("def", FieldTrialList::FindFullName("Abc"));
+ EXPECT_EQ("zyx", FieldTrialList::FindFullName("Xyz"));
+
+ FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+ ASSERT_EQ(2U, active_groups.size());
+ EXPECT_EQ("Abc", active_groups[0].trial_name);
+ EXPECT_EQ("def", active_groups[0].group_name);
+ EXPECT_EQ("Xyz", active_groups[1].trial_name);
+ EXPECT_EQ("zyx", active_groups[1].group_name);
+}
+
+TEST_F(FieldTrialTest, CreateTrialsFromStringForceActivation) {
+ ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
+ ASSERT_FALSE(FieldTrialList::TrialExists("def"));
+ ASSERT_FALSE(FieldTrialList::TrialExists("Xyz"));
+ ASSERT_TRUE(FieldTrialList::CreateTrialsFromString(
+ "*Abc/cba/def/fed/*Xyz/zyx/", std::set<std::string>()));
+
+ FieldTrial::ActiveGroups active_groups;
+ FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+ ASSERT_EQ(2U, active_groups.size());
+ EXPECT_EQ("Abc", active_groups[0].trial_name);
+ EXPECT_EQ("cba", active_groups[0].group_name);
+ EXPECT_EQ("Xyz", active_groups[1].trial_name);
+ EXPECT_EQ("zyx", active_groups[1].group_name);
+}
+
+TEST_F(FieldTrialTest, CreateTrialsFromStringNotActiveObserver) {
+ ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
+
+ TestFieldTrialObserver observer;
+ ASSERT_TRUE(FieldTrialList::CreateTrialsFromString("Abc/def/",
+ std::set<std::string>()));
+ RunLoop().RunUntilIdle();
+ // Observer shouldn't be notified.
+ EXPECT_TRUE(observer.trial_name().empty());
+
+ // Check that the values still get returned and querying them activates them.
+ EXPECT_EQ("def", FieldTrialList::FindFullName("Abc"));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_EQ("Abc", observer.trial_name());
+ EXPECT_EQ("def", observer.group_name());
+}
+
+TEST_F(FieldTrialTest, CreateTrialsFromStringWithIgnoredFieldTrials) {
+ ASSERT_FALSE(FieldTrialList::TrialExists("Unaccepted1"));
+ ASSERT_FALSE(FieldTrialList::TrialExists("Foo"));
+ ASSERT_FALSE(FieldTrialList::TrialExists("Unaccepted2"));
+ ASSERT_FALSE(FieldTrialList::TrialExists("Bar"));
+ ASSERT_FALSE(FieldTrialList::TrialExists("Unaccepted3"));
+
+ std::set<std::string> ignored_trial_names;
+ ignored_trial_names.insert("Unaccepted1");
+ ignored_trial_names.insert("Unaccepted2");
+ ignored_trial_names.insert("Unaccepted3");
+
+ FieldTrialList::CreateTrialsFromString(
+ "Unaccepted1/Unaccepted1_name/"
+ "Foo/Foo_name/"
+ "Unaccepted2/Unaccepted2_name/"
+ "Bar/Bar_name/"
+ "Unaccepted3/Unaccepted3_name/",
+ ignored_trial_names);
+
+ EXPECT_FALSE(FieldTrialList::TrialExists("Unaccepted1"));
+ EXPECT_TRUE(FieldTrialList::TrialExists("Foo"));
+ EXPECT_FALSE(FieldTrialList::TrialExists("Unaccepted2"));
+ EXPECT_TRUE(FieldTrialList::TrialExists("Bar"));
+ EXPECT_FALSE(FieldTrialList::TrialExists("Unaccepted3"));
+
+ FieldTrial::ActiveGroups active_groups;
+ FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+ EXPECT_TRUE(active_groups.empty());
+
+ FieldTrial* trial = FieldTrialList::Find("Foo");
+ ASSERT_NE(static_cast<FieldTrial*>(NULL), trial);
+ EXPECT_EQ("Foo", trial->trial_name());
+ EXPECT_EQ("Foo_name", trial->group_name());
+
+ trial = FieldTrialList::Find("Bar");
+ ASSERT_NE(static_cast<FieldTrial*>(NULL), trial);
+ EXPECT_EQ("Bar", trial->trial_name());
+ EXPECT_EQ("Bar_name", trial->group_name());
+}
+
+TEST_F(FieldTrialTest, CreateFieldTrial) {
+ ASSERT_FALSE(FieldTrialList::TrialExists("Some_name"));
+
+ FieldTrialList::CreateFieldTrial("Some_name", "Winner");
+
+ FieldTrial* trial = FieldTrialList::Find("Some_name");
+ ASSERT_NE(static_cast<FieldTrial*>(NULL), trial);
+ EXPECT_EQ("Winner", trial->group_name());
+ EXPECT_EQ("Some_name", trial->trial_name());
+}
+
+TEST_F(FieldTrialTest, CreateFieldTrialIsNotActive) {
+ const char kTrialName[] = "CreateFieldTrialIsActiveTrial";
+ const char kWinnerGroup[] = "Winner";
+ ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+ FieldTrialList::CreateFieldTrial(kTrialName, kWinnerGroup);
+
+ FieldTrial::ActiveGroups active_groups;
+ FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+ EXPECT_TRUE(active_groups.empty());
+}
+
+TEST_F(FieldTrialTest, DuplicateFieldTrial) {
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial("Some_name", 10, "Default", NULL);
+ trial->AppendGroup("Winner", 10);
+
+ // It is OK if we redundantly specify a winner.
+ FieldTrial* trial1 = FieldTrialList::CreateFieldTrial("Some_name", "Winner");
+ EXPECT_TRUE(trial1 != NULL);
+
+ // But it is an error to try to change to a different winner.
+ FieldTrial* trial2 = FieldTrialList::CreateFieldTrial("Some_name", "Loser");
+ EXPECT_TRUE(trial2 == NULL);
+}
+
+TEST_F(FieldTrialTest, DisableImmediately) {
+ int default_group_number = -1;
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial("trial", 100, "default", &default_group_number);
+ trial->Disable();
+ ASSERT_EQ("default", trial->group_name());
+ ASSERT_EQ(default_group_number, trial->group());
+}
+
+TEST_F(FieldTrialTest, DisableAfterInitialization) {
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial("trial", 100, "default", NULL);
+ trial->AppendGroup("non_default", 100);
+ trial->Disable();
+ ASSERT_EQ("default", trial->group_name());
+}
+
+TEST_F(FieldTrialTest, ForcedFieldTrials) {
+ // Validate we keep the forced choice.
+ FieldTrial* forced_trial = FieldTrialList::CreateFieldTrial("Use the",
+ "Force");
+ EXPECT_STREQ("Force", forced_trial->group_name().c_str());
+
+ int default_group_number = -1;
+ scoped_refptr<FieldTrial> factory_trial =
+ CreateFieldTrial("Use the", 1000, "default", &default_group_number);
+ EXPECT_EQ(factory_trial.get(), forced_trial);
+
+ int chosen_group = factory_trial->AppendGroup("Force", 100);
+ EXPECT_EQ(chosen_group, factory_trial->group());
+ int not_chosen_group = factory_trial->AppendGroup("Dark Side", 100);
+ EXPECT_NE(chosen_group, not_chosen_group);
+
+  // Since we didn't force the default group, the chosen group should not
+  // have been assigned as the default group number.
+ EXPECT_NE(default_group_number, chosen_group);
+ int new_group = factory_trial->AppendGroup("Duck Tape", 800);
+ EXPECT_NE(chosen_group, new_group);
+ // The new group should not be the default group either.
+ EXPECT_NE(default_group_number, new_group);
+}
+
+TEST_F(FieldTrialTest, ForcedFieldTrialsDefaultGroup) {
+ // Forcing the default should use the proper group ID.
+ FieldTrial* forced_trial = FieldTrialList::CreateFieldTrial("Trial Name",
+ "Default");
+ int default_group_number = -1;
+ scoped_refptr<FieldTrial> factory_trial =
+ CreateFieldTrial("Trial Name", 1000, "Default", &default_group_number);
+ EXPECT_EQ(forced_trial, factory_trial.get());
+
+ int other_group = factory_trial->AppendGroup("Not Default", 100);
+ EXPECT_STREQ("Default", factory_trial->group_name().c_str());
+ EXPECT_EQ(default_group_number, factory_trial->group());
+ EXPECT_NE(other_group, factory_trial->group());
+
+ int new_other_group = factory_trial->AppendGroup("Not Default Either", 800);
+ EXPECT_NE(new_other_group, factory_trial->group());
+}
+
+TEST_F(FieldTrialTest, SetForced) {
+ // Start by setting a trial for which we ensure a winner...
+ int default_group_number = -1;
+ scoped_refptr<FieldTrial> forced_trial =
+ CreateFieldTrial("Use the", 1, "default", &default_group_number);
+ EXPECT_EQ(forced_trial, forced_trial);
+
+ int forced_group = forced_trial->AppendGroup("Force", 1);
+ EXPECT_EQ(forced_group, forced_trial->group());
+
+ // Now force it.
+ forced_trial->SetForced();
+
+ // Now try to set it up differently as a hard coded registration would.
+ scoped_refptr<FieldTrial> hard_coded_trial =
+ CreateFieldTrial("Use the", 1, "default", &default_group_number);
+ EXPECT_EQ(hard_coded_trial, forced_trial);
+
+ int would_lose_group = hard_coded_trial->AppendGroup("Force", 0);
+ EXPECT_EQ(forced_group, hard_coded_trial->group());
+ EXPECT_EQ(forced_group, would_lose_group);
+
+ // Same thing if we would have done it to win again.
+ scoped_refptr<FieldTrial> other_hard_coded_trial =
+ CreateFieldTrial("Use the", 1, "default", &default_group_number);
+ EXPECT_EQ(other_hard_coded_trial, forced_trial);
+
+ int would_win_group = other_hard_coded_trial->AppendGroup("Force", 1);
+ EXPECT_EQ(forced_group, other_hard_coded_trial->group());
+ EXPECT_EQ(forced_group, would_win_group);
+}
+
+TEST_F(FieldTrialTest, SetForcedDefaultOnly) {
+ const char kTrialName[] = "SetForcedDefaultOnly";
+ ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+ int default_group = -1;
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+ trial->SetForced();
+
+ trial = CreateFieldTrial(kTrialName, 100, kDefaultGroupName, NULL);
+ EXPECT_EQ(default_group, trial->group());
+ EXPECT_EQ(kDefaultGroupName, trial->group_name());
+}
+
+TEST_F(FieldTrialTest, SetForcedDefaultWithExtraGroup) {
+ const char kTrialName[] = "SetForcedDefaultWithExtraGroup";
+ ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+ int default_group = -1;
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+ trial->SetForced();
+
+ trial = CreateFieldTrial(kTrialName, 100, kDefaultGroupName, NULL);
+ const int extra_group = trial->AppendGroup("Extra", 100);
+ EXPECT_EQ(default_group, trial->group());
+ EXPECT_NE(extra_group, trial->group());
+ EXPECT_EQ(kDefaultGroupName, trial->group_name());
+}
+
+TEST_F(FieldTrialTest, SetForcedTurnFeatureOn) {
+ const char kTrialName[] = "SetForcedTurnFeatureOn";
+ const char kExtraGroupName[] = "Extra";
+ ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+ // Simulate a server-side (forced) config that turns the feature on when the
+ // original hard-coded config had it disabled.
+ scoped_refptr<FieldTrial> forced_trial =
+ CreateFieldTrial(kTrialName, 100, kDefaultGroupName, NULL);
+ forced_trial->AppendGroup(kExtraGroupName, 100);
+ forced_trial->SetForced();
+
+ int default_group = -1;
+ scoped_refptr<FieldTrial> client_trial =
+ CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+ const int extra_group = client_trial->AppendGroup(kExtraGroupName, 0);
+ EXPECT_NE(default_group, extra_group);
+
+ EXPECT_FALSE(client_trial->group_reported_);
+ EXPECT_EQ(extra_group, client_trial->group());
+ EXPECT_TRUE(client_trial->group_reported_);
+ EXPECT_EQ(kExtraGroupName, client_trial->group_name());
+}
+
+TEST_F(FieldTrialTest, SetForcedTurnFeatureOff) {
+ const char kTrialName[] = "SetForcedTurnFeatureOff";
+ const char kExtraGroupName[] = "Extra";
+ ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+ // Simulate a server-side (forced) config that turns the feature off when the
+ // original hard-coded config had it enabled.
+ scoped_refptr<FieldTrial> forced_trial =
+ CreateFieldTrial(kTrialName, 100, kDefaultGroupName, NULL);
+ forced_trial->AppendGroup(kExtraGroupName, 0);
+ forced_trial->SetForced();
+
+ int default_group = -1;
+ scoped_refptr<FieldTrial> client_trial =
+ CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+ // Probability 100 locally, but the forced config keeps the default group.
+ const int extra_group = client_trial->AppendGroup(kExtraGroupName, 100);
+ EXPECT_NE(default_group, extra_group);
+
+ // |group_reported_| must flip only once group() is actually queried.
+ EXPECT_FALSE(client_trial->group_reported_);
+ EXPECT_EQ(default_group, client_trial->group());
+ EXPECT_TRUE(client_trial->group_reported_);
+ EXPECT_EQ(kDefaultGroupName, client_trial->group_name());
+}
+
+TEST_F(FieldTrialTest, SetForcedChangeDefault_Default) {
+ const char kTrialName[] = "SetForcedDefaultGroupChange";
+ const char kGroupAName[] = "A";
+ const char kGroupBName[] = "B";
+ ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+ // Simulate a server-side (forced) config that switches which group is default
+ // and ensures that the non-forced code receives the correct group numbers.
+ scoped_refptr<FieldTrial> forced_trial =
+ CreateFieldTrial(kTrialName, 100, kGroupAName, NULL);
+ forced_trial->AppendGroup(kGroupBName, 100);
+ forced_trial->SetForced();
+
+ int default_group = -1;
+ // Client declares B as default; the forced config selected B, so the client
+ // must land in its own default group.
+ scoped_refptr<FieldTrial> client_trial =
+ CreateFieldTrial(kTrialName, 100, kGroupBName, &default_group);
+ const int extra_group = client_trial->AppendGroup(kGroupAName, 50);
+ EXPECT_NE(default_group, extra_group);
+
+ EXPECT_FALSE(client_trial->group_reported_);
+ EXPECT_EQ(default_group, client_trial->group());
+ EXPECT_TRUE(client_trial->group_reported_);
+ EXPECT_EQ(kGroupBName, client_trial->group_name());
+}
+
+TEST_F(FieldTrialTest, SetForcedChangeDefault_NonDefault) {
+ const char kTrialName[] = "SetForcedDefaultGroupChange";
+ const char kGroupAName[] = "A";
+ const char kGroupBName[] = "B";
+ ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+ // Simulate a server-side (forced) config that switches which group is default
+ // and ensures that the non-forced code receives the correct group numbers.
+ scoped_refptr<FieldTrial> forced_trial =
+ CreateFieldTrial(kTrialName, 100, kGroupAName, NULL);
+ forced_trial->AppendGroup(kGroupBName, 0);
+ forced_trial->SetForced();
+
+ int default_group = -1;
+ // Client declares B as default; the forced config selected A, so the client
+ // must land in its non-default ("extra") group.
+ scoped_refptr<FieldTrial> client_trial =
+ CreateFieldTrial(kTrialName, 100, kGroupBName, &default_group);
+ const int extra_group = client_trial->AppendGroup(kGroupAName, 50);
+ EXPECT_NE(default_group, extra_group);
+
+ EXPECT_FALSE(client_trial->group_reported_);
+ EXPECT_EQ(extra_group, client_trial->group());
+ EXPECT_TRUE(client_trial->group_reported_);
+ EXPECT_EQ(kGroupAName, client_trial->group_name());
+}
+
+TEST_F(FieldTrialTest, Observe) {
+ const char kTrialName[] = "TrialToObserve1";
+ const char kSecondaryGroupName[] = "SecondaryGroup";
+
+ TestFieldTrialObserver observer;
+ int default_group = -1;
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+ const int secondary_group = trial->AppendGroup(kSecondaryGroupName, 50);
+ const int chosen_group = trial->group();
+ EXPECT_TRUE(chosen_group == default_group || chosen_group == secondary_group);
+
+ // Observer notifications are delivered asynchronously; flush the queue
+ // before inspecting what the observer saw.
+ RunLoop().RunUntilIdle();
+ EXPECT_EQ(kTrialName, observer.trial_name());
+ if (chosen_group == default_group)
+ EXPECT_EQ(kDefaultGroupName, observer.group_name());
+ else
+ EXPECT_EQ(kSecondaryGroupName, observer.group_name());
+}
+
+TEST_F(FieldTrialTest, ObserveDisabled) {
+ const char kTrialName[] = "TrialToObserve2";
+
+ TestFieldTrialObserver observer;
+ int default_group = -1;
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+ trial->AppendGroup("A", 25);
+ trial->AppendGroup("B", 25);
+ trial->AppendGroup("C", 25);
+ trial->Disable();
+
+ // Observer shouldn't be notified of a disabled trial.
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(observer.trial_name().empty());
+ EXPECT_TRUE(observer.group_name().empty());
+
+ // Observer shouldn't be notified even after a |group()| call.
+ // A disabled trial always resolves to its default group.
+ EXPECT_EQ(default_group, trial->group());
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(observer.trial_name().empty());
+ EXPECT_TRUE(observer.group_name().empty());
+}
+
+TEST_F(FieldTrialTest, ObserveForcedDisabled) {
+ const char kTrialName[] = "TrialToObserve3";
+
+ TestFieldTrialObserver observer;
+ int default_group = -1;
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+ trial->AppendGroup("A", 25);
+ trial->AppendGroup("B", 25);
+ trial->AppendGroup("C", 25);
+ // Disable() must override SetForced(): a disabled trial stays silent.
+ trial->SetForced();
+ trial->Disable();
+
+ // Observer shouldn't be notified of a disabled trial, even when forced.
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(observer.trial_name().empty());
+ EXPECT_TRUE(observer.group_name().empty());
+
+ // Observer shouldn't be notified even after a |group()| call.
+ EXPECT_EQ(default_group, trial->group());
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(observer.trial_name().empty());
+ EXPECT_TRUE(observer.group_name().empty());
+}
+
+TEST_F(FieldTrialTest, DisabledTrialNotActive) {
+ const char kTrialName[] = "DisabledTrial";
+ ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial(kTrialName, 100, kDefaultGroupName, NULL);
+ trial->AppendGroup("X", 50);
+ trial->Disable();
+
+ // Ensure the trial is not listed as active.
+ FieldTrial::ActiveGroups active_groups;
+ FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+ EXPECT_TRUE(active_groups.empty());
+
+ // Ensure the trial is not listed in the |StatesToString()| result.
+ std::string states;
+ FieldTrialList::StatesToString(&states);
+ EXPECT_TRUE(states.empty());
+}
+
+TEST_F(FieldTrialTest, ExpirationYearNotExpired) {
+ const char kTrialName[] = "NotExpired";
+ const char kGroupName[] = "Group2";
+ const int kProbability = 100;
+ ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+ // A non-expired trial with a 100% group must select that group.
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial(kTrialName, kProbability, kDefaultGroupName, NULL);
+ trial->AppendGroup(kGroupName, kProbability);
+ EXPECT_EQ(kGroupName, trial->group_name());
+}
+
+TEST_F(FieldTrialTest, FloatBoundariesGiveEqualGroupSizes) {
+ const int kBucketCount = 100;
+
+ // Try each boundary value |i / 100.0| as the entropy value.
+ // With 100 equal-weight groups, entropy i/100 must map to group "i" exactly,
+ // proving the bucket boundaries do not drift due to floating-point rounding.
+ for (int i = 0; i < kBucketCount; ++i) {
+ const double entropy = i / static_cast<double>(kBucketCount);
+
+ scoped_refptr<base::FieldTrial> trial(
+ new base::FieldTrial("test", kBucketCount, "default", entropy));
+ for (int j = 0; j < kBucketCount; ++j)
+ trial->AppendGroup(base::IntToString(j), 1);
+
+ EXPECT_EQ(base::IntToString(i), trial->group_name());
+ }
+}
+
+TEST_F(FieldTrialTest, DoesNotSurpassTotalProbability) {
+ // An entropy value just below 1.0 must still land inside the last group,
+ // never outside the total probability range.
+ const double kEntropyValue = 1.0 - 1e-9;
+ ASSERT_LT(kEntropyValue, 1.0);
+
+ scoped_refptr<base::FieldTrial> trial(
+ new base::FieldTrial("test", 2, "default", kEntropyValue));
+ trial->AppendGroup("1", 1);
+ trial->AppendGroup("2", 1);
+
+ EXPECT_EQ("2", trial->group_name());
+}
+
+TEST_F(FieldTrialTest, CreateSimulatedFieldTrial) {
+ const char kTrialName[] = "CreateSimulatedFieldTrial";
+ ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
+
+ // Different cases to test, e.g. default vs. non default group being chosen.
+ struct {
+ double entropy_value;
+ const char* expected_group;
+ } test_cases[] = {
+ { 0.4, "A" },
+ { 0.85, "B" },
+ { 0.95, kDefaultGroupName },
+ };
+
+ for (size_t i = 0; i < arraysize(test_cases); ++i) {
+ TestFieldTrialObserver observer;
+ // A simulated trial computes its group from the supplied entropy but must
+ // have no observable side effects anywhere else.
+ scoped_refptr<FieldTrial> trial(
+ FieldTrial::CreateSimulatedFieldTrial(kTrialName, 100, kDefaultGroupName,
+ test_cases[i].entropy_value));
+ trial->AppendGroup("A", 80);
+ trial->AppendGroup("B", 10);
+ EXPECT_EQ(test_cases[i].expected_group, trial->group_name());
+
+ // Field trial shouldn't have been registered with the list.
+ EXPECT_FALSE(FieldTrialList::TrialExists(kTrialName));
+ EXPECT_EQ(0u, FieldTrialList::GetFieldTrialCount());
+
+ // Observer shouldn't have been notified.
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(observer.trial_name().empty());
+
+ // The trial shouldn't be in the active set of trials.
+ FieldTrial::ActiveGroups active_groups;
+ FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+ EXPECT_TRUE(active_groups.empty());
+
+ // The trial shouldn't be listed in the |StatesToString()| result.
+ std::string states;
+ FieldTrialList::StatesToString(&states);
+ EXPECT_TRUE(states.empty());
+ }
+}
+
+TEST(FieldTrialTestWithoutList, StatesStringFormat) {
+ std::string save_string;
+
+ // Scoping the first FieldTrialList, as we need another one to test the
+ // importing function.
+ {
+ FieldTrialList field_trial_list(NULL);
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial("Abc", 10, "Default some name", NULL);
+ trial->AppendGroup("cba", 10);
+ trial->group();
+ scoped_refptr<FieldTrial> trial2 =
+ CreateFieldTrial("Xyz", 10, "Default xxx", NULL);
+ trial2->AppendGroup("zyx", 10);
+ trial2->group();
+ // trial3 is created but its group() is never queried, so it is saved in
+ // the states string as existing-but-inactive.
+ scoped_refptr<FieldTrial> trial3 =
+ CreateFieldTrial("zzz", 10, "default", NULL);
+
+ FieldTrialList::AllStatesToString(&save_string);
+ }
+
+ // Starting with a new blank FieldTrialList.
+ FieldTrialList field_trial_list(NULL);
+ ASSERT_TRUE(field_trial_list.CreateTrialsFromString(save_string,
+ std::set<std::string>()));
+
+ // Only the two activated trials come back as active; trial "zzz" exists
+ // but stays inactive after the round trip.
+ FieldTrial::ActiveGroups active_groups;
+ field_trial_list.GetActiveFieldTrialGroups(&active_groups);
+ ASSERT_EQ(2U, active_groups.size());
+ EXPECT_EQ("Abc", active_groups[0].trial_name);
+ EXPECT_EQ("cba", active_groups[0].group_name);
+ EXPECT_EQ("Xyz", active_groups[1].trial_name);
+ EXPECT_EQ("zyx", active_groups[1].group_name);
+ EXPECT_TRUE(field_trial_list.TrialExists("zzz"));
+}
+
+#if GTEST_HAS_DEATH_TEST
+TEST(FieldTrialDeathTest, OneTimeRandomizedTrialWithoutFieldTrialList) {
+ // Trying to instantiate a one-time randomized field trial before the
+ // FieldTrialList is created should crash.
+ EXPECT_DEATH(FieldTrialList::FactoryGetFieldTrial(
+ "OneTimeRandomizedTrialWithoutFieldTrialList", 100, kDefaultGroupName,
+ base::FieldTrialList::kNoExpirationYear, 1, 1,
+ base::FieldTrial::ONE_TIME_RANDOMIZED, NULL), "");
+}
+#endif
+
+} // namespace base
diff --git a/libchrome/base/metrics/histogram.cc b/libchrome/base/metrics/histogram.cc
new file mode 100644
index 0000000..0d6287c
--- /dev/null
+++ b/libchrome/base/metrics/histogram.cc
@@ -0,0 +1,1191 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Histogram is an object that aggregates statistics, and can summarize them in
+// various forms, including ASCII graphical, HTML, and numerically (as a
+// vector of numbers corresponding to each of the aggregating buckets).
+// See header file for details and examples.
+
+#include "base/metrics/histogram.h"
+
+#include <limits.h>
+#include <math.h>
+
+#include <algorithm>
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/debug/alias.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/metrics_hashes.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/metrics/sample_vector.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/pickle.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+#include "base/values.h"
+
+namespace base {
+
+namespace {
+
+// Deserializes the histogram construction arguments from |iter| into the
+// out-parameters. Returns false (after logging) on any read failure or if
+// the decoded values are implausible. The inputs may originate from an
+// untrusted renderer, hence the extra validation.
+bool ReadHistogramArguments(PickleIterator* iter,
+ std::string* histogram_name,
+ int* flags,
+ int* declared_min,
+ int* declared_max,
+ uint32_t* bucket_count,
+ uint32_t* range_checksum) {
+ if (!iter->ReadString(histogram_name) ||
+ !iter->ReadInt(flags) ||
+ !iter->ReadInt(declared_min) ||
+ !iter->ReadInt(declared_max) ||
+ !iter->ReadUInt32(bucket_count) ||
+ !iter->ReadUInt32(range_checksum)) {
+ DLOG(ERROR) << "Pickle error decoding Histogram: " << *histogram_name;
+ return false;
+ }
+
+ // Since these fields may have come from an untrusted renderer, do additional
+ // checks above and beyond those in Histogram::Initialize()
+ if (*declared_max <= 0 ||
+ *declared_min <= 0 ||
+ *declared_max < *declared_min ||
+ INT_MAX / sizeof(HistogramBase::Count) <= *bucket_count ||
+ *bucket_count < 2) {
+ // Dereference the name (was streaming the std::string* pointer, which
+ // printed an address instead of the histogram's name).
+ DLOG(ERROR) << "Values error decoding Histogram: " << *histogram_name;
+ return false;
+ }
+
+ // We use the arguments to find or create the local version of the histogram
+ // in this process, so we need to clear any IPC flag.
+ *flags &= ~HistogramBase::kIPCSerializationSourceFlag;
+
+ return true;
+}
+
+// Returns true if |histogram|'s locally computed bucket-ranges checksum
+// matches the |range_checksum| that arrived with the serialized data.
+// NOTE(review): assumes |histogram| is actually a Histogram (static_cast is
+// unchecked) -- callers must guarantee the type.
+bool ValidateRangeChecksum(const HistogramBase& histogram,
+ uint32_t range_checksum) {
+ const Histogram& casted_histogram =
+ static_cast<const Histogram&>(histogram);
+
+ return casted_histogram.bucket_ranges()->checksum() == range_checksum;
+}
+
+} // namespace
+
+// Convenience aliases for the histogram value types.
+typedef HistogramBase::Count Count;
+typedef HistogramBase::Sample Sample;
+
+// Hard upper bound on the number of buckets a histogram may declare.
+// static
+const uint32_t Histogram::kBucketCount_MAX = 16384u;
+
+// Factory encapsulates the find-or-create logic for histograms: it builds the
+// bucket ranges, tries persistent allocation, falls back to the heap, and
+// registers the result with the StatisticsRecorder. Subclasses (for linear,
+// boolean, custom histograms) override CreateRanges()/HeapAlloc().
+class Histogram::Factory {
+ public:
+ Factory(const std::string& name,
+ HistogramBase::Sample minimum,
+ HistogramBase::Sample maximum,
+ uint32_t bucket_count,
+ int32_t flags)
+ : Factory(name, HISTOGRAM, minimum, maximum, bucket_count, flags) {}
+ virtual ~Factory() = default;
+
+ // Create histogram based on construction parameters. Caller takes
+ // ownership of the returned object.
+ HistogramBase* Build();
+
+ protected:
+ Factory(const std::string& name,
+ HistogramType histogram_type,
+ HistogramBase::Sample minimum,
+ HistogramBase::Sample maximum,
+ uint32_t bucket_count,
+ int32_t flags)
+ : name_(name),
+ histogram_type_(histogram_type),
+ minimum_(minimum),
+ maximum_(maximum),
+ bucket_count_(bucket_count),
+ flags_(flags) {}
+
+ // Create a BucketRanges structure appropriate for this histogram.
+ virtual BucketRanges* CreateRanges() {
+ BucketRanges* ranges = new BucketRanges(bucket_count_ + 1);
+ Histogram::InitializeBucketRanges(minimum_, maximum_, ranges);
+ return ranges;
+ }
+
+ // Allocate the correct Histogram object off the heap (in case persistent
+ // memory is not available).
+ virtual std::unique_ptr<HistogramBase> HeapAlloc(const BucketRanges* ranges) {
+ return WrapUnique(new Histogram(name_, minimum_, maximum_, ranges));
+ }
+
+ // Perform any required datafill on the just-created histogram. If
+ // overridden, be sure to call the "super" version -- this method may not
+ // always remain empty.
+ virtual void FillHistogram(HistogramBase* /*histogram*/) {}
+
+ // These values are protected (instead of private) because they need to
+ // be accessible to methods of sub-classes in order to avoid passing
+ // unnecessary parameters everywhere.
+ // NOTE: |name_| is a reference; the referenced string must outlive the
+ // Factory (factories are short-lived stack objects in the FactoryGet path).
+ const std::string& name_;
+ const HistogramType histogram_type_;
+ HistogramBase::Sample minimum_;
+ HistogramBase::Sample maximum_;
+ uint32_t bucket_count_;
+ int32_t flags_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Factory);
+};
+
+// Finds an existing histogram with this name or creates, fills and registers
+// a new one. Returns the registered instance, or nullptr if an existing
+// histogram's construction arguments conflict with the requested ones.
+HistogramBase* Histogram::Factory::Build() {
+ HistogramBase* histogram = StatisticsRecorder::FindHistogram(name_);
+ if (!histogram) {
+ // To avoid racy destruction at shutdown, the following will be leaked.
+ const BucketRanges* created_ranges = CreateRanges();
+ const BucketRanges* registered_ranges =
+ StatisticsRecorder::RegisterOrDeleteDuplicateRanges(created_ranges);
+
+ // In most cases, the bucket-count, minimum, and maximum values are known
+ // when the code is written and so are passed in explicitly. In other
+ // cases (such as with a CustomHistogram), they are calculated dynamically
+ // at run-time. In the latter case, those ctor parameters are zero and
+ // the results extracted from the result of CreateRanges().
+ if (bucket_count_ == 0) {
+ bucket_count_ = static_cast<uint32_t>(registered_ranges->bucket_count());
+ minimum_ = registered_ranges->range(1);
+ maximum_ = registered_ranges->range(bucket_count_ - 1);
+ }
+
+ // Try to create the histogram using a "persistent" allocator. As of
+ // 2016-02-25, the availability of such is controlled by a base::Feature
+ // that is off by default. If the allocator doesn't exist or if
+ // allocating from it fails, code below will allocate the histogram from
+ // the process heap.
+ PersistentHistogramAllocator::Reference histogram_ref = 0;
+ std::unique_ptr<HistogramBase> tentative_histogram;
+ PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
+ if (allocator) {
+ tentative_histogram = allocator->AllocateHistogram(
+ histogram_type_,
+ name_,
+ minimum_,
+ maximum_,
+ registered_ranges,
+ flags_,
+ &histogram_ref);
+ }
+
+ // Handle the case where no persistent allocator is present or the
+ // persistent allocation fails (perhaps because it is full).
+ if (!tentative_histogram) {
+ DCHECK(!histogram_ref); // Should never have been set.
+ DCHECK(!allocator); // Shouldn't have failed.
+ flags_ &= ~HistogramBase::kIsPersistent;
+ tentative_histogram = HeapAlloc(registered_ranges);
+ tentative_histogram->SetFlags(flags_);
+ }
+
+ FillHistogram(tentative_histogram.get());
+
+ // Register this histogram with the StatisticsRecorder. Keep a copy of
+ // the pointer value to tell later whether the locally created histogram
+ // was registered or deleted. The type is "void" because it could point
+ // to released memory after the following line.
+ const void* tentative_histogram_ptr = tentative_histogram.get();
+ histogram = StatisticsRecorder::RegisterOrDeleteDuplicate(
+ tentative_histogram.release());
+
+ // Persistent histograms need some follow-up processing.
+ if (histogram_ref) {
+ allocator->FinalizeHistogram(histogram_ref,
+ histogram == tentative_histogram_ptr);
+ }
+
+ // Update report on created histograms.
+ ReportHistogramActivity(*histogram, HISTOGRAM_CREATED);
+ } else {
+ // Update report on lookup histograms.
+ ReportHistogramActivity(*histogram, HISTOGRAM_LOOKUP);
+ }
+
+ DCHECK_EQ(histogram_type_, histogram->GetHistogramType()) << name_;
+ if (bucket_count_ != 0 &&
+ !histogram->HasConstructionArguments(minimum_, maximum_, bucket_count_)) {
+ // The construction arguments do not match the existing histogram. This can
+ // come about if an extension updates in the middle of a chrome run and has
+ // changed one of them, or simply by bad code within Chrome itself. We
+ // return NULL here with the expectation that bad code in Chrome will crash
+ // on dereference, but extension/Pepper APIs will guard against NULL and not
+ // crash.
+ DLOG(ERROR) << "Histogram " << name_ << " has bad construction arguments";
+ return nullptr;
+ }
+ return histogram;
+}
+
+// Returns the registered exponential histogram with |name|, creating it if
+// necessary. Arguments are sanitized in-place by
+// InspectConstructionArguments before the lookup/creation.
+HistogramBase* Histogram::FactoryGet(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ uint32_t bucket_count,
+ int32_t flags) {
+ bool valid_arguments =
+ InspectConstructionArguments(name, &minimum, &maximum, &bucket_count);
+ DCHECK(valid_arguments);
+
+ return Factory(name, minimum, maximum, bucket_count, flags).Build();
+}
+
+// Time-based convenience wrapper: converts the TimeDelta bounds to
+// millisecond samples and forwards to FactoryGet.
+HistogramBase* Histogram::FactoryTimeGet(const std::string& name,
+ TimeDelta minimum,
+ TimeDelta maximum,
+ uint32_t bucket_count,
+ int32_t flags) {
+ return FactoryGet(name, static_cast<Sample>(minimum.InMilliseconds()),
+ static_cast<Sample>(maximum.InMilliseconds()), bucket_count,
+ flags);
+}
+
+// C-string overload; forwards to the std::string version.
+HistogramBase* Histogram::FactoryGet(const char* name,
+ Sample minimum,
+ Sample maximum,
+ uint32_t bucket_count,
+ int32_t flags) {
+ return FactoryGet(std::string(name), minimum, maximum, bucket_count, flags);
+}
+
+// C-string overload; forwards to the std::string version.
+HistogramBase* Histogram::FactoryTimeGet(const char* name,
+ TimeDelta minimum,
+ TimeDelta maximum,
+ uint32_t bucket_count,
+ int32_t flags) {
+ return FactoryTimeGet(std::string(name), minimum, maximum, bucket_count,
+ flags);
+}
+
+// Creates a histogram whose sample storage (|counts|, |logged_counts|, and
+// metadata) lives in caller-provided persistent memory rather than the heap.
+// The caller retains ownership of that memory; it must outlive the histogram.
+std::unique_ptr<HistogramBase> Histogram::PersistentCreate(
+ const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ const BucketRanges* ranges,
+ HistogramBase::AtomicCount* counts,
+ HistogramBase::AtomicCount* logged_counts,
+ uint32_t counts_size,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta) {
+ return WrapUnique(new Histogram(name, minimum, maximum, ranges, counts,
+ logged_counts, counts_size, meta,
+ logged_meta));
+}
+
+// Calculate what range of values are held in each bucket.
+// We have to be careful that we don't pick a ratio between starting points in
+// consecutive buckets that is sooo small, that the integer bounds are the same
+// (effectively making one bucket get no values). We need to avoid:
+// ranges(i) == ranges(i + 1)
+// To avoid that, we just do a fine-grained bucket width as far as we need to
+// until we get a ratio that moves us along at least 2 units at a time. From
+// that bucket onward we do use the exponential growth of buckets.
+//
+// static
+void Histogram::InitializeBucketRanges(Sample minimum,
+ Sample maximum,
+ BucketRanges* ranges) {
+ // Bucket 0 implicitly starts at 0; bucket 1 starts at |minimum|. Each
+ // later boundary is spaced exponentially toward |maximum|, falling back to
+ // width-1 buckets whenever rounding would make two boundaries collide.
+ double log_max = log(static_cast<double>(maximum));
+ double log_ratio;
+ double log_next;
+ size_t bucket_index = 1;
+ Sample current = minimum;
+ ranges->set_range(bucket_index, current);
+ size_t bucket_count = ranges->bucket_count();
+ while (bucket_count > ++bucket_index) {
+ double log_current;
+ log_current = log(static_cast<double>(current));
+ // Calculate the count'th root of the range.
+ log_ratio = (log_max - log_current) / (bucket_count - bucket_index);
+ // See where the next bucket would start.
+ log_next = log_current + log_ratio;
+ Sample next;
+ next = static_cast<int>(floor(exp(log_next) + 0.5));
+ if (next > current)
+ current = next;
+ else
+ ++current; // Just do a narrow bucket, and keep trying.
+ ranges->set_range(bucket_index, current);
+ }
+ // The final (overflow) boundary is the maximum representable sample.
+ ranges->set_range(ranges->bucket_count(), HistogramBase::kSampleType_MAX);
+ ranges->ResetChecksum();
+}
+
+// Threshold below which a redundant-count mismatch is attributed to benign
+// races rather than corruption.
+// static
+const int Histogram::kCommonRaceBasedCountMismatch = 5;
+
+// Returns a bitmask of Inconsistency values describing any corruption found
+// in the bucket ordering, the range checksum, or the redundant sample count.
+uint32_t Histogram::FindCorruption(const HistogramSamples& samples) const {
+ int inconsistencies = NO_INCONSISTENCIES;
+ Sample previous_range = -1; // Bottom range is always 0.
+ for (uint32_t index = 0; index < bucket_count(); ++index) {
+ int new_range = ranges(index);
+ if (previous_range >= new_range)
+ inconsistencies |= BUCKET_ORDER_ERROR;
+ previous_range = new_range;
+ }
+
+ if (!bucket_ranges()->HasValidChecksum())
+ inconsistencies |= RANGE_CHECKSUM_ERROR;
+
+ // The redundant count should equal the sum over all buckets; a small delta
+ // is tolerated as a benign race, a large one is flagged as an error.
+ int64_t delta64 = samples.redundant_count() - samples.TotalCount();
+ if (delta64 != 0) {
+ int delta = static_cast<int>(delta64);
+ if (delta != delta64)
+ delta = INT_MAX; // Flag all giant errors as INT_MAX.
+ if (delta > 0) {
+ UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountHigh", delta);
+ if (delta > kCommonRaceBasedCountMismatch)
+ inconsistencies |= COUNT_HIGH_ERROR;
+ } else {
+ DCHECK_GT(0, delta);
+ UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountLow", -delta);
+ if (-delta > kCommonRaceBasedCountMismatch)
+ inconsistencies |= COUNT_LOW_ERROR;
+ }
+ }
+ return inconsistencies;
+}
+
+// Returns the lower boundary of bucket |i|.
+Sample Histogram::ranges(uint32_t i) const {
+ return bucket_ranges_->range(i);
+}
+
+// Returns the number of buckets in this histogram.
+uint32_t Histogram::bucket_count() const {
+ return static_cast<uint32_t>(bucket_ranges_->bucket_count());
+}
+
+// static
+// Clamps |minimum|, |maximum| and |bucket_count| in-place to legal values
+// (logging when a clamp occurs) and returns whether the resulting triple is
+// usable: min < max and 3 <= bucket_count <= max - min + 2.
+bool Histogram::InspectConstructionArguments(const std::string& name,
+ Sample* minimum,
+ Sample* maximum,
+ uint32_t* bucket_count) {
+ // Defensive code for backward compatibility.
+ if (*minimum < 1) {
+ DVLOG(1) << "Histogram: " << name << " has bad minimum: " << *minimum;
+ *minimum = 1;
+ }
+ if (*maximum >= kSampleType_MAX) {
+ DVLOG(1) << "Histogram: " << name << " has bad maximum: " << *maximum;
+ *maximum = kSampleType_MAX - 1;
+ }
+ if (*bucket_count >= kBucketCount_MAX) {
+ DVLOG(1) << "Histogram: " << name << " has bad bucket_count: "
+ << *bucket_count;
+ *bucket_count = kBucketCount_MAX - 1;
+ }
+
+ if (*minimum >= *maximum)
+ return false;
+ if (*bucket_count < 3)
+ return false;
+ if (*bucket_count > static_cast<uint32_t>(*maximum - *minimum + 2))
+ return false;
+ return true;
+}
+
+// Returns the sample-set id, which is seeded from the hashed metric name
+// (see the constructor's HashMetricName call).
+uint64_t Histogram::name_hash() const {
+ return samples_->id();
+}
+
+HistogramType Histogram::GetHistogramType() const {
+ return HISTOGRAM;
+}
+
+// True if this histogram was declared with exactly the given min/max/bucket
+// parameters; used to detect conflicting re-registrations.
+bool Histogram::HasConstructionArguments(Sample expected_minimum,
+ Sample expected_maximum,
+ uint32_t expected_bucket_count) const {
+ return ((expected_minimum == declared_min_) &&
+ (expected_maximum == declared_max_) &&
+ (expected_bucket_count == bucket_count()));
+}
+
+void Histogram::Add(int value) {
+ AddCount(value, 1);
+}
+
+// Records |count| occurrences of |value|. Out-of-range values are clamped
+// into [0, kSampleType_MAX - 1]; a non-positive count is a caller bug.
+void Histogram::AddCount(int value, int count) {
+ DCHECK_EQ(0, ranges(0));
+ DCHECK_EQ(kSampleType_MAX, ranges(bucket_count()));
+
+ if (value > kSampleType_MAX - 1)
+ value = kSampleType_MAX - 1;
+ if (value < 0)
+ value = 0;
+ if (count <= 0) {
+ NOTREACHED();
+ return;
+ }
+ samples_->Accumulate(value, count);
+
+ // Notify any sample-callback registered for this histogram.
+ FindAndRunCallback(value);
+}
+
+std::unique_ptr<HistogramSamples> Histogram::SnapshotSamples() const {
+ return SnapshotSampleVector();
+}
+
+// Returns the samples accumulated since the previous SnapshotDelta() call
+// and folds them into |logged_samples_| so they are not reported twice.
+// Must not be called after SnapshotFinalDelta().
+std::unique_ptr<HistogramSamples> Histogram::SnapshotDelta() {
+ DCHECK(!final_delta_created_);
+
+ std::unique_ptr<HistogramSamples> snapshot = SnapshotSampleVector();
+ if (!logged_samples_) {
+ // If nothing has been previously logged, save this one as
+ // |logged_samples_| and gather another snapshot to return.
+ logged_samples_.swap(snapshot);
+ return SnapshotSampleVector();
+ }
+
+ // Subtract what was previously logged and update that information.
+ snapshot->Subtract(*logged_samples_);
+ logged_samples_->Add(*snapshot);
+ return snapshot;
+}
+
+// One-shot variant of SnapshotDelta() for shutdown: returns the unlogged
+// samples without updating |logged_samples_|, and forbids further deltas.
+std::unique_ptr<HistogramSamples> Histogram::SnapshotFinalDelta() const {
+ DCHECK(!final_delta_created_);
+ final_delta_created_ = true;
+
+ std::unique_ptr<HistogramSamples> snapshot = SnapshotSampleVector();
+
+ // Subtract what was previously logged and then return.
+ if (logged_samples_)
+ snapshot->Subtract(*logged_samples_);
+ return snapshot;
+}
+
+// Merges an external set of samples into this histogram.
+void Histogram::AddSamples(const HistogramSamples& samples) {
+ samples_->Add(samples);
+}
+
+// Merges samples deserialized from |iter|; returns false on decode failure.
+bool Histogram::AddSamplesFromPickle(PickleIterator* iter) {
+ return samples_->AddFromPickle(iter);
+}
+
+// The following methods provide a graphical histogram display.
+void Histogram::WriteHTMLGraph(std::string* output) const {
+ // TBD(jar) Write a nice HTML bar chart, with divs and mouse-overs etc.
+ output->append("<PRE>");
+ WriteAsciiImpl(true, "<br>", output);
+ output->append("</PRE>");
+}
+
+// Appends a newline-separated ASCII rendering of the histogram to |output|.
+void Histogram::WriteAscii(std::string* output) const {
+ WriteAsciiImpl(true, "\n", output);
+}
+
+// Serializes the construction arguments (not the samples) into |pickle| in
+// the order expected by ReadHistogramArguments/DeserializeInfoImpl.
+bool Histogram::SerializeInfoImpl(Pickle* pickle) const {
+ DCHECK(bucket_ranges()->HasValidChecksum());
+ return pickle->WriteString(histogram_name()) &&
+ pickle->WriteInt(flags()) &&
+ pickle->WriteInt(declared_min()) &&
+ pickle->WriteInt(declared_max()) &&
+ pickle->WriteUInt32(bucket_count()) &&
+ pickle->WriteUInt32(bucket_ranges()->checksum());
+}
+
+// Heap-backed constructor. |ranges| may be null in tests; samples are only
+// allocated when ranges are provided.
+Histogram::Histogram(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ const BucketRanges* ranges)
+ : HistogramBase(name),
+ bucket_ranges_(ranges),
+ declared_min_(minimum),
+ declared_max_(maximum) {
+ if (ranges)
+ samples_.reset(new SampleVector(HashMetricName(name), ranges));
+}
+
+// Persistent-memory constructor: sample and logged-sample storage point into
+// caller-owned memory (see PersistentCreate). Both vectors share the same id.
+Histogram::Histogram(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ const BucketRanges* ranges,
+ HistogramBase::AtomicCount* counts,
+ HistogramBase::AtomicCount* logged_counts,
+ uint32_t counts_size,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta)
+ : HistogramBase(name),
+ bucket_ranges_(ranges),
+ declared_min_(minimum),
+ declared_max_(maximum) {
+ if (ranges) {
+ samples_.reset(new SampleVector(HashMetricName(name),
+ counts, counts_size, meta, ranges));
+ logged_samples_.reset(new SampleVector(samples_->id(), logged_counts,
+ counts_size, logged_meta, ranges));
+ }
+}
+
+Histogram::~Histogram() {
+}
+
+// Exponential histograms print empty buckets too (subclasses may override).
+bool Histogram::PrintEmptyBucket(uint32_t /*index*/) const {
+ return true;
+}
+
+// Use the actual bucket widths (like a linear histogram) until the widths get
+// over some transition value, and then use that transition width. Exponentials
+// get so big so fast (and we don't expect to see a lot of entries in the large
+// buckets), so we need this to make it possible to see what is going on and
+// not have 0-graphical-height buckets.
+double Histogram::GetBucketSize(Count current, uint32_t i) const {
+ DCHECK_GT(ranges(i + 1), ranges(i));
+ static const double kTransitionWidth = 5;
+ double denominator = ranges(i + 1) - ranges(i);
+ if (denominator > kTransitionWidth)
+ denominator = kTransitionWidth; // Stop trying to normalize.
+ return current/denominator;
+}
+
+// Renders the lower boundary of bucket |i| for ASCII output.
+const std::string Histogram::GetAsciiBucketRange(uint32_t i) const {
+ return GetSimpleAsciiBucketRange(ranges(i));
+}
+
+//------------------------------------------------------------------------------
+// Private methods
+
+// static
+// Reconstructs (or finds) the local histogram described by the pickled
+// construction arguments. Returns NULL on decode failure or if the incoming
+// range checksum does not match the locally computed ranges.
+HistogramBase* Histogram::DeserializeInfoImpl(PickleIterator* iter) {
+ std::string histogram_name;
+ int flags;
+ int declared_min;
+ int declared_max;
+ uint32_t bucket_count;
+ uint32_t range_checksum;
+
+ if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
+ &declared_max, &bucket_count, &range_checksum)) {
+ return NULL;
+ }
+
+ // Find or create the local version of the histogram in this process.
+ HistogramBase* histogram = Histogram::FactoryGet(
+ histogram_name, declared_min, declared_max, bucket_count, flags);
+
+ if (!ValidateRangeChecksum(*histogram, range_checksum)) {
+ // The serialized histogram might be corrupted.
+ return NULL;
+ }
+ return histogram;
+}
+
+// Returns a heap copy of the current samples (same id, same ranges).
+std::unique_ptr<SampleVector> Histogram::SnapshotSampleVector() const {
+ std::unique_ptr<SampleVector> samples(
+ new SampleVector(samples_->id(), bucket_ranges()));
+ samples->Add(*samples_);
+ return samples;
+}
+
+// Writes the full ASCII rendering: header line, then one line per bucket
+// with range label, optional star-graph, count and cumulative percentage.
+// Runs of consecutive empty buckets are collapsed to a "..." line.
+void Histogram::WriteAsciiImpl(bool graph_it,
+ const std::string& newline,
+ std::string* output) const {
+ // Get local (stack) copies of all effectively volatile class data so that we
+ // are consistent across our output activities.
+ std::unique_ptr<SampleVector> snapshot = SnapshotSampleVector();
+ Count sample_count = snapshot->TotalCount();
+
+ WriteAsciiHeader(*snapshot, sample_count, output);
+ output->append(newline);
+
+ // Prepare to normalize graphical rendering of bucket contents.
+ double max_size = 0;
+ if (graph_it)
+ max_size = GetPeakBucketSize(*snapshot);
+
+ // Calculate space needed to print bucket range numbers. Leave room to print
+ // nearly the largest bucket range without sliding over the histogram.
+ uint32_t largest_non_empty_bucket = bucket_count() - 1;
+ while (0 == snapshot->GetCountAtIndex(largest_non_empty_bucket)) {
+ if (0 == largest_non_empty_bucket)
+ break; // All buckets are empty.
+ --largest_non_empty_bucket;
+ }
+
+ // Calculate largest print width needed for any of our bucket range displays.
+ size_t print_width = 1;
+ for (uint32_t i = 0; i < bucket_count(); ++i) {
+ if (snapshot->GetCountAtIndex(i)) {
+ size_t width = GetAsciiBucketRange(i).size() + 1;
+ if (width > print_width)
+ print_width = width;
+ }
+ }
+
+ int64_t remaining = sample_count;
+ int64_t past = 0;
+ // Output the actual histogram graph.
+ for (uint32_t i = 0; i < bucket_count(); ++i) {
+ Count current = snapshot->GetCountAtIndex(i);
+ if (!current && !PrintEmptyBucket(i))
+ continue;
+ remaining -= current;
+ std::string range = GetAsciiBucketRange(i);
+ output->append(range);
+ // Pad the range label out to the common column width.
+ for (size_t j = 0; range.size() + j < print_width + 1; ++j)
+ output->push_back(' ');
+ if (0 == current && i < bucket_count() - 1 &&
+ 0 == snapshot->GetCountAtIndex(i + 1)) {
+ while (i < bucket_count() - 1 &&
+ 0 == snapshot->GetCountAtIndex(i + 1)) {
+ ++i;
+ }
+ output->append("... ");
+ output->append(newline);
+ continue; // No reason to plot emptiness.
+ }
+ double current_size = GetBucketSize(current, i);
+ if (graph_it)
+ WriteAsciiBucketGraph(current_size, max_size, output);
+ WriteAsciiBucketContext(past, current, remaining, i, output);
+ output->append(newline);
+ past += current;
+ }
+ // Every sample must be accounted for exactly once.
+ DCHECK_EQ(sample_count, past);
+}
+
+// Returns the largest normalized bucket size, used to scale the ASCII graph.
+double Histogram::GetPeakBucketSize(const SampleVector& samples) const {
+ double max = 0;
+ for (uint32_t i = 0; i < bucket_count() ; ++i) {
+ double current_size = GetBucketSize(samples.GetCountAtIndex(i), i);
+ if (current_size > max)
+ max = current_size;
+ }
+ return max;
+}
+
+// Writes the summary line: name, sample count, average, and any flags.
+void Histogram::WriteAsciiHeader(const SampleVector& samples,
+ Count sample_count,
+ std::string* output) const {
+ StringAppendF(output,
+ "Histogram: %s recorded %d samples",
+ histogram_name().c_str(),
+ sample_count);
+ if (0 == sample_count) {
+ DCHECK_EQ(samples.sum(), 0);
+ } else {
+ // NOTE(review): the float cast intentionally(?) rounds the sum before the
+ // divide -- the printed average has only one decimal place anyway.
+ double average = static_cast<float>(samples.sum()) / sample_count;
+
+ StringAppendF(output, ", average = %.1f", average);
+ }
+ if (flags() & ~kHexRangePrintingFlag)
+ StringAppendF(output, " (flags = 0x%x)", flags() & ~kHexRangePrintingFlag);
+}
+
+void Histogram::WriteAsciiBucketContext(const int64_t past,
+ const Count current,
+ const int64_t remaining,
+ const uint32_t i,
+ std::string* output) const {
+ double scaled_sum = (past + current + remaining) / 100.0;
+ WriteAsciiBucketValue(current, scaled_sum, output);
+ if (0 < i) {
+ double percentage = past / scaled_sum;
+ StringAppendF(output, " {%3.1f%%}", percentage);
+ }
+}
+
+void Histogram::GetParameters(DictionaryValue* params) const {
+ params->SetString("type", HistogramTypeToString(GetHistogramType()));
+ params->SetInteger("min", declared_min());
+ params->SetInteger("max", declared_max());
+ params->SetInteger("bucket_count", static_cast<int>(bucket_count()));
+}
+
+void Histogram::GetCountAndBucketData(Count* count,
+ int64_t* sum,
+ ListValue* buckets) const {
+ std::unique_ptr<SampleVector> snapshot = SnapshotSampleVector();
+ *count = snapshot->TotalCount();
+ *sum = snapshot->sum();
+ uint32_t index = 0;
+ for (uint32_t i = 0; i < bucket_count(); ++i) {
+ Sample count_at_index = snapshot->GetCountAtIndex(i);
+ if (count_at_index > 0) {
+ std::unique_ptr<DictionaryValue> bucket_value(new DictionaryValue());
+ bucket_value->SetInteger("low", ranges(i));
+ if (i != bucket_count() - 1)
+ bucket_value->SetInteger("high", ranges(i + 1));
+ bucket_value->SetInteger("count", count_at_index);
+ buckets->Set(index, bucket_value.release());
+ ++index;
+ }
+ }
+}
+
+//------------------------------------------------------------------------------
+// LinearHistogram: This histogram uses a traditional set of evenly spaced
+// buckets.
+//------------------------------------------------------------------------------
+
+class LinearHistogram::Factory : public Histogram::Factory {
+ public:
+ Factory(const std::string& name,
+ HistogramBase::Sample minimum,
+ HistogramBase::Sample maximum,
+ uint32_t bucket_count,
+ int32_t flags,
+ const DescriptionPair* descriptions)
+ : Histogram::Factory(name, LINEAR_HISTOGRAM, minimum, maximum,
+ bucket_count, flags) {
+ descriptions_ = descriptions;
+ }
+ ~Factory() override = default;
+
+ protected:
+ BucketRanges* CreateRanges() override {
+ BucketRanges* ranges = new BucketRanges(bucket_count_ + 1);
+ LinearHistogram::InitializeBucketRanges(minimum_, maximum_, ranges);
+ return ranges;
+ }
+
+ std::unique_ptr<HistogramBase> HeapAlloc(
+ const BucketRanges* ranges) override {
+ return WrapUnique(
+ new LinearHistogram(name_, minimum_, maximum_, ranges));
+ }
+
+ void FillHistogram(HistogramBase* base_histogram) override {
+ Histogram::Factory::FillHistogram(base_histogram);
+ LinearHistogram* histogram = static_cast<LinearHistogram*>(base_histogram);
+ // Set range descriptions.
+ if (descriptions_) {
+ for (int i = 0; descriptions_[i].description; ++i) {
+ histogram->bucket_description_[descriptions_[i].sample] =
+ descriptions_[i].description;
+ }
+ }
+ }
+
+ private:
+ const DescriptionPair* descriptions_;
+
+ DISALLOW_COPY_AND_ASSIGN(Factory);
+};
+
+LinearHistogram::~LinearHistogram() {}
+
+HistogramBase* LinearHistogram::FactoryGet(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ uint32_t bucket_count,
+ int32_t flags) {
+ return FactoryGetWithRangeDescription(
+ name, minimum, maximum, bucket_count, flags, NULL);
+}
+
+HistogramBase* LinearHistogram::FactoryTimeGet(const std::string& name,
+ TimeDelta minimum,
+ TimeDelta maximum,
+ uint32_t bucket_count,
+ int32_t flags) {
+ return FactoryGet(name, static_cast<Sample>(minimum.InMilliseconds()),
+ static_cast<Sample>(maximum.InMilliseconds()), bucket_count,
+ flags);
+}
+
+HistogramBase* LinearHistogram::FactoryGet(const char* name,
+ Sample minimum,
+ Sample maximum,
+ uint32_t bucket_count,
+ int32_t flags) {
+ return FactoryGet(std::string(name), minimum, maximum, bucket_count, flags);
+}
+
+HistogramBase* LinearHistogram::FactoryTimeGet(const char* name,
+ TimeDelta minimum,
+ TimeDelta maximum,
+ uint32_t bucket_count,
+ int32_t flags) {
+ return FactoryTimeGet(std::string(name), minimum, maximum, bucket_count,
+ flags);
+}
+
+std::unique_ptr<HistogramBase> LinearHistogram::PersistentCreate(
+ const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ const BucketRanges* ranges,
+ HistogramBase::AtomicCount* counts,
+ HistogramBase::AtomicCount* logged_counts,
+ uint32_t counts_size,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta) {
+ return WrapUnique(new LinearHistogram(name, minimum, maximum, ranges,
+ counts, logged_counts,
+ counts_size, meta, logged_meta));
+}
+
+HistogramBase* LinearHistogram::FactoryGetWithRangeDescription(
+ const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ uint32_t bucket_count,
+ int32_t flags,
+ const DescriptionPair descriptions[]) {
+ bool valid_arguments = Histogram::InspectConstructionArguments(
+ name, &minimum, &maximum, &bucket_count);
+ DCHECK(valid_arguments);
+
+ return Factory(name, minimum, maximum, bucket_count, flags, descriptions)
+ .Build();
+}
+
+HistogramType LinearHistogram::GetHistogramType() const {
+ return LINEAR_HISTOGRAM;
+}
+
+LinearHistogram::LinearHistogram(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ const BucketRanges* ranges)
+ : Histogram(name, minimum, maximum, ranges) {
+}
+
+LinearHistogram::LinearHistogram(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ const BucketRanges* ranges,
+ HistogramBase::AtomicCount* counts,
+ HistogramBase::AtomicCount* logged_counts,
+ uint32_t counts_size,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta)
+ : Histogram(name, minimum, maximum, ranges, counts, logged_counts,
+ counts_size, meta, logged_meta) {}
+
+double LinearHistogram::GetBucketSize(Count current, uint32_t i) const {
+ DCHECK_GT(ranges(i + 1), ranges(i));
+ // Adjacent buckets with different widths would have "surprisingly" many (few)
+ // samples in a histogram if we didn't normalize this way.
+ double denominator = ranges(i + 1) - ranges(i);
+ return current/denominator;
+}
+
+const std::string LinearHistogram::GetAsciiBucketRange(uint32_t i) const {
+ int range = ranges(i);
+ BucketDescriptionMap::const_iterator it = bucket_description_.find(range);
+ if (it == bucket_description_.end())
+ return Histogram::GetAsciiBucketRange(i);
+ return it->second;
+}
+
+bool LinearHistogram::PrintEmptyBucket(uint32_t index) const {
+ return bucket_description_.find(ranges(index)) == bucket_description_.end();
+}
+
+// static
+void LinearHistogram::InitializeBucketRanges(Sample minimum,
+ Sample maximum,
+ BucketRanges* ranges) {
+ double min = minimum;
+ double max = maximum;
+ size_t bucket_count = ranges->bucket_count();
+ for (size_t i = 1; i < bucket_count; ++i) {
+ double linear_range =
+ (min * (bucket_count - 1 - i) + max * (i - 1)) / (bucket_count - 2);
+ ranges->set_range(i, static_cast<Sample>(linear_range + 0.5));
+ // TODO(bcwhite): Remove once crbug/586622 is fixed.
+ base::debug::Alias(&linear_range);
+ }
+ ranges->set_range(ranges->bucket_count(), HistogramBase::kSampleType_MAX);
+ ranges->ResetChecksum();
+}
+
+// static
+HistogramBase* LinearHistogram::DeserializeInfoImpl(PickleIterator* iter) {
+ std::string histogram_name;
+ int flags;
+ int declared_min;
+ int declared_max;
+ uint32_t bucket_count;
+ uint32_t range_checksum;
+
+ if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
+ &declared_max, &bucket_count, &range_checksum)) {
+ return NULL;
+ }
+
+ HistogramBase* histogram = LinearHistogram::FactoryGet(
+ histogram_name, declared_min, declared_max, bucket_count, flags);
+ if (!ValidateRangeChecksum(*histogram, range_checksum)) {
+ // The serialized histogram might be corrupted.
+ return NULL;
+ }
+ return histogram;
+}
+
+//------------------------------------------------------------------------------
+// This section provides implementation for BooleanHistogram.
+//------------------------------------------------------------------------------
+
+class BooleanHistogram::Factory : public Histogram::Factory {
+ public:
+ Factory(const std::string& name, int32_t flags)
+ : Histogram::Factory(name, BOOLEAN_HISTOGRAM, 1, 2, 3, flags) {}
+ ~Factory() override = default;
+
+ protected:
+ BucketRanges* CreateRanges() override {
+ BucketRanges* ranges = new BucketRanges(3 + 1);
+ LinearHistogram::InitializeBucketRanges(1, 2, ranges);
+ return ranges;
+ }
+
+ std::unique_ptr<HistogramBase> HeapAlloc(
+ const BucketRanges* ranges) override {
+ return WrapUnique(new BooleanHistogram(name_, ranges));
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Factory);
+};
+
+HistogramBase* BooleanHistogram::FactoryGet(const std::string& name,
+ int32_t flags) {
+ return Factory(name, flags).Build();
+}
+
+HistogramBase* BooleanHistogram::FactoryGet(const char* name, int32_t flags) {
+ return FactoryGet(std::string(name), flags);
+}
+
+std::unique_ptr<HistogramBase> BooleanHistogram::PersistentCreate(
+ const std::string& name,
+ const BucketRanges* ranges,
+ HistogramBase::AtomicCount* counts,
+ HistogramBase::AtomicCount* logged_counts,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta) {
+ return WrapUnique(new BooleanHistogram(
+ name, ranges, counts, logged_counts, meta, logged_meta));
+}
+
+HistogramType BooleanHistogram::GetHistogramType() const {
+ return BOOLEAN_HISTOGRAM;
+}
+
+BooleanHistogram::BooleanHistogram(const std::string& name,
+ const BucketRanges* ranges)
+ : LinearHistogram(name, 1, 2, ranges) {}
+
+BooleanHistogram::BooleanHistogram(const std::string& name,
+ const BucketRanges* ranges,
+ HistogramBase::AtomicCount* counts,
+ HistogramBase::AtomicCount* logged_counts,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta)
+ : LinearHistogram(name, 1, 2, ranges, counts, logged_counts, 2, meta,
+ logged_meta) {}
+
+HistogramBase* BooleanHistogram::DeserializeInfoImpl(PickleIterator* iter) {
+ std::string histogram_name;
+ int flags;
+ int declared_min;
+ int declared_max;
+ uint32_t bucket_count;
+ uint32_t range_checksum;
+
+ if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
+ &declared_max, &bucket_count, &range_checksum)) {
+ return NULL;
+ }
+
+ HistogramBase* histogram = BooleanHistogram::FactoryGet(
+ histogram_name, flags);
+ if (!ValidateRangeChecksum(*histogram, range_checksum)) {
+ // The serialized histogram might be corrupted.
+ return NULL;
+ }
+ return histogram;
+}
+
+//------------------------------------------------------------------------------
+// CustomHistogram:
+//------------------------------------------------------------------------------
+
+class CustomHistogram::Factory : public Histogram::Factory {
+ public:
+ Factory(const std::string& name,
+ const std::vector<Sample>* custom_ranges,
+ int32_t flags)
+ : Histogram::Factory(name, CUSTOM_HISTOGRAM, 0, 0, 0, flags) {
+ custom_ranges_ = custom_ranges;
+ }
+ ~Factory() override = default;
+
+ protected:
+ BucketRanges* CreateRanges() override {
+ // Remove the duplicates in the custom ranges array.
+ std::vector<int> ranges = *custom_ranges_;
+ ranges.push_back(0); // Ensure we have a zero value.
+ ranges.push_back(HistogramBase::kSampleType_MAX);
+ std::sort(ranges.begin(), ranges.end());
+ ranges.erase(std::unique(ranges.begin(), ranges.end()), ranges.end());
+
+ BucketRanges* bucket_ranges = new BucketRanges(ranges.size());
+ for (uint32_t i = 0; i < ranges.size(); i++) {
+ bucket_ranges->set_range(i, ranges[i]);
+ }
+ bucket_ranges->ResetChecksum();
+ return bucket_ranges;
+ }
+
+ std::unique_ptr<HistogramBase> HeapAlloc(
+ const BucketRanges* ranges) override {
+ return WrapUnique(new CustomHistogram(name_, ranges));
+ }
+
+ private:
+ const std::vector<Sample>* custom_ranges_;
+
+ DISALLOW_COPY_AND_ASSIGN(Factory);
+};
+
+HistogramBase* CustomHistogram::FactoryGet(
+ const std::string& name,
+ const std::vector<Sample>& custom_ranges,
+ int32_t flags) {
+ CHECK(ValidateCustomRanges(custom_ranges));
+
+ return Factory(name, &custom_ranges, flags).Build();
+}
+
+HistogramBase* CustomHistogram::FactoryGet(
+ const char* name,
+ const std::vector<Sample>& custom_ranges,
+ int32_t flags) {
+ return FactoryGet(std::string(name), custom_ranges, flags);
+}
+
+std::unique_ptr<HistogramBase> CustomHistogram::PersistentCreate(
+ const std::string& name,
+ const BucketRanges* ranges,
+ HistogramBase::AtomicCount* counts,
+ HistogramBase::AtomicCount* logged_counts,
+ uint32_t counts_size,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta) {
+ return WrapUnique(new CustomHistogram(
+ name, ranges, counts, logged_counts, counts_size, meta, logged_meta));
+}
+
+HistogramType CustomHistogram::GetHistogramType() const {
+ return CUSTOM_HISTOGRAM;
+}
+
+// static
+std::vector<Sample> CustomHistogram::ArrayToCustomRanges(
+ const Sample* values, uint32_t num_values) {
+ std::vector<Sample> all_values;
+ for (uint32_t i = 0; i < num_values; ++i) {
+ Sample value = values[i];
+ all_values.push_back(value);
+
+ // Ensure that a guard bucket is added. If we end up with duplicate
+ // values, FactoryGet will take care of removing them.
+ all_values.push_back(value + 1);
+ }
+ return all_values;
+}
+
+CustomHistogram::CustomHistogram(const std::string& name,
+ const BucketRanges* ranges)
+ : Histogram(name,
+ ranges->range(1),
+ ranges->range(ranges->bucket_count() - 1),
+ ranges) {}
+
+CustomHistogram::CustomHistogram(const std::string& name,
+ const BucketRanges* ranges,
+ HistogramBase::AtomicCount* counts,
+ HistogramBase::AtomicCount* logged_counts,
+ uint32_t counts_size,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta)
+ : Histogram(name,
+ ranges->range(1),
+ ranges->range(ranges->bucket_count() - 1),
+ ranges,
+ counts,
+ logged_counts,
+ counts_size,
+ meta,
+ logged_meta) {}
+
+bool CustomHistogram::SerializeInfoImpl(Pickle* pickle) const {
+ if (!Histogram::SerializeInfoImpl(pickle))
+ return false;
+
+  // Serialize ranges. First and last ranges are always 0 and INT_MAX, so don't
+  // write them.
+ for (uint32_t i = 1; i < bucket_ranges()->bucket_count(); ++i) {
+ if (!pickle->WriteInt(bucket_ranges()->range(i)))
+ return false;
+ }
+ return true;
+}
+
+double CustomHistogram::GetBucketSize(Count /*current*/, uint32_t /*i*/) const {
+ return 1;
+}
+
+// static
+HistogramBase* CustomHistogram::DeserializeInfoImpl(PickleIterator* iter) {
+ std::string histogram_name;
+ int flags;
+ int declared_min;
+ int declared_max;
+ uint32_t bucket_count;
+ uint32_t range_checksum;
+
+ if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
+ &declared_max, &bucket_count, &range_checksum)) {
+ return NULL;
+ }
+
+ // First and last ranges are not serialized.
+ std::vector<Sample> sample_ranges(bucket_count - 1);
+
+ for (uint32_t i = 0; i < sample_ranges.size(); ++i) {
+ if (!iter->ReadInt(&sample_ranges[i]))
+ return NULL;
+ }
+
+ HistogramBase* histogram = CustomHistogram::FactoryGet(
+ histogram_name, sample_ranges, flags);
+ if (!ValidateRangeChecksum(*histogram, range_checksum)) {
+ // The serialized histogram might be corrupted.
+ return NULL;
+ }
+ return histogram;
+}
+
+// static
+bool CustomHistogram::ValidateCustomRanges(
+ const std::vector<Sample>& custom_ranges) {
+ bool has_valid_range = false;
+ for (uint32_t i = 0; i < custom_ranges.size(); i++) {
+ Sample sample = custom_ranges[i];
+ if (sample < 0 || sample > HistogramBase::kSampleType_MAX - 1)
+ return false;
+ if (sample != 0)
+ has_valid_range = true;
+ }
+ return has_valid_range;
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/histogram.h b/libchrome/base/metrics/histogram.h
new file mode 100644
index 0000000..2283a4d
--- /dev/null
+++ b/libchrome/base/metrics/histogram.h
@@ -0,0 +1,552 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Histogram is an object that aggregates statistics, and can summarize them in
+// various forms, including ASCII graphical, HTML, and numerically (as a
+// vector of numbers corresponding to each of the aggregating buckets).
+
+// It supports calls to accumulate either time intervals (which are processed
+// as integral number of milliseconds), or arbitrary integral units.
+
+// For Histogram(exponential histogram), LinearHistogram and CustomHistogram,
+// the minimum for a declared range is 1 (instead of 0), while the maximum is
+// (HistogramBase::kSampleType_MAX - 1). Currently you can declare histograms
+// with ranges exceeding those limits (e.g. 0 as minimal or
+// HistogramBase::kSampleType_MAX as maximal), but those excesses will be
+// silently clamped to those limits (for backwards compatibility with existing
+// code). Best practice is to not exceed the limits.
+
+// Each use of a histogram with the same name will reference the same underlying
+// data, so it is safe to record to the same histogram from multiple locations
+// in the code. It is a runtime error if all uses of the same histogram do not
+// agree exactly in type, bucket size and range.
+
+// For Histogram and LinearHistogram, the maximum for a declared range should
+// always be larger than (not equal to) the minimum of the range. Zero and
+// HistogramBase::kSampleType_MAX are implicitly added as first and last ranges,
+// so the smallest legal bucket_count is 3. However CustomHistogram can have
+// bucket count as 2 (when you give a custom ranges vector containing only 1
+// range).
+// For these 3 kinds of histograms, the max bucket count is always
+// (Histogram::kBucketCount_MAX - 1).
+
+// The buckets layout of class Histogram is exponential. For example, buckets
+// might contain (sequentially) the count of values in the following intervals:
+// [0,1), [1,2), [2,4), [4,8), [8,16), [16,32), [32,64), [64,infinity)
+// That bucket allocation would actually result from construction of a histogram
+// for values between 1 and 64, with 8 buckets, such as:
+// Histogram count("some name", 1, 64, 8);
+// Note that the underflow bucket [0,1) and the overflow bucket [64,infinity)
+// are also counted by the constructor in the user supplied "bucket_count"
+// argument.
+// The above example has an exponential ratio of 2 (doubling the bucket width
+// in each consecutive bucket). The Histogram class automatically calculates
+// the smallest ratio that it can use to construct the number of buckets
+// selected in the constructor. As another example, if you had 50 buckets,
+// and millisecond time values from 1 to 10000, then the ratio between
+// consecutive bucket widths will be approximately somewhere around the 50th
+// root of 10000. This approach provides very fine grain (narrow) buckets
+// at the low end of the histogram scale, but allows the histogram to cover a
+// gigantic range with the addition of very few buckets.
+
+// Usually we use macros to define and use a histogram, which are defined in
+// base/metrics/histogram_macros.h. Note: Callers should include that header
+// directly if they only access the histogram APIs through macros.
+//
+// Macros use a pattern involving a function static variable, that is a pointer
+// to a histogram. This static is explicitly initialized on any thread
+// that detects an uninitialized (NULL) pointer. The potentially racy
+// initialization is not a problem as it is always set to point to the same
+// value (i.e., the FactoryGet always returns the same value). FactoryGet
+// is also completely thread safe, which results in a completely thread safe,
+// and relatively fast, set of counters. To avoid races at shutdown, the static
+// pointer is NOT deleted, and we leak the histograms at process termination.
+
+#ifndef BASE_METRICS_HISTOGRAM_H_
+#define BASE_METRICS_HISTOGRAM_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/metrics/bucket_ranges.h"
+#include "base/metrics/histogram_base.h"
+// TODO(asvitkine): Migrate callers to include this directly and remove this.
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class BooleanHistogram;
+class CustomHistogram;
+class Histogram;
+class LinearHistogram;
+class PersistentMemoryAllocator;
+class Pickle;
+class PickleIterator;
+class SampleVector;
+
+class BASE_EXPORT Histogram : public HistogramBase {
+ public:
+ // Initialize maximum number of buckets in histograms as 16,384.
+ static const uint32_t kBucketCount_MAX;
+
+ typedef std::vector<Count> Counts;
+
+ ~Histogram() override;
+
+ //----------------------------------------------------------------------------
+ // For a valid histogram, input should follow these restrictions:
+ // minimum > 0 (if a minimum below 1 is specified, it will implicitly be
+ // normalized up to 1)
+ // maximum > minimum
+ // buckets > 2 [minimum buckets needed: underflow, overflow and the range]
+ // Additionally,
+ // buckets <= (maximum - minimum + 2) - this is to ensure that we don't have
+ // more buckets than the range of numbers; having more buckets than 1 per
+ // value in the range would be nonsensical.
+ static HistogramBase* FactoryGet(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ uint32_t bucket_count,
+ int32_t flags);
+ static HistogramBase* FactoryTimeGet(const std::string& name,
+ base::TimeDelta minimum,
+ base::TimeDelta maximum,
+ uint32_t bucket_count,
+ int32_t flags);
+
+ // Overloads of the above two functions that take a const char* |name| param,
+ // to avoid code bloat from the std::string constructor being inlined into
+ // call sites.
+ static HistogramBase* FactoryGet(const char* name,
+ Sample minimum,
+ Sample maximum,
+ uint32_t bucket_count,
+ int32_t flags);
+ static HistogramBase* FactoryTimeGet(const char* name,
+ base::TimeDelta minimum,
+ base::TimeDelta maximum,
+ uint32_t bucket_count,
+ int32_t flags);
+
+ // Create a histogram using data in persistent storage.
+ static std::unique_ptr<HistogramBase> PersistentCreate(
+ const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ const BucketRanges* ranges,
+ HistogramBase::AtomicCount* counts,
+ HistogramBase::AtomicCount* logged_counts,
+ uint32_t counts_size,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta);
+
+ static void InitializeBucketRanges(Sample minimum,
+ Sample maximum,
+ BucketRanges* ranges);
+
+  // This constant is for FindCorruption. Since snapshots of histograms are
+ // taken asynchronously relative to sampling, and our counting code currently
+ // does not prevent race conditions, it is pretty likely that we'll catch a
+ // redundant count that doesn't match the sample count. We allow for a
+ // certain amount of slop before flagging this as an inconsistency. Even with
+ // an inconsistency, we'll snapshot it again (for UMA in about a half hour),
+ // so we'll eventually get the data, if it was not the result of a corruption.
+ static const int kCommonRaceBasedCountMismatch;
+
+ // Check to see if bucket ranges, counts and tallies in the snapshot are
+ // consistent with the bucket ranges and checksums in our histogram. This can
+ // produce a false-alarm if a race occurred in the reading of the data during
+ // a SnapShot process, but should otherwise be false at all times (unless we
+ // have memory over-writes, or DRAM failures). Flag definitions are located
+ // under "enum Inconsistency" in base/metrics/histogram_base.h.
+ uint32_t FindCorruption(const HistogramSamples& samples) const override;
+
+ //----------------------------------------------------------------------------
+ // Accessors for factory construction, serialization and testing.
+ //----------------------------------------------------------------------------
+ Sample declared_min() const { return declared_min_; }
+ Sample declared_max() const { return declared_max_; }
+ virtual Sample ranges(uint32_t i) const;
+ virtual uint32_t bucket_count() const;
+ const BucketRanges* bucket_ranges() const { return bucket_ranges_; }
+
+ // This function validates histogram construction arguments. It returns false
+ // if some of the arguments are totally bad.
+  // Note. Currently it allows some bad input, e.g. 0 as minimum, but silently
+  // converts it to good input: 1.
+  // TODO(kaiwang): Be stricter and return false for any bad input, and
+ // make this a readonly validating function.
+ static bool InspectConstructionArguments(const std::string& name,
+ Sample* minimum,
+ Sample* maximum,
+ uint32_t* bucket_count);
+
+ // HistogramBase implementation:
+ uint64_t name_hash() const override;
+ HistogramType GetHistogramType() const override;
+ bool HasConstructionArguments(Sample expected_minimum,
+ Sample expected_maximum,
+ uint32_t expected_bucket_count) const override;
+ void Add(Sample value) override;
+ void AddCount(Sample value, int count) override;
+ std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
+ std::unique_ptr<HistogramSamples> SnapshotDelta() override;
+ std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
+ void AddSamples(const HistogramSamples& samples) override;
+ bool AddSamplesFromPickle(base::PickleIterator* iter) override;
+ void WriteHTMLGraph(std::string* output) const override;
+ void WriteAscii(std::string* output) const override;
+
+ protected:
+ // This class, defined entirely within the .cc file, contains all the
+ // common logic for building a Histogram and can be overridden by more
+ // specific types to alter details of how the creation is done. It is
+ // defined as an embedded class (rather than an anonymous one) so it
+ // can access the protected constructors.
+ class Factory;
+
+ // |ranges| should contain the underflow and overflow buckets. See top
+ // comments for example.
+ Histogram(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ const BucketRanges* ranges);
+
+ // Traditionally, histograms allocate their own memory for the bucket
+ // vector but "shared" histograms use memory regions allocated from a
+ // special memory segment that is passed in here. It is assumed that
+ // the life of this memory is managed externally and exceeds the lifetime
+ // of this object. Practically, this memory is never released until the
+ // process exits and the OS cleans it up.
+ Histogram(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ const BucketRanges* ranges,
+ HistogramBase::AtomicCount* counts,
+ HistogramBase::AtomicCount* logged_counts,
+ uint32_t counts_size,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta);
+
+ // HistogramBase implementation:
+ bool SerializeInfoImpl(base::Pickle* pickle) const override;
+
+ // Method to override to skip the display of the i'th bucket if it's empty.
+ virtual bool PrintEmptyBucket(uint32_t index) const;
+
+ // Get normalized size, relative to the ranges(i).
+ virtual double GetBucketSize(Count current, uint32_t i) const;
+
+ // Return a string description of what goes in a given bucket.
+ // Most commonly this is the numeric value, but in derived classes it may
+ // be a name (or string description) given to the bucket.
+ virtual const std::string GetAsciiBucketRange(uint32_t it) const;
+
+ private:
+ // Allow tests to corrupt our innards for testing purposes.
+ FRIEND_TEST_ALL_PREFIXES(HistogramTest, BoundsTest);
+ FRIEND_TEST_ALL_PREFIXES(HistogramTest, BucketPlacementTest);
+ FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptSampleCounts);
+
+ friend class StatisticsRecorder; // To allow it to delete duplicates.
+ friend class StatisticsRecorderTest;
+
+ friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
+ base::PickleIterator* iter);
+ static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
+
+ // Implementation of SnapshotSamples function.
+ std::unique_ptr<SampleVector> SnapshotSampleVector() const;
+
+ //----------------------------------------------------------------------------
+ // Helpers for emitting Ascii graphic. Each method appends data to output.
+
+ void WriteAsciiImpl(bool graph_it,
+ const std::string& newline,
+ std::string* output) const;
+
+ // Find out how large (graphically) the largest bucket will appear to be.
+ double GetPeakBucketSize(const SampleVector& samples) const;
+
+ // Write a common header message describing this histogram.
+ void WriteAsciiHeader(const SampleVector& samples,
+ Count sample_count,
+ std::string* output) const;
+
+ // Write information about previous, current, and next buckets.
+ // Information such as cumulative percentage, etc.
+ void WriteAsciiBucketContext(const int64_t past,
+ const Count current,
+ const int64_t remaining,
+ const uint32_t i,
+ std::string* output) const;
+
+ // WriteJSON calls these.
+ void GetParameters(DictionaryValue* params) const override;
+
+ void GetCountAndBucketData(Count* count,
+ int64_t* sum,
+ ListValue* buckets) const override;
+
+ // Does not own this object. Should get from StatisticsRecorder.
+ const BucketRanges* bucket_ranges_;
+
+ Sample declared_min_; // Less than this goes into the first bucket.
+ Sample declared_max_; // Over this goes into the last bucket.
+
+ // Finally, provide the state that changes with the addition of each new
+ // sample.
+ std::unique_ptr<SampleVector> samples_;
+
+ // Also keep a previous uploaded state for calculating deltas.
+ std::unique_ptr<HistogramSamples> logged_samples_;
+
+ // Flag to indicate if PrepareFinalDelta has been previously called. It is
+ // used to DCHECK that a final delta is not created multiple times.
+ mutable bool final_delta_created_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(Histogram);
+};
+
+//------------------------------------------------------------------------------
+
+// LinearHistogram is a more traditional histogram, with evenly spaced
+// buckets.
+class BASE_EXPORT LinearHistogram : public Histogram {
+ public:
+ ~LinearHistogram() override;
+
+  /* minimum should start from 1. 0 as minimum is invalid. 0 is an implicit
+     default underflow bucket. */
+ static HistogramBase* FactoryGet(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ uint32_t bucket_count,
+ int32_t flags);
+ static HistogramBase* FactoryTimeGet(const std::string& name,
+ TimeDelta minimum,
+ TimeDelta maximum,
+ uint32_t bucket_count,
+ int32_t flags);
+
+ // Overloads of the above two functions that take a const char* |name| param,
+ // to avoid code bloat from the std::string constructor being inlined into
+ // call sites.
+ static HistogramBase* FactoryGet(const char* name,
+ Sample minimum,
+ Sample maximum,
+ uint32_t bucket_count,
+ int32_t flags);
+ static HistogramBase* FactoryTimeGet(const char* name,
+ TimeDelta minimum,
+ TimeDelta maximum,
+ uint32_t bucket_count,
+ int32_t flags);
+
+ // Create a histogram using data in persistent storage.
+ static std::unique_ptr<HistogramBase> PersistentCreate(
+ const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ const BucketRanges* ranges,
+ HistogramBase::AtomicCount* counts,
+ HistogramBase::AtomicCount* logged_counts,
+ uint32_t counts_size,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta);
+
+ struct DescriptionPair {
+ Sample sample;
+ const char* description; // Null means end of a list of pairs.
+ };
+
+ // Create a LinearHistogram and store a list of number/text values for use in
+ // writing the histogram graph.
+ // |descriptions| can be NULL, which means no special descriptions to set. If
+ // it's not NULL, the last element in the array must has a NULL in its
+ // "description" field.
+ static HistogramBase* FactoryGetWithRangeDescription(
+ const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ uint32_t bucket_count,
+ int32_t flags,
+ const DescriptionPair descriptions[]);
+
+ static void InitializeBucketRanges(Sample minimum,
+ Sample maximum,
+ BucketRanges* ranges);
+
+ // Overridden from Histogram:
+ HistogramType GetHistogramType() const override;
+
+ protected:
+ class Factory;
+
+ LinearHistogram(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ const BucketRanges* ranges);
+
+ LinearHistogram(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ const BucketRanges* ranges,
+ HistogramBase::AtomicCount* counts,
+ HistogramBase::AtomicCount* logged_counts,
+ uint32_t counts_size,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta);
+
+ double GetBucketSize(Count current, uint32_t i) const override;
+
+ // If we have a description for a bucket, then return that. Otherwise
+ // let parent class provide a (numeric) description.
+ const std::string GetAsciiBucketRange(uint32_t i) const override;
+
+ // Skip printing of name for numeric range if we have a name (and if this is
+ // an empty bucket).
+ bool PrintEmptyBucket(uint32_t index) const override;
+
+ private:
+ friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
+ base::PickleIterator* iter);
+ static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
+
+ // For some ranges, we store a printable description of a bucket range.
+ // If there is no description, then GetAsciiBucketRange() uses parent class
+ // to provide a description.
+ typedef std::map<Sample, std::string> BucketDescriptionMap;
+ BucketDescriptionMap bucket_description_;
+
+ DISALLOW_COPY_AND_ASSIGN(LinearHistogram);
+};
+
+//------------------------------------------------------------------------------
+
+// BooleanHistogram is a histogram for booleans.
+class BASE_EXPORT BooleanHistogram : public LinearHistogram {
+ public:
+  static HistogramBase* FactoryGet(const std::string& name, int32_t flags);
+
+  // Overload of the above function that takes a const char* |name| param,
+  // to avoid code bloat from the std::string constructor being inlined into
+  // call sites.
+  static HistogramBase* FactoryGet(const char* name, int32_t flags);
+
+  // Create a histogram using data in persistent storage.
+  static std::unique_ptr<HistogramBase> PersistentCreate(
+      const std::string& name,
+      const BucketRanges* ranges,
+      HistogramBase::AtomicCount* counts,
+      HistogramBase::AtomicCount* logged_counts,
+      HistogramSamples::Metadata* meta,
+      HistogramSamples::Metadata* logged_meta);
+
+  // Overridden from LinearHistogram:
+  HistogramType GetHistogramType() const override;
+
+ protected:
+  class Factory;
+
+ private:
+  BooleanHistogram(const std::string& name, const BucketRanges* ranges);
+
+  // Constructor for a histogram backed by persistent storage (see
+  // PersistentCreate above).
+  BooleanHistogram(const std::string& name,
+                   const BucketRanges* ranges,
+                   HistogramBase::AtomicCount* counts,
+                   HistogramBase::AtomicCount* logged_counts,
+                   HistogramSamples::Metadata* meta,
+                   HistogramSamples::Metadata* logged_meta);
+
+  friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
+      base::PickleIterator* iter);
+  static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
+
+  DISALLOW_COPY_AND_ASSIGN(BooleanHistogram);
+};
+
+//------------------------------------------------------------------------------
+
+// CustomHistogram is a histogram for a set of custom integers.
+class BASE_EXPORT CustomHistogram : public Histogram {
+ public:
+  // |custom_ranges| contains a vector of limits on ranges. Each limit should be
+  // > 0 and < kSampleType_MAX. (Currently 0 is still accepted for backward
+  // compatibility). The limits can be unordered or contain duplication, but
+  // clients should not depend on this.
+  static HistogramBase* FactoryGet(const std::string& name,
+                                   const std::vector<Sample>& custom_ranges,
+                                   int32_t flags);
+
+  // Overload of the above function that takes a const char* |name| param,
+  // to avoid code bloat from the std::string constructor being inlined into
+  // call sites.
+  static HistogramBase* FactoryGet(const char* name,
+                                   const std::vector<Sample>& custom_ranges,
+                                   int32_t flags);
+
+  // Create a histogram using data in persistent storage.
+  static std::unique_ptr<HistogramBase> PersistentCreate(
+      const std::string& name,
+      const BucketRanges* ranges,
+      HistogramBase::AtomicCount* counts,
+      HistogramBase::AtomicCount* logged_counts,
+      uint32_t counts_size,
+      HistogramSamples::Metadata* meta,
+      HistogramSamples::Metadata* logged_meta);
+
+  // Overridden from Histogram:
+  HistogramType GetHistogramType() const override;
+
+  // Helper method for transforming an array of valid enumeration values
+  // to the std::vector<Sample> expected by UMA_HISTOGRAM_CUSTOM_ENUMERATION.
+  // This function ensures that a guard bucket exists right after any
+  // valid sample value (unless the next higher sample is also a valid value),
+  // so that invalid samples never fall into the same bucket as valid samples.
+  // TODO(kaiwang): Change name to ArrayToCustomEnumRanges.
+  static std::vector<Sample> ArrayToCustomRanges(const Sample* values,
+                                                 uint32_t num_values);
+ protected:
+  class Factory;
+
+  CustomHistogram(const std::string& name,
+                  const BucketRanges* ranges);
+
+  // Constructor for a histogram backed by persistent storage (see
+  // PersistentCreate above).
+  CustomHistogram(const std::string& name,
+                  const BucketRanges* ranges,
+                  HistogramBase::AtomicCount* counts,
+                  HistogramBase::AtomicCount* logged_counts,
+                  uint32_t counts_size,
+                  HistogramSamples::Metadata* meta,
+                  HistogramSamples::Metadata* logged_meta);
+
+  // HistogramBase implementation:
+  bool SerializeInfoImpl(base::Pickle* pickle) const override;
+
+  double GetBucketSize(Count current, uint32_t i) const override;
+
+ private:
+  friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
+      base::PickleIterator* iter);
+  static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
+
+  // Validates that |custom_ranges| holds only values legal per the FactoryGet
+  // comment above.
+  static bool ValidateCustomRanges(const std::vector<Sample>& custom_ranges);
+
+  DISALLOW_COPY_AND_ASSIGN(CustomHistogram);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_HISTOGRAM_H_
diff --git a/libchrome/base/metrics/histogram_base.cc b/libchrome/base/metrics/histogram_base.cc
new file mode 100644
index 0000000..8c4f1ec
--- /dev/null
+++ b/libchrome/base/metrics/histogram_base.cc
@@ -0,0 +1,234 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_base.h"
+
+#include <limits.h>
+
+#include <memory>
+#include <utility>
+
+#include "base/json/json_string_value_serializer.h"
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/pickle.h"
+#include "base/process/process_handle.h"
+#include "base/strings/stringprintf.h"
+#include "base/values.h"
+
+namespace base {
+
+// Returns the symbolic name of |type| for use in logs and serialized output.
+std::string HistogramTypeToString(HistogramType type) {
+  switch (type) {
+    case HISTOGRAM:
+      return "HISTOGRAM";
+    case LINEAR_HISTOGRAM:
+      return "LINEAR_HISTOGRAM";
+    case BOOLEAN_HISTOGRAM:
+      return "BOOLEAN_HISTOGRAM";
+    case CUSTOM_HISTOGRAM:
+      return "CUSTOM_HISTOGRAM";
+    case SPARSE_HISTOGRAM:
+      return "SPARSE_HISTOGRAM";
+  }
+  // All enum values are handled above; reaching here means |type| was corrupt
+  // or out of range.
+  NOTREACHED();
+  return "UNKNOWN";
+}
+
+// Reads the histogram type tag written by HistogramBase::SerializeInfo() and
+// dispatches to the matching subclass deserializer. Returns NULL when the tag
+// is missing or unrecognized; each type-specific DeserializeInfoImpl validates
+// the remaining pickled fields itself.
+HistogramBase* DeserializeHistogramInfo(PickleIterator* iter) {
+  int type;
+  if (!iter->ReadInt(&type))
+    return NULL;
+
+  switch (type) {
+    case HISTOGRAM:
+      return Histogram::DeserializeInfoImpl(iter);
+    case LINEAR_HISTOGRAM:
+      return LinearHistogram::DeserializeInfoImpl(iter);
+    case BOOLEAN_HISTOGRAM:
+      return BooleanHistogram::DeserializeInfoImpl(iter);
+    case CUSTOM_HISTOGRAM:
+      return CustomHistogram::DeserializeInfoImpl(iter);
+    case SPARSE_HISTOGRAM:
+      return SparseHistogram::DeserializeInfoImpl(iter);
+    default:
+      return NULL;
+  }
+}
+
+const HistogramBase::Sample HistogramBase::kSampleType_MAX = INT_MAX;
+HistogramBase* HistogramBase::report_histogram_ = nullptr;
+
+HistogramBase::HistogramBase(const std::string& name)
+    : histogram_name_(name),
+      flags_(kNoFlags) {}
+
+HistogramBase::~HistogramBase() {}
+
+void HistogramBase::CheckName(const StringPiece& name) const {
+  DCHECK_EQ(histogram_name(), name);
+}
+
+// Sets the given bits in flags_. Note the load and store are separate atomic
+// operations, not a single read-modify-write, so concurrent SetFlags()/
+// ClearFlags() calls can lose an update.
+void HistogramBase::SetFlags(int32_t flags) {
+  HistogramBase::Count old_flags = subtle::NoBarrier_Load(&flags_);
+  subtle::NoBarrier_Store(&flags_, old_flags | flags);
+}
+
+// Clears the given bits in flags_. See the concurrency note on SetFlags().
+void HistogramBase::ClearFlags(int32_t flags) {
+  HistogramBase::Count old_flags = subtle::NoBarrier_Load(&flags_);
+  subtle::NoBarrier_Store(&flags_, old_flags & ~flags);
+}
+
+// Records |time| with millisecond granularity (TimeDelta::InMilliseconds()).
+void HistogramBase::AddTime(const TimeDelta& time) {
+  Add(static_cast<Sample>(time.InMilliseconds()));
+}
+
+// Records |value| as sample 1 (true) or 0 (false).
+void HistogramBase::AddBoolean(bool value) {
+  Add(value ? 1 : 0);
+}
+
+// Writes this histogram's type tag followed by the subclass-specific
+// construction parameters; the counterpart is DeserializeHistogramInfo().
+bool HistogramBase::SerializeInfo(Pickle* pickle) const {
+  if (!pickle->WriteInt(GetHistogramType()))
+    return false;
+  return SerializeInfoImpl(pickle);
+}
+
+uint32_t HistogramBase::FindCorruption(
+    const HistogramSamples& /*samples*/) const {
+  // Not supported by default; subclasses may override with real checks.
+  return NO_INCONSISTENCIES;
+}
+
+void HistogramBase::WriteJSON(std::string* output) const {
+  Count count;
+  int64_t sum;
+  std::unique_ptr<ListValue> buckets(new ListValue());
+  GetCountAndBucketData(&count, &sum, buckets.get());
+  std::unique_ptr<DictionaryValue> parameters(new DictionaryValue());
+  GetParameters(parameters.get());
+
+  JSONStringValueSerializer serializer(output);
+  DictionaryValue root;
+  root.SetString("name", histogram_name());
+  root.SetInteger("count", count);
+  // The 64-bit sum is emitted as a double (JSON has no 64-bit integer), so it
+  // may lose precision for very large values.
+  root.SetDouble("sum", static_cast<double>(sum));
+  root.SetInteger("flags", flags());
+  root.Set("params", std::move(parameters));
+  root.Set("buckets", std::move(buckets));
+  root.SetInteger("pid", GetCurrentProcId());
+  serializer.Serialize(root);
+}
+
+// static
+void HistogramBase::EnableActivityReportHistogram(
+    const std::string& process_type) {
+  // Enabling twice would silently drop the first report histogram, so this
+  // must be called at most once.
+  DCHECK(!report_histogram_);
+  size_t existing = StatisticsRecorder::GetHistogramCount();
+  if (existing != 0) {
+    DVLOG(1) << existing
+             << " histograms were created before reporting was enabled.";
+  }
+
+  // An empty |process_type| yields the plain "UMA.Histograms.Activity" name;
+  // otherwise ".<process_type>" is appended.
+  std::string name =
+      "UMA.Histograms.Activity" +
+      (process_type.empty() ? process_type : "." + process_type);
+
+  // Calling FactoryGet() here rather than using a histogram-macro works
+  // around some problems with tests that could end up seeing the results
+  // histogram when not expected due to a bad interaction between
+  // HistogramTester and StatisticsRecorder.
+  report_histogram_ = LinearHistogram::FactoryGet(
+      name, 1, HISTOGRAM_REPORT_MAX, HISTOGRAM_REPORT_MAX + 1,
+      kUmaTargetedHistogramFlag);
+  report_histogram_->Add(HISTOGRAM_REPORT_CREATED);
+}
+
+void HistogramBase::FindAndRunCallback(HistogramBase::Sample sample) const {
+  // kCallbackExists is a fast-path guard: the common no-callback case skips
+  // the StatisticsRecorder lookup entirely.
+  if ((flags() & kCallbackExists) == 0)
+    return;
+
+  StatisticsRecorder::OnSampleCallback cb =
+      StatisticsRecorder::FindCallback(histogram_name());
+  if (!cb.is_null())
+    cb.Run(sample);
+}
+
+// Renders one bucket as a fixed-width (72 character) bar: dashes proportional
+// to |current_size| / |max_size|, an "O" marker, then space padding.
+void HistogramBase::WriteAsciiBucketGraph(double current_size,
+                                          double max_size,
+                                          std::string* output) const {
+  const int k_line_length = 72;  // Maximal horizontal width of graph.
+  // Adding 0.5 rounds to the nearest character cell instead of truncating.
+  int x_count = static_cast<int>(k_line_length * (current_size / max_size)
+                                 + 0.5);
+  int x_remainder = k_line_length - x_count;
+
+  while (0 < x_count--)
+    output->append("-");
+  output->append("O");
+  while (0 < x_remainder--)
+    output->append(" ");
+}
+
+// Formats |sample| in decimal, or in hex when the histogram was created with
+// kHexRangePrintingFlag.
+const std::string HistogramBase::GetSimpleAsciiBucketRange(
+    Sample sample) const {
+  std::string result;
+  if (kHexRangePrintingFlag & flags())
+    StringAppendF(&result, "%#x", sample);
+  else
+    StringAppendF(&result, "%d", sample);
+  return result;
+}
+
+// Appends " (<count> = <percent>%)". NOTE(review): this assumes the caller
+// pre-scales |scaled_sum| so that current/scaled_sum is already a percentage;
+// verify against callers in histogram.cc.
+void HistogramBase::WriteAsciiBucketValue(Count current,
+                                          double scaled_sum,
+                                          std::string* output) const {
+  StringAppendF(output, " (%d = %3.1f%%)", current, current/scaled_sum);
+}
+
+// static
+void HistogramBase::ReportHistogramActivity(const HistogramBase& histogram,
+                                            ReportActivity activity) {
+  // Reporting is a no-op until EnableActivityReportHistogram() is called.
+  if (!report_histogram_)
+    return;
+
+  const int32_t flags = histogram.flags_;
+  // Sentinel; always overwritten in the type switch below, which deliberately
+  // has no default case so the compiler flags any new HistogramType value.
+  HistogramReport report_type = HISTOGRAM_REPORT_MAX;
+  switch (activity) {
+    case HISTOGRAM_CREATED:
+      report_histogram_->Add(HISTOGRAM_REPORT_HISTOGRAM_CREATED);
+      switch (histogram.GetHistogramType()) {
+        case HISTOGRAM:
+          report_type = HISTOGRAM_REPORT_TYPE_LOGARITHMIC;
+          break;
+        case LINEAR_HISTOGRAM:
+          report_type = HISTOGRAM_REPORT_TYPE_LINEAR;
+          break;
+        case BOOLEAN_HISTOGRAM:
+          report_type = HISTOGRAM_REPORT_TYPE_BOOLEAN;
+          break;
+        case CUSTOM_HISTOGRAM:
+          report_type = HISTOGRAM_REPORT_TYPE_CUSTOM;
+          break;
+        case SPARSE_HISTOGRAM:
+          report_type = HISTOGRAM_REPORT_TYPE_SPARSE;
+          break;
+      }
+      report_histogram_->Add(report_type);
+      if (flags & kIsPersistent)
+        report_histogram_->Add(HISTOGRAM_REPORT_FLAG_PERSISTENT);
+      // Stability implies targeted (it includes that bit), so check the full
+      // stability mask first and report only one of the two.
+      if ((flags & kUmaStabilityHistogramFlag) == kUmaStabilityHistogramFlag)
+        report_histogram_->Add(HISTOGRAM_REPORT_FLAG_UMA_STABILITY);
+      else if (flags & kUmaTargetedHistogramFlag)
+        report_histogram_->Add(HISTOGRAM_REPORT_FLAG_UMA_TARGETED);
+      break;
+
+    case HISTOGRAM_LOOKUP:
+      report_histogram_->Add(HISTOGRAM_REPORT_HISTOGRAM_LOOKUP);
+      break;
+  }
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/histogram_base.h b/libchrome/base/metrics/histogram_base.h
new file mode 100644
index 0000000..d240099
--- /dev/null
+++ b/libchrome/base/metrics/histogram_base.h
@@ -0,0 +1,279 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_BASE_H_
+#define BASE_METRICS_HISTOGRAM_BASE_H_
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class BucketRanges;
+class DictionaryValue;
+class HistogramBase;
+class HistogramSamples;
+class ListValue;
+class Pickle;
+class PickleIterator;
+
+////////////////////////////////////////////////////////////////////////////////
+// This enum is used to facilitate deserialization of histograms from other
+// processes into the browser. If you create another class that inherits from
+// HistogramBase, add new histogram types and names below.
+
+enum HistogramType {
+  HISTOGRAM,          // Exponentially ("logarithmically") bucketed Histogram.
+  LINEAR_HISTOGRAM,   // Evenly spaced buckets; see LinearHistogram.
+  BOOLEAN_HISTOGRAM,  // Histogram for booleans; see BooleanHistogram.
+  CUSTOM_HISTOGRAM,   // Caller-supplied bucket limits; see CustomHistogram.
+  SPARSE_HISTOGRAM,   // See SparseHistogram.
+};
+
+// Returns the symbolic name of |type|, e.g. "LINEAR_HISTOGRAM".
+std::string HistogramTypeToString(HistogramType type);
+
+// This enum is used for reporting how many histograms and of what types and
+// variations are being created. It has to be in the main .h file so it is
+// visible to files that define the various histogram types.
+enum HistogramReport {
+  // Count the number of reports created. The other counts divided by this
+  // number will give the average per run of the program.
+  HISTOGRAM_REPORT_CREATED = 0,
+
+  // Count the total number of histograms created. It is the limit against
+  // which all others are compared.
+  HISTOGRAM_REPORT_HISTOGRAM_CREATED = 1,
+
+  // Count the total number of histograms looked-up. It's better to cache
+  // the result of a single lookup rather than do it repeatedly.
+  HISTOGRAM_REPORT_HISTOGRAM_LOOKUP = 2,
+
+  // These count the individual histogram types. This must follow the order
+  // of HistogramType above.
+  HISTOGRAM_REPORT_TYPE_LOGARITHMIC = 3,
+  HISTOGRAM_REPORT_TYPE_LINEAR = 4,
+  HISTOGRAM_REPORT_TYPE_BOOLEAN = 5,
+  HISTOGRAM_REPORT_TYPE_CUSTOM = 6,
+  HISTOGRAM_REPORT_TYPE_SPARSE = 7,
+
+  // These indicate the individual flags that were set.
+  HISTOGRAM_REPORT_FLAG_UMA_TARGETED = 8,
+  HISTOGRAM_REPORT_FLAG_UMA_STABILITY = 9,
+  HISTOGRAM_REPORT_FLAG_PERSISTENT = 10,
+
+  // This must be last.
+  HISTOGRAM_REPORT_MAX = 11
+};
+
+// Create or find existing histogram that matches the pickled info.
+// Returns NULL if the pickled data has problems.
+BASE_EXPORT HistogramBase* DeserializeHistogramInfo(base::PickleIterator* iter);
+
+////////////////////////////////////////////////////////////////////////////////
+
+class BASE_EXPORT HistogramBase {
+ public:
+  typedef int32_t Sample;                // Used for samples.
+  typedef subtle::Atomic32 AtomicCount;  // Used to count samples.
+  typedef int32_t Count;  // Used to manipulate counts in temporaries.
+
+  static const Sample kSampleType_MAX;  // INT_MAX
+
+  enum Flags {
+    kNoFlags = 0,
+
+    // Histogram should be UMA uploaded.
+    kUmaTargetedHistogramFlag = 0x1,
+
+    // Indicates that this is a stability histogram. This flag exists to specify
+    // which histograms should be included in the initial stability log. Please
+    // refer to |MetricsService::PrepareInitialStabilityLog|.
+    kUmaStabilityHistogramFlag = kUmaTargetedHistogramFlag | 0x2,
+
+    // Indicates that the histogram was pickled to be sent across an IPC
+    // Channel. If we observe this flag on a histogram being aggregated into
+    // after IPC, then we are running in a single process mode, and the
+    // aggregation should not take place (as we would be aggregating back into
+    // the source histogram!).
+    kIPCSerializationSourceFlag = 0x10,
+
+    // Indicates that a callback exists for when a new sample is recorded on
+    // this histogram. We store this as a flag with the histogram since
+    // histograms can be in performance critical code, and this allows us
+    // to shortcut looking up the callback if it doesn't exist.
+    kCallbackExists = 0x20,
+
+    // Indicates that the histogram is held in "persistent" memory and may
+    // be accessible between processes. This is only possible if such a
+    // memory segment has been created/attached, used to create a Persistent-
+    // MemoryAllocator, and that loaded into the Histogram module before this
+    // histogram is created.
+    kIsPersistent = 0x40,
+
+    // Only for Histogram and its sub classes: fancy bucket-naming support.
+    kHexRangePrintingFlag = 0x8000,
+  };
+
+  // Histogram data inconsistency types.
+  enum Inconsistency : uint32_t {
+    NO_INCONSISTENCIES = 0x0,
+    RANGE_CHECKSUM_ERROR = 0x1,
+    BUCKET_ORDER_ERROR = 0x2,
+    COUNT_HIGH_ERROR = 0x4,
+    COUNT_LOW_ERROR = 0x8,
+
+    NEVER_EXCEEDED_VALUE = 0x10,
+  };
+
+  explicit HistogramBase(const std::string& name);
+  virtual ~HistogramBase();
+
+  const std::string& histogram_name() const { return histogram_name_; }
+
+  // Compares |name| to the histogram name and triggers a DCHECK if they do not
+  // match. This is a helper function used by histogram macros, which results
+  // in more compact machine code being generated by the macros.
+  void CheckName(const StringPiece& name) const;
+
+  // Get a unique ID for this histogram's samples.
+  virtual uint64_t name_hash() const = 0;
+
+  // Operations with Flags enum.
+  int32_t flags() const { return subtle::NoBarrier_Load(&flags_); }
+  void SetFlags(int32_t flags);
+  void ClearFlags(int32_t flags);
+
+  virtual HistogramType GetHistogramType() const = 0;
+
+  // Whether the histogram has construction arguments as parameters specified.
+  // For histograms that don't have the concept of minimum, maximum or
+  // bucket_count, this function always returns false.
+  virtual bool HasConstructionArguments(
+      Sample expected_minimum,
+      Sample expected_maximum,
+      uint32_t expected_bucket_count) const = 0;
+
+  virtual void Add(Sample value) = 0;
+
+  // In Add function the |value| bucket is increased by one, but in some use
+  // cases we need to increase this value by an arbitrary integer. AddCount
+  // function increases the |value| bucket by |count|. |count| should be greater
+  // than or equal to 1.
+  virtual void AddCount(Sample value, int count) = 0;
+
+  // 2 convenient functions that call Add(Sample).
+  void AddTime(const TimeDelta& time);
+  void AddBoolean(bool value);
+
+  virtual void AddSamples(const HistogramSamples& samples) = 0;
+  virtual bool AddSamplesFromPickle(base::PickleIterator* iter) = 0;
+
+  // Serialize the histogram info into |pickle|.
+  // Note: This only serializes the construction arguments of the histogram, but
+  // does not serialize the samples.
+  bool SerializeInfo(base::Pickle* pickle) const;
+
+  // Try to find out data corruption from histogram and the samples.
+  // The returned value is a combination of Inconsistency enum.
+  virtual uint32_t FindCorruption(const HistogramSamples& samples) const;
+
+  // Snapshot the current complete set of sample data.
+  // Override with atomic/locked snapshot if needed.
+  virtual std::unique_ptr<HistogramSamples> SnapshotSamples() const = 0;
+
+  // Calculate the change (delta) in histogram counts since the previous call
+  // to this method. Each successive call will return only those counts
+  // changed since the last call.
+  virtual std::unique_ptr<HistogramSamples> SnapshotDelta() = 0;
+
+  // Calculate the change (delta) in histogram counts since the previous call
+  // to SnapshotDelta() but do so without modifying any internal data as to
+  // what was previously logged. After such a call, no further calls to this
+  // method or to SnapshotDelta() should be done as the result would include
+  // data previously returned. Because no internal data is changed, this call
+  // can be made on "const" histograms such as those with data held in
+  // read-only memory.
+  virtual std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const = 0;
+
+  // The following methods provide graphical histogram displays.
+  virtual void WriteHTMLGraph(std::string* output) const = 0;
+  virtual void WriteAscii(std::string* output) const = 0;
+
+  // Produce a JSON representation of the histogram. This is implemented with
+  // the help of GetParameters and GetCountAndBucketData; overwrite them to
+  // customize the output.
+  void WriteJSON(std::string* output) const;
+
+  // This enables a histogram that reports what types of histograms are
+  // created and their flags. It must be called while still single-threaded.
+  //
+  // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
+  // with the following histogram:
+  // UMA.Histograms.process_type.Creations
+  static void EnableActivityReportHistogram(const std::string& process_type);
+
+ protected:
+  enum ReportActivity { HISTOGRAM_CREATED, HISTOGRAM_LOOKUP };
+
+  // Subclasses should implement this function to make SerializeInfo work.
+  virtual bool SerializeInfoImpl(base::Pickle* pickle) const = 0;
+
+  // Writes information about the construction parameters in |params|.
+  virtual void GetParameters(DictionaryValue* params) const = 0;
+
+  // Writes information about the current (non-empty) buckets and their sample
+  // counts to |buckets|, the total sample count to |count| and the total sum
+  // to |sum|.
+  virtual void GetCountAndBucketData(Count* count,
+                                     int64_t* sum,
+                                     ListValue* buckets) const = 0;
+
+  // Produce actual graph (set of blank vs non blank char's) for a bucket.
+  void WriteAsciiBucketGraph(double current_size,
+                             double max_size,
+                             std::string* output) const;
+
+  // Return a string description of what goes in a given bucket.
+  const std::string GetSimpleAsciiBucketRange(Sample sample) const;
+
+  // Write textual description of the bucket contents (relative to histogram).
+  // Output is the count in the buckets, as well as the percentage.
+  void WriteAsciiBucketValue(Count current,
+                             double scaled_sum,
+                             std::string* output) const;
+
+  // Retrieves the callback for this histogram, if one exists, and runs it
+  // passing |sample| as the parameter.
+  void FindAndRunCallback(Sample sample) const;
+
+  // Update report with an |activity| that occurred for |histogram|.
+  static void ReportHistogramActivity(const HistogramBase& histogram,
+                                      ReportActivity activity);
+
+  // Retrieves the global histogram reporting what histograms are created.
+  static HistogramBase* report_histogram_;
+
+ private:
+  friend class HistogramBaseTest;
+
+  const std::string histogram_name_;
+  AtomicCount flags_;
+
+  DISALLOW_COPY_AND_ASSIGN(HistogramBase);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_HISTOGRAM_BASE_H_
diff --git a/libchrome/base/metrics/histogram_base_unittest.cc b/libchrome/base/metrics/histogram_base_unittest.cc
new file mode 100644
index 0000000..1eb8fd4
--- /dev/null
+++ b/libchrome/base/metrics/histogram_base_unittest.cc
@@ -0,0 +1,222 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/pickle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Test fixture that gives every test a fresh StatisticsRecorder and clears
+// the static activity-report histogram pointer on teardown so it cannot
+// dangle into a destroyed recorder.
+class HistogramBaseTest : public testing::Test {
+ protected:
+  HistogramBaseTest() {
+    // Each test will have a clean state (no Histogram / BucketRanges
+    // registered).
+    ResetStatisticsRecorder();
+  }
+
+  ~HistogramBaseTest() override {
+    // Drop the static report pointer so later tests start disabled.
+    HistogramBase::report_histogram_ = nullptr;
+  }
+
+  void ResetStatisticsRecorder() {
+    // It is necessary to fully destruct any existing StatisticsRecorder
+    // before creating a new one.
+    statistics_recorder_.reset();
+    statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
+  }
+
+  // Enables the activity report histogram (suffixed with |name|) and returns
+  // it; tests use the returned histogram to inspect reported counts.
+  HistogramBase* GetCreationReportHistogram(const std::string& name) {
+    HistogramBase::EnableActivityReportHistogram(name);
+    return HistogramBase::report_histogram_;
+  }
+
+ private:
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+
+  DISALLOW_COPY_AND_ASSIGN(HistogramBaseTest);
+};
+
+TEST_F(HistogramBaseTest, DeserializeHistogram) {
+  HistogramBase* histogram = Histogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10,
+      (HistogramBase::kUmaTargetedHistogramFlag |
+       HistogramBase::kIPCSerializationSourceFlag));
+
+  Pickle pickle;
+  ASSERT_TRUE(histogram->SerializeInfo(&pickle));
+
+  // While the original histogram is still registered, deserialization finds
+  // it instead of creating a new one.
+  PickleIterator iter(pickle);
+  HistogramBase* deserialized = DeserializeHistogramInfo(&iter);
+  EXPECT_EQ(histogram, deserialized);
+
+  ResetStatisticsRecorder();
+
+  // With a fresh recorder the pickle must reconstruct an equivalent but
+  // distinct histogram.
+  PickleIterator iter2(pickle);
+  deserialized = DeserializeHistogramInfo(&iter2);
+  EXPECT_TRUE(deserialized);
+  EXPECT_NE(histogram, deserialized);
+  EXPECT_EQ("TestHistogram", deserialized->histogram_name());
+  EXPECT_TRUE(deserialized->HasConstructionArguments(1, 1000, 10));
+
+  // kIPCSerializationSourceFlag will be cleared.
+  EXPECT_EQ(HistogramBase::kUmaTargetedHistogramFlag, deserialized->flags());
+}
+
+TEST_F(HistogramBaseTest, DeserializeLinearHistogram) {
+  HistogramBase* histogram = LinearHistogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10,
+      HistogramBase::kIPCSerializationSourceFlag);
+
+  Pickle pickle;
+  ASSERT_TRUE(histogram->SerializeInfo(&pickle));
+
+  // Deserializing while the original is registered returns the original.
+  PickleIterator iter(pickle);
+  HistogramBase* deserialized = DeserializeHistogramInfo(&iter);
+  EXPECT_EQ(histogram, deserialized);
+
+  ResetStatisticsRecorder();
+
+  // Re-created histogram carries no flags (IPC-source flag is dropped).
+  PickleIterator iter2(pickle);
+  deserialized = DeserializeHistogramInfo(&iter2);
+  EXPECT_TRUE(deserialized);
+  EXPECT_NE(histogram, deserialized);
+  EXPECT_EQ("TestHistogram", deserialized->histogram_name());
+  EXPECT_TRUE(deserialized->HasConstructionArguments(1, 1000, 10));
+  EXPECT_EQ(0, deserialized->flags());
+}
+
+TEST_F(HistogramBaseTest, DeserializeBooleanHistogram) {
+  HistogramBase* histogram = BooleanHistogram::FactoryGet(
+      "TestHistogram", HistogramBase::kIPCSerializationSourceFlag);
+
+  Pickle pickle;
+  ASSERT_TRUE(histogram->SerializeInfo(&pickle));
+
+  PickleIterator iter(pickle);
+  HistogramBase* deserialized = DeserializeHistogramInfo(&iter);
+  EXPECT_EQ(histogram, deserialized);
+
+  ResetStatisticsRecorder();
+
+  PickleIterator iter2(pickle);
+  deserialized = DeserializeHistogramInfo(&iter2);
+  EXPECT_TRUE(deserialized);
+  EXPECT_NE(histogram, deserialized);
+  EXPECT_EQ("TestHistogram", deserialized->histogram_name());
+  // Boolean histograms always use the fixed construction (1, 2, 3).
+  EXPECT_TRUE(deserialized->HasConstructionArguments(1, 2, 3));
+  EXPECT_EQ(0, deserialized->flags());
+}
+
+TEST_F(HistogramBaseTest, DeserializeCustomHistogram) {
+  // Intentionally unordered ranges; the factory accepts any order.
+  std::vector<HistogramBase::Sample> ranges;
+  ranges.push_back(13);
+  ranges.push_back(5);
+  ranges.push_back(9);
+
+  HistogramBase* histogram = CustomHistogram::FactoryGet(
+      "TestHistogram", ranges, HistogramBase::kIPCSerializationSourceFlag);
+
+  Pickle pickle;
+  ASSERT_TRUE(histogram->SerializeInfo(&pickle));
+
+  PickleIterator iter(pickle);
+  HistogramBase* deserialized = DeserializeHistogramInfo(&iter);
+  EXPECT_EQ(histogram, deserialized);
+
+  ResetStatisticsRecorder();
+
+  PickleIterator iter2(pickle);
+  deserialized = DeserializeHistogramInfo(&iter2);
+  EXPECT_TRUE(deserialized);
+  EXPECT_NE(histogram, deserialized);
+  EXPECT_EQ("TestHistogram", deserialized->histogram_name());
+  // Construction args reflect the sorted ranges: min 5, max 13, 4 buckets.
+  EXPECT_TRUE(deserialized->HasConstructionArguments(5, 13, 4));
+  EXPECT_EQ(0, deserialized->flags());
+}
+
+TEST_F(HistogramBaseTest, DeserializeSparseHistogram) {
+  HistogramBase* histogram = SparseHistogram::FactoryGet(
+      "TestHistogram", HistogramBase::kIPCSerializationSourceFlag);
+
+  Pickle pickle;
+  ASSERT_TRUE(histogram->SerializeInfo(&pickle));
+
+  PickleIterator iter(pickle);
+  HistogramBase* deserialized = DeserializeHistogramInfo(&iter);
+  EXPECT_EQ(histogram, deserialized);
+
+  ResetStatisticsRecorder();
+
+  PickleIterator iter2(pickle);
+  deserialized = DeserializeHistogramInfo(&iter2);
+  EXPECT_TRUE(deserialized);
+  EXPECT_NE(histogram, deserialized);
+  EXPECT_EQ("TestHistogram", deserialized->histogram_name());
+  EXPECT_EQ(0, deserialized->flags());
+}
+
+TEST_F(HistogramBaseTest, CreationReportHistogram) {
+  // Enable the creation report. The report histogram itself is not included
+  // in the report.
+  HistogramBase* report = GetCreationReportHistogram("CreationReportTest");
+  ASSERT_TRUE(report);
+
+  std::vector<HistogramBase::Sample> ranges;
+  ranges.push_back(1);
+  ranges.push_back(2);
+  ranges.push_back(4);
+  ranges.push_back(8);
+  ranges.push_back(10);
+
+  // Create all histogram types and verify counts.
+  Histogram::FactoryGet("CRH-Histogram", 1, 10, 5, 0);
+  LinearHistogram::FactoryGet("CRH-Linear", 1, 10, 5, 0);
+  BooleanHistogram::FactoryGet("CRH-Boolean", 0);
+  CustomHistogram::FactoryGet("CRH-Custom", ranges, 0);
+  SparseHistogram::FactoryGet("CRH-Sparse", 0);
+
+  std::unique_ptr<HistogramSamples> samples = report->SnapshotSamples();
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_CREATED));
+  EXPECT_EQ(5, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_CREATED));
+  EXPECT_EQ(0, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_LOOKUP));
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_LOGARITHMIC));
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_LINEAR));
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_BOOLEAN));
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_CUSTOM));
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_SPARSE));
+
+  // Create all flag types and verify counts. A stability histogram reports
+  // only the stability flag even though it also includes the targeted bit.
+  Histogram::FactoryGet("CRH-Histogram-UMA-Targeted", 1, 10, 5,
+                        HistogramBase::kUmaTargetedHistogramFlag);
+  Histogram::FactoryGet("CRH-Histogram-UMA-Stability", 1, 10, 5,
+                        HistogramBase::kUmaStabilityHistogramFlag);
+  SparseHistogram::FactoryGet("CRH-Sparse-UMA-Targeted",
+                              HistogramBase::kUmaTargetedHistogramFlag);
+  SparseHistogram::FactoryGet("CRH-Sparse-UMA-Stability",
+                              HistogramBase::kUmaStabilityHistogramFlag);
+  samples = report->SnapshotSamples();
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_CREATED));
+  EXPECT_EQ(9, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_CREATED));
+  EXPECT_EQ(0, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_LOOKUP));
+  EXPECT_EQ(2, samples->GetCount(HISTOGRAM_REPORT_FLAG_UMA_TARGETED));
+  EXPECT_EQ(2, samples->GetCount(HISTOGRAM_REPORT_FLAG_UMA_STABILITY));
+
+  // Do lookup of existing histograms and verify counts: lookups increment
+  // only the lookup bucket, not the created/type buckets.
+  Histogram::FactoryGet("CRH-Histogram", 1, 10, 5, 0);
+  LinearHistogram::FactoryGet("CRH-Linear", 1, 10, 5, 0);
+  BooleanHistogram::FactoryGet("CRH-Boolean", 0);
+  CustomHistogram::FactoryGet("CRH-Custom", ranges, 0);
+  SparseHistogram::FactoryGet("CRH-Sparse", 0);
+  samples = report->SnapshotSamples();
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_CREATED));
+  EXPECT_EQ(9, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_CREATED));
+  EXPECT_EQ(5, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_LOOKUP));
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/histogram_delta_serialization.cc b/libchrome/base/metrics/histogram_delta_serialization.cc
new file mode 100644
index 0000000..3e5d154
--- /dev/null
+++ b/libchrome/base/metrics/histogram_delta_serialization.cc
@@ -0,0 +1,123 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_delta_serialization.h"
+
+#include "base/logging.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_snapshot_manager.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/pickle.h"
+#include "base/values.h"
+
+namespace base {
+
+namespace {
+
+// Create or find existing histogram and add the samples from pickle.
+// Silently returns when seeing any data problem in the pickle.
+void DeserializeHistogramAndAddSamples(PickleIterator* iter) {
+  HistogramBase* histogram = DeserializeHistogramInfo(iter);
+  if (!histogram)
+    return;
+
+  // A histogram carrying kIPCSerializationSourceFlag was serialized by this
+  // same process; re-adding its samples would double-count them.
+  if (histogram->flags() & HistogramBase::kIPCSerializationSourceFlag) {
+    DVLOG(1) << "Single process mode, histogram observed and not copied: "
+             << histogram->histogram_name();
+    return;
+  }
+  histogram->AddSamplesFromPickle(iter);
+}
+
+} // namespace
+
+// Registers this object as the flattener for its snapshot manager and creates
+// three UMA histograms, keyed by |caller_name|, used to count inconsistencies
+// detected while snapshotting.
+HistogramDeltaSerialization::HistogramDeltaSerialization(
+    const std::string& caller_name)
+    : histogram_snapshot_manager_(this),
+      serialized_deltas_(NULL) {
+  // Linear histogram with one bucket per Inconsistency enum value.
+  inconsistencies_histogram_ =
+      LinearHistogram::FactoryGet(
+          "Histogram.Inconsistencies" + caller_name, 1,
+          HistogramBase::NEVER_EXCEEDED_VALUE,
+          HistogramBase::NEVER_EXCEEDED_VALUE + 1,
+          HistogramBase::kUmaTargetedHistogramFlag);
+
+  // Same shape, but incremented only the first time a problem is seen on a
+  // given histogram (see UniqueInconsistencyDetected).
+  inconsistencies_unique_histogram_ =
+      LinearHistogram::FactoryGet(
+          "Histogram.Inconsistencies" + caller_name + "Unique", 1,
+          HistogramBase::NEVER_EXCEEDED_VALUE,
+          HistogramBase::NEVER_EXCEEDED_VALUE + 1,
+          HistogramBase::kUmaTargetedHistogramFlag);
+
+  // Records the magnitude of logged-count discrepancies (see
+  // InconsistencyDetectedInLoggedCount).
+  inconsistent_snapshot_histogram_ =
+      Histogram::FactoryGet(
+          "Histogram.InconsistentSnapshot" + caller_name, 1, 1000000, 50,
+          HistogramBase::kUmaTargetedHistogramFlag);
+}
+
+// The histogram pointers obtained from FactoryGet() are not owned by this
+// class, so there is nothing to release here.
+HistogramDeltaSerialization::~HistogramDeltaSerialization() {
+}
+
+void HistogramDeltaSerialization::PrepareAndSerializeDeltas(
+    std::vector<std::string>* serialized_deltas,
+    bool include_persistent) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  // Borrow the caller's output vector for the duration of PrepareDeltas();
+  // RecordDelta() (invoked synchronously by the snapshot manager) appends
+  // one serialized string per histogram with new samples.
+  serialized_deltas_ = serialized_deltas;
+  // Note: Before serializing, we set the kIPCSerializationSourceFlag for all
+  // the histograms, so that the receiving process can distinguish them from the
+  // local histograms.
+  histogram_snapshot_manager_.PrepareDeltas(
+      StatisticsRecorder::begin(include_persistent), StatisticsRecorder::end(),
+      Histogram::kIPCSerializationSourceFlag, Histogram::kNoFlags);
+  // Drop the borrowed pointer so it cannot dangle past this call.
+  serialized_deltas_ = NULL;
+}
+
+// static
+// Replays each serialized delta produced by PrepareAndSerializeDeltas(),
+// creating histograms as needed; malformed entries are silently skipped by
+// DeserializeHistogramAndAddSamples().
+void HistogramDeltaSerialization::DeserializeAndAddSamples(
+    const std::vector<std::string>& serialized_deltas) {
+  for (std::vector<std::string>::const_iterator it = serialized_deltas.begin();
+       it != serialized_deltas.end(); ++it) {
+    // checked_cast guards against a delta whose size does not fit in int,
+    // which the Pickle constructor requires.
+    Pickle pickle(it->data(), checked_cast<int>(it->size()));
+    PickleIterator iter(pickle);
+    DeserializeHistogramAndAddSamples(&iter);
+  }
+}
+
+// HistogramFlattener callback: serializes one histogram's metadata plus its
+// delta snapshot into |serialized_deltas_|. Called synchronously from
+// PrepareAndSerializeDeltas() via the snapshot manager.
+void HistogramDeltaSerialization::RecordDelta(
+    const HistogramBase& histogram,
+    const HistogramSamples& snapshot) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK_NE(0, snapshot.TotalCount());
+
+  // PrepareAndSerializeDeltas() documents that passing a null output vector
+  // is legal (the snapshot is still taken, nothing is serialized). Honor
+  // that contract rather than dereferencing a null pointer here.
+  if (!serialized_deltas_)
+    return;
+
+  Pickle pickle;
+  histogram.SerializeInfo(&pickle);
+  snapshot.Serialize(&pickle);
+  serialized_deltas_->push_back(
+      std::string(static_cast<const char*>(pickle.data()), pickle.size()));
+}
+
+// Counts every occurrence of |problem| reported by the snapshot manager.
+void HistogramDeltaSerialization::InconsistencyDetected(
+    HistogramBase::Inconsistency problem) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  inconsistencies_histogram_->Add(problem);
+}
+
+// Counts only the first occurrence of |problem| on a given histogram, giving
+// a deduplicated view alongside InconsistencyDetected().
+void HistogramDeltaSerialization::UniqueInconsistencyDetected(
+    HistogramBase::Inconsistency problem) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  inconsistencies_unique_histogram_->Add(problem);
+}
+
+// Records the magnitude of a logged-count discrepancy; |amount| may be
+// negative, so only its absolute value is meaningful for the histogram.
+// NOTE(review): std::abs is used without a direct <cstdlib> include here;
+// confirm it arrives transitively through one of the base/ headers.
+void HistogramDeltaSerialization::InconsistencyDetectedInLoggedCount(
+    int amount) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  inconsistent_snapshot_histogram_->Add(std::abs(amount));
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/histogram_delta_serialization.h b/libchrome/base/metrics/histogram_delta_serialization.h
new file mode 100644
index 0000000..3bb04cb
--- /dev/null
+++ b/libchrome/base/metrics/histogram_delta_serialization.h
@@ -0,0 +1,70 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_DELTA_SERIALIZATION_H_
+#define BASE_METRICS_HISTOGRAM_DELTA_SERIALIZATION_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_flattener.h"
+#include "base/metrics/histogram_snapshot_manager.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+
+class HistogramBase;
+
+// Serializes and restores histograms deltas.
+class BASE_EXPORT HistogramDeltaSerialization : public HistogramFlattener {
+ public:
+  // |caller_name| is string used in histograms for counting inconsistencies.
+  explicit HistogramDeltaSerialization(const std::string& caller_name);
+  ~HistogramDeltaSerialization() override;
+
+  // Computes deltas in histogram bucket counts relative to the previous call to
+  // this method. Stores the deltas in serialized form into |serialized_deltas|.
+  // If |serialized_deltas| is null, no data is serialized, though the next call
+  // will compute the deltas relative to this one. Setting |include_persistent|
+  // will include histograms held in persistent memory (and thus may be reported
+  // elsewhere); otherwise only histograms local to this process are serialized.
+  void PrepareAndSerializeDeltas(std::vector<std::string>* serialized_deltas,
+                                 bool include_persistent);
+
+  // Deserialize deltas and add samples to corresponding histograms, creating
+  // them if necessary. Silently ignores errors in |serialized_deltas|.
+  static void DeserializeAndAddSamples(
+      const std::vector<std::string>& serialized_deltas);
+
+ private:
+  // HistogramFlattener implementation.
+  void RecordDelta(const HistogramBase& histogram,
+                   const HistogramSamples& snapshot) override;
+  void InconsistencyDetected(HistogramBase::Inconsistency problem) override;
+  void UniqueInconsistencyDetected(
+      HistogramBase::Inconsistency problem) override;
+  void InconsistencyDetectedInLoggedCount(int amount) override;
+
+  // All public entry points must run on the thread this object was created on.
+  ThreadChecker thread_checker_;
+
+  // Calculates deltas in histogram counters.
+  HistogramSnapshotManager histogram_snapshot_manager_;
+
+  // Output buffer for serialized deltas. Not owned; non-null only while a
+  // PrepareAndSerializeDeltas() call is in progress.
+  std::vector<std::string>* serialized_deltas_;
+
+  // Histograms to count inconsistencies in snapshots. Not owned (obtained
+  // from the histogram factories, which keep them alive).
+  HistogramBase* inconsistencies_histogram_;
+  HistogramBase* inconsistencies_unique_histogram_;
+  HistogramBase* inconsistent_snapshot_histogram_;
+
+  DISALLOW_COPY_AND_ASSIGN(HistogramDeltaSerialization);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_HISTOGRAM_DELTA_SERIALIZATION_H_
diff --git a/libchrome/base/metrics/histogram_delta_serialization_unittest.cc b/libchrome/base/metrics/histogram_delta_serialization_unittest.cc
new file mode 100644
index 0000000..719bc70
--- /dev/null
+++ b/libchrome/base/metrics/histogram_delta_serialization_unittest.cc
@@ -0,0 +1,55 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_delta_serialization.h"
+
+#include <vector>
+
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/statistics_recorder.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// End-to-end check of the serialize -> deserialize round trip, including the
+// kIPCSerializationSourceFlag single-process guard.
+TEST(HistogramDeltaSerializationTest, DeserializeHistogramAndAddSamples) {
+  // Use a temporary recorder so histograms created here do not leak into the
+  // process-global StatisticsRecorder.
+  std::unique_ptr<StatisticsRecorder> statistic_recorder(
+      StatisticsRecorder::CreateTemporaryForTesting());
+  HistogramDeltaSerialization serializer("HistogramDeltaSerializationTest");
+  std::vector<std::string> deltas;
+  // Nothing was changed yet.
+  serializer.PrepareAndSerializeDeltas(&deltas, true);
+  EXPECT_TRUE(deltas.empty());
+
+  HistogramBase* histogram = Histogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10, HistogramBase::kIPCSerializationSourceFlag);
+  histogram->Add(1);
+  histogram->Add(10);
+  histogram->Add(100);
+  histogram->Add(1000);
+
+  // With new samples present, a non-empty delta must be produced.
+  serializer.PrepareAndSerializeDeltas(&deltas, true);
+  EXPECT_FALSE(deltas.empty());
+
+  HistogramDeltaSerialization::DeserializeAndAddSamples(deltas);
+
+  // The histogram has kIPCSerializationSourceFlag. So samples will be ignored.
+  std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+  EXPECT_EQ(1, snapshot->GetCount(1));
+  EXPECT_EQ(1, snapshot->GetCount(10));
+  EXPECT_EQ(1, snapshot->GetCount(100));
+  EXPECT_EQ(1, snapshot->GetCount(1000));
+
+  // Clear kIPCSerializationSourceFlag to emulate multi-process usage.
+  histogram->ClearFlags(HistogramBase::kIPCSerializationSourceFlag);
+  HistogramDeltaSerialization::DeserializeAndAddSamples(deltas);
+
+  // Now the same deltas are applied, doubling every bucket count.
+  std::unique_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+  EXPECT_EQ(2, snapshot2->GetCount(1));
+  EXPECT_EQ(2, snapshot2->GetCount(10));
+  EXPECT_EQ(2, snapshot2->GetCount(100));
+  EXPECT_EQ(2, snapshot2->GetCount(1000));
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/histogram_flattener.h b/libchrome/base/metrics/histogram_flattener.h
new file mode 100644
index 0000000..b5fe976
--- /dev/null
+++ b/libchrome/base/metrics/histogram_flattener.h
@@ -0,0 +1,51 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_FLATTENER_H_
+#define BASE_METRICS_HISTOGRAM_FLATTENER_H_
+
+#include <map>
+#include <string>
+
+#include "base/macros.h"
+#include "base/metrics/histogram.h"
+
+namespace base {
+
+class HistogramSamples;
+
+// HistogramFlattener is an interface used by HistogramSnapshotManager, which
+// handles the logistics of gathering up available histograms for recording.
+// The implementors handle the exact lower level recording mechanism, or
+// error report mechanism.
+class BASE_EXPORT HistogramFlattener {
+ public:
+  // Will be called once per histogram that has new samples, with |snapshot|
+  // holding the delta to be recorded.
+  virtual void RecordDelta(const HistogramBase& histogram,
+                           const HistogramSamples& snapshot) = 0;
+
+  // Will be called each time a type of Inconsistency is seen on a histogram,
+  // during inspections done internally in HistogramSnapshotManager class.
+  virtual void InconsistencyDetected(HistogramBase::Inconsistency problem) = 0;
+
+  // Will be called when a type of Inconsistency is seen for the first time on
+  // a histogram.
+  virtual void UniqueInconsistencyDetected(
+      HistogramBase::Inconsistency problem) = 0;
+
+  // Will be called when the total logged sample count of a histogram
+  // differs from the sum of logged sample count in all the buckets. The
+  // argument |amount| is the non-zero discrepancy.
+  virtual void InconsistencyDetectedInLoggedCount(int amount) = 0;
+
+ protected:
+  HistogramFlattener() {}
+  virtual ~HistogramFlattener() {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HistogramFlattener);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_HISTOGRAM_FLATTENER_H_
diff --git a/libchrome/base/metrics/histogram_macros.h b/libchrome/base/metrics/histogram_macros.h
new file mode 100644
index 0000000..ce1811a
--- /dev/null
+++ b/libchrome/base/metrics/histogram_macros.h
@@ -0,0 +1,298 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_MACROS_H_
+#define BASE_METRICS_HISTOGRAM_MACROS_H_
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "base/time/time.h"
+
+// Macros for efficient use of histograms. See documentation in histogram.h.
+//
+// UMA_HISTOGRAM_SPARSE_SLOWLY is defined in sparse_histogram.h as it has
+// different #include dependencies.
+
+//------------------------------------------------------------------------------
+// Histograms are often put in areas where they are called many many times, and
+// performance is critical. As a result, they are designed to have a very low
+// recurring cost of executing (adding additional samples). Toward that end,
+// the macros declare a static pointer to the histogram in question, and only
+// take a "slow path" to construct (or find) the histogram on the first run
+// through the macro. We leak the histograms at shutdown time so that we don't
+// have to validate using the pointers at any time during the running of the
+// process.
+
+// The following code is generally what a thread-safe static pointer
+// initialization looks like for a histogram (after a macro is expanded). This
+// sample is an expansion (with comments) of the code for
+// LOCAL_HISTOGRAM_CUSTOM_COUNTS().
+
+/*
+ do {
+ // The pointer's presence indicates the initialization is complete.
+ // Initialization is idempotent, so it can safely be atomically repeated.
+ static base::subtle::AtomicWord atomic_histogram_pointer = 0;
+
+ // Acquire_Load() ensures that we acquire visibility to the pointed-to data
+ // in the histogram.
+ base::Histogram* histogram_pointer(reinterpret_cast<base::Histogram*>(
+ base::subtle::Acquire_Load(&atomic_histogram_pointer)));
+
+ if (!histogram_pointer) {
+ // This is the slow path, which will construct OR find the matching
+ // histogram. FactoryGet includes locks on a global histogram name map
+ // and is completely thread safe.
+ histogram_pointer = base::Histogram::FactoryGet(
+ name, min, max, bucket_count, base::HistogramBase::kNoFlags);
+
+ // Use Release_Store to ensure that the histogram data is made available
+ // globally before we make the pointer visible.
+ // Several threads may perform this store, but the same value will be
+ // stored in all cases (for a given named/spec'ed histogram).
+ // We could do this without any barrier, since FactoryGet entered and
+ // exited a lock after construction, but this barrier makes things clear.
+ base::subtle::Release_Store(&atomic_histogram_pointer,
+ reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));
+ }
+
+ // Ensure calling contract is upheld, and the name does NOT vary.
+ DCHECK(histogram_pointer->histogram_name() == constant_histogram_name);
+
+ histogram_pointer->Add(sample);
+ } while (0);
+*/
+
+// The above pattern is repeated in several macros. The only elements that
+// vary are the invocation of the Add(sample) vs AddTime(sample), and the choice
+// of which FactoryGet method to use. The different FactoryGet methods have
+// various argument lists, so the function with its argument list is provided as
+// a macro argument here. The name is only used in a DCHECK, to assure that
+// callers don't try to vary the name of the histogram (which would tend to be
+// ignored by the one-time initialization of the histogram_pointer).
+
+// In some cases (integration into 3rd party code), it's useful to separate the
+// definition of |atomic_histogram_pointer| from its use. To achieve this we
+// define HISTOGRAM_POINTER_USE, which uses an |atomic_histogram_pointer|, and
+// STATIC_HISTOGRAM_POINTER_BLOCK, which defines an |atomic_histogram_pointer|
+// and forwards to HISTOGRAM_POINTER_USE.
+#define HISTOGRAM_POINTER_USE(atomic_histogram_pointer, \
+                              constant_histogram_name, \
+                              histogram_add_method_invocation, \
+                              histogram_factory_get_invocation) \
+  do { \
+    /* Acquire pairs with the Release_Store below: if the pointer is seen, */ \
+    /* the histogram it points to is fully constructed. */ \
+    base::HistogramBase* histogram_pointer( \
+        reinterpret_cast<base::HistogramBase*>( \
+            base::subtle::Acquire_Load(atomic_histogram_pointer))); \
+    if (!histogram_pointer) { \
+      /* Slow path: FactoryGet is thread safe and idempotent, so racing */ \
+      /* threads all store the same pointer value. */ \
+      histogram_pointer = histogram_factory_get_invocation; \
+      base::subtle::Release_Store( \
+          atomic_histogram_pointer, \
+          reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer)); \
+    } \
+    if (DCHECK_IS_ON()) \
+      histogram_pointer->CheckName(constant_histogram_name); \
+    histogram_pointer->histogram_add_method_invocation; \
+  } while (0)
+
+// Defines the static |atomic_histogram_pointer| and forwards to
+// HISTOGRAM_POINTER_USE. The AtomicWord is zero-initialized at load time
+// (constant initialization), so no thread-safe-init guard is involved.
+#define STATIC_HISTOGRAM_POINTER_BLOCK(constant_histogram_name, \
+                                       histogram_add_method_invocation, \
+                                       histogram_factory_get_invocation) \
+  do { \
+    static base::subtle::AtomicWord atomic_histogram_pointer = 0; \
+    HISTOGRAM_POINTER_USE(&atomic_histogram_pointer, constant_histogram_name, \
+                          histogram_add_method_invocation, \
+                          histogram_factory_get_invocation); \
+  } while (0)
+
+//------------------------------------------------------------------------------
+// Provide easy general purpose histogram in a macro, just like stats counters.
+// Most of these macros use 50 buckets, but check the definition for details.
+//
+// All of these macros must be called with |name| as a runtime constant --- it
+// doesn't have to literally be a constant, but it must be the same string on
+// all calls from a particular call site. If this rule is violated,
+// STATIC_HISTOGRAM_POINTER_BLOCK will DCHECK, and if DCHECKS are disabled, the
+// data will be written to the wrong histogram.
+
+// Times from 1 ms to 10 s, 50 buckets, local (not uploaded via UMA).
+#define LOCAL_HISTOGRAM_TIMES(name, sample) LOCAL_HISTOGRAM_CUSTOM_TIMES( \
+    name, sample, base::TimeDelta::FromMilliseconds(1), \
+    base::TimeDelta::FromSeconds(10), 50)
+
+// For folks that need real specific times, use this to select a precise range
+// of times you want plotted, and the number of buckets you want used.
+#define LOCAL_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
+  STATIC_HISTOGRAM_POINTER_BLOCK(name, AddTime(sample), \
+      base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
+                                      base::HistogramBase::kNoFlags))
+
+// Counts from 1 to 1M, 50 exponentially-spaced buckets.
+#define LOCAL_HISTOGRAM_COUNTS(name, sample) LOCAL_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 1000000, 50)
+
+#define LOCAL_HISTOGRAM_COUNTS_100(name, sample) \
+    LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 100, 50)
+
+#define LOCAL_HISTOGRAM_COUNTS_10000(name, sample) \
+    LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 10000, 50)
+
+#define LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
+  STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+      base::Histogram::FactoryGet(name, min, max, bucket_count, \
+                                  base::HistogramBase::kNoFlags))
+
+// This is a helper macro used by other macros and shouldn't be used directly.
+#define HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary, flag) \
+  STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+      base::LinearHistogram::FactoryGet(name, 1, boundary, boundary + 1, \
+                                        flag))
+
+// Percentages in [0, 100]; 101 exclusive upper bound covers the value 100.
+#define LOCAL_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
+    LOCAL_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
+
+#define LOCAL_HISTOGRAM_BOOLEAN(name, sample) \
+    STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample), \
+        base::BooleanHistogram::FactoryGet(name, base::Histogram::kNoFlags))
+
+// Support histogramming of an enumerated value. The samples should always be
+// strictly less than |boundary_value| -- this prevents you from running into
+// problems down the line if you add additional buckets to the histogram. Note
+// also that, despite explicitly setting the minimum bucket value to |1| below,
+// it is fine for enumerated histograms to be 0-indexed -- this is because
+// enumerated histograms should never have underflow.
+#define LOCAL_HISTOGRAM_ENUMERATION(name, sample, boundary_value) \
+  STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+      base::LinearHistogram::FactoryGet(name, 1, boundary_value, \
+          boundary_value + 1, base::HistogramBase::kNoFlags))
+
+// Support histogramming of an enumerated value. Samples should be one of the
+// std::vector<int> list provided via |custom_ranges|. See comments above
+// CustomRanges::FactoryGet about the requirement of |custom_ranges|.
+// You can use the helper function CustomHistogram::ArrayToCustomRanges to
+// transform a C-style array of valid sample values to a std::vector<int>.
+#define LOCAL_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
+  STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+      base::CustomHistogram::FactoryGet(name, custom_ranges, \
+                                        base::HistogramBase::kNoFlags))
+
+// Memory sizes in KB, from 1000 KB to 500000 KB, 50 buckets.
+#define LOCAL_HISTOGRAM_MEMORY_KB(name, sample) LOCAL_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1000, 500000, 50)
+
+//------------------------------------------------------------------------------
+// The following macros provide typical usage scenarios for callers that wish
+// to record histogram data, and have the data submitted/uploaded via UMA.
+// Not all systems support such UMA, but if they do, the following macros
+// should work with the service.
+
+// Times from 1 ms to 10 s, 50 buckets.
+#define UMA_HISTOGRAM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+    name, sample, base::TimeDelta::FromMilliseconds(1), \
+    base::TimeDelta::FromSeconds(10), 50)
+
+// Times from 10 ms to 3 minutes, 50 buckets.
+#define UMA_HISTOGRAM_MEDIUM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+    name, sample, base::TimeDelta::FromMilliseconds(10), \
+    base::TimeDelta::FromMinutes(3), 50)
+
+// Use this macro when times can routinely be much longer than 10 seconds.
+#define UMA_HISTOGRAM_LONG_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+    name, sample, base::TimeDelta::FromMilliseconds(1), \
+    base::TimeDelta::FromHours(1), 50)
+
+// Use this macro when times can routinely be much longer than 10 seconds and
+// you want 100 buckets.
+#define UMA_HISTOGRAM_LONG_TIMES_100(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+    name, sample, base::TimeDelta::FromMilliseconds(1), \
+    base::TimeDelta::FromHours(1), 100)
+
+#define UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
+  STATIC_HISTOGRAM_POINTER_BLOCK(name, AddTime(sample), \
+      base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
+          base::HistogramBase::kUmaTargetedHistogramFlag))
+
+// Counts from 1 to 1M, 50 exponentially-spaced buckets.
+#define UMA_HISTOGRAM_COUNTS(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 1000000, 50)
+
+#define UMA_HISTOGRAM_COUNTS_100(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 100, 50)
+
+#define UMA_HISTOGRAM_COUNTS_1000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 1000, 50)
+
+#define UMA_HISTOGRAM_COUNTS_10000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 10000, 50)
+
+#define UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
+  STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+      base::Histogram::FactoryGet(name, min, max, bucket_count, \
+          base::HistogramBase::kUmaTargetedHistogramFlag))
+
+// Memory sizes: |sample| is in KB here, MB in the two macros below.
+#define UMA_HISTOGRAM_MEMORY_KB(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1000, 500000, 50)
+
+#define UMA_HISTOGRAM_MEMORY_MB(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 1000, 50)
+
+#define UMA_HISTOGRAM_MEMORY_LARGE_MB(name, sample) \
+    UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 64000, 100)
+
+// Percentages in [0, 100]; 101 exclusive upper bound covers the value 100.
+#define UMA_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
+    UMA_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
+
+#define UMA_HISTOGRAM_BOOLEAN(name, sample) \
+    STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample), \
+        base::BooleanHistogram::FactoryGet(name, \
+            base::HistogramBase::kUmaTargetedHistogramFlag))
+
+// The samples should always be strictly less than |boundary_value|. For more
+// details, see the comment for the |LOCAL_HISTOGRAM_ENUMERATION| macro, above.
+#define UMA_HISTOGRAM_ENUMERATION(name, sample, boundary_value) \
+    HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary_value, \
+        base::HistogramBase::kUmaTargetedHistogramFlag)
+
+// Similar to UMA_HISTOGRAM_ENUMERATION, but used for recording stability
+// histograms. Use this if recording a histogram that should be part of the
+// initial stability log.
+#define UMA_STABILITY_HISTOGRAM_ENUMERATION(name, sample, boundary_value) \
+    HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary_value, \
+        base::HistogramBase::kUmaStabilityHistogramFlag)
+
+#define UMA_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
+    STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+        base::CustomHistogram::FactoryGet(name, custom_ranges, \
+            base::HistogramBase::kUmaTargetedHistogramFlag))
+
+// Scoped class which logs its time on this earth as a UMA statistic. This is
+// recommended for when you want a histogram which measures the time it takes
+// for a method to execute. This measures up to 10 seconds.
+#define SCOPED_UMA_HISTOGRAM_TIMER(name) \
+    SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, false, __COUNTER__)
+
+// Similar scoped histogram timer, but this uses UMA_HISTOGRAM_LONG_TIMES_100,
+// which measures up to an hour, and uses 100 buckets. This is more expensive
+// to store, so only use if this often takes >10 seconds.
+#define SCOPED_UMA_HISTOGRAM_LONG_TIMER(name) \
+    SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, true, __COUNTER__)
+
+// This nested macro is necessary to expand __COUNTER__ to an actual value.
+#define SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, is_long, key) \
+    SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key)
+
+// Both the class name and the local object name embed |key| (a __COUNTER__
+// value), so several timers can coexist in one scope without colliding.
+// The elapsed time is recorded when the object is destroyed at scope exit.
+#define SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key) \
+  class ScopedHistogramTimer##key { \
+   public: \
+    ScopedHistogramTimer##key() : constructed_(base::TimeTicks::Now()) {} \
+    ~ScopedHistogramTimer##key() { \
+      base::TimeDelta elapsed = base::TimeTicks::Now() - constructed_; \
+      if (is_long) { \
+        UMA_HISTOGRAM_LONG_TIMES_100(name, elapsed); \
+      } else { \
+        UMA_HISTOGRAM_TIMES(name, elapsed); \
+      } \
+    } \
+   private: \
+    base::TimeTicks constructed_; \
+  } scoped_histogram_timer_##key
+
+#endif // BASE_METRICS_HISTOGRAM_MACROS_H_
diff --git a/libchrome/base/metrics/histogram_macros_unittest.cc b/libchrome/base/metrics/histogram_macros_unittest.cc
new file mode 100644
index 0000000..c599161
--- /dev/null
+++ b/libchrome/base/metrics/histogram_macros_unittest.cc
@@ -0,0 +1,18 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_macros.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Verifies (at compile time, essentially) that the __COUNTER__-based naming
+// lets multiple scoped timers of both kinds coexist in a single scope.
+TEST(ScopedHistogramTimer, TwoTimersOneScope) {
+  SCOPED_UMA_HISTOGRAM_TIMER("TestTimer0");
+  SCOPED_UMA_HISTOGRAM_TIMER("TestTimer1");
+  SCOPED_UMA_HISTOGRAM_LONG_TIMER("TestLongTimer0");
+  SCOPED_UMA_HISTOGRAM_LONG_TIMER("TestLongTimer1");
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/histogram_samples.cc b/libchrome/base/metrics/histogram_samples.cc
new file mode 100644
index 0000000..ea3b987
--- /dev/null
+++ b/libchrome/base/metrics/histogram_samples.cc
@@ -0,0 +1,155 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_samples.h"
+
+#include "base/compiler_specific.h"
+#include "base/pickle.h"
+
+namespace base {
+
+namespace {
+
+// Reads serialized (min, max, count) triples out of a pickle and exposes them
+// through the SampleCountIterator interface. Becomes Done() at the first
+// failed read, i.e. when the pickle data is exhausted (or malformed).
+class SampleCountPickleIterator : public SampleCountIterator {
+ public:
+  explicit SampleCountPickleIterator(PickleIterator* iter);
+
+  bool Done() const override;
+  void Next() override;
+  void Get(HistogramBase::Sample* min,
+           HistogramBase::Sample* max,
+           HistogramBase::Count* count) const override;
+
+ private:
+  // Source of the serialized triples; not owned.
+  PickleIterator* const iter_;
+
+  // Values of the triple most recently read by Next().
+  HistogramBase::Sample min_;
+  HistogramBase::Sample max_;
+  HistogramBase::Count count_;
+  bool is_done_;
+};
+
+SampleCountPickleIterator::SampleCountPickleIterator(PickleIterator* iter)
+    : iter_(iter),
+      is_done_(false) {
+  // Prime the iterator with the first triple (or become Done immediately).
+  Next();
+}
+
+bool SampleCountPickleIterator::Done() const {
+  return is_done_;
+}
+
+void SampleCountPickleIterator::Next() {
+  DCHECK(!Done());
+  // Any partial/failed read marks the end of the data.
+  if (!iter_->ReadInt(&min_) ||
+      !iter_->ReadInt(&max_) ||
+      !iter_->ReadInt(&count_))
+    is_done_ = true;
+}
+
+void SampleCountPickleIterator::Get(HistogramBase::Sample* min,
+                                    HistogramBase::Sample* max,
+                                    HistogramBase::Count* count) const {
+  DCHECK(!Done());
+  *min = min_;
+  *max = max_;
+  *count = count_;
+}
+
+} // namespace
+
+// Don't try to delegate behavior to the constructor below that accepts a
+// Metadata pointer by passing &local_meta_. Such cannot be reliably passed
+// because it has not yet been constructed -- no member variables have; the
+// class itself is in the middle of being constructed. Using it to
+// initialize meta_ is okay because the object now exists and local_meta_
+// is before meta_ in the construction order.
+HistogramSamples::HistogramSamples(uint64_t id)
+    : meta_(&local_meta_) {
+  meta_->id = id;
+}
+
+// Constructs against externally-provided metadata (e.g. in persistent or
+// shared memory). |meta| must outlive this object; it is not owned.
+HistogramSamples::HistogramSamples(uint64_t id, Metadata* meta)
+    : meta_(meta) {
+  DCHECK(meta_->id == 0 || meta_->id == id);
+
+  // It's possible that |meta| is contained in initialized, read-only memory
+  // so it's essential that no write be done in that case.
+  if (!meta_->id)
+    meta_->id = id;
+}
+
+HistogramSamples::~HistogramSamples() {}
+
+// Accumulates all of |other|'s samples into this set: the running sum, the
+// redundant total count, and every per-bucket count.
+void HistogramSamples::Add(const HistogramSamples& other) {
+  IncreaseSum(other.sum());
+  subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count,
+                                    other.redundant_count());
+  bool success = AddSubtractImpl(other.Iterator().get(), ADD);
+  DCHECK(success);
+}
+
+// Reads a serialized sample-set (as written by Serialize()) from |iter| and
+// accumulates it into this set. Returns false if the header (sum and
+// redundant count) cannot be read or the bucket data cannot be applied.
+bool HistogramSamples::AddFromPickle(PickleIterator* iter) {
+  int64_t sum;
+  HistogramBase::Count redundant_count;
+
+  if (!iter->ReadInt64(&sum) || !iter->ReadInt(&redundant_count))
+    return false;
+
+  IncreaseSum(sum);
+  subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count,
+                                    redundant_count);
+
+  // Remaining pickle data is the stream of (min, max, count) triples.
+  SampleCountPickleIterator pickle_iter(iter);
+  return AddSubtractImpl(&pickle_iter, ADD);
+}
+
+// Removes all of |other|'s samples from this set; the exact inverse of Add().
+void HistogramSamples::Subtract(const HistogramSamples& other) {
+  IncreaseSum(-other.sum());
+  subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count,
+                                    -other.redundant_count());
+  bool success = AddSubtractImpl(other.Iterator().get(), SUBTRACT);
+  DCHECK(success);
+}
+
+// Writes this sample-set into |pickle| in the format AddFromPickle() expects:
+// sum, redundant count, then one (min, max, count) triple per bucket with
+// samples. Returns false on the first failed write.
+bool HistogramSamples::Serialize(Pickle* pickle) const {
+  if (!pickle->WriteInt64(sum()))
+    return false;
+  if (!pickle->WriteInt(redundant_count()))
+    return false;
+
+  HistogramBase::Sample min;
+  HistogramBase::Sample max;
+  HistogramBase::Count count;
+  for (std::unique_ptr<SampleCountIterator> it = Iterator(); !it->Done();
+       it->Next()) {
+    it->Get(&min, &max, &count);
+    if (!pickle->WriteInt(min) ||
+        !pickle->WriteInt(max) ||
+        !pickle->WriteInt(count))
+      return false;
+  }
+  return true;
+}
+
+// Adds |diff| to the running sum. On 32-bit builds the add is a plain
+// (non-atomic) operation, accepting possible "shearing" as documented on
+// Metadata::sum.
+void HistogramSamples::IncreaseSum(int64_t diff) {
+#ifdef ARCH_CPU_64_BITS
+  subtle::NoBarrier_AtomicIncrement(&meta_->sum, diff);
+#else
+  meta_->sum += diff;
+#endif
+}
+
+void HistogramSamples::IncreaseRedundantCount(HistogramBase::Count diff) {
+  subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count, diff);
+}
+
+SampleCountIterator::~SampleCountIterator() {}
+
+// Default implementation: iterators that have no notion of a bucket index
+// report that no index is available.
+bool SampleCountIterator::GetBucketIndex(size_t* /*index*/) const {
+  DCHECK(!Done());
+  return false;
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/histogram_samples.h b/libchrome/base/metrics/histogram_samples.h
new file mode 100644
index 0000000..e28573f
--- /dev/null
+++ b/libchrome/base/metrics/histogram_samples.h
@@ -0,0 +1,133 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_SAMPLES_H_
+#define BASE_METRICS_HISTOGRAM_SAMPLES_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/atomicops.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+
+namespace base {
+
+class Pickle;
+class PickleIterator;
+class SampleCountIterator;
+
+// HistogramSamples is a container storing all samples of a histogram. All
+// elements must be of a fixed width to ensure 32/64-bit interoperability.
+// If this structure changes, bump the version number for kTypeIdHistogram
+// in persistent_histogram_allocator.cc.
+class BASE_EXPORT HistogramSamples {
+ public:
+ struct Metadata {
+ // Initialized when the sample-set is first created with a value provided
+ // by the caller. It is generally used to identify the sample-set across
+ // threads and processes, though not necessarily uniquely as it is possible
+ // to have multiple sample-sets representing subsets of the data.
+ uint64_t id;
+
+  // The sum of all the entries, effectively the sum(sample * count) for
+ // all samples. Despite being atomic, no guarantees are made on the
+ // accuracy of this value; there may be races during histogram
+ // accumulation and snapshotting that we choose to accept. It should
+ // be treated as approximate.
+#ifdef ARCH_CPU_64_BITS
+ subtle::Atomic64 sum;
+#else
+ // 32-bit systems don't have atomic 64-bit operations. Use a basic type
+ // and don't worry about "shearing".
+ int64_t sum;
+#endif
+
+ // A "redundant" count helps identify memory corruption. It redundantly
+ // stores the total number of samples accumulated in the histogram. We
+ // can compare this count to the sum of the counts (TotalCount() function),
+ // and detect problems. Note, depending on the implementation of different
+ // histogram types, there might be races during histogram accumulation
+ // and snapshotting that we choose to accept. In this case, the tallies
+ // might mismatch even when no memory corruption has happened.
+ HistogramBase::AtomicCount redundant_count;
+
+ Metadata() : id(0), sum(0), redundant_count(0) {}
+ };
+
+ explicit HistogramSamples(uint64_t id);
+ HistogramSamples(uint64_t id, Metadata* meta);
+ virtual ~HistogramSamples();
+
+ virtual void Accumulate(HistogramBase::Sample value,
+ HistogramBase::Count count) = 0;
+ virtual HistogramBase::Count GetCount(HistogramBase::Sample value) const = 0;
+ virtual HistogramBase::Count TotalCount() const = 0;
+
+ virtual void Add(const HistogramSamples& other);
+
+ // Add from serialized samples.
+ virtual bool AddFromPickle(PickleIterator* iter);
+
+ virtual void Subtract(const HistogramSamples& other);
+
+ virtual std::unique_ptr<SampleCountIterator> Iterator() const = 0;
+ virtual bool Serialize(Pickle* pickle) const;
+
+  // Accessor functions.
+ uint64_t id() const { return meta_->id; }
+ int64_t sum() const {
+#ifdef ARCH_CPU_64_BITS
+ return subtle::NoBarrier_Load(&meta_->sum);
+#else
+ return meta_->sum;
+#endif
+ }
+ HistogramBase::Count redundant_count() const {
+ return subtle::NoBarrier_Load(&meta_->redundant_count);
+ }
+
+ protected:
+ // Based on |op| type, add or subtract sample counts data from the iterator.
+ enum Operator { ADD, SUBTRACT };
+ virtual bool AddSubtractImpl(SampleCountIterator* iter, Operator op) = 0;
+
+ void IncreaseSum(int64_t diff);
+ void IncreaseRedundantCount(HistogramBase::Count diff);
+
+ private:
+ // In order to support histograms shared through an external memory segment,
+ // meta values may be the local storage or external storage depending on the
+ // wishes of the derived class.
+ Metadata local_meta_;
+ Metadata* meta_;
+
+ DISALLOW_COPY_AND_ASSIGN(HistogramSamples);
+};
+
+class BASE_EXPORT SampleCountIterator {
+ public:
+ virtual ~SampleCountIterator();
+
+ virtual bool Done() const = 0;
+ virtual void Next() = 0;
+
+ // Get the sample and count at current position.
+  // |min|, |max| and |count| can be NULL if the value is not of interest.
+ // Requires: !Done();
+ virtual void Get(HistogramBase::Sample* min,
+ HistogramBase::Sample* max,
+ HistogramBase::Count* count) const = 0;
+
+ // Get the index of current histogram bucket.
+ // For histograms that don't use predefined buckets, it returns false.
+ // Requires: !Done();
+ virtual bool GetBucketIndex(size_t* index) const;
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_HISTOGRAM_SAMPLES_H_
diff --git a/libchrome/base/metrics/histogram_snapshot_manager.cc b/libchrome/base/metrics/histogram_snapshot_manager.cc
new file mode 100644
index 0000000..340505e
--- /dev/null
+++ b/libchrome/base/metrics/histogram_snapshot_manager.cc
@@ -0,0 +1,108 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_snapshot_manager.h"
+
+#include <memory>
+
+#include "base/debug/alias.h"
+#include "base/metrics/histogram_flattener.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/stl_util.h"
+
+namespace base {
+
+HistogramSnapshotManager::HistogramSnapshotManager(
+ HistogramFlattener* histogram_flattener)
+ : histogram_flattener_(histogram_flattener) {
+ DCHECK(histogram_flattener_);
+}
+
+HistogramSnapshotManager::~HistogramSnapshotManager() {
+}
+
+void HistogramSnapshotManager::PrepareDelta(HistogramBase* histogram) {
+ PrepareSamples(histogram, histogram->SnapshotDelta());
+}
+
+void HistogramSnapshotManager::PrepareFinalDelta(
+ const HistogramBase* histogram) {
+ PrepareSamples(histogram, histogram->SnapshotFinalDelta());
+}
+
+void HistogramSnapshotManager::PrepareSamples(
+ const HistogramBase* histogram,
+ std::unique_ptr<HistogramSamples> samples) {
+ DCHECK(histogram_flattener_);
+
+ // Get information known about this histogram. If it did not previously
+ // exist, one will be created and initialized.
+ SampleInfo* sample_info = &known_histograms_[histogram->name_hash()];
+
+ // Crash if we detect that our histograms have been overwritten. This may be
+ // a fair distance from the memory smasher, but we hope to correlate these
+ // crashes with other events, such as plugins, or usage patterns, etc.
+ uint32_t corruption = histogram->FindCorruption(*samples);
+ if (HistogramBase::BUCKET_ORDER_ERROR & corruption) {
+ // Extract fields useful during debug.
+ const BucketRanges* ranges =
+ static_cast<const Histogram*>(histogram)->bucket_ranges();
+ std::vector<HistogramBase::Sample> ranges_copy;
+ for (size_t i = 0; i < ranges->size(); ++i)
+ ranges_copy.push_back(ranges->range(i));
+ HistogramBase::Sample* ranges_ptr = &ranges_copy[0];
+ const char* histogram_name = histogram->histogram_name().c_str();
+ int32_t flags = histogram->flags();
+ // The checksum should have caught this, so crash separately if it didn't.
+ CHECK_NE(0U, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
+ CHECK(false); // Crash for the bucket order corruption.
+ // Ensure that compiler keeps around pointers to |histogram| and its
+ // internal |bucket_ranges_| for any minidumps.
+ base::debug::Alias(&ranges_ptr);
+ base::debug::Alias(&histogram_name);
+ base::debug::Alias(&flags);
+ }
+ // Checksum corruption might not have caused order corruption.
+ CHECK_EQ(0U, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
+
+ // Note, at this point corruption can only be COUNT_HIGH_ERROR or
+ // COUNT_LOW_ERROR and they never arise together, so we don't need to extract
+ // bits from corruption.
+ if (corruption) {
+ DLOG(ERROR) << "Histogram: \"" << histogram->histogram_name()
+ << "\" has data corruption: " << corruption;
+ histogram_flattener_->InconsistencyDetected(
+ static_cast<HistogramBase::Inconsistency>(corruption));
+ // Don't record corrupt data to metrics services.
+ const uint32_t old_corruption = sample_info->inconsistencies;
+ if (old_corruption == (corruption | old_corruption))
+ return; // We've already seen this corruption for this histogram.
+ sample_info->inconsistencies |= corruption;
+ histogram_flattener_->UniqueInconsistencyDetected(
+ static_cast<HistogramBase::Inconsistency>(corruption));
+ return;
+ }
+
+ if (samples->TotalCount() > 0)
+ histogram_flattener_->RecordDelta(*histogram, *samples);
+}
+
+void HistogramSnapshotManager::InspectLoggedSamplesInconsistency(
+ const HistogramSamples& new_snapshot,
+ HistogramSamples* logged_samples) {
+ HistogramBase::Count discrepancy =
+ logged_samples->TotalCount() - logged_samples->redundant_count();
+ if (!discrepancy)
+ return;
+
+ histogram_flattener_->InconsistencyDetectedInLoggedCount(discrepancy);
+ if (discrepancy > Histogram::kCommonRaceBasedCountMismatch) {
+ // Fix logged_samples.
+ logged_samples->Subtract(*logged_samples);
+ logged_samples->Add(new_snapshot);
+ }
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/histogram_snapshot_manager.h b/libchrome/base/metrics/histogram_snapshot_manager.h
new file mode 100644
index 0000000..26fb93f
--- /dev/null
+++ b/libchrome/base/metrics/histogram_snapshot_manager.h
@@ -0,0 +1,96 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_SNAPSHOT_MANAGER_H_
+#define BASE_METRICS_HISTOGRAM_SNAPSHOT_MANAGER_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+
+namespace base {
+
+class HistogramSamples;
+class HistogramFlattener;
+
+// HistogramSnapshotManager handles the logistics of gathering up available
+// histograms for recording either to disk or for transmission (such as from
+// renderer to browser, or from browser to UMA upload). Since histograms can sit
+// in memory for an extended period of time, and are vulnerable to memory
+// corruption, this class also validates as much redundancy as it can before
+// calling for the marginal change (a.k.a., delta) in a histogram to be
+// recorded.
+class BASE_EXPORT HistogramSnapshotManager {
+ public:
+ explicit HistogramSnapshotManager(HistogramFlattener* histogram_flattener);
+ virtual ~HistogramSnapshotManager();
+
+ // Snapshot all histograms, and ask |histogram_flattener_| to record the
+ // delta. |flags_to_set| is used to set flags for each histogram.
+ // |required_flags| is used to select histograms to be recorded.
+ // Only histograms that have all the flags specified by the argument will be
+ // chosen. If all histograms should be recorded, set it to
+ // |Histogram::kNoFlags|.
+ template <class ForwardHistogramIterator>
+ void PrepareDeltas(ForwardHistogramIterator begin,
+ ForwardHistogramIterator end,
+ HistogramBase::Flags flags_to_set,
+ HistogramBase::Flags required_flags) {
+ for (ForwardHistogramIterator it = begin; it != end; ++it) {
+ (*it)->SetFlags(flags_to_set);
+ if (((*it)->flags() & required_flags) == required_flags)
+ PrepareDelta(*it);
+ }
+ }
+
+ // When the collection is not so simple as can be done using a single
+  // iterator, the steps can be performed separately. Call PrepareDelta()
+ // as many times as necessary. PrepareFinalDelta() works like PrepareDelta()
+ // except that it does not update the previous logged values and can thus
+ // be used with read-only files.
+ void PrepareDelta(HistogramBase* histogram);
+ void PrepareFinalDelta(const HistogramBase* histogram);
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(HistogramSnapshotManagerTest, CheckMerge);
+
+ // During a snapshot, samples are acquired and aggregated. This structure
+ // contains all the information for a given histogram that persists between
+ // collections.
+ struct SampleInfo {
+ // The set of inconsistencies (flags) already seen for the histogram.
+ // See HistogramBase::Inconsistency for values.
+ uint32_t inconsistencies = 0;
+ };
+
+ // Capture and hold samples from a histogram. This does all the heavy
+  // lifting for PrepareDelta() and PrepareFinalDelta().
+ void PrepareSamples(const HistogramBase* histogram,
+ std::unique_ptr<HistogramSamples> samples);
+
+ // Try to detect and fix count inconsistency of logged samples.
+ void InspectLoggedSamplesInconsistency(
+ const HistogramSamples& new_snapshot,
+ HistogramSamples* logged_samples);
+
+ // For histograms, track what has been previously seen, indexed
+ // by the hash of the histogram name.
+ std::map<uint64_t, SampleInfo> known_histograms_;
+
+ // |histogram_flattener_| handles the logistics of recording the histogram
+ // deltas.
+ HistogramFlattener* histogram_flattener_; // Weak.
+
+ DISALLOW_COPY_AND_ASSIGN(HistogramSnapshotManager);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_HISTOGRAM_SNAPSHOT_MANAGER_H_
diff --git a/libchrome/base/metrics/histogram_snapshot_manager_unittest.cc b/libchrome/base/metrics/histogram_snapshot_manager_unittest.cc
new file mode 100644
index 0000000..3c13e1a
--- /dev/null
+++ b/libchrome/base/metrics/histogram_snapshot_manager_unittest.cc
@@ -0,0 +1,129 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_snapshot_manager.h"
+
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/metrics/histogram_delta_serialization.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/sample_vector.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/stl_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class HistogramFlattenerDeltaRecorder : public HistogramFlattener {
+ public:
+ HistogramFlattenerDeltaRecorder() {}
+
+ void RecordDelta(const HistogramBase& histogram,
+ const HistogramSamples& snapshot) override {
+ recorded_delta_histogram_names_.push_back(histogram.histogram_name());
+ // Use CHECK instead of ASSERT to get full stack-trace and thus origin.
+ CHECK(!ContainsKey(recorded_delta_histogram_sum_,
+ histogram.histogram_name()));
+ // Keep pointer to snapshot for testing. This really isn't ideal but the
+ // snapshot-manager keeps the snapshot alive until it's "forgotten".
+ recorded_delta_histogram_sum_[histogram.histogram_name()] = snapshot.sum();
+ }
+
+ void InconsistencyDetected(HistogramBase::Inconsistency problem) override {
+ ASSERT_TRUE(false);
+ }
+
+ void UniqueInconsistencyDetected(
+ HistogramBase::Inconsistency problem) override {
+ ASSERT_TRUE(false);
+ }
+
+ void InconsistencyDetectedInLoggedCount(int amount) override {
+ ASSERT_TRUE(false);
+ }
+
+ void Reset() {
+ recorded_delta_histogram_names_.clear();
+ recorded_delta_histogram_sum_.clear();
+ }
+
+ std::vector<std::string> GetRecordedDeltaHistogramNames() {
+ return recorded_delta_histogram_names_;
+ }
+
+ int64_t GetRecordedDeltaHistogramSum(const std::string& name) {
+ EXPECT_TRUE(ContainsKey(recorded_delta_histogram_sum_, name));
+ return recorded_delta_histogram_sum_[name];
+ }
+
+ private:
+ std::vector<std::string> recorded_delta_histogram_names_;
+ std::map<std::string, int64_t> recorded_delta_histogram_sum_;
+
+ DISALLOW_COPY_AND_ASSIGN(HistogramFlattenerDeltaRecorder);
+};
+
+class HistogramSnapshotManagerTest : public testing::Test {
+ protected:
+ HistogramSnapshotManagerTest()
+ : statistics_recorder_(StatisticsRecorder::CreateTemporaryForTesting()),
+ histogram_snapshot_manager_(&histogram_flattener_delta_recorder_) {}
+
+ ~HistogramSnapshotManagerTest() override {}
+
+ std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+ HistogramFlattenerDeltaRecorder histogram_flattener_delta_recorder_;
+ HistogramSnapshotManager histogram_snapshot_manager_;
+};
+
+TEST_F(HistogramSnapshotManagerTest, PrepareDeltasNoFlagsFilter) {
+ // kNoFlags filter should record all histograms.
+ UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 4);
+ UMA_STABILITY_HISTOGRAM_ENUMERATION("UmaStabilityHistogram", 1, 2);
+
+ histogram_snapshot_manager_.PrepareDeltas(
+ StatisticsRecorder::begin(false), StatisticsRecorder::end(),
+ HistogramBase::kNoFlags, HistogramBase::kNoFlags);
+
+ const std::vector<std::string>& histograms =
+ histogram_flattener_delta_recorder_.GetRecordedDeltaHistogramNames();
+ EXPECT_EQ(2U, histograms.size());
+ EXPECT_EQ("UmaHistogram", histograms[0]);
+ EXPECT_EQ("UmaStabilityHistogram", histograms[1]);
+}
+
+TEST_F(HistogramSnapshotManagerTest, PrepareDeltasUmaHistogramFlagFilter) {
+ // Note that kUmaStabilityHistogramFlag includes kUmaTargetedHistogramFlag.
+ UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 4);
+ UMA_STABILITY_HISTOGRAM_ENUMERATION("UmaStabilityHistogram", 1, 2);
+
+ histogram_snapshot_manager_.PrepareDeltas(
+ StatisticsRecorder::begin(false), StatisticsRecorder::end(),
+ HistogramBase::kNoFlags, HistogramBase::kUmaTargetedHistogramFlag);
+
+ const std::vector<std::string>& histograms =
+ histogram_flattener_delta_recorder_.GetRecordedDeltaHistogramNames();
+ EXPECT_EQ(2U, histograms.size());
+ EXPECT_EQ("UmaHistogram", histograms[0]);
+ EXPECT_EQ("UmaStabilityHistogram", histograms[1]);
+}
+
+TEST_F(HistogramSnapshotManagerTest,
+ PrepareDeltasUmaStabilityHistogramFlagFilter) {
+ UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 4);
+ UMA_STABILITY_HISTOGRAM_ENUMERATION("UmaStabilityHistogram", 1, 2);
+
+ histogram_snapshot_manager_.PrepareDeltas(
+ StatisticsRecorder::begin(false), StatisticsRecorder::end(),
+ HistogramBase::kNoFlags, HistogramBase::kUmaStabilityHistogramFlag);
+
+ const std::vector<std::string>& histograms =
+ histogram_flattener_delta_recorder_.GetRecordedDeltaHistogramNames();
+ EXPECT_EQ(1U, histograms.size());
+ EXPECT_EQ("UmaStabilityHistogram", histograms[0]);
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/histogram_unittest.cc b/libchrome/base/metrics/histogram_unittest.cc
new file mode 100644
index 0000000..5c2ca68
--- /dev/null
+++ b/libchrome/base/metrics/histogram_unittest.cc
@@ -0,0 +1,751 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram.h"
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <climits>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/metrics/bucket_ranges.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/metrics/sample_vector.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/pickle.h"
+#include "base/strings/stringprintf.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Test parameter indicates if a persistent memory allocator should be used
+// for histogram allocation. False will allocate histograms from the process
+// heap.
+class HistogramTest : public testing::TestWithParam<bool> {
+ protected:
+ const int32_t kAllocatorMemorySize = 8 << 20; // 8 MiB
+
+ HistogramTest() : use_persistent_histogram_allocator_(GetParam()) {}
+
+ void SetUp() override {
+ if (use_persistent_histogram_allocator_)
+ CreatePersistentHistogramAllocator();
+
+ // Each test will have a clean state (no Histogram / BucketRanges
+ // registered).
+ InitializeStatisticsRecorder();
+ }
+
+ void TearDown() override {
+ if (allocator_) {
+ ASSERT_FALSE(allocator_->IsFull());
+ ASSERT_FALSE(allocator_->IsCorrupt());
+ }
+ UninitializeStatisticsRecorder();
+ DestroyPersistentHistogramAllocator();
+ }
+
+ void InitializeStatisticsRecorder() {
+ DCHECK(!statistics_recorder_);
+ statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
+ }
+
+ void UninitializeStatisticsRecorder() {
+ statistics_recorder_.reset();
+ }
+
+ void CreatePersistentHistogramAllocator() {
+ // By getting the results-histogram before any persistent allocator
+ // is attached, that histogram is guaranteed not to be stored in
+ // any persistent memory segment (which simplifies some tests).
+ GlobalHistogramAllocator::GetCreateHistogramResultHistogram();
+
+ GlobalHistogramAllocator::CreateWithLocalMemory(
+ kAllocatorMemorySize, 0, "HistogramAllocatorTest");
+ allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
+ }
+
+ void DestroyPersistentHistogramAllocator() {
+ allocator_ = nullptr;
+ GlobalHistogramAllocator::ReleaseForTesting();
+ }
+
+ const bool use_persistent_histogram_allocator_;
+
+ std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+ std::unique_ptr<char[]> allocator_memory_;
+ PersistentMemoryAllocator* allocator_ = nullptr;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HistogramTest);
+};
+
+// Run all HistogramTest cases with both heap and persistent memory.
+INSTANTIATE_TEST_CASE_P(HeapAndPersistent, HistogramTest, testing::Bool());
+
+
+// Check for basic syntax and use.
+TEST_P(HistogramTest, BasicTest) {
+ // Try basic construction
+ HistogramBase* histogram = Histogram::FactoryGet(
+ "TestHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+ EXPECT_TRUE(histogram);
+
+ HistogramBase* linear_histogram = LinearHistogram::FactoryGet(
+ "TestLinearHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+ EXPECT_TRUE(linear_histogram);
+
+ std::vector<int> custom_ranges;
+ custom_ranges.push_back(1);
+ custom_ranges.push_back(5);
+ HistogramBase* custom_histogram = CustomHistogram::FactoryGet(
+ "TestCustomHistogram", custom_ranges, HistogramBase::kNoFlags);
+ EXPECT_TRUE(custom_histogram);
+
+  // Macros that create histograms have an internal static variable which will
+ // continue to point to those from the very first run of this method even
+ // during subsequent runs.
+ static bool already_run = false;
+ if (already_run)
+ return;
+ already_run = true;
+
+ // Use standard macros (but with fixed samples)
+ LOCAL_HISTOGRAM_TIMES("Test2Histogram", TimeDelta::FromDays(1));
+ LOCAL_HISTOGRAM_COUNTS("Test3Histogram", 30);
+
+ LOCAL_HISTOGRAM_ENUMERATION("Test6Histogram", 129, 130);
+}
+
+// Check that the macro correctly matches histograms by name and records their
+// data together.
+TEST_P(HistogramTest, NameMatchTest) {
+  // Macros that create histograms have an internal static variable which will
+ // continue to point to those from the very first run of this method even
+ // during subsequent runs.
+ static bool already_run = false;
+ if (already_run)
+ return;
+ already_run = true;
+
+ LOCAL_HISTOGRAM_PERCENTAGE("DuplicatedHistogram", 10);
+ LOCAL_HISTOGRAM_PERCENTAGE("DuplicatedHistogram", 10);
+ HistogramBase* histogram = LinearHistogram::FactoryGet(
+ "DuplicatedHistogram", 1, 101, 102, HistogramBase::kNoFlags);
+
+ std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+ EXPECT_EQ(2, samples->TotalCount());
+ EXPECT_EQ(2, samples->GetCount(10));
+}
+
+// Check that delta calculations work correctly.
+TEST_P(HistogramTest, DeltaTest) {
+ HistogramBase* histogram =
+ Histogram::FactoryGet("DeltaHistogram", 1, 64, 8,
+ HistogramBase::kNoFlags);
+ histogram->Add(1);
+ histogram->Add(10);
+ histogram->Add(50);
+
+ std::unique_ptr<HistogramSamples> samples = histogram->SnapshotDelta();
+ EXPECT_EQ(3, samples->TotalCount());
+ EXPECT_EQ(1, samples->GetCount(1));
+ EXPECT_EQ(1, samples->GetCount(10));
+ EXPECT_EQ(1, samples->GetCount(50));
+ EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+
+ samples = histogram->SnapshotDelta();
+ EXPECT_EQ(0, samples->TotalCount());
+
+ histogram->Add(10);
+ histogram->Add(10);
+ samples = histogram->SnapshotDelta();
+ EXPECT_EQ(2, samples->TotalCount());
+ EXPECT_EQ(2, samples->GetCount(10));
+
+ samples = histogram->SnapshotDelta();
+ EXPECT_EQ(0, samples->TotalCount());
+}
+
+// Check that final-delta calculations work correctly.
+TEST_P(HistogramTest, FinalDeltaTest) {
+ HistogramBase* histogram =
+ Histogram::FactoryGet("FinalDeltaHistogram", 1, 64, 8,
+ HistogramBase::kNoFlags);
+ histogram->Add(1);
+ histogram->Add(10);
+ histogram->Add(50);
+
+ std::unique_ptr<HistogramSamples> samples = histogram->SnapshotDelta();
+ EXPECT_EQ(3, samples->TotalCount());
+ EXPECT_EQ(1, samples->GetCount(1));
+ EXPECT_EQ(1, samples->GetCount(10));
+ EXPECT_EQ(1, samples->GetCount(50));
+ EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+
+ histogram->Add(2);
+ histogram->Add(50);
+
+ samples = histogram->SnapshotFinalDelta();
+ EXPECT_EQ(2, samples->TotalCount());
+ EXPECT_EQ(1, samples->GetCount(2));
+ EXPECT_EQ(1, samples->GetCount(50));
+ EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+}
+
+TEST_P(HistogramTest, ExponentialRangesTest) {
+ // Check that we got a nice exponential when there was enough room.
+ BucketRanges ranges(9);
+ Histogram::InitializeBucketRanges(1, 64, &ranges);
+ EXPECT_EQ(0, ranges.range(0));
+ int power_of_2 = 1;
+ for (int i = 1; i < 8; i++) {
+ EXPECT_EQ(power_of_2, ranges.range(i));
+ power_of_2 *= 2;
+ }
+ EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges.range(8));
+
+ // Check the corresponding Histogram will use the correct ranges.
+ Histogram* histogram = static_cast<Histogram*>(
+ Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags));
+ EXPECT_TRUE(ranges.Equals(histogram->bucket_ranges()));
+
+ // When bucket count is limited, exponential ranges will partially look like
+ // linear.
+ BucketRanges ranges2(16);
+ Histogram::InitializeBucketRanges(1, 32, &ranges2);
+
+ EXPECT_EQ(0, ranges2.range(0));
+ EXPECT_EQ(1, ranges2.range(1));
+ EXPECT_EQ(2, ranges2.range(2));
+ EXPECT_EQ(3, ranges2.range(3));
+ EXPECT_EQ(4, ranges2.range(4));
+ EXPECT_EQ(5, ranges2.range(5));
+ EXPECT_EQ(6, ranges2.range(6));
+ EXPECT_EQ(7, ranges2.range(7));
+ EXPECT_EQ(9, ranges2.range(8));
+ EXPECT_EQ(11, ranges2.range(9));
+ EXPECT_EQ(14, ranges2.range(10));
+ EXPECT_EQ(17, ranges2.range(11));
+ EXPECT_EQ(21, ranges2.range(12));
+ EXPECT_EQ(26, ranges2.range(13));
+ EXPECT_EQ(32, ranges2.range(14));
+ EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges2.range(15));
+
+ // Check the corresponding Histogram will use the correct ranges.
+ Histogram* histogram2 = static_cast<Histogram*>(
+ Histogram::FactoryGet("Histogram2", 1, 32, 15, HistogramBase::kNoFlags));
+ EXPECT_TRUE(ranges2.Equals(histogram2->bucket_ranges()));
+}
+
+TEST_P(HistogramTest, LinearRangesTest) {
+ BucketRanges ranges(9);
+ LinearHistogram::InitializeBucketRanges(1, 7, &ranges);
+ // Gets a nice linear set of bucket ranges.
+ for (int i = 0; i < 8; i++)
+ EXPECT_EQ(i, ranges.range(i));
+ EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges.range(8));
+
+  // The corresponding LinearHistogram should use the correct ranges.
+ Histogram* histogram = static_cast<Histogram*>(
+ LinearHistogram::FactoryGet("Linear", 1, 7, 8, HistogramBase::kNoFlags));
+ EXPECT_TRUE(ranges.Equals(histogram->bucket_ranges()));
+
+ // Linear ranges are not divisible.
+ BucketRanges ranges2(6);
+ LinearHistogram::InitializeBucketRanges(1, 6, &ranges2);
+ EXPECT_EQ(0, ranges2.range(0));
+ EXPECT_EQ(1, ranges2.range(1));
+ EXPECT_EQ(3, ranges2.range(2));
+ EXPECT_EQ(4, ranges2.range(3));
+ EXPECT_EQ(6, ranges2.range(4));
+ EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges2.range(5));
+  // The corresponding LinearHistogram should use the correct ranges.
+ Histogram* histogram2 = static_cast<Histogram*>(
+ LinearHistogram::FactoryGet("Linear2", 1, 6, 5, HistogramBase::kNoFlags));
+ EXPECT_TRUE(ranges2.Equals(histogram2->bucket_ranges()));
+}
+
+TEST_P(HistogramTest, ArrayToCustomRangesTest) {
+ const HistogramBase::Sample ranges[3] = {5, 10, 20};
+ std::vector<HistogramBase::Sample> ranges_vec =
+ CustomHistogram::ArrayToCustomRanges(ranges, 3);
+ ASSERT_EQ(6u, ranges_vec.size());
+ EXPECT_EQ(5, ranges_vec[0]);
+ EXPECT_EQ(6, ranges_vec[1]);
+ EXPECT_EQ(10, ranges_vec[2]);
+ EXPECT_EQ(11, ranges_vec[3]);
+ EXPECT_EQ(20, ranges_vec[4]);
+ EXPECT_EQ(21, ranges_vec[5]);
+}
+
+TEST_P(HistogramTest, CustomHistogramTest) {
+  // Well-prepared custom ranges.
+ std::vector<HistogramBase::Sample> custom_ranges;
+ custom_ranges.push_back(1);
+ custom_ranges.push_back(2);
+
+ Histogram* histogram = static_cast<Histogram*>(
+ CustomHistogram::FactoryGet("TestCustomHistogram1", custom_ranges,
+ HistogramBase::kNoFlags));
+ const BucketRanges* ranges = histogram->bucket_ranges();
+ ASSERT_EQ(4u, ranges->size());
+ EXPECT_EQ(0, ranges->range(0)); // Auto added.
+ EXPECT_EQ(1, ranges->range(1));
+ EXPECT_EQ(2, ranges->range(2));
+ EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges->range(3)); // Auto added.
+
+  // Unordered custom ranges.
+ custom_ranges.clear();
+ custom_ranges.push_back(2);
+ custom_ranges.push_back(1);
+ histogram = static_cast<Histogram*>(
+ CustomHistogram::FactoryGet("TestCustomHistogram2", custom_ranges,
+ HistogramBase::kNoFlags));
+ ranges = histogram->bucket_ranges();
+ ASSERT_EQ(4u, ranges->size());
+ EXPECT_EQ(0, ranges->range(0));
+ EXPECT_EQ(1, ranges->range(1));
+ EXPECT_EQ(2, ranges->range(2));
+ EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges->range(3));
+
+  // Custom ranges with duplicated values.
+ custom_ranges.clear();
+ custom_ranges.push_back(4);
+ custom_ranges.push_back(1);
+ custom_ranges.push_back(4);
+ histogram = static_cast<Histogram*>(
+ CustomHistogram::FactoryGet("TestCustomHistogram3", custom_ranges,
+ HistogramBase::kNoFlags));
+ ranges = histogram->bucket_ranges();
+ ASSERT_EQ(4u, ranges->size());
+ EXPECT_EQ(0, ranges->range(0));
+ EXPECT_EQ(1, ranges->range(1));
+ EXPECT_EQ(4, ranges->range(2));
+ EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges->range(3));
+}
+
+TEST_P(HistogramTest, CustomHistogramWithOnly2Buckets) {
+ // This test exploits the fact that the CustomHistogram can have 2 buckets,
+ // while the base class Histogram is *supposed* to have at least 3 buckets.
+ // We should probably change the restriction on the base class (or not inherit
+ // the base class!).
+
+ std::vector<HistogramBase::Sample> custom_ranges;
+ custom_ranges.push_back(4);
+
+ Histogram* histogram = static_cast<Histogram*>(
+ CustomHistogram::FactoryGet("2BucketsCustomHistogram", custom_ranges,
+ HistogramBase::kNoFlags));
+ const BucketRanges* ranges = histogram->bucket_ranges();
+ ASSERT_EQ(3u, ranges->size());
+ EXPECT_EQ(0, ranges->range(0));
+ EXPECT_EQ(4, ranges->range(1));
+ EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges->range(2));
+}
+
+TEST_P(HistogramTest, AddCountTest) {
+ const size_t kBucketCount = 50;
+ Histogram* histogram = static_cast<Histogram*>(
+ Histogram::FactoryGet("AddCountHistogram", 10, 100, kBucketCount,
+ HistogramBase::kNoFlags));
+
+ histogram->AddCount(20, 15);
+ histogram->AddCount(30, 14);
+
+ std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+ EXPECT_EQ(29, samples->TotalCount());
+ EXPECT_EQ(15, samples->GetCount(20));
+ EXPECT_EQ(14, samples->GetCount(30));
+
+ histogram->AddCount(20, 25);
+ histogram->AddCount(30, 24);
+
+ std::unique_ptr<HistogramSamples> samples2 = histogram->SnapshotSamples();
+ EXPECT_EQ(78, samples2->TotalCount());
+ EXPECT_EQ(40, samples2->GetCount(20));
+ EXPECT_EQ(38, samples2->GetCount(30));
+}
+
+TEST_P(HistogramTest, AddCount_LargeValuesDontOverflow) {
+ const size_t kBucketCount = 50;
+ Histogram* histogram = static_cast<Histogram*>(
+ Histogram::FactoryGet("AddCountHistogram", 10, 1000000000, kBucketCount,
+ HistogramBase::kNoFlags));
+
+ histogram->AddCount(200000000, 15);
+ histogram->AddCount(300000000, 14);
+
+ std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+ EXPECT_EQ(29, samples->TotalCount());
+ EXPECT_EQ(15, samples->GetCount(200000000));
+ EXPECT_EQ(14, samples->GetCount(300000000));
+
+ histogram->AddCount(200000000, 25);
+ histogram->AddCount(300000000, 24);
+
+ std::unique_ptr<HistogramSamples> samples2 = histogram->SnapshotSamples();
+ EXPECT_EQ(78, samples2->TotalCount());
+ EXPECT_EQ(40, samples2->GetCount(200000000));
+ EXPECT_EQ(38, samples2->GetCount(300000000));
+ EXPECT_EQ(19400000000LL, samples2->sum());
+}
+
+// Make sure histogram handles out-of-bounds data gracefully.
+TEST_P(HistogramTest, BoundsTest) {
+ const size_t kBucketCount = 50;
+ Histogram* histogram = static_cast<Histogram*>(
+ Histogram::FactoryGet("Bounded", 10, 100, kBucketCount,
+ HistogramBase::kNoFlags));
+
+ // Put two samples "out of bounds" above and below.
+ histogram->Add(5);
+ histogram->Add(-50);
+
+ histogram->Add(100);
+ histogram->Add(10000);
+
+ // Verify they landed in the underflow, and overflow buckets.
+ std::unique_ptr<SampleVector> samples = histogram->SnapshotSampleVector();
+ EXPECT_EQ(2, samples->GetCountAtIndex(0));
+ EXPECT_EQ(0, samples->GetCountAtIndex(1));
+ size_t array_size = histogram->bucket_count();
+ EXPECT_EQ(kBucketCount, array_size);
+ EXPECT_EQ(0, samples->GetCountAtIndex(array_size - 2));
+ EXPECT_EQ(2, samples->GetCountAtIndex(array_size - 1));
+
+ std::vector<int> custom_ranges;
+ custom_ranges.push_back(10);
+ custom_ranges.push_back(50);
+ custom_ranges.push_back(100);
+ Histogram* test_custom_histogram = static_cast<Histogram*>(
+ CustomHistogram::FactoryGet("TestCustomRangeBoundedHistogram",
+ custom_ranges, HistogramBase::kNoFlags));
+
+ // Put two samples "out of bounds" above and below.
+ test_custom_histogram->Add(5);
+ test_custom_histogram->Add(-50);
+ test_custom_histogram->Add(100);
+ test_custom_histogram->Add(1000);
+ test_custom_histogram->Add(INT_MAX);
+
+ // Verify they landed in the underflow, and overflow buckets.
+ std::unique_ptr<SampleVector> custom_samples =
+ test_custom_histogram->SnapshotSampleVector();
+ EXPECT_EQ(2, custom_samples->GetCountAtIndex(0));
+ EXPECT_EQ(0, custom_samples->GetCountAtIndex(1));
+ size_t bucket_count = test_custom_histogram->bucket_count();
+ EXPECT_EQ(0, custom_samples->GetCountAtIndex(bucket_count - 2));
+ EXPECT_EQ(3, custom_samples->GetCountAtIndex(bucket_count - 1));
+}
+
+// Check to be sure samples land as expected in "correct" buckets.
+TEST_P(HistogramTest, BucketPlacementTest) {
+ Histogram* histogram = static_cast<Histogram*>(
+ Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags));
+
+ // Add i+1 samples to the i'th bucket.
+ histogram->Add(0);
+ int power_of_2 = 1;
+ for (int i = 1; i < 8; i++) {
+ for (int j = 0; j <= i; j++)
+ histogram->Add(power_of_2);
+ power_of_2 *= 2;
+ }
+
+ // Check to see that the bucket counts reflect our additions.
+ std::unique_ptr<SampleVector> samples = histogram->SnapshotSampleVector();
+ for (int i = 0; i < 8; i++)
+ EXPECT_EQ(i + 1, samples->GetCountAtIndex(i));
+}
+
+TEST_P(HistogramTest, CorruptSampleCounts) {
+ // The internal code creates histograms via macros and thus keeps static
+ // pointers to them. If those pointers are to persistent memory which will
+ // be free'd then any following calls to that code will crash with a
+ // segmentation violation.
+ if (use_persistent_histogram_allocator_)
+ return;
+
+ Histogram* histogram = static_cast<Histogram*>(
+ Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags));
+
+ // Add some samples.
+ histogram->Add(20);
+ histogram->Add(40);
+
+ std::unique_ptr<SampleVector> snapshot = histogram->SnapshotSampleVector();
+ EXPECT_EQ(HistogramBase::NO_INCONSISTENCIES,
+ histogram->FindCorruption(*snapshot));
+ EXPECT_EQ(2, snapshot->redundant_count());
+ EXPECT_EQ(2, snapshot->TotalCount());
+
+ snapshot->counts_[3] += 100; // Sample count won't match redundant count.
+ EXPECT_EQ(HistogramBase::COUNT_LOW_ERROR,
+ histogram->FindCorruption(*snapshot));
+ snapshot->counts_[2] -= 200;
+ EXPECT_EQ(HistogramBase::COUNT_HIGH_ERROR,
+ histogram->FindCorruption(*snapshot));
+
+ // But we can't spot a corruption if it is compensated for.
+ snapshot->counts_[1] += 100;
+ EXPECT_EQ(HistogramBase::NO_INCONSISTENCIES,
+ histogram->FindCorruption(*snapshot));
+}
+
+TEST_P(HistogramTest, CorruptBucketBounds) {
+ Histogram* histogram = static_cast<Histogram*>(
+ Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags));
+
+ std::unique_ptr<HistogramSamples> snapshot = histogram->SnapshotSamples();
+ EXPECT_EQ(HistogramBase::NO_INCONSISTENCIES,
+ histogram->FindCorruption(*snapshot));
+
+ BucketRanges* bucket_ranges =
+ const_cast<BucketRanges*>(histogram->bucket_ranges());
+ HistogramBase::Sample tmp = bucket_ranges->range(1);
+ bucket_ranges->set_range(1, bucket_ranges->range(2));
+ bucket_ranges->set_range(2, tmp);
+ EXPECT_EQ(
+ HistogramBase::BUCKET_ORDER_ERROR | HistogramBase::RANGE_CHECKSUM_ERROR,
+ histogram->FindCorruption(*snapshot));
+
+ bucket_ranges->set_range(2, bucket_ranges->range(1));
+ bucket_ranges->set_range(1, tmp);
+ EXPECT_EQ(0U, histogram->FindCorruption(*snapshot));
+
+ // Show that two simple changes don't offset each other
+ bucket_ranges->set_range(3, bucket_ranges->range(3) + 1);
+ EXPECT_EQ(HistogramBase::RANGE_CHECKSUM_ERROR,
+ histogram->FindCorruption(*snapshot));
+
+ bucket_ranges->set_range(4, bucket_ranges->range(4) - 1);
+ EXPECT_EQ(HistogramBase::RANGE_CHECKSUM_ERROR,
+ histogram->FindCorruption(*snapshot));
+
+ // Repair histogram so that destructor won't DCHECK().
+ bucket_ranges->set_range(3, bucket_ranges->range(3) - 1);
+ bucket_ranges->set_range(4, bucket_ranges->range(4) + 1);
+}
+
+TEST_P(HistogramTest, HistogramSerializeInfo) {
+ Histogram* histogram = static_cast<Histogram*>(
+ Histogram::FactoryGet("Histogram", 1, 64, 8,
+ HistogramBase::kIPCSerializationSourceFlag));
+ Pickle pickle;
+ histogram->SerializeInfo(&pickle);
+
+ PickleIterator iter(pickle);
+
+ int type;
+ EXPECT_TRUE(iter.ReadInt(&type));
+ EXPECT_EQ(HISTOGRAM, type);
+
+ std::string name;
+ EXPECT_TRUE(iter.ReadString(&name));
+ EXPECT_EQ("Histogram", name);
+
+ int flag;
+ EXPECT_TRUE(iter.ReadInt(&flag));
+ EXPECT_EQ(HistogramBase::kIPCSerializationSourceFlag,
+ flag & ~HistogramBase::kIsPersistent);
+
+ int min;
+ EXPECT_TRUE(iter.ReadInt(&min));
+ EXPECT_EQ(1, min);
+
+ int max;
+ EXPECT_TRUE(iter.ReadInt(&max));
+ EXPECT_EQ(64, max);
+
+ uint32_t bucket_count;
+ EXPECT_TRUE(iter.ReadUInt32(&bucket_count));
+ EXPECT_EQ(8u, bucket_count);
+
+ uint32_t checksum;
+ EXPECT_TRUE(iter.ReadUInt32(&checksum));
+ EXPECT_EQ(histogram->bucket_ranges()->checksum(), checksum);
+
+ // No more data in the pickle.
+ EXPECT_FALSE(iter.SkipBytes(1));
+}
+
+TEST_P(HistogramTest, CustomHistogramSerializeInfo) {
+ std::vector<int> custom_ranges;
+ custom_ranges.push_back(10);
+ custom_ranges.push_back(100);
+
+ HistogramBase* custom_histogram = CustomHistogram::FactoryGet(
+ "TestCustomRangeBoundedHistogram",
+ custom_ranges,
+ HistogramBase::kNoFlags);
+ Pickle pickle;
+ custom_histogram->SerializeInfo(&pickle);
+
+ // Validate the pickle.
+ PickleIterator iter(pickle);
+
+ int i;
+ std::string s;
+ uint32_t bucket_count;
+ uint32_t ui32;
+ EXPECT_TRUE(iter.ReadInt(&i) && iter.ReadString(&s) && iter.ReadInt(&i) &&
+ iter.ReadInt(&i) && iter.ReadInt(&i) &&
+ iter.ReadUInt32(&bucket_count) && iter.ReadUInt32(&ui32));
+ EXPECT_EQ(3u, bucket_count);
+
+ int range;
+ EXPECT_TRUE(iter.ReadInt(&range));
+ EXPECT_EQ(10, range);
+ EXPECT_TRUE(iter.ReadInt(&range));
+ EXPECT_EQ(100, range);
+
+ // No more data in the pickle.
+ EXPECT_FALSE(iter.SkipBytes(1));
+}
+
+TEST_P(HistogramTest, BadConstruction) {
+ HistogramBase* histogram = Histogram::FactoryGet(
+ "BadConstruction", 0, 100, 8, HistogramBase::kNoFlags);
+ EXPECT_TRUE(histogram->HasConstructionArguments(1, 100, 8));
+
+ // Try to get the same histogram name with different arguments.
+ HistogramBase* bad_histogram = Histogram::FactoryGet(
+ "BadConstruction", 0, 100, 7, HistogramBase::kNoFlags);
+ EXPECT_EQ(NULL, bad_histogram);
+ bad_histogram = Histogram::FactoryGet(
+ "BadConstruction", 0, 99, 8, HistogramBase::kNoFlags);
+ EXPECT_EQ(NULL, bad_histogram);
+
+ HistogramBase* linear_histogram = LinearHistogram::FactoryGet(
+ "BadConstructionLinear", 0, 100, 8, HistogramBase::kNoFlags);
+ EXPECT_TRUE(linear_histogram->HasConstructionArguments(1, 100, 8));
+
+ // Try to get the same histogram name with different arguments.
+ bad_histogram = LinearHistogram::FactoryGet(
+ "BadConstructionLinear", 0, 100, 7, HistogramBase::kNoFlags);
+ EXPECT_EQ(NULL, bad_histogram);
+ bad_histogram = LinearHistogram::FactoryGet(
+ "BadConstructionLinear", 10, 100, 8, HistogramBase::kNoFlags);
+ EXPECT_EQ(NULL, bad_histogram);
+}
+
+TEST_P(HistogramTest, FactoryTime) {
+ const int kTestCreateCount = 1 << 14; // Must be power-of-2.
+ const int kTestLookupCount = 100000;
+ const int kTestAddCount = 1000000;
+
+ // Create all histogram names in advance for accurate timing below.
+ std::vector<std::string> histogram_names;
+ for (int i = 0; i < kTestCreateCount; ++i) {
+ histogram_names.push_back(
+ StringPrintf("TestHistogram.%d", i % kTestCreateCount));
+ }
+
+ // Calculate cost of creating histograms.
+ TimeTicks create_start = TimeTicks::Now();
+ for (int i = 0; i < kTestCreateCount; ++i) {
+ Histogram::FactoryGet(histogram_names[i], 1, 100, 10,
+ HistogramBase::kNoFlags);
+ }
+ TimeDelta create_ticks = TimeTicks::Now() - create_start;
+ int64_t create_ms = create_ticks.InMilliseconds();
+
+ VLOG(1) << kTestCreateCount << " histogram creations took " << create_ms
+ << "ms or about "
+ << (create_ms * 1000000) / kTestCreateCount
+ << "ns each.";
+
+ // Calculate cost of looking up existing histograms.
+ TimeTicks lookup_start = TimeTicks::Now();
+ for (int i = 0; i < kTestLookupCount; ++i) {
+ // 6007 is co-prime with kTestCreateCount and so will do lookups in an
+ // order less likely to be cacheable (but still hit them all) should the
+ // underlying storage use the exact histogram name as the key.
+ const int i_mult = 6007;
+ static_assert(i_mult < INT_MAX / kTestCreateCount, "Multiplier too big");
+ int index = (i * i_mult) & (kTestCreateCount - 1);
+ Histogram::FactoryGet(histogram_names[index], 1, 100, 10,
+ HistogramBase::kNoFlags);
+ }
+ TimeDelta lookup_ticks = TimeTicks::Now() - lookup_start;
+ int64_t lookup_ms = lookup_ticks.InMilliseconds();
+
+ VLOG(1) << kTestLookupCount << " histogram lookups took " << lookup_ms
+ << "ms or about "
+ << (lookup_ms * 1000000) / kTestLookupCount
+ << "ns each.";
+
+ // Calculate cost of accessing histograms.
+ HistogramBase* histogram = Histogram::FactoryGet(
+ histogram_names[0], 1, 100, 10, HistogramBase::kNoFlags);
+ ASSERT_TRUE(histogram);
+ TimeTicks add_start = TimeTicks::Now();
+ for (int i = 0; i < kTestAddCount; ++i)
+ histogram->Add(i & 127);
+ TimeDelta add_ticks = TimeTicks::Now() - add_start;
+ int64_t add_ms = add_ticks.InMilliseconds();
+
+ VLOG(1) << kTestAddCount << " histogram adds took " << add_ms
+ << "ms or about "
+ << (add_ms * 1000000) / kTestAddCount
+ << "ns each.";
+}
+
+#if GTEST_HAS_DEATH_TEST
+// For Histogram, LinearHistogram and CustomHistogram, the minimum for a
+// declared range is 1, while the maximum is (HistogramBase::kSampleType_MAX -
+// 1). But we accept ranges exceeding those limits; they are silently clamped
+// to those limits. This is for backwards compatibility.
+TEST(HistogramDeathTest, BadRangesTest) {
+ HistogramBase* histogram = Histogram::FactoryGet(
+ "BadRanges", 0, HistogramBase::kSampleType_MAX, 8,
+ HistogramBase::kNoFlags);
+ EXPECT_TRUE(
+ histogram->HasConstructionArguments(
+ 1, HistogramBase::kSampleType_MAX - 1, 8));
+
+ HistogramBase* linear_histogram = LinearHistogram::FactoryGet(
+ "BadRangesLinear", 0, HistogramBase::kSampleType_MAX, 8,
+ HistogramBase::kNoFlags);
+ EXPECT_TRUE(
+ linear_histogram->HasConstructionArguments(
+ 1, HistogramBase::kSampleType_MAX - 1, 8));
+
+ std::vector<int> custom_ranges;
+ custom_ranges.push_back(0);
+ custom_ranges.push_back(5);
+ Histogram* custom_histogram = static_cast<Histogram*>(
+ CustomHistogram::FactoryGet(
+ "BadRangesCustom", custom_ranges, HistogramBase::kNoFlags));
+ const BucketRanges* ranges = custom_histogram->bucket_ranges();
+ ASSERT_EQ(3u, ranges->size());
+ EXPECT_EQ(0, ranges->range(0));
+ EXPECT_EQ(5, ranges->range(1));
+ EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges->range(2));
+
+  // CustomHistogram does not accept kSampleType_MAX as a range.
+ custom_ranges.push_back(HistogramBase::kSampleType_MAX);
+ EXPECT_DEATH(CustomHistogram::FactoryGet("BadRangesCustom2", custom_ranges,
+ HistogramBase::kNoFlags),
+ "");
+
+ // CustomHistogram needs at least 1 valid range.
+ custom_ranges.clear();
+ custom_ranges.push_back(0);
+ EXPECT_DEATH(CustomHistogram::FactoryGet("BadRangesCustom3", custom_ranges,
+ HistogramBase::kNoFlags),
+ "");
+}
+#endif
+
+} // namespace base
diff --git a/libchrome/base/metrics/metrics_hashes.cc b/libchrome/base/metrics/metrics_hashes.cc
new file mode 100644
index 0000000..5672b06
--- /dev/null
+++ b/libchrome/base/metrics/metrics_hashes.cc
@@ -0,0 +1,31 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/metrics_hashes.h"
+
+#include "base/logging.h"
+#include "base/md5.h"
+#include "base/sys_byteorder.h"
+
+namespace base {
+
+namespace {
+
+// Converts the 8-byte prefix of an MD5 hash into a uint64_t value.
+inline uint64_t DigestToUInt64(const base::MD5Digest& digest) {
+ uint64_t value;
+ DCHECK_GE(sizeof(digest.a), sizeof(value));
+ memcpy(&value, digest.a, sizeof(value));
+ return base::NetToHost64(value);
+}
+
+} // namespace
+
+uint64_t HashMetricName(base::StringPiece name) {
+ base::MD5Digest digest;
+ base::MD5Sum(name.data(), name.size(), &digest);
+ return DigestToUInt64(digest);
+}
+
+}  // namespace base
diff --git a/libchrome/base/metrics/metrics_hashes.h b/libchrome/base/metrics/metrics_hashes.h
new file mode 100644
index 0000000..d05c4ba
--- /dev/null
+++ b/libchrome/base/metrics/metrics_hashes.h
@@ -0,0 +1,21 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_METRICS_HASHES_H_
+#define BASE_METRICS_METRICS_HASHES_H_
+
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// Computes a uint64_t hash of a given string based on its MD5 hash. Suitable
+// for metric names.
+BASE_EXPORT uint64_t HashMetricName(base::StringPiece name);
+
+}  // namespace base
+
+#endif // BASE_METRICS_METRICS_HASHES_H_
diff --git a/libchrome/base/metrics/metrics_hashes_unittest.cc b/libchrome/base/metrics/metrics_hashes_unittest.cc
new file mode 100644
index 0000000..aea254e
--- /dev/null
+++ b/libchrome/base/metrics/metrics_hashes_unittest.cc
@@ -0,0 +1,35 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/metrics_hashes.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/format_macros.h"
+#include "base/macros.h"
+#include "base/strings/stringprintf.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Make sure our ID hashes are the same as what we see on the server side.
+TEST(MetricsUtilTest, HashMetricName) {
+ static const struct {
+ std::string input;
+ std::string output;
+ } cases[] = {
+ {"Back", "0x0557fa923dcee4d0"},
+ {"Forward", "0x67d2f6740a8eaebf"},
+ {"NewTab", "0x290eb683f96572f1"},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ uint64_t hash = HashMetricName(cases[i].input);
+ std::string hash_hex = base::StringPrintf("0x%016" PRIx64, hash);
+ EXPECT_EQ(cases[i].output, hash_hex);
+ }
+}
+
+}  // namespace base
diff --git a/libchrome/base/metrics/persistent_histogram_allocator.cc b/libchrome/base/metrics/persistent_histogram_allocator.cc
new file mode 100644
index 0000000..5af3486
--- /dev/null
+++ b/libchrome/base/metrics/persistent_histogram_allocator.cc
@@ -0,0 +1,866 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_histogram_allocator.h"
+
+#include <memory>
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/important_file_writer.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/persistent_sample_map.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/pickle.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+namespace {
+
+// Name of histogram for storing results of local operations.
+const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result";
+
+// Type identifiers used when storing in persistent memory so they can be
+// identified during extraction; the first 4 bytes of the SHA1 of the name
+// is used as a unique integer. A "version number" is added to the base
+// so that, if the structure of that object changes, stored older versions
+// will be safely ignored.
+enum : uint32_t {
+ kTypeIdHistogram = 0xF1645910 + 2, // SHA1(Histogram) v2
+ kTypeIdRangesArray = 0xBCEA225A + 1, // SHA1(RangesArray) v1
+ kTypeIdCountsArray = 0x53215530 + 1, // SHA1(CountsArray) v1
+};
+
+// The current globally-active persistent allocator for all new histograms.
+// The object held here will obviously not be destructed at process exit
+// but that's best since PersistentMemoryAllocator objects (that underlie
+// GlobalHistogramAllocator objects) are explicitly forbidden from doing
+// anything essential at exit anyway due to the fact that they depend on data
+// managed elsewhere and which could be destructed first.
+GlobalHistogramAllocator* g_allocator = nullptr;
+
+// Take an array of range boundaries and create a proper BucketRanges object
+// which is returned to the caller. A return of nullptr indicates that the
+// passed boundaries are invalid.
+std::unique_ptr<BucketRanges> CreateRangesFromData(
+ HistogramBase::Sample* ranges_data,
+ uint32_t ranges_checksum,
+ size_t count) {
+ // To avoid racy destruction at shutdown, the following may be leaked.
+ std::unique_ptr<BucketRanges> ranges(new BucketRanges(count));
+ DCHECK_EQ(count, ranges->size());
+ for (size_t i = 0; i < count; ++i) {
+ if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
+ return nullptr;
+ ranges->set_range(i, ranges_data[i]);
+ }
+
+ ranges->ResetChecksum();
+ if (ranges->checksum() != ranges_checksum)
+ return nullptr;
+
+ return ranges;
+}
+
+// Calculate the number of bytes required to store all of a histogram's
+// "counts". This will return zero (0) if |bucket_count| is not valid.
+size_t CalculateRequiredCountsBytes(size_t bucket_count) {
+ // 2 because each "sample count" also requires a backup "logged count"
+ // used for calculating the delta during snapshot operations.
+ const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);
+
+ // If the |bucket_count| is such that it would overflow the return type,
+ // perhaps as the result of a malicious actor, then return zero to
+ // indicate the problem to the caller.
+ if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
+ return 0;
+
+ return bucket_count * kBytesPerBucket;
+}
+
+} // namespace
+
+const Feature kPersistentHistogramsFeature{
+ "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT
+};
+
+
+PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
+ PersistentMemoryAllocator* allocator)
+ : allocator_(allocator), record_iterator_(allocator) {}
+
+PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() {}
+
+PersistentSampleMapRecords*
+PersistentSparseHistogramDataManager::UseSampleMapRecords(uint64_t id,
+ const void* user) {
+ base::AutoLock auto_lock(lock_);
+ return GetSampleMapRecordsWhileLocked(id)->Acquire(user);
+}
+
+PersistentSampleMapRecords*
+PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
+ uint64_t id) {
+ lock_.AssertAcquired();
+
+ auto found = sample_records_.find(id);
+ if (found != sample_records_.end())
+ return found->second.get();
+
+ std::unique_ptr<PersistentSampleMapRecords>& samples = sample_records_[id];
+ samples = WrapUnique(new PersistentSampleMapRecords(this, id));
+ return samples.get();
+}
+
+bool PersistentSparseHistogramDataManager::LoadRecords(
+ PersistentSampleMapRecords* sample_map_records) {
+ // DataManager must be locked in order to access the found_ field of any
+ // PersistentSampleMapRecords object.
+ base::AutoLock auto_lock(lock_);
+ bool found = false;
+
+ // If there are already "found" entries for the passed object, move them.
+ if (!sample_map_records->found_.empty()) {
+ sample_map_records->records_.reserve(sample_map_records->records_.size() +
+ sample_map_records->found_.size());
+ sample_map_records->records_.insert(sample_map_records->records_.end(),
+ sample_map_records->found_.begin(),
+ sample_map_records->found_.end());
+ sample_map_records->found_.clear();
+ found = true;
+ }
+
+ // Acquiring a lock is a semi-expensive operation so load some records with
+ // each call. More than this number may be loaded if it takes longer to
+ // find at least one matching record for the passed object.
+ const int kMinimumNumberToLoad = 10;
+ const uint64_t match_id = sample_map_records->sample_map_id_;
+
+  // Loop while no entry is found OR we haven't yet loaded the minimum number.
+ // This will continue reading even after a match is found.
+ for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
+ // Get the next sample-record. The iterator will always resume from where
+ // it left off even if it previously had nothing further to return.
+ uint64_t found_id;
+ PersistentMemoryAllocator::Reference ref =
+ PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
+ &found_id);
+
+ // Stop immediately if there are none.
+ if (!ref)
+ break;
+
+ // The sample-record could be for any sparse histogram. Add the reference
+ // to the appropriate collection for later use.
+ if (found_id == match_id) {
+ sample_map_records->records_.push_back(ref);
+ found = true;
+ } else {
+ PersistentSampleMapRecords* samples =
+ GetSampleMapRecordsWhileLocked(found_id);
+ DCHECK(samples);
+ samples->found_.push_back(ref);
+ }
+ }
+
+ return found;
+}
+
+
+PersistentSampleMapRecords::PersistentSampleMapRecords(
+ PersistentSparseHistogramDataManager* data_manager,
+ uint64_t sample_map_id)
+ : data_manager_(data_manager), sample_map_id_(sample_map_id) {}
+
+PersistentSampleMapRecords::~PersistentSampleMapRecords() {}
+
+PersistentSampleMapRecords* PersistentSampleMapRecords::Acquire(
+ const void* user) {
+ DCHECK(!user_);
+ user_ = user;
+ seen_ = 0;
+ return this;
+}
+
+void PersistentSampleMapRecords::Release(const void* user) {
+ DCHECK_EQ(user_, user);
+ user_ = nullptr;
+}
+
+PersistentMemoryAllocator::Reference PersistentSampleMapRecords::GetNext() {
+  DCHECK(user_);
+
+  // If there are no unseen records, lock and swap in all the found ones.
+  if (records_.size() == seen_) {
+    if (!data_manager_->LoadRecords(this))
+      return 0;  // 0 is the null Reference; "false" hid an implicit bool cast.
+  }
+
+  // Return the next record. Records *must* be returned in the same order
+  // they are found in the persistent memory in order to ensure that all
+  // objects using this data always have the same state. Race conditions
+  // can cause duplicate records so using the "first found" is the only
+  // guarantee that all objects always access the same one.
+  DCHECK_LT(seen_, records_.size());
+  return records_[seen_++];
+}
+
+PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
+ HistogramBase::Sample value) {
+ return PersistentSampleMap::CreatePersistentRecord(data_manager_->allocator_,
+ sample_map_id_, value);
+}
+
+
+// This data will be held in persistent memory in order for processes to
+// locate and use histograms created elsewhere.
+struct PersistentHistogramAllocator::PersistentHistogramData {
+ int32_t histogram_type;
+ int32_t flags;
+ int32_t minimum;
+ int32_t maximum;
+ uint32_t bucket_count;
+ PersistentMemoryAllocator::Reference ranges_ref;
+ uint32_t ranges_checksum;
+ PersistentMemoryAllocator::Reference counts_ref;
+ HistogramSamples::Metadata samples_metadata;
+ HistogramSamples::Metadata logged_metadata;
+
+ // Space for the histogram name will be added during the actual allocation
+ // request. This must be the last field of the structure. A zero-size array
+ // or a "flexible" array would be preferred but is not (yet) valid C++.
+ char name[1];
+};
+
+PersistentHistogramAllocator::Iterator::Iterator(
+ PersistentHistogramAllocator* allocator)
+ : allocator_(allocator), memory_iter_(allocator->memory_allocator()) {}
+
+std::unique_ptr<HistogramBase>
+PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
+ PersistentMemoryAllocator::Reference ref;
+ while ((ref = memory_iter_.GetNextOfType(kTypeIdHistogram)) != 0) {
+ if (ref != ignore)
+ return allocator_->GetHistogram(ref);
+ }
+ return nullptr;
+}
+
+
+PersistentHistogramAllocator::PersistentHistogramAllocator(
+ std::unique_ptr<PersistentMemoryAllocator> memory)
+ : memory_allocator_(std::move(memory)),
+ sparse_histogram_data_manager_(memory_allocator_.get()) {}
+
+PersistentHistogramAllocator::~PersistentHistogramAllocator() {}
+
+std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
+ Reference ref) {
+ // Unfortunately, the histogram "pickle" methods cannot be used as part of
+  // the persistence because the deserialization methods always create local
+ // count data (while these must reference the persistent counts) and always
+ // add it to the local list of known histograms (while these may be simple
+ // references to histograms in other processes).
+ PersistentHistogramData* histogram_data =
+ memory_allocator_->GetAsObject<PersistentHistogramData>(
+ ref, kTypeIdHistogram);
+ size_t length = memory_allocator_->GetAllocSize(ref);
+ if (!histogram_data ||
+ reinterpret_cast<char*>(histogram_data)[length - 1] != '\0') {
+ RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA);
+ NOTREACHED();
+ return nullptr;
+ }
+ return CreateHistogram(histogram_data);
+}
+
+std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
+ HistogramType histogram_type,
+ const std::string& name,
+ int minimum,
+ int maximum,
+ const BucketRanges* bucket_ranges,
+ int32_t flags,
+ Reference* ref_ptr) {
+ // If the allocator is corrupt, don't waste time trying anything else.
+ // This also allows differentiating on the dashboard between allocations
+ // failed due to a corrupt allocator and the number of process instances
+  // with one, the latter being indicated by "newly corrupt", below.
+ if (memory_allocator_->IsCorrupt()) {
+ RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT);
+ return nullptr;
+ }
+
+ // Create the metadata necessary for a persistent sparse histogram. This
+ // is done first because it is a small subset of what is required for
+ // other histograms.
+ PersistentMemoryAllocator::Reference histogram_ref =
+ memory_allocator_->Allocate(
+ offsetof(PersistentHistogramData, name) + name.length() + 1,
+ kTypeIdHistogram);
+ PersistentHistogramData* histogram_data =
+ memory_allocator_->GetAsObject<PersistentHistogramData>(histogram_ref,
+ kTypeIdHistogram);
+ if (histogram_data) {
+ memcpy(histogram_data->name, name.c_str(), name.size() + 1);
+ histogram_data->histogram_type = histogram_type;
+ histogram_data->flags = flags | HistogramBase::kIsPersistent;
+ }
+
+ // Create the remaining metadata necessary for regular histograms.
+ if (histogram_type != SPARSE_HISTOGRAM) {
+ size_t bucket_count = bucket_ranges->bucket_count();
+ size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
+ if (counts_bytes == 0) {
+ // |bucket_count| was out-of-range.
+ NOTREACHED();
+ return nullptr;
+ }
+
+ size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample);
+ PersistentMemoryAllocator::Reference counts_ref =
+ memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray);
+ PersistentMemoryAllocator::Reference ranges_ref =
+ memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
+ HistogramBase::Sample* ranges_data =
+ memory_allocator_->GetAsObject<HistogramBase::Sample>(
+ ranges_ref, kTypeIdRangesArray);
+
+ // Only continue here if all allocations were successful. If they weren't,
+ // there is no way to free the space but that's not really a problem since
+ // the allocations only fail because the space is full or corrupt and so
+ // any future attempts will also fail.
+ if (counts_ref && ranges_data && histogram_data) {
+ for (size_t i = 0; i < bucket_ranges->size(); ++i)
+ ranges_data[i] = bucket_ranges->range(i);
+
+ histogram_data->minimum = minimum;
+ histogram_data->maximum = maximum;
+ // |bucket_count| must fit within 32-bits or the allocation of the counts
+ // array would have failed for being too large; the allocator supports
+ // less than 4GB total size.
+ histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
+ histogram_data->ranges_ref = ranges_ref;
+ histogram_data->ranges_checksum = bucket_ranges->checksum();
+ histogram_data->counts_ref = counts_ref;
+ } else {
+ histogram_data = nullptr; // Clear this for proper handling below.
+ }
+ }
+
+ if (histogram_data) {
+ // Create the histogram using resources in persistent memory. This ends up
+    // resolving the "ref" values stored in histogram_data instead of just
+ // using what is already known above but avoids duplicating the switch
+ // statement here and serves as a double-check that everything is
+    // correct before committing the new histogram to persistent space.
+ std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
+ DCHECK(histogram);
+ if (ref_ptr != nullptr)
+ *ref_ptr = histogram_ref;
+
+ // By storing the reference within the allocator to this histogram, the
+ // next import (which will happen before the next histogram creation)
+ // will know to skip it.
+ // See also the comment in ImportHistogramsToStatisticsRecorder().
+ subtle::NoBarrier_Store(&last_created_, histogram_ref);
+ return histogram;
+ }
+
+ CreateHistogramResultType result;
+ if (memory_allocator_->IsCorrupt()) {
+ RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT);
+ result = CREATE_HISTOGRAM_ALLOCATOR_CORRUPT;
+ } else if (memory_allocator_->IsFull()) {
+ result = CREATE_HISTOGRAM_ALLOCATOR_FULL;
+ } else {
+ result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
+ }
+ RecordCreateHistogramResult(result);
+ NOTREACHED() << "error=" << result;
+
+ return nullptr;
+}
+
+void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
+ bool registered) {
+ // If the created persistent histogram was registered then it needs to
+ // be marked as "iterable" in order to be found by other processes.
+ if (registered)
+ memory_allocator_->MakeIterable(ref);
+ // If it wasn't registered then a race condition must have caused
+ // two to be created. The allocator does not support releasing the
+ // acquired memory so just change the type to be empty.
+ else
+ memory_allocator_->ChangeType(ref, 0, kTypeIdHistogram);
+}
+
+void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
+    HistogramBase* histogram) {
+  DCHECK(histogram);
+
+  // Locate (or build) the matching histogram owned by the global
+  // StatisticsRecorder; that object is the merge destination.
+  HistogramBase* recorder_histogram =
+      GetOrCreateStatisticsRecorderHistogram(histogram);
+  if (recorder_histogram) {
+    // Fold in only the samples accumulated since the last merge.
+    // SnapshotDelta() advances the "logged" state, so repeated merges
+    // are safe and each sample is counted once.
+    recorder_histogram->AddSamples(*histogram->SnapshotDelta());
+    return;
+  }
+
+  // Lookup/creation should never fail but if it does, this merge is
+  // simply skipped. Because the "logged" state was not advanced, a later
+  // successful call will pick up whatever was missed here. Losing some
+  // metric data is better than crashing.
+  NOTREACHED();
+}
+
+void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
+    const HistogramBase* histogram) {
+  DCHECK(histogram);
+
+  // Find (or construct) the StatisticsRecorder-owned equivalent of the
+  // passed histogram.
+  HistogramBase* recorder_histogram =
+      GetOrCreateStatisticsRecorderHistogram(histogram);
+  if (!recorder_histogram) {
+    // Should never happen; if it does, the final samples are dropped,
+    // which is preferable to crashing.
+    NOTREACHED();
+    return;
+  }
+
+  // Merge the not-yet-logged samples. SnapshotFinalDelta() updates no
+  // state, which lets it work on read-only objects but also means this
+  // must not be called twice for the same histogram.
+  recorder_histogram->AddSamples(*histogram->SnapshotFinalDelta());
+}
+
+PersistentSampleMapRecords* PersistentHistogramAllocator::UseSampleMapRecords(
+    uint64_t id,
+    const void* user) {
+  // Forward to the sparse-histogram data manager, which owns the records.
+  return sparse_histogram_data_manager_.UseSampleMapRecords(id, user);
+}
+
+void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
+  // Delegate to the underlying memory allocator, which creates and owns
+  // the histograms tracking its own memory use.
+  memory_allocator_->CreateTrackingHistograms(name);
+}
+
+void PersistentHistogramAllocator::UpdateTrackingHistograms() {
+  // Delegate to the underlying memory allocator.
+  memory_allocator_->UpdateTrackingHistograms();
+}
+
+void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
+  // Reset the "skip the last-created histogram" optimization so tests can
+  // exercise the full import path.
+  subtle::NoBarrier_Store(&last_created_, 0);
+}
+
+// static
+HistogramBase*
+PersistentHistogramAllocator::GetCreateHistogramResultHistogram() {
+  // Get the histogram in which create-results are stored. This is copied
+  // almost exactly from the STATIC_HISTOGRAM_POINTER_BLOCK macro but with
+  // added code to prevent recursion (a likely occurrence because the creation
+  // of a new histogram can end up calling this.)
+  static base::subtle::AtomicWord atomic_histogram_pointer = 0;
+  HistogramBase* histogram_pointer =
+      reinterpret_cast<HistogramBase*>(
+          base::subtle::Acquire_Load(&atomic_histogram_pointer));
+  if (!histogram_pointer) {
+    // It's possible for multiple threads to make it here in parallel but
+    // they'll always return the same result as there is a mutex in the Get.
+    // The purpose of the "initialized" variable is just to ensure that
+    // the same thread doesn't recurse which is also why it doesn't have
+    // to be atomic.
+    static bool initialized = false;
+    if (!initialized) {
+      initialized = true;
+      if (g_allocator) {
+// Don't log in release-with-asserts builds, otherwise the test_installer step
+// fails because this code writes to a log file before the installer code had a
+// chance to set the log file's location.
+#if !defined(DCHECK_ALWAYS_ON)
+        DLOG(WARNING) << "Creating the results-histogram inside persistent"
+                      << " memory can cause future allocations to crash if"
+                      << " that memory is ever released (for testing).";
+#endif
+      }
+
+      // FactoryGet registers the histogram with the StatisticsRecorder; the
+      // recursion guard above prevents the creation from re-entering here.
+      histogram_pointer = LinearHistogram::FactoryGet(
+          kResultHistogram, 1, CREATE_HISTOGRAM_MAX, CREATE_HISTOGRAM_MAX + 1,
+          HistogramBase::kUmaTargetedHistogramFlag);
+      base::subtle::Release_Store(
+          &atomic_histogram_pointer,
+          reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));
+    }
+  }
+  return histogram_pointer;
+}
+
+// Recreates a process-local HistogramBase from the data stored in persistent
+// memory at |histogram_data_ptr|. All metadata is validated (from a local
+// copy) before use; returns null, after recording a failure reason, if the
+// persistent data cannot be trusted.
+std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
+    PersistentHistogramData* histogram_data_ptr) {
+  if (!histogram_data_ptr) {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER);
+    NOTREACHED();
+    return nullptr;
+  }
+
+  // Sparse histograms are quite different so handle them as a special case.
+  if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
+    std::unique_ptr<HistogramBase> histogram =
+        SparseHistogram::PersistentCreate(this, histogram_data_ptr->name,
+                                          &histogram_data_ptr->samples_metadata,
+                                          &histogram_data_ptr->logged_metadata);
+    DCHECK(histogram);
+    histogram->SetFlags(histogram_data_ptr->flags);
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
+    return histogram;
+  }
+
+  // Copy the histogram_data to local storage because anything in persistent
+  // memory cannot be trusted as it could be changed at any moment by a
+  // malicious actor that shares access. The contents of histogram_data are
+  // validated below; the local copy is to ensure that the contents cannot
+  // be externally changed between validation and use.
+  PersistentHistogramData histogram_data = *histogram_data_ptr;
+
+  HistogramBase::Sample* ranges_data =
+      memory_allocator_->GetAsObject<HistogramBase::Sample>(
+          histogram_data.ranges_ref, kTypeIdRangesArray);
+
+  // Bound the bucket count so the byte computation below cannot overflow,
+  // and verify the ranges allocation really holds bucket_count+1 boundary
+  // samples.
+  const uint32_t max_buckets =
+      std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
+  size_t required_bytes =
+      (histogram_data.bucket_count + 1) * sizeof(HistogramBase::Sample);
+  size_t allocated_bytes =
+      memory_allocator_->GetAllocSize(histogram_data.ranges_ref);
+  if (!ranges_data || histogram_data.bucket_count < 2 ||
+      histogram_data.bucket_count >= max_buckets ||
+      allocated_bytes < required_bytes) {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
+    NOTREACHED();
+    return nullptr;
+  }
+
+  std::unique_ptr<const BucketRanges> created_ranges =
+      CreateRangesFromData(ranges_data, histogram_data.ranges_checksum,
+                           histogram_data.bucket_count + 1);
+  if (!created_ranges) {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
+    NOTREACHED();
+    return nullptr;
+  }
+  // The StatisticsRecorder may already hold identical ranges; reuse those
+  // when present instead of keeping a duplicate.
+  const BucketRanges* ranges =
+      StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
+          created_ranges.release());
+
+  HistogramBase::AtomicCount* counts_data =
+      memory_allocator_->GetAsObject<HistogramBase::AtomicCount>(
+          histogram_data.counts_ref, kTypeIdCountsArray);
+  size_t counts_bytes =
+      CalculateRequiredCountsBytes(histogram_data.bucket_count);
+  if (!counts_data || counts_bytes == 0 ||
+      memory_allocator_->GetAllocSize(histogram_data.counts_ref) <
+          counts_bytes) {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY);
+    NOTREACHED();
+    return nullptr;
+  }
+
+  // After the main "counts" array is a second array used for storing what
+  // was previously logged. This is used to calculate the "delta" during
+  // snapshot operations.
+  HistogramBase::AtomicCount* logged_data =
+      counts_data + histogram_data.bucket_count;
+
+  // The name is read through the original pointer rather than the local
+  // copy. NOTE(review): presumably the name characters live inline beyond
+  // the fixed-size struct so a struct copy would not capture them --
+  // confirm against PersistentHistogramData's definition.
+  std::string name(histogram_data_ptr->name);
+  std::unique_ptr<HistogramBase> histogram;
+  switch (histogram_data.histogram_type) {
+    case HISTOGRAM:
+      histogram = Histogram::PersistentCreate(
+          name, histogram_data.minimum, histogram_data.maximum, ranges,
+          counts_data, logged_data, histogram_data.bucket_count,
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
+      DCHECK(histogram);
+      break;
+    case LINEAR_HISTOGRAM:
+      histogram = LinearHistogram::PersistentCreate(
+          name, histogram_data.minimum, histogram_data.maximum, ranges,
+          counts_data, logged_data, histogram_data.bucket_count,
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
+      DCHECK(histogram);
+      break;
+    case BOOLEAN_HISTOGRAM:
+      histogram = BooleanHistogram::PersistentCreate(
+          name, ranges, counts_data, logged_data,
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
+      DCHECK(histogram);
+      break;
+    case CUSTOM_HISTOGRAM:
+      histogram = CustomHistogram::PersistentCreate(
+          name, ranges, counts_data, logged_data, histogram_data.bucket_count,
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
+      DCHECK(histogram);
+      break;
+    default:
+      NOTREACHED();
+  }
+
+  if (histogram) {
+    DCHECK_EQ(histogram_data.histogram_type, histogram->GetHistogramType());
+    histogram->SetFlags(histogram_data.flags);
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
+  } else {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_UNKNOWN_TYPE);
+  }
+
+  return histogram;
+}
+
+HistogramBase*
+PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
+    const HistogramBase* histogram) {
+  // This should never be called on the global histogram allocator as objects
+  // created there are already within the global statistics recorder.
+  DCHECK_NE(g_allocator, this);
+  DCHECK(histogram);
+
+  HistogramBase* existing =
+      StatisticsRecorder::FindHistogram(histogram->histogram_name());
+  if (existing)
+    return existing;
+
+  // Adding the passed histogram to the SR would cause a problem if the
+  // allocator that holds it eventually goes away. Instead, create a new
+  // one from a serialized version.
+  base::Pickle pickle;
+  if (!histogram->SerializeInfo(&pickle))
+    return nullptr;
+  PickleIterator iter(pickle);
+  // Deserialization produces a fresh, process-local histogram that the SR
+  // can safely own independent of this allocator's lifetime.
+  existing = DeserializeHistogramInfo(&iter);
+  if (!existing)
+    return nullptr;
+
+  // Make sure there is no "serialization" flag set.
+  DCHECK_EQ(0, existing->flags() & HistogramBase::kIPCSerializationSourceFlag);
+  // Record the newly created histogram in the SR.
+  return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
+}
+
+// static
+void PersistentHistogramAllocator::RecordCreateHistogramResult(
+    CreateHistogramResultType result) {
+  // Record the outcome in the shared create-results histogram, which may
+  // be null while it is itself being created (recursion guard in
+  // GetCreateHistogramResultHistogram).
+  HistogramBase* results = GetCreateHistogramResultHistogram();
+  if (!results)
+    return;
+  results->Add(result);
+}
+
+// Nothing extra to release here; the base class owns and destroys the
+// underlying PersistentMemoryAllocator.
+GlobalHistogramAllocator::~GlobalHistogramAllocator() {}
+
+// static
+void GlobalHistogramAllocator::CreateWithPersistentMemory(
+    void* base,
+    size_t size,
+    size_t page_size,
+    uint64_t id,
+    StringPiece name) {
+  // Wrap the caller-owned memory in a (writable) PersistentMemoryAllocator
+  // and install it as the process-wide histogram allocator.
+  std::unique_ptr<PersistentMemoryAllocator> memory(
+      new PersistentMemoryAllocator(base, size, page_size, id, name, false));
+  Set(WrapUnique(new GlobalHistogramAllocator(std::move(memory))));
+}
+
+// static
+void GlobalHistogramAllocator::CreateWithLocalMemory(
+    size_t size,
+    uint64_t id,
+    StringPiece name) {
+  // Back the global allocator with a heap-allocated segment of |size| bytes.
+  std::unique_ptr<LocalPersistentMemoryAllocator> memory(
+      new LocalPersistentMemoryAllocator(size, id, name));
+  Set(WrapUnique(new GlobalHistogramAllocator(std::move(memory))));
+}
+
+#if !defined(OS_NACL)
+// static
+void GlobalHistogramAllocator::CreateWithFile(
+    const FilePath& file_path,
+    size_t size,
+    uint64_t id,
+    StringPiece name) {
+  // FLAG_OPEN_ALWAYS creates the file when absent, so remember whether it
+  // existed beforehand to decide how to map it below.
+  bool exists = PathExists(file_path);
+  File file(
+      file_path, File::FLAG_OPEN_ALWAYS | File::FLAG_SHARE_DELETE |
+                 File::FLAG_READ | File::FLAG_WRITE);
+
+  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+  if (exists) {
+    // An existing file is mapped as-is; its current size wins over |size|.
+    mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
+  } else {
+    // A new file is extended to |size| bytes before mapping.
+    mmfile->Initialize(std::move(file), {0, static_cast<int64_t>(size)},
+                       MemoryMappedFile::READ_WRITE_EXTEND);
+  }
+  // NOTE(review): |file| validity isn't checked before Initialize(); the
+  // IsValid() check below presumably covers open failures -- confirm.
+  if (!mmfile->IsValid() ||
+      !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
+    NOTREACHED();
+    return;
+  }
+
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      WrapUnique(new FilePersistentMemoryAllocator(
+          std::move(mmfile), size, id, name, false)))));
+}
+#endif
+
+// static
+void GlobalHistogramAllocator::CreateWithSharedMemory(
+    std::unique_ptr<SharedMemory> memory,
+    size_t size,
+    uint64_t /*id*/,
+    StringPiece /*name*/) {
+  // Map the segment if the caller hasn't already, and reject memory the
+  // shared allocator cannot use.
+  if ((!memory->memory() && !memory->Map(size)) ||
+      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*memory)) {
+    NOTREACHED();
+    return;
+  }
+
+  DCHECK_LE(memory->mapped_size(), size);
+  // NOTE(review): |id| and |name| are ignored; 0 and an empty name are
+  // passed to the allocator instead -- presumably identity comes from the
+  // existing segment contents. Confirm.
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      WrapUnique(new SharedPersistentMemoryAllocator(
+          std::move(memory), 0, StringPiece(), /*readonly=*/false)))));
+}
+
+// static
+void GlobalHistogramAllocator::CreateWithSharedMemoryHandle(
+    const SharedMemoryHandle& handle,
+    size_t size) {
+  // Take ownership of the handle as a writable SharedMemory object.
+  std::unique_ptr<SharedMemory> shm(
+      new SharedMemory(handle, /*readonly=*/false));
+
+  // The segment must both map and satisfy the shared allocator's layout
+  // requirements before it can back histograms.
+  const bool mapped = shm->Map(size);
+  if (!mapped ||
+      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
+    NOTREACHED();
+    return;
+  }
+
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      WrapUnique(new SharedPersistentMemoryAllocator(
+          std::move(shm), 0, StringPiece(), /*readonly=*/false)))));
+}
+
+// static
+void GlobalHistogramAllocator::Set(
+    std::unique_ptr<GlobalHistogramAllocator> allocator) {
+  // Releasing or changing an allocator is extremely dangerous because it
+  // likely has histograms stored within it. If the backing memory is
+  // also released, future accesses to those histograms will seg-fault.
+  CHECK(!g_allocator);
+  g_allocator = allocator.release();
+  // Histograms created before this point live on the heap, not in the
+  // persistent segment; report how many were missed.
+  size_t existing = StatisticsRecorder::GetHistogramCount();
+
+  DVLOG_IF(1, existing)
+      << existing << " histograms were created before persistence was enabled.";
+}
+
+// static
+GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
+  // May be null if no global allocator has been Set().
+  return g_allocator;
+}
+
+// static
+// Transfers ownership of the global allocator (if any) to the caller after
+// detaching its histograms from the StatisticsRecorder. Test-only: releasing
+// the backing memory while histograms are live would otherwise crash.
+std::unique_ptr<GlobalHistogramAllocator>
+GlobalHistogramAllocator::ReleaseForTesting() {
+  GlobalHistogramAllocator* histogram_allocator = g_allocator;
+  if (!histogram_allocator)
+    return nullptr;
+  PersistentMemoryAllocator* memory_allocator =
+      histogram_allocator->memory_allocator();
+
+  // Before releasing the memory, it's necessary to have the Statistics-
+  // Recorder forget about the histograms contained therein; otherwise,
+  // some operations will try to access them and the released memory.
+  PersistentMemoryAllocator::Iterator iter(memory_allocator);
+  PersistentMemoryAllocator::Reference ref;
+  while ((ref = iter.GetNextOfType(kTypeIdHistogram)) != 0) {
+    PersistentHistogramData* histogram_data =
+        memory_allocator->GetAsObject<PersistentHistogramData>(
+            ref, kTypeIdHistogram);
+    DCHECK(histogram_data);
+    StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name);
+
+    // If a test breaks here then a memory region containing a histogram
+    // actively used by this code is being released back to the test.
+    // If that memory segment were to be deleted, future calls to create
+    // persistent histograms would crash. To avoid this, have the test call
+    // the method GetCreateHistogramResultHistogram() *before* setting
+    // the (temporary) memory allocator via SetGlobalAllocator() so that
+    // histogram is instead allocated from the process heap.
+    DCHECK_NE(kResultHistogram, histogram_data->name);
+  }
+
+  g_allocator = nullptr;
+  return WrapUnique(histogram_allocator);
+}
+
+void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
+  // Remember where WriteToPersistentLocation() should write the data.
+  persistent_location_ = location;
+}
+
+const FilePath& GlobalHistogramAllocator::GetPersistentLocation() const {
+  // Empty until SetPersistentLocation() has been called.
+  return persistent_location_;
+}
+
+// Writes the entire allocator contents (up to the "used" byte count) to the
+// file previously set via SetPersistentLocation(). Returns false on any
+// failure (including no location being set, or on NaCl where file I/O is
+// unavailable).
+bool GlobalHistogramAllocator::WriteToPersistentLocation() {
+#if defined(OS_NACL)
+  // NACL doesn't support file operations, including ImportantFileWriter.
+  NOTREACHED();
+  return false;
+#else
+  // Stop if no destination is set.
+  if (persistent_location_.empty()) {
+    NOTREACHED() << "Could not write \"" << Name() << "\" persistent histograms"
+                 << " to file because no location was set.";
+    return false;
+  }
+
+  // Atomic write avoids leaving a torn file behind if the process dies.
+  StringPiece contents(static_cast<const char*>(data()), used());
+  if (!ImportantFileWriter::WriteFileAtomically(persistent_location_,
+                                                contents)) {
+    LOG(ERROR) << "Could not write \"" << Name() << "\" persistent histograms"
+               << " to file: " << persistent_location_.value();
+    return false;
+  }
+
+  return true;
+#endif
+}
+
+// Private; instances are created only via the static Create*() methods.
+// The import iterator starts at the beginning of the (possibly pre-existing)
+// memory segment so ImportHistogramsToStatisticsRecorder() can pick up
+// histograms created by other processes.
+GlobalHistogramAllocator::GlobalHistogramAllocator(
+    std::unique_ptr<PersistentMemoryAllocator> memory)
+    : PersistentHistogramAllocator(std::move(memory)),
+      import_iterator_(this) {}
+
+// Registers with the StatisticsRecorder any histograms that have appeared in
+// the persistent segment since the last import (e.g. from other processes).
+void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
+  // Skip the import if it's the histogram that was last created. Should a
+  // race condition cause the "last created" to be overwritten before it
+  // is recognized here then the histogram will be created and be ignored
+  // when it is detected as a duplicate by the statistics-recorder. This
+  // simple check reduces the time of creating persistent histograms by
+  // about 40%.
+  Reference record_to_ignore = last_created();
+
+  // There is no lock on this because the iterator is lock-free while still
+  // guaranteed to only return each entry only once. The StatisticsRecorder
+  // has its own lock so the Register operation is safe.
+  while (true) {
+    std::unique_ptr<HistogramBase> histogram =
+        import_iterator_.GetNextWithIgnore(record_to_ignore);
+    if (!histogram)
+      break;
+    StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
+  }
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/persistent_histogram_allocator.h b/libchrome/base/metrics/persistent_histogram_allocator.h
new file mode 100644
index 0000000..ee1fba5
--- /dev/null
+++ b/libchrome/base/metrics/persistent_histogram_allocator.h
@@ -0,0 +1,479 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
+#define BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
+
+#include <map>
+#include <memory>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/feature_list.h"
+#include "base/memory/shared_memory.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/strings/string_piece.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+class FilePath;
+class PersistentSampleMapRecords;
+class PersistentSparseHistogramDataManager;
+
+// Feature definition for enabling histogram persistence.
+BASE_EXPORT extern const Feature kPersistentHistogramsFeature;
+
+
+// A data manager for sparse histograms so each instance of such doesn't have
+// to separately iterate over the entire memory segment. Though this class
+// will generally be accessed through the PersistentHistogramAllocator below,
+// it can be used independently on any PersistentMemoryAllocator (making it
+// useable for testing). This object supports only one instance of a sparse
+// histogram for a given id. Tests that create multiple identical histograms,
+// perhaps to simulate multiple processes, should create a separate manager
+// for each.
+class BASE_EXPORT PersistentSparseHistogramDataManager {
+ public:
+  // Constructs the data manager. The allocator must live longer than any
+  // managers that reference it.
+  explicit PersistentSparseHistogramDataManager(
+      PersistentMemoryAllocator* allocator);
+
+  ~PersistentSparseHistogramDataManager();
+
+  // Returns the object that manages the persistent-sample-map records for a
+  // given |id|. Only one |user| of this data is allowed at a time. This does
+  // an automatic Acquire() on the records. The user must call Release() on
+  // the returned object when it is finished with it. Ownership of the records
+  // object stays with this manager.
+  PersistentSampleMapRecords* UseSampleMapRecords(uint64_t id,
+                                                  const void* user);
+
+  // Convenience method that gets the object for a given reference so callers
+  // don't have to also keep their own pointer to the appropriate allocator.
+  template <typename T>
+  T* GetAsObject(PersistentMemoryAllocator::Reference ref, uint32_t type_id) {
+    return allocator_->GetAsObject<T>(ref, type_id);
+  }
+
+ private:
+  // PersistentSampleMapRecords calls LoadRecords() and so needs access to
+  // these private members.
+  friend class PersistentSampleMapRecords;
+
+  // Gets the object holding records for a given sample-map id when |lock_|
+  // has already been acquired.
+  PersistentSampleMapRecords* GetSampleMapRecordsWhileLocked(uint64_t id);
+
+  // Loads sample-map records looking for those belonging to the specified
+  // |load_id|. Records found for other sample-maps are held for later use
+  // without having to iterate again. This should be called only from a
+  // PersistentSampleMapRecords object because those objects have a contract
+  // that there are no other threads accessing the internal records_ field
+  // of the object that is passed in.
+  bool LoadRecords(PersistentSampleMapRecords* sample_map_records);
+
+  // Weak-pointer to the allocator used by the sparse histograms.
+  PersistentMemoryAllocator* allocator_;
+
+  // Iterator within the allocator for finding sample records.
+  PersistentMemoryAllocator::Iterator record_iterator_;
+
+  // Mapping of sample-map IDs to their sample records.
+  std::map<uint64_t, std::unique_ptr<PersistentSampleMapRecords>>
+      sample_records_;
+
+  // A lock used for synchronizing changes to sample_records_.
+  base::Lock lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentSparseHistogramDataManager);
+};
+
+
+// This class manages sample-records used by a PersistentSampleMap container
+// that underlies a persistent SparseHistogram object. It is broken out into a
+// top-level class so that it can be forward-declared in other header files
+// rather than include this entire file as would be necessary if it were
+// declared within the PersistentSparseHistogramDataManager class above.
+class BASE_EXPORT PersistentSampleMapRecords {
+ public:
+  // Constructs an instance of this class. The manager object must live longer
+  // than all instances of this class that reference it, which is not usually
+  // a problem since these objects are generally managed from within that
+  // manager instance.
+  PersistentSampleMapRecords(PersistentSparseHistogramDataManager* data_manager,
+                             uint64_t sample_map_id);
+
+  ~PersistentSampleMapRecords();
+
+  // Resets the internal state for a new object using this data. The return
+  // value is "this" as a convenience.
+  PersistentSampleMapRecords* Acquire(const void* user);
+
+  // Indicates that the using object is done with this data.
+  void Release(const void* user);
+
+  // Gets the next reference to a persistent sample-map record. The type and
+  // layout of the data being referenced is defined entirely within the
+  // PersistentSampleMap class.
+  PersistentMemoryAllocator::Reference GetNext();
+
+  // Creates a new persistent sample-map record for sample |value| and returns
+  // a reference to it.
+  PersistentMemoryAllocator::Reference CreateNew(HistogramBase::Sample value);
+
+  // Convenience method that gets the object for a given reference so callers
+  // don't have to also keep their own pointer to the appropriate allocator.
+  // This is expected to be used with the SampleRecord structure defined inside
+  // the persistent_sample_map.cc file but since that isn't exported (for
+  // cleanliness of the interface), a template is defined that will be
+  // resolved when used inside that file.
+  template <typename T>
+  T* GetAsObject(PersistentMemoryAllocator::Reference ref, uint32_t type_id) {
+    return data_manager_->GetAsObject<T>(ref, type_id);
+  }
+
+ private:
+  // The data manager fills |found_| during its iteration and so needs
+  // private access.
+  friend PersistentSparseHistogramDataManager;
+
+  // Weak-pointer to the parent data-manager object.
+  PersistentSparseHistogramDataManager* data_manager_;
+
+  // ID of PersistentSampleMap to which these records apply.
+  const uint64_t sample_map_id_;
+
+  // The current user of this set of records. It is used to ensure that no
+  // more than one object is using these records at a given time.
+  const void* user_ = nullptr;
+
+  // This is the count of how many "records" have already been read by the
+  // owning sample-map.
+  size_t seen_ = 0;
+
+  // This is the set of records previously found for a sample map. Because
+  // there is ever only one object with a given ID (typically a hash of a
+  // histogram name) and because the parent SparseHistogram has acquired
+  // its own lock before accessing the PersistentSampleMap it controls, this
+  // list can be accessed without acquiring any additional lock.
+  std::vector<PersistentMemoryAllocator::Reference> records_;
+
+  // This is the set of records found during iteration through memory. It
+  // is appended in bulk to "records". Access to this vector can be done
+  // only while holding the parent manager's lock.
+  std::vector<PersistentMemoryAllocator::Reference> found_;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentSampleMapRecords);
+};
+
+
+// This class manages histograms created within a PersistentMemoryAllocator.
+class BASE_EXPORT PersistentHistogramAllocator {
+ public:
+  // A reference to a histogram. While this is implemented as PMA::Reference,
+  // it is not conceptually the same thing. Outside callers should always use
+  // a Reference matching the class it is for and not mix the two.
+  using Reference = PersistentMemoryAllocator::Reference;
+
+  // Iterator used for fetching persistent histograms from an allocator.
+  // It is lock-free and thread-safe.
+  // See PersistentMemoryAllocator::Iterator for more information.
+  class BASE_EXPORT Iterator {
+   public:
+    // Constructs an iterator on a given |allocator|, starting at the beginning.
+    // The allocator must live beyond the lifetime of the iterator.
+    explicit Iterator(PersistentHistogramAllocator* allocator);
+
+    // Gets the next histogram from persistent memory; returns null if there
+    // are no more histograms to be found. This may still be called again
+    // later to retrieve any new histograms added in the meantime.
+    std::unique_ptr<HistogramBase> GetNext() { return GetNextWithIgnore(0); }
+
+    // Gets the next histogram from persistent memory, ignoring one particular
+    // reference in the process. Pass |ignore| of zero (0) to ignore nothing.
+    std::unique_ptr<HistogramBase> GetNextWithIgnore(Reference ignore);
+
+   private:
+    // Weak-pointer to histogram allocator being iterated over.
+    PersistentHistogramAllocator* allocator_;
+
+    // The iterator used for stepping through objects in persistent memory.
+    // It is lock-free and thread-safe which is why this class is also such.
+    PersistentMemoryAllocator::Iterator memory_iter_;
+
+    DISALLOW_COPY_AND_ASSIGN(Iterator);
+  };
+
+  // A PersistentHistogramAllocator is constructed from a PersistentMemory-
+  // Allocator object of which it takes ownership.
+  explicit PersistentHistogramAllocator(
+      std::unique_ptr<PersistentMemoryAllocator> memory);
+  virtual ~PersistentHistogramAllocator();
+
+  // Direct access to underlying memory allocator. If the segment is shared
+  // across threads or processes, reading data through these values does
+  // not guarantee consistency. Use with care. Do not write.
+  PersistentMemoryAllocator* memory_allocator() {
+    return memory_allocator_.get();
+  }
+
+  // Implement the "metadata" API of a PersistentMemoryAllocator, forwarding
+  // those requests to the real one.
+  uint64_t Id() const { return memory_allocator_->Id(); }
+  const char* Name() const { return memory_allocator_->Name(); }
+  const void* data() const { return memory_allocator_->data(); }
+  size_t length() const { return memory_allocator_->length(); }
+  size_t size() const { return memory_allocator_->size(); }
+  size_t used() const { return memory_allocator_->used(); }
+
+  // Recreate a Histogram from data held in persistent memory. Though this
+  // object will be local to the current process, the sample data will be
+  // shared with all other threads referencing it. This method takes a |ref|
+  // to where the top-level histogram data may be found in this allocator.
+  // This method will return null if any problem is detected with the data.
+  std::unique_ptr<HistogramBase> GetHistogram(Reference ref);
+
+  // Allocate a new persistent histogram. The returned histogram will not
+  // be able to be located by other allocators until it is "finalized".
+  std::unique_ptr<HistogramBase> AllocateHistogram(
+      HistogramType histogram_type,
+      const std::string& name,
+      int minimum,
+      int maximum,
+      const BucketRanges* bucket_ranges,
+      int32_t flags,
+      Reference* ref_ptr);
+
+  // Finalize the creation of the histogram, making it available to other
+  // processes if |registered| (as in: added to the StatisticsRecorder) is
+  // True, forgetting it otherwise.
+  void FinalizeHistogram(Reference ref, bool registered);
+
+  // Merges the data in a persistent histogram with one held globally by the
+  // StatisticsRecorder, updating the "logged" samples within the passed
+  // object so that repeated merges are allowed. Don't call this on a "global"
+  // allocator because histograms created there will already be in the SR.
+  void MergeHistogramDeltaToStatisticsRecorder(HistogramBase* histogram);
+
+  // As above but merge the "final" delta. No update of "logged" samples is
+  // done which means it can operate on read-only objects. It's essential,
+  // however, not to call this more than once or those final samples will
+  // get recorded again.
+  void MergeHistogramFinalDeltaToStatisticsRecorder(
+      const HistogramBase* histogram);
+
+  // Returns the object that manages the persistent-sample-map records for a
+  // given |id|. Only one |user| of this data is allowed at a time. This does
+  // an automatic Acquire() on the records. The user must call Release() on
+  // the returned object when it is finished with it. Ownership stays with
+  // this allocator.
+  PersistentSampleMapRecords* UseSampleMapRecords(uint64_t id,
+                                                  const void* user);
+
+  // Create internal histograms for tracking memory use and allocation sizes
+  // for allocator of |name| (which can simply be the result of Name()). This
+  // is done seperately from construction for situations such as when the
+  // histograms will be backed by memory provided by this very allocator.
+  //
+  // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
+  // with the following histograms:
+  //    UMA.PersistentAllocator.name.Allocs
+  //    UMA.PersistentAllocator.name.UsedPct
+  void CreateTrackingHistograms(StringPiece name);
+  void UpdateTrackingHistograms();
+
+  // Clears the internal |last_created_| reference so testing can validate
+  // operation without that optimization.
+  void ClearLastCreatedReferenceForTesting();
+
+  // Histogram containing creation results. Visible for testing.
+  static HistogramBase* GetCreateHistogramResultHistogram();
+
+ protected:
+  // The structure used to hold histogram data in persistent memory. It is
+  // defined and used entirely within the .cc file.
+  struct PersistentHistogramData;
+
+  // Gets the reference of the last histogram created, used to avoid
+  // trying to import what was just created.
+  PersistentHistogramAllocator::Reference last_created() {
+    return subtle::NoBarrier_Load(&last_created_);
+  }
+
+  // Gets the next histogram in persistent data based on iterator while
+  // ignoring a particular reference if it is found.
+  std::unique_ptr<HistogramBase> GetNextHistogramWithIgnore(Iterator* iter,
+                                                            Reference ignore);
+
+ private:
+  // Enumerate possible creation results for reporting. These values are
+  // recorded in a UMA histogram (see RecordCreateHistogramResult), so keep
+  // existing values stable and add new values before CREATE_HISTOGRAM_MAX.
+  enum CreateHistogramResultType {
+    // Everything was fine.
+    CREATE_HISTOGRAM_SUCCESS = 0,
+
+    // Pointer to metadata was not valid.
+    CREATE_HISTOGRAM_INVALID_METADATA_POINTER,
+
+    // Histogram metadata was not valid.
+    CREATE_HISTOGRAM_INVALID_METADATA,
+
+    // Ranges information was not valid.
+    CREATE_HISTOGRAM_INVALID_RANGES_ARRAY,
+
+    // Counts information was not valid.
+    CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY,
+
+    // Could not allocate histogram memory due to corruption.
+    CREATE_HISTOGRAM_ALLOCATOR_CORRUPT,
+
+    // Could not allocate histogram memory due to lack of space.
+    CREATE_HISTOGRAM_ALLOCATOR_FULL,
+
+    // Could not allocate histogram memory due to unknown error.
+    CREATE_HISTOGRAM_ALLOCATOR_ERROR,
+
+    // Histogram was of unknown type.
+    CREATE_HISTOGRAM_UNKNOWN_TYPE,
+
+    // Instance has detected a corrupt allocator (recorded only once).
+    CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT,
+
+    // Always keep this at the end.
+    CREATE_HISTOGRAM_MAX
+  };
+
+  // Create a histogram based on saved (persistent) information about it.
+  std::unique_ptr<HistogramBase> CreateHistogram(
+      PersistentHistogramData* histogram_data_ptr);
+
+  // Gets or creates an object in the global StatisticsRecorder matching
+  // the |histogram| passed. Null is returned if one was not found and
+  // one could not be created.
+  HistogramBase* GetOrCreateStatisticsRecorderHistogram(
+      const HistogramBase* histogram);
+
+  // Record the result of a histogram creation.
+  static void RecordCreateHistogramResult(CreateHistogramResultType result);
+
+  // The memory allocator that provides the actual histogram storage.
+  std::unique_ptr<PersistentMemoryAllocator> memory_allocator_;
+
+  // The data-manager used to improve performance of sparse histograms.
+  PersistentSparseHistogramDataManager sparse_histogram_data_manager_;
+
+  // A reference to the last-created histogram in the allocator, used to avoid
+  // trying to import what was just created.
+  // TODO(bcwhite): Change this to std::atomic<PMA::Reference> when available.
+  subtle::Atomic32 last_created_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentHistogramAllocator);
+};
+
+
+// A special case of the PersistentHistogramAllocator that operates on a
+// global scale, collecting histograms created through standard macros and
+// the FactoryGet() method.
+class BASE_EXPORT GlobalHistogramAllocator
+    : public PersistentHistogramAllocator {
+ public:
+  // Public so the std::unique_ptr<GlobalHistogramAllocator> handled by Set()
+  // and ReleaseForTesting() can destroy the object.
+  ~GlobalHistogramAllocator() override;
+
+  // Create a global allocator using the passed-in memory |base|, |size|, and
+  // other parameters. Ownership of the memory segment remains with the caller.
+  static void CreateWithPersistentMemory(void* base,
+                                         size_t size,
+                                         size_t page_size,
+                                         uint64_t id,
+                                         StringPiece name);
+
+  // Create a global allocator using an internal block of memory of the
+  // specified |size| taken from the heap.
+  static void CreateWithLocalMemory(size_t size, uint64_t id, StringPiece name);
+
+#if !defined(OS_NACL)
+  // Create a global allocator by memory-mapping a |file|. If the file does
+  // not exist, it will be created with the specified |size|. If the file does
+  // exist, the allocator will use and add to its contents, ignoring the passed
+  // size in favor of the existing size.
+  static void CreateWithFile(const FilePath& file_path,
+                             size_t size,
+                             uint64_t id,
+                             StringPiece name);
+#endif
+
+  // Create a global allocator using a block of shared |memory| of the
+  // specified |size|. The allocator takes ownership of the shared memory
+  // and releases it upon destruction, though the memory will continue to
+  // live if other processes have access to it.
+  static void CreateWithSharedMemory(std::unique_ptr<SharedMemory> memory,
+                                     size_t size,
+                                     uint64_t id,
+                                     StringPiece name);
+
+  // Create a global allocator using a block of shared memory accessed
+  // through the given |handle| and |size|. The allocator takes ownership
+  // of the handle and closes it upon destruction, though the memory will
+  // continue to live if other processes have access to it.
+  static void CreateWithSharedMemoryHandle(const SharedMemoryHandle& handle,
+                                           size_t size);
+
+  // Sets a GlobalHistogramAllocator for globally storing histograms in
+  // a space that can be persisted or shared between processes. There is only
+  // ever one allocator for all such histograms created by a single process.
+  // This takes ownership of the object and should be called as soon as
+  // possible during startup to capture as many histograms as possible and
+  // while operating single-threaded so there are no race-conditions.
+  static void Set(std::unique_ptr<GlobalHistogramAllocator> allocator);
+
+  // Gets a pointer to the global histogram allocator. Returns null if none
+  // exists.
+  static GlobalHistogramAllocator* Get();
+
+  // This access to the persistent allocator is only for testing; it extracts
+  // the current allocator completely. This allows easy creation of histograms
+  // within persistent memory segments which can then be extracted and used in
+  // other ways.
+  static std::unique_ptr<GlobalHistogramAllocator> ReleaseForTesting();
+
+  // Stores a pathname to which the contents of this allocator should be saved
+  // in order to persist the data for a later use.
+  void SetPersistentLocation(const FilePath& location);
+
+  // Retrieves a previously set pathname to which the contents of this allocator
+  // are to be saved.
+  const FilePath& GetPersistentLocation() const;
+
+  // Writes the internal data to a previously set location. This is generally
+  // called when a process is exiting from a section of code that may not know
+  // the filesystem. The data is written in an atomic manner. The return value
+  // indicates success.
+  bool WriteToPersistentLocation();
+
+ private:
+  friend class StatisticsRecorder;
+
+  // Creates a new global histogram allocator.
+  explicit GlobalHistogramAllocator(
+      std::unique_ptr<PersistentMemoryAllocator> memory);
+
+  // Import new histograms from the global histogram allocator. It's possible
+  // for other processes to create histograms in the active memory segment;
+  // this adds those to the internal list of known histograms to avoid creating
+  // duplicates that would have to be merged during reporting. Every call to
+  // this method resumes from the last entry it saw; it costs nothing if
+  // nothing new has been added.
+  void ImportHistogramsToStatisticsRecorder();
+
+  // Import always continues from where it left off, making use of a single
+  // iterator to continue the work.
+  Iterator import_iterator_;
+
+  // The location to which the data should be persisted.
+  FilePath persistent_location_;
+
+  DISALLOW_COPY_AND_ASSIGN(GlobalHistogramAllocator);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
diff --git a/libchrome/base/metrics/persistent_histogram_allocator_unittest.cc b/libchrome/base/metrics/persistent_histogram_allocator_unittest.cc
new file mode 100644
index 0000000..b680662
--- /dev/null
+++ b/libchrome/base/metrics/persistent_histogram_allocator_unittest.cc
@@ -0,0 +1,209 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_histogram_allocator.h"
+
+#include "base/files/scoped_temp_dir.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/bucket_ranges.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/metrics/statistics_recorder.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Test fixture that installs a fresh GlobalHistogramAllocator (backed by a
+// local heap buffer) and a temporary StatisticsRecorder for each test.
+class PersistentHistogramAllocatorTest : public testing::Test {
+ protected:
+  const int32_t kAllocatorMemorySize = 64 << 10;  // 64 KiB
+
+  PersistentHistogramAllocatorTest()
+      : statistics_recorder_(StatisticsRecorder::CreateTemporaryForTesting()) {
+    CreatePersistentHistogramAllocator();
+  }
+  ~PersistentHistogramAllocatorTest() override {
+    DestroyPersistentHistogramAllocator();
+  }
+
+  // Installs a new global histogram allocator on a zeroed local memory block,
+  // first releasing any allocator left over from a previous test.
+  void CreatePersistentHistogramAllocator() {
+    allocator_memory_.reset(new char[kAllocatorMemorySize]);
+
+    GlobalHistogramAllocator::ReleaseForTesting();
+    memset(allocator_memory_.get(), 0, kAllocatorMemorySize);
+    GlobalHistogramAllocator::GetCreateHistogramResultHistogram();
+    GlobalHistogramAllocator::CreateWithPersistentMemory(
+        allocator_memory_.get(), kAllocatorMemorySize, 0, 0,
+        "PersistentHistogramAllocatorTest");
+    allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
+  }
+
+  // Drops the unowned allocator pointer and releases the global allocator.
+  void DestroyPersistentHistogramAllocator() {
+    allocator_ = nullptr;
+    GlobalHistogramAllocator::ReleaseForTesting();
+  }
+
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+  std::unique_ptr<char[]> allocator_memory_;
+  PersistentMemoryAllocator* allocator_ = nullptr;  // Unowned.
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(PersistentHistogramAllocatorTest);
+};
+
+TEST_F(PersistentHistogramAllocatorTest, CreateAndIterateTest) {
+  PersistentMemoryAllocator::MemoryInfo meminfo0;
+  allocator_->GetMemoryInfo(&meminfo0);
+
+  // Try basic construction of each histogram type; each one should consume
+  // some memory from the persistent allocator.
+  HistogramBase* histogram = Histogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10, HistogramBase::kIsPersistent);
+  EXPECT_TRUE(histogram);
+  histogram->CheckName("TestHistogram");
+  PersistentMemoryAllocator::MemoryInfo meminfo1;
+  allocator_->GetMemoryInfo(&meminfo1);
+  EXPECT_GT(meminfo0.free, meminfo1.free);
+
+  HistogramBase* linear_histogram = LinearHistogram::FactoryGet(
+      "TestLinearHistogram", 1, 1000, 10, HistogramBase::kIsPersistent);
+  EXPECT_TRUE(linear_histogram);
+  linear_histogram->CheckName("TestLinearHistogram");
+  PersistentMemoryAllocator::MemoryInfo meminfo2;
+  allocator_->GetMemoryInfo(&meminfo2);
+  EXPECT_GT(meminfo1.free, meminfo2.free);
+
+  HistogramBase* boolean_histogram = BooleanHistogram::FactoryGet(
+      "TestBooleanHistogram", HistogramBase::kIsPersistent);
+  EXPECT_TRUE(boolean_histogram);
+  boolean_histogram->CheckName("TestBooleanHistogram");
+  PersistentMemoryAllocator::MemoryInfo meminfo3;
+  allocator_->GetMemoryInfo(&meminfo3);
+  EXPECT_GT(meminfo2.free, meminfo3.free);
+
+  std::vector<int> custom_ranges;
+  custom_ranges.push_back(1);
+  custom_ranges.push_back(5);
+  HistogramBase* custom_histogram = CustomHistogram::FactoryGet(
+      "TestCustomHistogram", custom_ranges, HistogramBase::kIsPersistent);
+  EXPECT_TRUE(custom_histogram);
+  custom_histogram->CheckName("TestCustomHistogram");
+  PersistentMemoryAllocator::MemoryInfo meminfo4;
+  allocator_->GetMemoryInfo(&meminfo4);
+  EXPECT_GT(meminfo3.free, meminfo4.free);
+
+  // Iterate the raw allocator: exactly one iterable record should exist for
+  // each of the four histograms created above.
+  PersistentMemoryAllocator::Iterator iter(allocator_);
+  uint32_t type;
+  EXPECT_NE(0U, iter.GetNext(&type));  // Histogram
+  EXPECT_NE(0U, iter.GetNext(&type));  // LinearHistogram
+  EXPECT_NE(0U, iter.GetNext(&type));  // BooleanHistogram
+  EXPECT_NE(0U, iter.GetNext(&type));  // CustomHistogram
+  EXPECT_EQ(0U, iter.GetNext(&type));
+
+  // Create a second allocator and have it access the memory of the first.
+  std::unique_ptr<HistogramBase> recovered;
+  PersistentHistogramAllocator recovery(
+      WrapUnique(new PersistentMemoryAllocator(
+          allocator_memory_.get(), kAllocatorMemorySize, 0, 0, "", false)));
+  PersistentHistogramAllocator::Iterator histogram_iter(&recovery);
+
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
+  recovered->CheckName("TestHistogram");
+
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
+  recovered->CheckName("TestLinearHistogram");
+
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
+  recovered->CheckName("TestBooleanHistogram");
+
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
+  recovered->CheckName("TestCustomHistogram");
+
+  recovered = histogram_iter.GetNext();
+  EXPECT_FALSE(recovered);
+}
+
+TEST_F(PersistentHistogramAllocatorTest, CreateWithFileTest) {
+  const char temp_name[] = "CreateWithFileTest";
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath temp_file = temp_dir.path().AppendASCII(temp_name);
+  const size_t temp_size = 64 << 10;  // 64 KiB
+
+  // Test creation of a new file.
+  GlobalHistogramAllocator::ReleaseForTesting();
+  GlobalHistogramAllocator::CreateWithFile(temp_file, temp_size, 0, temp_name);
+  EXPECT_EQ(std::string(temp_name),
+            GlobalHistogramAllocator::Get()->memory_allocator()->Name());
+
+  // Test re-open of a possibly-existing file.
+  GlobalHistogramAllocator::ReleaseForTesting();
+  GlobalHistogramAllocator::CreateWithFile(temp_file, temp_size, 0, "");
+  EXPECT_EQ(std::string(temp_name),
+            GlobalHistogramAllocator::Get()->memory_allocator()->Name());
+
+  // Test re-open of a known-existing file.
+  GlobalHistogramAllocator::ReleaseForTesting();
+  GlobalHistogramAllocator::CreateWithFile(temp_file, 0, 0, "");
+  EXPECT_EQ(std::string(temp_name),
+            GlobalHistogramAllocator::Get()->memory_allocator()->Name());
+
+  // Final release so file and temp-dir can be removed.
+  GlobalHistogramAllocator::ReleaseForTesting();
+}
+
+TEST_F(PersistentHistogramAllocatorTest, StatisticsRecorderTest) {
+  size_t starting_sr_count = StatisticsRecorder::GetHistogramCount();
+
+  // Create a local StatisticsRecorder in which the newly created histogram
+  // will be recorded.
+  std::unique_ptr<StatisticsRecorder> local_sr =
+      StatisticsRecorder::CreateTemporaryForTesting();
+  EXPECT_EQ(0U, StatisticsRecorder::GetHistogramCount());
+
+  HistogramBase* histogram = LinearHistogram::FactoryGet(
+      "TestHistogram", 1, 10, 10, HistogramBase::kIsPersistent);
+  EXPECT_TRUE(histogram);
+  EXPECT_EQ(1U, StatisticsRecorder::GetHistogramCount());
+  // Record some samples; the per-bucket counts are verified after recovery
+  // at the bottom of this test.
+  histogram->Add(3);
+  histogram->Add(1);
+  histogram->Add(4);
+  histogram->Add(1);
+  histogram->Add(6);
+
+  // Destroy the local SR and ensure that we're back to the initial state.
+  local_sr.reset();
+  EXPECT_EQ(starting_sr_count, StatisticsRecorder::GetHistogramCount());
+
+  // Create a second allocator and have it access the memory of the first.
+  std::unique_ptr<HistogramBase> recovered;
+  PersistentHistogramAllocator recovery(
+      WrapUnique(new PersistentMemoryAllocator(
+          allocator_memory_.get(), kAllocatorMemorySize, 0, 0, "", false)));
+  PersistentHistogramAllocator::Iterator histogram_iter(&recovery);
+
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
+
+  // Merge the recovered histogram to the SR. It will always be a new object.
+  recovery.MergeHistogramDeltaToStatisticsRecorder(recovered.get());
+  EXPECT_EQ(starting_sr_count + 1, StatisticsRecorder::GetHistogramCount());
+  HistogramBase* found =
+      StatisticsRecorder::FindHistogram(recovered->histogram_name());
+  ASSERT_TRUE(found);
+  EXPECT_NE(recovered.get(), found);
+
+  // Ensure that the data got merged, too.
+  std::unique_ptr<HistogramSamples> snapshot = found->SnapshotSamples();
+  EXPECT_EQ(recovered->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
+  EXPECT_EQ(1, snapshot->GetCount(3));
+  EXPECT_EQ(2, snapshot->GetCount(1));
+  EXPECT_EQ(1, snapshot->GetCount(4));
+  EXPECT_EQ(1, snapshot->GetCount(6));
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/persistent_memory_allocator.cc b/libchrome/base/metrics/persistent_memory_allocator.cc
new file mode 100644
index 0000000..dfa408f
--- /dev/null
+++ b/libchrome/base/metrics/persistent_memory_allocator.cc
@@ -0,0 +1,830 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_memory_allocator.h"
+
+#include <assert.h>
+#include <algorithm>
+
+#if defined(OS_WIN)
+#include "winbase.h"
+#elif defined(OS_POSIX)
+#include <sys/mman.h>
+#endif
+
+#include "base/files/memory_mapped_file.h"
+#include "base/logging.h"
+#include "base/memory/shared_memory.h"
+#include "base/metrics/histogram_macros.h"
+
+namespace {
+
+// Limit of memory segment size. It has to fit in an unsigned 32-bit number
+// and should be a power of 2 in order to accommodate almost any page size.
+const uint32_t kSegmentMaxSize = 1 << 30;  // 1 GiB
+
+// A constant (random) value placed in the shared metadata to identify
+// an already initialized memory segment.
+const uint32_t kGlobalCookie = 0x408305DC;
+
+// The current version of the metadata. If updates are made that change
+// the metadata, the version number can be queried to operate in a backward-
+// compatible manner until the memory segment is completely re-initialized.
+const uint32_t kGlobalVersion = 1;
+
+// Constant values placed in the block headers to indicate its state.
+const uint32_t kBlockCookieFree = 0;
+const uint32_t kBlockCookieQueue = 1;
+const uint32_t kBlockCookieWasted = (uint32_t)-1;
+const uint32_t kBlockCookieAllocated = 0xC8799269;
+
+// TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
+// types rather than combined bitfield.
+
+// Flags stored in the flags_ field of the SharedMetaData structure below.
+enum : int {
+  kFlagCorrupt = 1 << 0,
+  kFlagFull = 1 << 1
+};
+
+// Tests whether |flag| is currently set in |flags|.
+bool CheckFlag(const volatile std::atomic<uint32_t>* flags, int flag) {
+  uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
+  return (loaded_flags & flag) != 0;
+}
+
+// Sets |flag| in |flags|, retrying the compare-exchange as needed since a
+// "weak" exchange can fail spuriously or due to a concurrent update.
+void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) {
+  uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
+  for (;;) {
+    uint32_t new_flags = (loaded_flags & ~flag) | flag;
+    // In the failure case, the actual "flags" value is stored in loaded_flags.
+    if (flags->compare_exchange_weak(loaded_flags, new_flags))
+      break;
+  }
+}
+
+}  // namespace
+
+namespace base {
+
+// All allocations and data-structures must be aligned to this byte boundary.
+// Alignment as large as the physical bus between CPU and RAM is _required_
+// for some architectures, is simply more efficient on other CPUs, and
+// generally a Good Idea(tm) for all platforms as it reduces/eliminates the
+// chance that a type will span cache lines. Alignment mustn't be less
+// than 8 to ensure proper alignment for all types. The rest is a balance
+// between reducing spans across multiple cache lines and wasted space spent
+// padding out allocations. An alignment of 16 would ensure that the block
+// header structure always sits in a single cache line. An average of about
+// 1/2 this value will be wasted with every allocation.
+const uint32_t PersistentMemoryAllocator::kAllocAlignment = 8;
+
+// The block-header is placed at the top of every allocation within the
+// segment to describe the data that follows it.
+struct PersistentMemoryAllocator::BlockHeader {
+  uint32_t size;       // Number of bytes in this block, including header.
+  uint32_t cookie;     // Constant value indicating completed allocation.
+  std::atomic<uint32_t> type_id;  // Arbitrary number indicating data type.
+  std::atomic<uint32_t> next;     // Reference to next block when iterating;
+                                  // zero means the block is not (yet) iterable.
+};
+
+// The shared metadata exists once at the top of the memory segment to
+// describe the state of the allocator to all processes.
+struct PersistentMemoryAllocator::SharedMetadata {
+  uint32_t cookie;     // Some value that indicates complete initialization.
+  uint32_t size;       // Total size of memory segment.
+  uint32_t page_size;  // Paging size within memory segment.
+  uint32_t version;    // Version code so upgrades don't break.
+  uint64_t id;         // Arbitrary ID number given by creator.
+  uint32_t name;       // Reference to stored name string.
+
+  // Above is read-only after first construction. Below may be changed and
+  // so must be marked "volatile" to provide correct inter-process behavior.
+
+  // Bitfield of information flags. Access to this should be done through
+  // the CheckFlag() and SetFlag() methods defined above.
+  volatile std::atomic<uint32_t> flags;
+
+  // Offset/reference to first free space in segment.
+  volatile std::atomic<uint32_t> freeptr;
+
+  // The "iterable" queue is an M&S Queue as described here, append-only:
+  // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
+  volatile std::atomic<uint32_t> tailptr;  // Last block of iteration queue.
+  volatile BlockHeader queue;   // Empty block for linked-list head/tail.
+};
+
+// The "queue" block header is used to detect "last node" so that zero/null
+// can be used to indicate that it hasn't been added at all. It is part of
+// the SharedMetadata structure which itself is always located at offset zero.
+const PersistentMemoryAllocator::Reference
+    PersistentMemoryAllocator::kReferenceQueue =
+        offsetof(SharedMetadata, queue);
+
+// Extension given to disk files that hold a persistent memory segment.
+const base::FilePath::CharType PersistentMemoryAllocator::kFileExtension[] =
+    FILE_PATH_LITERAL(".pma");
+
+
+// Begins iteration from the head of the iterable queue.
+PersistentMemoryAllocator::Iterator::Iterator(
+    const PersistentMemoryAllocator* allocator)
+    : allocator_(allocator), last_record_(kReferenceQueue), record_count_(0) {}
+
+// Begins iteration immediately after |starting_after|, which must itself be
+// a valid, iterable record.
+PersistentMemoryAllocator::Iterator::Iterator(
+    const PersistentMemoryAllocator* allocator,
+    Reference starting_after)
+    : allocator_(allocator), last_record_(starting_after), record_count_(0) {
+  // Ensure that the starting point is a valid, iterable block (meaning it can
+  // be read and has a non-zero "next" pointer).
+  const volatile BlockHeader* block =
+      allocator_->GetBlock(starting_after, 0, 0, false, false);
+  if (!block || block->next.load(std::memory_order_relaxed) == 0) {
+    NOTREACHED();
+    // Fall back to the queue head so iteration stays well-defined.
+    last_record_.store(kReferenceQueue, std::memory_order_release);
+  }
+}
+
+PersistentMemoryAllocator::Reference
+PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
+  // Make a copy of the existing count of found-records, acquiring all changes
+  // made to the allocator, notably "freeptr" (see comment in loop for why
+  // the load of that value cannot be moved above here) that occurred during
+  // any previous runs of this method, including those by parallel threads
+  // that interrupted it. It pairs with the Release at the end of this method.
+  //
+  // Otherwise, if the compiler were to arrange the two loads such that
+  // "count" was fetched _after_ "freeptr" then it would be possible for
+  // this thread to be interrupted between them and other threads perform
+  // multiple allocations, make-iterables, and iterations (with the included
+  // increment of |record_count_|) culminating in the check at the bottom
+  // mistakenly determining that a loop exists. Isn't this stuff fun?
+  uint32_t count = record_count_.load(std::memory_order_acquire);
+
+  Reference last = last_record_.load(std::memory_order_acquire);
+  Reference next;
+  while (true) {
+    const volatile BlockHeader* block =
+        allocator_->GetBlock(last, 0, 0, true, false);
+    if (!block)  // Invalid iterator state.
+      return kReferenceNull;
+
+    // The compiler and CPU can freely reorder all memory accesses on which
+    // there are no dependencies. It could, for example, move the load of
+    // "freeptr" to above this point because there are no explicit dependencies
+    // between it and "next". If it did, however, then another block could
+    // be queued after that but before the following load meaning there is
+    // one more queued block than the future "detect loop by having more
+    // blocks that could fit before freeptr" will allow.
+    //
+    // By "acquiring" the "next" value here, it's synchronized to the enqueue
+    // of the node which in turn is synchronized to the allocation (which sets
+    // freeptr). Thus, the scenario above cannot happen.
+    next = block->next.load(std::memory_order_acquire);
+    if (next == kReferenceQueue)  // No next allocation in queue.
+      return kReferenceNull;
+    block = allocator_->GetBlock(next, 0, 0, false, false);
+    if (!block) {  // Memory is corrupt.
+      allocator_->SetCorrupt();
+      return kReferenceNull;
+    }
+
+    // Update the "last_record" pointer to be the reference being returned.
+    // If it fails then another thread has already iterated past it so loop
+    // again. Failing will also load the existing value into "last" so there
+    // is no need to do another such load when the while-loop restarts. A
+    // "strong" compare-exchange is used because failing unnecessarily would
+    // mean repeating some fairly costly validations above.
+    if (last_record_.compare_exchange_strong(last, next)) {
+      *type_return = block->type_id.load(std::memory_order_relaxed);
+      break;
+    }
+  }
+
+  // Memory corruption could cause a loop in the list. Such must be detected
+  // so as to not cause an infinite loop in the caller. This is done by simply
+  // making sure it doesn't iterate more times than the absolute maximum
+  // number of allocations that could have been made. Callers are likely
+  // to loop multiple times before it is detected but at least it stops.
+  const uint32_t freeptr = std::min(
+      allocator_->shared_meta()->freeptr.load(std::memory_order_relaxed),
+      allocator_->mem_size_);
+  const uint32_t max_records =
+      freeptr / (sizeof(BlockHeader) + kAllocAlignment);
+  if (count > max_records) {
+    allocator_->SetCorrupt();
+    return kReferenceNull;
+  }
+
+  // Increment the count and release the changes made above. It pairs with
+  // the Acquire at the top of this method. Note that this operation is not
+  // strictly synchronized with fetching of the object to return, which would
+  // have to be done inside the loop and is somewhat complicated to achieve.
+  // It does not matter if it falls behind temporarily so long as it never
+  // gets ahead.
+  record_count_.fetch_add(1, std::memory_order_release);
+  return next;
+}
+
+// Returns the next iterable record whose type matches |type_match|, skipping
+// records of other types; returns kReferenceNull when the queue is exhausted.
+PersistentMemoryAllocator::Reference
+PersistentMemoryAllocator::Iterator::GetNextOfType(uint32_t type_match) {
+  Reference ref;
+  uint32_t type_found;
+  while ((ref = GetNext(&type_found)) != 0) {
+    if (type_found == type_match)
+      return ref;
+  }
+  return kReferenceNull;
+}
+
+
+// static
+// Validates that |base| is aligned to kAllocAlignment, |size| is within the
+// supported bounds, and the size/page-size divisibility requirements hold
+// (the divisibility checks are relaxed for read-only segments).
+bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
+                                                   size_t size,
+                                                   size_t page_size,
+                                                   bool readonly) {
+  return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) &&
+          (size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) &&
+          (size % kAllocAlignment == 0 || readonly) &&
+          (page_size == 0 || size % page_size == 0 || readonly));
+}
+
+PersistentMemoryAllocator::PersistentMemoryAllocator(
+    void* base,
+    size_t size,
+    size_t page_size,
+    uint64_t id,
+    base::StringPiece name,
+    bool readonly)
+    : mem_base_(static_cast<char*>(base)),
+      mem_size_(static_cast<uint32_t>(size)),
+      mem_page_(static_cast<uint32_t>((page_size ? page_size : size))),
+      readonly_(readonly),
+      corrupt_(0),
+      allocs_histogram_(nullptr),
+      used_histogram_(nullptr) {
+  static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
+                "BlockHeader is not a multiple of kAllocAlignment");
+  static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
+                "SharedMetadata is not a multiple of kAllocAlignment");
+  static_assert(kReferenceQueue % kAllocAlignment == 0,
+                "\"queue\" is not aligned properly; must be at end of struct");
+
+  // Ensure that memory segment is of acceptable size.
+  CHECK(IsMemoryAcceptable(base, size, page_size, readonly));
+
+  // These atomics operate inter-process and so must be lock-free. The local
+  // casts are to make sure it can be evaluated at compile time to a constant.
+  CHECK(((SharedMetadata*)0)->freeptr.is_lock_free());
+  CHECK(((SharedMetadata*)0)->flags.is_lock_free());
+  CHECK(((BlockHeader*)0)->next.is_lock_free());
+  CHECK(corrupt_.is_lock_free());
+
+  // A cookie other than kGlobalCookie means the segment was never (fully)
+  // initialized: initialize it now, or mark it corrupt if it's read-only.
+  if (shared_meta()->cookie != kGlobalCookie) {
+    if (readonly) {
+      SetCorrupt();
+      return;
+    }
+
+    // This block is only executed when a completely new memory segment is
+    // being initialized. It's unshared and single-threaded...
+    volatile BlockHeader* const first_block =
+        reinterpret_cast<volatile BlockHeader*>(mem_base_ +
+                                                sizeof(SharedMetadata));
+    if (shared_meta()->cookie != 0 ||
+        shared_meta()->size != 0 ||
+        shared_meta()->version != 0 ||
+        shared_meta()->freeptr.load(std::memory_order_relaxed) != 0 ||
+        shared_meta()->flags.load(std::memory_order_relaxed) != 0 ||
+        shared_meta()->id != 0 ||
+        shared_meta()->name != 0 ||
+        shared_meta()->tailptr != 0 ||
+        shared_meta()->queue.cookie != 0 ||
+        shared_meta()->queue.next.load(std::memory_order_relaxed) != 0 ||
+        first_block->size != 0 ||
+        first_block->cookie != 0 ||
+        first_block->type_id.load(std::memory_order_relaxed) != 0 ||
+        first_block->next != 0) {
+      // ...or something malicious has been playing with the metadata.
+      SetCorrupt();
+    }
+
+    // This is still safe to do even if corruption has been detected.
+    shared_meta()->cookie = kGlobalCookie;
+    shared_meta()->size = mem_size_;
+    shared_meta()->page_size = mem_page_;
+    shared_meta()->version = kGlobalVersion;
+    shared_meta()->id = id;
+    shared_meta()->freeptr.store(sizeof(SharedMetadata),
+                                 std::memory_order_release);
+
+    // Set up the queue of iterable allocations.
+    shared_meta()->queue.size = sizeof(BlockHeader);
+    shared_meta()->queue.cookie = kBlockCookieQueue;
+    shared_meta()->queue.next.store(kReferenceQueue, std::memory_order_release);
+    shared_meta()->tailptr.store(kReferenceQueue, std::memory_order_release);
+
+    // Allocate space for the name so other processes can learn it.
+    if (!name.empty()) {
+      const size_t name_length = name.length() + 1;
+      shared_meta()->name = Allocate(name_length, 0);
+      char* name_cstr = GetAsObject<char>(shared_meta()->name, 0);
+      if (name_cstr)
+        memcpy(name_cstr, name.data(), name.length());
+    }
+  } else {
+    if (shared_meta()->size == 0 ||
+        shared_meta()->version == 0 ||
+        shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
+        shared_meta()->tailptr == 0 ||
+        shared_meta()->queue.cookie == 0 ||
+        shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
+      SetCorrupt();
+    }
+    if (!readonly) {
+      // The allocator is attaching to a previously initialized segment of
+      // memory. If the initialization parameters differ, make the best of it
+      // by reducing the local construction parameters to match those of
+      // the actual memory area. This ensures that the local object never
+      // tries to write outside of the original bounds.
+      // Because the fields are const to ensure that no code other than the
+      // constructor makes changes to them as well as to give optimization
+      // hints to the compiler, it's necessary to const-cast them for changes
+      // here.
+      if (shared_meta()->size < mem_size_)
+        *const_cast<uint32_t*>(&mem_size_) = shared_meta()->size;
+      if (shared_meta()->page_size < mem_page_)
+        *const_cast<uint32_t*>(&mem_page_) = shared_meta()->page_size;
+
+      // Ensure that settings are still valid after the above adjustments.
+      if (!IsMemoryAcceptable(base, mem_size_, mem_page_, readonly))
+        SetCorrupt();
+    }
+  }
+}
+
+PersistentMemoryAllocator::~PersistentMemoryAllocator() {
+  // Intentionally empty. It's strictly forbidden to do any memory access here
+  // in case there is some issue with the underlying memory segment. The
+  // "Local" allocator makes use of this to allow deletion of the segment on
+  // the heap from within its destructor.
+}
+
+// Returns the creator-assigned ID stored in the segment's shared metadata.
+uint64_t PersistentMemoryAllocator::Id() const {
+  return shared_meta()->id;
+}
+
+// Returns the name recorded at segment creation, or "" if it is missing or
+// not NUL-terminated (the latter also marks the segment corrupt).
+const char* PersistentMemoryAllocator::Name() const {
+  Reference name_ref = shared_meta()->name;
+  const char* name_cstr = GetAsObject<char>(name_ref, 0);
+  if (!name_cstr)
+    return "";
+
+  // The stored string must end with a NUL within its allocation; anything
+  // else indicates tampering or corruption.
+  size_t name_length = GetAllocSize(name_ref);
+  if (name_cstr[name_length - 1] != '\0') {
+    NOTREACHED();
+    SetCorrupt();
+    return "";
+  }
+
+  return name_cstr;
+}
+
+// Creates UMA histograms that track this allocator's usage percentage and
+// allocation sizes. No-op if |name| is empty or the segment is read-only.
+void PersistentMemoryAllocator::CreateTrackingHistograms(
+    base::StringPiece name) {
+  if (name.empty() || readonly_)
+    return;
+
+  std::string name_string = name.as_string();
+  DCHECK(!used_histogram_);
+  used_histogram_ = LinearHistogram::FactoryGet(
+      "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
+      HistogramBase::kUmaTargetedHistogramFlag);
+
+  DCHECK(!allocs_histogram_);
+  allocs_histogram_ = Histogram::FactoryGet(
+      "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
+      HistogramBase::kUmaTargetedHistogramFlag);
+}
+
+// Returns the amount of the segment that has been allocated, clamped to the
+// segment size in case the stored "freeptr" is invalid.
+size_t PersistentMemoryAllocator::used() const {
+  return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
+                  mem_size_);
+}
+
+// Returns the usable (payload) size of the allocation at |ref|, excluding
+// the block header, or 0 if the reference is invalid or inconsistent.
+size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const {
+  const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
+  if (!block)
+    return 0;
+  uint32_t size = block->size;
+  // Header was verified by GetBlock() but a malicious actor could change
+  // the value between there and here. Check it again.
+  if (size <= sizeof(BlockHeader) || ref + size > mem_size_) {
+    SetCorrupt();
+    return 0;
+  }
+  return size - sizeof(BlockHeader);
+}
+
+// Returns the type-id of the allocation at |ref|, or 0 if |ref| is invalid.
+uint32_t PersistentMemoryAllocator::GetType(Reference ref) const {
+  const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
+  if (!block)
+    return 0;
+  return block->type_id.load(std::memory_order_relaxed);
+}
+
+// Atomically changes the type of |ref| from |from_type_id| to |to_type_id|.
+// Returns false if |ref| is invalid or its current type does not match.
+bool PersistentMemoryAllocator::ChangeType(Reference ref,
+                                           uint32_t to_type_id,
+                                           uint32_t from_type_id) {
+  DCHECK(!readonly_);
+  volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
+  if (!block)
+    return false;
+
+  // This is a "strong" exchange because there is no loop that can retry in
+  // the wake of spurious failures possible with "weak" exchanges.
+  return block->type_id.compare_exchange_strong(from_type_id, to_type_id);
+}
+
+// Allocates |req_size| bytes of type |type_id| from the segment, returning
+// kReferenceNull on failure. Outcomes are recorded in the optional
+// allocation-tracking histogram (a zero-sized sample marks a failure).
+PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
+    size_t req_size,
+    uint32_t type_id) {
+  Reference ref = AllocateImpl(req_size, type_id);
+  if (ref) {
+    // Success: Record this allocation in usage stats (if active).
+    if (allocs_histogram_)
+      allocs_histogram_->Add(static_cast<HistogramBase::Sample>(req_size));
+  } else {
+    // Failure: Record an allocation of zero for tracking.
+    if (allocs_histogram_)
+      allocs_histogram_->Add(0);
+  }
+  return ref;
+}
+
+// Lock-free allocation: claim space by advancing the shared free-pointer
+// with a compare-exchange, retrying from scratch whenever another thread
+// (or process) wins the race.
+PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
+    size_t req_size,
+    uint32_t type_id) {
+  DCHECK(!readonly_);
+
+  // Validate req_size to ensure it won't overflow when used as 32-bit value.
+  if (req_size > kSegmentMaxSize - sizeof(BlockHeader)) {
+    NOTREACHED();
+    return kReferenceNull;
+  }
+
+  // Round up the requested size, plus header, to the next allocation
+  // alignment. kAllocAlignment is a power of two, so the mask arithmetic
+  // below is valid.
+  uint32_t size = static_cast<uint32_t>(req_size + sizeof(BlockHeader));
+  size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
+  if (size <= sizeof(BlockHeader) || size > mem_page_) {
+    NOTREACHED();
+    return kReferenceNull;
+  }
+
+  // Get the current start of unallocated memory. Other threads may
+  // update this at any time and cause us to retry these operations.
+  // This value should be treated as "const" to avoid confusion through
+  // the code below but recognize that any failed compare-exchange operation
+  // involving it will cause it to be loaded with a more recent value. The
+  // code should either exit or restart the loop in that case.
+  /* const */ uint32_t freeptr =
+      shared_meta()->freeptr.load(std::memory_order_acquire);
+
+  // Allocation is lockless so we do all our calculation and then, if saving
+  // indicates a change has occurred since we started, scrap everything and
+  // start over.
+  for (;;) {
+    if (IsCorrupt())
+      return kReferenceNull;
+
+    if (freeptr + size > mem_size_) {
+      SetFlag(&shared_meta()->flags, kFlagFull);
+      return kReferenceNull;
+    }
+
+    // Get pointer to the "free" block. If something has been allocated since
+    // the load of freeptr above, it is still safe as nothing will be written
+    // to that location until after the compare-exchange below.
+    volatile BlockHeader* const block = GetBlock(freeptr, 0, 0, false, true);
+    if (!block) {
+      SetCorrupt();
+      return kReferenceNull;
+    }
+
+    // An allocation cannot cross page boundaries. If it would, create a
+    // "wasted" block and begin again at the top of the next page. This
+    // area could just be left empty but we fill in the block header just
+    // for completeness sake.
+    const uint32_t page_free = mem_page_ - freeptr % mem_page_;
+    if (size > page_free) {
+      if (page_free <= sizeof(BlockHeader)) {
+        SetCorrupt();
+        return kReferenceNull;
+      }
+      const uint32_t new_freeptr = freeptr + page_free;
+      if (shared_meta()->freeptr.compare_exchange_strong(freeptr,
+                                                         new_freeptr)) {
+        block->size = page_free;
+        block->cookie = kBlockCookieWasted;
+      }
+      continue;
+    }
+
+    // Don't leave a slice at the end of a page too small for anything. This
+    // can result in an allocation up to two alignment-sizes greater than the
+    // minimum required by requested-size + header + alignment.
+    if (page_free - size < sizeof(BlockHeader) + kAllocAlignment)
+      size = page_free;
+
+    const uint32_t new_freeptr = freeptr + size;
+    if (new_freeptr > mem_size_) {
+      SetCorrupt();
+      return kReferenceNull;
+    }
+
+    // Save our work. Try again if another thread has completed an allocation
+    // while we were processing. A "weak" exchange would be permissible here
+    // because the code will just loop and try again but the above processing
+    // is significant so make the extra effort of a "strong" exchange.
+    if (!shared_meta()->freeptr.compare_exchange_strong(freeptr, new_freeptr))
+      continue;
+
+    // Given that all memory was zeroed before ever being given to an instance
+    // of this class and given that we only allocate in a monotonic fashion
+    // going forward, it must be that the newly allocated block is completely
+    // full of zeros. If we find anything in the block header that is NOT a
+    // zero then something must have previously run amok through memory,
+    // writing beyond the allocated space and into unallocated space.
+    if (block->size != 0 ||
+        block->cookie != kBlockCookieFree ||
+        block->type_id.load(std::memory_order_relaxed) != 0 ||
+        block->next.load(std::memory_order_relaxed) != 0) {
+      SetCorrupt();
+      return kReferenceNull;
+    }
+
+    block->size = size;
+    block->cookie = kBlockCookieAllocated;
+    block->type_id.store(type_id, std::memory_order_relaxed);
+    return freeptr;
+  }
+}
+
+// Reports the total segment size and an approximation of remaining free
+// space. Zero free space is reported once corruption has been detected.
+void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
+  // Clamp the shared free-pointer to the segment size. The shared value can
+  // be corrupted (or maliciously overwritten) to exceed |mem_size_|, in
+  // which case the unsigned subtraction below would wrap around and report
+  // an enormous amount of free space. GetBlock() applies the same clamp.
+  const uint32_t freeptr = std::min(
+      shared_meta()->freeptr.load(std::memory_order_relaxed), mem_size_);
+  const uint32_t remaining =
+      std::max(mem_size_ - freeptr, (uint32_t)sizeof(BlockHeader));
+  meminfo->total = mem_size_;
+  // |remaining| >= sizeof(BlockHeader), so this subtraction cannot wrap.
+  meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader);
+}
+
+// Appends the allocated block at |ref| to the tail of the shared,
+// singly-linked queue of iterable blocks. Lock-free: safe against
+// concurrent callers in other threads or processes sharing the segment.
+void PersistentMemoryAllocator::MakeIterable(Reference ref) {
+  DCHECK(!readonly_);
+  if (IsCorrupt())
+    return;
+  volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false);
+  if (!block)  // invalid reference
+    return;
+  if (block->next.load(std::memory_order_acquire) != 0)  // Already iterable.
+    return;
+  block->next.store(kReferenceQueue, std::memory_order_release);  // New tail.
+
+  // Try to add this block to the tail of the queue. May take multiple tries.
+  // If so, tail will be automatically updated with a more recent value during
+  // compare-exchange operations.
+  uint32_t tail = shared_meta()->tailptr.load(std::memory_order_acquire);
+  for (;;) {
+    // Acquire the current tail-pointer released by previous call to this
+    // method and validate it.
+    block = GetBlock(tail, 0, 0, true, false);
+    if (!block) {
+      SetCorrupt();
+      return;
+    }
+
+    // Try to insert the block at the tail of the queue. The tail node always
+    // has an existing value of kReferenceQueue; if that is somehow not the
+    // existing value then another thread has acted in the meantime. A "strong"
+    // exchange is necessary so the "else" block does not get executed when
+    // that is not actually the case (which can happen with a "weak" exchange).
+    uint32_t next = kReferenceQueue;  // Will get replaced with existing value.
+    if (block->next.compare_exchange_strong(next, ref,
+                                            std::memory_order_acq_rel,
+                                            std::memory_order_acquire)) {
+      // Update the tail pointer to the new offset. If the "else" clause did
+      // not exist, then this could be a simple Release_Store to set the new
+      // value but because it does, it's possible that other threads could add
+      // one or more nodes at the tail before reaching this point. We don't
+      // have to check the return value because it either operates correctly
+      // or the exact same operation has already been done (by the "else"
+      // clause) on some other thread.
+      shared_meta()->tailptr.compare_exchange_strong(tail, ref,
+                                                     std::memory_order_release,
+                                                     std::memory_order_relaxed);
+      return;
+    } else {
+      // In the unlikely case that a thread crashed or was killed between the
+      // update of "next" and the update of "tailptr", it is necessary to
+      // perform the operation that would have been done. There's no explicit
+      // check for crash/kill which means that this operation may also happen
+      // even when the other thread is in perfect working order which is what
+      // necessitates the CompareAndSwap above.
+      shared_meta()->tailptr.compare_exchange_strong(tail, next,
+                                                     std::memory_order_acq_rel,
+                                                     std::memory_order_acquire);
+    }
+  }
+}
+
+// The "corrupted" state is held both locally and globally (shared). The
+// shared flag can't be trusted since a malicious actor could overwrite it.
+// Because corruption can be detected during read-only operations such as
+// iteration, this method may be called by other "const" methods. In this
+// case, it's safe to discard the constness and modify the local flag and
+// maybe even the shared flag if the underlying data isn't actually read-only.
+void PersistentMemoryAllocator::SetCorrupt() const {
+  LOG(ERROR) << "Corruption detected in shared-memory segment.";
+  // Always record corruption locally (cast away const; see comment above).
+  const_cast<std::atomic<bool>*>(&corrupt_)->store(true,
+                                                   std::memory_order_relaxed);
+  // Only publish to the shared segment when it is writable.
+  if (!readonly_) {
+    SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
+            kFlagCorrupt);
+  }
+}
+
+bool PersistentMemoryAllocator::IsCorrupt() const {
+  // Corruption may have been flagged locally or in the shared segment; the
+  // shared flag is checked only when the local one is clear.
+  const bool flagged =
+      corrupt_.load(std::memory_order_relaxed) ||
+      CheckFlag(&shared_meta()->flags, kFlagCorrupt);
+  if (!flagged)
+    return false;
+  SetCorrupt();  // Make sure all indicators are set.
+  return true;
+}
+
+// Reports whether an allocation has failed for lack of space. The flag is
+// set in AllocateImpl and is never cleared by this class.
+bool PersistentMemoryAllocator::IsFull() const {
+  return CheckFlag(&shared_meta()->flags, kFlagFull);
+}
+
+// Dereference a block |ref| and ensure that it's valid for the desired
+// |type_id| and |size|. The |queue_ok| and |free_ok| flags let internal
+// callers access the queue-sentinel block and not-yet-allocated space,
+// neither of which is permitted for outside callers. By having internal
+// dereferences go through this same function, the allocator is hardened
+// against corruption. Returns null for any reference that fails validation.
+const volatile PersistentMemoryAllocator::BlockHeader*
+PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
+                                    uint32_t size, bool queue_ok,
+                                    bool free_ok) const {
+  // Validation of parameters.
+  if (ref % kAllocAlignment != 0)
+    return nullptr;
+  if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
+    return nullptr;
+  size += sizeof(BlockHeader);  // |size| now covers header plus payload.
+  if (ref + size > mem_size_)
+    return nullptr;
+
+  // Validation of referenced block-header.
+  if (!free_ok) {
+    // Clamp the shared free-pointer to the segment size in case the shared
+    // value has been corrupted to exceed it.
+    uint32_t freeptr = std::min(
+        shared_meta()->freeptr.load(std::memory_order_relaxed), mem_size_);
+    if (ref + size > freeptr)
+      return nullptr;
+    const volatile BlockHeader* const block =
+        reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
+    // The stored block size must cover the request and stay within the
+    // allocated portion of the segment.
+    if (block->size < size)
+      return nullptr;
+    if (ref + block->size > freeptr)
+      return nullptr;
+    // The queue-sentinel block has no allocation cookie; all others must.
+    if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
+      return nullptr;
+    // A |type_id| of zero (kTypeIdAny) matches any stored type.
+    if (type_id != 0 &&
+        block->type_id.load(std::memory_order_relaxed) != type_id) {
+      return nullptr;
+    }
+  }
+
+  // Return pointer to block data.
+  return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
+}
+
+const volatile void* PersistentMemoryAllocator::GetBlockData(
+    Reference ref,
+    uint32_t type_id,
+    uint32_t size) const {
+  DCHECK(size > 0);
+  // Validate the reference, then step past the header to the payload.
+  const volatile BlockHeader* const block =
+      GetBlock(ref, type_id, size, false, false);
+  return block
+             ? reinterpret_cast<const volatile char*>(block) +
+                   sizeof(BlockHeader)
+             : nullptr;
+}
+
+void PersistentMemoryAllocator::UpdateTrackingHistograms() {
+  DCHECK(!readonly_);
+  if (!used_histogram_)
+    return;
+
+  MemoryInfo meminfo;
+  GetMemoryInfo(&meminfo);
+  // Percentage of the segment in use; 64-bit multiply avoids overflow of
+  // total*100 before the division.
+  const HistogramBase::Sample used_percent = static_cast<HistogramBase::Sample>(
+      (meminfo.total - meminfo.free) * 100ULL / meminfo.total);
+  used_histogram_->Add(used_percent);
+}
+
+
+//----- LocalPersistentMemoryAllocator -----------------------------------------
+
+// Allocates |size| bytes of zeroed local memory and hands it to the base
+// allocator as a writable segment with no page restriction. The memory is
+// owned by this object and released in the destructor.
+LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator(
+    size_t size,
+    uint64_t id,
+    base::StringPiece name)
+    : PersistentMemoryAllocator(AllocateLocalMemory(size),
+                                size, 0, id, name, false) {}
+
+LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() {
+  // |mem_base_| is "volatile char*"; cast back to plain char* for release.
+  DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_);
+}
+
+// static
+// Reserves and commits |size| bytes of zero-initialized, demand-paged
+// memory directly from the OS.
+void* LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size) {
+#if defined(OS_WIN)
+  void* address =
+      ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+  DPCHECK(address);
+  return address;
+#elif defined(OS_POSIX)
+  // MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac.
+  // MAP_SHARED is not available on Linux <2.4 but required on Mac.
+  // NOTE(review): on failure mmap returns MAP_FAILED ((void*)-1), not null,
+  // and in non-debug builds that value is returned to the caller — confirm
+  // the construction path tolerates it.
+  void* address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
+                         MAP_ANON | MAP_SHARED, -1, 0);
+  DPCHECK(MAP_FAILED != address);
+  return address;
+#else
+#error This architecture is not (yet) supported.
+#endif
+}
+
+// static
+// Returns memory obtained from AllocateLocalMemory() to the OS.
+void LocalPersistentMemoryAllocator::DeallocateLocalMemory(void* memory,
+                                                           size_t size) {
+#if defined(OS_WIN)
+  // MEM_RELEASE (with dwSize of 0, as the API requires) frees both the
+  // committed pages and the reserved address range created by VirtualAlloc
+  // in AllocateLocalMemory. MEM_DECOMMIT would only decommit the pages,
+  // leaving the address range reserved for the life of the process.
+  BOOL success = ::VirtualFree(memory, 0, MEM_RELEASE);
+  DPCHECK(success);
+#elif defined(OS_POSIX)
+  int result = ::munmap(memory, size);
+  DPCHECK(0 == result);
+#else
+#error This architecture is not (yet) supported.
+#endif
+}
+
+
+//----- SharedPersistentMemoryAllocator ----------------------------------------
+
+// Wraps an already-mapped SharedMemory segment. Ownership of |memory| is
+// taken; the mapping must remain valid for the life of this object.
+SharedPersistentMemoryAllocator::SharedPersistentMemoryAllocator(
+    std::unique_ptr<SharedMemory> memory,
+    uint64_t id,
+    base::StringPiece name,
+    bool read_only)
+    : PersistentMemoryAllocator(static_cast<uint8_t*>(memory->memory()),
+                                memory->mapped_size(),
+                                0,
+                                id,
+                                name,
+                                read_only),
+      shared_memory_(std::move(memory)) {}
+
+SharedPersistentMemoryAllocator::~SharedPersistentMemoryAllocator() {}
+
+// static
+bool SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
+    const SharedMemory& memory) {
+  // Checked with no page-size restriction (0) and as read/write (false).
+  return IsMemoryAcceptable(memory.memory(), memory.mapped_size(), 0, false);
+}
+
+
+#if !defined(OS_NACL)
+//----- FilePersistentMemoryAllocator ------------------------------------------
+
+// Wraps an already-mapped file. Ownership of |file| is taken; a |max_size|
+// of zero uses the file's full mapped length as the segment size.
+FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
+    std::unique_ptr<MemoryMappedFile> file,
+    size_t max_size,
+    uint64_t id,
+    base::StringPiece name,
+    bool read_only)
+    : PersistentMemoryAllocator(const_cast<uint8_t*>(file->data()),
+                                max_size != 0 ? max_size : file->length(),
+                                0,
+                                id,
+                                name,
+                                read_only),
+      mapped_file_(std::move(file)) {}
+
+FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
+
+// static
+bool FilePersistentMemoryAllocator::IsFileAcceptable(
+    const MemoryMappedFile& file,
+    bool read_only) {
+  // Checked with no page-size restriction (0).
+  return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
+}
+#endif // !defined(OS_NACL)
+
+} // namespace base
diff --git a/libchrome/base/metrics/persistent_memory_allocator.h b/libchrome/base/metrics/persistent_memory_allocator.h
new file mode 100644
index 0000000..2fc0d2d
--- /dev/null
+++ b/libchrome/base/metrics/persistent_memory_allocator.h
@@ -0,0 +1,429 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
+#define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
+
+#include <stdint.h>
+
+#include <atomic>
+#include <memory>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+class HistogramBase;
+class MemoryMappedFile;
+class SharedMemory;
+
+// Simple allocator for pieces of a memory block that may be persistent
+// to some storage or shared across multiple processes. This class resides
+// under base/metrics because it was written for that purpose. It is,
+// however, fully general-purpose and can be freely moved to base/memory
+// if other uses are found.
+//
+// This class provides for thread-secure (i.e. safe against other threads
+// or processes that may be compromised and thus have malicious intent)
+// allocation of memory within a designated block and also a mechanism by
+// which other threads can learn of these allocations.
+//
+// There is (currently) no way to release an allocated block of data because
+// doing so would risk invalidating pointers held by other processes and
+// greatly complicate the allocation algorithm.
+//
+// Construction of this object can accept new, clean (i.e. zeroed) memory
+// or previously initialized memory. In the first case, construction must
+// be allowed to complete before letting other allocators attach to the same
+// segment. In other words, don't share the segment until at least one
+// allocator has been attached to it.
+//
+// Note that memory not in active use is not accessed so it is possible to
+// use virtual memory, including memory-mapped files, as backing storage with
+// the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
+class BASE_EXPORT PersistentMemoryAllocator {
+ public:
+ typedef uint32_t Reference;
+
+ // Iterator for going through all iterable memory records in an allocator.
+ // Like the allocator itself, iterators are lock-free and thread-secure.
+ // That means that multiple threads can share an iterator and the same
+ // reference will not be returned twice.
+ //
+ // Iteration, in general, is tolerant of corrupted memory. It will return
+ // what it can and stop only when corruption forces it to. Bad corruption
+ // could cause the same object to be returned many times but it will
+ // eventually quit.
+ class BASE_EXPORT Iterator {
+ public:
+ // Constructs an iterator on a given |allocator|, starting at the beginning.
+ // The allocator must live beyond the lifetime of the iterator. This class
+ // has read-only access to the allocator (hence "const") but the returned
+ // references can be used on a read/write version, too.
+ explicit Iterator(const PersistentMemoryAllocator* allocator);
+
+ // As above but resuming from the |starting_after| reference. The first call
+ // to GetNext() will return the next object found after that reference. The
+ // reference must be to an "iterable" object; references to non-iterable
+ // objects (those that never had MakeIterable() called for them) will cause
+ // a run-time error.
+ Iterator(const PersistentMemoryAllocator* allocator,
+ Reference starting_after);
+
+ // Gets the next iterable, storing that type in |type_return|. The actual
+ // return value is a reference to the allocation inside the allocator or
+ // zero if there are no more. GetNext() may still be called again at a
+ // later time to retrieve any new allocations that have been added.
+ Reference GetNext(uint32_t* type_return);
+
+ // Similar to above but gets the next iterable of a specific |type_match|.
+ // This should not be mixed with calls to GetNext() because any allocations
+ // skipped here due to a type mis-match will never be returned by later
+ // calls to GetNext() meaning it's possible to completely miss entries.
+ Reference GetNextOfType(uint32_t type_match);
+
+ // Converts references to objects. This is a convenience method so that
+ // users of the iterator don't need to also have their own pointer to the
+ // allocator over which the iterator runs in order to retrieve objects.
+ // Because the iterator is not read/write, only "const" objects can be
+ // fetched. Non-const objects can be fetched using the reference on a
+ // non-const (external) pointer to the same allocator (or use const_cast
+ // to remove the qualifier).
+ template <typename T>
+ const T* GetAsObject(Reference ref, uint32_t type_id) const {
+ return allocator_->GetAsObject<T>(ref, type_id);
+ }
+
+ private:
+ // Weak-pointer to memory allocator being iterated over.
+ const PersistentMemoryAllocator* allocator_;
+
+ // The last record that was returned.
+ std::atomic<Reference> last_record_;
+
+ // The number of records found; used for detecting loops.
+ std::atomic<uint32_t> record_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(Iterator);
+ };
+
+ // Returned information about the internal state of the heap.
+ struct MemoryInfo {
+ size_t total;
+ size_t free;
+ };
+
+ enum : Reference {
+ kReferenceNull = 0 // A common "null" reference value.
+ };
+
+ enum : uint32_t {
+ kTypeIdAny = 0 // Match any type-id inside GetAsObject().
+ };
+
+ // This is the standard file extension (suitable for being passed to the
+ // AddExtension() method of base::FilePath) for dumps of persistent memory.
+ static const base::FilePath::CharType kFileExtension[];
+
+ // The allocator operates on any arbitrary block of memory. Creation and
+ // persisting or sharing of that block with another process is the
+ // responsibility of the caller. The allocator needs to know only the
+ // block's |base| address, the total |size| of the block, and any internal
+ // |page| size (zero if not paged) across which allocations should not span.
+ // The |id| is an arbitrary value the caller can use to identify a
+ // particular memory segment. It will only be loaded during the initial
+ // creation of the segment and can be checked by the caller for consistency.
+ // The |name|, if provided, is used to distinguish histograms for this
+ // allocator. Only the primary owner of the segment should define this value;
+ // other processes can learn it from the shared state. If the underlying
+ // memory is |readonly| then no changes will be made to it. The resulting
+ // object should be stored as a "const" pointer.
+ //
+ // PersistentMemoryAllocator does NOT take ownership of the memory block.
+ // The caller must manage it and ensure it stays available throughout the
+ // lifetime of this object.
+ //
+ // Memory segments for sharing must have had an allocator attached to them
+ // before actually being shared. If the memory segment was just created, it
+ // should be zeroed before being passed here. If it was an existing segment,
+ // the values here will be compared to copies stored in the shared segment
+ // as a guard against corruption.
+ //
+ // Make sure that the memory segment is acceptable (see IsMemoryAcceptable()
+ // method below) before construction if the definition of the segment can
+ // vary in any way at run-time. Invalid memory segments will cause a crash.
+ PersistentMemoryAllocator(void* base, size_t size, size_t page_size,
+ uint64_t id, base::StringPiece name,
+ bool readonly);
+ virtual ~PersistentMemoryAllocator();
+
+ // Check if memory segment is acceptable for creation of an Allocator. This
+ // doesn't do any analysis of the data and so doesn't guarantee that the
+  // contents are valid, just that the parameters won't cause the program to
+ // abort. The IsCorrupt() method will report detection of data problems
+ // found during construction and general operation.
+ static bool IsMemoryAcceptable(const void* data, size_t size,
+ size_t page_size, bool readonly);
+
+ // Get the internal identifier for this persistent memory segment.
+ uint64_t Id() const;
+
+ // Get the internal name of this allocator (possibly an empty string).
+ const char* Name() const;
+
+ // Is this segment open only for read?
+ bool IsReadonly() { return readonly_; }
+
+ // Create internal histograms for tracking memory use and allocation sizes
+ // for allocator of |name| (which can simply be the result of Name()). This
+  // is done separately from construction for situations such as when the
+ // histograms will be backed by memory provided by this very allocator.
+ //
+ // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
+ // with the following histograms:
+ // UMA.PersistentAllocator.name.Allocs
+ // UMA.PersistentAllocator.name.UsedPct
+ void CreateTrackingHistograms(base::StringPiece name);
+
+ // Direct access to underlying memory segment. If the segment is shared
+ // across threads or processes, reading data through these values does
+ // not guarantee consistency. Use with care. Do not write.
+ const void* data() const { return const_cast<const char*>(mem_base_); }
+ size_t length() const { return mem_size_; }
+ size_t size() const { return mem_size_; }
+ size_t used() const;
+
+ // Get an object referenced by a |ref|. For safety reasons, the |type_id|
+ // code and size-of(|T|) are compared to ensure the reference is valid
+ // and cannot return an object outside of the memory segment. A |type_id| of
+ // kTypeIdAny (zero) will match any though the size is still checked. NULL is
+ // returned if any problem is detected, such as corrupted storage or incorrect
+ // parameters. Callers MUST check that the returned value is not-null EVERY
+ // TIME before accessing it or risk crashing! Once dereferenced, the pointer
+ // is safe to reuse forever.
+ //
+ // NOTE: Though this method will guarantee that an object of the specified
+ // type can be accessed without going outside the bounds of the memory
+ // segment, it makes no guarantees of the validity of the data within the
+ // object itself. If it is expected that the contents of the segment could
+ // be compromised with malicious intent, the object must be hardened as well.
+ //
+ // Though the persistent data may be "volatile" if it is shared with
+ // other processes, such is not necessarily the case. The internal
+ // "volatile" designation is discarded so as to not propagate the viral
+ // nature of that keyword to the caller. It can add it back, if necessary,
+ // based on knowledge of how the allocator is being used.
+ template <typename T>
+ T* GetAsObject(Reference ref, uint32_t type_id) {
+ static_assert(!std::is_polymorphic<T>::value, "no polymorphic objects");
+ return const_cast<T*>(
+ reinterpret_cast<volatile T*>(GetBlockData(ref, type_id, sizeof(T))));
+ }
+ template <typename T>
+ const T* GetAsObject(Reference ref, uint32_t type_id) const {
+ static_assert(!std::is_polymorphic<T>::value, "no polymorphic objects");
+ return const_cast<const T*>(
+ reinterpret_cast<const volatile T*>(GetBlockData(
+ ref, type_id, sizeof(T))));
+ }
+
+ // Get the number of bytes allocated to a block. This is useful when storing
+ // arrays in order to validate the ending boundary. The returned value will
+ // include any padding added to achieve the required alignment and so could
+ // be larger than given in the original Allocate() request.
+ size_t GetAllocSize(Reference ref) const;
+
+ // Access the internal "type" of an object. This generally isn't necessary
+ // but can be used to "clear" the type and so effectively mark it as deleted
+ // even though the memory stays valid and allocated. Changing the type is
+ // an atomic compare/exchange and so requires knowing the existing value.
+ // It will return false if the existing type is not what is expected.
+ uint32_t GetType(Reference ref) const;
+ bool ChangeType(Reference ref, uint32_t to_type_id, uint32_t from_type_id);
+
+ // Reserve space in the memory segment of the desired |size| and |type_id|.
+ // A return value of zero indicates the allocation failed, otherwise the
+ // returned reference can be used by any process to get a real pointer via
+ // the GetAsObject() call.
+ Reference Allocate(size_t size, uint32_t type_id);
+
+ // Allocated objects can be added to an internal list that can then be
+ // iterated over by other processes. If an allocated object can be found
+ // another way, such as by having its reference within a different object
+ // that will be made iterable, then this call is not necessary. This always
+ // succeeds unless corruption is detected; check IsCorrupted() to find out.
+ // Once an object is made iterable, its position in iteration can never
+ // change; new iterable objects will always be added after it in the series.
+ void MakeIterable(Reference ref);
+
+ // Get the information about the amount of free space in the allocator. The
+ // amount of free space should be treated as approximate due to extras from
+ // alignment and metadata. Concurrent allocations from other threads will
+ // also make the true amount less than what is reported.
+ void GetMemoryInfo(MemoryInfo* meminfo) const;
+
+ // If there is some indication that the memory has become corrupted,
+ // calling this will attempt to prevent further damage by indicating to
+ // all processes that something is not as expected.
+ void SetCorrupt() const;
+
+ // This can be called to determine if corruption has been detected in the
+  // segment, possibly by a malicious actor. Once detected, future allocations
+ // will fail and iteration may not locate all objects.
+ bool IsCorrupt() const;
+
+ // Flag set if an allocation has failed because the memory segment was full.
+ bool IsFull() const;
+
+ // Update those "tracking" histograms which do not get updates during regular
+ // operation, such as how much memory is currently used. This should be
+ // called before such information is to be displayed or uploaded.
+ void UpdateTrackingHistograms();
+
+ protected:
+ volatile char* const mem_base_; // Memory base. (char so sizeof guaranteed 1)
+ const uint32_t mem_size_; // Size of entire memory segment.
+ const uint32_t mem_page_; // Page size allocations shouldn't cross.
+
+ private:
+ struct SharedMetadata;
+ struct BlockHeader;
+ static const uint32_t kAllocAlignment;
+ static const Reference kReferenceQueue;
+
+ // The shared metadata is always located at the top of the memory segment.
+ // These convenience functions eliminate constant casting of the base
+ // pointer within the code.
+ const SharedMetadata* shared_meta() const {
+ return reinterpret_cast<const SharedMetadata*>(
+ const_cast<const char*>(mem_base_));
+ }
+ SharedMetadata* shared_meta() {
+ return reinterpret_cast<SharedMetadata*>(const_cast<char*>(mem_base_));
+ }
+
+ // Actual method for doing the allocation.
+ Reference AllocateImpl(size_t size, uint32_t type_id);
+
+ // Get the block header associated with a specific reference.
+ const volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id,
+ uint32_t size, bool queue_ok,
+ bool free_ok) const;
+ volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id, uint32_t size,
+ bool queue_ok, bool free_ok) {
+ return const_cast<volatile BlockHeader*>(
+ const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
+ ref, type_id, size, queue_ok, free_ok));
+ }
+
+ // Get the actual data within a block associated with a specific reference.
+ const volatile void* GetBlockData(Reference ref, uint32_t type_id,
+ uint32_t size) const;
+ volatile void* GetBlockData(Reference ref, uint32_t type_id,
+ uint32_t size) {
+ return const_cast<volatile void*>(
+ const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
+ ref, type_id, size));
+ }
+
+ const bool readonly_; // Indicates access to read-only memory.
+ std::atomic<bool> corrupt_; // Local version of "corrupted" flag.
+
+ HistogramBase* allocs_histogram_; // Histogram recording allocs.
+ HistogramBase* used_histogram_; // Histogram recording used space.
+
+ friend class PersistentMemoryAllocatorTest;
+ FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
+ DISALLOW_COPY_AND_ASSIGN(PersistentMemoryAllocator);
+};
+
+
+// This allocator uses a local memory block it allocates from the general
+// heap. It is generally used when some kind of "death rattle" handler will
+// save the contents to persistent storage during process shutdown. It is
+// also useful for testing.
+class BASE_EXPORT LocalPersistentMemoryAllocator
+    : public PersistentMemoryAllocator {
+ public:
+  // Creates an allocator that owns |size| bytes of freshly allocated, zeroed
+  // local memory; |id| and |name| are forwarded to the base class.
+  LocalPersistentMemoryAllocator(size_t size, uint64_t id,
+                                 base::StringPiece name);
+  ~LocalPersistentMemoryAllocator() override;
+
+ private:
+  // Allocates a block of local memory of the specified |size|, ensuring that
+  // the memory will not be physically allocated until accessed and will read
+  // as zero when that happens.
+  static void* AllocateLocalMemory(size_t size);
+
+  // Deallocates a block of local |memory| of the specified |size|.
+  static void DeallocateLocalMemory(void* memory, size_t size);
+
+  DISALLOW_COPY_AND_ASSIGN(LocalPersistentMemoryAllocator);
+};
+
+
+// This allocator takes a shared-memory object and performs allocation from
+// it. The memory must be previously mapped via Map() or MapAt(). The allocator
+// takes ownership of the memory object.
+class BASE_EXPORT SharedPersistentMemoryAllocator
+    : public PersistentMemoryAllocator {
+ public:
+  // Takes ownership of |memory|, which must already be mapped.
+  SharedPersistentMemoryAllocator(std::unique_ptr<SharedMemory> memory,
+                                  uint64_t id,
+                                  base::StringPiece name,
+                                  bool read_only);
+  ~SharedPersistentMemoryAllocator() override;
+
+  // Accessor to the owned shared-memory object (non-owning pointer).
+  SharedMemory* shared_memory() { return shared_memory_.get(); }
+
+  // Ensure that the memory isn't so invalid that it won't crash when passing
+  // it to the allocator. This doesn't guarantee the data is valid, just that
+  // it won't cause the program to abort. The existing IsCorrupt() call will
+  // handle the rest.
+  static bool IsSharedMemoryAcceptable(const SharedMemory& memory);
+
+ private:
+  std::unique_ptr<SharedMemory> shared_memory_;
+
+  DISALLOW_COPY_AND_ASSIGN(SharedPersistentMemoryAllocator);
+};
+
+
+#if !defined(OS_NACL) // NACL doesn't support any kind of file access in build.
+// This allocator takes a memory-mapped file object and performs allocation
+// from it. The allocator takes ownership of the file object.
+class BASE_EXPORT FilePersistentMemoryAllocator
+    : public PersistentMemoryAllocator {
+ public:
+  // A |max_size| of zero will use the length of the file as the maximum
+  // size. The |file| object must have been already created with sufficient
+  // permissions (read, read/write, or read/write/extend). Ownership of
+  // |file| is taken.
+  FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
+                                size_t max_size,
+                                uint64_t id,
+                                base::StringPiece name,
+                                bool read_only);
+  ~FilePersistentMemoryAllocator() override;
+
+  // Ensure that the file isn't so invalid that it won't crash when passing it
+  // to the allocator. This doesn't guarantee the file is valid, just that it
+  // won't cause the program to abort. The existing IsCorrupt() call will
+  // handle the rest.
+  static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);
+
+ private:
+  std::unique_ptr<MemoryMappedFile> mapped_file_;
+
+  DISALLOW_COPY_AND_ASSIGN(FilePersistentMemoryAllocator);
+};
+#endif // !defined(OS_NACL)
+
+} // namespace base
+
+#endif // BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
diff --git a/libchrome/base/metrics/persistent_memory_allocator_unittest.cc b/libchrome/base/metrics/persistent_memory_allocator_unittest.cc
new file mode 100644
index 0000000..a3d90c2
--- /dev/null
+++ b/libchrome/base/metrics/persistent_memory_allocator_unittest.cc
@@ -0,0 +1,815 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_memory_allocator.h"
+
+#include <memory>
+
+#include "base/files/file.h"
+#include "base/files/file_util.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/memory/shared_memory.h"
+#include "base/metrics/histogram.h"
+#include "base/rand_util.h"
+#include "base/strings/safe_sprintf.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/simple_thread.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace {
+
+const uint32_t TEST_MEMORY_SIZE = 1 << 20; // 1 MiB
+const uint32_t TEST_MEMORY_PAGE = 64 << 10; // 64 KiB
+const uint32_t TEST_ID = 12345;
+const char TEST_NAME[] = "TestAllocator";
+
+} // namespace
+
+namespace base {
+
+typedef PersistentMemoryAllocator::Reference Reference;
+
+class PersistentMemoryAllocatorTest : public testing::Test {
+ public:
+  // This can't be statically initialized because its value isn't defined
+ // in the PersistentMemoryAllocator header file. Instead, it's simply set
+ // in the constructor.
+ uint32_t kAllocAlignment;
+
+ struct TestObject1 {
+ int onething;
+ char oranother;
+ };
+
+ struct TestObject2 {
+ int thiis;
+ long that;
+ float andthe;
+ char other;
+ double thing;
+ };
+
+ PersistentMemoryAllocatorTest() {
+ kAllocAlignment = GetAllocAlignment();
+ mem_segment_.reset(new char[TEST_MEMORY_SIZE]);
+ }
+
+ void SetUp() override {
+ allocator_.reset();
+ ::memset(mem_segment_.get(), 0, TEST_MEMORY_SIZE);
+ allocator_.reset(new PersistentMemoryAllocator(
+ mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE,
+ TEST_ID, TEST_NAME, false));
+ allocator_->CreateTrackingHistograms(allocator_->Name());
+ }
+
+ void TearDown() override {
+ allocator_.reset();
+ }
+
+ unsigned CountIterables() {
+ PersistentMemoryAllocator::Iterator iter(allocator_.get());
+ uint32_t type;
+ unsigned count = 0;
+ while (iter.GetNext(&type) != 0) {
+ ++count;
+ }
+ return count;
+ }
+
+ static uint32_t GetAllocAlignment() {
+ return PersistentMemoryAllocator::kAllocAlignment;
+ }
+
+ protected:
+ std::unique_ptr<char[]> mem_segment_;
+ std::unique_ptr<PersistentMemoryAllocator> allocator_;
+};
+
+TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
+ std::string base_name(TEST_NAME);
+ EXPECT_EQ(TEST_ID, allocator_->Id());
+ EXPECT_TRUE(allocator_->used_histogram_);
+ EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".UsedPct",
+ allocator_->used_histogram_->histogram_name());
+ EXPECT_TRUE(allocator_->allocs_histogram_);
+ EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".Allocs",
+ allocator_->allocs_histogram_->histogram_name());
+
+ // Get base memory info for later comparison.
+ PersistentMemoryAllocator::MemoryInfo meminfo0;
+ allocator_->GetMemoryInfo(&meminfo0);
+ EXPECT_EQ(TEST_MEMORY_SIZE, meminfo0.total);
+ EXPECT_GT(meminfo0.total, meminfo0.free);
+
+ // Validate allocation of test object and make sure it can be referenced
+ // and all metadata looks correct.
+ Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
+ EXPECT_NE(0U, block1);
+ EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1, 1));
+ EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1, 1));
+ EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1));
+ EXPECT_GT(sizeof(TestObject1) + kAllocAlignment,
+ allocator_->GetAllocSize(block1));
+ PersistentMemoryAllocator::MemoryInfo meminfo1;
+ allocator_->GetMemoryInfo(&meminfo1);
+ EXPECT_EQ(meminfo0.total, meminfo1.total);
+ EXPECT_GT(meminfo0.free, meminfo1.free);
+
+ // Ensure that the test-object can be made iterable.
+ PersistentMemoryAllocator::Iterator iter1a(allocator_.get());
+ uint32_t type;
+ EXPECT_EQ(0U, iter1a.GetNext(&type));
+ allocator_->MakeIterable(block1);
+ EXPECT_EQ(block1, iter1a.GetNext(&type));
+ EXPECT_EQ(1U, type);
+ EXPECT_EQ(0U, iter1a.GetNext(&type));
+
+ // Create second test-object and ensure everything is good and it cannot
+ // be confused with test-object of another type.
+ Reference block2 = allocator_->Allocate(sizeof(TestObject2), 2);
+ EXPECT_NE(0U, block2);
+ EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2, 2));
+ EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block2, 1));
+ EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2));
+ EXPECT_GT(sizeof(TestObject2) + kAllocAlignment,
+ allocator_->GetAllocSize(block2));
+ PersistentMemoryAllocator::MemoryInfo meminfo2;
+ allocator_->GetMemoryInfo(&meminfo2);
+ EXPECT_EQ(meminfo1.total, meminfo2.total);
+ EXPECT_GT(meminfo1.free, meminfo2.free);
+
+ // Ensure that second test-object can also be made iterable.
+ allocator_->MakeIterable(block2);
+ EXPECT_EQ(block2, iter1a.GetNext(&type));
+ EXPECT_EQ(2U, type);
+ EXPECT_EQ(0U, iter1a.GetNext(&type));
+
+ // Check that iteration can begin after an arbitrary location.
+ PersistentMemoryAllocator::Iterator iter1b(allocator_.get(), block1);
+ EXPECT_EQ(block2, iter1b.GetNext(&type));
+ EXPECT_EQ(0U, iter1b.GetNext(&type));
+
+  // Ensure nothing has gone noticeably wrong.
+ EXPECT_FALSE(allocator_->IsFull());
+ EXPECT_FALSE(allocator_->IsCorrupt());
+
+ // Check the internal histogram record of used memory.
+ allocator_->UpdateTrackingHistograms();
+ std::unique_ptr<HistogramSamples> used_samples(
+ allocator_->used_histogram_->SnapshotSamples());
+ EXPECT_TRUE(used_samples);
+ EXPECT_EQ(1, used_samples->TotalCount());
+
+ // Check the internal histogram record of allocation requests.
+ std::unique_ptr<HistogramSamples> allocs_samples(
+ allocator_->allocs_histogram_->SnapshotSamples());
+ EXPECT_TRUE(allocs_samples);
+ EXPECT_EQ(2, allocs_samples->TotalCount());
+ EXPECT_EQ(0, allocs_samples->GetCount(0));
+ EXPECT_EQ(1, allocs_samples->GetCount(sizeof(TestObject1)));
+ EXPECT_EQ(1, allocs_samples->GetCount(sizeof(TestObject2)));
+#if !DCHECK_IS_ON() // DCHECK builds will die at a NOTREACHED().
+ EXPECT_EQ(0U, allocator_->Allocate(TEST_MEMORY_SIZE + 1, 0));
+ allocs_samples = allocator_->allocs_histogram_->SnapshotSamples();
+ EXPECT_EQ(3, allocs_samples->TotalCount());
+ EXPECT_EQ(1, allocs_samples->GetCount(0));
+#endif
+
+  // Check that an object's type can be changed.
+ EXPECT_EQ(2U, allocator_->GetType(block2));
+ allocator_->ChangeType(block2, 3, 2);
+ EXPECT_EQ(3U, allocator_->GetType(block2));
+ allocator_->ChangeType(block2, 2, 3);
+ EXPECT_EQ(2U, allocator_->GetType(block2));
+
+ // Create second allocator (read/write) using the same memory segment.
+ std::unique_ptr<PersistentMemoryAllocator> allocator2(
+ new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
+ TEST_MEMORY_PAGE, 0, "", false));
+ EXPECT_EQ(TEST_ID, allocator2->Id());
+ EXPECT_FALSE(allocator2->used_histogram_);
+ EXPECT_FALSE(allocator2->allocs_histogram_);
+ EXPECT_NE(allocator2->allocs_histogram_, allocator_->allocs_histogram_);
+
+ // Ensure that iteration and access through second allocator works.
+ PersistentMemoryAllocator::Iterator iter2(allocator2.get());
+ EXPECT_EQ(block1, iter2.GetNext(&type));
+ EXPECT_EQ(block2, iter2.GetNext(&type));
+ EXPECT_EQ(0U, iter2.GetNext(&type));
+ EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1, 1));
+ EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2, 2));
+
+ // Create a third allocator (read-only) using the same memory segment.
+ std::unique_ptr<const PersistentMemoryAllocator> allocator3(
+ new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
+ TEST_MEMORY_PAGE, 0, "", true));
+ EXPECT_EQ(TEST_ID, allocator3->Id());
+ EXPECT_FALSE(allocator3->used_histogram_);
+ EXPECT_FALSE(allocator3->allocs_histogram_);
+
+ // Ensure that iteration and access through third allocator works.
+ PersistentMemoryAllocator::Iterator iter3(allocator3.get());
+ EXPECT_EQ(block1, iter3.GetNext(&type));
+ EXPECT_EQ(block2, iter3.GetNext(&type));
+ EXPECT_EQ(0U, iter3.GetNext(&type));
+ EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1, 1));
+ EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2, 2));
+
+ // Ensure that GetNextOfType works.
+ PersistentMemoryAllocator::Iterator iter1c(allocator_.get());
+ EXPECT_EQ(block2, iter1c.GetNextOfType(2));
+ EXPECT_EQ(0U, iter1c.GetNextOfType(2));
+}
+
+TEST_F(PersistentMemoryAllocatorTest, PageTest) {
+ // This allocation will go into the first memory page.
+ Reference block1 = allocator_->Allocate(TEST_MEMORY_PAGE / 2, 1);
+ EXPECT_LT(0U, block1);
+ EXPECT_GT(TEST_MEMORY_PAGE, block1);
+
+ // This allocation won't fit in same page as previous block.
+ Reference block2 =
+ allocator_->Allocate(TEST_MEMORY_PAGE - 2 * kAllocAlignment, 2);
+ EXPECT_EQ(TEST_MEMORY_PAGE, block2);
+
+ // This allocation will also require a new page.
+ Reference block3 = allocator_->Allocate(2 * kAllocAlignment + 99, 3);
+ EXPECT_EQ(2U * TEST_MEMORY_PAGE, block3);
+}
+
+// A simple thread that takes an allocator and repeatedly allocates random-
+// sized chunks from it until no more can be done.
+class AllocatorThread : public SimpleThread {
+ public:
+ AllocatorThread(const std::string& name,
+ void* base,
+ uint32_t size,
+ uint32_t page_size)
+ : SimpleThread(name, Options()),
+ count_(0),
+ iterable_(0),
+ allocator_(base, size, page_size, 0, std::string(), false) {}
+
+ void Run() override {
+ for (;;) {
+ uint32_t size = RandInt(1, 99);
+ uint32_t type = RandInt(100, 999);
+ Reference block = allocator_.Allocate(size, type);
+ if (!block)
+ break;
+
+ count_++;
+ if (RandInt(0, 1)) {
+ allocator_.MakeIterable(block);
+ iterable_++;
+ }
+ }
+ }
+
+ unsigned iterable() { return iterable_; }
+ unsigned count() { return count_; }
+
+ private:
+ unsigned count_;
+ unsigned iterable_;
+ PersistentMemoryAllocator allocator_;
+};
+
+// Test parallel allocation/iteration and ensure consistency across all
+// instances.
+TEST_F(PersistentMemoryAllocatorTest, ParallelismTest) {
+ void* memory = mem_segment_.get();
+ AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+ AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+ AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+ AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+ AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+
+ t1.Start();
+ t2.Start();
+ t3.Start();
+ t4.Start();
+ t5.Start();
+
+ unsigned last_count = 0;
+ do {
+ unsigned count = CountIterables();
+ EXPECT_LE(last_count, count);
+ } while (!allocator_->IsCorrupt() && !allocator_->IsFull());
+
+ t1.Join();
+ t2.Join();
+ t3.Join();
+ t4.Join();
+ t5.Join();
+
+ EXPECT_FALSE(allocator_->IsCorrupt());
+ EXPECT_TRUE(allocator_->IsFull());
+ EXPECT_EQ(CountIterables(),
+ t1.iterable() + t2.iterable() + t3.iterable() + t4.iterable() +
+ t5.iterable());
+}
+
+// A simple thread that counts objects by iterating through an allocator.
+class CounterThread : public SimpleThread {
+ public:
+ CounterThread(const std::string& name,
+ PersistentMemoryAllocator::Iterator* iterator,
+ Lock* lock,
+ ConditionVariable* condition,
+ bool* wake_up)
+ : SimpleThread(name, Options()),
+ iterator_(iterator),
+ lock_(lock),
+ condition_(condition),
+ count_(0),
+ wake_up_(wake_up) {}
+
+ void Run() override {
+ // Wait so all threads can start at approximately the same time.
+ // Best performance comes from releasing a single worker which then
+ // releases the next, etc., etc.
+ {
+ AutoLock autolock(*lock_);
+
+ // Before calling Wait(), make sure that the wake up condition
+ // has not already passed. Also, since spurious signal events
+ // are possible, check the condition in a while loop to make
+ // sure that the wake up condition is met when this thread
+ // returns from the Wait().
+ // See usage comments in src/base/synchronization/condition_variable.h.
+ while (!*wake_up_) {
+ condition_->Wait();
+ condition_->Signal();
+ }
+ }
+
+ uint32_t type;
+ while (iterator_->GetNext(&type) != 0) {
+ ++count_;
+ }
+ }
+
+ unsigned count() { return count_; }
+
+ private:
+ PersistentMemoryAllocator::Iterator* iterator_;
+ Lock* lock_;
+ ConditionVariable* condition_;
+ unsigned count_;
+ bool* wake_up_;
+
+ DISALLOW_COPY_AND_ASSIGN(CounterThread);
+};
+
+// Ensure that parallel iteration returns the same number of objects as
+// single-threaded iteration.
+TEST_F(PersistentMemoryAllocatorTest, IteratorParallelismTest) {
+ // Fill the memory segment with random allocations.
+ unsigned iterable_count = 0;
+ for (;;) {
+ uint32_t size = RandInt(1, 99);
+ uint32_t type = RandInt(100, 999);
+ Reference block = allocator_->Allocate(size, type);
+ if (!block)
+ break;
+ allocator_->MakeIterable(block);
+ ++iterable_count;
+ }
+ EXPECT_FALSE(allocator_->IsCorrupt());
+ EXPECT_TRUE(allocator_->IsFull());
+ EXPECT_EQ(iterable_count, CountIterables());
+
+ PersistentMemoryAllocator::Iterator iter(allocator_.get());
+ Lock lock;
+ ConditionVariable condition(&lock);
+ bool wake_up = false;
+
+ CounterThread t1("t1", &iter, &lock, &condition, &wake_up);
+ CounterThread t2("t2", &iter, &lock, &condition, &wake_up);
+ CounterThread t3("t3", &iter, &lock, &condition, &wake_up);
+ CounterThread t4("t4", &iter, &lock, &condition, &wake_up);
+ CounterThread t5("t5", &iter, &lock, &condition, &wake_up);
+
+ t1.Start();
+ t2.Start();
+ t3.Start();
+ t4.Start();
+ t5.Start();
+
+ // Take the lock and set the wake up condition to true. This helps to
+ // avoid a race condition where the Signal() event is called before
+ // all the threads have reached the Wait() and thus never get woken up.
+ {
+ AutoLock autolock(lock);
+ wake_up = true;
+ }
+
+ // This will release all the waiting threads.
+ condition.Signal();
+
+ t1.Join();
+ t2.Join();
+ t3.Join();
+ t4.Join();
+ t5.Join();
+
+ EXPECT_EQ(iterable_count,
+ t1.count() + t2.count() + t3.count() + t4.count() + t5.count());
+
+#if 0
+ // These ensure that the threads don't run sequentially. It shouldn't be
+ // enabled in general because it could lead to a flaky test if it happens
+ // simply by chance but it is useful during development to ensure that the
+ // test is working correctly.
+ EXPECT_NE(iterable_count, t1.count());
+ EXPECT_NE(iterable_count, t2.count());
+ EXPECT_NE(iterable_count, t3.count());
+ EXPECT_NE(iterable_count, t4.count());
+ EXPECT_NE(iterable_count, t5.count());
+#endif
+}
+
+// This test doesn't verify anything other than it doesn't crash. Its goal
+// is to find coding errors that aren't otherwise tested for, much like a
+// "fuzzer" would.
+// This test is supposed to fail on TSAN bot (crbug.com/579867).
+#if defined(THREAD_SANITIZER)
+#define MAYBE_CorruptionTest DISABLED_CorruptionTest
+#else
+#define MAYBE_CorruptionTest CorruptionTest
+#endif
+TEST_F(PersistentMemoryAllocatorTest, MAYBE_CorruptionTest) {
+ char* memory = mem_segment_.get();
+ AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+ AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+ AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+ AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+ AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+
+ t1.Start();
+ t2.Start();
+ t3.Start();
+ t4.Start();
+ t5.Start();
+
+ do {
+ size_t offset = RandInt(0, TEST_MEMORY_SIZE - 1);
+ char value = RandInt(0, 255);
+ memory[offset] = value;
+ } while (!allocator_->IsCorrupt() && !allocator_->IsFull());
+
+ t1.Join();
+ t2.Join();
+ t3.Join();
+ t4.Join();
+ t5.Join();
+
+ CountIterables();
+}
+
+// Attempt to cause crashes or loops by expressly creating dangerous conditions.
+TEST_F(PersistentMemoryAllocatorTest, MaliciousTest) {
+ Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
+ Reference block2 = allocator_->Allocate(sizeof(TestObject1), 2);
+ Reference block3 = allocator_->Allocate(sizeof(TestObject1), 3);
+ Reference block4 = allocator_->Allocate(sizeof(TestObject1), 3);
+ Reference block5 = allocator_->Allocate(sizeof(TestObject1), 3);
+ allocator_->MakeIterable(block1);
+ allocator_->MakeIterable(block2);
+ allocator_->MakeIterable(block3);
+ allocator_->MakeIterable(block4);
+ allocator_->MakeIterable(block5);
+ EXPECT_EQ(5U, CountIterables());
+ EXPECT_FALSE(allocator_->IsCorrupt());
+
+ // Create loop in iterable list and ensure it doesn't hang. The return value
+ // from CountIterables() in these cases is unpredictable. If there is a
+ // failure, the call will hang and the test killed for taking too long.
+ uint32_t* header4 = (uint32_t*)(mem_segment_.get() + block4);
+ EXPECT_EQ(block5, header4[3]);
+ header4[3] = block4;
+ CountIterables(); // loop: 1-2-3-4-4
+ EXPECT_TRUE(allocator_->IsCorrupt());
+
+ // Test where loop goes back to previous block.
+ header4[3] = block3;
+ CountIterables(); // loop: 1-2-3-4-3
+
+ // Test where loop goes back to the beginning.
+ header4[3] = block1;
+ CountIterables(); // loop: 1-2-3-4-1
+}
+
+
+//----- LocalPersistentMemoryAllocator -----------------------------------------
+
+TEST(LocalPersistentMemoryAllocatorTest, CreationTest) {
+ LocalPersistentMemoryAllocator allocator(TEST_MEMORY_SIZE, 42, "");
+ EXPECT_EQ(42U, allocator.Id());
+ EXPECT_NE(0U, allocator.Allocate(24, 1));
+ EXPECT_FALSE(allocator.IsFull());
+ EXPECT_FALSE(allocator.IsCorrupt());
+}
+
+
+//----- SharedPersistentMemoryAllocator ----------------------------------------
+
+TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
+ SharedMemoryHandle shared_handle_1;
+ SharedMemoryHandle shared_handle_2;
+
+ PersistentMemoryAllocator::MemoryInfo meminfo1;
+ Reference r123, r456, r789;
+ {
+ std::unique_ptr<SharedMemory> shmem1(new SharedMemory());
+ ASSERT_TRUE(shmem1->CreateAndMapAnonymous(TEST_MEMORY_SIZE));
+ SharedPersistentMemoryAllocator local(std::move(shmem1), TEST_ID, "",
+ false);
+ EXPECT_FALSE(local.IsReadonly());
+ r123 = local.Allocate(123, 123);
+ r456 = local.Allocate(456, 456);
+ r789 = local.Allocate(789, 789);
+ local.MakeIterable(r123);
+ local.ChangeType(r456, 654, 456);
+ local.MakeIterable(r789);
+ local.GetMemoryInfo(&meminfo1);
+ EXPECT_FALSE(local.IsFull());
+ EXPECT_FALSE(local.IsCorrupt());
+
+ ASSERT_TRUE(local.shared_memory()->ShareToProcess(GetCurrentProcessHandle(),
+ &shared_handle_1));
+ ASSERT_TRUE(local.shared_memory()->ShareToProcess(GetCurrentProcessHandle(),
+ &shared_handle_2));
+ }
+
+ // Read-only test.
+ std::unique_ptr<SharedMemory> shmem2(new SharedMemory(shared_handle_1,
+ /*readonly=*/true));
+ ASSERT_TRUE(shmem2->Map(TEST_MEMORY_SIZE));
+
+ SharedPersistentMemoryAllocator shalloc2(std::move(shmem2), 0, "", true);
+ EXPECT_TRUE(shalloc2.IsReadonly());
+ EXPECT_EQ(TEST_ID, shalloc2.Id());
+ EXPECT_FALSE(shalloc2.IsFull());
+ EXPECT_FALSE(shalloc2.IsCorrupt());
+
+ PersistentMemoryAllocator::Iterator iter2(&shalloc2);
+ uint32_t type;
+ EXPECT_EQ(r123, iter2.GetNext(&type));
+ EXPECT_EQ(r789, iter2.GetNext(&type));
+ EXPECT_EQ(0U, iter2.GetNext(&type));
+
+ EXPECT_EQ(123U, shalloc2.GetType(r123));
+ EXPECT_EQ(654U, shalloc2.GetType(r456));
+ EXPECT_EQ(789U, shalloc2.GetType(r789));
+
+ PersistentMemoryAllocator::MemoryInfo meminfo2;
+ shalloc2.GetMemoryInfo(&meminfo2);
+ EXPECT_EQ(meminfo1.total, meminfo2.total);
+ EXPECT_EQ(meminfo1.free, meminfo2.free);
+
+ // Read/write test.
+ std::unique_ptr<SharedMemory> shmem3(new SharedMemory(shared_handle_2,
+ /*readonly=*/false));
+ ASSERT_TRUE(shmem3->Map(TEST_MEMORY_SIZE));
+
+ SharedPersistentMemoryAllocator shalloc3(std::move(shmem3), 0, "", false);
+ EXPECT_FALSE(shalloc3.IsReadonly());
+ EXPECT_EQ(TEST_ID, shalloc3.Id());
+ EXPECT_FALSE(shalloc3.IsFull());
+ EXPECT_FALSE(shalloc3.IsCorrupt());
+
+ PersistentMemoryAllocator::Iterator iter3(&shalloc3);
+ EXPECT_EQ(r123, iter3.GetNext(&type));
+ EXPECT_EQ(r789, iter3.GetNext(&type));
+ EXPECT_EQ(0U, iter3.GetNext(&type));
+
+ EXPECT_EQ(123U, shalloc3.GetType(r123));
+ EXPECT_EQ(654U, shalloc3.GetType(r456));
+ EXPECT_EQ(789U, shalloc3.GetType(r789));
+
+ PersistentMemoryAllocator::MemoryInfo meminfo3;
+ shalloc3.GetMemoryInfo(&meminfo3);
+ EXPECT_EQ(meminfo1.total, meminfo3.total);
+ EXPECT_EQ(meminfo1.free, meminfo3.free);
+
+ // Interconnectivity test.
+ Reference obj = shalloc3.Allocate(42, 42);
+ ASSERT_TRUE(obj);
+ shalloc3.MakeIterable(obj);
+ EXPECT_EQ(obj, iter2.GetNext(&type));
+ EXPECT_EQ(42U, type);
+}
+
+
+#if !defined(OS_NACL)
+//----- FilePersistentMemoryAllocator ------------------------------------------
+
+TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
+ ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.path().AppendASCII("persistent_memory");
+
+ PersistentMemoryAllocator::MemoryInfo meminfo1;
+ Reference r123, r456, r789;
+ {
+ LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
+ EXPECT_FALSE(local.IsReadonly());
+ r123 = local.Allocate(123, 123);
+ r456 = local.Allocate(456, 456);
+ r789 = local.Allocate(789, 789);
+ local.MakeIterable(r123);
+ local.ChangeType(r456, 654, 456);
+ local.MakeIterable(r789);
+ local.GetMemoryInfo(&meminfo1);
+ EXPECT_FALSE(local.IsFull());
+ EXPECT_FALSE(local.IsCorrupt());
+
+ File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+ ASSERT_TRUE(writer.IsValid());
+ writer.Write(0, (const char*)local.data(), local.used());
+ }
+
+ std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+ mmfile->Initialize(file_path);
+ EXPECT_TRUE(mmfile->IsValid());
+ const size_t mmlength = mmfile->length();
+ EXPECT_GE(meminfo1.total, mmlength);
+
+ FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", true);
+ EXPECT_TRUE(file.IsReadonly());
+ EXPECT_EQ(TEST_ID, file.Id());
+ EXPECT_FALSE(file.IsFull());
+ EXPECT_FALSE(file.IsCorrupt());
+
+ PersistentMemoryAllocator::Iterator iter(&file);
+ uint32_t type;
+ EXPECT_EQ(r123, iter.GetNext(&type));
+ EXPECT_EQ(r789, iter.GetNext(&type));
+ EXPECT_EQ(0U, iter.GetNext(&type));
+
+ EXPECT_EQ(123U, file.GetType(r123));
+ EXPECT_EQ(654U, file.GetType(r456));
+ EXPECT_EQ(789U, file.GetType(r789));
+
+ PersistentMemoryAllocator::MemoryInfo meminfo2;
+ file.GetMemoryInfo(&meminfo2);
+ EXPECT_GE(meminfo1.total, meminfo2.total);
+ EXPECT_GE(meminfo1.free, meminfo2.free);
+ EXPECT_EQ(mmlength, meminfo2.total);
+ EXPECT_EQ(0U, meminfo2.free);
+}
+
+TEST(FilePersistentMemoryAllocatorTest, ExtendTest) {
+ ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.path().AppendASCII("extend_test");
+ MemoryMappedFile::Region region = {0, 16 << 10}; // 16KiB maximum size.
+
+ // Start with a small but valid file of persistent data.
+ ASSERT_FALSE(PathExists(file_path));
+ {
+ LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
+ local.Allocate(1, 1);
+ local.Allocate(11, 11);
+
+ File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+ ASSERT_TRUE(writer.IsValid());
+ writer.Write(0, (const char*)local.data(), local.used());
+ }
+ ASSERT_TRUE(PathExists(file_path));
+ int64_t before_size;
+ ASSERT_TRUE(GetFileSize(file_path, &before_size));
+
+ // Map it as an extendable read/write file and append to it.
+ {
+ std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+ mmfile->Initialize(
+ File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
+ region, MemoryMappedFile::READ_WRITE_EXTEND);
+ FilePersistentMemoryAllocator allocator(std::move(mmfile), region.size, 0,
+ "", false);
+ EXPECT_EQ(static_cast<size_t>(before_size), allocator.used());
+
+ allocator.Allocate(111, 111);
+ EXPECT_LT(static_cast<size_t>(before_size), allocator.used());
+ }
+
+ // Validate that append worked.
+ int64_t after_size;
+ ASSERT_TRUE(GetFileSize(file_path, &after_size));
+ EXPECT_LT(before_size, after_size);
+
+ // Verify that it's still an acceptable file.
+ {
+ std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+ mmfile->Initialize(
+ File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
+ region, MemoryMappedFile::READ_WRITE_EXTEND);
+ EXPECT_TRUE(FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true));
+ EXPECT_TRUE(
+ FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, false));
+ }
+}
+
+TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
+ const uint32_t kAllocAlignment =
+ PersistentMemoryAllocatorTest::GetAllocAlignment();
+ ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+ LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
+ local.MakeIterable(local.Allocate(1, 1));
+ local.MakeIterable(local.Allocate(11, 11));
+ const size_t minsize = local.used();
+ std::unique_ptr<char[]> garbage(new char[minsize]);
+ RandBytes(garbage.get(), minsize);
+
+ std::unique_ptr<MemoryMappedFile> mmfile;
+ char filename[100];
+ for (size_t filesize = minsize; filesize > 0; --filesize) {
+ strings::SafeSPrintf(filename, "memory_%d_A", filesize);
+ FilePath file_path = temp_dir.path().AppendASCII(filename);
+ ASSERT_FALSE(PathExists(file_path));
+ {
+ File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+ ASSERT_TRUE(writer.IsValid());
+ writer.Write(0, (const char*)local.data(), filesize);
+ }
+ ASSERT_TRUE(PathExists(file_path));
+
+    // Request read/write access for some sizes that are a multiple of the
+ // allocator's alignment size. The allocator is strict about file size
+ // being a multiple of its internal alignment when doing read/write access.
+ const bool read_only = (filesize % (2 * kAllocAlignment)) != 0;
+ const uint32_t file_flags =
+ File::FLAG_OPEN | File::FLAG_READ | (read_only ? 0 : File::FLAG_WRITE);
+ const MemoryMappedFile::Access map_access =
+ read_only ? MemoryMappedFile::READ_ONLY : MemoryMappedFile::READ_WRITE;
+
+ mmfile.reset(new MemoryMappedFile());
+ mmfile->Initialize(File(file_path, file_flags), map_access);
+ EXPECT_EQ(filesize, mmfile->length());
+ if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
+ // Make sure construction doesn't crash. It will, however, cause
+      // error messages warning about a corrupted memory segment.
+ FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
+ read_only);
+ // Also make sure that iteration doesn't crash.
+ PersistentMemoryAllocator::Iterator iter(&allocator);
+ uint32_t type_id;
+ Reference ref;
+ while ((ref = iter.GetNext(&type_id)) != 0) {
+ const char* data = allocator.GetAsObject<char>(ref, 0);
+ uint32_t type = allocator.GetType(ref);
+ size_t size = allocator.GetAllocSize(ref);
+ // Ensure compiler can't optimize-out above variables.
+ (void)data;
+ (void)type;
+ (void)size;
+ }
+
+ // Ensure that short files are detected as corrupt and full files are not.
+ EXPECT_EQ(filesize != minsize, allocator.IsCorrupt());
+ } else {
+ // For filesize >= minsize, the file must be acceptable. This
+ // else clause (file-not-acceptable) should be reached only if
+ // filesize < minsize.
+ EXPECT_LT(filesize, minsize);
+ }
+
+ strings::SafeSPrintf(filename, "memory_%d_B", filesize);
+ file_path = temp_dir.path().AppendASCII(filename);
+ ASSERT_FALSE(PathExists(file_path));
+ {
+ File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+ ASSERT_TRUE(writer.IsValid());
+ writer.Write(0, (const char*)garbage.get(), filesize);
+ }
+ ASSERT_TRUE(PathExists(file_path));
+
+ mmfile.reset(new MemoryMappedFile());
+ mmfile->Initialize(File(file_path, file_flags), map_access);
+ EXPECT_EQ(filesize, mmfile->length());
+ if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
+ // Make sure construction doesn't crash. It will, however, cause
+      // error messages warning about a corrupted memory segment.
+ FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
+ read_only);
+ EXPECT_TRUE(allocator.IsCorrupt()); // Garbage data so it should be.
+ } else {
+ // For filesize >= minsize, the file must be acceptable. This
+ // else clause (file-not-acceptable) should be reached only if
+ // filesize < minsize.
+ EXPECT_GT(minsize, filesize);
+ }
+ }
+}
+#endif // !defined(OS_NACL)
+
+} // namespace base
diff --git a/libchrome/base/metrics/persistent_sample_map.cc b/libchrome/base/metrics/persistent_sample_map.cc
new file mode 100644
index 0000000..15f83cd
--- /dev/null
+++ b/libchrome/base/metrics/persistent_sample_map.cc
@@ -0,0 +1,289 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_sample_map.h"
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/stl_util.h"
+
+namespace base {
+
+typedef HistogramBase::Count Count;
+typedef HistogramBase::Sample Sample;
+
+namespace {
+
+// An iterator for going through a PersistentSampleMap. The logic here is
+// identical to that of SampleMapIterator but with different data structures.
+// Changes here likely need to be duplicated there.
+class PersistentSampleMapIterator : public SampleCountIterator {
+ public:
+  typedef std::map<HistogramBase::Sample, HistogramBase::Count*>
+      SampleToCountMap;
+
+  explicit PersistentSampleMapIterator(const SampleToCountMap& sample_counts);
+  ~PersistentSampleMapIterator() override;
+
+  // SampleCountIterator:
+  bool Done() const override;
+  void Next() override;
+  void Get(HistogramBase::Sample* min,
+           HistogramBase::Sample* max,
+           HistogramBase::Count* count) const override;
+
+ private:
+  // Advances |iter_| past map entries whose stored count is zero so that
+  // empty buckets are never exposed through the iterator interface.
+  void SkipEmptyBuckets();
+
+  SampleToCountMap::const_iterator iter_;
+  const SampleToCountMap::const_iterator end_;
+};
+
+PersistentSampleMapIterator::PersistentSampleMapIterator(
+    const SampleToCountMap& sample_counts)
+    : iter_(sample_counts.begin()),
+      end_(sample_counts.end()) {
+  // Position on the first non-empty bucket (or the end, if none).
+  SkipEmptyBuckets();
+}
+
+PersistentSampleMapIterator::~PersistentSampleMapIterator() {}
+
+bool PersistentSampleMapIterator::Done() const {
+  return iter_ == end_;
+}
+
+void PersistentSampleMapIterator::Next() {
+  DCHECK(!Done());
+  ++iter_;
+  SkipEmptyBuckets();
+}
+
+void PersistentSampleMapIterator::Get(Sample* min,
+                                      Sample* max,
+                                      Count* count) const {
+  DCHECK(!Done());
+  // Each sparse bucket covers exactly one sample value: [value, value + 1).
+  if (min)
+    *min = iter_->first;
+  if (max)
+    *max = iter_->first + 1;
+  if (count)
+    *count = *iter_->second;
+}
+
+void PersistentSampleMapIterator::SkipEmptyBuckets() {
+  while (!Done() && *iter_->second == 0) {
+    ++iter_;
+  }
+}
+
+// This structure holds an entry for a PersistentSampleMap within a persistent
+// memory allocator. The "id" must be unique across all maps held by an
+// allocator or they will get attached to the wrong sample map.
+struct SampleRecord {
+  uint64_t id;   // Unique identifier of owner.
+  Sample value;  // The value for which this record holds a count.
+  Count count;   // The count associated with the above value.
+};
+
+// The type-id used to identify sample records inside an allocator.
+const uint32_t kTypeIdSampleRecord = 0x8FE6A69F + 1;  // SHA1(SampleRecord) v1
+
+} // namespace
+
+PersistentSampleMap::PersistentSampleMap(
+    uint64_t id,
+    PersistentHistogramAllocator* allocator,
+    Metadata* meta)
+    : HistogramSamples(id, meta), allocator_(allocator) {}
+
+PersistentSampleMap::~PersistentSampleMap() {
+  // Return the (lazily-acquired) records object to the allocator, if held.
+  if (records_)
+    records_->Release(this);
+}
+
+void PersistentSampleMap::Accumulate(Sample value, Count count) {
+  *GetOrCreateSampleCountStorage(value) += count;
+  // Widen |count| before multiplying so the product is computed in 64 bits.
+  IncreaseSum(static_cast<int64_t>(count) * value);
+  IncreaseRedundantCount(count);
+}
+
+Count PersistentSampleMap::GetCount(Sample value) const {
+  // Have to override "const" to make sure all samples have been loaded before
+  // being able to know what value to return.
+  Count* count_pointer =
+      const_cast<PersistentSampleMap*>(this)->GetSampleCountStorage(value);
+  return count_pointer ? *count_pointer : 0;
+}
+
+Count PersistentSampleMap::TotalCount() const {
+  // Have to override "const" in order to make sure all samples have been
+  // loaded before trying to iterate over the map.
+  const_cast<PersistentSampleMap*>(this)->ImportSamples(-1, true);
+
+  Count count = 0;
+  for (const auto& entry : sample_counts_) {
+    count += *entry.second;
+  }
+  return count;
+}
+
+std::unique_ptr<SampleCountIterator> PersistentSampleMap::Iterator() const {
+  // Have to override "const" in order to make sure all samples have been
+  // loaded before trying to iterate over the map.
+  const_cast<PersistentSampleMap*>(this)->ImportSamples(-1, true);
+  return WrapUnique(new PersistentSampleMapIterator(sample_counts_));
+}
+
+// static
+PersistentMemoryAllocator::Reference
+PersistentSampleMap::GetNextPersistentRecord(
+    PersistentMemoryAllocator::Iterator& iterator,
+    uint64_t* sample_map_id) {
+  PersistentMemoryAllocator::Reference ref =
+      iterator.GetNextOfType(kTypeIdSampleRecord);
+  const SampleRecord* record =
+      iterator.GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+  // A zero/unresolvable reference means no further sample records exist.
+  if (!record)
+    return 0;
+
+  *sample_map_id = record->id;
+  return ref;
+}
+
+// static
+PersistentMemoryAllocator::Reference
+PersistentSampleMap::CreatePersistentRecord(
+    PersistentMemoryAllocator* allocator,
+    uint64_t sample_map_id,
+    Sample value) {
+  PersistentMemoryAllocator::Reference ref =
+      allocator->Allocate(sizeof(SampleRecord), kTypeIdSampleRecord);
+  SampleRecord* record =
+      allocator->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+
+  if (!record) {
+    NOTREACHED() << "full=" << allocator->IsFull()
+                 << ", corrupt=" << allocator->IsCorrupt();
+    return 0;
+  }
+
+  record->id = sample_map_id;
+  record->value = value;
+  record->count = 0;
+  // Publish the record to iterators only after it is fully initialized.
+  allocator->MakeIterable(ref);
+  return ref;
+}
+
+// Merges samples from |iter| into this map: adds when |op| is ADD, subtracts
+// otherwise. Returns false (with earlier iterations already applied) if any
+// incoming bucket spans more than a single value.
+bool PersistentSampleMap::AddSubtractImpl(SampleCountIterator* iter,
+                                          Operator op) {
+  Sample min;
+  Sample max;
+  Count count;
+  for (; !iter->Done(); iter->Next()) {
+    iter->Get(&min, &max, &count);
+    if (min + 1 != max)
+      return false;  // SparseHistogram only supports bucket with size 1.
+
+    *GetOrCreateSampleCountStorage(min) +=
+        (op == HistogramSamples::ADD) ? count : -count;
+  }
+  return true;
+}
+
+// Returns the count storage for |value|, or null if no record for it exists
+// yet (locally or in persistent memory).
+Count* PersistentSampleMap::GetSampleCountStorage(Sample value) {
+  // If |value| is already in the map, just return that.
+  auto it = sample_counts_.find(value);
+  if (it != sample_counts_.end())
+    return it->second;
+
+  // Import any new samples from persistent memory looking for the value.
+  return ImportSamples(value, false);
+}
+
+// Returns the count storage for |value|, creating a zero-initialized
+// persistent record (or, on allocator failure, a heap-allocated fallback)
+// if one does not already exist.
+Count* PersistentSampleMap::GetOrCreateSampleCountStorage(Sample value) {
+  // Get any existing count storage.
+  Count* count_pointer = GetSampleCountStorage(value);
+  if (count_pointer)
+    return count_pointer;
+
+  // Create a new record in persistent memory for the value. |records_| will
+  // have been initialized by the GetSampleCountStorage() call above.
+  DCHECK(records_);
+  PersistentMemoryAllocator::Reference ref = records_->CreateNew(value);
+  if (!ref) {
+    // If a new record could not be created then the underlying allocator is
+    // full or corrupt. Instead, allocate the counter from the heap. This
+    // sample will not be persistent, will not be shared, and will leak...
+    // but it's better than crashing.
+    count_pointer = new Count(0);
+    sample_counts_[value] = count_pointer;
+    return count_pointer;
+  }
+
+  // A race condition between two independent processes (i.e. two independent
+  // histogram objects sharing the same sample data) could cause two of the
+  // above records to be created. The allocator, however, forces a strict
+  // ordering on iterable objects so use the import method to actually add the
+  // just-created record. This ensures that all PersistentSampleMap objects
+  // will always use the same record, whichever was first made iterable.
+  // Thread-safety within a process where multiple threads use the same
+  // histogram object is delegated to the controlling histogram object which,
+  // for sparse histograms, is a lock object.
+  count_pointer = ImportSamples(value, false);
+  DCHECK(count_pointer);
+  return count_pointer;
+}
+
+PersistentSampleMapRecords* PersistentSampleMap::GetRecords() {
+  // The |records_| pointer is lazily fetched from the |allocator_| only on
+  // first use. Sometimes duplicate histograms are created by race conditions
+  // and if both were to grab the records object, there would be a conflict.
+  // Use of a histogram, and thus a call to this method, won't occur until
+  // after the histogram has been de-dup'd.
+  if (!records_)
+    records_ = allocator_->UseSampleMapRecords(id(), this);
+  return records_;
+}
+
+Count* PersistentSampleMap::ImportSamples(Sample until_value,
+                                          bool import_everything) {
+  Count* found_count = nullptr;
+  PersistentMemoryAllocator::Reference ref;
+  PersistentSampleMapRecords* records = GetRecords();
+  while ((ref = records->GetNext()) != 0) {
+    SampleRecord* record =
+        records->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+    // Skip references that cannot be resolved to a valid sample record.
+    if (!record)
+      continue;
+
+    DCHECK_EQ(id(), record->id);
+
+    // Check if the record's value is already known.
+    if (!ContainsKey(sample_counts_, record->value)) {
+      // No: Add it to map of known values.
+      sample_counts_[record->value] = &record->count;
+    } else {
+      // Yes: Ignore it; it's a duplicate caused by a race condition -- see
+      // code & comment in GetOrCreateSampleCountStorage() for details.
+      // Check that nothing ever operated on the duplicate record.
+      DCHECK_EQ(0, record->count);
+    }
+
+    // Check if it's the value being searched for and, if so, keep a pointer
+    // to return later. Stop here unless everything is being imported.
+    // Because race conditions can cause multiple records for a single value,
+    // be sure to return the first one found.
+    if (record->value == until_value) {
+      if (!found_count)
+        found_count = &record->count;
+      if (!import_everything)
+        break;
+    }
+  }
+
+  return found_count;
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/persistent_sample_map.h b/libchrome/base/metrics/persistent_sample_map.h
new file mode 100644
index 0000000..3c175db
--- /dev/null
+++ b/libchrome/base/metrics/persistent_sample_map.h
@@ -0,0 +1,110 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// PersistentSampleMap implements HistogramSamples interface. It is used
+// by the SparseHistogram class to store samples in persistent memory which
+// allows it to be shared between processes or live across restarts.
+
+#ifndef BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
+#define BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/persistent_memory_allocator.h"
+
+namespace base {
+
+class PersistentHistogramAllocator;
+class PersistentSampleMapRecords;
+class PersistentSparseHistogramDataManager;
+
+// The logic here is similar to that of SampleMap but with different data
+// structures. Changes here likely need to be duplicated there.
+class BASE_EXPORT PersistentSampleMap : public HistogramSamples {
+ public:
+  // Constructs a persistent sample map using a PersistentHistogramAllocator
+  // as the data source for persistent records.
+  PersistentSampleMap(uint64_t id,
+                      PersistentHistogramAllocator* allocator,
+                      Metadata* meta);
+
+  ~PersistentSampleMap() override;
+
+  // HistogramSamples:
+  void Accumulate(HistogramBase::Sample value,
+                  HistogramBase::Count count) override;
+  HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
+  HistogramBase::Count TotalCount() const override;
+  std::unique_ptr<SampleCountIterator> Iterator() const override;
+
+  // Uses a persistent-memory |iterator| to locate and return information about
+  // the next record holding information for a PersistentSampleMap. The record
+  // could be for any Map so return the |sample_map_id| as well.
+  static PersistentMemoryAllocator::Reference GetNextPersistentRecord(
+      PersistentMemoryAllocator::Iterator& iterator,
+      uint64_t* sample_map_id);
+
+  // Creates a new record in an |allocator| storing count information for a
+  // specific sample |value| of a histogram with the given |sample_map_id|.
+  static PersistentMemoryAllocator::Reference CreatePersistentRecord(
+      PersistentMemoryAllocator* allocator,
+      uint64_t sample_map_id,
+      HistogramBase::Sample value);
+
+ protected:
+  // Performs arithmetic. |op| is ADD or SUBTRACT.
+  bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override;
+
+  // Gets a pointer to a "count" corresponding to a given |value|. Returns NULL
+  // if sample does not exist.
+  HistogramBase::Count* GetSampleCountStorage(HistogramBase::Sample value);
+
+  // Gets a pointer to a "count" corresponding to a given |value|, creating
+  // the sample (initialized to zero) if it does not already exist.
+  HistogramBase::Count* GetOrCreateSampleCountStorage(
+      HistogramBase::Sample value);
+
+ private:
+  // Gets the object that manages persistent records. This returns the
+  // |records_| member after first initializing it if necessary.
+  PersistentSampleMapRecords* GetRecords();
+
+  // Imports samples from persistent memory by iterating over all sample
+  // records found therein, adding them to the sample_counts_ map. If a
+  // count for the sample |until_value| is found, stop the import and return
+  // a pointer to that counter. If that value is not found, null will be
+  // returned after all currently available samples have been loaded. Pass
+  // true for |import_everything| to force the importing of all available
+  // samples even if a match is found.
+  HistogramBase::Count* ImportSamples(HistogramBase::Sample until_value,
+                                      bool import_everything);
+
+  // All created/loaded sample values and their associated counts. The storage
+  // for the actual Count numbers is owned by the |records_| object and its
+  // underlying allocator.
+  std::map<HistogramBase::Sample, HistogramBase::Count*> sample_counts_;
+
+  // The allocator that manages histograms inside persistent memory. This is
+  // owned externally and is expected to live beyond the life of this object.
+  PersistentHistogramAllocator* allocator_;
+
+  // The object that manages sample records inside persistent memory. This is
+  // owned by the |allocator_| object (above) and so, like it, is expected to
+  // live beyond the life of this object. This value is lazily-initialized on
+  // first use via the GetRecords() accessor method.
+  PersistentSampleMapRecords* records_ = nullptr;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentSampleMap);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
diff --git a/libchrome/base/metrics/persistent_sample_map_unittest.cc b/libchrome/base/metrics/persistent_sample_map_unittest.cc
new file mode 100644
index 0000000..beb72e5
--- /dev/null
+++ b/libchrome/base/metrics/persistent_sample_map_unittest.cc
@@ -0,0 +1,263 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_sample_map.h"
+
+#include <memory>
+
+#include "base/memory/ptr_util.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+// Builds a PersistentHistogramAllocator backed by local (heap) memory of the
+// given size.
+std::unique_ptr<PersistentHistogramAllocator> CreateHistogramAllocator(
+    size_t bytes) {
+  return WrapUnique(new PersistentHistogramAllocator(
+      WrapUnique(new LocalPersistentMemoryAllocator(bytes, 0, ""))));
+}
+
+// Creates a second allocator over the same underlying memory as |original|
+// (note the shared data pointer), giving an independent view of the same
+// persistent records.
+std::unique_ptr<PersistentHistogramAllocator> DuplicateHistogramAllocator(
+    PersistentHistogramAllocator* original) {
+  return WrapUnique(
+      new PersistentHistogramAllocator(WrapUnique(new PersistentMemoryAllocator(
+          const_cast<void*>(original->data()), original->length(), 0,
+          original->Id(), original->Name(), false))));
+}
+
+// Checks basic accumulation plus sum/redundant-count bookkeeping, including
+// negative counts.
+TEST(PersistentSampleMapTest, AccumulateTest) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta;
+  PersistentSampleMap samples(1, allocator.get(), &meta);
+
+  samples.Accumulate(1, 100);
+  samples.Accumulate(2, 200);
+  samples.Accumulate(1, -200);
+  EXPECT_EQ(-100, samples.GetCount(1));
+  EXPECT_EQ(200, samples.GetCount(2));
+
+  EXPECT_EQ(300, samples.sum());
+  EXPECT_EQ(100, samples.TotalCount());
+  EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
+}
+
+// Checks that sum() is accumulated in 64 bits so large value*count products
+// (here 75,000,000,000) do not overflow 32-bit arithmetic.
+TEST(PersistentSampleMapTest, Accumulate_LargeValuesDontOverflow) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta;
+  PersistentSampleMap samples(1, allocator.get(), &meta);
+
+  samples.Accumulate(250000000, 100);
+  samples.Accumulate(500000000, 200);
+  samples.Accumulate(250000000, -200);
+  EXPECT_EQ(-100, samples.GetCount(250000000));
+  EXPECT_EQ(200, samples.GetCount(500000000));
+
+  EXPECT_EQ(75000000000LL, samples.sum());
+  EXPECT_EQ(100, samples.TotalCount());
+  EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
+}
+
+// Verifies Add() then Subtract() between two maps whose allocators share the
+// same underlying memory, including sum/redundant-count bookkeeping.
+TEST(PersistentSampleMapTest, AddSubtractTest) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator1 =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta1;
+  PersistentSampleMap samples1(1, allocator1.get(), &meta1);
+  samples1.Accumulate(1, 100);
+  samples1.Accumulate(2, 100);
+  samples1.Accumulate(3, 100);
+
+  std::unique_ptr<PersistentHistogramAllocator> allocator2 =
+      DuplicateHistogramAllocator(allocator1.get());
+  HistogramSamples::Metadata meta2;
+  PersistentSampleMap samples2(2, allocator2.get(), &meta2);
+  samples2.Accumulate(1, 200);
+  samples2.Accumulate(2, 200);
+  samples2.Accumulate(4, 200);
+
+  samples1.Add(samples2);
+  EXPECT_EQ(300, samples1.GetCount(1));
+  EXPECT_EQ(300, samples1.GetCount(2));
+  EXPECT_EQ(100, samples1.GetCount(3));
+  EXPECT_EQ(200, samples1.GetCount(4));
+  EXPECT_EQ(2000, samples1.sum());
+  EXPECT_EQ(900, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+
+  samples1.Subtract(samples2);
+  EXPECT_EQ(100, samples1.GetCount(1));
+  EXPECT_EQ(100, samples1.GetCount(2));
+  EXPECT_EQ(100, samples1.GetCount(3));
+  EXPECT_EQ(0, samples1.GetCount(4));
+  EXPECT_EQ(600, samples1.sum());
+  EXPECT_EQ(300, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+}
+
+// Verifies that two maps with the same id (12), bound to duplicate allocators
+// over the same memory and sharing |meta12|, observe each other's samples in
+// both directions.
+TEST(PersistentSampleMapTest, PersistenceTest) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator1 =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta12;
+  PersistentSampleMap samples1(12, allocator1.get(), &meta12);
+  samples1.Accumulate(1, 100);
+  samples1.Accumulate(2, 200);
+  samples1.Accumulate(1, -200);
+  samples1.Accumulate(-1, 1);
+  EXPECT_EQ(-100, samples1.GetCount(1));
+  EXPECT_EQ(200, samples1.GetCount(2));
+  EXPECT_EQ(1, samples1.GetCount(-1));
+  EXPECT_EQ(299, samples1.sum());
+  EXPECT_EQ(101, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+
+  std::unique_ptr<PersistentHistogramAllocator> allocator2 =
+      DuplicateHistogramAllocator(allocator1.get());
+  PersistentSampleMap samples2(12, allocator2.get(), &meta12);
+  EXPECT_EQ(samples1.id(), samples2.id());
+  EXPECT_EQ(samples1.sum(), samples2.sum());
+  EXPECT_EQ(samples1.redundant_count(), samples2.redundant_count());
+  EXPECT_EQ(samples1.TotalCount(), samples2.TotalCount());
+  EXPECT_EQ(-100, samples2.GetCount(1));
+  EXPECT_EQ(200, samples2.GetCount(2));
+  EXPECT_EQ(1, samples2.GetCount(-1));
+  EXPECT_EQ(299, samples2.sum());
+  EXPECT_EQ(101, samples2.TotalCount());
+  EXPECT_EQ(samples2.redundant_count(), samples2.TotalCount());
+
+  samples1.Accumulate(-1, -1);
+  EXPECT_EQ(0, samples2.GetCount(3));
+  EXPECT_EQ(0, samples1.GetCount(3));
+  samples2.Accumulate(3, 300);
+  EXPECT_EQ(300, samples2.GetCount(3));
+  EXPECT_EQ(300, samples1.GetCount(3));
+  EXPECT_EQ(samples1.sum(), samples2.sum());
+  EXPECT_EQ(samples1.redundant_count(), samples2.redundant_count());
+  EXPECT_EQ(samples1.TotalCount(), samples2.TotalCount());
+
+  EXPECT_EQ(0, samples2.GetCount(4));
+  EXPECT_EQ(0, samples1.GetCount(4));
+  samples1.Accumulate(4, 400);
+  EXPECT_EQ(400, samples2.GetCount(4));
+  EXPECT_EQ(400, samples1.GetCount(4));
+  samples2.Accumulate(4, 4000);
+  EXPECT_EQ(4400, samples2.GetCount(4));
+  EXPECT_EQ(4400, samples1.GetCount(4));
+  EXPECT_EQ(samples1.sum(), samples2.sum());
+  EXPECT_EQ(samples1.redundant_count(), samples2.redundant_count());
+  EXPECT_EQ(samples1.TotalCount(), samples2.TotalCount());
+}
+
+// Walks the iterator over accumulated samples: each bucket spans exactly one
+// value ([min, min+1)) and the zero-count sample (5) is skipped entirely.
+TEST(PersistentSampleMapIteratorTest, IterateTest) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta;
+  PersistentSampleMap samples(1, allocator.get(), &meta);
+  samples.Accumulate(1, 100);
+  samples.Accumulate(2, 200);
+  samples.Accumulate(4, -300);
+  samples.Accumulate(5, 0);
+
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
+
+  HistogramBase::Sample min;
+  HistogramBase::Sample max;
+  HistogramBase::Count count;
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(1, min);
+  EXPECT_EQ(2, max);
+  EXPECT_EQ(100, count);
+  // Sparse samples have no fixed bucket index to report.
+  EXPECT_FALSE(it->GetBucketIndex(NULL));
+
+  it->Next();
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(2, min);
+  EXPECT_EQ(3, max);
+  EXPECT_EQ(200, count);
+
+  it->Next();
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(4, min);
+  EXPECT_EQ(5, max);
+  EXPECT_EQ(-300, count);
+
+  it->Next();
+  EXPECT_TRUE(it->Done());
+}
+
+// After subtracting samples2, the counts for 5, 20 and 25 drop to zero; the
+// iterator must skip those and report only the remaining values (10 and 15).
+TEST(PersistentSampleMapIteratorTest, SkipEmptyRanges) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator1 =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta1;
+  PersistentSampleMap samples1(1, allocator1.get(), &meta1);
+  samples1.Accumulate(5, 1);
+  samples1.Accumulate(10, 2);
+  samples1.Accumulate(15, 3);
+  samples1.Accumulate(20, 4);
+  samples1.Accumulate(25, 5);
+
+  std::unique_ptr<PersistentHistogramAllocator> allocator2 =
+      DuplicateHistogramAllocator(allocator1.get());
+  HistogramSamples::Metadata meta2;
+  PersistentSampleMap samples2(2, allocator2.get(), &meta2);
+  samples2.Accumulate(5, 1);
+  samples2.Accumulate(20, 4);
+  samples2.Accumulate(25, 5);
+
+  samples1.Subtract(samples2);
+
+  std::unique_ptr<SampleCountIterator> it = samples1.Iterator();
+  EXPECT_FALSE(it->Done());
+
+  HistogramBase::Sample min;
+  HistogramBase::Sample max;
+  HistogramBase::Count count;
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(10, min);
+  EXPECT_EQ(11, max);
+  EXPECT_EQ(2, count);
+
+  it->Next();
+  EXPECT_FALSE(it->Done());
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(15, min);
+  EXPECT_EQ(16, max);
+  EXPECT_EQ(3, count);
+
+  it->Next();
+  EXPECT_TRUE(it->Done());
+}
+
+// Only run this test on builds that support catching a DCHECK crash.
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+// Get() and Next() on an exhausted iterator DCHECK; an empty map's iterator
+// starts out Done() while a non-empty map's does not.
+TEST(PersistentSampleMapIteratorDeathTest, IterateDoneTest) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta;
+  PersistentSampleMap samples(1, allocator.get(), &meta);
+
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
+
+  EXPECT_TRUE(it->Done());
+
+  HistogramBase::Sample min;
+  HistogramBase::Sample max;
+  HistogramBase::Count count;
+  EXPECT_DEATH(it->Get(&min, &max, &count), "");
+
+  EXPECT_DEATH(it->Next(), "");
+
+  samples.Accumulate(1, 100);
+  it = samples.Iterator();
+  EXPECT_FALSE(it->Done());
+}
+#endif
+// (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+
+} // namespace
+} // namespace base
diff --git a/libchrome/base/metrics/sample_map.cc b/libchrome/base/metrics/sample_map.cc
new file mode 100644
index 0000000..8abd01e
--- /dev/null
+++ b/libchrome/base/metrics/sample_map.cc
@@ -0,0 +1,124 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/sample_map.h"
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/stl_util.h"
+
+namespace base {
+
+typedef HistogramBase::Count Count;
+typedef HistogramBase::Sample Sample;
+
+namespace {
+
+// An iterator for going through a SampleMap. The logic here is identical
+// to that of PersistentSampleMapIterator but with different data structures.
+// Changes here likely need to be duplicated there.
+class SampleMapIterator : public SampleCountIterator {
+ public:
+  typedef std::map<HistogramBase::Sample, HistogramBase::Count>
+      SampleToCountMap;
+
+  explicit SampleMapIterator(const SampleToCountMap& sample_counts);
+  ~SampleMapIterator() override;
+
+  // SampleCountIterator:
+  bool Done() const override;
+  void Next() override;
+  void Get(HistogramBase::Sample* min,
+           HistogramBase::Sample* max,
+           HistogramBase::Count* count) const override;
+
+ private:
+  // Advances |iter_| past map entries whose count is zero so that empty
+  // buckets are never exposed through the iterator interface.
+  void SkipEmptyBuckets();
+
+  SampleToCountMap::const_iterator iter_;
+  const SampleToCountMap::const_iterator end_;
+};
+
+SampleMapIterator::SampleMapIterator(const SampleToCountMap& sample_counts)
+    : iter_(sample_counts.begin()),
+      end_(sample_counts.end()) {
+  // Position on the first non-empty bucket (or the end, if none).
+  SkipEmptyBuckets();
+}
+
+SampleMapIterator::~SampleMapIterator() {}
+
+bool SampleMapIterator::Done() const {
+  return iter_ == end_;
+}
+
+void SampleMapIterator::Next() {
+  DCHECK(!Done());
+  ++iter_;
+  SkipEmptyBuckets();
+}
+
+void SampleMapIterator::Get(Sample* min, Sample* max, Count* count) const {
+  DCHECK(!Done());
+  // Each sparse bucket covers exactly one sample value: [value, value + 1).
+  if (min)
+    *min = iter_->first;
+  if (max)
+    *max = iter_->first + 1;
+  if (count)
+    *count = iter_->second;
+}
+
+void SampleMapIterator::SkipEmptyBuckets() {
+  while (!Done() && iter_->second == 0) {
+    ++iter_;
+  }
+}
+
+} // namespace
+
+// Default constructor delegates to the id-taking constructor with id 0.
+SampleMap::SampleMap() : SampleMap(0) {}
+
+SampleMap::SampleMap(uint64_t id) : HistogramSamples(id) {}
+
+SampleMap::~SampleMap() {}
+
+void SampleMap::Accumulate(Sample value, Count count) {
+  sample_counts_[value] += count;
+  // Widen |count| before multiplying so the product is computed in 64 bits.
+  IncreaseSum(static_cast<int64_t>(count) * value);
+  IncreaseRedundantCount(count);
+}
+
+Count SampleMap::GetCount(Sample value) const {
+  std::map<Sample, Count>::const_iterator it = sample_counts_.find(value);
+  if (it == sample_counts_.end())
+    return 0;
+  return it->second;
+}
+
+Count SampleMap::TotalCount() const {
+  Count count = 0;
+  for (const auto& entry : sample_counts_) {
+    count += entry.second;
+  }
+  return count;
+}
+
+std::unique_ptr<SampleCountIterator> SampleMap::Iterator() const {
+  return WrapUnique(new SampleMapIterator(sample_counts_));
+}
+
+// Merges samples from |iter| into this map: adds when |op| is ADD, subtracts
+// otherwise. Returns false (with earlier iterations already applied) if any
+// incoming bucket spans more than a single value.
+bool SampleMap::AddSubtractImpl(SampleCountIterator* iter, Operator op) {
+  Sample min;
+  Sample max;
+  Count count;
+  for (; !iter->Done(); iter->Next()) {
+    iter->Get(&min, &max, &count);
+    if (min + 1 != max)
+      return false;  // SparseHistogram only supports bucket with size 1.
+
+    sample_counts_[min] += (op == HistogramSamples::ADD) ? count : -count;
+  }
+  return true;
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/sample_map.h b/libchrome/base/metrics/sample_map.h
new file mode 100644
index 0000000..7458e05
--- /dev/null
+++ b/libchrome/base/metrics/sample_map.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// SampleMap implements HistogramSamples interface. It is used by the
+// SparseHistogram class to store samples.
+
+#ifndef BASE_METRICS_SAMPLE_MAP_H_
+#define BASE_METRICS_SAMPLE_MAP_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+
+namespace base {
+
+// The logic here is similar to that of PersistentSampleMap but with different
+// data structures. Changes here likely need to be duplicated there.
+class BASE_EXPORT SampleMap : public HistogramSamples {
+ public:
+  SampleMap();
+  explicit SampleMap(uint64_t id);
+  ~SampleMap() override;
+
+  // HistogramSamples:
+  void Accumulate(HistogramBase::Sample value,
+                  HistogramBase::Count count) override;
+  HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
+  HistogramBase::Count TotalCount() const override;
+  std::unique_ptr<SampleCountIterator> Iterator() const override;
+
+ protected:
+  // Performs arithmetic. |op| is ADD or SUBTRACT.
+  bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override;
+
+ private:
+  // Maps each sample value to its accumulated count; counts live in the map.
+  std::map<HistogramBase::Sample, HistogramBase::Count> sample_counts_;
+
+  DISALLOW_COPY_AND_ASSIGN(SampleMap);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_SAMPLE_MAP_H_
diff --git a/libchrome/base/metrics/sample_map_unittest.cc b/libchrome/base/metrics/sample_map_unittest.cc
new file mode 100644
index 0000000..8f57710
--- /dev/null
+++ b/libchrome/base/metrics/sample_map_unittest.cc
@@ -0,0 +1,172 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/sample_map.h"
+
+#include <memory>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+// Checks basic accumulation plus sum/redundant-count bookkeeping, including
+// negative counts.
+TEST(SampleMapTest, AccumulateTest) {
+  SampleMap samples(1);
+
+  samples.Accumulate(1, 100);
+  samples.Accumulate(2, 200);
+  samples.Accumulate(1, -200);
+  EXPECT_EQ(-100, samples.GetCount(1));
+  EXPECT_EQ(200, samples.GetCount(2));
+
+  EXPECT_EQ(300, samples.sum());
+  EXPECT_EQ(100, samples.TotalCount());
+  EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
+}
+
+// Checks that sum() is accumulated in 64 bits so large value*count products
+// (here 75,000,000,000) do not overflow 32-bit arithmetic.
+TEST(SampleMapTest, Accumulate_LargeValuesDontOverflow) {
+  SampleMap samples(1);
+
+  samples.Accumulate(250000000, 100);
+  samples.Accumulate(500000000, 200);
+  samples.Accumulate(250000000, -200);
+  EXPECT_EQ(-100, samples.GetCount(250000000));
+  EXPECT_EQ(200, samples.GetCount(500000000));
+
+  EXPECT_EQ(75000000000LL, samples.sum());
+  EXPECT_EQ(100, samples.TotalCount());
+  EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
+}
+
+// Verifies Add() then Subtract() between two independent maps, including
+// sum/redundant-count bookkeeping.
+TEST(SampleMapTest, AddSubtractTest) {
+  SampleMap samples1(1);
+  SampleMap samples2(2);
+
+  samples1.Accumulate(1, 100);
+  samples1.Accumulate(2, 100);
+  samples1.Accumulate(3, 100);
+
+  samples2.Accumulate(1, 200);
+  samples2.Accumulate(2, 200);
+  samples2.Accumulate(4, 200);
+
+  samples1.Add(samples2);
+  EXPECT_EQ(300, samples1.GetCount(1));
+  EXPECT_EQ(300, samples1.GetCount(2));
+  EXPECT_EQ(100, samples1.GetCount(3));
+  EXPECT_EQ(200, samples1.GetCount(4));
+  EXPECT_EQ(2000, samples1.sum());
+  EXPECT_EQ(900, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+
+  samples1.Subtract(samples2);
+  EXPECT_EQ(100, samples1.GetCount(1));
+  EXPECT_EQ(100, samples1.GetCount(2));
+  EXPECT_EQ(100, samples1.GetCount(3));
+  EXPECT_EQ(0, samples1.GetCount(4));
+  EXPECT_EQ(600, samples1.sum());
+  EXPECT_EQ(300, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+}
+
+// Walks the iterator over accumulated samples: each bucket spans exactly one
+// value ([min, min+1)) and the zero-count sample (5) is skipped entirely.
+TEST(SampleMapIteratorTest, IterateTest) {
+  SampleMap samples(1);
+  samples.Accumulate(1, 100);
+  samples.Accumulate(2, 200);
+  samples.Accumulate(4, -300);
+  samples.Accumulate(5, 0);
+
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
+
+  HistogramBase::Sample min;
+  HistogramBase::Sample max;
+  HistogramBase::Count count;
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(1, min);
+  EXPECT_EQ(2, max);
+  EXPECT_EQ(100, count);
+  // Sparse samples have no fixed bucket index to report.
+  EXPECT_FALSE(it->GetBucketIndex(NULL));
+
+  it->Next();
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(2, min);
+  EXPECT_EQ(3, max);
+  EXPECT_EQ(200, count);
+
+  it->Next();
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(4, min);
+  EXPECT_EQ(5, max);
+  EXPECT_EQ(-300, count);
+
+  it->Next();
+  EXPECT_TRUE(it->Done());
+}
+
+// After subtracting samples2, the counts for 5, 20 and 25 drop to zero; the
+// iterator must skip those and report only the remaining values (10 and 15).
+TEST(SampleMapIteratorTest, SkipEmptyRanges) {
+  SampleMap samples(1);
+  samples.Accumulate(5, 1);
+  samples.Accumulate(10, 2);
+  samples.Accumulate(15, 3);
+  samples.Accumulate(20, 4);
+  samples.Accumulate(25, 5);
+
+  SampleMap samples2(2);
+  samples2.Accumulate(5, 1);
+  samples2.Accumulate(20, 4);
+  samples2.Accumulate(25, 5);
+
+  samples.Subtract(samples2);
+
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
+  EXPECT_FALSE(it->Done());
+
+  HistogramBase::Sample min;
+  HistogramBase::Sample max;
+  HistogramBase::Count count;
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(10, min);
+  EXPECT_EQ(11, max);
+  EXPECT_EQ(2, count);
+
+  it->Next();
+  EXPECT_FALSE(it->Done());
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(15, min);
+  EXPECT_EQ(16, max);
+  EXPECT_EQ(3, count);
+
+  it->Next();
+  EXPECT_TRUE(it->Done());
+}
+
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+
+// Get() and Next() on an exhausted iterator DCHECK; an empty map's iterator
+// starts out Done() while a non-empty map's does not.
+TEST(SampleMapIteratorDeathTest, IterateDoneTest) {
+  SampleMap samples(1);
+
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
+
+  EXPECT_TRUE(it->Done());
+
+  HistogramBase::Sample min;
+  HistogramBase::Sample max;
+  HistogramBase::Count count;
+  EXPECT_DEATH(it->Get(&min, &max, &count), "");
+
+  EXPECT_DEATH(it->Next(), "");
+
+  samples.Accumulate(1, 100);
+  it = samples.Iterator();
+  EXPECT_FALSE(it->Done());
+}
+
+#endif
+// (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+
+} // namespace
+} // namespace base
diff --git a/libchrome/base/metrics/sample_vector.cc b/libchrome/base/metrics/sample_vector.cc
new file mode 100644
index 0000000..7b056cb
--- /dev/null
+++ b/libchrome/base/metrics/sample_vector.cc
@@ -0,0 +1,193 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/sample_vector.h"
+
+#include "base/logging.h"
+#include "base/metrics/bucket_ranges.h"
+
+namespace base {
+
+typedef HistogramBase::Count Count;
+typedef HistogramBase::Sample Sample;
+
+SampleVector::SampleVector(const BucketRanges* bucket_ranges)
+ : SampleVector(0, bucket_ranges) {}
+
+SampleVector::SampleVector(uint64_t id, const BucketRanges* bucket_ranges)
+ : HistogramSamples(id),
+ local_counts_(bucket_ranges->bucket_count()),
+ counts_(&local_counts_[0]),
+ counts_size_(local_counts_.size()),
+ bucket_ranges_(bucket_ranges) {
+ CHECK_GE(bucket_ranges_->bucket_count(), 1u);
+}
+
+SampleVector::SampleVector(uint64_t id,
+ HistogramBase::AtomicCount* counts,
+ size_t /*counts_size*/,
+ Metadata* meta,
+ const BucketRanges* bucket_ranges)
+ : HistogramSamples(id, meta),
+ counts_(counts),
+ counts_size_(bucket_ranges->bucket_count()),
+ bucket_ranges_(bucket_ranges) {
+ CHECK_LE(bucket_ranges_->bucket_count(), counts_size_);
+ CHECK_GE(bucket_ranges_->bucket_count(), 1u);
+}
+
+SampleVector::~SampleVector() {}
+
+void SampleVector::Accumulate(Sample value, Count count) {
+ size_t bucket_index = GetBucketIndex(value);
+ subtle::NoBarrier_AtomicIncrement(&counts_[bucket_index], count);
+ IncreaseSum(static_cast<int64_t>(count) * value);
+ IncreaseRedundantCount(count);
+}
+
+Count SampleVector::GetCount(Sample value) const {
+ size_t bucket_index = GetBucketIndex(value);
+ return subtle::NoBarrier_Load(&counts_[bucket_index]);
+}
+
+Count SampleVector::TotalCount() const {
+ Count count = 0;
+ for (size_t i = 0; i < counts_size_; i++) {
+ count += subtle::NoBarrier_Load(&counts_[i]);
+ }
+ return count;
+}
+
+Count SampleVector::GetCountAtIndex(size_t bucket_index) const {
+ DCHECK(bucket_index < counts_size_);
+ return subtle::NoBarrier_Load(&counts_[bucket_index]);
+}
+
+std::unique_ptr<SampleCountIterator> SampleVector::Iterator() const {
+ return std::unique_ptr<SampleCountIterator>(
+ new SampleVectorIterator(counts_, counts_size_, bucket_ranges_));
+}
+
+bool SampleVector::AddSubtractImpl(SampleCountIterator* iter,
+ HistogramSamples::Operator op) {
+ HistogramBase::Sample min;
+ HistogramBase::Sample max;
+ HistogramBase::Count count;
+
+ // Go through the iterator and add the counts into correct bucket.
+ size_t index = 0;
+ while (index < counts_size_ && !iter->Done()) {
+ iter->Get(&min, &max, &count);
+ if (min == bucket_ranges_->range(index) &&
+ max == bucket_ranges_->range(index + 1)) {
+ // Sample matches this bucket!
+ subtle::NoBarrier_AtomicIncrement(
+ &counts_[index], op == HistogramSamples::ADD ? count : -count);
+ iter->Next();
+ } else if (min > bucket_ranges_->range(index)) {
+ // Sample is larger than current bucket range. Try next.
+ index++;
+ } else {
+ // Sample is smaller than current bucket range. We scan buckets from
+ // smallest to largest, so the sample value must be invalid.
+ return false;
+ }
+ }
+
+ return iter->Done();
+}
+
+// Use simple binary search. This is very general, but there are better
+// approaches if we knew that the buckets were linearly distributed.
+size_t SampleVector::GetBucketIndex(Sample value) const {
+ size_t bucket_count = bucket_ranges_->bucket_count();
+ CHECK_GE(bucket_count, 1u);
+ CHECK_GE(value, bucket_ranges_->range(0));
+ CHECK_LT(value, bucket_ranges_->range(bucket_count));
+
+ size_t under = 0;
+ size_t over = bucket_count;
+ size_t mid;
+ do {
+ DCHECK_GE(over, under);
+ mid = under + (over - under)/2;
+ if (mid == under)
+ break;
+ if (bucket_ranges_->range(mid) <= value)
+ under = mid;
+ else
+ over = mid;
+ } while (true);
+
+ DCHECK_LE(bucket_ranges_->range(mid), value);
+ CHECK_GT(bucket_ranges_->range(mid + 1), value);
+ return mid;
+}
+
+SampleVectorIterator::SampleVectorIterator(
+ const std::vector<HistogramBase::AtomicCount>* counts,
+ const BucketRanges* bucket_ranges)
+ : counts_(&(*counts)[0]),
+ counts_size_(counts->size()),
+ bucket_ranges_(bucket_ranges),
+ index_(0) {
+ CHECK_GE(bucket_ranges_->bucket_count(), counts_size_);
+ SkipEmptyBuckets();
+}
+
+SampleVectorIterator::SampleVectorIterator(
+ const HistogramBase::AtomicCount* counts,
+ size_t counts_size,
+ const BucketRanges* bucket_ranges)
+ : counts_(counts),
+ counts_size_(counts_size),
+ bucket_ranges_(bucket_ranges),
+ index_(0) {
+ CHECK_GE(bucket_ranges_->bucket_count(), counts_size_);
+ SkipEmptyBuckets();
+}
+
+SampleVectorIterator::~SampleVectorIterator() {}
+
+bool SampleVectorIterator::Done() const {
+ return index_ >= counts_size_;
+}
+
+void SampleVectorIterator::Next() {
+ DCHECK(!Done());
+ index_++;
+ SkipEmptyBuckets();
+}
+
+void SampleVectorIterator::Get(HistogramBase::Sample* min,
+ HistogramBase::Sample* max,
+ HistogramBase::Count* count) const {
+ DCHECK(!Done());
+ if (min != NULL)
+ *min = bucket_ranges_->range(index_);
+ if (max != NULL)
+ *max = bucket_ranges_->range(index_ + 1);
+ if (count != NULL)
+ *count = subtle::NoBarrier_Load(&counts_[index_]);
+}
+
+bool SampleVectorIterator::GetBucketIndex(size_t* index) const {
+ DCHECK(!Done());
+ if (index != NULL)
+ *index = index_;
+ return true;
+}
+
+void SampleVectorIterator::SkipEmptyBuckets() {
+ if (Done())
+ return;
+
+ while (index_ < counts_size_) {
+ if (subtle::NoBarrier_Load(&counts_[index_]) != 0)
+ return;
+ index_++;
+ }
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/sample_vector.h b/libchrome/base/metrics/sample_vector.h
new file mode 100644
index 0000000..ee26c52
--- /dev/null
+++ b/libchrome/base/metrics/sample_vector.h
@@ -0,0 +1,105 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// SampleVector implements HistogramSamples interface. It is used by all
+// Histogram based classes to store samples.
+
+#ifndef BASE_METRICS_SAMPLE_VECTOR_H_
+#define BASE_METRICS_SAMPLE_VECTOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+
+namespace base {
+
+class BucketRanges;
+
+class BASE_EXPORT SampleVector : public HistogramSamples {
+ public:
+ explicit SampleVector(const BucketRanges* bucket_ranges);
+ SampleVector(uint64_t id, const BucketRanges* bucket_ranges);
+ SampleVector(uint64_t id,
+ HistogramBase::AtomicCount* counts,
+ size_t counts_size,
+ Metadata* meta,
+ const BucketRanges* bucket_ranges);
+ ~SampleVector() override;
+
+ // HistogramSamples implementation:
+ void Accumulate(HistogramBase::Sample value,
+ HistogramBase::Count count) override;
+ HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
+ HistogramBase::Count TotalCount() const override;
+ std::unique_ptr<SampleCountIterator> Iterator() const override;
+
+ // Get count of a specific bucket.
+ HistogramBase::Count GetCountAtIndex(size_t bucket_index) const;
+
+ protected:
+ bool AddSubtractImpl(
+ SampleCountIterator* iter,
+ HistogramSamples::Operator op) override; // |op| is ADD or SUBTRACT.
+
+ virtual size_t GetBucketIndex(HistogramBase::Sample value) const;
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptSampleCounts);
+ FRIEND_TEST_ALL_PREFIXES(SharedHistogramTest, CorruptSampleCounts);
+
+ // In the case where this class manages the memory, here it is.
+ std::vector<HistogramBase::AtomicCount> local_counts_;
+
+ // These are raw pointers rather than objects for flexibility. The actual
+ // memory is either managed by local_counts_ above or by an external object
+ // and passed in directly.
+ HistogramBase::AtomicCount* counts_;
+ size_t counts_size_;
+
+ // Shares the same BucketRanges with Histogram object.
+ const BucketRanges* const bucket_ranges_;
+
+ DISALLOW_COPY_AND_ASSIGN(SampleVector);
+};
+
+class BASE_EXPORT SampleVectorIterator : public SampleCountIterator {
+ public:
+ SampleVectorIterator(const std::vector<HistogramBase::AtomicCount>* counts,
+ const BucketRanges* bucket_ranges);
+ SampleVectorIterator(const HistogramBase::AtomicCount* counts,
+ size_t counts_size,
+ const BucketRanges* bucket_ranges);
+ ~SampleVectorIterator() override;
+
+ // SampleCountIterator implementation:
+ bool Done() const override;
+ void Next() override;
+ void Get(HistogramBase::Sample* min,
+ HistogramBase::Sample* max,
+ HistogramBase::Count* count) const override;
+
+ // SampleVector uses predefined buckets, so iterator can return bucket index.
+ bool GetBucketIndex(size_t* index) const override;
+
+ private:
+ void SkipEmptyBuckets();
+
+ const HistogramBase::AtomicCount* counts_;
+ size_t counts_size_;
+ const BucketRanges* bucket_ranges_;
+
+ size_t index_;
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_SAMPLE_VECTOR_H_
diff --git a/libchrome/base/metrics/sample_vector_unittest.cc b/libchrome/base/metrics/sample_vector_unittest.cc
new file mode 100644
index 0000000..02e48aa
--- /dev/null
+++ b/libchrome/base/metrics/sample_vector_unittest.cc
@@ -0,0 +1,294 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/sample_vector.h"
+
+#include <limits.h>
+#include <stddef.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/metrics/bucket_ranges.h"
+#include "base/metrics/histogram.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+TEST(SampleVectorTest, AccumulateTest) {
+ // Custom buckets: [1, 5) [5, 10)
+ BucketRanges ranges(3);
+ ranges.set_range(0, 1);
+ ranges.set_range(1, 5);
+ ranges.set_range(2, 10);
+ SampleVector samples(1, &ranges);
+
+ samples.Accumulate(1, 200);
+ samples.Accumulate(2, -300);
+ EXPECT_EQ(-100, samples.GetCountAtIndex(0));
+
+ samples.Accumulate(5, 200);
+ EXPECT_EQ(200, samples.GetCountAtIndex(1));
+
+ EXPECT_EQ(600, samples.sum());
+ EXPECT_EQ(100, samples.redundant_count());
+ EXPECT_EQ(samples.TotalCount(), samples.redundant_count());
+
+ samples.Accumulate(5, -100);
+ EXPECT_EQ(100, samples.GetCountAtIndex(1));
+
+ EXPECT_EQ(100, samples.sum());
+ EXPECT_EQ(0, samples.redundant_count());
+ EXPECT_EQ(samples.TotalCount(), samples.redundant_count());
+}
+
+TEST(SampleVectorTest, Accumulate_LargeValuesDontOverflow) {
+ // Custom buckets: [1, 250000000) [250000000, 500000000)
+ BucketRanges ranges(3);
+ ranges.set_range(0, 1);
+ ranges.set_range(1, 250000000);
+ ranges.set_range(2, 500000000);
+ SampleVector samples(1, &ranges);
+
+ samples.Accumulate(240000000, 200);
+ samples.Accumulate(249999999, -300);
+ EXPECT_EQ(-100, samples.GetCountAtIndex(0));
+
+ samples.Accumulate(250000000, 200);
+ EXPECT_EQ(200, samples.GetCountAtIndex(1));
+
+ EXPECT_EQ(23000000300LL, samples.sum());
+ EXPECT_EQ(100, samples.redundant_count());
+ EXPECT_EQ(samples.TotalCount(), samples.redundant_count());
+
+ samples.Accumulate(250000000, -100);
+ EXPECT_EQ(100, samples.GetCountAtIndex(1));
+
+ EXPECT_EQ(-1999999700LL, samples.sum());
+ EXPECT_EQ(0, samples.redundant_count());
+ EXPECT_EQ(samples.TotalCount(), samples.redundant_count());
+}
+
+TEST(SampleVectorTest, AddSubtractTest) {
+ // Custom buckets: [0, 1) [1, 2) [2, 3) [3, INT_MAX)
+ BucketRanges ranges(5);
+ ranges.set_range(0, 0);
+ ranges.set_range(1, 1);
+ ranges.set_range(2, 2);
+ ranges.set_range(3, 3);
+ ranges.set_range(4, INT_MAX);
+
+ SampleVector samples1(1, &ranges);
+ samples1.Accumulate(0, 100);
+ samples1.Accumulate(2, 100);
+ samples1.Accumulate(4, 100);
+ EXPECT_EQ(600, samples1.sum());
+ EXPECT_EQ(300, samples1.TotalCount());
+ EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+
+ SampleVector samples2(2, &ranges);
+ samples2.Accumulate(1, 200);
+ samples2.Accumulate(2, 200);
+ samples2.Accumulate(4, 200);
+ EXPECT_EQ(1400, samples2.sum());
+ EXPECT_EQ(600, samples2.TotalCount());
+ EXPECT_EQ(samples2.redundant_count(), samples2.TotalCount());
+
+ samples1.Add(samples2);
+ EXPECT_EQ(100, samples1.GetCountAtIndex(0));
+ EXPECT_EQ(200, samples1.GetCountAtIndex(1));
+ EXPECT_EQ(300, samples1.GetCountAtIndex(2));
+ EXPECT_EQ(300, samples1.GetCountAtIndex(3));
+ EXPECT_EQ(2000, samples1.sum());
+ EXPECT_EQ(900, samples1.TotalCount());
+ EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+
+ samples1.Subtract(samples2);
+ EXPECT_EQ(100, samples1.GetCountAtIndex(0));
+ EXPECT_EQ(0, samples1.GetCountAtIndex(1));
+ EXPECT_EQ(100, samples1.GetCountAtIndex(2));
+ EXPECT_EQ(100, samples1.GetCountAtIndex(3));
+ EXPECT_EQ(600, samples1.sum());
+ EXPECT_EQ(300, samples1.TotalCount());
+ EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+}
+
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+TEST(SampleVectorDeathTest, BucketIndexTest) {
+ // 8 buckets with exponential layout:
+ // [0, 1) [1, 2) [2, 4) [4, 8) [8, 16) [16, 32) [32, 64) [64, INT_MAX)
+ BucketRanges ranges(9);
+ Histogram::InitializeBucketRanges(1, 64, &ranges);
+ SampleVector samples(1, &ranges);
+
+ // Normal case
+ samples.Accumulate(0, 1);
+ samples.Accumulate(3, 2);
+ samples.Accumulate(64, 3);
+ EXPECT_EQ(1, samples.GetCount(0));
+ EXPECT_EQ(2, samples.GetCount(2));
+ EXPECT_EQ(3, samples.GetCount(65));
+
+ // Extreme case.
+ EXPECT_DEATH(samples.Accumulate(INT_MIN, 100), "");
+ EXPECT_DEATH(samples.Accumulate(-1, 100), "");
+ EXPECT_DEATH(samples.Accumulate(INT_MAX, 100), "");
+
+ // Custom buckets: [1, 5) [5, 10)
+ // Note, this is not a valid BucketRanges for Histogram because it does not
+ // have overflow buckets.
+ BucketRanges ranges2(3);
+ ranges2.set_range(0, 1);
+ ranges2.set_range(1, 5);
+ ranges2.set_range(2, 10);
+ SampleVector samples2(2, &ranges2);
+
+ // Normal case.
+ samples2.Accumulate(1, 1);
+ samples2.Accumulate(4, 1);
+ samples2.Accumulate(5, 2);
+ samples2.Accumulate(9, 2);
+ EXPECT_EQ(2, samples2.GetCount(1));
+ EXPECT_EQ(4, samples2.GetCount(5));
+
+ // Extreme case.
+ EXPECT_DEATH(samples2.Accumulate(0, 100), "");
+ EXPECT_DEATH(samples2.Accumulate(10, 100), "");
+}
+
+TEST(SampleVectorDeathTest, AddSubtractBucketNotMatchTest) {
+ // Custom buckets 1: [1, 3) [3, 5)
+ BucketRanges ranges1(3);
+ ranges1.set_range(0, 1);
+ ranges1.set_range(1, 3);
+ ranges1.set_range(2, 5);
+ SampleVector samples1(1, &ranges1);
+
+ // Custom buckets 2: [0, 1) [1, 3) [3, 6) [6, 7)
+ BucketRanges ranges2(5);
+ ranges2.set_range(0, 0);
+ ranges2.set_range(1, 1);
+ ranges2.set_range(2, 3);
+ ranges2.set_range(3, 6);
+ ranges2.set_range(4, 7);
+ SampleVector samples2(2, &ranges2);
+
+ samples2.Accumulate(1, 100);
+ samples1.Add(samples2);
+ EXPECT_EQ(100, samples1.GetCountAtIndex(0));
+
+ // Extra bucket in the beginning.
+ samples2.Accumulate(0, 100);
+ EXPECT_DEATH(samples1.Add(samples2), "");
+ EXPECT_DEATH(samples1.Subtract(samples2), "");
+
+ // Extra bucket in the end.
+ samples2.Accumulate(0, -100);
+ samples2.Accumulate(6, 100);
+ EXPECT_DEATH(samples1.Add(samples2), "");
+ EXPECT_DEATH(samples1.Subtract(samples2), "");
+
+ // Bucket not match: [3, 5) VS [3, 6)
+ samples2.Accumulate(6, -100);
+ samples2.Accumulate(3, 100);
+ EXPECT_DEATH(samples1.Add(samples2), "");
+ EXPECT_DEATH(samples1.Subtract(samples2), "");
+}
+
+#endif
+// (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+
+TEST(SampleVectorIteratorTest, IterateTest) {
+ BucketRanges ranges(5);
+ ranges.set_range(0, 0);
+ ranges.set_range(1, 1);
+ ranges.set_range(2, 2);
+ ranges.set_range(3, 3);
+ ranges.set_range(4, 4);
+
+ std::vector<HistogramBase::Count> counts(3);
+ counts[0] = 1;
+ counts[1] = 0; // Iterator will bypass this empty bucket.
+ counts[2] = 2;
+
+ // BucketRanges can have larger size than counts.
+ SampleVectorIterator it(&counts, &ranges);
+ size_t index;
+
+ HistogramBase::Sample min;
+ HistogramBase::Sample max;
+ HistogramBase::Count count;
+ it.Get(&min, &max, &count);
+ EXPECT_EQ(0, min);
+ EXPECT_EQ(1, max);
+ EXPECT_EQ(1, count);
+ EXPECT_TRUE(it.GetBucketIndex(&index));
+ EXPECT_EQ(0u, index);
+
+ it.Next();
+ it.Get(&min, &max, &count);
+ EXPECT_EQ(2, min);
+ EXPECT_EQ(3, max);
+ EXPECT_EQ(2, count);
+ EXPECT_TRUE(it.GetBucketIndex(&index));
+ EXPECT_EQ(2u, index);
+
+ it.Next();
+ EXPECT_TRUE(it.Done());
+
+ // Create iterator from SampleVector.
+ SampleVector samples(1, &ranges);
+ samples.Accumulate(0, 0);
+ samples.Accumulate(1, 1);
+ samples.Accumulate(2, 2);
+ samples.Accumulate(3, 3);
+ std::unique_ptr<SampleCountIterator> it2 = samples.Iterator();
+
+ int i;
+ for (i = 1; !it2->Done(); i++, it2->Next()) {
+ it2->Get(&min, &max, &count);
+ EXPECT_EQ(i, min);
+ EXPECT_EQ(i + 1, max);
+ EXPECT_EQ(i, count);
+
+ size_t index;
+ EXPECT_TRUE(it2->GetBucketIndex(&index));
+ EXPECT_EQ(static_cast<size_t>(i), index);
+ }
+ EXPECT_EQ(4, i);
+}
+
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+
+TEST(SampleVectorIteratorDeathTest, IterateDoneTest) {
+ BucketRanges ranges(5);
+ ranges.set_range(0, 0);
+ ranges.set_range(1, 1);
+ ranges.set_range(2, 2);
+ ranges.set_range(3, 3);
+ ranges.set_range(4, INT_MAX);
+ SampleVector samples(1, &ranges);
+
+ std::unique_ptr<SampleCountIterator> it = samples.Iterator();
+
+ EXPECT_TRUE(it->Done());
+
+ HistogramBase::Sample min;
+ HistogramBase::Sample max;
+ HistogramBase::Count count;
+ EXPECT_DEATH(it->Get(&min, &max, &count), "");
+
+ EXPECT_DEATH(it->Next(), "");
+
+ samples.Accumulate(2, 100);
+ it = samples.Iterator();
+ EXPECT_FALSE(it->Done());
+}
+
+#endif
+// (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+
+} // namespace
+} // namespace base
diff --git a/libchrome/base/metrics/sparse_histogram.cc b/libchrome/base/metrics/sparse_histogram.cc
new file mode 100644
index 0000000..3c1222d
--- /dev/null
+++ b/libchrome/base/metrics/sparse_histogram.cc
@@ -0,0 +1,289 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/sparse_histogram.h"
+
+#include <utility>
+
+#include "base/memory/ptr_util.h"
+#include "base/metrics/metrics_hashes.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_sample_map.h"
+#include "base/metrics/sample_map.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/pickle.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+typedef HistogramBase::Count Count;
+typedef HistogramBase::Sample Sample;
+
+// static
+HistogramBase* SparseHistogram::FactoryGet(const std::string& name,
+ int32_t flags) {
+ HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
+ if (!histogram) {
+ // Try to create the histogram using a "persistent" allocator. As of
+ // 2016-02-25, the availability of such is controlled by a base::Feature
+ // that is off by default. If the allocator doesn't exist or if
+ // allocating from it fails, code below will allocate the histogram from
+ // the process heap.
+ PersistentMemoryAllocator::Reference histogram_ref = 0;
+ std::unique_ptr<HistogramBase> tentative_histogram;
+ PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
+ if (allocator) {
+ tentative_histogram = allocator->AllocateHistogram(
+ SPARSE_HISTOGRAM, name, 0, 0, nullptr, flags, &histogram_ref);
+ }
+
+ // Handle the case where no persistent allocator is present or the
+ // persistent allocation fails (perhaps because it is full).
+ if (!tentative_histogram) {
+ DCHECK(!histogram_ref); // Should never have been set.
+ DCHECK(!allocator); // Shouldn't have failed.
+ flags &= ~HistogramBase::kIsPersistent;
+ tentative_histogram.reset(new SparseHistogram(name));
+ tentative_histogram->SetFlags(flags);
+ }
+
+ // Register this histogram with the StatisticsRecorder. Keep a copy of
+ // the pointer value to tell later whether the locally created histogram
+ // was registered or deleted. The type is "void" because it could point
+ // to released memory after the following line.
+ const void* tentative_histogram_ptr = tentative_histogram.get();
+ histogram = StatisticsRecorder::RegisterOrDeleteDuplicate(
+ tentative_histogram.release());
+
+ // Persistent histograms need some follow-up processing.
+ if (histogram_ref) {
+ allocator->FinalizeHistogram(histogram_ref,
+ histogram == tentative_histogram_ptr);
+ }
+
+ ReportHistogramActivity(*histogram, HISTOGRAM_CREATED);
+ } else {
+ ReportHistogramActivity(*histogram, HISTOGRAM_LOOKUP);
+ }
+
+ DCHECK_EQ(SPARSE_HISTOGRAM, histogram->GetHistogramType());
+ return histogram;
+}
+
+// static
+std::unique_ptr<HistogramBase> SparseHistogram::PersistentCreate(
+ PersistentHistogramAllocator* allocator,
+ const std::string& name,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta) {
+ return WrapUnique(
+ new SparseHistogram(allocator, name, meta, logged_meta));
+}
+
+SparseHistogram::~SparseHistogram() {}
+
+uint64_t SparseHistogram::name_hash() const {
+ return samples_->id();
+}
+
+HistogramType SparseHistogram::GetHistogramType() const {
+ return SPARSE_HISTOGRAM;
+}
+
+bool SparseHistogram::HasConstructionArguments(
+ Sample /*expected_minimum*/,
+ Sample /*expected_maximum*/,
+ uint32_t /*expected_bucket_count*/) const {
+ // SparseHistogram never has min/max/bucket_count limit.
+ return false;
+}
+
+void SparseHistogram::Add(Sample value) {
+ AddCount(value, 1);
+}
+
+void SparseHistogram::AddCount(Sample value, int count) {
+ if (count <= 0) {
+ NOTREACHED();
+ return;
+ }
+ {
+ base::AutoLock auto_lock(lock_);
+ samples_->Accumulate(value, count);
+ }
+
+ FindAndRunCallback(value);
+}
+
+std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotSamples() const {
+ std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
+
+ base::AutoLock auto_lock(lock_);
+ snapshot->Add(*samples_);
+ return std::move(snapshot);
+}
+
+std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotDelta() {
+ DCHECK(!final_delta_created_);
+
+ std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
+ base::AutoLock auto_lock(lock_);
+ snapshot->Add(*samples_);
+
+ // Subtract what was previously logged and update that information.
+ snapshot->Subtract(*logged_samples_);
+ logged_samples_->Add(*snapshot);
+ return std::move(snapshot);
+}
+
+std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotFinalDelta() const {
+ DCHECK(!final_delta_created_);
+ final_delta_created_ = true;
+
+ std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
+ base::AutoLock auto_lock(lock_);
+ snapshot->Add(*samples_);
+
+ // Subtract what was previously logged and then return.
+ snapshot->Subtract(*logged_samples_);
+ return std::move(snapshot);
+}
+
+void SparseHistogram::AddSamples(const HistogramSamples& samples) {
+ base::AutoLock auto_lock(lock_);
+ samples_->Add(samples);
+}
+
+bool SparseHistogram::AddSamplesFromPickle(PickleIterator* iter) {
+ base::AutoLock auto_lock(lock_);
+ return samples_->AddFromPickle(iter);
+}
+
+void SparseHistogram::WriteHTMLGraph(std::string* output) const {
+ output->append("<PRE>");
+ WriteAsciiImpl(true, "<br>", output);
+ output->append("</PRE>");
+}
+
+void SparseHistogram::WriteAscii(std::string* output) const {
+ WriteAsciiImpl(true, "\n", output);
+}
+
+bool SparseHistogram::SerializeInfoImpl(Pickle* pickle) const {
+ return pickle->WriteString(histogram_name()) && pickle->WriteInt(flags());
+}
+
+SparseHistogram::SparseHistogram(const std::string& name)
+ : HistogramBase(name),
+ samples_(new SampleMap(HashMetricName(name))),
+ logged_samples_(new SampleMap(samples_->id())) {}
+
+SparseHistogram::SparseHistogram(PersistentHistogramAllocator* allocator,
+ const std::string& name,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta)
+ : HistogramBase(name),
+ // While other histogram types maintain a static vector of values with
+ // sufficient space for both "active" and "logged" samples, with each
+ // SampleVector being given the appropriate half, sparse histograms
+ // have no such initial allocation. Each sample has its own record
+ // attached to a single PersistentSampleMap by a common 64-bit identifier.
+ // Since a sparse histogram has two sample maps (active and logged),
+      // there must be two sets of sample records with different IDs. The
+ // "active" samples use, for convenience purposes, an ID matching
+ // that of the histogram while the "logged" samples use that number
+ // plus 1.
+ samples_(new PersistentSampleMap(HashMetricName(name), allocator, meta)),
+ logged_samples_(
+ new PersistentSampleMap(samples_->id() + 1, allocator, logged_meta)) {
+}
+
+HistogramBase* SparseHistogram::DeserializeInfoImpl(PickleIterator* iter) {
+ std::string histogram_name;
+ int flags;
+ if (!iter->ReadString(&histogram_name) || !iter->ReadInt(&flags)) {
+ DLOG(ERROR) << "Pickle error decoding Histogram: " << histogram_name;
+ return NULL;
+ }
+
+ flags &= ~HistogramBase::kIPCSerializationSourceFlag;
+
+ return SparseHistogram::FactoryGet(histogram_name, flags);
+}
+
+void SparseHistogram::GetParameters(DictionaryValue* /*params*/) const {
+ // TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
+}
+
+void SparseHistogram::GetCountAndBucketData(Count* /*count*/,
+ int64_t* /*sum*/,
+ ListValue* /*buckets*/) const {
+ // TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
+}
+
+void SparseHistogram::WriteAsciiImpl(bool graph_it,
+ const std::string& newline,
+ std::string* output) const {
+ // Get a local copy of the data so we are consistent.
+ std::unique_ptr<HistogramSamples> snapshot = SnapshotSamples();
+ Count total_count = snapshot->TotalCount();
+ double scaled_total_count = total_count / 100.0;
+
+ WriteAsciiHeader(total_count, output);
+ output->append(newline);
+
+ // Determine how wide the largest bucket range is (how many digits to print),
+ // so that we'll be able to right-align starts for the graphical bars.
+ // Determine which bucket has the largest sample count so that we can
+ // normalize the graphical bar-width relative to that sample count.
+ Count largest_count = 0;
+ Sample largest_sample = 0;
+ std::unique_ptr<SampleCountIterator> it = snapshot->Iterator();
+ while (!it->Done()) {
+ Sample min;
+ Sample max;
+ Count count;
+ it->Get(&min, &max, &count);
+ if (min > largest_sample)
+ largest_sample = min;
+ if (count > largest_count)
+ largest_count = count;
+ it->Next();
+ }
+ size_t print_width = GetSimpleAsciiBucketRange(largest_sample).size() + 1;
+
+ // iterate over each item and display them
+ it = snapshot->Iterator();
+ while (!it->Done()) {
+ Sample min;
+ Sample max;
+ Count count;
+ it->Get(&min, &max, &count);
+
+ // value is min, so display it
+ std::string range = GetSimpleAsciiBucketRange(min);
+ output->append(range);
+ for (size_t j = 0; range.size() + j < print_width + 1; ++j)
+ output->push_back(' ');
+
+ if (graph_it)
+ WriteAsciiBucketGraph(count, largest_count, output);
+ WriteAsciiBucketValue(count, scaled_total_count, output);
+ output->append(newline);
+ it->Next();
+ }
+}
+
+void SparseHistogram::WriteAsciiHeader(const Count total_count,
+ std::string* output) const {
+ StringAppendF(output,
+ "Histogram: %s recorded %d samples",
+ histogram_name().c_str(),
+ total_count);
+ if (flags() & ~kHexRangePrintingFlag)
+ StringAppendF(output, " (flags = 0x%x)", flags() & ~kHexRangePrintingFlag);
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/sparse_histogram.h b/libchrome/base/metrics/sparse_histogram.h
new file mode 100644
index 0000000..3b302d6
--- /dev/null
+++ b/libchrome/base/metrics/sparse_histogram.h
@@ -0,0 +1,136 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_SPARSE_HISTOGRAM_H_
+#define BASE_METRICS_SPARSE_HISTOGRAM_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/sample_map.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+// Sparse histograms are well suited for recording counts of exact sample values
+// that are sparsely distributed over a large range.
+//
+// The implementation uses a lock and a map, whereas other histogram types use a
+// vector and no lock. It is thus more costly to add values to, and each value
+// stored has more overhead, compared to the other histogram types. However it
+// may be more efficient in memory if the total number of sample values is small
+// compared to the range of their values.
+//
+// UMA_HISTOGRAM_ENUMERATION would be better suited for a smaller range of
+// enumerations that are (nearly) contiguous. Also for code that is expected to
+// run often or in a tight loop.
+//
+// UMA_HISTOGRAM_SPARSE_SLOWLY is good for sparsely distributed and/or
+// infrequently recorded values.
+//
+// For instance, Sqlite.Version.* are SPARSE because for any given database,
+// there's going to be exactly one version logged, meaning no gain to having a
+// pre-allocated vector of slots once the fleet gets to version 4 or 5 or 10.
+// Likewise Sqlite.Error.* are SPARSE, because most databases generate few or no
+// errors and there are large gaps in the set of possible errors.
+#define UMA_HISTOGRAM_SPARSE_SLOWLY(name, sample) \
+ do { \
+ base::HistogramBase* histogram = base::SparseHistogram::FactoryGet( \
+ name, base::HistogramBase::kUmaTargetedHistogramFlag); \
+ histogram->Add(sample); \
+ } while (0)
+
+class HistogramSamples;
+class PersistentHistogramAllocator;
+
+class BASE_EXPORT SparseHistogram : public HistogramBase {
+ public:
+ // If there's one with same name, return the existing one. If not, create a
+ // new one.
+ static HistogramBase* FactoryGet(const std::string& name, int32_t flags);
+
+ // Create a histogram using data in persistent storage. The allocator must
+ // live longer than the created sparse histogram.
+ static std::unique_ptr<HistogramBase> PersistentCreate(
+ PersistentHistogramAllocator* allocator,
+ const std::string& name,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta);
+
+ ~SparseHistogram() override;
+
+ // HistogramBase implementation:
+ uint64_t name_hash() const override;
+ HistogramType GetHistogramType() const override;
+ bool HasConstructionArguments(Sample expected_minimum,
+ Sample expected_maximum,
+ uint32_t expected_bucket_count) const override;
+ void Add(Sample value) override;
+ void AddCount(Sample value, int count) override;
+ void AddSamples(const HistogramSamples& samples) override;
+ bool AddSamplesFromPickle(base::PickleIterator* iter) override;
+ std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
+ std::unique_ptr<HistogramSamples> SnapshotDelta() override;
+ std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
+ void WriteHTMLGraph(std::string* output) const override;
+ void WriteAscii(std::string* output) const override;
+
+ protected:
+ // HistogramBase implementation:
+ bool SerializeInfoImpl(base::Pickle* pickle) const override;
+
+ private:
+ // Clients should always use FactoryGet to create SparseHistogram.
+ explicit SparseHistogram(const std::string& name);
+
+ SparseHistogram(PersistentHistogramAllocator* allocator,
+ const std::string& name,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta);
+
+ friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
+ base::PickleIterator* iter);
+ static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
+
+ void GetParameters(DictionaryValue* params) const override;
+ void GetCountAndBucketData(Count* count,
+ int64_t* sum,
+ ListValue* buckets) const override;
+
+ // Helpers for emitting Ascii graphic. Each method appends data to output.
+ void WriteAsciiImpl(bool graph_it,
+ const std::string& newline,
+ std::string* output) const;
+
+ // Write a common header message describing this histogram.
+ void WriteAsciiHeader(const Count total_count,
+ std::string* output) const;
+
+  // For constructor calling.
+ friend class SparseHistogramTest;
+
+ // Protects access to |samples_|.
+ mutable base::Lock lock_;
+
+  // Flag to indicate if SnapshotFinalDelta has been previously called.
+ mutable bool final_delta_created_ = false;
+
+ std::unique_ptr<HistogramSamples> samples_;
+ std::unique_ptr<HistogramSamples> logged_samples_;
+
+ DISALLOW_COPY_AND_ASSIGN(SparseHistogram);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_SPARSE_HISTOGRAM_H_
diff --git a/libchrome/base/metrics/sparse_histogram_unittest.cc b/libchrome/base/metrics/sparse_histogram_unittest.cc
new file mode 100644
index 0000000..eab7790
--- /dev/null
+++ b/libchrome/base/metrics/sparse_histogram_unittest.cc
@@ -0,0 +1,329 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/sparse_histogram.h"
+
+#include <memory>
+#include <string>
+
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/metrics/sample_map.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/pickle.h"
+#include "base/strings/stringprintf.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Test parameter indicates if a persistent memory allocator should be used
+// for histogram allocation. False will allocate histograms from the process
+// heap.
+class SparseHistogramTest : public testing::TestWithParam<bool> {
+ protected:
+ // Size of the local persistent-memory segment used when the test runs in
+ // persistent-allocator mode.
+ const int32_t kAllocatorMemorySize = 8 << 20; // 8 MiB
+
+ // GetParam() selects heap (false) vs. persistent-memory (true) allocation.
+ SparseHistogramTest() : use_persistent_histogram_allocator_(GetParam()) {}
+
+ void SetUp() override {
+ if (use_persistent_histogram_allocator_)
+ CreatePersistentMemoryAllocator();
+
+ // Each test will have a clean state (no Histogram / BucketRanges
+ // registered).
+ InitializeStatisticsRecorder();
+ }
+
+ void TearDown() override {
+ // Verify the persistent segment (when used) survived the test intact.
+ if (allocator_) {
+ ASSERT_FALSE(allocator_->IsFull());
+ ASSERT_FALSE(allocator_->IsCorrupt());
+ }
+ UninitializeStatisticsRecorder();
+ DestroyPersistentMemoryAllocator();
+ }
+
+ // Installs a temporary StatisticsRecorder that shadows any global one.
+ void InitializeStatisticsRecorder() {
+ DCHECK(!statistics_recorder_);
+ statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
+ }
+
+ void UninitializeStatisticsRecorder() {
+ statistics_recorder_.reset();
+ }
+
+ void CreatePersistentMemoryAllocator() {
+ // By getting the results-histogram before any persistent allocator
+ // is attached, that histogram is guaranteed not to be stored in
+ // any persistent memory segment (which simplifies some tests).
+ GlobalHistogramAllocator::GetCreateHistogramResultHistogram();
+
+ GlobalHistogramAllocator::CreateWithLocalMemory(
+ kAllocatorMemorySize, 0, "SparseHistogramAllocatorTest");
+ allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
+ }
+
+ void DestroyPersistentMemoryAllocator() {
+ // |allocator_| is owned by the GlobalHistogramAllocator; just drop the
+ // borrowed pointer before releasing the global.
+ allocator_ = nullptr;
+ GlobalHistogramAllocator::ReleaseForTesting();
+ }
+
+ // Heap-allocates a SparseHistogram directly via its private constructor
+ // (this fixture is a friend), bypassing FactoryGet registration.
+ std::unique_ptr<SparseHistogram> NewSparseHistogram(const std::string& name) {
+ return std::unique_ptr<SparseHistogram>(new SparseHistogram(name));
+ }
+
+ const bool use_persistent_histogram_allocator_;
+
+ std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+ // Borrowed from the GlobalHistogramAllocator; null in heap mode.
+ PersistentMemoryAllocator* allocator_ = nullptr;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SparseHistogramTest);
+};
+
+// Run all HistogramTest cases with both heap and persistent memory.
+INSTANTIATE_TEST_CASE_P(HeapAndPersistent,
+ SparseHistogramTest,
+ testing::Bool());
+
+
+// Verifies Add() accumulates per-value counts and TotalCount().
+TEST_P(SparseHistogramTest, BasicTest) {
+ std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+ std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+ EXPECT_EQ(0, snapshot->TotalCount());
+ EXPECT_EQ(0, snapshot->sum());
+
+ histogram->Add(100);
+ std::unique_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
+ EXPECT_EQ(1, snapshot1->TotalCount());
+ EXPECT_EQ(1, snapshot1->GetCount(100));
+
+ histogram->Add(100);
+ histogram->Add(101);
+ std::unique_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+ EXPECT_EQ(3, snapshot2->TotalCount());
+ EXPECT_EQ(2, snapshot2->GetCount(100));
+ EXPECT_EQ(1, snapshot2->GetCount(101));
+}
+
+// Verifies AddCount() adds the full count in one call and accumulates
+// across calls for the same value.
+TEST_P(SparseHistogramTest, BasicTestAddCount) {
+ std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+ std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+ EXPECT_EQ(0, snapshot->TotalCount());
+ EXPECT_EQ(0, snapshot->sum());
+
+ histogram->AddCount(100, 15);
+ std::unique_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
+ EXPECT_EQ(15, snapshot1->TotalCount());
+ EXPECT_EQ(15, snapshot1->GetCount(100));
+
+ histogram->AddCount(100, 15);
+ histogram->AddCount(101, 25);
+ std::unique_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+ EXPECT_EQ(55, snapshot2->TotalCount());
+ EXPECT_EQ(30, snapshot2->GetCount(100));
+ EXPECT_EQ(25, snapshot2->GetCount(101));
+}
+
+// Verifies the running sum() uses 64-bit arithmetic: value*count products
+// here exceed the 32-bit range.
+TEST_P(SparseHistogramTest, AddCount_LargeValuesDontOverflow) {
+ std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+ std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+ EXPECT_EQ(0, snapshot->TotalCount());
+ EXPECT_EQ(0, snapshot->sum());
+
+ histogram->AddCount(1000000000, 15);
+ std::unique_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
+ EXPECT_EQ(15, snapshot1->TotalCount());
+ EXPECT_EQ(15, snapshot1->GetCount(1000000000));
+
+ histogram->AddCount(1000000000, 15);
+ histogram->AddCount(1010000000, 25);
+ std::unique_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+ EXPECT_EQ(55, snapshot2->TotalCount());
+ EXPECT_EQ(30, snapshot2->GetCount(1000000000));
+ EXPECT_EQ(25, snapshot2->GetCount(1010000000));
+ // 30 * 1e9 + 25 * 1.01e9 = 55,250,000,000 — only representable in 64 bits.
+ EXPECT_EQ(55250000000LL, snapshot2->sum());
+}
+
+// Verifies the UMA_HISTOGRAM_SPARSE_SLOWLY macro registers a single
+// SPARSE_HISTOGRAM with the UMA-targeted flag and records samples.
+TEST_P(SparseHistogramTest, MacroBasicTest) {
+ UMA_HISTOGRAM_SPARSE_SLOWLY("Sparse", 100);
+ UMA_HISTOGRAM_SPARSE_SLOWLY("Sparse", 200);
+ UMA_HISTOGRAM_SPARSE_SLOWLY("Sparse", 100);
+
+ StatisticsRecorder::Histograms histograms;
+ StatisticsRecorder::GetHistograms(&histograms);
+
+ ASSERT_EQ(1U, histograms.size());
+ HistogramBase* sparse_histogram = histograms[0];
+
+ EXPECT_EQ(SPARSE_HISTOGRAM, sparse_histogram->GetHistogramType());
+ EXPECT_EQ("Sparse", sparse_histogram->histogram_name());
+ // Persistent-mode histograms additionally carry kIsPersistent.
+ EXPECT_EQ(
+ HistogramBase::kUmaTargetedHistogramFlag |
+ (use_persistent_histogram_allocator_ ? HistogramBase::kIsPersistent
+ : 0),
+ sparse_histogram->flags());
+
+ std::unique_ptr<HistogramSamples> samples =
+ sparse_histogram->SnapshotSamples();
+ EXPECT_EQ(3, samples->TotalCount());
+ EXPECT_EQ(2, samples->GetCount(100));
+ EXPECT_EQ(1, samples->GetCount(200));
+}
+
+TEST_P(SparseHistogramTest, MacroInLoopTest) {
+ // Unlike the macros in histogram.h, SparseHistogram macros can have a
+ // variable as histogram name.
+ for (int i = 0; i < 2; i++) {
+ std::string name = StringPrintf("Sparse%d", i + 1);
+ UMA_HISTOGRAM_SPARSE_SLOWLY(name, 100);
+ }
+
+ StatisticsRecorder::Histograms histograms;
+ StatisticsRecorder::GetHistograms(&histograms);
+ ASSERT_EQ(2U, histograms.size());
+
+ // GetHistograms() makes no ordering guarantee, so accept either order.
+ std::string name1 = histograms[0]->histogram_name();
+ std::string name2 = histograms[1]->histogram_name();
+ EXPECT_TRUE(("Sparse1" == name1 && "Sparse2" == name2) ||
+ ("Sparse2" == name1 && "Sparse1" == name2));
+}
+
+// Verifies SerializeInfo() pickles exactly: type (int), name (string),
+// flags (int) — and nothing more.
+TEST_P(SparseHistogramTest, Serialize) {
+ std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+ histogram->SetFlags(HistogramBase::kIPCSerializationSourceFlag);
+
+ Pickle pickle;
+ histogram->SerializeInfo(&pickle);
+
+ PickleIterator iter(pickle);
+
+ int type;
+ EXPECT_TRUE(iter.ReadInt(&type));
+ EXPECT_EQ(SPARSE_HISTOGRAM, type);
+
+ std::string name;
+ EXPECT_TRUE(iter.ReadString(&name));
+ EXPECT_EQ("Sparse", name);
+
+ int flag;
+ EXPECT_TRUE(iter.ReadInt(&flag));
+ EXPECT_EQ(HistogramBase::kIPCSerializationSourceFlag, flag);
+
+ // No more data in the pickle.
+ EXPECT_FALSE(iter.SkipBytes(1));
+}
+
+// Ensure that race conditions that cause multiple, identical sparse histograms
+// to be created will safely resolve to a single one.
+// Ensure that race conditions that cause multiple, identical sparse histograms
+// to be created will safely resolve to a single one.
+TEST_P(SparseHistogramTest, DuplicationSafety) {
+ const char histogram_name[] = "Duplicated";
+ size_t histogram_count = StatisticsRecorder::GetHistogramCount();
+
+ // Create a histogram that we will later duplicate.
+ HistogramBase* original =
+ SparseHistogram::FactoryGet(histogram_name, HistogramBase::kNoFlags);
+ ++histogram_count;
+ DCHECK_EQ(histogram_count, StatisticsRecorder::GetHistogramCount());
+ original->Add(1);
+
+ // Create a duplicate. This has to happen differently depending on where the
+ // memory is taken from.
+ if (use_persistent_histogram_allocator_) {
+ // To allocate from persistent memory, clear the last_created reference in
+ // the GlobalHistogramAllocator. This will cause an Import to recreate
+ // the just-created histogram which will then be released as a duplicate.
+ GlobalHistogramAllocator::Get()->ClearLastCreatedReferenceForTesting();
+ // Creating a different histogram will first do an Import to ensure it
+ // hasn't been created elsewhere, triggering the duplication and release.
+ SparseHistogram::FactoryGet("something.new", HistogramBase::kNoFlags);
+ ++histogram_count;
+ } else {
+ // To allocate from the heap, just call the (private) constructor directly.
+ // Delete it immediately as would have happened within FactoryGet();
+ std::unique_ptr<SparseHistogram> something =
+ NewSparseHistogram(histogram_name);
+ DCHECK_NE(original, something.get());
+ }
+ DCHECK_EQ(histogram_count, StatisticsRecorder::GetHistogramCount());
+
+ // Re-creating the histogram via FactoryGet() will return the same one.
+ HistogramBase* duplicate =
+ SparseHistogram::FactoryGet(histogram_name, HistogramBase::kNoFlags);
+ DCHECK_EQ(original, duplicate);
+ DCHECK_EQ(histogram_count, StatisticsRecorder::GetHistogramCount());
+ duplicate->Add(2);
+
+ // Ensure that original histograms are still cross-functional.
+ // Samples added through either pointer must land in the same storage.
+ original->Add(2);
+ duplicate->Add(1);
+ std::unique_ptr<HistogramSamples> snapshot_orig = original->SnapshotSamples();
+ std::unique_ptr<HistogramSamples> snapshot_dup = duplicate->SnapshotSamples();
+ DCHECK_EQ(2, snapshot_orig->GetCount(2));
+ DCHECK_EQ(2, snapshot_dup->GetCount(1));
+}
+
+// Rough timing benchmark (results logged via VLOG, no pass/fail threshold)
+// for histogram creation, lookup and sample addition.
+TEST_P(SparseHistogramTest, FactoryTime) {
+ const int kTestCreateCount = 1 << 10; // Must be power-of-2.
+ const int kTestLookupCount = 100000;
+ const int kTestAddCount = 100000;
+
+ // Create all histogram names in advance for accurate timing below.
+ std::vector<std::string> histogram_names;
+ for (int i = 0; i < kTestCreateCount; ++i) {
+ histogram_names.push_back(
+ StringPrintf("TestHistogram.%d", i % kTestCreateCount));
+ }
+
+ // Calculate cost of creating histograms.
+ TimeTicks create_start = TimeTicks::Now();
+ for (int i = 0; i < kTestCreateCount; ++i)
+ SparseHistogram::FactoryGet(histogram_names[i], HistogramBase::kNoFlags);
+ TimeDelta create_ticks = TimeTicks::Now() - create_start;
+ int64_t create_ms = create_ticks.InMilliseconds();
+
+ VLOG(1) << kTestCreateCount << " histogram creations took " << create_ms
+ << "ms or about "
+ << (create_ms * 1000000) / kTestCreateCount
+ << "ns each.";
+
+ // Calculate cost of looking up existing histograms.
+ TimeTicks lookup_start = TimeTicks::Now();
+ for (int i = 0; i < kTestLookupCount; ++i) {
+ // 6007 is co-prime with kTestCreateCount and so will do lookups in an
+ // order less likely to be cacheable (but still hit them all) should the
+ // underlying storage use the exact histogram name as the key.
+ const int i_mult = 6007;
+ static_assert(i_mult < INT_MAX / kTestCreateCount, "Multiplier too big");
+ // Power-of-2 kTestCreateCount lets "& (count - 1)" replace "% count".
+ int index = (i * i_mult) & (kTestCreateCount - 1);
+ SparseHistogram::FactoryGet(histogram_names[index],
+ HistogramBase::kNoFlags);
+ }
+ TimeDelta lookup_ticks = TimeTicks::Now() - lookup_start;
+ int64_t lookup_ms = lookup_ticks.InMilliseconds();
+
+ VLOG(1) << kTestLookupCount << " histogram lookups took " << lookup_ms
+ << "ms or about "
+ << (lookup_ms * 1000000) / kTestLookupCount
+ << "ns each.";
+
+ // Calculate cost of accessing histograms.
+ HistogramBase* histogram =
+ SparseHistogram::FactoryGet(histogram_names[0], HistogramBase::kNoFlags);
+ ASSERT_TRUE(histogram);
+ TimeTicks add_start = TimeTicks::Now();
+ for (int i = 0; i < kTestAddCount; ++i)
+ histogram->Add(i & 127);
+ TimeDelta add_ticks = TimeTicks::Now() - add_start;
+ int64_t add_ms = add_ticks.InMilliseconds();
+
+ VLOG(1) << kTestAddCount << " histogram adds took " << add_ms
+ << "ms or about "
+ << (add_ms * 1000000) / kTestAddCount
+ << "ns each.";
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/statistics_recorder.cc b/libchrome/base/metrics/statistics_recorder.cc
new file mode 100644
index 0000000..42ed5a9
--- /dev/null
+++ b/libchrome/base/metrics/statistics_recorder.cc
@@ -0,0 +1,543 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/statistics_recorder.h"
+
+#include <memory>
+
+#include "base/at_exit.h"
+#include "base/debug/leak_annotations.h"
+#include "base/json/string_escape.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/metrics_hashes.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/stl_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
+#include "base/values.h"
+
+namespace {
+
+// Initialize histogram statistics gathering system.
+// Leaky: the recorder is intentionally never destroyed at process exit so
+// late-running code can still record (see ~StatisticsRecorder for teardown
+// used only by tests).
+base::LazyInstance<base::StatisticsRecorder>::Leaky g_statistics_recorder_ =
+ LAZY_INSTANCE_INITIALIZER;
+
+// Orders histograms alphabetically by name, for stable text/HTML output.
+bool HistogramNameLesser(const base::HistogramBase* a,
+ const base::HistogramBase* b) {
+ return a->histogram_name() < b->histogram_name();
+}
+
+} // namespace
+
+namespace base {
+
+// Wraps a HistogramMap iterator, optionally skipping persistent histograms.
+StatisticsRecorder::HistogramIterator::HistogramIterator(
+ const HistogramMap::iterator& iter, bool include_persistent)
+ : iter_(iter),
+ include_persistent_(include_persistent) {
+ // The starting location could point to a persistent histogram when such
+ // is not wanted. If so, skip it.
+ // NOTE(review): this initial check dereferences |iter_| and |histograms_|
+ // without holding |lock_| — presumably relying on callers constructing
+ // iterators only while the recorder exists; confirm against callers.
+ if (!include_persistent_ && iter_ != histograms_->end() &&
+ (iter_->second->flags() & HistogramBase::kIsPersistent)) {
+ // This operator will continue to skip until a non-persistent histogram
+ // is found.
+ operator++();
+ }
+}
+
+StatisticsRecorder::HistogramIterator::HistogramIterator(
+ const HistogramIterator& rhs)
+ : iter_(rhs.iter_),
+ include_persistent_(rhs.include_persistent_) {
+}
+
+StatisticsRecorder::HistogramIterator::~HistogramIterator() {}
+
+// Advances to the next histogram, skipping persistent ones when
+// |include_persistent_| is false. A null |lock_| (recorder never
+// initialized) makes this a no-op.
+StatisticsRecorder::HistogramIterator&
+StatisticsRecorder::HistogramIterator::operator++() {
+ const HistogramMap::iterator histograms_end = histograms_->end();
+ if (iter_ == histograms_end || lock_ == NULL)
+ return *this;
+
+ base::AutoLock auto_lock(*lock_);
+
+ for (;;) {
+ ++iter_;
+ if (iter_ == histograms_end)
+ break;
+ if (!include_persistent_ && (iter_->second->flags() &
+ HistogramBase::kIsPersistent)) {
+ continue;
+ }
+ break;
+ }
+
+ return *this;
+}
+
+// Destructor is only meaningful for temporary recorders created by
+// CreateTemporaryForTesting(): it discards this instance's maps and
+// restores whatever maps existed before construction.
+StatisticsRecorder::~StatisticsRecorder() {
+ DCHECK(lock_);
+ DCHECK(histograms_);
+ DCHECK(ranges_);
+
+ // Clean out what this object created and then restore what existed before.
+ Reset();
+ base::AutoLock auto_lock(*lock_);
+ histograms_ = existing_histograms_.release();
+ callbacks_ = existing_callbacks_.release();
+ ranges_ = existing_ranges_.release();
+}
+
+// static
+void StatisticsRecorder::Initialize() {
+ // Ensure that an instance of the StatisticsRecorder object is created.
+ g_statistics_recorder_.Get();
+}
+
+// static
+// Active means Initialize() has run and Reset() has not since cleared the
+// histogram map.
+bool StatisticsRecorder::IsActive() {
+ if (lock_ == NULL)
+ return false;
+ base::AutoLock auto_lock(*lock_);
+ return NULL != histograms_;
+}
+
+// static
+// static
+// Registers |histogram| under its name, or — if another histogram with the
+// same name is already registered — deletes |histogram| and returns the
+// registered one. Callers must use the returned pointer, never |histogram|.
+HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
+ HistogramBase* histogram) {
+ // As per crbug.com/79322 the histograms are intentionally leaked, so we need
+ // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only once
+ // for an object, the duplicates should not be annotated.
+ // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
+ // twice if (lock_ == NULL) || (!histograms_).
+ if (lock_ == NULL) {
+ ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
+ return histogram;
+ }
+
+ HistogramBase* histogram_to_delete = NULL;
+ HistogramBase* histogram_to_return = NULL;
+ {
+ base::AutoLock auto_lock(*lock_);
+ if (histograms_ == NULL) {
+ histogram_to_return = histogram;
+ } else {
+ const std::string& name = histogram->histogram_name();
+ HistogramMap::iterator it = histograms_->find(name);
+ if (histograms_->end() == it) {
+ // The StringKey references the name within |histogram| rather than
+ // making a copy.
+ (*histograms_)[name] = histogram;
+ ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
+ // If there are callbacks for this histogram, we set the kCallbackExists
+ // flag.
+ auto callback_iterator = callbacks_->find(name);
+ if (callback_iterator != callbacks_->end()) {
+ if (!callback_iterator->second.is_null())
+ histogram->SetFlags(HistogramBase::kCallbackExists);
+ else
+ histogram->ClearFlags(HistogramBase::kCallbackExists);
+ }
+ histogram_to_return = histogram;
+ } else if (histogram == it->second) {
+ // The histogram was registered before.
+ histogram_to_return = histogram;
+ } else {
+ // We already have one histogram with this name.
+ DCHECK_EQ(histogram->histogram_name(),
+ it->second->histogram_name()) << "hash collision";
+ histogram_to_return = it->second;
+ histogram_to_delete = histogram;
+ }
+ }
+ }
+ // Delete outside the lock to keep the critical section short.
+ delete histogram_to_delete;
+ return histogram_to_return;
+}
+
+// static
+// static
+// Deduplicates BucketRanges: returns the already-registered equal instance
+// (deleting |ranges|) or registers and returns |ranges| itself. Lookup is
+// keyed by checksum, with a list per checksum to tolerate collisions.
+const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
+ const BucketRanges* ranges) {
+ DCHECK(ranges->HasValidChecksum());
+ std::unique_ptr<const BucketRanges> ranges_deleter;
+
+ if (lock_ == NULL) {
+ ANNOTATE_LEAKING_OBJECT_PTR(ranges);
+ return ranges;
+ }
+
+ base::AutoLock auto_lock(*lock_);
+ if (ranges_ == NULL) {
+ ANNOTATE_LEAKING_OBJECT_PTR(ranges);
+ return ranges;
+ }
+
+ std::list<const BucketRanges*>* checksum_matching_list;
+ RangesMap::iterator ranges_it = ranges_->find(ranges->checksum());
+ if (ranges_->end() == ranges_it) {
+ // Add a new matching list to map.
+ checksum_matching_list = new std::list<const BucketRanges*>();
+ ANNOTATE_LEAKING_OBJECT_PTR(checksum_matching_list);
+ (*ranges_)[ranges->checksum()] = checksum_matching_list;
+ } else {
+ checksum_matching_list = ranges_it->second;
+ }
+
+ for (const BucketRanges* existing_ranges : *checksum_matching_list) {
+ if (existing_ranges->Equals(ranges)) {
+ if (existing_ranges == ranges) {
+ return ranges;
+ } else {
+ // |ranges_deleter| frees the duplicate when it goes out of scope.
+ ranges_deleter.reset(ranges);
+ return existing_ranges;
+ }
+ }
+ }
+ // We haven't found a BucketRanges which has the same ranges. Register the
+ // new BucketRanges.
+ checksum_matching_list->push_front(ranges);
+ return ranges;
+}
+
+// static
+// static
+// Appends an HTML rendering of every histogram whose name contains |query|
+// to |output|, sorted by name. No-op when the recorder is inactive.
+void StatisticsRecorder::WriteHTMLGraph(const std::string& query,
+ std::string* output) {
+ if (!IsActive())
+ return;
+
+ Histograms snapshot;
+ GetSnapshot(query, &snapshot);
+ std::sort(snapshot.begin(), snapshot.end(), &HistogramNameLesser);
+ for (const HistogramBase* histogram : snapshot) {
+ histogram->WriteHTMLGraph(output);
+ output->append("<br><hr><br>");
+ }
+}
+
+// static
+// Appends a plain-text rendering of matching histograms to |output|,
+// sorted by name. No-op when the recorder is inactive.
+void StatisticsRecorder::WriteGraph(const std::string& query,
+ std::string* output) {
+ if (!IsActive())
+ return;
+ if (query.length())
+ StringAppendF(output, "Collections of histograms for %s\n", query.c_str());
+ else
+ output->append("Collections of all histograms\n");
+
+ Histograms snapshot;
+ GetSnapshot(query, &snapshot);
+ std::sort(snapshot.begin(), snapshot.end(), &HistogramNameLesser);
+ for (const HistogramBase* histogram : snapshot) {
+ histogram->WriteAscii(output);
+ output->append("\n");
+ }
+}
+
+// static
+// Returns matching histograms as a JSON object string of the form
+// {"query":..., "histograms":[...]}; empty string when inactive.
+std::string StatisticsRecorder::ToJSON(const std::string& query) {
+ if (!IsActive())
+ return std::string();
+
+ std::string output("{");
+ if (!query.empty()) {
+ output += "\"query\":";
+ EscapeJSONString(query, true, &output);
+ output += ",";
+ }
+
+ Histograms snapshot;
+ GetSnapshot(query, &snapshot);
+ output += "\"histograms\":[";
+ bool first_histogram = true;
+ for (const HistogramBase* histogram : snapshot) {
+ if (first_histogram)
+ first_histogram = false;
+ else
+ output += ",";
+ std::string json;
+ histogram->WriteJSON(&json);
+ output += json;
+ }
+ output += "]}";
+ return output;
+}
+
+// static
+// static
+// Appends all registered histograms (unsorted) to |output|. Silently does
+// nothing when the recorder was never initialized or has been reset.
+void StatisticsRecorder::GetHistograms(Histograms* output) {
+ if (lock_ == NULL)
+ return;
+ base::AutoLock auto_lock(*lock_);
+ if (histograms_ == NULL)
+ return;
+
+ for (const auto& entry : *histograms_) {
+ output->push_back(entry.second);
+ }
+}
+
+// static
+// Appends every registered BucketRanges (across all checksum buckets) to
+// |output|.
+void StatisticsRecorder::GetBucketRanges(
+ std::vector<const BucketRanges*>* output) {
+ if (lock_ == NULL)
+ return;
+ base::AutoLock auto_lock(*lock_);
+ if (ranges_ == NULL)
+ return;
+
+ for (const auto& entry : *ranges_) {
+ for (auto* range_entry : *entry.second) {
+ output->push_back(range_entry);
+ }
+ }
+}
+
+// static
+// Looks up a histogram by exact name; NULL if absent or recorder inactive.
+HistogramBase* StatisticsRecorder::FindHistogram(base::StringPiece name) {
+ // This must be called *before* the lock is acquired below because it will
+ // call back into this object to register histograms. Those called methods
+ // will acquire the lock at that time.
+ ImportGlobalPersistentHistograms();
+
+ if (lock_ == NULL)
+ return NULL;
+ base::AutoLock auto_lock(*lock_);
+ if (histograms_ == NULL)
+ return NULL;
+
+ HistogramMap::iterator it = histograms_->find(name);
+ if (histograms_->end() == it)
+ return NULL;
+ return it->second;
+}
+
+// static
+// static
+// Returns an iterator over all registered histograms, importing any new
+// persistent histograms first. Requires the recorder to be initialized.
+StatisticsRecorder::HistogramIterator StatisticsRecorder::begin(
+ bool include_persistent) {
+ DCHECK(histograms_);
+ ImportGlobalPersistentHistograms();
+
+ HistogramMap::iterator iter_begin;
+ {
+ base::AutoLock auto_lock(*lock_);
+ iter_begin = histograms_->begin();
+ }
+ return HistogramIterator(iter_begin, include_persistent);
+}
+
+// static
+// NOTE(review): unlike the other static methods, this dereferences |lock_|
+// and |histograms_| without a NULL check — presumably only called after
+// begin(), which DCHECKs initialization; confirm against callers.
+StatisticsRecorder::HistogramIterator StatisticsRecorder::end() {
+ HistogramMap::iterator iter_end;
+ {
+ base::AutoLock auto_lock(*lock_);
+ iter_end = histograms_->end();
+ }
+ return HistogramIterator(iter_end, true);
+}
+
+// static
+void StatisticsRecorder::InitLogOnShutdown() {
+ if (lock_ == nullptr)
+ return;
+ base::AutoLock auto_lock(*lock_);
+ g_statistics_recorder_.Get().InitLogOnShutdownWithoutLock();
+}
+
+// static
+// Appends every histogram whose name contains |query| (substring match)
+// to |snapshot|.
+void StatisticsRecorder::GetSnapshot(const std::string& query,
+ Histograms* snapshot) {
+ if (lock_ == NULL)
+ return;
+ base::AutoLock auto_lock(*lock_);
+ if (histograms_ == NULL)
+ return;
+
+ for (const auto& entry : *histograms_) {
+ if (entry.second->histogram_name().find(query) != std::string::npos)
+ snapshot->push_back(entry.second);
+ }
+}
+
+// static
+// static
+// Registers a per-sample callback for histogram |name|. Returns false if
+// the recorder is inactive or a callback is already registered for |name|.
+// Also flags an already-registered histogram so it invokes the callback.
+bool StatisticsRecorder::SetCallback(
+ const std::string& name,
+ const StatisticsRecorder::OnSampleCallback& cb) {
+ DCHECK(!cb.is_null());
+ if (lock_ == NULL)
+ return false;
+ base::AutoLock auto_lock(*lock_);
+ if (histograms_ == NULL)
+ return false;
+
+ if (ContainsKey(*callbacks_, name))
+ return false;
+ callbacks_->insert(std::make_pair(name, cb));
+
+ auto it = histograms_->find(name);
+ if (it != histograms_->end())
+ it->second->SetFlags(HistogramBase::kCallbackExists);
+
+ return true;
+}
+
+// static
+// Removes the callback for |name|, if any.
+void StatisticsRecorder::ClearCallback(const std::string& name) {
+ if (lock_ == NULL)
+ return;
+ base::AutoLock auto_lock(*lock_);
+ if (histograms_ == NULL)
+ return;
+
+ callbacks_->erase(name);
+
+ // We also clear the flag from the histogram (if it exists).
+ auto it = histograms_->find(name);
+ if (it != histograms_->end())
+ it->second->ClearFlags(HistogramBase::kCallbackExists);
+}
+
+// static
+// Returns the callback registered for |name|, or a null callback.
+StatisticsRecorder::OnSampleCallback StatisticsRecorder::FindCallback(
+ const std::string& name) {
+ if (lock_ == NULL)
+ return OnSampleCallback();
+ base::AutoLock auto_lock(*lock_);
+ if (histograms_ == NULL)
+ return OnSampleCallback();
+
+ auto callback_iterator = callbacks_->find(name);
+ return callback_iterator != callbacks_->end() ? callback_iterator->second
+ : OnSampleCallback();
+}
+
+// static
+// static
+// Number of registered histograms; 0 when the recorder is inactive.
+size_t StatisticsRecorder::GetHistogramCount() {
+ if (!lock_)
+ return 0;
+
+ base::AutoLock auto_lock(*lock_);
+ if (!histograms_)
+ return 0;
+ return histograms_->size();
+}
+
+// static
+// NOTE(review): erases without taking |lock_| — test-only helper,
+// presumably called single-threaded; confirm before wider use.
+void StatisticsRecorder::ForgetHistogramForTesting(base::StringPiece name) {
+ if (histograms_)
+ histograms_->erase(name);
+}
+
+// static
+// Creates a recorder whose destructor restores the previous global maps
+// (see ~StatisticsRecorder), giving tests an isolated registry.
+std::unique_ptr<StatisticsRecorder>
+StatisticsRecorder::CreateTemporaryForTesting() {
+ return WrapUnique(new StatisticsRecorder());
+}
+
+// static
+void StatisticsRecorder::UninitializeForTesting() {
+ // Stop now if it's never been initialized.
+ if (lock_ == NULL || histograms_ == NULL)
+ return;
+
+ // Get the global instance and destruct it. It's held in static memory so
+ // can't "delete" it; call the destructor explicitly.
+ DCHECK(g_statistics_recorder_.private_instance_);
+ g_statistics_recorder_.Get().~StatisticsRecorder();
+
+ // Now the ugly part. There's no official way to release a LazyInstance once
+ // created so it's necessary to clear out an internal variable which
+ // shouldn't be publicly visible but is for initialization reasons.
+ g_statistics_recorder_.private_instance_ = 0;
+}
+
+// static
+void StatisticsRecorder::ImportGlobalPersistentHistograms() {
+ if (lock_ == NULL)
+ return;
+
+ // Import histograms from known persistent storage. Histograms could have
+ // been added by other processes and they must be fetched and recognized
+ // locally. If the persistent memory segment is not shared between processes,
+ // this call does nothing.
+ GlobalHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
+ if (allocator)
+ allocator->ImportHistogramsToStatisticsRecorder();
+}
+
+// This singleton instance should be started during the single threaded portion
+// of main(), and hence it is not thread safe. It initializes globals to
+// provide support for all future calls.
+// This singleton instance should be started during the single threaded portion
+// of main(), and hence it is not thread safe. It initializes globals to
+// provide support for all future calls.
+StatisticsRecorder::StatisticsRecorder() {
+ if (lock_ == NULL) {
+ // This will leak on purpose. It's the only way to make sure we won't race
+ // against the static uninitialization of the module while one of our
+ // static methods relying on the lock get called at an inappropriate time
+ // during the termination phase. Since it's a static data member, we will
+ // leak one per process, which would be similar to the instance allocated
+ // during static initialization and released only on process termination.
+ lock_ = new base::Lock;
+ }
+
+ base::AutoLock auto_lock(*lock_);
+
+ // Stash any pre-existing maps so the destructor can restore them
+ // (used by CreateTemporaryForTesting()).
+ existing_histograms_.reset(histograms_);
+ existing_callbacks_.reset(callbacks_);
+ existing_ranges_.reset(ranges_);
+
+ histograms_ = new HistogramMap;
+ callbacks_ = new CallbackMap;
+ ranges_ = new RangesMap;
+
+ InitLogOnShutdownWithoutLock();
+}
+
+// Registers a one-time at-exit dump of all histograms when verbose logging
+// is enabled. Caller must hold |lock_|.
+void StatisticsRecorder::InitLogOnShutdownWithoutLock() {
+ if (!vlog_initialized_ && VLOG_IS_ON(1)) {
+ vlog_initialized_ = true;
+ AtExitManager::RegisterCallback(&DumpHistogramsToVlog, this);
+ }
+}
+
+// static
+// Clears the global maps, deactivating the recorder. The lock and the
+// histogram/ranges objects themselves are intentionally leaked.
+void StatisticsRecorder::Reset() {
+ // If there's no lock then there is nothing to reset.
+ if (!lock_)
+ return;
+
+ std::unique_ptr<HistogramMap> histograms_deleter;
+ std::unique_ptr<CallbackMap> callbacks_deleter;
+ std::unique_ptr<RangesMap> ranges_deleter;
+ // We don't delete lock_ on purpose to avoid having to properly protect
+ // against it going away after we checked for NULL in the static methods.
+ {
+ base::AutoLock auto_lock(*lock_);
+ histograms_deleter.reset(histograms_);
+ callbacks_deleter.reset(callbacks_);
+ ranges_deleter.reset(ranges_);
+ histograms_ = NULL;
+ callbacks_ = NULL;
+ ranges_ = NULL;
+ }
+ // We are going to leak the histograms and the ranges.
+}
+
+// static
+// AtExit callback: writes the text rendering of all histograms to VLOG(1).
+void StatisticsRecorder::DumpHistogramsToVlog(void* /*instance*/) {
+ std::string output;
+ StatisticsRecorder::WriteGraph(std::string(), &output);
+ VLOG(1) << output;
+}
+
+
+// Definitions of the static state shared by all (static) methods above.
+// All start NULL; the constructor allocates them and Reset() clears the
+// maps (but never |lock_| — see comment in Reset()).
+// static
+StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL;
+// static
+StatisticsRecorder::CallbackMap* StatisticsRecorder::callbacks_ = NULL;
+// static
+StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = NULL;
+// static
+base::Lock* StatisticsRecorder::lock_ = NULL;
+
+} // namespace base
diff --git a/libchrome/base/metrics/statistics_recorder.h b/libchrome/base/metrics/statistics_recorder.h
new file mode 100644
index 0000000..c3c6ace
--- /dev/null
+++ b/libchrome/base/metrics/statistics_recorder.h
@@ -0,0 +1,243 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// StatisticsRecorder holds all Histograms and BucketRanges that are used by
+// Histograms in the system. It provides a general place for
+// Histograms/BucketRanges to register, and supports a global API for accessing
+// (i.e., dumping, or graphing) the data.
+
+#ifndef BASE_METRICS_STATISTICS_RECORDER_H_
+#define BASE_METRICS_STATISTICS_RECORDER_H_
+
+#include <stdint.h>
+
+#include <list>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/gtest_prod_util.h"
+#include "base/lazy_instance.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+#include "base/strings/string_piece.h"
+
+class SubprocessMetricsProviderTest;
+
+namespace base {
+
+class BucketRanges;
+class Lock;
+
+class BASE_EXPORT StatisticsRecorder {
+ public:
+  // A class used as a key for the histogram map below. It always references
+  // a string owned outside of this class, likely in the value of the map.
+  class StringKey : public StringPiece {
+   public:
+    // Constructs the StringKey using various sources. The source must live
+    // at least as long as the created object.
+    StringKey(const std::string& str) : StringPiece(str) {}
+    StringKey(StringPiece str) : StringPiece(str) {}
+
+    // Though StringPiece is better passed by value than by reference, in
+    // this case it's being passed many times and likely already been stored
+    // in memory (not just registers) so the benefit of pass-by-value is
+    // negated.
+    bool operator<(const StringKey& rhs) const {
+      // Since order is unimportant in the map and string comparisons can be
+      // slow, use the length as the primary sort value.
+      if (length() < rhs.length())
+        return true;
+      if (length() > rhs.length())
+        return false;
+
+      // Fall back to an actual string comparison. The lengths are the same
+      // so a simple memory-compare is sufficient. This is slightly more
+      // efficient than calling operator<() for StringPiece which would
+      // again have to check lengths before calling wordmemcmp().
+      return wordmemcmp(data(), rhs.data(), length()) < 0;
+    }
+  };
+
+  typedef std::map<StringKey, HistogramBase*> HistogramMap;
+  typedef std::vector<HistogramBase*> Histograms;
+
+  // A class for iterating over the histograms held within this global resource.
+  class BASE_EXPORT HistogramIterator {
+   public:
+    HistogramIterator(const HistogramMap::iterator& iter,
+                      bool include_persistent);
+    HistogramIterator(const HistogramIterator& rhs); // Must be copyable.
+    ~HistogramIterator();
+
+    HistogramIterator& operator++();
+    HistogramIterator operator++(int) {
+      HistogramIterator tmp(*this);
+      operator++();
+      return tmp;
+    }
+
+    bool operator==(const HistogramIterator& rhs) const {
+      return iter_ == rhs.iter_;
+    }
+    bool operator!=(const HistogramIterator& rhs) const {
+      return iter_ != rhs.iter_;
+    }
+    HistogramBase* operator*() { return iter_->second; }
+
+   private:
+    HistogramMap::iterator iter_;
+    const bool include_persistent_;
+  };
+
+  ~StatisticsRecorder();
+
+  // Initializes the StatisticsRecorder system. Safe to call multiple times.
+  static void Initialize();
+
+  // Find out if histograms can now be registered into our list.
+  static bool IsActive();
+
+  // Register, or add a new histogram to the collection of statistics. If an
+  // identically named histogram is already registered, then the argument
+  // |histogram| will be deleted. The returned value is always the registered
+  // histogram (either the argument, or the pre-existing registered histogram).
+  static HistogramBase* RegisterOrDeleteDuplicate(HistogramBase* histogram);
+
+  // Register, or add a new BucketRanges. If an identical BucketRanges is
+  // already registered, then the argument |ranges| will be deleted. The
+  // returned value is always the registered BucketRanges (either the
+  // argument, or the pre-existing one).
+  static const BucketRanges* RegisterOrDeleteDuplicateRanges(
+      const BucketRanges* ranges);
+
+  // Methods for appending histogram data to a string. Only histograms which
+  // have |query| as a substring are written to |output| (an empty string will
+  // process all registered histograms).
+  static void WriteHTMLGraph(const std::string& query, std::string* output);
+  static void WriteGraph(const std::string& query, std::string* output);
+
+  // Returns the histograms with |query| as a substring as JSON text (an empty
+  // |query| will process all registered histograms).
+  static std::string ToJSON(const std::string& query);
+
+  // Method for extracting histograms which were marked for use by UMA.
+  static void GetHistograms(Histograms* output);
+
+  // Method for extracting BucketRanges used by all histograms registered.
+  static void GetBucketRanges(std::vector<const BucketRanges*>* output);
+
+  // Find a histogram by name. It matches the exact name. This method is thread
+  // safe. It returns NULL if a matching histogram is not found.
+  static HistogramBase* FindHistogram(base::StringPiece name);
+
+  // Support for iterating over known histograms.
+  static HistogramIterator begin(bool include_persistent);
+  static HistogramIterator end();
+
+  // GetSnapshot copies some of the pointers to registered histograms into the
+  // caller supplied vector (Histograms). Only histograms which have |query| as
+  // a substring are copied (an empty string will process all registered
+  // histograms).
+  static void GetSnapshot(const std::string& query, Histograms* snapshot);
+
+  typedef base::Callback<void(HistogramBase::Sample)> OnSampleCallback;
+
+  // SetCallback sets the callback to notify when a new sample is recorded on
+  // the histogram referred to by |histogram_name|. The call to this method can
+  // be done before or after the histogram is created. This method is thread
+  // safe. The return value is whether or not the callback was successfully set.
+  static bool SetCallback(const std::string& histogram_name,
+                          const OnSampleCallback& callback);
+
+  // ClearCallback clears any callback set on the histogram referred to by
+  // |histogram_name|. This method is thread safe.
+  static void ClearCallback(const std::string& histogram_name);
+
+  // FindCallback retrieves the callback for the histogram referred to by
+  // |histogram_name|, or a null callback if no callback exists for this
+  // histogram. This method is thread safe.
+  static OnSampleCallback FindCallback(const std::string& histogram_name);
+
+  // Returns the number of known histograms.
+  static size_t GetHistogramCount();
+
+  // Initializes logging histograms with --v=1. Safe to call multiple times.
+  // Is called from ctor but for browser it seems that it is more useful to
+  // start logging after statistics recorder, so we need to init log-on-shutdown
+  // later.
+  static void InitLogOnShutdown();
+
+  // Removes a histogram from the internal set of known ones. This can be
+  // necessary during testing persistent histograms where the underlying
+  // memory is being released.
+  static void ForgetHistogramForTesting(base::StringPiece name);
+
+  // Creates a local StatisticsRecorder object for testing purposes. All new
+  // histograms will be registered in it until it is destructed or pushed
+  // aside for the lifetime of yet another SR object. The destruction of the
+  // returned object will re-activate the previous one. Always release SR
+  // objects in the opposite order to which they're created.
+  static std::unique_ptr<StatisticsRecorder> CreateTemporaryForTesting()
+      WARN_UNUSED_RESULT;
+
+  // Resets any global instance of the statistics-recorder that was created
+  // by a call to Initialize().
+  static void UninitializeForTesting();
+
+ private:
+  // We keep a map of callbacks to histograms, so that as histograms are
+  // created, we can set the callback properly.
+  typedef std::map<std::string, OnSampleCallback> CallbackMap;
+
+  // We keep all |bucket_ranges_| in a map, from checksum to a list of
+  // |bucket_ranges_|. Checksum is calculated from the |ranges_| in
+  // |bucket_ranges_|.
+  typedef std::map<uint32_t, std::list<const BucketRanges*>*> RangesMap;
+
+  friend struct DefaultLazyInstanceTraits<StatisticsRecorder>;
+  friend class StatisticsRecorderTest;
+
+  // Imports histograms from global persistent memory. The global lock must
+  // not be held during this call.
+  static void ImportGlobalPersistentHistograms();
+
+  // The constructor just initializes static members. Usually client code should
+  // use Initialize to do this. But in test code, you can friend this class and
+  // call the constructor to get a clean StatisticsRecorder.
+  StatisticsRecorder();
+
+  // Initialize implementation but without lock. Caller should guard
+  // StatisticsRecorder by itself if needed (it isn't in unit tests).
+  void InitLogOnShutdownWithoutLock();
+
+  // These are copies of everything that existed when the (test) Statistics-
+  // Recorder was created. The global ones have to be moved aside to create a
+  // clean environment.
+  std::unique_ptr<HistogramMap> existing_histograms_;
+  std::unique_ptr<CallbackMap> existing_callbacks_;
+  std::unique_ptr<RangesMap> existing_ranges_;
+
+  // Whether the log-on-shutdown (DumpHistogramsToVlog) callback has been
+  // registered; see InitLogOnShutdown().
+  bool vlog_initialized_ = false;
+
+  static void Reset();
+  static void DumpHistogramsToVlog(void* instance);
+
+  static HistogramMap* histograms_;
+  static CallbackMap* callbacks_;
+  static RangesMap* ranges_;
+
+  // Lock protects access to above maps.
+  static base::Lock* lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(StatisticsRecorder);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_STATISTICS_RECORDER_H_
diff --git a/libchrome/base/metrics/statistics_recorder_unittest.cc b/libchrome/base/metrics/statistics_recorder_unittest.cc
new file mode 100644
index 0000000..65e2c98
--- /dev/null
+++ b/libchrome/base/metrics/statistics_recorder_unittest.cc
@@ -0,0 +1,659 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/statistics_recorder.h"
+
+#include <stddef.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/json/json_reader.h"
+#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+// Class to make sure any manipulations we do to the min log level are
+// contained (i.e., do not affect other unit tests).
+class LogStateSaver {
+ public:
+  LogStateSaver() : old_min_log_level_(logging::GetMinLogLevel()) {}
+
+  ~LogStateSaver() {
+    // Restore the captured log level and clear any assert handler a test may
+    // have installed.
+    logging::SetMinLogLevel(old_min_log_level_);
+    logging::SetLogAssertHandler(nullptr);
+  }
+
+ private:
+  // Min log level in effect when this saver was constructed.
+  int old_min_log_level_;
+
+  DISALLOW_COPY_AND_ASSIGN(LogStateSaver);
+};
+
+} // namespace
+
+namespace base {
+
+// Test fixture parameterized on whether histograms use the persistent
+// (shared-memory) allocator (true) or the heap (false).
+class StatisticsRecorderTest : public testing::TestWithParam<bool> {
+ protected:
+  const int32_t kAllocatorMemorySize = 64 << 10; // 64 KiB
+
+  StatisticsRecorderTest() : use_persistent_histogram_allocator_(GetParam()) {
+    // Get this first so it never gets created in persistent storage and will
+    // not appear in the StatisticsRecorder after it is re-initialized.
+    PersistentHistogramAllocator::GetCreateHistogramResultHistogram();
+
+    // Each test will have a clean state (no Histogram / BucketRanges
+    // registered).
+    InitializeStatisticsRecorder();
+
+    // Use persistent memory for histograms if so indicated by test parameter.
+    if (use_persistent_histogram_allocator_) {
+      GlobalHistogramAllocator::CreateWithLocalMemory(
+          kAllocatorMemorySize, 0, "StatisticsRecorderTest");
+    }
+  }
+
+  ~StatisticsRecorderTest() override {
+    GlobalHistogramAllocator::ReleaseForTesting();
+    UninitializeStatisticsRecorder();
+  }
+
+  // Swaps in a fresh, empty StatisticsRecorder for the test to use.
+  void InitializeStatisticsRecorder() {
+    DCHECK(!statistics_recorder_);
+    StatisticsRecorder::UninitializeForTesting();
+    statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
+  }
+
+  void UninitializeStatisticsRecorder() {
+    statistics_recorder_.reset();
+    StatisticsRecorder::UninitializeForTesting();
+  }
+
+  // Creates an unregistered Histogram whose BucketRanges IS registered with
+  // the StatisticsRecorder. The caller owns the returned histogram.
+  Histogram* CreateHistogram(const std::string& name,
+                             HistogramBase::Sample min,
+                             HistogramBase::Sample max,
+                             size_t bucket_count) {
+    BucketRanges* ranges = new BucketRanges(bucket_count + 1);
+    Histogram::InitializeBucketRanges(min, max, ranges);
+    const BucketRanges* registered_ranges =
+        StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges);
+    return new Histogram(name, min, max, registered_ranges);
+  }
+
+  void DeleteHistogram(HistogramBase* histogram) {
+    delete histogram;
+  }
+
+  // Advances |iter| to end() and returns the number of histograms visited.
+  int CountIterableHistograms(StatisticsRecorder::HistogramIterator* iter) {
+    int count = 0;
+    for (; *iter != StatisticsRecorder::end(); ++*iter) {
+      ++count;
+    }
+    return count;
+  }
+
+  void InitLogOnShutdown() {
+    DCHECK(statistics_recorder_);
+    statistics_recorder_->InitLogOnShutdownWithoutLock();
+  }
+
+  // Whether the recorder has registered its log-on-shutdown callback.
+  bool VLogInitialized() {
+    DCHECK(statistics_recorder_);
+    return statistics_recorder_->vlog_initialized_;
+  }
+
+  const bool use_persistent_histogram_allocator_;
+
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+  std::unique_ptr<GlobalHistogramAllocator> old_global_allocator_;
+
+ private:
+  LogStateSaver log_state_saver_;
+
+  DISALLOW_COPY_AND_ASSIGN(StatisticsRecorderTest);
+};
+
+// Run all StatisticsRecorderTest cases with both heap and persistent memory.
+INSTANTIATE_TEST_CASE_P(Allocator, StatisticsRecorderTest, testing::Bool());
+
+// Verifies that registration APIs are safe no-ops when no StatisticsRecorder
+// exists. (Fixes '&reg' -> '(R)' HTML-entity mojibake in the address-of
+// expressions introduced by the import.)
+TEST_P(StatisticsRecorderTest, NotInitialized) {
+  UninitializeStatisticsRecorder();
+
+  ASSERT_FALSE(StatisticsRecorder::IsActive());
+
+  StatisticsRecorder::Histograms registered_histograms;
+  std::vector<const BucketRanges*> registered_ranges;
+
+  StatisticsRecorder::GetHistograms(&registered_histograms);
+  EXPECT_EQ(0u, registered_histograms.size());
+
+  Histogram* histogram = CreateHistogram("TestHistogram", 1, 1000, 10);
+
+  // When StatisticsRecorder is not initialized, register is a noop.
+  EXPECT_EQ(histogram,
+            StatisticsRecorder::RegisterOrDeleteDuplicate(histogram));
+  // Manually delete histogram that was not registered.
+  DeleteHistogram(histogram);
+
+  // RegisterOrDeleteDuplicateRanges is a no-op.
+  BucketRanges* ranges = new BucketRanges(3);
+  ranges->ResetChecksum();
+  EXPECT_EQ(ranges,
+            StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges));
+  StatisticsRecorder::GetBucketRanges(&registered_ranges);
+  EXPECT_EQ(0u, registered_ranges.size());
+}
+
+// Verifies BucketRanges registration and de-duplication by checksum.
+// (Fixes '&reg' -> '(R)' HTML-entity mojibake in the address-of expressions.)
+TEST_P(StatisticsRecorderTest, RegisterBucketRanges) {
+  std::vector<const BucketRanges*> registered_ranges;
+
+  BucketRanges* ranges1 = new BucketRanges(3);
+  ranges1->ResetChecksum();
+  BucketRanges* ranges2 = new BucketRanges(4);
+  ranges2->ResetChecksum();
+
+  // Register new ranges.
+  EXPECT_EQ(ranges1,
+            StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges1));
+  EXPECT_EQ(ranges2,
+            StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges2));
+  StatisticsRecorder::GetBucketRanges(&registered_ranges);
+  ASSERT_EQ(2u, registered_ranges.size());
+
+  // Register some ranges again.
+  EXPECT_EQ(ranges1,
+            StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges1));
+  registered_ranges.clear();
+  StatisticsRecorder::GetBucketRanges(&registered_ranges);
+  ASSERT_EQ(2u, registered_ranges.size());
+  // Make sure the ranges is still the one we know.
+  ASSERT_EQ(3u, ranges1->size());
+  EXPECT_EQ(0, ranges1->range(0));
+  EXPECT_EQ(0, ranges1->range(1));
+  EXPECT_EQ(0, ranges1->range(2));
+
+  // Register ranges with same values.
+  BucketRanges* ranges3 = new BucketRanges(3);
+  ranges3->ResetChecksum();
+  EXPECT_EQ(ranges1, // returning ranges1
+            StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges3));
+  registered_ranges.clear();
+  StatisticsRecorder::GetBucketRanges(&registered_ranges);
+  ASSERT_EQ(2u, registered_ranges.size());
+}
+
+// Verifies that registering a histogram (twice) yields exactly one entry.
+// (Fixes '&reg' -> '(R)' HTML-entity mojibake in the address-of expressions.)
+TEST_P(StatisticsRecorderTest, RegisterHistogram) {
+  // Create a Histogram that was not registered.
+  Histogram* histogram = CreateHistogram("TestHistogram", 1, 1000, 10);
+
+  StatisticsRecorder::Histograms registered_histograms;
+  StatisticsRecorder::GetHistograms(&registered_histograms);
+  EXPECT_EQ(0u, registered_histograms.size());
+
+  // Register the Histogram.
+  EXPECT_EQ(histogram,
+            StatisticsRecorder::RegisterOrDeleteDuplicate(histogram));
+  StatisticsRecorder::GetHistograms(&registered_histograms);
+  EXPECT_EQ(1u, registered_histograms.size());
+
+  // Register the same Histogram again.
+  EXPECT_EQ(histogram,
+            StatisticsRecorder::RegisterOrDeleteDuplicate(histogram));
+  registered_histograms.clear();
+  StatisticsRecorder::GetHistograms(&registered_histograms);
+  EXPECT_EQ(1u, registered_histograms.size());
+}
+
+// Exercises exact-name lookup, including across a recorder re-initialization
+// where only persistent-memory histograms should survive.
+TEST_P(StatisticsRecorderTest, FindHistogram) {
+  HistogramBase* histogram1 = Histogram::FactoryGet(
+      "TestHistogram1", 1, 1000, 10, HistogramBase::kNoFlags);
+  HistogramBase* histogram2 = Histogram::FactoryGet(
+      "TestHistogram2", 1, 1000, 10, HistogramBase::kNoFlags);
+
+  EXPECT_EQ(histogram1, StatisticsRecorder::FindHistogram("TestHistogram1"));
+  EXPECT_EQ(histogram2, StatisticsRecorder::FindHistogram("TestHistogram2"));
+  EXPECT_FALSE(StatisticsRecorder::FindHistogram("TestHistogram"));
+
+  // Create a new global allocator using the same memory as the old one. Any
+  // old one is kept around so the memory doesn't get released.
+  old_global_allocator_ = GlobalHistogramAllocator::ReleaseForTesting();
+  if (use_persistent_histogram_allocator_) {
+    GlobalHistogramAllocator::CreateWithPersistentMemory(
+        const_cast<void*>(old_global_allocator_->data()),
+        old_global_allocator_->length(), 0, old_global_allocator_->Id(),
+        old_global_allocator_->Name());
+  }
+
+  // Reset statistics-recorder to validate operation from a clean start.
+  UninitializeStatisticsRecorder();
+  InitializeStatisticsRecorder();
+
+  // Only histograms backed by persistent memory survive the reset.
+  if (use_persistent_histogram_allocator_) {
+    EXPECT_TRUE(StatisticsRecorder::FindHistogram("TestHistogram1"));
+    EXPECT_TRUE(StatisticsRecorder::FindHistogram("TestHistogram2"));
+  } else {
+    EXPECT_FALSE(StatisticsRecorder::FindHistogram("TestHistogram1"));
+    EXPECT_FALSE(StatisticsRecorder::FindHistogram("TestHistogram2"));
+  }
+  EXPECT_FALSE(StatisticsRecorder::FindHistogram("TestHistogram"));
+}
+
+// Verifies that GetSnapshot filters histograms by name substring.
+TEST_P(StatisticsRecorderTest, GetSnapshot) {
+  Histogram::FactoryGet("TestHistogram1", 1, 1000, 10, Histogram::kNoFlags);
+  Histogram::FactoryGet("TestHistogram2", 1, 1000, 10, Histogram::kNoFlags);
+  Histogram::FactoryGet("TestHistogram3", 1, 1000, 10, Histogram::kNoFlags);
+
+  // "Test" matches all three histograms.
+  StatisticsRecorder::Histograms snapshot;
+  StatisticsRecorder::GetSnapshot("Test", &snapshot);
+  EXPECT_EQ(3u, snapshot.size());
+
+  // "1" matches only TestHistogram1.
+  snapshot.clear();
+  StatisticsRecorder::GetSnapshot("1", &snapshot);
+  EXPECT_EQ(1u, snapshot.size());
+
+  // A non-matching query returns nothing.
+  snapshot.clear();
+  StatisticsRecorder::GetSnapshot("hello", &snapshot);
+  EXPECT_EQ(0u, snapshot.size());
+}
+
+// Verifies that every FactoryGet flavor registers exactly one histogram per
+// name. (Fixes '&reg' -> '(R)' HTML-entity mojibake in the address-of
+// expressions introduced by the import.)
+TEST_P(StatisticsRecorderTest, RegisterHistogramWithFactoryGet) {
+  StatisticsRecorder::Histograms registered_histograms;
+
+  StatisticsRecorder::GetHistograms(&registered_histograms);
+  ASSERT_EQ(0u, registered_histograms.size());
+
+  // Create a histogram.
+  HistogramBase* histogram = Histogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+  registered_histograms.clear();
+  StatisticsRecorder::GetHistograms(&registered_histograms);
+  EXPECT_EQ(1u, registered_histograms.size());
+
+  // Get an existing histogram.
+  HistogramBase* histogram2 = Histogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+  registered_histograms.clear();
+  StatisticsRecorder::GetHistograms(&registered_histograms);
+  EXPECT_EQ(1u, registered_histograms.size());
+  EXPECT_EQ(histogram, histogram2);
+
+  // Create a LinearHistogram.
+  histogram = LinearHistogram::FactoryGet(
+      "TestLinearHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+  registered_histograms.clear();
+  StatisticsRecorder::GetHistograms(&registered_histograms);
+  EXPECT_EQ(2u, registered_histograms.size());
+
+  // Create a BooleanHistogram.
+  histogram = BooleanHistogram::FactoryGet(
+      "TestBooleanHistogram", HistogramBase::kNoFlags);
+  registered_histograms.clear();
+  StatisticsRecorder::GetHistograms(&registered_histograms);
+  EXPECT_EQ(3u, registered_histograms.size());
+
+  // Create a CustomHistogram.
+  std::vector<int> custom_ranges;
+  custom_ranges.push_back(1);
+  custom_ranges.push_back(5);
+  histogram = CustomHistogram::FactoryGet(
+      "TestCustomHistogram", custom_ranges, HistogramBase::kNoFlags);
+  registered_histograms.clear();
+  StatisticsRecorder::GetHistograms(&registered_histograms);
+  EXPECT_EQ(4u, registered_histograms.size());
+}
+
+// Verifies that the LOCAL_HISTOGRAM_* macros register histograms. (Fixes
+// '&reg' -> '(R)' HTML-entity mojibake in the address-of expressions.)
+TEST_P(StatisticsRecorderTest, RegisterHistogramWithMacros) {
+  // Macros cache pointers and so tests that use them can only be run once.
+  // Stop immediately if this test has run previously.
+  static bool already_run = false;
+  if (already_run)
+    return;
+  already_run = true;
+
+  StatisticsRecorder::Histograms registered_histograms;
+
+  HistogramBase* histogram = Histogram::FactoryGet(
+      "TestHistogramCounts", 1, 1000000, 50, HistogramBase::kNoFlags);
+
+  // The histogram we got from macro is the same as from FactoryGet.
+  LOCAL_HISTOGRAM_COUNTS("TestHistogramCounts", 30);
+  registered_histograms.clear();
+  StatisticsRecorder::GetHistograms(&registered_histograms);
+  ASSERT_EQ(1u, registered_histograms.size());
+  EXPECT_EQ(histogram, registered_histograms[0]);
+
+  LOCAL_HISTOGRAM_TIMES("TestHistogramTimes", TimeDelta::FromDays(1));
+  LOCAL_HISTOGRAM_ENUMERATION("TestHistogramEnumeration", 20, 200);
+
+  registered_histograms.clear();
+  StatisticsRecorder::GetHistograms(&registered_histograms);
+  EXPECT_EQ(3u, registered_histograms.size());
+}
+
+// Verifies that histograms with identical bucket layouts share one
+// BucketRanges, while a different layout adds a new one.
+TEST_P(StatisticsRecorderTest, BucketRangesSharing) {
+  std::vector<const BucketRanges*> ranges;
+  StatisticsRecorder::GetBucketRanges(&ranges);
+  EXPECT_EQ(0u, ranges.size());
+
+  // Same min/max/bucket_count: both histograms share one BucketRanges.
+  Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags);
+  Histogram::FactoryGet("Histogram2", 1, 64, 8, HistogramBase::kNoFlags);
+
+  StatisticsRecorder::GetBucketRanges(&ranges);
+  EXPECT_EQ(1u, ranges.size());
+
+  // A different bucket count registers a second BucketRanges.
+  Histogram::FactoryGet("Histogram3", 1, 64, 16, HistogramBase::kNoFlags);
+
+  ranges.clear();
+  StatisticsRecorder::GetBucketRanges(&ranges);
+  EXPECT_EQ(2u, ranges.size());
+}
+
+// Verifies ToJSON output: valid JSON, correct sample counts, substring query
+// filtering, and empty output once the recorder is gone.
+TEST_P(StatisticsRecorderTest, ToJSON) {
+  Histogram::FactoryGet("TestHistogram1", 1, 1000, 50, HistogramBase::kNoFlags)
+      ->Add(30);
+  Histogram::FactoryGet("TestHistogram1", 1, 1000, 50, HistogramBase::kNoFlags)
+      ->Add(40);
+  Histogram::FactoryGet("TestHistogram2", 1, 1000, 50, HistogramBase::kNoFlags)
+      ->Add(30);
+  Histogram::FactoryGet("TestHistogram2", 1, 1000, 50, HistogramBase::kNoFlags)
+      ->Add(40);
+
+  std::string json(StatisticsRecorder::ToJSON(std::string()));
+
+  // Check for valid JSON.
+  std::unique_ptr<Value> root = JSONReader::Read(json);
+  ASSERT_TRUE(root.get());
+
+  DictionaryValue* root_dict = NULL;
+  ASSERT_TRUE(root->GetAsDictionary(&root_dict));
+
+  // No query should be set.
+  ASSERT_FALSE(root_dict->HasKey("query"));
+
+  ListValue* histogram_list = NULL;
+  ASSERT_TRUE(root_dict->GetList("histograms", &histogram_list));
+  ASSERT_EQ(2u, histogram_list->GetSize());
+
+  // Examine the first histogram.
+  DictionaryValue* histogram_dict = NULL;
+  ASSERT_TRUE(histogram_list->GetDictionary(0, &histogram_dict));
+
+  // Two Add() calls above means a sample count of 2.
+  int sample_count;
+  ASSERT_TRUE(histogram_dict->GetInteger("count", &sample_count));
+  EXPECT_EQ(2, sample_count);
+
+  // Test the query filter.
+  std::string query("TestHistogram2");
+  json = StatisticsRecorder::ToJSON(query);
+
+  root = JSONReader::Read(json);
+  ASSERT_TRUE(root.get());
+  ASSERT_TRUE(root->GetAsDictionary(&root_dict));
+
+  // The query used must be echoed back in the output.
+  std::string query_value;
+  ASSERT_TRUE(root_dict->GetString("query", &query_value));
+  EXPECT_EQ(query, query_value);
+
+  ASSERT_TRUE(root_dict->GetList("histograms", &histogram_list));
+  ASSERT_EQ(1u, histogram_list->GetSize());
+
+  ASSERT_TRUE(histogram_list->GetDictionary(0, &histogram_dict));
+
+  std::string histogram_name;
+  ASSERT_TRUE(histogram_dict->GetString("name", &histogram_name));
+  EXPECT_EQ("TestHistogram2", histogram_name);
+
+  json.clear();
+  UninitializeStatisticsRecorder();
+
+  // No data should be returned.
+  json = StatisticsRecorder::ToJSON(query);
+  EXPECT_TRUE(json.empty());
+}
+
+// Verifies HistogramIterator counts, with and without persistent histograms,
+// before and after a recorder re-initialization.
+TEST_P(StatisticsRecorderTest, IterationTest) {
+  Histogram::FactoryGet("IterationTest1", 1, 64, 16, HistogramBase::kNoFlags);
+  Histogram::FactoryGet("IterationTest2", 1, 64, 16, HistogramBase::kNoFlags);
+
+  // Including persistent histograms sees both...
+  StatisticsRecorder::HistogramIterator i1 = StatisticsRecorder::begin(true);
+  EXPECT_EQ(2, CountIterableHistograms(&i1));
+
+  // ...excluding them sees none when the allocator is in use.
+  StatisticsRecorder::HistogramIterator i2 = StatisticsRecorder::begin(false);
+  EXPECT_EQ(use_persistent_histogram_allocator_ ? 0 : 2,
+            CountIterableHistograms(&i2));
+
+  // Create a new global allocator using the same memory as the old one. Any
+  // old one is kept around so the memory doesn't get released.
+  old_global_allocator_ = GlobalHistogramAllocator::ReleaseForTesting();
+  if (use_persistent_histogram_allocator_) {
+    GlobalHistogramAllocator::CreateWithPersistentMemory(
+        const_cast<void*>(old_global_allocator_->data()),
+        old_global_allocator_->length(), 0, old_global_allocator_->Id(),
+        old_global_allocator_->Name());
+  }
+
+  // Reset statistics-recorder to validate operation from a clean start.
+  UninitializeStatisticsRecorder();
+  InitializeStatisticsRecorder();
+
+  // Only persistent-memory histograms survive the reset.
+  StatisticsRecorder::HistogramIterator i3 = StatisticsRecorder::begin(true);
+  EXPECT_EQ(use_persistent_histogram_allocator_ ? 2 : 0,
+            CountIterableHistograms(&i3));
+
+  StatisticsRecorder::HistogramIterator i4 = StatisticsRecorder::begin(false);
+  EXPECT_EQ(0, CountIterableHistograms(&i4));
+}
+
+namespace {
+
+// CallbackCheckWrapper is simply a convenient way to check and store that
+// a callback was actually run.
+struct CallbackCheckWrapper {
+  CallbackCheckWrapper() : called(false), last_histogram_value(0) {}
+
+  // Bound as the histogram's OnSampleCallback in the tests below.
+  void OnHistogramChanged(base::HistogramBase::Sample histogram_value) {
+    called = true;
+    last_histogram_value = histogram_value;
+  }
+
+  // Whether the callback has fired at least once.
+  bool called;
+  // The sample passed to the most recent invocation.
+  base::HistogramBase::Sample last_histogram_value;
+};
+
+} // namespace
+
+// Check that you can't overwrite the callback with another.
+TEST_P(StatisticsRecorderTest, SetCallbackFailsWithoutHistogramTest) {
+  CallbackCheckWrapper callback_wrapper;
+
+  // Setting a callback for a not-yet-created histogram succeeds.
+  bool result = base::StatisticsRecorder::SetCallback(
+      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                                  base::Unretained(&callback_wrapper)));
+  EXPECT_TRUE(result);
+
+  // A second SetCallback for the same name must be rejected.
+  result = base::StatisticsRecorder::SetCallback(
+      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                                  base::Unretained(&callback_wrapper)));
+  EXPECT_FALSE(result);
+}
+
+// Check that you can't overwrite the callback with another.
+TEST_P(StatisticsRecorderTest, SetCallbackFailsWithHistogramTest) {
+  HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
+                                                   HistogramBase::kNoFlags);
+  EXPECT_TRUE(histogram);
+
+  CallbackCheckWrapper callback_wrapper;
+
+  // First SetCallback succeeds and marks the histogram with kCallbackExists.
+  bool result = base::StatisticsRecorder::SetCallback(
+      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                                  base::Unretained(&callback_wrapper)));
+  EXPECT_TRUE(result);
+  EXPECT_EQ(histogram->flags() & base::HistogramBase::kCallbackExists,
+            base::HistogramBase::kCallbackExists);
+
+  // A second SetCallback fails; the flag stays set.
+  result = base::StatisticsRecorder::SetCallback(
+      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                                  base::Unretained(&callback_wrapper)));
+  EXPECT_FALSE(result);
+  EXPECT_EQ(histogram->flags() & base::HistogramBase::kCallbackExists,
+            base::HistogramBase::kCallbackExists);
+
+  // The original callback still fires when a sample is recorded.
+  histogram->Add(1);
+
+  EXPECT_TRUE(callback_wrapper.called);
+}
+
+// Check that clearing a callback removes it and clears the histogram's
+// kCallbackExists flag. (Previous comment was a copy-paste of the
+// "can't overwrite" comment and did not match this test.)
+TEST_P(StatisticsRecorderTest, ClearCallbackSuceedsWithHistogramTest) {
+  HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
+                                                   HistogramBase::kNoFlags);
+  EXPECT_TRUE(histogram);
+
+  CallbackCheckWrapper callback_wrapper;
+
+  bool result = base::StatisticsRecorder::SetCallback(
+      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                                  base::Unretained(&callback_wrapper)));
+  EXPECT_TRUE(result);
+  EXPECT_EQ(histogram->flags() & base::HistogramBase::kCallbackExists,
+            base::HistogramBase::kCallbackExists);
+
+  // Clearing removes the flag and the callback must no longer fire.
+  base::StatisticsRecorder::ClearCallback("TestHistogram");
+  EXPECT_EQ(histogram->flags() & base::HistogramBase::kCallbackExists, 0);
+
+  histogram->Add(1);
+
+  EXPECT_FALSE(callback_wrapper.called);
+}
+
+// Check that callback is used.
+TEST_P(StatisticsRecorderTest, CallbackUsedTest) {
+  // Plain Histogram.
+  {
+    HistogramBase* histogram = Histogram::FactoryGet(
+        "TestHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+    EXPECT_TRUE(histogram);
+
+    CallbackCheckWrapper callback_wrapper;
+
+    base::StatisticsRecorder::SetCallback(
+        "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                                    base::Unretained(&callback_wrapper)));
+
+    histogram->Add(1);
+
+    EXPECT_TRUE(callback_wrapper.called);
+    EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
+  }
+
+  // LinearHistogram.
+  {
+    HistogramBase* linear_histogram = LinearHistogram::FactoryGet(
+        "TestLinearHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+
+    CallbackCheckWrapper callback_wrapper;
+
+    base::StatisticsRecorder::SetCallback(
+        "TestLinearHistogram",
+        base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                   base::Unretained(&callback_wrapper)));
+
+    linear_histogram->Add(1);
+
+    EXPECT_TRUE(callback_wrapper.called);
+    EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
+  }
+
+  // CustomHistogram.
+  {
+    std::vector<int> custom_ranges;
+    custom_ranges.push_back(1);
+    custom_ranges.push_back(5);
+    HistogramBase* custom_histogram = CustomHistogram::FactoryGet(
+        "TestCustomHistogram", custom_ranges, HistogramBase::kNoFlags);
+
+    CallbackCheckWrapper callback_wrapper;
+
+    base::StatisticsRecorder::SetCallback(
+        "TestCustomHistogram",
+        base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                   base::Unretained(&callback_wrapper)));
+
+    custom_histogram->Add(1);
+
+    EXPECT_TRUE(callback_wrapper.called);
+    EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
+  }
+
+  // SparseHistogram.
+  {
+    HistogramBase* custom_histogram = SparseHistogram::FactoryGet(
+        "TestSparseHistogram", HistogramBase::kNoFlags);
+
+    CallbackCheckWrapper callback_wrapper;
+
+    base::StatisticsRecorder::SetCallback(
+        "TestSparseHistogram",
+        base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                   base::Unretained(&callback_wrapper)));
+
+    custom_histogram->Add(1);
+
+    EXPECT_TRUE(callback_wrapper.called);
+    EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
+  }
+}
+
+// Check that setting a callback before the histogram exists works.
+TEST_P(StatisticsRecorderTest, CallbackUsedBeforeHistogramCreatedTest) {
+  CallbackCheckWrapper callback_wrapper;
+
+  // Set the callback first, then create the histogram it names.
+  base::StatisticsRecorder::SetCallback(
+      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+                                  base::Unretained(&callback_wrapper)));
+
+  HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
+                                                   HistogramBase::kNoFlags);
+  EXPECT_TRUE(histogram);
+  histogram->Add(1);
+
+  EXPECT_TRUE(callback_wrapper.called);
+  EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
+}
+
+// With verbose logging off, InitLogOnShutdown must not register the
+// shutdown dump.
+TEST_P(StatisticsRecorderTest, LogOnShutdownNotInitialized) {
+  UninitializeStatisticsRecorder();
+  logging::SetMinLogLevel(logging::LOG_WARNING);
+  InitializeStatisticsRecorder();
+  EXPECT_FALSE(VLOG_IS_ON(1));
+  EXPECT_FALSE(VLogInitialized());
+  InitLogOnShutdown();
+  EXPECT_FALSE(VLogInitialized());
+}
+
+// Enabling verbose logging after construction, then calling
+// InitLogOnShutdown, registers the shutdown dump.
+TEST_P(StatisticsRecorderTest, LogOnShutdownInitializedExplicitly) {
+  UninitializeStatisticsRecorder();
+  logging::SetMinLogLevel(logging::LOG_WARNING);
+  InitializeStatisticsRecorder();
+  EXPECT_FALSE(VLOG_IS_ON(1));
+  EXPECT_FALSE(VLogInitialized());
+  logging::SetMinLogLevel(logging::LOG_VERBOSE);
+  EXPECT_TRUE(VLOG_IS_ON(1));
+  InitLogOnShutdown();
+  EXPECT_TRUE(VLogInitialized());
+}
+
+// With verbose logging already on, recorder construction registers the
+// shutdown dump by itself.
+TEST_P(StatisticsRecorderTest, LogOnShutdownInitialized) {
+  UninitializeStatisticsRecorder();
+  logging::SetMinLogLevel(logging::LOG_VERBOSE);
+  InitializeStatisticsRecorder();
+  EXPECT_TRUE(VLOG_IS_ON(1));
+  EXPECT_TRUE(VLogInitialized());
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/user_metrics.cc b/libchrome/base/metrics/user_metrics.cc
new file mode 100644
index 0000000..169a063
--- /dev/null
+++ b/libchrome/base/metrics/user_metrics.cc
@@ -0,0 +1,74 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/user_metrics.h"
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/lazy_instance.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+namespace {
+
+LazyInstance<std::vector<ActionCallback>> g_callbacks =
+ LAZY_INSTANCE_INITIALIZER;
+LazyInstance<scoped_refptr<SingleThreadTaskRunner>> g_task_runner =
+ LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+void RecordAction(const UserMetricsAction& action) {
+ RecordComputedAction(action.str_);
+}
+
+void RecordComputedAction(const std::string& action) {
+ if (!g_task_runner.Get()) {
+ DCHECK(g_callbacks.Get().empty());
+ return;
+ }
+
+ if (!g_task_runner.Get()->BelongsToCurrentThread()) {
+ g_task_runner.Get()->PostTask(FROM_HERE,
+ Bind(&RecordComputedAction, action));
+ return;
+ }
+
+ for (const ActionCallback& callback : g_callbacks.Get()) {
+ callback.Run(action);
+ }
+}
+
+void AddActionCallback(const ActionCallback& callback) {
+ // Only allow adding a callback if the task runner is set.
+ DCHECK(g_task_runner.Get());
+ DCHECK(g_task_runner.Get()->BelongsToCurrentThread());
+ g_callbacks.Get().push_back(callback);
+}
+
+void RemoveActionCallback(const ActionCallback& callback) {
+ DCHECK(g_task_runner.Get());
+ DCHECK(g_task_runner.Get()->BelongsToCurrentThread());
+ std::vector<ActionCallback>* callbacks = g_callbacks.Pointer();
+ for (size_t i = 0; i < callbacks->size(); ++i) {
+ if ((*callbacks)[i].Equals(callback)) {
+ callbacks->erase(callbacks->begin() + i);
+ return;
+ }
+ }
+}
+
+void SetRecordActionTaskRunner(
+ scoped_refptr<SingleThreadTaskRunner> task_runner) {
+ DCHECK(task_runner->BelongsToCurrentThread());
+ DCHECK(!g_task_runner.Get() || g_task_runner.Get()->BelongsToCurrentThread());
+ g_task_runner.Get() = task_runner;
+}
+
+} // namespace base
diff --git a/libchrome/base/metrics/user_metrics.h b/libchrome/base/metrics/user_metrics.h
new file mode 100644
index 0000000..93701e8
--- /dev/null
+++ b/libchrome/base/metrics/user_metrics.h
@@ -0,0 +1,70 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_USER_METRICS_H_
+#define BASE_METRICS_USER_METRICS_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/metrics/user_metrics_action.h"
+#include "base/single_thread_task_runner.h"
+
+namespace base {
+
+// This module provides some helper functions for logging actions tracked by
+// the user metrics system.
+
+// Record that the user performed an action.
+// This function must be called after the task runner has been set with
+// SetRecordActionTaskRunner().
+//
+// "Action" here means a user-generated event:
+// good: "Reload", "CloseTab", and "IMEInvoked"
+// not good: "SSLDialogShown", "PageLoaded", "DiskFull"
+// We use this to gather anonymized information about how users are
+// interacting with the browser.
+// WARNING: In calls to this function, UserMetricsAction should be followed by a
+// string literal parameter and not a variable e.g.
+// RecordAction(UserMetricsAction("my action name"));
+// This ensures that our processing scripts can associate this action's hash
+// with its metric name. Therefore, it will be possible to retrieve the metric
+// name from the hash later on.
+//
+// Once a new recorded action is added, run
+// tools/metrics/actions/extract_actions.py
+// to add the metric to actions.xml, then update the <owner>s and <description>
+// sections. Make sure to include the actions.xml file when you upload your code
+// for review!
+//
+// For more complicated situations (like when there are many different
+// possible actions), see RecordComputedAction().
+BASE_EXPORT void RecordAction(const UserMetricsAction& action);
+
+// This function has identical input and behavior to RecordAction(), but is
+// not automatically found by the action-processing scripts. It can be used
+// when it's a pain to enumerate all possible actions, but if you use this
+// you need to also update the rules for extracting known actions in
+// tools/metrics/actions/extract_actions.py.
+// This function must be called after the task runner has been set with
+// SetRecordActionTaskRunner().
+BASE_EXPORT void RecordComputedAction(const std::string& action);
+
+// Called with the action string.
+typedef Callback<void(const std::string&)> ActionCallback;
+
+// Add/remove action callbacks (see above).
+// These functions must be called after the task runner has been set with
+// SetRecordActionTaskRunner().
+BASE_EXPORT void AddActionCallback(const ActionCallback& callback);
+BASE_EXPORT void RemoveActionCallback(const ActionCallback& callback);
+
+// Set the task runner on which to record actions.
+BASE_EXPORT void SetRecordActionTaskRunner(
+ scoped_refptr<SingleThreadTaskRunner> task_runner);
+
+} // namespace base
+
+#endif // BASE_METRICS_USER_METRICS_H_
diff --git a/libchrome/base/metrics/user_metrics_action.h b/libchrome/base/metrics/user_metrics_action.h
new file mode 100644
index 0000000..3eca3dd
--- /dev/null
+++ b/libchrome/base/metrics/user_metrics_action.h
@@ -0,0 +1,27 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_USER_METRICS_ACTION_H_
+#define BASE_METRICS_USER_METRICS_ACTION_H_
+
+namespace base {
+
+// UserMetricsAction exists purely to standardize on the parameters passed to
+// UserMetrics. That way, our toolset can scan the source code reliably for
+// constructors and extract the associated string constants.
+// WARNING: When using UserMetricsAction you should use a string literal
+// parameter e.g.
+// RecordAction(UserMetricsAction("my action name"));
+// This ensures that our processing scripts can associate this action's hash
+// with its metric name. Therefore, it will be possible to retrieve the metric
+// name from the hash later on.
+// Please see tools/metrics/actions/extract_actions.py for details.
+struct UserMetricsAction {
+ const char* str_;
+ explicit UserMetricsAction(const char* str) : str_(str) {}
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_USER_METRICS_ACTION_H_
diff --git a/libchrome/base/native_library.h b/libchrome/base/native_library.h
new file mode 100644
index 0000000..b4f3a3c
--- /dev/null
+++ b/libchrome/base/native_library.h
@@ -0,0 +1,100 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NATIVE_LIBRARY_H_
+#define BASE_NATIVE_LIBRARY_H_
+
+// This file defines a cross-platform "NativeLibrary" type which represents
+// a loadable module.
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_MACOSX)
+#import <CoreFoundation/CoreFoundation.h>
+#endif // OS_*
+
+namespace base {
+
+class FilePath;
+
+#if defined(OS_WIN)
+using NativeLibrary = HMODULE;
+#elif defined(OS_MACOSX)
+enum NativeLibraryType {
+ BUNDLE,
+ DYNAMIC_LIB
+};
+enum NativeLibraryObjCStatus {
+ OBJC_UNKNOWN,
+ OBJC_PRESENT,
+ OBJC_NOT_PRESENT,
+};
+struct NativeLibraryStruct {
+ NativeLibraryType type;
+ CFBundleRefNum bundle_resource_ref;
+ NativeLibraryObjCStatus objc_status;
+ union {
+ CFBundleRef bundle;
+ void* dylib;
+ };
+};
+using NativeLibrary = NativeLibraryStruct*;
+#elif defined(OS_POSIX)
+using NativeLibrary = void*;
+#endif // OS_*
+
+struct BASE_EXPORT NativeLibraryLoadError {
+#if defined(OS_WIN)
+ NativeLibraryLoadError() : code(0) {}
+#endif // OS_WIN
+
+ // Returns a string representation of the load error.
+ std::string ToString() const;
+
+#if defined(OS_WIN)
+ DWORD code;
+#else
+ std::string message;
+#endif // OS_WIN
+};
+
+// Loads a native library from disk. Release it with UnloadNativeLibrary when
+// you're done. Returns NULL on failure.
+// If |error| is not NULL, it may be filled in on load error.
+BASE_EXPORT NativeLibrary LoadNativeLibrary(const FilePath& library_path,
+ NativeLibraryLoadError* error);
+
+#if defined(OS_WIN)
+// Loads a native library from disk. Release it with UnloadNativeLibrary when
+// you're done.
+// This function retrieves the LoadLibrary function exported from kernel32.dll
+// and calls it instead of directly calling the LoadLibrary function via the
+// import table.
+BASE_EXPORT NativeLibrary LoadNativeLibraryDynamically(
+ const FilePath& library_path);
+#endif // OS_WIN
+
+// Unloads a native library.
+BASE_EXPORT void UnloadNativeLibrary(NativeLibrary library);
+
+// Gets a function pointer from a native library.
+BASE_EXPORT void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
+ StringPiece name);
+
+// Returns the full platform specific name for a native library.
+// |name| must be ASCII.
+// For example:
+// "mylib" returns "mylib.dll" on Windows, "libmylib.so" on Linux,
+// "libmylib.dylib" on Mac.
+BASE_EXPORT std::string GetNativeLibraryName(StringPiece name);
+
+} // namespace base
+
+#endif // BASE_NATIVE_LIBRARY_H_
diff --git a/libchrome/base/native_library_posix.cc b/libchrome/base/native_library_posix.cc
new file mode 100644
index 0000000..2dc434b
--- /dev/null
+++ b/libchrome/base/native_library_posix.cc
@@ -0,0 +1,59 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/native_library.h"
+
+#include <dlfcn.h>
+
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+std::string NativeLibraryLoadError::ToString() const {
+ return message;
+}
+
+// static
+NativeLibrary LoadNativeLibrary(const FilePath& library_path,
+ NativeLibraryLoadError* error) {
+ // dlopen() opens the file off disk.
+ ThreadRestrictions::AssertIOAllowed();
+
+ // We deliberately do not use RTLD_DEEPBIND. For the history why, please
+ // refer to the bug tracker. Some useful bug reports to read include:
+ // http://crbug.com/17943, http://crbug.com/17557, http://crbug.com/36892,
+ // and http://crbug.com/40794.
+ void* dl = dlopen(library_path.value().c_str(), RTLD_LAZY);
+ if (!dl && error)
+ error->message = dlerror();
+
+ return dl;
+}
+
+// static
+void UnloadNativeLibrary(NativeLibrary library) {
+ int ret = dlclose(library);
+ if (ret < 0) {
+ DLOG(ERROR) << "dlclose failed: " << dlerror();
+ NOTREACHED();
+ }
+}
+
+// static
+void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
+ StringPiece name) {
+ return dlsym(library, name.data());
+}
+
+// static
+std::string GetNativeLibraryName(StringPiece name) {
+ DCHECK(IsStringASCII(name));
+ return "lib" + name.as_string() + ".so";
+}
+
+} // namespace base
diff --git a/libchrome/base/numerics/OWNERS b/libchrome/base/numerics/OWNERS
new file mode 100644
index 0000000..41f35fc
--- /dev/null
+++ b/libchrome/base/numerics/OWNERS
@@ -0,0 +1,3 @@
+jschuh@chromium.org
+tsepez@chromium.org
+
diff --git a/libchrome/base/numerics/safe_conversions.h b/libchrome/base/numerics/safe_conversions.h
new file mode 100644
index 0000000..6b558af
--- /dev/null
+++ b/libchrome/base/numerics/safe_conversions.h
@@ -0,0 +1,166 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_CONVERSIONS_H_
+#define BASE_NUMERICS_SAFE_CONVERSIONS_H_
+
+#include <stddef.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "base/logging.h"
+#include "base/numerics/safe_conversions_impl.h"
+
+namespace base {
+
+// Convenience function that returns true if the supplied value is in range
+// for the destination type.
+template <typename Dst, typename Src>
+constexpr bool IsValueInRangeForNumericType(Src value) {
+ return internal::DstRangeRelationToSrcRange<Dst>(value) ==
+ internal::RANGE_VALID;
+}
+
+// Convenience function for determining if a numeric value is negative without
+// throwing compiler warnings on: unsigned(value) < 0.
+template <typename T>
+constexpr typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type
+IsValueNegative(T value) {
+ static_assert(std::numeric_limits<T>::is_specialized,
+ "Argument must be numeric.");
+ return value < 0;
+}
+
+template <typename T>
+constexpr typename std::enable_if<!std::numeric_limits<T>::is_signed,
+ bool>::type IsValueNegative(T) {
+ static_assert(std::numeric_limits<T>::is_specialized,
+ "Argument must be numeric.");
+ return false;
+}
+
+// checked_cast<> is analogous to static_cast<> for numeric types,
+// except that it CHECKs that the specified numeric conversion will not
+// overflow or underflow. NaN source will always trigger a CHECK.
+template <typename Dst, typename Src>
+inline Dst checked_cast(Src value) {
+ CHECK(IsValueInRangeForNumericType<Dst>(value));
+ return static_cast<Dst>(value);
+}
+
+// HandleNaN will cause this class to CHECK(false).
+struct SaturatedCastNaNBehaviorCheck {
+ template <typename T>
+ static T HandleNaN() {
+ CHECK(false);
+ return T();
+ }
+};
+
+// HandleNaN will return 0 in this case.
+struct SaturatedCastNaNBehaviorReturnZero {
+ template <typename T>
+ static constexpr T HandleNaN() {
+ return T();
+ }
+};
+
+namespace internal {
+// This wrapper is used for C++11 constexpr support by avoiding the declaration
+// of local variables in the saturated_cast template function.
+template <typename Dst, class NaNHandler, typename Src>
+constexpr Dst saturated_cast_impl(const Src value,
+ const RangeConstraint constraint) {
+ return constraint == RANGE_VALID
+ ? static_cast<Dst>(value)
+ : (constraint == RANGE_UNDERFLOW
+ ? std::numeric_limits<Dst>::min()
+ : (constraint == RANGE_OVERFLOW
+ ? std::numeric_limits<Dst>::max()
+ : (constraint == RANGE_INVALID
+ ? NaNHandler::template HandleNaN<Dst>()
+ : (NOTREACHED(), static_cast<Dst>(value)))));
+}
+} // namespace internal
+
+// saturated_cast<> is analogous to static_cast<> for numeric types, except
+// that the specified numeric conversion will saturate rather than overflow or
+// underflow. NaN assignment to an integral will defer the behavior to a
+// specified class. By default, it will return 0.
+template <typename Dst,
+ class NaNHandler = SaturatedCastNaNBehaviorReturnZero,
+ typename Src>
+constexpr Dst saturated_cast(Src value) {
+ return std::numeric_limits<Dst>::is_iec559
+ ? static_cast<Dst>(value) // Floating point optimization.
+ : internal::saturated_cast_impl<Dst, NaNHandler>(
+ value, internal::DstRangeRelationToSrcRange<Dst>(value));
+}
+
+// strict_cast<> is analogous to static_cast<> for numeric types, except that
+// it will cause a compile failure if the destination type is not large enough
+// to contain any value in the source type. It performs no runtime checking.
+template <typename Dst, typename Src>
+constexpr Dst strict_cast(Src value) {
+ static_assert(std::numeric_limits<Src>::is_specialized,
+ "Argument must be numeric.");
+ static_assert(std::numeric_limits<Dst>::is_specialized,
+ "Result must be numeric.");
+ static_assert((internal::StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
+ internal::NUMERIC_RANGE_CONTAINED),
+ "The numeric conversion is out of range for this type. You "
+ "should probably use one of the following conversion "
+ "mechanisms on the value you want to pass:\n"
+ "- base::checked_cast\n"
+ "- base::saturated_cast\n"
+ "- base::CheckedNumeric");
+
+ return static_cast<Dst>(value);
+}
+
+// StrictNumeric implements compile time range checking between numeric types by
+// wrapping assignment operations in a strict_cast. This class is intended to be
+// used for function arguments and return types, to ensure the destination type
+// can always contain the source type. This is essentially the same as enforcing
+// -Wconversion in gcc and C4302 warnings on MSVC, but it can be applied
+// incrementally at API boundaries, making it easier to convert code so that it
+// compiles cleanly with truncation warnings enabled.
+// This template should introduce no runtime overhead, but it also provides no
+// runtime checking of any of the associated mathematical operations. Use
+// CheckedNumeric for runtime range checks of the actual value being assigned.
+template <typename T>
+class StrictNumeric {
+ public:
+ typedef T type;
+
+ constexpr StrictNumeric() : value_(0) {}
+
+ // Copy constructor.
+ template <typename Src>
+ constexpr StrictNumeric(const StrictNumeric<Src>& rhs)
+ : value_(strict_cast<T>(rhs.value_)) {}
+
+ // This is not an explicit constructor because we implicitly upgrade regular
+ // numerics to StrictNumerics to make them easier to use.
+ template <typename Src>
+ constexpr StrictNumeric(Src value)
+ : value_(strict_cast<T>(value)) {}
+
+ // The numeric cast operator basically handles all the magic.
+ template <typename Dst>
+ constexpr operator Dst() const {
+ return strict_cast<Dst>(value_);
+ }
+
+ private:
+ const T value_;
+};
+
+// Explicitly make a shorter size_t typedef for convenience.
+typedef StrictNumeric<size_t> SizeT;
+
+} // namespace base
+
+#endif // BASE_NUMERICS_SAFE_CONVERSIONS_H_
diff --git a/libchrome/base/numerics/safe_conversions_impl.h b/libchrome/base/numerics/safe_conversions_impl.h
new file mode 100644
index 0000000..0f0aebc
--- /dev/null
+++ b/libchrome/base/numerics/safe_conversions_impl.h
@@ -0,0 +1,265 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
+#define BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
+
+#include <limits.h>
+#include <stdint.h>
+
+#include <climits>
+#include <limits>
+
+namespace base {
+namespace internal {
+
+// The std library doesn't provide a binary max_exponent for integers, however
+// we can compute one by adding one to the number of non-sign bits. This allows
+// for accurate range comparisons between floating point and integer types.
+template <typename NumericType>
+struct MaxExponent {
+ static_assert(std::is_arithmetic<NumericType>::value,
+ "Argument must be numeric.");
+ static const int value = std::numeric_limits<NumericType>::is_iec559
+ ? std::numeric_limits<NumericType>::max_exponent
+ : (sizeof(NumericType) * CHAR_BIT + 1 -
+ std::numeric_limits<NumericType>::is_signed);
+};
+
+enum IntegerRepresentation {
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED
+};
+
+// A range for a given numeric Src type is contained for a given numeric Dst
+// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
+// numeric_limits<Src>::min() >= numeric_limits<Dst>::min() are true.
+// We implement this as template specializations rather than simple static
+// comparisons to ensure type correctness in our comparisons.
+enum NumericRangeRepresentation {
+ NUMERIC_RANGE_NOT_CONTAINED,
+ NUMERIC_RANGE_CONTAINED
+};
+
+// Helper templates to statically determine if our destination type can contain
+// maximum and minimum values represented by the source type.
+
+template <
+ typename Dst,
+ typename Src,
+ IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign =
+ std::numeric_limits<Src>::is_signed
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED >
+struct StaticDstRangeRelationToSrcRange;
+
+// Same sign: Dst is guaranteed to contain Src only if its range is equal or
+// larger.
+template <typename Dst, typename Src, IntegerRepresentation Sign>
+struct StaticDstRangeRelationToSrcRange<Dst, Src, Sign, Sign> {
+ static const NumericRangeRepresentation value =
+ MaxExponent<Dst>::value >= MaxExponent<Src>::value
+ ? NUMERIC_RANGE_CONTAINED
+ : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Unsigned to signed: Dst is guaranteed to contain source only if its range is
+// larger.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+ Src,
+ INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED> {
+ static const NumericRangeRepresentation value =
+ MaxExponent<Dst>::value > MaxExponent<Src>::value
+ ? NUMERIC_RANGE_CONTAINED
+ : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Signed to unsigned: Dst cannot be statically determined to contain Src.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+ Src,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED> {
+ static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+enum RangeConstraint {
+ RANGE_VALID = 0x0, // Value can be represented by the destination type.
+  RANGE_UNDERFLOW = 0x1,  // Value would underflow.
+  RANGE_OVERFLOW = 0x2,   // Value would overflow.
+ RANGE_INVALID = RANGE_UNDERFLOW | RANGE_OVERFLOW // Invalid (i.e. NaN).
+};
+
+// Helper function for coercing an int back to a RangeConstraint.
+constexpr RangeConstraint GetRangeConstraint(int integer_range_constraint) {
+ // TODO(jschuh): Once we get full C++14 support we want this
+ // assert(integer_range_constraint >= RANGE_VALID &&
+ // integer_range_constraint <= RANGE_INVALID)
+ return static_cast<RangeConstraint>(integer_range_constraint);
+}
+
+// This function creates a RangeConstraint from an upper and lower bound
+// check by taking advantage of the fact that only NaN can be out of range in
+// both directions at once.
+constexpr inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound,
+ bool is_in_lower_bound) {
+ return GetRangeConstraint((is_in_upper_bound ? 0 : RANGE_OVERFLOW) |
+ (is_in_lower_bound ? 0 : RANGE_UNDERFLOW));
+}
+
+// The following helper template addresses a corner case in range checks for
+// conversion from a floating-point type to an integral type of smaller range
+// but larger precision (e.g. float -> unsigned). The problem is as follows:
+// 1. Integral maximum is always one less than a power of two, so it must be
+// truncated to fit the mantissa of the floating point. The direction of
+// rounding is implementation defined, but by default it's always IEEE
+// floats, which round to nearest and thus result in a value of larger
+// magnitude than the integral value.
+// Example: float f = UINT_MAX; // f is 4294967296f but UINT_MAX
+// // is 4294967295u.
+// 2. If the floating point value is equal to the promoted integral maximum
+// value, a range check will erroneously pass.
+// Example: (4294967296f <= 4294967295u) // This is true due to a precision
+// // loss in rounding up to float.
+// 3. When the floating point value is then converted to an integral, the
+// resulting value is out of range for the target integral type and
+// thus is implementation defined.
+// Example: unsigned u = (float)INT_MAX; // u will typically overflow to 0.
+// To fix this bug we manually truncate the maximum value when the destination
+// type is an integral of larger precision than the source floating-point type,
+// such that the resulting maximum is represented exactly as a floating point.
+template <typename Dst, typename Src>
+struct NarrowingRange {
+ typedef typename std::numeric_limits<Src> SrcLimits;
+ typedef typename std::numeric_limits<Dst> DstLimits;
+ // The following logic avoids warnings where the max function is
+ // instantiated with invalid values for a bit shift (even though
+ // such a function can never be called).
+ static const int shift = (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
+ SrcLimits::digits < DstLimits::digits &&
+ SrcLimits::is_iec559 &&
+ DstLimits::is_integer)
+ ? (DstLimits::digits - SrcLimits::digits)
+ : 0;
+
+ static constexpr Dst max() {
+ // We use UINTMAX_C below to avoid compiler warnings about shifting floating
+ // points. Since it's a compile time calculation, it shouldn't have any
+ // performance impact.
+ return DstLimits::max() - static_cast<Dst>((UINTMAX_C(1) << shift) - 1);
+ }
+
+ static constexpr Dst min() {
+ return std::numeric_limits<Dst>::is_iec559 ? -DstLimits::max()
+ : DstLimits::min();
+ }
+};
+
+template <
+ typename Dst,
+ typename Src,
+ IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign = std::numeric_limits<Src>::is_signed
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ NumericRangeRepresentation DstRange =
+ StaticDstRangeRelationToSrcRange<Dst, Src>::value >
+struct DstRangeRelationToSrcRangeImpl;
+
+// The following templates are for ranges that must be verified at runtime. We
+// split it into checks based on signedness to avoid confusing casts and
+// compiler warnings on signed and unsigned comparisons.
+
+// Dst range is statically determined to contain Src: Nothing to check.
+template <typename Dst,
+ typename Src,
+ IntegerRepresentation DstSign,
+ IntegerRepresentation SrcSign>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ DstSign,
+ SrcSign,
+ NUMERIC_RANGE_CONTAINED> {
+ static constexpr RangeConstraint Check(Src /*value*/) { return RANGE_VALID; }
+};
+
+// Signed to signed narrowing: Both the upper and lower boundaries may be
+// exceeded.
+template <typename Dst, typename Src>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_SIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED> {
+ static constexpr RangeConstraint Check(Src value) {
+ return GetRangeConstraint((value <= NarrowingRange<Dst, Src>::max()),
+ (value >= NarrowingRange<Dst, Src>::min()));
+ }
+};
+
+// Unsigned to unsigned narrowing: Only the upper boundary can be exceeded.
+template <typename Dst, typename Src>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED> {
+ static constexpr RangeConstraint Check(Src value) {
+ return GetRangeConstraint(value <= NarrowingRange<Dst, Src>::max(), true);
+ }
+};
+
+// Unsigned to signed: The upper boundary may be exceeded.
+template <typename Dst, typename Src>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED> {
+ static constexpr RangeConstraint Check(Src value) {
+ return sizeof(Dst) > sizeof(Src)
+ ? RANGE_VALID
+ : GetRangeConstraint(
+ value <= static_cast<Src>(NarrowingRange<Dst, Src>::max()),
+ true);
+ }
+};
+
+// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
+// and any negative value exceeds the lower boundary.
+template <typename Dst, typename Src>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED> {
+ static constexpr RangeConstraint Check(Src value) {
+ return (MaxExponent<Dst>::value >= MaxExponent<Src>::value)
+ ? GetRangeConstraint(true, value >= static_cast<Src>(0))
+ : GetRangeConstraint(
+ value <= static_cast<Src>(NarrowingRange<Dst, Src>::max()),
+ value >= static_cast<Src>(0));
+ }
+};
+
+template <typename Dst, typename Src>
+constexpr RangeConstraint DstRangeRelationToSrcRange(Src value) {
+ static_assert(std::numeric_limits<Src>::is_specialized,
+ "Argument must be numeric.");
+ static_assert(std::numeric_limits<Dst>::is_specialized,
+ "Result must be numeric.");
+ return DstRangeRelationToSrcRangeImpl<Dst, Src>::Check(value);
+}
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
diff --git a/libchrome/base/numerics/safe_math.h b/libchrome/base/numerics/safe_math.h
new file mode 100644
index 0000000..d0003b7
--- /dev/null
+++ b/libchrome/base/numerics/safe_math.h
@@ -0,0 +1,309 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_H_
+#define BASE_NUMERICS_SAFE_MATH_H_
+
+#include <stddef.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "base/logging.h"
+#include "base/numerics/safe_math_impl.h"
+
+namespace base {
+
+namespace internal {
+
+// CheckedNumeric implements all the logic and operators for detecting integer
+// boundary conditions such as overflow, underflow, and invalid conversions.
+// The CheckedNumeric type implicitly converts from floating point and integer
+// data types, and contains overloads for basic arithmetic operations (i.e.: +,
+// -, *, /, %).
+//
+// The following methods convert from CheckedNumeric to standard numeric values:
+// IsValid() - Returns true if the underlying numeric value is valid (i.e. has
+// has not wrapped and is not the result of an invalid conversion).
+// ValueOrDie() - Returns the underlying value. If the state is not valid this
+// call will crash on a CHECK.
+// ValueOrDefault() - Returns the current value, or the supplied default if the
+// state is not valid.
+// ValueFloating() - Returns the underlying floating point value (valid only
+// only for floating point CheckedNumeric types).
+//
+// Bitwise operations are explicitly not supported, because correct
+// handling of some cases (e.g. sign manipulation) is ambiguous. Comparison
+// operations are explicitly not supported because they could result in a crash
+// on a CHECK condition. You should use patterns like the following for these
+// operations:
+// Bitwise operation:
+// CheckedNumeric<int> checked_int = untrusted_input_value;
+// int x = checked_int.ValueOrDefault(0) | kFlagValues;
+// Comparison:
+// CheckedNumeric<size_t> checked_size = untrusted_input_value;
+// checked_size += HEADER LENGTH;
+// if (checked_size.IsValid() && checked_size.ValueOrDie() < buffer_size)
+// Do stuff...
+template <typename T>
+class CheckedNumeric {
+  static_assert(std::is_arithmetic<T>::value,
+                "CheckedNumeric<T>: T must be a numeric type.");
+
+ public:
+  typedef T type;
+
+  // Default construction yields a default CheckedNumericState (valid zero).
+  CheckedNumeric() {}
+
+  // Copy constructor.
+  template <typename Src>
+  CheckedNumeric(const CheckedNumeric<Src>& rhs)
+      : state_(rhs.ValueUnsafe(), rhs.validity()) {}
+
+  // Constructs from a raw value plus a caller-supplied validity; used by the
+  // arithmetic operators below to propagate the range state they computed.
+  template <typename Src>
+  CheckedNumeric(Src value, RangeConstraint validity)
+      : state_(value, validity) {}
+
+  // This is not an explicit constructor because we implicitly upgrade regular
+  // numerics to CheckedNumerics to make them easier to use.
+  template <typename Src>
+  CheckedNumeric(Src value)  // NOLINT(runtime/explicit)
+      : state_(value) {
+    static_assert(std::numeric_limits<Src>::is_specialized,
+                  "Argument must be numeric.");
+  }
+
+  // This is not an explicit constructor because we want a seamless conversion
+  // from StrictNumeric types.
+  template <typename Src>
+  CheckedNumeric(StrictNumeric<Src> value)  // NOLINT(runtime/explicit)
+      : state_(static_cast<Src>(value)) {
+  }
+
+  // IsValid() is the public API to test if a CheckedNumeric is currently valid.
+  bool IsValid() const { return validity() == RANGE_VALID; }
+
+  // ValueOrDie() The primary accessor for the underlying value. If the current
+  // state is not valid it will CHECK and crash.
+  T ValueOrDie() const {
+    CHECK(IsValid());
+    return state_.value();
+  }
+
+  // ValueOrDefault(T default_value) A convenience method that returns the
+  // current value if the state is valid, and the supplied default_value for
+  // any other state.
+  T ValueOrDefault(T default_value) const {
+    return IsValid() ? state_.value() : default_value;
+  }
+
+  // ValueFloating() - Since floating point values include their validity state,
+  // we provide an easy method for extracting them directly, without a risk of
+  // crashing on a CHECK.
+  T ValueFloating() const {
+    static_assert(std::numeric_limits<T>::is_iec559, "Argument must be float.");
+    return CheckedNumeric<T>::cast(*this).ValueUnsafe();
+  }
+
+  // validity() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now for
+  // tests and to avoid a big matrix of friend operator overloads. But the
+  // values it returns are likely to change in the future.
+  // Returns: current validity state (i.e. valid, overflow, underflow, nan).
+  // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for
+  // saturation/wrapping so we can expose this state consistently and implement
+  // saturated arithmetic.
+  RangeConstraint validity() const { return state_.validity(); }
+
+  // ValueUnsafe() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now
+  // for tests and to avoid a big matrix of friend operator overloads. But the
+  // values it returns are likely to change in the future.
+  // Returns: the raw numeric value, regardless of the current state.
+  // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for
+  // saturation/wrapping so we can expose this state consistently and implement
+  // saturated arithmetic.
+  T ValueUnsafe() const { return state_.value(); }
+
+  // Prototypes for the supported arithmetic operator overloads. The
+  // definitions are stamped out by BASE_NUMERIC_ARITHMETIC_OPERATORS below.
+  template <typename Src> CheckedNumeric& operator+=(Src rhs);
+  template <typename Src> CheckedNumeric& operator-=(Src rhs);
+  template <typename Src> CheckedNumeric& operator*=(Src rhs);
+  template <typename Src> CheckedNumeric& operator/=(Src rhs);
+  template <typename Src> CheckedNumeric& operator%=(Src rhs);
+
+  // Unary negation; any overflow reported by CheckedNeg is merged into the
+  // stored validity for integer types.
+  CheckedNumeric operator-() const {
+    RangeConstraint validity;
+    T value = CheckedNeg(state_.value(), &validity);
+    // Negation is always valid for floating point.
+    if (std::numeric_limits<T>::is_iec559)
+      return CheckedNumeric<T>(value);
+
+    validity = GetRangeConstraint(state_.validity() | validity);
+    return CheckedNumeric<T>(value, validity);
+  }
+
+  // Absolute value; any overflow reported by CheckedAbs is merged into the
+  // stored validity for integer types.
+  CheckedNumeric Abs() const {
+    RangeConstraint validity;
+    T value = CheckedAbs(state_.value(), &validity);
+    // Absolute value is always valid for floating point.
+    if (std::numeric_limits<T>::is_iec559)
+      return CheckedNumeric<T>(value);
+
+    validity = GetRangeConstraint(state_.validity() | validity);
+    return CheckedNumeric<T>(value, validity);
+  }
+
+  // This function is available only for integral types. It returns an unsigned
+  // integer of the same width as the source type, containing the absolute value
+  // of the source, and properly handling signed min.
+  CheckedNumeric<typename UnsignedOrFloatForSize<T>::type> UnsignedAbs() const {
+    return CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>(
+        CheckedUnsignedAbs(state_.value()), state_.validity());
+  }
+
+  // Pre/post increment and decrement, implemented via the checked += / -=.
+  CheckedNumeric& operator++() {
+    *this += 1;
+    return *this;
+  }
+
+  CheckedNumeric operator++(int) {
+    CheckedNumeric value = *this;
+    *this += 1;
+    return value;
+  }
+
+  CheckedNumeric& operator--() {
+    *this -= 1;
+    return *this;
+  }
+
+  CheckedNumeric operator--(int) {
+    CheckedNumeric value = *this;
+    *this -= 1;
+    return value;
+  }
+
+  // These static methods behave like a convenience cast operator targeting
+  // the desired CheckedNumeric type. As an optimization, a reference is
+  // returned when Src is the same type as T.
+  template <typename Src>
+  static CheckedNumeric<T> cast(
+      Src u,
+      typename std::enable_if<std::numeric_limits<Src>::is_specialized,
+                              int>::type = 0) {
+    return u;
+  }
+
+  template <typename Src>
+  static CheckedNumeric<T> cast(
+      const CheckedNumeric<Src>& u,
+      typename std::enable_if<!std::is_same<Src, T>::value, int>::type = 0) {
+    return u;
+  }
+
+  static const CheckedNumeric<T>& cast(const CheckedNumeric<T>& u) { return u; }
+
+ private:
+  // Maps a plain numeric type to itself and CheckedNumeric<N> to N, so the
+  // compound assignment operators can accept either form on the right side.
+  template <typename NumericType>
+  struct UnderlyingType {
+    using type = NumericType;
+  };
+
+  template <typename NumericType>
+  struct UnderlyingType<CheckedNumeric<NumericType>> {
+    using type = NumericType;
+  };
+
+  // Value plus validity, specialized on integer vs. floating representation.
+  CheckedNumericState<T> state_;
+};
+
+// This is the boilerplate for the standard arithmetic operator overloads. A
+// macro isn't the prettiest solution, but it beats rewriting these five times.
+// Some details worth noting are:
+//  * We apply the standard arithmetic promotions.
+//  * We skip range checks for floating points.
+//  * We skip range checks for destination integers with sufficient range.
+// TODO(jschuh): extract these out into templates.
+#define BASE_NUMERIC_ARITHMETIC_OPERATORS(NAME, OP, COMPOUND_OP)              \
+  /* Binary arithmetic operator for CheckedNumerics of the same type. */      \
+  template <typename T>                                                       \
+  CheckedNumeric<typename ArithmeticPromotion<T>::type> operator OP(          \
+      const CheckedNumeric<T>& lhs, const CheckedNumeric<T>& rhs) {           \
+    typedef typename ArithmeticPromotion<T>::type Promotion;                  \
+    /* Floating point always takes the fast path */                           \
+    if (std::numeric_limits<T>::is_iec559)                                    \
+      return CheckedNumeric<T>(lhs.ValueUnsafe() OP rhs.ValueUnsafe());       \
+    if (IsIntegerArithmeticSafe<Promotion, T, T>::value)                      \
+      return CheckedNumeric<Promotion>(                                       \
+          lhs.ValueUnsafe() OP rhs.ValueUnsafe(),                             \
+          GetRangeConstraint(rhs.validity() | lhs.validity()));               \
+    RangeConstraint validity = RANGE_VALID;                                   \
+    T result = static_cast<T>(                                                \
+        Checked##NAME(static_cast<Promotion>(lhs.ValueUnsafe()),              \
+                      static_cast<Promotion>(rhs.ValueUnsafe()),              \
+                      &validity));                                            \
+    return CheckedNumeric<Promotion>(                                         \
+        result,                                                               \
+        GetRangeConstraint(validity | lhs.validity() | rhs.validity()));      \
+  }                                                                           \
+  /* Assignment arithmetic operator implementation from CheckedNumeric. */    \
+  template <typename T>                                                       \
+  template <typename Src>                                                     \
+  CheckedNumeric<T>& CheckedNumeric<T>::operator COMPOUND_OP(Src rhs) {       \
+    *this = CheckedNumeric<T>::cast(*this)                                    \
+        OP CheckedNumeric<typename UnderlyingType<Src>::type>::cast(rhs);     \
+    return *this;                                                             \
+  }                                                                           \
+  /* Binary arithmetic operator for CheckedNumeric of different type. */      \
+  template <typename T, typename Src>                                         \
+  CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP(     \
+      const CheckedNumeric<Src>& lhs, const CheckedNumeric<T>& rhs) {         \
+    typedef typename ArithmeticPromotion<T, Src>::type Promotion;             \
+    if (IsIntegerArithmeticSafe<Promotion, T, Src>::value)                    \
+      return CheckedNumeric<Promotion>(                                       \
+          lhs.ValueUnsafe() OP rhs.ValueUnsafe(),                             \
+          GetRangeConstraint(rhs.validity() | lhs.validity()));               \
+    return CheckedNumeric<Promotion>::cast(lhs)                               \
+        OP CheckedNumeric<Promotion>::cast(rhs);                              \
+  }                                                                           \
+  /* Binary arithmetic operator for left CheckedNumeric and right numeric. */ \
+  template <typename T, typename Src,                                         \
+            typename std::enable_if<std::is_arithmetic<Src>::value>::type* =  \
+                nullptr>                                                      \
+  CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP(     \
+      const CheckedNumeric<T>& lhs, Src rhs) {                                \
+    typedef typename ArithmeticPromotion<T, Src>::type Promotion;             \
+    if (IsIntegerArithmeticSafe<Promotion, T, Src>::value)                    \
+      return CheckedNumeric<Promotion>(lhs.ValueUnsafe() OP rhs,              \
+                                       lhs.validity());                       \
+    return CheckedNumeric<Promotion>::cast(lhs)                               \
+        OP CheckedNumeric<Promotion>::cast(rhs);                              \
+  }                                                                           \
+  /* Binary arithmetic operator for left numeric and right CheckedNumeric. */ \
+  template <typename T, typename Src,                                         \
+            typename std::enable_if<std::is_arithmetic<Src>::value>::type* =  \
+                nullptr>                                                      \
+  CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP(     \
+      Src lhs, const CheckedNumeric<T>& rhs) {                                \
+    typedef typename ArithmeticPromotion<T, Src>::type Promotion;             \
+    if (IsIntegerArithmeticSafe<Promotion, T, Src>::value)                    \
+      return CheckedNumeric<Promotion>(lhs OP rhs.ValueUnsafe(),              \
+                                       rhs.validity());                       \
+    return CheckedNumeric<Promotion>::cast(lhs)                               \
+        OP CheckedNumeric<Promotion>::cast(rhs);                              \
+  }
+
+// Stamp out the five checked operator families.
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Add, +, += )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Sub, -, -= )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Mul, *, *= )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Div, /, /= )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Mod, %, %= )
+
+#undef BASE_NUMERIC_ARITHMETIC_OPERATORS
+
+} // namespace internal
+
+using internal::CheckedNumeric;
+
+} // namespace base
+
+#endif // BASE_NUMERICS_SAFE_MATH_H_
diff --git a/libchrome/base/numerics/safe_math_impl.h b/libchrome/base/numerics/safe_math_impl.h
new file mode 100644
index 0000000..f214f3f
--- /dev/null
+++ b/libchrome/base/numerics/safe_math_impl.h
@@ -0,0 +1,532 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SAFE_MATH_IMPL_H_
+#define BASE_NUMERICS_SAFE_MATH_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+#include <type_traits>
+
+#include "base/numerics/safe_conversions.h"
+
+namespace base {
+namespace internal {
+
+// Everything from here up to the floating point operations is portable C++,
+// but it may not be fast. This code could be split based on
+// platform/architecture and replaced with potentially faster implementations.
+
+// Integer promotion templates used by the portable checked integer arithmetic.
+// Maps a byte width and signedness to the matching fixed-width integer type.
+template <size_t Size, bool IsSigned>
+struct IntegerForSizeAndSign;
+template <>
+struct IntegerForSizeAndSign<1, true> {
+  typedef int8_t type;
+};
+template <>
+struct IntegerForSizeAndSign<1, false> {
+  typedef uint8_t type;
+};
+template <>
+struct IntegerForSizeAndSign<2, true> {
+  typedef int16_t type;
+};
+template <>
+struct IntegerForSizeAndSign<2, false> {
+  typedef uint16_t type;
+};
+template <>
+struct IntegerForSizeAndSign<4, true> {
+  typedef int32_t type;
+};
+template <>
+struct IntegerForSizeAndSign<4, false> {
+  typedef uint32_t type;
+};
+template <>
+struct IntegerForSizeAndSign<8, true> {
+  typedef int64_t type;
+};
+template <>
+struct IntegerForSizeAndSign<8, false> {
+  typedef uint64_t type;
+};
+
+// WARNING: We have no IntegerForSizeAndSign<16, *>. If we ever add one to
+// support 128-bit math, then the ArithmeticPromotion template below will need
+// to be updated (or more likely replaced with a decltype expression).
+
+// The same-width unsigned counterpart of an integer type.
+template <typename Integer>
+struct UnsignedIntegerForSize {
+  typedef typename std::enable_if<
+      std::numeric_limits<Integer>::is_integer,
+      typename IntegerForSizeAndSign<sizeof(Integer), false>::type>::type type;
+};
+
+// The same-width signed counterpart of an integer type.
+template <typename Integer>
+struct SignedIntegerForSize {
+  typedef typename std::enable_if<
+      std::numeric_limits<Integer>::is_integer,
+      typename IntegerForSizeAndSign<sizeof(Integer), true>::type>::type type;
+};
+
+// An integer type twice the width of Integer with the same signedness; used
+// to hold intermediate results that cannot wrap.
+template <typename Integer>
+struct TwiceWiderInteger {
+  typedef typename std::enable_if<
+      std::numeric_limits<Integer>::is_integer,
+      typename IntegerForSizeAndSign<
+          sizeof(Integer) * 2,
+          std::numeric_limits<Integer>::is_signed>::type>::type type;
+};
+
+// Zero-based bit index of the sign (most significant) bit of Integer.
+template <typename Integer>
+struct PositionOfSignBit {
+  static const typename std::enable_if<std::numeric_limits<Integer>::is_integer,
+                                       size_t>::type value =
+      CHAR_BIT * sizeof(Integer) - 1;
+};
+
+// This is used for UnsignedAbs, where we need to support floating-point
+// template instantiations even though we don't actually support the operations.
+// However, there is no corresponding implementation of e.g. CheckedUnsignedAbs,
+// so the float versions will not compile.
+template <typename Numeric,
+          bool IsInteger = std::numeric_limits<Numeric>::is_integer,
+          bool IsFloat = std::numeric_limits<Numeric>::is_iec559>
+struct UnsignedOrFloatForSize;
+
+// Integers map to their same-width unsigned counterpart.
+template <typename Numeric>
+struct UnsignedOrFloatForSize<Numeric, true, false> {
+  typedef typename UnsignedIntegerForSize<Numeric>::type type;
+};
+
+// Floating-point types map to themselves.
+template <typename Numeric>
+struct UnsignedOrFloatForSize<Numeric, false, true> {
+  typedef Numeric type;
+};
+
+// Helper templates for integer manipulations.
+
+// Returns true if the sign (most significant) bit of x is set.
+template <typename T>
+constexpr bool HasSignBit(T x) {
+  // Cast to unsigned since right shift on signed is undefined.
+  return !!(static_cast<typename UnsignedIntegerForSize<T>::type>(x) >>
+            PositionOfSignBit<T>::value);
+}
+
+// This wrapper undoes the standard integer promotions: the cast back to T
+// keeps ~x at the operand's original width.
+template <typename T>
+constexpr T BinaryComplement(T x) {
+  return static_cast<T>(~x);
+}
+
+// Here are the actual portable checked integer math implementations.
+// TODO(jschuh): Break this code out from the enable_if pattern and find a clean
+// way to coalesce things into the CheckedNumericState specializations below.
+
+// Checked addition. The sum is computed in the same-width unsigned type
+// (signed overflow is undefined behavior), then validity is derived from
+// the operand and result sign bits.
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer, T>::type
+CheckedAdd(T x, T y, RangeConstraint* validity) {
+  // Since the value of x+y is undefined if we have a signed type, we compute
+  // it using the unsigned type of the same size.
+  typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
+  UnsignedDst ux = static_cast<UnsignedDst>(x);
+  UnsignedDst uy = static_cast<UnsignedDst>(y);
+  UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy);
+  // Addition is valid if the sign of (x + y) is equal to either that of x or
+  // that of y.
+  if (std::numeric_limits<T>::is_signed) {
+    if (HasSignBit(BinaryComplement(
+            static_cast<UnsignedDst>((uresult ^ ux) & (uresult ^ uy))))) {
+      *validity = RANGE_VALID;
+    } else {  // Direction of wrap is inverse of result sign.
+      *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
+    }
+  } else {  // Unsigned is either valid or overflow.
+    *validity = BinaryComplement(x) >= y ? RANGE_VALID : RANGE_OVERFLOW;
+  }
+  return static_cast<T>(uresult);
+}
+
+// Checked subtraction, using the same unsigned-domain technique as CheckedAdd.
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer, T>::type
+CheckedSub(T x, T y, RangeConstraint* validity) {
+  // Since the value of x - y is undefined if we have a signed type, we
+  // compute it using the unsigned type of the same size.
+  typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
+  UnsignedDst ux = static_cast<UnsignedDst>(x);
+  UnsignedDst uy = static_cast<UnsignedDst>(y);
+  UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy);
+  // Subtraction is valid if either x and y have same sign, or (x-y) and x have
+  // the same sign.
+  if (std::numeric_limits<T>::is_signed) {
+    if (HasSignBit(BinaryComplement(
+            static_cast<UnsignedDst>((uresult ^ ux) & (ux ^ uy))))) {
+      *validity = RANGE_VALID;
+    } else {  // Direction of wrap is inverse of result sign.
+      *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
+    }
+  } else {  // Unsigned is either valid or underflow.
+    *validity = x >= y ? RANGE_VALID : RANGE_UNDERFLOW;
+  }
+  return static_cast<T>(uresult);
+}
+
+// Integer multiplication is a bit complicated. In the fast case we just
+// promote to a twice wider type, and range check the result. In the
+// slow case we need to manually check that the result won't be truncated by
+// checking with division against the appropriate bound.
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            sizeof(T) * 2 <= sizeof(uintmax_t),
+                        T>::type
+CheckedMul(T x, T y, RangeConstraint* validity) {
+  typedef typename TwiceWiderInteger<T>::type IntermediateType;
+  // The product cannot wrap in the twice-wider type, so validity reduces to
+  // a range check of the wide result against T.
+  IntermediateType tmp =
+      static_cast<IntermediateType>(x) * static_cast<IntermediateType>(y);
+  *validity = DstRangeRelationToSrcRange<T>(tmp);
+  return static_cast<T>(tmp);
+}
+
+// Slow path for signed types too wide to promote: probe with division
+// against the representable bounds before multiplying.
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            std::numeric_limits<T>::is_signed &&
+                            (sizeof(T) * 2 > sizeof(uintmax_t)),
+                        T>::type
+CheckedMul(T x, T y, RangeConstraint* validity) {
+  // If either side is zero then the result will be zero.
+  if (!x || !y) {
+    *validity = RANGE_VALID;
+    return static_cast<T>(0);
+
+  } else if (x > 0) {
+    if (y > 0)
+      *validity =
+          x <= std::numeric_limits<T>::max() / y ? RANGE_VALID : RANGE_OVERFLOW;
+    else
+      *validity = y >= std::numeric_limits<T>::min() / x ? RANGE_VALID
+                                                         : RANGE_UNDERFLOW;
+
+  } else {
+    if (y > 0)
+      *validity = x >= std::numeric_limits<T>::min() / y ? RANGE_VALID
+                                                         : RANGE_UNDERFLOW;
+    else
+      *validity =
+          y >= std::numeric_limits<T>::max() / x ? RANGE_VALID : RANGE_OVERFLOW;
+  }
+
+  // NOTE(review): when validity is not RANGE_VALID the product below still
+  // overflows, which is undefined behavior for signed types; consider
+  // computing it in the unsigned domain — confirm.
+  return static_cast<T>(x * y);
+}
+
+// Slow path for unsigned types too wide to promote: only overflow is
+// possible, checked by division against the maximum.
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            !std::numeric_limits<T>::is_signed &&
+                            (sizeof(T) * 2 > sizeof(uintmax_t)),
+                        T>::type
+CheckedMul(T x, T y, RangeConstraint* validity) {
+  *validity = (y == 0 || x <= std::numeric_limits<T>::max() / y)
+                  ? RANGE_VALID
+                  : RANGE_OVERFLOW;
+  return static_cast<T>(x * y);
+}
+
+// Division requires checks for division by zero and for the lone overflow
+// case: negating the most-negative value (min / -1).
+template <typename T>
+T CheckedDiv(T x,
+             T y,
+             RangeConstraint* validity,
+             typename std::enable_if<std::numeric_limits<T>::is_integer,
+                                     int>::type = 0) {
+  // Division by zero is undefined behavior, so flag it instead of
+  // executing it.
+  if (y == static_cast<T>(0)) {
+    *validity = RANGE_INVALID;
+    return static_cast<T>(0);
+  }
+
+  // min / -1 overflows: the mathematical result, -min, is not representable.
+  if (std::numeric_limits<T>::is_signed && x == std::numeric_limits<T>::min() &&
+      y == static_cast<T>(-1)) {
+    *validity = RANGE_OVERFLOW;
+    return std::numeric_limits<T>::min();
+  }
+
+  *validity = RANGE_VALID;
+  return static_cast<T>(x / y);
+}
+
+// Modulus for signed integers. Only a positive modulus is supported; zero or
+// negative y is reported as RANGE_INVALID. The remainder is not evaluated in
+// the invalid case, since x % 0 (and min % -1) is undefined behavior.
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            std::numeric_limits<T>::is_signed,
+                        T>::type
+CheckedMod(T x, T y, RangeConstraint* validity) {
+  *validity = y > 0 ? RANGE_VALID : RANGE_INVALID;
+  return *validity == RANGE_VALID ? static_cast<T>(x % y) : static_cast<T>(0);
+}
+
+// Modulus for unsigned integers: always defined except for a zero modulus,
+// which is undefined behavior and therefore flagged instead of executed.
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            !std::numeric_limits<T>::is_signed,
+                        T>::type
+CheckedMod(T x, T y, RangeConstraint* validity) {
+  *validity = y != 0 ? RANGE_VALID : RANGE_INVALID;
+  return y != 0 ? static_cast<T>(x % y) : static_cast<T>(0);
+}
+
+// Checked negation for signed integers.
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            std::numeric_limits<T>::is_signed,
+                        T>::type
+CheckedNeg(T value, RangeConstraint* validity) {
+  // The negation of signed min is not representable, so report overflow and
+  // pass the value through unchanged (which matches the previous two's
+  // complement result) rather than evaluating -min, which is undefined
+  // behavior for types not subject to integer promotion.
+  *validity =
+      value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW;
+  return *validity == RANGE_VALID ? static_cast<T>(-value) : value;
+}
+
+// Checked negation for unsigned integers.
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            !std::numeric_limits<T>::is_signed,
+                        T>::type
+CheckedNeg(T value, RangeConstraint* validity) {
+  // The only legal unsigned negation is zero.
+  *validity = value ? RANGE_UNDERFLOW : RANGE_VALID;
+  // Compute the two's-complement negation in the unsigned domain, which is
+  // fully defined. The previous round-trip through the same-width signed type
+  // was undefined behavior when the sign bit was set (it negated the signed
+  // minimum); the result here is identical where that was defined.
+  return static_cast<T>(static_cast<T>(0) - value);
+}
+
+// Checked absolute value for signed integers.
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            std::numeric_limits<T>::is_signed,
+                        T>::type
+CheckedAbs(T value, RangeConstraint* validity) {
+  // abs(min) is not representable — and evaluating std::abs on it is
+  // undefined behavior — so report overflow and pass the value through
+  // unchanged instead.
+  *validity =
+      value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW;
+  return *validity == RANGE_VALID ? static_cast<T>(std::abs(value)) : value;
+}
+
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            !std::numeric_limits<T>::is_signed,
+                        T>::type
+CheckedAbs(T value, RangeConstraint* validity) {
+  // T is unsigned, so |value| must already be positive.
+  *validity = RANGE_VALID;
+  return value;
+}
+
+// Returns |value| as the same-width unsigned type, handling signed min
+// correctly (its magnitude is max + 1, representable only unsigned).
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            std::numeric_limits<T>::is_signed,
+                        typename UnsignedIntegerForSize<T>::type>::type
+CheckedUnsignedAbs(T value) {
+  typedef typename UnsignedIntegerForSize<T>::type UnsignedT;
+  return value == std::numeric_limits<T>::min()
+             ? static_cast<UnsignedT>(std::numeric_limits<T>::max()) + 1
+             : static_cast<UnsignedT>(std::abs(value));
+}
+
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            !std::numeric_limits<T>::is_signed,
+                        T>::type
+CheckedUnsignedAbs(T value) {
+  // T is unsigned, so |value| must already be positive.
+  return static_cast<T>(value);
+}
+
+// These are the floating point stubs that the compiler needs to see. Only the
+// negation operation is ever called.
+#define BASE_FLOAT_ARITHMETIC_STUBS(NAME)                             \
+  template <typename T>                                               \
+  typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type \
+  Checked##NAME(T, T, RangeConstraint*) {                             \
+    NOTREACHED();                                                     \
+    return static_cast<T>(0);                                         \
+  }
+
+// Instantiate the unreachable stubs for each arithmetic operation.
+BASE_FLOAT_ARITHMETIC_STUBS(Add)
+BASE_FLOAT_ARITHMETIC_STUBS(Sub)
+BASE_FLOAT_ARITHMETIC_STUBS(Mul)
+BASE_FLOAT_ARITHMETIC_STUBS(Div)
+BASE_FLOAT_ARITHMETIC_STUBS(Mod)
+
+#undef BASE_FLOAT_ARITHMETIC_STUBS
+
+// Floating-point negation is always representable, so no validity is written.
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedNeg(
+    T value,
+    RangeConstraint*) {
+  return static_cast<T>(-value);
+}
+
+// Floating-point absolute value is always representable as well.
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedAbs(
+    T value,
+    RangeConstraint*) {
+  return static_cast<T>(std::abs(value));
+}
+
+// Floats carry around their validity state with them, but integers do not. So,
+// we wrap the underlying value in a specialization in order to hide that detail
+// and expose an interface via accessors.
+// Discriminator used to pick the CheckedNumericState specialization below.
+enum NumericRepresentation {
+  NUMERIC_INTEGER,
+  NUMERIC_FLOATING,
+  NUMERIC_UNKNOWN
+};
+
+// Classifies T as integer, IEC 559 floating point, or unknown.
+template <typename NumericType>
+struct GetNumericRepresentation {
+  static const NumericRepresentation value =
+      std::numeric_limits<NumericType>::is_integer
+          ? NUMERIC_INTEGER
+          : (std::numeric_limits<NumericType>::is_iec559 ? NUMERIC_FLOATING
+                                                         : NUMERIC_UNKNOWN);
+};
+
+// The primary template is intentionally empty; only the integer and floating
+// specializations below are usable.
+template <typename T, NumericRepresentation type =
+                          GetNumericRepresentation<T>::value>
+class CheckedNumericState {};
+
+// Integrals require quite a bit of additional housekeeping to manage state.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_INTEGER> {
+ private:
+  T value_;
+  RangeConstraint validity_ : CHAR_BIT;  // Actually requires only two bits.
+
+ public:
+  template <typename Src, NumericRepresentation type>
+  friend class CheckedNumericState;
+
+  // Default state: a valid zero.
+  CheckedNumericState() : value_(0), validity_(RANGE_VALID) {}
+
+  // Merges the caller-provided validity with the range check of converting
+  // |value| to T.
+  template <typename Src>
+  CheckedNumericState(Src value, RangeConstraint validity)
+      : value_(static_cast<T>(value)),
+        validity_(GetRangeConstraint(validity |
+                                     DstRangeRelationToSrcRange<T>(value))) {
+    static_assert(std::numeric_limits<Src>::is_specialized,
+                  "Argument must be numeric.");
+  }
+
+  // Copy constructor.
+  template <typename Src>
+  CheckedNumericState(const CheckedNumericState<Src>& rhs)
+      : value_(static_cast<T>(rhs.value())),
+        validity_(GetRangeConstraint(
+            rhs.validity() | DstRangeRelationToSrcRange<T>(rhs.value()))) {}
+
+  // Conversion from a raw numeric; validity comes from the range check alone.
+  template <typename Src>
+  explicit CheckedNumericState(
+      Src value,
+      typename std::enable_if<std::numeric_limits<Src>::is_specialized,
+                              int>::type = 0)
+      : value_(static_cast<T>(value)),
+        validity_(DstRangeRelationToSrcRange<T>(value)) {}
+
+  RangeConstraint validity() const { return validity_; }
+  T value() const { return value_; }
+};
+
+// Floating points maintain their own validity, but need translation wrappers.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_FLOATING> {
+ private:
+  T value_;
+
+ public:
+  template <typename Src, NumericRepresentation type>
+  friend class CheckedNumericState;
+
+  CheckedNumericState() : value_(0.0) {}
+
+  // Construction from an integer: out-of-range states are encoded directly
+  // into the stored float as +/-infinity or NaN, so no separate flag is
+  // needed. The incoming validity parameter is intentionally unused.
+  template <typename Src>
+  CheckedNumericState(
+      Src value,
+      RangeConstraint /*validity*/,
+      typename std::enable_if<std::numeric_limits<Src>::is_integer, int>::type =
+          0) {
+    switch (DstRangeRelationToSrcRange<T>(value)) {
+      case RANGE_VALID:
+        value_ = static_cast<T>(value);
+        break;
+
+      case RANGE_UNDERFLOW:
+        value_ = -std::numeric_limits<T>::infinity();
+        break;
+
+      case RANGE_OVERFLOW:
+        value_ = std::numeric_limits<T>::infinity();
+        break;
+
+      case RANGE_INVALID:
+        value_ = std::numeric_limits<T>::quiet_NaN();
+        break;
+
+      default:
+        NOTREACHED();
+    }
+  }
+
+  // Conversion from any numeric; floats carry validity in the value itself.
+  template <typename Src>
+  explicit CheckedNumericState(
+      Src value,
+      typename std::enable_if<std::numeric_limits<Src>::is_specialized,
+                              int>::type = 0)
+      : value_(static_cast<T>(value)) {}
+
+  // Copy constructor.
+  template <typename Src>
+  CheckedNumericState(const CheckedNumericState<Src>& rhs)
+      : value_(static_cast<T>(rhs.value())) {}
+
+  // Derives validity from the stored value: infinities and NaN fail the
+  // finite-bound comparisons and map to over/underflow or invalid.
+  RangeConstraint validity() const {
+    return GetRangeConstraint(value_ <= std::numeric_limits<T>::max(),
+                              value_ >= -std::numeric_limits<T>::max());
+  }
+  T value() const { return value_; }
+};
+
+// For integers less than 128-bit and floats 32-bit or larger, we have the type
+// with the larger maximum exponent take precedence.
+enum ArithmeticPromotionCategory { LEFT_PROMOTION, RIGHT_PROMOTION };
+
+// Selects the promoted type for a binary operation on Lhs and Rhs: whichever
+// side has the larger maximum exponent wins (ties promote to the right).
+template <typename Lhs,
+          typename Rhs = Lhs,
+          ArithmeticPromotionCategory Promotion =
+              (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
+                  ? LEFT_PROMOTION
+                  : RIGHT_PROMOTION>
+struct ArithmeticPromotion;
+
+template <typename Lhs, typename Rhs>
+struct ArithmeticPromotion<Lhs, Rhs, LEFT_PROMOTION> {
+  typedef Lhs type;
+};
+
+template <typename Lhs, typename Rhs>
+struct ArithmeticPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+  typedef Rhs type;
+};
+
+// We can statically check if operations on the provided types can wrap, so we
+// can skip the checked operations if they're not needed. So, for an integer we
+// care if the destination type preserves the sign and is twice the width of
+// the source.
+template <typename T, typename Lhs, typename Rhs>
+struct IsIntegerArithmeticSafe {
+  // Safe only when T can represent every value of BOTH operand types and is
+  // at least twice as wide as each, so a single operation cannot wrap. The
+  // previous code tested the Rhs containment with "!=", which both disabled
+  // the fast path for matching operand types and could wrongly enable
+  // unchecked arithmetic when Rhs was not contained in T.
+  static const bool value = !std::numeric_limits<T>::is_iec559 &&
+                            StaticDstRangeRelationToSrcRange<T, Lhs>::value ==
+                                NUMERIC_RANGE_CONTAINED &&
+                            sizeof(T) >= (2 * sizeof(Lhs)) &&
+                            StaticDstRangeRelationToSrcRange<T, Rhs>::value ==
+                                NUMERIC_RANGE_CONTAINED &&
+                            sizeof(T) >= (2 * sizeof(Rhs));
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_NUMERICS_SAFE_MATH_IMPL_H_
diff --git a/libchrome/base/numerics/safe_numerics_unittest.cc b/libchrome/base/numerics/safe_numerics_unittest.cc
new file mode 100644
index 0000000..4be7ab5
--- /dev/null
+++ b/libchrome/base/numerics/safe_numerics_unittest.cc
@@ -0,0 +1,762 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+#include <type_traits>
+
+#include "base/compiler_specific.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/numerics/safe_math.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(COMPILER_MSVC) && defined(ARCH_CPU_32_BITS)
+#include <mmintrin.h>
+#endif
+
+using std::numeric_limits;
+using base::CheckedNumeric;
+using base::checked_cast;
+using base::IsValueInRangeForNumericType;
+using base::IsValueNegative;
+using base::SizeT;
+using base::StrictNumeric;
+using base::saturated_cast;
+using base::strict_cast;
+using base::internal::MaxExponent;
+using base::internal::RANGE_VALID;
+using base::internal::RANGE_INVALID;
+using base::internal::RANGE_OVERFLOW;
+using base::internal::RANGE_UNDERFLOW;
+using base::internal::SignedIntegerForSize;
+
+// These tests deliberately cause arithmetic overflows. If the compiler is
+// aggressive enough, it can const fold these overflows. Disable warnings about
+// overflows for const expressions.
+#if defined(OS_WIN)
+#pragma warning(disable:4756)
+#endif
+
+// This is a helper function for finding the maximum value in Src that can be
+// wholly represented as the destination floating-point type.
+template <typename Dst, typename Src>
+Dst GetMaxConvertibleToFloat() {
+  typedef numeric_limits<Dst> DstLimits;
+  typedef numeric_limits<Src> SrcLimits;
+  static_assert(SrcLimits::is_specialized, "Source must be numeric.");
+  static_assert(DstLimits::is_specialized, "Destination must be numeric.");
+  CHECK(DstLimits::is_iec559);
+
+  // If Dst's mantissa and exponent can represent every Src value exactly,
+  // Src's own maximum is the answer.
+  if (SrcLimits::digits <= DstLimits::digits &&
+      MaxExponent<Src>::value <= MaxExponent<Dst>::value)
+    return SrcLimits::max();
+  // Otherwise halve downward until a round-trip through Dst is lossless;
+  // the loop terminates because small values are always exactly convertible.
+  Src max = SrcLimits::max() / 2 + (SrcLimits::is_integer ? 1 : 0);
+  while (max != static_cast<Src>(static_cast<Dst>(max))) {
+    max /= 2;
+  }
+  return static_cast<Dst>(max);
+}
+
+// Helper macros to wrap displaying the conversion types and line numbers.
+// NOTE: these expand references to local variables |dst| (type name string)
+// and |line| (the caller's line number), which must be in scope at every
+// expansion site. The unary + on ValueUnsafe() promotes char-sized values so
+// they print as numbers rather than characters.
+#define TEST_EXPECTED_VALIDITY(expected, actual)                           \
+  EXPECT_EQ(expected, CheckedNumeric<Dst>(actual).IsValid())               \
+      << "Result test: Value " << +(actual).ValueUnsafe() << " as " << dst \
+      << " on line " << line;
+
+#define TEST_EXPECTED_SUCCESS(actual) TEST_EXPECTED_VALIDITY(true, actual)
+#define TEST_EXPECTED_FAILURE(actual) TEST_EXPECTED_VALIDITY(false, actual)
+
+#define TEST_EXPECTED_VALUE(expected, actual)                                \
+  EXPECT_EQ(static_cast<Dst>(expected),                                      \
+            CheckedNumeric<Dst>(actual).ValueUnsafe())                       \
+      << "Result test: Value " << +((actual).ValueUnsafe()) << " as " << dst \
+      << " on line " << line;
+
+// Signed integer arithmetic.
+// Covers the overflow cases unique to signed two's-complement types; selected
+// via enable_if when Dst is a signed integer.
+template <typename Dst>
+static void TestSpecializedArithmetic(
+    const char* dst,
+    int line,
+    typename std::enable_if<numeric_limits<Dst>::is_integer &&
+                                numeric_limits<Dst>::is_signed,
+                            int>::type = 0) {
+  typedef numeric_limits<Dst> DstLimits;
+  // Negating or taking the absolute value of min() overflows, because
+  // |min()| > max() in two's complement.
+  TEST_EXPECTED_FAILURE(-CheckedNumeric<Dst>(DstLimits::min()));
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()).Abs());
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).Abs());
+
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) + -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) + -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) +
+                        -DstLimits::max());
+
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) - 1);
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) - -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) -
+                        -DstLimits::max());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) -
+                        DstLimits::max());
+
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) * 2);
+
+  // min() / -1 is the single signed division that overflows.
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) / -1);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(-1) / 2);
+
+  // Modulus is legal only for integers.
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() % 1);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
+  TEST_EXPECTED_VALUE(-1, CheckedNumeric<Dst>(-1) % 2);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-1) % -2);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) % 2);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(DstLimits::max()) % 2);
+  // Test all the different modulus combinations.
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, 1 % CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
+  CheckedNumeric<Dst> checked_dst = 1;
+  TEST_EXPECTED_VALUE(0, checked_dst %= 1);
+}
+
+// Unsigned integer arithmetic.
+// Selected via enable_if when Dst is an unsigned integer; min() is 0, so
+// negation/Abs of min() are trivially valid and underflow happens below 0.
+template <typename Dst>
+static void TestSpecializedArithmetic(
+    const char* dst,
+    int line,
+    typename std::enable_if<numeric_limits<Dst>::is_integer &&
+                                !numeric_limits<Dst>::is_signed,
+                            int>::type = 0) {
+  typedef numeric_limits<Dst> DstLimits;
+  TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::min()));
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).Abs());
+  // Mixing in negative values must underflow below zero.
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) + -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) - 1);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) * 2);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) / 2);
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).UnsignedAbs());
+  // UnsignedAbs of the most-negative signed value of the same width must
+  // succeed (its magnitude fits in the unsigned type).
+  TEST_EXPECTED_SUCCESS(
+      CheckedNumeric<typename SignedIntegerForSize<Dst>::type>(
+          std::numeric_limits<typename SignedIntegerForSize<Dst>::type>::min())
+          .UnsignedAbs());
+
+  // Modulus is legal only for integers.
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() % 1);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) % 2);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) % 2);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(DstLimits::max()) % 2);
+  // Test all the different modulus combinations.
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, 1 % CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
+  CheckedNumeric<Dst> checked_dst = 1;
+  TEST_EXPECTED_VALUE(0, checked_dst %= 1);
+}
+
+// Floating point arithmetic.
+// Selected via enable_if when Dst is IEC 559 (IEEE 754). Note that for
+// floating point, min() is the smallest positive normal value, not the most
+// negative value, so negation/Abs of min() are valid.
+template <typename Dst>
+void TestSpecializedArithmetic(
+    const char* dst,
+    int line,
+    typename std::enable_if<numeric_limits<Dst>::is_iec559, int>::type = 0) {
+  typedef numeric_limits<Dst> DstLimits;
+  TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::min()));
+
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).Abs());
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).Abs());
+
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) + -1);
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) + 1);
+  // Only results beyond the finite range (e.g. max + max) are failures.
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) +
+                        -DstLimits::max());
+
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) -
+                        -DstLimits::max());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) -
+                        DstLimits::max());
+
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) * 2);
+
+  TEST_EXPECTED_VALUE(-0.5, CheckedNumeric<Dst>(-1.0) / 2);
+  EXPECT_EQ(static_cast<Dst>(1.0), CheckedNumeric<Dst>(1.0).ValueFloating());
+}
+
+// Generic arithmetic tests.
+// Exercises behavior common to all numeric types (operators, negation, Abs,
+// the four arithmetic operations, and validity accessors), then dispatches
+// to the type-specific TestSpecializedArithmetic overload above.
+template <typename Dst>
+static void TestArithmetic(const char* dst, int line) {
+  typedef numeric_limits<Dst> DstLimits;
+
+  EXPECT_EQ(true, CheckedNumeric<Dst>().IsValid());
+  // max * max overflows every supported type.
+  EXPECT_EQ(false,
+            CheckedNumeric<Dst>(CheckedNumeric<Dst>(DstLimits::max()) *
+                                DstLimits::max()).IsValid());
+  EXPECT_EQ(static_cast<Dst>(0), CheckedNumeric<Dst>().ValueOrDie());
+  EXPECT_EQ(static_cast<Dst>(0), CheckedNumeric<Dst>().ValueOrDefault(1));
+  // An invalid result yields the caller-provided default.
+  EXPECT_EQ(static_cast<Dst>(1),
+            CheckedNumeric<Dst>(CheckedNumeric<Dst>(DstLimits::max()) *
+                                DstLimits::max()).ValueOrDefault(1));
+
+  // Test the operator combinations.
+  TEST_EXPECTED_VALUE(2, CheckedNumeric<Dst>(1) + CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) - CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) * CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) / CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(2, 1 + CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(0, 1 - CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(1, 1 * CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(1, 1 / CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(2, CheckedNumeric<Dst>(1) + 1);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) - 1);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) * 1);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) / 1);
+  CheckedNumeric<Dst> checked_dst = 1;
+  TEST_EXPECTED_VALUE(2, checked_dst += 1);
+  checked_dst = 1;
+  TEST_EXPECTED_VALUE(0, checked_dst -= 1);
+  checked_dst = 1;
+  TEST_EXPECTED_VALUE(1, checked_dst *= 1);
+  checked_dst = 1;
+  TEST_EXPECTED_VALUE(1, checked_dst /= 1);
+
+  // Generic negation.
+  TEST_EXPECTED_VALUE(0, -CheckedNumeric<Dst>());
+  TEST_EXPECTED_VALUE(-1, -CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(1, -CheckedNumeric<Dst>(-1));
+  TEST_EXPECTED_VALUE(static_cast<Dst>(DstLimits::max() * -1),
+                      -CheckedNumeric<Dst>(DstLimits::max()));
+
+  // Generic absolute value.
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>().Abs());
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1).Abs());
+  TEST_EXPECTED_VALUE(DstLimits::max(),
+                      CheckedNumeric<Dst>(DstLimits::max()).Abs());
+
+  // Generic addition.
+  TEST_EXPECTED_VALUE(1, (CheckedNumeric<Dst>() + 1));
+  TEST_EXPECTED_VALUE(2, (CheckedNumeric<Dst>(1) + 1));
+  TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) + 1));
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) + 1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) +
+                        DstLimits::max());
+
+  // Generic subtraction.
+  TEST_EXPECTED_VALUE(-1, (CheckedNumeric<Dst>() - 1));
+  TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(1) - 1));
+  TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) - 1));
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) - 1);
+
+  // Generic multiplication.
+  TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>() * 1));
+  TEST_EXPECTED_VALUE(1, (CheckedNumeric<Dst>(1) * 1));
+  TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) * 2));
+  TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * 0));
+  TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) * 0));
+  TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * -1));
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) *
+                        DstLimits::max());
+
+  // Generic division.
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() / 1);
+  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) / 1);
+  TEST_EXPECTED_VALUE(DstLimits::min() / 2,
+                      CheckedNumeric<Dst>(DstLimits::min()) / 2);
+  TEST_EXPECTED_VALUE(DstLimits::max() / 2,
+                      CheckedNumeric<Dst>(DstLimits::max()) / 2);
+
+  TestSpecializedArithmetic<Dst>(dst, line);
+}
+
+// Helper macro to wrap displaying the conversion types and line numbers.
+#define TEST_ARITHMETIC(Dst) TestArithmetic<Dst>(#Dst, __LINE__)
+
+// Runs the full arithmetic suite over the standard signed integer widths.
+TEST(SafeNumerics, SignedIntegerMath) {
+  TEST_ARITHMETIC(int8_t);
+  TEST_ARITHMETIC(int);
+  TEST_ARITHMETIC(intptr_t);
+  TEST_ARITHMETIC(intmax_t);
+}
+
+// Same suite over the unsigned integer widths.
+TEST(SafeNumerics, UnsignedIntegerMath) {
+  TEST_ARITHMETIC(uint8_t);
+  TEST_ARITHMETIC(unsigned int);
+  TEST_ARITHMETIC(uintptr_t);
+  TEST_ARITHMETIC(uintmax_t);
+}
+
+// Same suite over the IEEE-754 floating-point types.
+TEST(SafeNumerics, FloatingPointMath) {
+  TEST_ARITHMETIC(float);
+  TEST_ARITHMETIC(double);
+}
+
+// Enumerates the five different conversion types we need to test.
+enum NumericConversionType {
+  // Same sign; destination can represent every source value.
+  SIGN_PRESERVING_VALUE_PRESERVING,
+  // Same sign; destination range is narrower than the source's.
+  SIGN_PRESERVING_NARROW,
+  // Signed source to an unsigned destination of equal or greater width.
+  SIGN_TO_UNSIGN_WIDEN_OR_EQUAL,
+  // Signed source to a narrower unsigned destination.
+  SIGN_TO_UNSIGN_NARROW,
+  // Unsigned source to a signed destination of equal or lesser width.
+  UNSIGN_TO_SIGN_NARROW_OR_EQUAL,
+};
+
+// Template covering the different conversion tests.
+template <typename Dst, typename Src, NumericConversionType conversion>
+struct TestNumericConversion {};
+
+// EXPECT_EQ wrappers providing specific detail on test failures.
+// Expects local variables |src|, |dst| and |line| at the expansion site.
+#define TEST_EXPECTED_RANGE(expected, actual)                                  \
+  EXPECT_EQ(expected, base::internal::DstRangeRelationToSrcRange<Dst>(actual)) \
+      << "Conversion test: " << src << " value " << actual << " to " << dst    \
+      << " on line " << line;
+
+// Conversions where every source value is representable in the destination.
+template <typename Dst, typename Src>
+struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_VALUE_PRESERVING> {
+  static void Test(const char *dst, const char *src, int line) {
+    typedef numeric_limits<Src> SrcLimits;
+    typedef numeric_limits<Dst> DstLimits;
+    // Integral to floating.
+    static_assert((DstLimits::is_iec559 && SrcLimits::is_integer) ||
+                      // Not floating to integral and...
+                      (!(DstLimits::is_integer && SrcLimits::is_iec559) &&
+                       // Same sign, same numeric, source is narrower or same.
+                       ((SrcLimits::is_signed == DstLimits::is_signed &&
+                         sizeof(Dst) >= sizeof(Src)) ||
+                        // Or signed destination and source is smaller
+                        (DstLimits::is_signed && sizeof(Dst) > sizeof(Src)))),
+                  "Comparison must be sign preserving and value preserving");
+
+    const CheckedNumeric<Dst> checked_dst = SrcLimits::max();
+    TEST_EXPECTED_SUCCESS(checked_dst);
+    if (MaxExponent<Dst>::value > MaxExponent<Src>::value) {
+      if (MaxExponent<Dst>::value >= MaxExponent<Src>::value * 2 - 1) {
+        // At least twice larger type.
+        TEST_EXPECTED_SUCCESS(SrcLimits::max() * checked_dst);
+
+      } else {  // Larger, but not at least twice as large.
+        TEST_EXPECTED_FAILURE(SrcLimits::max() * checked_dst);
+        TEST_EXPECTED_SUCCESS(checked_dst + 1);
+      }
+    } else {  // Same width type.
+      TEST_EXPECTED_FAILURE(checked_dst + 1);
+    }
+
+    TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::max());
+    TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
+    if (SrcLimits::is_iec559) {
+      // Only the non-finite values fall outside the destination's range.
+      TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::max() * static_cast<Src>(-1));
+      TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::infinity());
+      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::infinity() * -1);
+      TEST_EXPECTED_RANGE(RANGE_INVALID, SrcLimits::quiet_NaN());
+    } else if (numeric_limits<Src>::is_signed) {
+      TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(-1));
+      TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
+    }
+  }
+};
+
+// Same-sign conversions where the destination cannot hold the source's
+// extremes (including float -> integer, which is always "narrowing" here).
+template <typename Dst, typename Src>
+struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_NARROW> {
+  static void Test(const char *dst, const char *src, int line) {
+    typedef numeric_limits<Src> SrcLimits;
+    typedef numeric_limits<Dst> DstLimits;
+    static_assert(SrcLimits::is_signed == DstLimits::is_signed,
+                  "Destination and source sign must be the same");
+    static_assert(sizeof(Dst) < sizeof(Src) ||
+                      (DstLimits::is_integer && SrcLimits::is_iec559),
+                  "Destination must be narrower than source");
+
+    const CheckedNumeric<Dst> checked_dst;
+    TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
+    TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
+    TEST_EXPECTED_FAILURE(checked_dst - SrcLimits::max());
+
+    TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
+    TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
+    if (SrcLimits::is_iec559) {
+      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::max() * -1);
+      TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(-1));
+      TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::infinity());
+      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::infinity() * -1);
+      TEST_EXPECTED_RANGE(RANGE_INVALID, SrcLimits::quiet_NaN());
+      if (DstLimits::is_integer) {
+        // When the float's mantissa has fewer digits than the integer type,
+        // round-tripping max() through the float overshoots the range.
+        if (SrcLimits::digits < DstLimits::digits) {
+          TEST_EXPECTED_RANGE(RANGE_OVERFLOW,
+                              static_cast<Src>(DstLimits::max()));
+        } else {
+          TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::max()));
+        }
+        TEST_EXPECTED_RANGE(
+            RANGE_VALID,
+            static_cast<Src>(GetMaxConvertibleToFloat<Src, Dst>()));
+        TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::min()));
+      }
+    } else if (SrcLimits::is_signed) {
+      TEST_EXPECTED_VALUE(-1, checked_dst - static_cast<Src>(1));
+      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
+      TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(-1));
+    } else {
+      TEST_EXPECTED_FAILURE(checked_dst - static_cast<Src>(1));
+      TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
+    }
+  }
+};
+
+// Signed source into an equal-or-wider unsigned destination: all non-negative
+// source values fit, any negative value underflows.
+template <typename Dst, typename Src>
+struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL> {
+  static void Test(const char *dst, const char *src, int line) {
+    typedef numeric_limits<Src> SrcLimits;
+    typedef numeric_limits<Dst> DstLimits;
+    static_assert(sizeof(Dst) >= sizeof(Src),
+                  "Destination must be equal or wider than source.");
+    static_assert(SrcLimits::is_signed, "Source must be signed");
+    static_assert(!DstLimits::is_signed, "Destination must be unsigned");
+
+    const CheckedNumeric<Dst> checked_dst;
+    TEST_EXPECTED_VALUE(SrcLimits::max(), checked_dst + SrcLimits::max());
+    TEST_EXPECTED_FAILURE(checked_dst + static_cast<Src>(-1));
+    TEST_EXPECTED_FAILURE(checked_dst + -SrcLimits::max());
+
+    TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
+    TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::max());
+    TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
+    TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, static_cast<Src>(-1));
+  }
+};
+
+// Signed source into a narrower unsigned destination: negative values
+// underflow and large positives overflow.
+template <typename Dst, typename Src>
+struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_NARROW> {
+  static void Test(const char *dst, const char *src, int line) {
+    typedef numeric_limits<Src> SrcLimits;
+    typedef numeric_limits<Dst> DstLimits;
+    static_assert((DstLimits::is_integer && SrcLimits::is_iec559) ||
+                      (sizeof(Dst) < sizeof(Src)),
+                  "Destination must be narrower than source.");
+    static_assert(SrcLimits::is_signed, "Source must be signed.");
+    static_assert(!DstLimits::is_signed, "Destination must be unsigned.");
+
+    const CheckedNumeric<Dst> checked_dst;
+    TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
+    TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
+    TEST_EXPECTED_FAILURE(checked_dst + static_cast<Src>(-1));
+    TEST_EXPECTED_FAILURE(checked_dst + -SrcLimits::max());
+
+    TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
+    TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
+    TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, static_cast<Src>(-1));
+    if (SrcLimits::is_iec559) {
+      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::max() * -1);
+      TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::infinity());
+      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::infinity() * -1);
+      TEST_EXPECTED_RANGE(RANGE_INVALID, SrcLimits::quiet_NaN());
+      if (DstLimits::is_integer) {
+        // See the SIGN_PRESERVING_NARROW case: round-tripping max() through a
+        // float with fewer mantissa digits overshoots the integer range.
+        if (SrcLimits::digits < DstLimits::digits) {
+          TEST_EXPECTED_RANGE(RANGE_OVERFLOW,
+                              static_cast<Src>(DstLimits::max()));
+        } else {
+          TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::max()));
+        }
+        TEST_EXPECTED_RANGE(
+            RANGE_VALID,
+            static_cast<Src>(GetMaxConvertibleToFloat<Src, Dst>()));
+        TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::min()));
+      }
+    } else {
+      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
+    }
+  }
+};
+
+// Unsigned source into an equal-or-narrower signed destination: values above
+// the signed maximum overflow; zero and small values are fine.
+template <typename Dst, typename Src>
+struct TestNumericConversion<Dst, Src, UNSIGN_TO_SIGN_NARROW_OR_EQUAL> {
+  static void Test(const char *dst, const char *src, int line) {
+    typedef numeric_limits<Src> SrcLimits;
+    typedef numeric_limits<Dst> DstLimits;
+    static_assert(sizeof(Dst) <= sizeof(Src),
+                  "Destination must be narrower or equal to source.");
+    static_assert(!SrcLimits::is_signed, "Source must be unsigned.");
+    static_assert(DstLimits::is_signed, "Destination must be signed.");
+
+    const CheckedNumeric<Dst> checked_dst;
+    TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
+    TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
+    // Unsigned min() is 0, which always fits in a signed destination.
+    TEST_EXPECTED_VALUE(SrcLimits::min(), checked_dst + SrcLimits::min());
+
+    TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
+    TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
+    TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
+  }
+};
+
+// Helper macro to wrap displaying the conversion types and line numbers
+#define TEST_NUMERIC_CONVERSION(d, s, t) \
+  TestNumericConversion<d, s, t>::Test(#d, #s, __LINE__)
+
+// Conversion matrix with 8-bit types as the smallest participant.
+TEST(SafeNumerics, IntMinOperations) {
+  TEST_NUMERIC_CONVERSION(int8_t, int8_t, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(uint8_t, uint8_t, SIGN_PRESERVING_VALUE_PRESERVING);
+
+  TEST_NUMERIC_CONVERSION(int8_t, int, SIGN_PRESERVING_NARROW);
+  TEST_NUMERIC_CONVERSION(uint8_t, unsigned int, SIGN_PRESERVING_NARROW);
+  TEST_NUMERIC_CONVERSION(int8_t, float, SIGN_PRESERVING_NARROW);
+
+  TEST_NUMERIC_CONVERSION(uint8_t, int8_t, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL);
+
+  TEST_NUMERIC_CONVERSION(uint8_t, int, SIGN_TO_UNSIGN_NARROW);
+  TEST_NUMERIC_CONVERSION(uint8_t, intmax_t, SIGN_TO_UNSIGN_NARROW);
+  TEST_NUMERIC_CONVERSION(uint8_t, float, SIGN_TO_UNSIGN_NARROW);
+
+  TEST_NUMERIC_CONVERSION(int8_t, unsigned int, UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
+  TEST_NUMERIC_CONVERSION(int8_t, uintmax_t, UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
+}
+
+// Conversion matrix centered on the native int width.
+TEST(SafeNumerics, IntOperations) {
+  TEST_NUMERIC_CONVERSION(int, int, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(unsigned int, unsigned int,
+                          SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(int, int8_t, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(unsigned int, uint8_t,
+                          SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(int, uint8_t, SIGN_PRESERVING_VALUE_PRESERVING);
+
+  TEST_NUMERIC_CONVERSION(int, intmax_t, SIGN_PRESERVING_NARROW);
+  TEST_NUMERIC_CONVERSION(unsigned int, uintmax_t, SIGN_PRESERVING_NARROW);
+  TEST_NUMERIC_CONVERSION(int, float, SIGN_PRESERVING_NARROW);
+  TEST_NUMERIC_CONVERSION(int, double, SIGN_PRESERVING_NARROW);
+
+  TEST_NUMERIC_CONVERSION(unsigned int, int, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL);
+  TEST_NUMERIC_CONVERSION(unsigned int, int8_t, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL);
+
+  TEST_NUMERIC_CONVERSION(unsigned int, intmax_t, SIGN_TO_UNSIGN_NARROW);
+  TEST_NUMERIC_CONVERSION(unsigned int, float, SIGN_TO_UNSIGN_NARROW);
+  TEST_NUMERIC_CONVERSION(unsigned int, double, SIGN_TO_UNSIGN_NARROW);
+
+  TEST_NUMERIC_CONVERSION(int, unsigned int, UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
+  TEST_NUMERIC_CONVERSION(int, uintmax_t, UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
+}
+
+// Conversion matrix with the widest integer types.
+TEST(SafeNumerics, IntMaxOperations) {
+  TEST_NUMERIC_CONVERSION(intmax_t, intmax_t, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(uintmax_t, uintmax_t,
+                          SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(intmax_t, int, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(uintmax_t, unsigned int,
+                          SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(intmax_t, unsigned int,
+                          SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(intmax_t, uint8_t, SIGN_PRESERVING_VALUE_PRESERVING);
+
+  TEST_NUMERIC_CONVERSION(intmax_t, float, SIGN_PRESERVING_NARROW);
+  TEST_NUMERIC_CONVERSION(intmax_t, double, SIGN_PRESERVING_NARROW);
+
+  TEST_NUMERIC_CONVERSION(uintmax_t, int, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL);
+  TEST_NUMERIC_CONVERSION(uintmax_t, int8_t, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL);
+
+  TEST_NUMERIC_CONVERSION(uintmax_t, float, SIGN_TO_UNSIGN_NARROW);
+  TEST_NUMERIC_CONVERSION(uintmax_t, double, SIGN_TO_UNSIGN_NARROW);
+
+  TEST_NUMERIC_CONVERSION(intmax_t, uintmax_t, UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
+}
+
+// Integer -> float conversions are value preserving here only in the sense
+// used by the enum (sign and exponent range), not exact representation.
+TEST(SafeNumerics, FloatOperations) {
+  TEST_NUMERIC_CONVERSION(float, intmax_t, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(float, uintmax_t,
+                          SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(float, int, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(float, unsigned int,
+                          SIGN_PRESERVING_VALUE_PRESERVING);
+
+  TEST_NUMERIC_CONVERSION(float, double, SIGN_PRESERVING_NARROW);
+}
+
+TEST(SafeNumerics, DoubleOperations) {
+  TEST_NUMERIC_CONVERSION(double, intmax_t, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(double, uintmax_t,
+                          SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(double, int, SIGN_PRESERVING_VALUE_PRESERVING);
+  TEST_NUMERIC_CONVERSION(double, unsigned int,
+                          SIGN_PRESERVING_VALUE_PRESERVING);
+}
+
+// size_t is unsigned and at least as wide as int on supported platforms.
+TEST(SafeNumerics, SizeTOperations) {
+  TEST_NUMERIC_CONVERSION(size_t, int, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL);
+  TEST_NUMERIC_CONVERSION(int, size_t, UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
+}
+
+// Covers checked_cast, strict_cast, StrictNumeric coercions, IsValueNegative,
+// and the saturation corner cases of saturated_cast.
+TEST(SafeNumerics, CastTests) {
+// MSVC catches and warns that we're forcing saturation in these tests.
+// Since that's intentional, we need to shut this warning off.
+#if defined(COMPILER_MSVC)
+#pragma warning(disable : 4756)
+#endif
+
+  int small_positive = 1;
+  int small_negative = -1;
+  double double_small = 1.0;
+  double double_large = numeric_limits<double>::max();
+  double double_infinity = numeric_limits<float>::infinity();
+  double double_large_int = numeric_limits<int>::max();
+  double double_small_int = numeric_limits<int>::min();
+
+  // Just test that the casts compile, since the other tests cover logic.
+  EXPECT_EQ(0, checked_cast<int>(static_cast<size_t>(0)));
+  EXPECT_EQ(0, strict_cast<int>(static_cast<char>(0)));
+  EXPECT_EQ(0, strict_cast<int>(static_cast<unsigned char>(0)));
+  EXPECT_EQ(0U, strict_cast<unsigned>(static_cast<unsigned char>(0)));
+  EXPECT_EQ(1ULL, static_cast<uint64_t>(StrictNumeric<size_t>(1U)));
+  EXPECT_EQ(1ULL, static_cast<uint64_t>(SizeT(1U)));
+  EXPECT_EQ(1U, static_cast<size_t>(StrictNumeric<unsigned>(1U)));
+
+  EXPECT_TRUE(CheckedNumeric<uint64_t>(StrictNumeric<unsigned>(1U)).IsValid());
+  EXPECT_TRUE(CheckedNumeric<int>(StrictNumeric<unsigned>(1U)).IsValid());
+  EXPECT_FALSE(CheckedNumeric<unsigned>(StrictNumeric<int>(-1)).IsValid());
+
+  EXPECT_TRUE(IsValueNegative(-1));
+  EXPECT_TRUE(IsValueNegative(numeric_limits<int>::min()));
+  EXPECT_FALSE(IsValueNegative(numeric_limits<unsigned>::min()));
+  EXPECT_TRUE(IsValueNegative(-numeric_limits<double>::max()));
+  EXPECT_FALSE(IsValueNegative(0));
+  EXPECT_FALSE(IsValueNegative(1));
+  EXPECT_FALSE(IsValueNegative(0u));
+  EXPECT_FALSE(IsValueNegative(1u));
+  EXPECT_FALSE(IsValueNegative(numeric_limits<int>::max()));
+  EXPECT_FALSE(IsValueNegative(numeric_limits<unsigned>::max()));
+  EXPECT_FALSE(IsValueNegative(numeric_limits<double>::max()));
+
+  // These casts and coercions will fail to compile:
+  // EXPECT_EQ(0, strict_cast<int>(static_cast<size_t>(0)));
+  // EXPECT_EQ(0, strict_cast<size_t>(static_cast<int>(0)));
+  // EXPECT_EQ(1ULL, StrictNumeric<size_t>(1));
+  // EXPECT_EQ(1, StrictNumeric<size_t>(1U));
+
+  // Test various saturation corner cases.
+  EXPECT_EQ(saturated_cast<int>(small_negative),
+            static_cast<int>(small_negative));
+  EXPECT_EQ(saturated_cast<int>(small_positive),
+            static_cast<int>(small_positive));
+  // Negative to unsigned clamps to zero.
+  EXPECT_EQ(saturated_cast<unsigned>(small_negative),
+            static_cast<unsigned>(0));
+  EXPECT_EQ(saturated_cast<int>(double_small),
+            static_cast<int>(double_small));
+  EXPECT_EQ(saturated_cast<int>(double_large), numeric_limits<int>::max());
+  // double -> float saturation produces +/- infinity.
+  EXPECT_EQ(saturated_cast<float>(double_large), double_infinity);
+  EXPECT_EQ(saturated_cast<float>(-double_large), -double_infinity);
+  EXPECT_EQ(numeric_limits<int>::min(), saturated_cast<int>(double_small_int));
+  EXPECT_EQ(numeric_limits<int>::max(), saturated_cast<int>(double_large_int));
+
+  // NaN maps to zero under the default saturation policy.
+  float not_a_number = std::numeric_limits<float>::infinity() -
+                       std::numeric_limits<float>::infinity();
+  EXPECT_TRUE(std::isnan(not_a_number));
+  EXPECT_EQ(0, saturated_cast<int>(not_a_number));
+}
+
+#if GTEST_HAS_DEATH_TEST
+
+// The SaturatedCastNaNBehaviorCheck policy is expected to terminate the
+// process on NaN input rather than return a saturated value.
+TEST(SafeNumerics, SaturatedCastChecks) {
+  float not_a_number = std::numeric_limits<float>::infinity() -
+                       std::numeric_limits<float>::infinity();
+  EXPECT_TRUE(std::isnan(not_a_number));
+  EXPECT_DEATH((saturated_cast<int, base::SaturatedCastNaNBehaviorCheck>(
+                   not_a_number)), "");
+}
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+// Exhaustive boundary checks for IsValueInRangeForNumericType over 32- and
+// 64-bit signed/unsigned destinations, including the wrap-around boundaries.
+TEST(SafeNumerics, IsValueInRangeForNumericType) {
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(0));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(1));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(2));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(-1));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(0xffffffffu));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0xffffffff)));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0x100000000)));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0x100000001)));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(
+      std::numeric_limits<int32_t>::min()));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(
+      std::numeric_limits<int64_t>::min()));
+
+  EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(0));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(1));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(2));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(-1));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(0x7fffffff));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(0x7fffffffu));
+  // 0x80000000 is one past int32_t's maximum, however spelled.
+  EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(0x80000000u));
+  EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(0xffffffffu));
+  EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0x80000000)));
+  EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0xffffffff)));
+  EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0x100000000)));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(
+      std::numeric_limits<int32_t>::min()));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(
+      static_cast<int64_t>(std::numeric_limits<int32_t>::min())));
+  EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(
+      static_cast<int64_t>(std::numeric_limits<int32_t>::min()) - 1));
+  EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(
+      std::numeric_limits<int64_t>::min()));
+
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(0));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(1));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(2));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(-1));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(0xffffffffu));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0xffffffff)));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0x100000000)));
+  EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0x100000001)));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(
+      std::numeric_limits<int32_t>::min()));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(INT64_C(-1)));
+  EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(
+      std::numeric_limits<int64_t>::min()));
+
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(1));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(2));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(-1));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0x7fffffff));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0x7fffffffu));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0x80000000u));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0xffffffffu));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(INT64_C(0x80000000)));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(INT64_C(0xffffffff)));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(INT64_C(0x100000000)));
+  EXPECT_TRUE(
+      IsValueInRangeForNumericType<int64_t>(INT64_C(0x7fffffffffffffff)));
+  EXPECT_TRUE(
+      IsValueInRangeForNumericType<int64_t>(UINT64_C(0x7fffffffffffffff)));
+  // One past int64_t's maximum, representable only as uint64_t.
+  EXPECT_FALSE(
+      IsValueInRangeForNumericType<int64_t>(UINT64_C(0x8000000000000000)));
+  EXPECT_FALSE(
+      IsValueInRangeForNumericType<int64_t>(UINT64_C(0xffffffffffffffff)));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
+      std::numeric_limits<int32_t>::min()));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
+      static_cast<int64_t>(std::numeric_limits<int32_t>::min())));
+  EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
+      std::numeric_limits<int64_t>::min()));
+}
+
+// Sanity-checks the compound assignment operators (+=, -=, *=, /=) and that
+// invalidity is sticky: once an operation overflows, subsequent operations
+// keep the value invalid.
+TEST(SafeNumerics, CompoundNumericOperations) {
+  CheckedNumeric<int> a = 1;
+  CheckedNumeric<int> b = 2;
+  CheckedNumeric<int> c = 3;
+  CheckedNumeric<int> d = 4;
+  a += b;
+  EXPECT_EQ(3, a.ValueOrDie());
+  a -= c;
+  EXPECT_EQ(0, a.ValueOrDie());
+  d /= b;
+  EXPECT_EQ(2, d.ValueOrDie());
+  d *= d;
+  EXPECT_EQ(4, d.ValueOrDie());
+
+  CheckedNumeric<int> too_large = std::numeric_limits<int>::max();
+  EXPECT_TRUE(too_large.IsValid());
+  too_large += d;
+  EXPECT_FALSE(too_large.IsValid());
+  too_large -= d;
+  EXPECT_FALSE(too_large.IsValid());
+  too_large /= d;
+  EXPECT_FALSE(too_large.IsValid());
+}
diff --git a/libchrome/base/observer_list.h b/libchrome/base/observer_list.h
new file mode 100644
index 0000000..afe1f46
--- /dev/null
+++ b/libchrome/base/observer_list.h
@@ -0,0 +1,249 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_OBSERVER_LIST_H_
+#define BASE_OBSERVER_LIST_H_
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <limits>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/stl_util.h"
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// OVERVIEW:
+//
+// A container for a list of observers. Unlike a normal STL vector or list,
+// this container can be modified during iteration without invalidating the
+// iterator. So, it safely handles the case of an observer removing itself
+// or other observers from the list while observers are being notified.
+//
+// TYPICAL USAGE:
+//
+// class MyWidget {
+// public:
+// ...
+//
+// class Observer {
+// public:
+// virtual void OnFoo(MyWidget* w) = 0;
+// virtual void OnBar(MyWidget* w, int x, int y) = 0;
+// };
+//
+// void AddObserver(Observer* obs) {
+// observer_list_.AddObserver(obs);
+// }
+//
+// void RemoveObserver(Observer* obs) {
+// observer_list_.RemoveObserver(obs);
+// }
+//
+// void NotifyFoo() {
+// FOR_EACH_OBSERVER(Observer, observer_list_, OnFoo(this));
+// }
+//
+// void NotifyBar(int x, int y) {
+// FOR_EACH_OBSERVER(Observer, observer_list_, OnBar(this, x, y));
+// }
+//
+// private:
+// base::ObserverList<Observer> observer_list_;
+// };
+//
+//
+///////////////////////////////////////////////////////////////////////////////
+
+namespace base {
+
+template <typename ObserverType>
+class ObserverListThreadSafe;
+
+template <class ObserverType>
+class ObserverListBase
+    : public SupportsWeakPtr<ObserverListBase<ObserverType>> {
+ public:
+  // Enumeration of which observers are notified.
+  enum NotificationType {
+    // Specifies that any observers added during notification are notified.
+    // This is the default type if no type is provided to the constructor.
+    NOTIFY_ALL,
+
+    // Specifies that observers added while sending out notification are not
+    // notified.
+    NOTIFY_EXISTING_ONLY
+  };
+
+  // An iterator class that can be used to access the list of observers. See
+  // also the FOR_EACH_OBSERVER macro defined below.
+  class Iterator {
+   public:
+    explicit Iterator(ObserverListBase<ObserverType>* list);
+    ~Iterator();
+    ObserverType* GetNext();
+
+   private:
+    WeakPtr<ObserverListBase<ObserverType>> list_;
+    size_t index_;
+    size_t max_index_;
+  };
+
+  ObserverListBase() : notify_depth_(0), type_(NOTIFY_ALL) {}
+  explicit ObserverListBase(NotificationType type)
+      : notify_depth_(0), type_(type) {}
+
+  // Add an observer to the list. An observer should not be added to
+  // the same list more than once.
+  void AddObserver(ObserverType* obs);
+
+  // Remove an observer from the list if it is in the list.
+  void RemoveObserver(ObserverType* obs);
+
+  // Determine whether a particular observer is in the list.
+  bool HasObserver(const ObserverType* observer) const;
+
+  void Clear();
+
+ protected:
+  size_t size() const { return observers_.size(); }
+
+  void Compact();
+
+ private:
+  friend class ObserverListThreadSafe<ObserverType>;
+
+  typedef std::vector<ObserverType*> ListType;
+
+  ListType observers_;
+  int notify_depth_;
+  NotificationType type_;
+
+  friend class ObserverListBase::Iterator;
+
+  DISALLOW_COPY_AND_ASSIGN(ObserverListBase);
+};
+
+template <class ObserverType>
+ObserverListBase<ObserverType>::Iterator::Iterator(
+ ObserverListBase<ObserverType>* list)
+ : list_(list->AsWeakPtr()),
+ index_(0),
+ max_index_(list->type_ == NOTIFY_ALL ? std::numeric_limits<size_t>::max()
+ : list->observers_.size()) {
+ ++list_->notify_depth_;
+}
+
+template <class ObserverType>
+ObserverListBase<ObserverType>::Iterator::~Iterator() {
+ if (list_.get() && --list_->notify_depth_ == 0)
+ list_->Compact();
+}
+
+template <class ObserverType>
+ObserverType* ObserverListBase<ObserverType>::Iterator::GetNext() {
+  if (!list_.get())
+    return nullptr;
+  ListType& observers = list_->observers_;
+  // Skip entries nulled out by RemoveObserver() or Clear() mid-notification.
+  size_t max_index = std::min(max_index_, observers.size());
+  while (index_ < max_index && !observers[index_])
+    ++index_;
+  return index_ < max_index ? observers[index_++] : nullptr;
+}
+
+template <class ObserverType>
+void ObserverListBase<ObserverType>::AddObserver(ObserverType* obs) {
+ DCHECK(obs);
+ if (ContainsValue(observers_, obs)) {
+ NOTREACHED() << "Observers can only be added once!";
+ return;
+ }
+ observers_.push_back(obs);
+}
+
+template <class ObserverType>
+void ObserverListBase<ObserverType>::RemoveObserver(ObserverType* obs) {
+ DCHECK(obs);
+ typename ListType::iterator it =
+ std::find(observers_.begin(), observers_.end(), obs);
+ if (it != observers_.end()) {
+ if (notify_depth_) {
+ *it = nullptr;
+ } else {
+ observers_.erase(it);
+ }
+ }
+}
+
+template <class ObserverType>
+bool ObserverListBase<ObserverType>::HasObserver(
+ const ObserverType* observer) const {
+ for (size_t i = 0; i < observers_.size(); ++i) {
+ if (observers_[i] == observer)
+ return true;
+ }
+ return false;
+}
+
+template <class ObserverType>
+void ObserverListBase<ObserverType>::Clear() {
+ if (notify_depth_) {
+ for (typename ListType::iterator it = observers_.begin();
+ it != observers_.end(); ++it) {
+ *it = nullptr;
+ }
+ } else {
+ observers_.clear();
+ }
+}
+
+template <class ObserverType>
+void ObserverListBase<ObserverType>::Compact() {
+ observers_.erase(
+ std::remove(observers_.begin(), observers_.end(), nullptr),
+ observers_.end());
+}
+
+template <class ObserverType, bool check_empty = false>
+class ObserverList : public ObserverListBase<ObserverType> {
+ public:
+ typedef typename ObserverListBase<ObserverType>::NotificationType
+ NotificationType;
+
+ ObserverList() {}
+ explicit ObserverList(NotificationType type)
+ : ObserverListBase<ObserverType>(type) {}
+
+ ~ObserverList() {
+ // When check_empty is true, assert that the list is empty on destruction.
+ if (check_empty) {
+ ObserverListBase<ObserverType>::Compact();
+ DCHECK_EQ(ObserverListBase<ObserverType>::size(), 0U);
+ }
+ }
+
+ bool might_have_observers() const {
+ return ObserverListBase<ObserverType>::size() != 0;
+ }
+};
+
+#define FOR_EACH_OBSERVER(ObserverType, observer_list, func) \
+ do { \
+ if ((observer_list).might_have_observers()) { \
+ typename base::ObserverListBase<ObserverType>::Iterator \
+ it_inside_observer_macro(&observer_list); \
+ ObserverType* obs; \
+ while ((obs = it_inside_observer_macro.GetNext()) != nullptr) \
+ obs->func; \
+ } \
+ } while (0)
+
+} // namespace base
+
+#endif // BASE_OBSERVER_LIST_H_
diff --git a/libchrome/base/observer_list_threadsafe.h b/libchrome/base/observer_list_threadsafe.h
new file mode 100644
index 0000000..fe78354
--- /dev/null
+++ b/libchrome/base/observer_list_threadsafe.h
@@ -0,0 +1,273 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_OBSERVER_LIST_THREADSAFE_H_
+#define BASE_OBSERVER_LIST_THREADSAFE_H_
+
+#include <algorithm>
+#include <map>
+#include <tuple>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "base/observer_list.h"
+#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// OVERVIEW:
+//
+// A thread-safe container for a list of observers.
+// This is similar to the observer_list (see observer_list.h), but it
+// is more robust for multi-threaded situations.
+//
+// The following use cases are supported:
+// * Observers can register for notifications from any thread.
+// Callbacks to the observer will occur on the same thread where
+// the observer initially called AddObserver() from.
+// * Any thread may trigger a notification via Notify().
+// * Observers can remove themselves from the observer list inside
+// of a callback.
+// * If one thread is notifying observers concurrently with an observer
+// removing itself from the observer list, the notifications will
+// be silently dropped.
+//
+// The drawback of the threadsafe observer list is that notifications
+// are not as real-time as the non-threadsafe version of this class.
+// Notifications will always be done via PostTask() to another thread,
+// whereas with the non-thread-safe observer_list, notifications happen
+// synchronously and immediately.
+//
+// IMPLEMENTATION NOTES
+// The ObserverListThreadSafe maintains an ObserverList for each thread
+// which uses the ThreadSafeObserver. When Notifying the observers,
+// we simply call PostTask to each registered thread, and then each thread
+// will notify its regular ObserverList.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+namespace base {
+
+// Forward declaration for ObserverListThreadSafeTraits.
+template <class ObserverType>
+class ObserverListThreadSafe;
+
+namespace internal {
+
+// An UnboundMethod is a wrapper for a method where the actual object is
+// provided at Run dispatch time.
+template <class T, class Method, class Params>
+class UnboundMethod {
+ public:
+ UnboundMethod(Method m, const Params& p) : m_(m), p_(p) {
+ static_assert((internal::ParamsUseScopedRefptrCorrectly<Params>::value),
+ "bad unbound method params");
+ }
+ void Run(T* obj) const {
+ DispatchToMethod(obj, m_, p_);
+ }
+ private:
+ Method m_;
+ Params p_;
+};
+
+} // namespace internal
+
+// This class is used to work around VS2005 not accepting:
+//
+// friend class
+// base::RefCountedThreadSafe<ObserverListThreadSafe<ObserverType>>;
+//
+// Instead of friending the class, we could friend the actual function
+// which calls delete. However, this ends up being
+// RefCountedThreadSafe::DeleteInternal(), which is private. So we
+// define our own templated traits class so we can friend it.
+template <class T>
+struct ObserverListThreadSafeTraits {
+ static void Destruct(const ObserverListThreadSafe<T>* x) {
+ delete x;
+ }
+};
+
+template <class ObserverType>
+class ObserverListThreadSafe
+ : public RefCountedThreadSafe<
+ ObserverListThreadSafe<ObserverType>,
+ ObserverListThreadSafeTraits<ObserverType>> {
+ public:
+ typedef typename ObserverList<ObserverType>::NotificationType
+ NotificationType;
+
+ ObserverListThreadSafe()
+ : type_(ObserverListBase<ObserverType>::NOTIFY_ALL) {}
+ explicit ObserverListThreadSafe(NotificationType type) : type_(type) {}
+
+ // Add an observer to the list. An observer should not be added to
+ // the same list more than once.
+ void AddObserver(ObserverType* obs) {
+ // If there is not a current MessageLoop, it is impossible to notify on it,
+ // so do not add the observer.
+ if (!MessageLoop::current())
+ return;
+
+ ObserverList<ObserverType>* list = nullptr;
+ PlatformThreadId thread_id = PlatformThread::CurrentId();
+ {
+ AutoLock lock(list_lock_);
+ if (observer_lists_.find(thread_id) == observer_lists_.end())
+ observer_lists_[thread_id] = new ObserverListContext(type_);
+ list = &(observer_lists_[thread_id]->list);
+ }
+ list->AddObserver(obs);
+ }
+
+ // Remove an observer from the list if it is in the list.
+ // If there are pending notifications in-transit to the observer, they will
+ // be aborted.
+ // If the observer to be removed is in the list, RemoveObserver MUST
+ // be called from the same thread which called AddObserver.
+ void RemoveObserver(ObserverType* obs) {
+ ObserverListContext* context = nullptr;
+ ObserverList<ObserverType>* list = nullptr;
+ PlatformThreadId thread_id = PlatformThread::CurrentId();
+ {
+ AutoLock lock(list_lock_);
+ typename ObserversListMap::iterator it = observer_lists_.find(thread_id);
+ if (it == observer_lists_.end()) {
+ // This will happen if we try to remove an observer on a thread
+ // we never added an observer for.
+ return;
+ }
+ context = it->second;
+ list = &context->list;
+
+ // If we're about to remove the last observer from the list,
+ // then we can remove this observer_list entirely.
+ if (list->HasObserver(obs) && list->size() == 1)
+ observer_lists_.erase(it);
+ }
+ list->RemoveObserver(obs);
+
+ // If RemoveObserver is called from a notification, the size will be
+ // nonzero. Instead of deleting here, the NotifyWrapper will delete
+ // when it finishes iterating.
+ if (list->size() == 0)
+ delete context;
+ }
+
+ // Verifies that the list is currently empty (i.e. there are no observers).
+ void AssertEmpty() const {
+ AutoLock lock(list_lock_);
+ DCHECK(observer_lists_.empty());
+ }
+
+ // Notify methods.
+ // Make a thread-safe callback to each Observer in the list.
+ // Note, these calls are effectively asynchronous. You cannot assume
+ // that at the completion of the Notify call that all Observers have
+ // been Notified. The notification may still be pending delivery.
+ template <class Method, class... Params>
+ void Notify(const tracked_objects::Location& from_here,
+ Method m,
+ const Params&... params) {
+ internal::UnboundMethod<ObserverType, Method, std::tuple<Params...>> method(
+ m, std::make_tuple(params...));
+
+ AutoLock lock(list_lock_);
+ for (const auto& entry : observer_lists_) {
+ ObserverListContext* context = entry.second;
+ context->task_runner->PostTask(
+ from_here,
+ Bind(&ObserverListThreadSafe<ObserverType>::template NotifyWrapper<
+ Method, std::tuple<Params...>>,
+ this, context, method));
+ }
+ }
+
+ private:
+ // See comment above ObserverListThreadSafeTraits' definition.
+ friend struct ObserverListThreadSafeTraits<ObserverType>;
+
+ struct ObserverListContext {
+ explicit ObserverListContext(NotificationType type)
+ : task_runner(ThreadTaskRunnerHandle::Get()), list(type) {}
+
+ scoped_refptr<SingleThreadTaskRunner> task_runner;
+ ObserverList<ObserverType> list;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ObserverListContext);
+ };
+
+ ~ObserverListThreadSafe() {
+ STLDeleteValues(&observer_lists_);
+ }
+
+ // Wrapper which is called to fire the notifications for each thread's
+ // ObserverList. This function MUST be called on the thread which owns
+ // the unsafe ObserverList.
+ template <class Method, class Params>
+ void NotifyWrapper(
+ ObserverListContext* context,
+ const internal::UnboundMethod<ObserverType, Method, Params>& method) {
+ // Check that this list still needs notifications.
+ {
+ AutoLock lock(list_lock_);
+ typename ObserversListMap::iterator it =
+ observer_lists_.find(PlatformThread::CurrentId());
+
+ // The ObserverList could have been removed already. In fact, it could
+ // have been removed and then re-added! If the master list's loop
+ // does not match this one, then we do not need to finish this
+ // notification.
+ if (it == observer_lists_.end() || it->second != context)
+ return;
+ }
+
+ {
+ typename ObserverList<ObserverType>::Iterator it(&context->list);
+ ObserverType* obs;
+ while ((obs = it.GetNext()) != nullptr)
+ method.Run(obs);
+ }
+
+ // If there are no more observers on the list, we can now delete it.
+ if (context->list.size() == 0) {
+ {
+ AutoLock lock(list_lock_);
+        // Remove |context| from the map if it's not already removed.
+ // This can happen if multiple observers got removed in a notification.
+ // See http://crbug.com/55725.
+ typename ObserversListMap::iterator it =
+ observer_lists_.find(PlatformThread::CurrentId());
+ if (it != observer_lists_.end() && it->second == context)
+ observer_lists_.erase(it);
+ }
+ delete context;
+ }
+ }
+
+ // Key by PlatformThreadId because in tests, clients can attempt to remove
+ // observers without a MessageLoop. If this were keyed by MessageLoop, that
+ // operation would be silently ignored, leaving garbage in the ObserverList.
+ typedef std::map<PlatformThreadId, ObserverListContext*>
+ ObserversListMap;
+
+ mutable Lock list_lock_; // Protects the observer_lists_.
+ ObserversListMap observer_lists_;
+ const NotificationType type_;
+
+ DISALLOW_COPY_AND_ASSIGN(ObserverListThreadSafe);
+};
+
+} // namespace base
+
+#endif // BASE_OBSERVER_LIST_THREADSAFE_H_
diff --git a/libchrome/base/observer_list_unittest.cc b/libchrome/base/observer_list_unittest.cc
new file mode 100644
index 0000000..097a2ed
--- /dev/null
+++ b/libchrome/base/observer_list_unittest.cc
@@ -0,0 +1,545 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/observer_list.h"
+#include "base/observer_list_threadsafe.h"
+
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/location.h"
+#include "base/memory/weak_ptr.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/platform_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+class Foo {
+ public:
+ virtual void Observe(int x) = 0;
+ virtual ~Foo() {}
+};
+
+class Adder : public Foo {
+ public:
+ explicit Adder(int scaler) : total(0), scaler_(scaler) {}
+ void Observe(int x) override { total += x * scaler_; }
+ ~Adder() override {}
+ int total;
+
+ private:
+ int scaler_;
+};
+
+class Disrupter : public Foo {
+ public:
+ Disrupter(ObserverList<Foo>* list, Foo* doomed)
+ : list_(list),
+ doomed_(doomed) {
+ }
+ ~Disrupter() override {}
+ void Observe(int x) override { list_->RemoveObserver(doomed_); }
+
+ private:
+ ObserverList<Foo>* list_;
+ Foo* doomed_;
+};
+
+class ThreadSafeDisrupter : public Foo {
+ public:
+ ThreadSafeDisrupter(ObserverListThreadSafe<Foo>* list, Foo* doomed)
+ : list_(list),
+ doomed_(doomed) {
+ }
+ ~ThreadSafeDisrupter() override {}
+ void Observe(int x) override { list_->RemoveObserver(doomed_); }
+
+ private:
+ ObserverListThreadSafe<Foo>* list_;
+ Foo* doomed_;
+};
+
+template <typename ObserverListType>
+class AddInObserve : public Foo {
+ public:
+ explicit AddInObserve(ObserverListType* observer_list)
+ : added(false),
+ observer_list(observer_list),
+ adder(1) {
+ }
+
+ void Observe(int x) override {
+ if (!added) {
+ added = true;
+ observer_list->AddObserver(&adder);
+ }
+ }
+
+ bool added;
+ ObserverListType* observer_list;
+ Adder adder;
+};
+
+
+static const int kThreadRunTime = 2000; // ms to run the multi-threaded test.
+
+// A thread for use in the ThreadSafeObserver test
+// which will add and remove itself from the notification
+// list repeatedly.
+class AddRemoveThread : public PlatformThread::Delegate,
+ public Foo {
+ public:
+ AddRemoveThread(ObserverListThreadSafe<Foo>* list, bool notify)
+ : list_(list),
+ loop_(nullptr),
+ in_list_(false),
+ start_(Time::Now()),
+ count_observes_(0),
+ count_addtask_(0),
+ do_notifies_(notify),
+ weak_factory_(this) {
+ }
+
+ ~AddRemoveThread() override {}
+
+ void ThreadMain() override {
+ loop_ = new MessageLoop(); // Fire up a message loop.
+ loop_->task_runner()->PostTask(
+ FROM_HERE,
+ base::Bind(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));
+ RunLoop().Run();
+ //LOG(ERROR) << "Loop 0x" << std::hex << loop_ << " done. " <<
+ // count_observes_ << ", " << count_addtask_;
+ delete loop_;
+ loop_ = reinterpret_cast<MessageLoop*>(0xdeadbeef);
+ delete this;
+ }
+
+ // This task just keeps posting to itself in an attempt
+ // to race with the notifier.
+ void AddTask() {
+ count_addtask_++;
+
+ if ((Time::Now() - start_).InMilliseconds() > kThreadRunTime) {
+ VLOG(1) << "DONE!";
+ return;
+ }
+
+ if (!in_list_) {
+ list_->AddObserver(this);
+ in_list_ = true;
+ }
+
+ if (do_notifies_) {
+ list_->Notify(FROM_HERE, &Foo::Observe, 10);
+ }
+
+ loop_->task_runner()->PostTask(
+ FROM_HERE,
+ base::Bind(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));
+ }
+
+ void Quit() {
+ loop_->task_runner()->PostTask(FROM_HERE,
+ MessageLoop::QuitWhenIdleClosure());
+ }
+
+ void Observe(int x) override {
+ count_observes_++;
+
+ // If we're getting called after we removed ourselves from
+ // the list, that is very bad!
+ DCHECK(in_list_);
+
+ // This callback should fire on the appropriate thread
+ EXPECT_EQ(loop_, MessageLoop::current());
+
+ list_->RemoveObserver(this);
+ in_list_ = false;
+ }
+
+ private:
+ ObserverListThreadSafe<Foo>* list_;
+ MessageLoop* loop_;
+ bool in_list_; // Are we currently registered for notifications.
+ // in_list_ is only used on |this| thread.
+ Time start_; // The time we started the test.
+
+ int count_observes_; // Number of times we observed.
+ int count_addtask_; // Number of times thread AddTask was called
+ bool do_notifies_; // Whether these threads should do notifications.
+
+ base::WeakPtrFactory<AddRemoveThread> weak_factory_;
+};
+
+TEST(ObserverListTest, BasicTest) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1), e(-1);
+ Disrupter evil(&observer_list, &c);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+
+ EXPECT_TRUE(observer_list.HasObserver(&a));
+ EXPECT_FALSE(observer_list.HasObserver(&c));
+
+ FOR_EACH_OBSERVER(Foo, observer_list, Observe(10));
+
+ observer_list.AddObserver(&evil);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ // Removing an observer not in the list should do nothing.
+ observer_list.RemoveObserver(&e);
+
+ FOR_EACH_OBSERVER(Foo, observer_list, Observe(10));
+
+ EXPECT_EQ(20, a.total);
+ EXPECT_EQ(-20, b.total);
+ EXPECT_EQ(0, c.total);
+ EXPECT_EQ(-10, d.total);
+ EXPECT_EQ(0, e.total);
+}
+
+TEST(ObserverListThreadSafeTest, BasicTest) {
+ MessageLoop loop;
+
+ scoped_refptr<ObserverListThreadSafe<Foo> > observer_list(
+ new ObserverListThreadSafe<Foo>);
+ Adder a(1);
+ Adder b(-1);
+ Adder c(1);
+ Adder d(-1);
+ ThreadSafeDisrupter evil(observer_list.get(), &c);
+
+ observer_list->AddObserver(&a);
+ observer_list->AddObserver(&b);
+
+ observer_list->Notify(FROM_HERE, &Foo::Observe, 10);
+ RunLoop().RunUntilIdle();
+
+ observer_list->AddObserver(&evil);
+ observer_list->AddObserver(&c);
+ observer_list->AddObserver(&d);
+
+ observer_list->Notify(FROM_HERE, &Foo::Observe, 10);
+ RunLoop().RunUntilIdle();
+
+ EXPECT_EQ(20, a.total);
+ EXPECT_EQ(-20, b.total);
+ EXPECT_EQ(0, c.total);
+ EXPECT_EQ(-10, d.total);
+}
+
+TEST(ObserverListThreadSafeTest, RemoveObserver) {
+ MessageLoop loop;
+
+ scoped_refptr<ObserverListThreadSafe<Foo> > observer_list(
+ new ObserverListThreadSafe<Foo>);
+ Adder a(1), b(1);
+
+ // A workaround for the compiler bug. See http://crbug.com/121960.
+ EXPECT_NE(&a, &b);
+
+ // Should do nothing.
+ observer_list->RemoveObserver(&a);
+ observer_list->RemoveObserver(&b);
+
+ observer_list->Notify(FROM_HERE, &Foo::Observe, 10);
+ RunLoop().RunUntilIdle();
+
+ EXPECT_EQ(0, a.total);
+ EXPECT_EQ(0, b.total);
+
+ observer_list->AddObserver(&a);
+
+ // Should also do nothing.
+ observer_list->RemoveObserver(&b);
+
+ observer_list->Notify(FROM_HERE, &Foo::Observe, 10);
+ RunLoop().RunUntilIdle();
+
+ EXPECT_EQ(10, a.total);
+ EXPECT_EQ(0, b.total);
+}
+
+TEST(ObserverListThreadSafeTest, WithoutMessageLoop) {
+ scoped_refptr<ObserverListThreadSafe<Foo> > observer_list(
+ new ObserverListThreadSafe<Foo>);
+
+ Adder a(1), b(1), c(1);
+
+ // No MessageLoop, so these should not be added.
+ observer_list->AddObserver(&a);
+ observer_list->AddObserver(&b);
+
+ {
+ // Add c when there's a loop.
+ MessageLoop loop;
+ observer_list->AddObserver(&c);
+
+ observer_list->Notify(FROM_HERE, &Foo::Observe, 10);
+ RunLoop().RunUntilIdle();
+
+ EXPECT_EQ(0, a.total);
+ EXPECT_EQ(0, b.total);
+ EXPECT_EQ(10, c.total);
+
+ // Now add a when there's a loop.
+ observer_list->AddObserver(&a);
+
+ // Remove c when there's a loop.
+ observer_list->RemoveObserver(&c);
+
+ // Notify again.
+ observer_list->Notify(FROM_HERE, &Foo::Observe, 20);
+ RunLoop().RunUntilIdle();
+
+ EXPECT_EQ(20, a.total);
+ EXPECT_EQ(0, b.total);
+ EXPECT_EQ(10, c.total);
+ }
+
+ // Removing should always succeed with or without a loop.
+ observer_list->RemoveObserver(&a);
+
+ // Notifying should not fail but should also be a no-op.
+ MessageLoop loop;
+ observer_list->AddObserver(&b);
+ observer_list->Notify(FROM_HERE, &Foo::Observe, 30);
+ RunLoop().RunUntilIdle();
+
+ EXPECT_EQ(20, a.total);
+ EXPECT_EQ(30, b.total);
+ EXPECT_EQ(10, c.total);
+}
+
+class FooRemover : public Foo {
+ public:
+ explicit FooRemover(ObserverListThreadSafe<Foo>* list) : list_(list) {}
+ ~FooRemover() override {}
+
+ void AddFooToRemove(Foo* foo) {
+ foos_.push_back(foo);
+ }
+
+ void Observe(int x) override {
+ std::vector<Foo*> tmp;
+ tmp.swap(foos_);
+ for (std::vector<Foo*>::iterator it = tmp.begin();
+ it != tmp.end(); ++it) {
+ list_->RemoveObserver(*it);
+ }
+ }
+
+ private:
+ const scoped_refptr<ObserverListThreadSafe<Foo> > list_;
+ std::vector<Foo*> foos_;
+};
+
+TEST(ObserverListThreadSafeTest, RemoveMultipleObservers) {
+ MessageLoop loop;
+ scoped_refptr<ObserverListThreadSafe<Foo> > observer_list(
+ new ObserverListThreadSafe<Foo>);
+
+ FooRemover a(observer_list.get());
+ Adder b(1);
+
+ observer_list->AddObserver(&a);
+ observer_list->AddObserver(&b);
+
+ a.AddFooToRemove(&a);
+ a.AddFooToRemove(&b);
+
+ observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
+ RunLoop().RunUntilIdle();
+}
+
+// A test driver for a multi-threaded notification loop. Runs a number
+// of observer threads, each of which constantly adds/removes itself
+// from the observer list. Optionally, if cross_thread_notifies is set
+// to true, the observer threads will also trigger notifications to
+// all observers.
+static void ThreadSafeObserverHarness(int num_threads,
+ bool cross_thread_notifies) {
+ MessageLoop loop;
+
+ const int kMaxThreads = 15;
+ num_threads = num_threads > kMaxThreads ? kMaxThreads : num_threads;
+
+ scoped_refptr<ObserverListThreadSafe<Foo> > observer_list(
+ new ObserverListThreadSafe<Foo>);
+ Adder a(1);
+ Adder b(-1);
+ Adder c(1);
+ Adder d(-1);
+
+ observer_list->AddObserver(&a);
+ observer_list->AddObserver(&b);
+
+ AddRemoveThread* threaded_observer[kMaxThreads];
+ base::PlatformThreadHandle threads[kMaxThreads];
+ for (int index = 0; index < num_threads; index++) {
+ threaded_observer[index] = new AddRemoveThread(observer_list.get(), false);
+ EXPECT_TRUE(PlatformThread::Create(0,
+ threaded_observer[index], &threads[index]));
+ }
+
+ Time start = Time::Now();
+ while (true) {
+ if ((Time::Now() - start).InMilliseconds() > kThreadRunTime)
+ break;
+
+ observer_list->Notify(FROM_HERE, &Foo::Observe, 10);
+
+ RunLoop().RunUntilIdle();
+ }
+
+ for (int index = 0; index < num_threads; index++) {
+ threaded_observer[index]->Quit();
+ PlatformThread::Join(threads[index]);
+ }
+}
+
+TEST(ObserverListThreadSafeTest, CrossThreadObserver) {
+ // Use 7 observer threads. Notifications only come from
+ // the main thread.
+ ThreadSafeObserverHarness(7, false);
+}
+
+TEST(ObserverListThreadSafeTest, CrossThreadNotifications) {
+ // Use 3 observer threads. Notifications will fire from
+ // the main thread and all 3 observer threads.
+ ThreadSafeObserverHarness(3, true);
+}
+
+TEST(ObserverListThreadSafeTest, OutlivesMessageLoop) {
+ MessageLoop* loop = new MessageLoop;
+ scoped_refptr<ObserverListThreadSafe<Foo> > observer_list(
+ new ObserverListThreadSafe<Foo>);
+
+ Adder a(1);
+ observer_list->AddObserver(&a);
+ delete loop;
+ // Test passes if we don't crash here.
+ observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
+}
+
+TEST(ObserverListTest, Existing) {
+ ObserverList<Foo> observer_list(ObserverList<Foo>::NOTIFY_EXISTING_ONLY);
+ Adder a(1);
+ AddInObserve<ObserverList<Foo> > b(&observer_list);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+
+ FOR_EACH_OBSERVER(Foo, observer_list, Observe(1));
+
+ EXPECT_TRUE(b.added);
+ // B's adder should not have been notified because it was added during
+ // notification.
+ EXPECT_EQ(0, b.adder.total);
+
+ // Notify again to make sure b's adder is notified.
+ FOR_EACH_OBSERVER(Foo, observer_list, Observe(1));
+ EXPECT_EQ(1, b.adder.total);
+}
+
+// Same as above, but for ObserverListThreadSafe
+TEST(ObserverListThreadSafeTest, Existing) {
+ MessageLoop loop;
+ scoped_refptr<ObserverListThreadSafe<Foo> > observer_list(
+ new ObserverListThreadSafe<Foo>(ObserverList<Foo>::NOTIFY_EXISTING_ONLY));
+ Adder a(1);
+ AddInObserve<ObserverListThreadSafe<Foo> > b(observer_list.get());
+
+ observer_list->AddObserver(&a);
+ observer_list->AddObserver(&b);
+
+ observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
+ RunLoop().RunUntilIdle();
+
+ EXPECT_TRUE(b.added);
+ // B's adder should not have been notified because it was added during
+ // notification.
+ EXPECT_EQ(0, b.adder.total);
+
+ // Notify again to make sure b's adder is notified.
+ observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
+ RunLoop().RunUntilIdle();
+ EXPECT_EQ(1, b.adder.total);
+}
+
+class AddInClearObserve : public Foo {
+ public:
+ explicit AddInClearObserve(ObserverList<Foo>* list)
+ : list_(list), added_(false), adder_(1) {}
+
+ void Observe(int /* x */) override {
+ list_->Clear();
+ list_->AddObserver(&adder_);
+ added_ = true;
+ }
+
+ bool added() const { return added_; }
+ const Adder& adder() const { return adder_; }
+
+ private:
+ ObserverList<Foo>* const list_;
+
+ bool added_;
+ Adder adder_;
+};
+
+TEST(ObserverListTest, ClearNotifyAll) {
+ ObserverList<Foo> observer_list;
+ AddInClearObserve a(&observer_list);
+
+ observer_list.AddObserver(&a);
+
+ FOR_EACH_OBSERVER(Foo, observer_list, Observe(1));
+ EXPECT_TRUE(a.added());
+ EXPECT_EQ(1, a.adder().total)
+ << "Adder should observe once and have sum of 1.";
+}
+
+TEST(ObserverListTest, ClearNotifyExistingOnly) {
+ ObserverList<Foo> observer_list(ObserverList<Foo>::NOTIFY_EXISTING_ONLY);
+ AddInClearObserve a(&observer_list);
+
+ observer_list.AddObserver(&a);
+
+ FOR_EACH_OBSERVER(Foo, observer_list, Observe(1));
+ EXPECT_TRUE(a.added());
+ EXPECT_EQ(0, a.adder().total)
+ << "Adder should not observe, so sum should still be 0.";
+}
+
+class ListDestructor : public Foo {
+ public:
+ explicit ListDestructor(ObserverList<Foo>* list) : list_(list) {}
+ ~ListDestructor() override {}
+
+ void Observe(int x) override { delete list_; }
+
+ private:
+ ObserverList<Foo>* list_;
+};
+
+
+TEST(ObserverListTest, IteratorOutlivesList) {
+ ObserverList<Foo>* observer_list = new ObserverList<Foo>;
+ ListDestructor a(observer_list);
+ observer_list->AddObserver(&a);
+
+ FOR_EACH_OBSERVER(Foo, *observer_list, Observe(0));
+ // If this test fails, there'll be Valgrind errors when this function goes out
+ // of scope.
+}
+
+} // namespace
+} // namespace base
diff --git a/libchrome/base/optional.h b/libchrome/base/optional.h
new file mode 100644
index 0000000..b468964
--- /dev/null
+++ b/libchrome/base/optional.h
@@ -0,0 +1,457 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_OPTIONAL_H_
+#define BASE_OPTIONAL_H_
+
+#include <type_traits>
+
+#include "base/logging.h"
+#include "base/memory/aligned_memory.h"
+#include "base/template_util.h"
+
+namespace base {
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/in_place_t
+// Tag type selecting the in-place (forwarding) Optional constructor.
+struct in_place_t {};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/nullopt_t
+struct nullopt_t {
+ // The explicit int parameter makes nullopt_t non-default-constructible and
+ // not an aggregate, so "opt = {}" stays ambiguous-free per the standard.
+ constexpr explicit nullopt_t(int) {}
+};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/in_place
+constexpr in_place_t in_place = {};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/nullopt
+constexpr nullopt_t nullopt(0);
+
+namespace internal {
+
+// Storage for Optional<T>: a null flag plus raw aligned bytes for T. The
+// second (bool) template parameter dispatches on whether ~T() is trivial so
+// that Optional<T> itself can be trivially destructible whenever T is.
+template <typename T, bool = base::is_trivially_destructible<T>::value>
+struct OptionalStorage {
+ // When T is not trivially destructible we must call its
+ // destructor before deallocating its memory.
+ ~OptionalStorage() {
+ if (!is_null_)
+ buffer_.template data_as<T>()->~T();
+ }
+
+ bool is_null_ = true;
+ base::AlignedMemory<sizeof(T), ALIGNOF(T)> buffer_;
+};
+
+template <typename T>
+struct OptionalStorage<T, true> {
+ // When T is trivially destructible (i.e. its destructor does nothing)
+ // there is no need to call it.
+ // Since |base::AlignedMemory| is just an array its destructor
+ // is trivial. Explicitly defaulting the destructor means it's not
+ // user-provided. All of this together make this destructor trivial.
+ ~OptionalStorage() = default;
+
+ bool is_null_ = true;
+ base::AlignedMemory<sizeof(T), ALIGNOF(T)> buffer_;
+};
+
+} // namespace internal
+
+// base::Optional is a Chromium version of the C++17 optional class:
+// std::optional documentation:
+// http://en.cppreference.com/w/cpp/utility/optional
+// Chromium documentation:
+// https://chromium.googlesource.com/chromium/src/+/master/docs/optional.md
+//
+// These are the differences between the specification and the implementation:
+// - The constructor and emplace method using initializer_list are not
+// implemented because 'initializer_list' is banned from Chromium.
+// - Constructors do not use 'constexpr' as it is a C++14 extension.
+// - 'constexpr' might be missing in some places for reasons specified locally.
+// - No exceptions are thrown, because they are banned from Chromium.
+// - All the non-members are in the 'base' namespace instead of 'std'.
+template <typename T>
+class Optional {
+ public:
+ using value_type = T;
+
+ // An empty (disengaged) Optional.
+ constexpr Optional() = default;
+ Optional(base::nullopt_t) : Optional() {}
+
+ Optional(const Optional& other) {
+ if (!other.storage_.is_null_)
+ Init(other.value());
+ }
+
+ // Note: moving from an engaged Optional leaves it engaged, holding a
+ // moved-from T (matches std::optional semantics).
+ Optional(Optional&& other) {
+ if (!other.storage_.is_null_)
+ Init(std::move(other.value()));
+ }
+
+ Optional(const T& value) { Init(value); }
+
+ Optional(T&& value) { Init(std::move(value)); }
+
+ // Constructs the contained T in place from |args|.
+ template <class... Args>
+ explicit Optional(base::in_place_t, Args&&... args) {
+ emplace(std::forward<Args>(args)...);
+ }
+
+ // Defaulted so Optional<T> is trivially destructible when T is; the
+ // contained object (if any) is destroyed by OptionalStorage.
+ ~Optional() = default;
+
+ Optional& operator=(base::nullopt_t) {
+ FreeIfNeeded();
+ return *this;
+ }
+
+ Optional& operator=(const Optional& other) {
+ if (other.storage_.is_null_) {
+ FreeIfNeeded();
+ return *this;
+ }
+
+ InitOrAssign(other.value());
+ return *this;
+ }
+
+ Optional& operator=(Optional&& other) {
+ if (other.storage_.is_null_) {
+ FreeIfNeeded();
+ return *this;
+ }
+
+ InitOrAssign(std::move(other.value()));
+ return *this;
+ }
+
+ // Perfect-forwarding assignment from a value of (decayed) type T.
+ // FIX: the constraint previously read std::is_same<std::decay<U>, T>,
+ // comparing the trait class std::decay<U> itself (never equal to T), so
+ // this overload was dead code and value assignments silently fell back to
+ // converting construction plus move-assignment. Comparing against
+ // typename std::decay<U>::type makes the overload participate as intended
+ // and avoids the extra temporary.
+ template <class U>
+ typename std::enable_if<
+ std::is_same<typename std::decay<U>::type, T>::value,
+ Optional&>::type
+ operator=(U&& value) {
+ InitOrAssign(std::forward<U>(value));
+ return *this;
+ }
+
+ // TODO(mlamouri): can't use 'constexpr' with DCHECK.
+ const T* operator->() const {
+ DCHECK(!storage_.is_null_);
+ return &value();
+ }
+
+ // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
+ // meant to be 'constexpr const'.
+ T* operator->() {
+ DCHECK(!storage_.is_null_);
+ return &value();
+ }
+
+ constexpr const T& operator*() const& { return value(); }
+
+ // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
+ // meant to be 'constexpr const'.
+ T& operator*() & { return value(); }
+
+ constexpr const T&& operator*() const&& { return std::move(value()); }
+
+ // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
+ // meant to be 'constexpr const'.
+ T&& operator*() && { return std::move(value()); }
+
+ // True when a value is held.
+ constexpr explicit operator bool() const { return !storage_.is_null_; }
+
+ // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
+ // meant to be 'constexpr const'.
+ T& value() & {
+ DCHECK(!storage_.is_null_);
+ return *storage_.buffer_.template data_as<T>();
+ }
+
+ // TODO(mlamouri): can't use 'constexpr' with DCHECK.
+ const T& value() const& {
+ DCHECK(!storage_.is_null_);
+ return *storage_.buffer_.template data_as<T>();
+ }
+
+ // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
+ // meant to be 'constexpr const'.
+ T&& value() && {
+ DCHECK(!storage_.is_null_);
+ return std::move(*storage_.buffer_.template data_as<T>());
+ }
+
+ // TODO(mlamouri): can't use 'constexpr' with DCHECK.
+ const T&& value() const&& {
+ DCHECK(!storage_.is_null_);
+ return std::move(*storage_.buffer_.template data_as<T>());
+ }
+
+ // Returns the contained value, or |default_value| converted to T if empty.
+ template <class U>
+ constexpr T value_or(U&& default_value) const& {
+ // TODO(mlamouri): add the following assert when possible:
+ // static_assert(std::is_copy_constructible<T>::value,
+ // "T must be copy constructible");
+ static_assert(std::is_convertible<U, T>::value,
+ "U must be convertible to T");
+ return storage_.is_null_ ? static_cast<T>(std::forward<U>(default_value))
+ : value();
+ }
+
+ template <class U>
+ T value_or(U&& default_value) && {
+ // TODO(mlamouri): add the following assert when possible:
+ // static_assert(std::is_move_constructible<T>::value,
+ // "T must be move constructible");
+ static_assert(std::is_convertible<U, T>::value,
+ "U must be convertible to T");
+ return storage_.is_null_ ? static_cast<T>(std::forward<U>(default_value))
+ : std::move(value());
+ }
+
+ // Exchanges states with |other|; handles all four engaged/empty cases.
+ void swap(Optional& other) {
+ if (storage_.is_null_ && other.storage_.is_null_)
+ return;
+
+ if (storage_.is_null_ != other.storage_.is_null_) {
+ if (storage_.is_null_) {
+ Init(std::move(*other.storage_.buffer_.template data_as<T>()));
+ other.FreeIfNeeded();
+ } else {
+ other.Init(std::move(*storage_.buffer_.template data_as<T>()));
+ FreeIfNeeded();
+ }
+ return;
+ }
+
+ DCHECK(!storage_.is_null_ && !other.storage_.is_null_);
+ using std::swap;  // Enable ADL so a type-specific swap is preferred.
+ swap(**this, *other);
+ }
+
+ // Destroys any current value and constructs a new one from |args|.
+ template <class... Args>
+ void emplace(Args&&... args) {
+ FreeIfNeeded();
+ Init(std::forward<Args>(args)...);
+ }
+
+ private:
+ // Placement-new helpers; all require the Optional to be currently empty.
+ void Init(const T& value) {
+ DCHECK(storage_.is_null_);
+ new (storage_.buffer_.void_data()) T(value);
+ storage_.is_null_ = false;
+ }
+
+ void Init(T&& value) {
+ DCHECK(storage_.is_null_);
+ new (storage_.buffer_.void_data()) T(std::move(value));
+ storage_.is_null_ = false;
+ }
+
+ template <class... Args>
+ void Init(Args&&... args) {
+ DCHECK(storage_.is_null_);
+ new (storage_.buffer_.void_data()) T(std::forward<Args>(args)...);
+ storage_.is_null_ = false;
+ }
+
+ // Constructs when empty, assigns when engaged (per std::optional).
+ void InitOrAssign(const T& value) {
+ if (storage_.is_null_)
+ Init(value);
+ else
+ *storage_.buffer_.template data_as<T>() = value;
+ }
+
+ void InitOrAssign(T&& value) {
+ if (storage_.is_null_)
+ Init(std::move(value));
+ else
+ *storage_.buffer_.template data_as<T>() = std::move(value);
+ }
+
+ // Destroys the contained value (if any) and marks the Optional empty.
+ void FreeIfNeeded() {
+ if (storage_.is_null_)
+ return;
+ storage_.buffer_.template data_as<T>()->~T();
+ storage_.is_null_ = true;
+ }
+
+ internal::OptionalStorage<T> storage_;
+};
+
+// Relational operators between two Optionals. An empty Optional compares
+// equal to another empty Optional, and less-than any engaged one; everything
+// else is derived from T's == and < (matching std::optional).
+template <class T>
+constexpr bool operator==(const Optional<T>& lhs, const Optional<T>& rhs) {
+ // Engagement must match; then either both are empty, or the values match.
+ return !!lhs != !!rhs ? false : lhs == nullopt || (*lhs == *rhs);
+}
+
+template <class T>
+constexpr bool operator!=(const Optional<T>& lhs, const Optional<T>& rhs) {
+ return !(lhs == rhs);
+}
+
+template <class T>
+constexpr bool operator<(const Optional<T>& lhs, const Optional<T>& rhs) {
+ return rhs == nullopt ? false : (lhs == nullopt ? true : *lhs < *rhs);
+}
+
+// <=, > and >= are all expressed in terms of < above.
+template <class T>
+constexpr bool operator<=(const Optional<T>& lhs, const Optional<T>& rhs) {
+ return !(rhs < lhs);
+}
+
+template <class T>
+constexpr bool operator>(const Optional<T>& lhs, const Optional<T>& rhs) {
+ return rhs < lhs;
+}
+
+template <class T>
+constexpr bool operator>=(const Optional<T>& lhs, const Optional<T>& rhs) {
+ return !(lhs < rhs);
+}
+
+// Comparisons against nullopt: nullopt behaves like an Optional that is
+// always empty, so each of these reduces to a check of |opt|'s engagement.
+template <class T>
+constexpr bool operator==(const Optional<T>& opt, base::nullopt_t) {
+ return !opt;
+}
+
+template <class T>
+constexpr bool operator==(base::nullopt_t, const Optional<T>& opt) {
+ return !opt;
+}
+
+template <class T>
+constexpr bool operator!=(const Optional<T>& opt, base::nullopt_t) {
+ return !!opt;
+}
+
+template <class T>
+constexpr bool operator!=(base::nullopt_t, const Optional<T>& opt) {
+ return !!opt;
+}
+
+// Nothing is ever less than nullopt.
+template <class T>
+constexpr bool operator<(const Optional<T>& opt, base::nullopt_t) {
+ return false;
+}
+
+// nullopt is less than any engaged Optional.
+template <class T>
+constexpr bool operator<(base::nullopt_t, const Optional<T>& opt) {
+ return !!opt;
+}
+
+template <class T>
+constexpr bool operator<=(const Optional<T>& opt, base::nullopt_t) {
+ return !opt;
+}
+
+template <class T>
+constexpr bool operator<=(base::nullopt_t, const Optional<T>& opt) {
+ return true;
+}
+
+template <class T>
+constexpr bool operator>(const Optional<T>& opt, base::nullopt_t) {
+ return !!opt;
+}
+
+template <class T>
+constexpr bool operator>(base::nullopt_t, const Optional<T>& opt) {
+ return false;
+}
+
+template <class T>
+constexpr bool operator>=(const Optional<T>& opt, base::nullopt_t) {
+ return true;
+}
+
+template <class T>
+constexpr bool operator>=(base::nullopt_t, const Optional<T>& opt) {
+ return !opt;
+}
+
+// Comparisons against a raw T value: an empty Optional compares unequal to,
+// and less than, any value; an engaged Optional compares via T's == and <.
+template <class T>
+constexpr bool operator==(const Optional<T>& opt, const T& value) {
+ return opt != nullopt ? *opt == value : false;
+}
+
+template <class T>
+constexpr bool operator==(const T& value, const Optional<T>& opt) {
+ return opt == value;
+}
+
+template <class T>
+constexpr bool operator!=(const Optional<T>& opt, const T& value) {
+ return !(opt == value);
+}
+
+template <class T>
+constexpr bool operator!=(const T& value, const Optional<T>& opt) {
+ return !(opt == value);
+}
+
+template <class T>
+constexpr bool operator<(const Optional<T>& opt, const T& value) {
+ // Empty sorts before any value.
+ return opt != nullopt ? *opt < value : true;
+}
+
+template <class T>
+constexpr bool operator<(const T& value, const Optional<T>& opt) {
+ return opt != nullopt ? value < *opt : false;
+}
+
+// The remaining orderings are derived from < and > above.
+template <class T>
+constexpr bool operator<=(const Optional<T>& opt, const T& value) {
+ return !(opt > value);
+}
+
+template <class T>
+constexpr bool operator<=(const T& value, const Optional<T>& opt) {
+ return !(value > opt);
+}
+
+template <class T>
+constexpr bool operator>(const Optional<T>& opt, const T& value) {
+ return value < opt;
+}
+
+template <class T>
+constexpr bool operator>(const T& value, const Optional<T>& opt) {
+ return opt < value;
+}
+
+template <class T>
+constexpr bool operator>=(const Optional<T>& opt, const T& value) {
+ return !(opt < value);
+}
+
+template <class T>
+constexpr bool operator>=(const T& value, const Optional<T>& opt) {
+ return !(value < opt);
+}
+
+// Builds an engaged Optional from |value|, deducing the decayed value type
+// (so make_optional("foo") yields Optional<const char*>, not a reference).
+template <class T>
+constexpr Optional<typename std::decay<T>::type> make_optional(T&& value) {
+ return Optional<typename std::decay<T>::type>(std::forward<T>(value));
+}
+
+// Non-member swap so that std::swap-style generic code and ADL find it.
+template <class T>
+void swap(Optional<T>& lhs, Optional<T>& rhs) {
+ lhs.swap(rhs);
+}
+
+} // namespace base
+
+namespace std {
+
+// Allows base::Optional<T> keys in unordered containers. An empty Optional
+// hashes to 0; note this collides with any value whose std::hash is 0, which
+// is acceptable (hash equality does not imply key equality).
+template <class T>
+struct hash<base::Optional<T>> {
+ size_t operator()(const base::Optional<T>& opt) const {
+ return opt == base::nullopt ? 0 : std::hash<T>()(*opt);
+ }
+};
+
+} // namespace std
+
+#endif // BASE_OPTIONAL_H_
diff --git a/libchrome/base/optional_unittest.cc b/libchrome/base/optional_unittest.cc
new file mode 100644
index 0000000..d6bf263
--- /dev/null
+++ b/libchrome/base/optional_unittest.cc
@@ -0,0 +1,1301 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/optional.h"
+
+#include <set>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// Object used to test complex object with Optional<T> in addition of the move
+// semantics.
+class TestObject {
+ public:
+ // Records which special member function produced/last touched the object,
+ // so tests can assert that Optional forwarded to copy vs. move vs. swap.
+ enum class State {
+ DEFAULT_CONSTRUCTED,
+ VALUE_CONSTRUCTED,
+ COPY_CONSTRUCTED,
+ MOVE_CONSTRUCTED,
+ MOVED_FROM,
+ COPY_ASSIGNED,
+ MOVE_ASSIGNED,
+ SWAPPED,
+ };
+
+ TestObject() : foo_(0), bar_(0.0), state_(State::DEFAULT_CONSTRUCTED) {}
+
+ TestObject(int foo, double bar)
+ : foo_(foo), bar_(bar), state_(State::VALUE_CONSTRUCTED) {}
+
+ TestObject(const TestObject& other)
+ : foo_(other.foo_), bar_(other.bar_), state_(State::COPY_CONSTRUCTED) {}
+
+ // Unlike a typical move, the source's payload is left intact (ints/doubles
+ // are copied); only its state_ is stamped MOVED_FROM for test inspection.
+ TestObject(TestObject&& other)
+ : foo_(std::move(other.foo_)),
+ bar_(std::move(other.bar_)),
+ state_(State::MOVE_CONSTRUCTED) {
+ other.state_ = State::MOVED_FROM;
+ }
+
+ TestObject& operator=(const TestObject& other) {
+ foo_ = other.foo_;
+ bar_ = other.bar_;
+ state_ = State::COPY_ASSIGNED;
+ return *this;
+ }
+
+ TestObject& operator=(TestObject&& other) {
+ foo_ = other.foo_;
+ bar_ = other.bar_;
+ state_ = State::MOVE_ASSIGNED;
+ other.state_ = State::MOVED_FROM;
+ return *this;
+ }
+
+ // Member swap; both sides end up in State::SWAPPED.
+ void Swap(TestObject* other) {
+ using std::swap;
+ swap(foo_, other->foo_);
+ swap(bar_, other->bar_);
+ state_ = State::SWAPPED;
+ other->state_ = State::SWAPPED;
+ }
+
+ // Equality intentionally ignores state_: only the payload is compared.
+ bool operator==(const TestObject& other) const {
+ return foo_ == other.foo_ && bar_ == other.bar_;
+ }
+
+ int foo() const { return foo_; }
+ State state() const { return state_; }
+
+ private:
+ int foo_;
+ double bar_;
+ State state_;
+};
+
+// Implementing Swappable concept: ADL finds this overload from
+// Optional::swap's "using std::swap; swap(...)" call, exercising the
+// type-specific swap path (State::SWAPPED) instead of move operations.
+void swap(TestObject& lhs, TestObject& rhs) {
+ lhs.Swap(&rhs);
+}
+
+// A type whose destructor is user-provided (hence non-trivial). Used only in
+// the static_asserts below; never instantiated, so the private access and
+// empty body are fine.
+class NonTriviallyDestructible {
+ ~NonTriviallyDestructible() {}
+};
+
+} // anonymous namespace
+
+// Compile-time checks that Optional's destructor triviality mirrors T's,
+// i.e. the OptionalStorage<T, bool> specialization dispatch works.
+static_assert(is_trivially_destructible<Optional<int>>::value,
+ "OptionalIsTriviallyDestructible");
+
+static_assert(
+ !is_trivially_destructible<Optional<NonTriviallyDestructible>>::value,
+ "OptionalIsTriviallyDestructible");
+
+TEST(OptionalTest, DefaultConstructor) {
+ {
+ Optional<float> o;
+ EXPECT_FALSE(o);
+ }
+
+ {
+ Optional<std::string> o;
+ EXPECT_FALSE(o);
+ }
+
+ {
+ Optional<TestObject> o;
+ EXPECT_FALSE(o);
+ }
+}
+
+TEST(OptionalTest, CopyConstructor) {
+ {
+ Optional<float> first(0.1f);
+ Optional<float> other(first);
+
+ EXPECT_TRUE(other);
+ EXPECT_EQ(other.value(), 0.1f);
+ EXPECT_EQ(first, other);
+ }
+
+ {
+ Optional<std::string> first("foo");
+ Optional<std::string> other(first);
+
+ EXPECT_TRUE(other);
+ EXPECT_EQ(other.value(), "foo");
+ EXPECT_EQ(first, other);
+ }
+
+ {
+ Optional<TestObject> first(TestObject(3, 0.1));
+ Optional<TestObject> other(first);
+
+ EXPECT_TRUE(!!other);
+ EXPECT_TRUE(other.value() == TestObject(3, 0.1));
+ EXPECT_TRUE(first == other);
+ }
+}
+
+TEST(OptionalTest, ValueConstructor) {
+ {
+ Optional<float> o(0.1f);
+ EXPECT_TRUE(o);
+ EXPECT_EQ(o.value(), 0.1f);
+ }
+
+ {
+ Optional<std::string> o("foo");
+ EXPECT_TRUE(o);
+ EXPECT_EQ(o.value(), "foo");
+ }
+
+ {
+ Optional<TestObject> o(TestObject(3, 0.1));
+ EXPECT_TRUE(!!o);
+ EXPECT_TRUE(o.value() == TestObject(3, 0.1));
+ }
+}
+
+// Moving from an engaged Optional must leave the source ENGAGED (holding a
+// moved-from T), matching std::optional — hence the EXPECT_TRUE(first) checks.
+TEST(OptionalTest, MoveConstructor) {
+ {
+ Optional<float> first(0.1f);
+ Optional<float> second(std::move(first));
+
+ EXPECT_TRUE(second);
+ EXPECT_EQ(second.value(), 0.1f);
+
+ EXPECT_TRUE(first);
+ }
+
+ {
+ Optional<std::string> first("foo");
+ Optional<std::string> second(std::move(first));
+
+ EXPECT_TRUE(second);
+ EXPECT_EQ("foo", second.value());
+
+ EXPECT_TRUE(first);
+ }
+
+ {
+ Optional<TestObject> first(TestObject(3, 0.1));
+ Optional<TestObject> second(std::move(first));
+
+ EXPECT_TRUE(!!second);
+ EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, second->state());
+ EXPECT_TRUE(TestObject(3, 0.1) == second.value());
+
+ EXPECT_TRUE(!!first);
+ EXPECT_EQ(TestObject::State::MOVED_FROM, first->state());
+ }
+}
+
+// Same as above, but moving from the contained value directly
+// (first.value()), which also leaves |first| engaged.
+TEST(OptionalTest, MoveValueConstructor) {
+ {
+ Optional<float> first(0.1f);
+ Optional<float> second(std::move(first.value()));
+
+ EXPECT_TRUE(second);
+ EXPECT_EQ(second.value(), 0.1f);
+
+ EXPECT_TRUE(first);
+ }
+
+ {
+ Optional<std::string> first("foo");
+ Optional<std::string> second(std::move(first.value()));
+
+ EXPECT_TRUE(second);
+ EXPECT_EQ("foo", second.value());
+
+ EXPECT_TRUE(first);
+ }
+
+ {
+ Optional<TestObject> first(TestObject(3, 0.1));
+ Optional<TestObject> second(std::move(first.value()));
+
+ EXPECT_TRUE(!!second);
+ EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, second->state());
+ EXPECT_TRUE(TestObject(3, 0.1) == second.value());
+
+ EXPECT_TRUE(!!first);
+ EXPECT_EQ(TestObject::State::MOVED_FROM, first->state());
+ }
+}
+
+TEST(OptionalTest, ConstructorForwardArguments) {
+ {
+ Optional<float> a(base::in_place, 0.1f);
+ EXPECT_TRUE(a);
+ EXPECT_EQ(0.1f, a.value());
+ }
+
+ {
+ Optional<std::string> a(base::in_place, "foo");
+ EXPECT_TRUE(a);
+ EXPECT_EQ("foo", a.value());
+ }
+
+ {
+ Optional<TestObject> a(base::in_place, 0, 0.1);
+ EXPECT_TRUE(!!a);
+ EXPECT_TRUE(TestObject(0, 0.1) == a.value());
+ }
+}
+
+TEST(OptionalTest, NulloptConstructor) {
+ Optional<int> a = base::nullopt;
+ EXPECT_FALSE(a);
+}
+
+TEST(OptionalTest, AssignValue) {
+ {
+ Optional<float> a;
+ EXPECT_FALSE(a);
+ a = 0.1f;
+ EXPECT_TRUE(a);
+
+ Optional<float> b(0.1f);
+ EXPECT_TRUE(a == b);
+ }
+
+ {
+ Optional<std::string> a;
+ EXPECT_FALSE(a);
+ a = std::string("foo");
+ EXPECT_TRUE(a);
+
+ Optional<std::string> b(std::string("foo"));
+ EXPECT_EQ(a, b);
+ }
+
+ {
+ Optional<TestObject> a;
+ EXPECT_FALSE(!!a);
+ a = TestObject(3, 0.1);
+ EXPECT_TRUE(!!a);
+
+ Optional<TestObject> b(TestObject(3, 0.1));
+ EXPECT_TRUE(a == b);
+ }
+
+ {
+ Optional<TestObject> a = TestObject(4, 1.0);
+ EXPECT_TRUE(!!a);
+ a = TestObject(3, 0.1);
+ EXPECT_TRUE(!!a);
+
+ Optional<TestObject> b(TestObject(3, 0.1));
+ EXPECT_TRUE(a == b);
+ }
+}
+
+TEST(OptionalTest, AssignObject) {
+ {
+ Optional<float> a;
+ Optional<float> b(0.1f);
+ a = b;
+
+ EXPECT_TRUE(a);
+ EXPECT_EQ(a.value(), 0.1f);
+ EXPECT_EQ(a, b);
+ }
+
+ {
+ Optional<std::string> a;
+ Optional<std::string> b("foo");
+ a = b;
+
+ EXPECT_TRUE(a);
+ EXPECT_EQ(a.value(), "foo");
+ EXPECT_EQ(a, b);
+ }
+
+ {
+ Optional<TestObject> a;
+ Optional<TestObject> b(TestObject(3, 0.1));
+ a = b;
+
+ EXPECT_TRUE(!!a);
+ EXPECT_TRUE(a.value() == TestObject(3, 0.1));
+ EXPECT_TRUE(a == b);
+ }
+
+ {
+ Optional<TestObject> a(TestObject(4, 1.0));
+ Optional<TestObject> b(TestObject(3, 0.1));
+ a = b;
+
+ EXPECT_TRUE(!!a);
+ EXPECT_TRUE(a.value() == TestObject(3, 0.1));
+ EXPECT_TRUE(a == b);
+ }
+}
+
+TEST(OptionalTest, AssignObject_rvalue) {
+ {
+ Optional<float> a;
+ Optional<float> b(0.1f);
+ a = std::move(b);
+
+ EXPECT_TRUE(a);
+ EXPECT_TRUE(b);
+ EXPECT_EQ(0.1f, a.value());
+ }
+
+ {
+ Optional<std::string> a;
+ Optional<std::string> b("foo");
+ a = std::move(b);
+
+ EXPECT_TRUE(a);
+ EXPECT_TRUE(b);
+ EXPECT_EQ("foo", a.value());
+ }
+
+ {
+ Optional<TestObject> a;
+ Optional<TestObject> b(TestObject(3, 0.1));
+ a = std::move(b);
+
+ EXPECT_TRUE(!!a);
+ EXPECT_TRUE(!!b);
+ EXPECT_TRUE(TestObject(3, 0.1) == a.value());
+
+ EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, a->state());
+ EXPECT_EQ(TestObject::State::MOVED_FROM, b->state());
+ }
+
+ {
+ Optional<TestObject> a(TestObject(4, 1.0));
+ Optional<TestObject> b(TestObject(3, 0.1));
+ a = std::move(b);
+
+ EXPECT_TRUE(!!a);
+ EXPECT_TRUE(!!b);
+ EXPECT_TRUE(TestObject(3, 0.1) == a.value());
+
+ EXPECT_EQ(TestObject::State::MOVE_ASSIGNED, a->state());
+ EXPECT_EQ(TestObject::State::MOVED_FROM, b->state());
+ }
+}
+
+TEST(OptionalTest, AssignNull) {
+ {
+ Optional<float> a(0.1f);
+ Optional<float> b(0.2f);
+ a = base::nullopt;
+ b = base::nullopt;
+ EXPECT_EQ(a, b);
+ }
+
+ {
+ Optional<std::string> a("foo");
+ Optional<std::string> b("bar");
+ a = base::nullopt;
+ b = base::nullopt;
+ EXPECT_EQ(a, b);
+ }
+
+ {
+ Optional<TestObject> a(TestObject(3, 0.1));
+ Optional<TestObject> b(TestObject(4, 1.0));
+ a = base::nullopt;
+ b = base::nullopt;
+ EXPECT_TRUE(a == b);
+ }
+}
+
+TEST(OptionalTest, OperatorStar) {
+ {
+ Optional<float> a(0.1f);
+ EXPECT_EQ(a.value(), *a);
+ }
+
+ {
+ Optional<std::string> a("foo");
+ EXPECT_EQ(a.value(), *a);
+ }
+
+ {
+ Optional<TestObject> a(TestObject(3, 0.1));
+ EXPECT_EQ(a.value(), *a);
+ }
+}
+
+TEST(OptionalTest, OperatorStar_rvalue) {
+ EXPECT_EQ(0.1f, *Optional<float>(0.1f));
+ EXPECT_EQ(std::string("foo"), *Optional<std::string>("foo"));
+ EXPECT_TRUE(TestObject(3, 0.1) == *Optional<TestObject>(TestObject(3, 0.1)));
+}
+
+TEST(OptionalTest, OperatorArrow) {
+ Optional<TestObject> a(TestObject(3, 0.1));
+ EXPECT_EQ(a->foo(), 3);
+}
+
+TEST(OptionalTest, Value_rvalue) {
+ EXPECT_EQ(0.1f, Optional<float>(0.1f).value());
+ EXPECT_EQ(std::string("foo"), Optional<std::string>("foo").value());
+ EXPECT_TRUE(TestObject(3, 0.1) ==
+ Optional<TestObject>(TestObject(3, 0.1)).value());
+}
+
+TEST(OptionalTest, ValueOr) {
+ {
+ Optional<float> a;
+ EXPECT_EQ(0.0f, a.value_or(0.0f));
+
+ a = 0.1f;
+ EXPECT_EQ(0.1f, a.value_or(0.0f));
+
+ a = base::nullopt;
+ EXPECT_EQ(0.0f, a.value_or(0.0f));
+ }
+
+ {
+ Optional<std::string> a;
+ EXPECT_EQ("bar", a.value_or("bar"));
+
+ a = std::string("foo");
+ EXPECT_EQ(std::string("foo"), a.value_or("bar"));
+
+ a = base::nullopt;
+ EXPECT_EQ(std::string("bar"), a.value_or("bar"));
+ }
+
+ {
+ Optional<TestObject> a;
+ EXPECT_TRUE(a.value_or(TestObject(1, 0.3)) == TestObject(1, 0.3));
+
+ a = TestObject(3, 0.1);
+ EXPECT_TRUE(a.value_or(TestObject(1, 0.3)) == TestObject(3, 0.1));
+
+ a = base::nullopt;
+ EXPECT_TRUE(a.value_or(TestObject(1, 0.3)) == TestObject(1, 0.3));
+ }
+}
+
+TEST(OptionalTest, Swap_bothNoValue) {
+ Optional<TestObject> a, b;
+ a.swap(b);
+
+ EXPECT_FALSE(a);
+ EXPECT_FALSE(b);
+ EXPECT_TRUE(TestObject(42, 0.42) == a.value_or(TestObject(42, 0.42)));
+ EXPECT_TRUE(TestObject(42, 0.42) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, Swap_inHasValue) {
+ Optional<TestObject> a(TestObject(1, 0.3));
+ Optional<TestObject> b;
+ a.swap(b);
+
+ EXPECT_FALSE(a);
+
+ EXPECT_TRUE(!!b);
+ EXPECT_TRUE(TestObject(42, 0.42) == a.value_or(TestObject(42, 0.42)));
+ EXPECT_TRUE(TestObject(1, 0.3) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, Swap_outHasValue) {
+ Optional<TestObject> a;
+ Optional<TestObject> b(TestObject(1, 0.3));
+ a.swap(b);
+
+ EXPECT_TRUE(!!a);
+ EXPECT_FALSE(!!b);
+ EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
+ EXPECT_TRUE(TestObject(42, 0.42) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, Swap_bothValue) {
+ Optional<TestObject> a(TestObject(0, 0.1));
+ Optional<TestObject> b(TestObject(1, 0.3));
+ a.swap(b);
+
+ EXPECT_TRUE(!!a);
+ EXPECT_TRUE(!!b);
+ EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
+ EXPECT_TRUE(TestObject(0, 0.1) == b.value_or(TestObject(42, 0.42)));
+ EXPECT_EQ(TestObject::State::SWAPPED, a->state());
+ EXPECT_EQ(TestObject::State::SWAPPED, b->state());
+}
+
+TEST(OptionalTest, Emplace) {
+ {
+ Optional<float> a(0.1f);
+ a.emplace(0.3f);
+
+ EXPECT_TRUE(a);
+ EXPECT_EQ(0.3f, a.value());
+ }
+
+ {
+ Optional<std::string> a("foo");
+ a.emplace("bar");
+
+ EXPECT_TRUE(a);
+ EXPECT_EQ("bar", a.value());
+ }
+
+ {
+ Optional<TestObject> a(TestObject(0, 0.1));
+ a.emplace(TestObject(1, 0.2));
+
+ EXPECT_TRUE(!!a);
+ EXPECT_TRUE(TestObject(1, 0.2) == a.value());
+ }
+}
+
+TEST(OptionalTest, Equals_TwoEmpty) {
+ Optional<int> a;
+ Optional<int> b;
+
+ EXPECT_TRUE(a == b);
+}
+
+TEST(OptionalTest, Equals_TwoEquals) {
+ Optional<int> a(1);
+ Optional<int> b(1);
+
+ EXPECT_TRUE(a == b);
+}
+
+TEST(OptionalTest, Equals_OneEmpty) {
+ Optional<int> a;
+ Optional<int> b(1);
+
+ EXPECT_FALSE(a == b);
+}
+
+TEST(OptionalTest, Equals_TwoDifferent) {
+ Optional<int> a(0);
+ Optional<int> b(1);
+
+ EXPECT_FALSE(a == b);
+}
+
+TEST(OptionalTest, NotEquals_TwoEmpty) {
+ Optional<int> a;
+ Optional<int> b;
+
+ EXPECT_FALSE(a != b);
+}
+
+TEST(OptionalTest, NotEquals_TwoEquals) {
+ Optional<int> a(1);
+ Optional<int> b(1);
+
+ EXPECT_FALSE(a != b);
+}
+
+TEST(OptionalTest, NotEquals_OneEmpty) {
+ Optional<int> a;
+ Optional<int> b(1);
+
+ EXPECT_TRUE(a != b);
+}
+
+TEST(OptionalTest, NotEquals_TwoDifferent) {
+ Optional<int> a(0);
+ Optional<int> b(1);
+
+ EXPECT_TRUE(a != b);
+}
+
+TEST(OptionalTest, Less_LeftEmpty) {
+ Optional<int> l;
+ Optional<int> r(1);
+
+ EXPECT_TRUE(l < r);
+}
+
+TEST(OptionalTest, Less_RightEmpty) {
+ Optional<int> l(1);
+ Optional<int> r;
+
+ EXPECT_FALSE(l < r);
+}
+
+TEST(OptionalTest, Less_BothEmpty) {
+ Optional<int> l;
+ Optional<int> r;
+
+ EXPECT_FALSE(l < r);
+}
+
+TEST(OptionalTest, Less_BothValues) {
+ {
+ Optional<int> l(1);
+ Optional<int> r(2);
+
+ EXPECT_TRUE(l < r);
+ }
+ {
+ Optional<int> l(2);
+ Optional<int> r(1);
+
+ EXPECT_FALSE(l < r);
+ }
+ {
+ Optional<int> l(1);
+ Optional<int> r(1);
+
+ EXPECT_FALSE(l < r);
+ }
+}
+
+TEST(OptionalTest, LessEq_LeftEmpty) {
+ Optional<int> l;
+ Optional<int> r(1);
+
+ EXPECT_TRUE(l <= r);
+}
+
+TEST(OptionalTest, LessEq_RightEmpty) {
+ Optional<int> l(1);
+ Optional<int> r;
+
+ EXPECT_FALSE(l <= r);
+}
+
+TEST(OptionalTest, LessEq_BothEmpty) {
+ Optional<int> l;
+ Optional<int> r;
+
+ EXPECT_TRUE(l <= r);
+}
+
+TEST(OptionalTest, LessEq_BothValues) {
+ {
+ Optional<int> l(1);
+ Optional<int> r(2);
+
+ EXPECT_TRUE(l <= r);
+ }
+ {
+ Optional<int> l(2);
+ Optional<int> r(1);
+
+ EXPECT_FALSE(l <= r);
+ }
+ {
+ Optional<int> l(1);
+ Optional<int> r(1);
+
+ EXPECT_TRUE(l <= r);
+ }
+}
+
+TEST(OptionalTest, Greater_BothEmpty) {
+ Optional<int> l;
+ Optional<int> r;
+
+ EXPECT_FALSE(l > r);
+}
+
+TEST(OptionalTest, Greater_LeftEmpty) {
+ Optional<int> l;
+ Optional<int> r(1);
+
+ EXPECT_FALSE(l > r);
+}
+
+TEST(OptionalTest, Greater_RightEmpty) {
+ Optional<int> l(1);
+ Optional<int> r;
+
+ EXPECT_TRUE(l > r);
+}
+
+TEST(OptionalTest, Greater_BothValue) {
+ {
+ Optional<int> l(1);
+ Optional<int> r(2);
+
+ EXPECT_FALSE(l > r);
+ }
+ {
+ Optional<int> l(2);
+ Optional<int> r(1);
+
+ EXPECT_TRUE(l > r);
+ }
+ {
+ Optional<int> l(1);
+ Optional<int> r(1);
+
+ EXPECT_FALSE(l > r);
+ }
+}
+
+TEST(OptionalTest, GreaterEq_BothEmpty) {
+ Optional<int> l;
+ Optional<int> r;
+
+ EXPECT_TRUE(l >= r);
+}
+
+TEST(OptionalTest, GreaterEq_LeftEmpty) {
+ Optional<int> l;
+ Optional<int> r(1);
+
+ EXPECT_FALSE(l >= r);
+}
+
+TEST(OptionalTest, GreaterEq_RightEmpty) {
+ Optional<int> l(1);
+ Optional<int> r;
+
+ EXPECT_TRUE(l >= r);
+}
+
+TEST(OptionalTest, GreaterEq_BothValue) {
+ {
+ Optional<int> l(1);
+ Optional<int> r(2);
+
+ EXPECT_FALSE(l >= r);
+ }
+ {
+ Optional<int> l(2);
+ Optional<int> r(1);
+
+ EXPECT_TRUE(l >= r);
+ }
+ {
+ Optional<int> l(1);
+ Optional<int> r(1);
+
+ EXPECT_TRUE(l >= r);
+ }
+}
+
+TEST(OptionalTest, OptNullEq) {
+ {
+ Optional<int> opt;
+ EXPECT_TRUE(opt == base::nullopt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(opt == base::nullopt);
+ }
+}
+
+TEST(OptionalTest, NullOptEq) {
+ {
+ Optional<int> opt;
+ EXPECT_TRUE(base::nullopt == opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(base::nullopt == opt);
+ }
+}
+
+TEST(OptionalTest, OptNullNotEq) {
+ {
+ Optional<int> opt;
+ EXPECT_FALSE(opt != base::nullopt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(opt != base::nullopt);
+ }
+}
+
+TEST(OptionalTest, NullOptNotEq) {
+ {
+ Optional<int> opt;
+ EXPECT_FALSE(base::nullopt != opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(base::nullopt != opt);
+ }
+}
+
+TEST(OptionalTest, OptNullLower) {
+ {
+ Optional<int> opt;
+ EXPECT_FALSE(opt < base::nullopt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(opt < base::nullopt);
+ }
+}
+
+TEST(OptionalTest, NullOptLower) {
+ {
+ Optional<int> opt;
+ EXPECT_FALSE(base::nullopt < opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(base::nullopt < opt);
+ }
+}
+
+TEST(OptionalTest, OptNullLowerEq) {
+ {
+ Optional<int> opt;
+ EXPECT_TRUE(opt <= base::nullopt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(opt <= base::nullopt);
+ }
+}
+
+TEST(OptionalTest, NullOptLowerEq) {
+ {
+ Optional<int> opt;
+ EXPECT_TRUE(base::nullopt <= opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(base::nullopt <= opt);
+ }
+}
+
+TEST(OptionalTest, OptNullGreater) {
+ {
+ Optional<int> opt;
+ EXPECT_FALSE(opt > base::nullopt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(opt > base::nullopt);
+ }
+}
+
+TEST(OptionalTest, NullOptGreater) {
+ {
+ Optional<int> opt;
+ EXPECT_FALSE(base::nullopt > opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(base::nullopt > opt);
+ }
+}
+
+TEST(OptionalTest, OptNullGreaterEq) {
+ {
+ Optional<int> opt;
+ EXPECT_TRUE(opt >= base::nullopt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(opt >= base::nullopt);
+ }
+}
+
+TEST(OptionalTest, NullOptGreaterEq) {
+ {
+ Optional<int> opt;
+ EXPECT_TRUE(base::nullopt >= opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(base::nullopt >= opt);
+ }
+}
+
+TEST(OptionalTest, ValueEq_Empty) {
+ Optional<int> opt;
+ EXPECT_FALSE(opt == 1);
+}
+
+TEST(OptionalTest, ValueEq_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_FALSE(opt == 1);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(opt == 1);
+ }
+}
+
+TEST(OptionalTest, EqValue_Empty) {
+ Optional<int> opt;
+ EXPECT_FALSE(1 == opt);
+}
+
+TEST(OptionalTest, EqValue_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_FALSE(1 == opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(1 == opt);
+ }
+}
+
+TEST(OptionalTest, ValueNotEq_Empty) {
+ Optional<int> opt;
+ EXPECT_TRUE(opt != 1);
+}
+
+TEST(OptionalTest, ValueNotEq_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_TRUE(opt != 1);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(opt != 1);
+ }
+}
+
+TEST(OptionalTest, NotEqValue_Empty) {
+ Optional<int> opt;
+ EXPECT_TRUE(1 != opt);
+}
+
+TEST(OptionalTest, NotEqValue_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_TRUE(1 != opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(1 != opt);
+ }
+}
+
+TEST(OptionalTest, ValueLess_Empty) {
+ Optional<int> opt;
+ EXPECT_TRUE(opt < 1);
+}
+
+TEST(OptionalTest, ValueLess_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_TRUE(opt < 1);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(opt < 1);
+ }
+ {
+ Optional<int> opt(2);
+ EXPECT_FALSE(opt < 1);
+ }
+}
+
+TEST(OptionalTest, LessValue_Empty) {
+ Optional<int> opt;
+ EXPECT_FALSE(1 < opt);
+}
+
+TEST(OptionalTest, LessValue_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_FALSE(1 < opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_FALSE(1 < opt);
+ }
+ {
+ Optional<int> opt(2);
+ EXPECT_TRUE(1 < opt);
+ }
+}
+
+TEST(OptionalTest, ValueLessEq_Empty) {
+ Optional<int> opt;
+ EXPECT_TRUE(opt <= 1);
+}
+
+TEST(OptionalTest, ValueLessEq_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_TRUE(opt <= 1);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(opt <= 1);
+ }
+ {
+ Optional<int> opt(2);
+ EXPECT_FALSE(opt <= 1);
+ }
+}
+
+TEST(OptionalTest, LessEqValue_Empty) {
+ Optional<int> opt;
+ EXPECT_FALSE(1 <= opt);
+}
+
+TEST(OptionalTest, LessEqValue_NotEmpty) {
+ {
+ Optional<int> opt(0);
+ EXPECT_FALSE(1 <= opt);
+ }
+ {
+ Optional<int> opt(1);
+ EXPECT_TRUE(1 <= opt);
+ }
+ {
+ Optional<int> opt(2);
+ EXPECT_TRUE(1 <= opt);
+ }
+}
+
+// An empty Optional is never greater than a value.
+TEST(OptionalTest, ValueGreater_Empty) {
+  Optional<int> opt;
+  EXPECT_FALSE(opt > 1);
+}
+
+// Engaged optionals: opt > 1 holds only for a strictly greater held value.
+TEST(OptionalTest, ValueGreater_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_FALSE(opt > 1);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(opt > 1);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_TRUE(opt > 1);
+  }
+}
+
+// Any value is greater than an empty Optional.
+TEST(OptionalTest, GreaterValue_Empty) {
+  Optional<int> opt;
+  EXPECT_TRUE(1 > opt);
+}
+
+// value > optional holds only when the held value is strictly smaller.
+TEST(OptionalTest, GreaterValue_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_TRUE(1 > opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(1 > opt);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_FALSE(1 > opt);
+  }
+}
+
+// An empty Optional is never >= a value.
+TEST(OptionalTest, ValueGreaterEq_Empty) {
+  Optional<int> opt;
+  EXPECT_FALSE(opt >= 1);
+}
+
+// Engaged optionals: opt >= 1 mirrors value comparison of the contents.
+TEST(OptionalTest, ValueGreaterEq_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_FALSE(opt >= 1);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(opt >= 1);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_TRUE(opt >= 1);
+  }
+}
+
+// Any value is >= an empty Optional.
+TEST(OptionalTest, GreaterEqValue_Empty) {
+  Optional<int> opt;
+  EXPECT_TRUE(1 >= opt);
+}
+
+// value >= optional is false only when the held value is strictly greater.
+TEST(OptionalTest, GreaterEqValue_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_TRUE(1 >= opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(1 >= opt);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_FALSE(1 >= opt);
+  }
+}
+
+// Two engaged optionals holding different values compare unequal, for
+// trivially-copyable, string, and user-defined element types.
+TEST(OptionalTest, NotEquals) {
+  {
+    Optional<float> a(0.1f);
+    Optional<float> b(0.2f);
+    EXPECT_NE(a, b);
+  }
+
+  {
+    Optional<std::string> a("foo");
+    Optional<std::string> b("bar");
+    EXPECT_NE(a, b);
+  }
+
+  {
+    Optional<TestObject> a(TestObject(3, 0.1));
+    Optional<TestObject> b(TestObject(4, 1.0));
+    EXPECT_TRUE(a != b);
+  }
+}
+
+// An engaged optional never equals one that was reset to nullopt, even if
+// they previously held the same value.
+TEST(OptionalTest, NotEqualsNull) {
+  {
+    Optional<float> a(0.1f);
+    Optional<float> b(0.1f);
+    b = base::nullopt;
+    EXPECT_NE(a, b);
+  }
+
+  {
+    Optional<std::string> a("foo");
+    Optional<std::string> b("foo");
+    b = base::nullopt;
+    EXPECT_NE(a, b);
+  }
+
+  {
+    Optional<TestObject> a(TestObject(3, 0.1));
+    Optional<TestObject> b(TestObject(3, 0.1));
+    b = base::nullopt;
+    EXPECT_TRUE(a != b);
+  }
+}
+
+// base::make_optional should produce an engaged optional holding the given
+// value, moving (not copying) rvalue arguments into place.
+TEST(OptionalTest, MakeOptional) {
+  {
+    Optional<float> o = base::make_optional(32.f);
+    EXPECT_TRUE(o);
+    EXPECT_EQ(32.f, *o);
+
+    float value = 3.f;
+    o = base::make_optional(std::move(value));
+    EXPECT_TRUE(o);
+    EXPECT_EQ(3.f, *o);
+  }
+
+  {
+    Optional<std::string> o = base::make_optional(std::string("foo"));
+    EXPECT_TRUE(o);
+    EXPECT_EQ("foo", *o);
+
+    std::string value = "bar";
+    o = base::make_optional(std::move(value));
+    EXPECT_TRUE(o);
+    EXPECT_EQ(std::string("bar"), *o);
+  }
+
+  {
+    Optional<TestObject> o = base::make_optional(TestObject(3, 0.1));
+    EXPECT_TRUE(!!o);
+    EXPECT_TRUE(TestObject(3, 0.1) == *o);
+
+    TestObject value = TestObject(0, 0.42);
+    o = base::make_optional(std::move(value));
+    EXPECT_TRUE(!!o);
+    EXPECT_TRUE(TestObject(0, 0.42) == *o);
+    // TestObject records how it was constructed/assigned; verify the source
+    // was actually moved from and the target was move-assigned.
+    EXPECT_EQ(TestObject::State::MOVED_FROM, value.state());
+    EXPECT_EQ(TestObject::State::MOVE_ASSIGNED, o->state());
+
+    // A freshly-created optional should move-construct its contents.
+    EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED,
+              base::make_optional(std::move(value))->state());
+  }
+}
+
+// base::swap of two empty optionals leaves both empty.
+TEST(OptionalTest, NonMemberSwap_bothNoValue) {
+  Optional<TestObject> a, b;
+  base::swap(a, b);
+
+  EXPECT_FALSE(!!a);
+  EXPECT_FALSE(!!b);
+  // value_or returning the fallback confirms both are still disengaged.
+  EXPECT_TRUE(TestObject(42, 0.42) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(42, 0.42) == b.value_or(TestObject(42, 0.42)));
+}
+
+// Swapping engaged with empty transfers the value and empties the source.
+TEST(OptionalTest, NonMemberSwap_inHasValue) {
+  Optional<TestObject> a(TestObject(1, 0.3));
+  Optional<TestObject> b;
+  base::swap(a, b);
+
+  EXPECT_FALSE(!!a);
+  EXPECT_TRUE(!!b);
+  EXPECT_TRUE(TestObject(42, 0.42) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(1, 0.3) == b.value_or(TestObject(42, 0.42)));
+}
+
+// Same as above, with the engaged operand on the right-hand side.
+TEST(OptionalTest, NonMemberSwap_outHasValue) {
+  Optional<TestObject> a;
+  Optional<TestObject> b(TestObject(1, 0.3));
+  base::swap(a, b);
+
+  EXPECT_TRUE(!!a);
+  EXPECT_FALSE(!!b);
+  EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(42, 0.42) == b.value_or(TestObject(42, 0.42)));
+}
+
+// Two engaged optionals exchange values via the element type's swap.
+TEST(OptionalTest, NonMemberSwap_bothValue) {
+  Optional<TestObject> a(TestObject(0, 0.1));
+  Optional<TestObject> b(TestObject(1, 0.3));
+  base::swap(a, b);
+
+  EXPECT_TRUE(!!a);
+  EXPECT_TRUE(!!b);
+  EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(0, 0.1) == b.value_or(TestObject(42, 0.42)));
+  // SWAPPED state proves swap() was used rather than move construction.
+  EXPECT_EQ(TestObject::State::SWAPPED, a->state());
+  EXPECT_EQ(TestObject::State::SWAPPED, b->state());
+}
+
+// Hashing an engaged optional must yield the hash of the contained value.
+TEST(OptionalTest, Hash_OptionalReflectsInternal) {
+  {
+    std::hash<int> int_hash;
+    std::hash<Optional<int>> opt_int_hash;
+
+    EXPECT_EQ(int_hash(1), opt_int_hash(Optional<int>(1)));
+  }
+
+  {
+    std::hash<std::string> str_hash;
+    std::hash<Optional<std::string>> opt_str_hash;
+
+    EXPECT_EQ(str_hash(std::string("foobar")),
+              opt_str_hash(Optional<std::string>(std::string("foobar"))));
+  }
+}
+
+// Empty optionals hash to the same value regardless of element type.
+TEST(OptionalTest, Hash_NullOptEqualsNullOpt) {
+  std::hash<Optional<int>> opt_int_hash;
+  std::hash<Optional<std::string>> opt_str_hash;
+
+  EXPECT_EQ(opt_str_hash(Optional<std::string>()),
+            opt_int_hash(Optional<int>()));
+}
+
+// Optional is usable as a key in associative containers: lookup finds only
+// inserted values.
+TEST(OptionalTest, Hash_UseInSet) {
+  std::set<Optional<int>> setOptInt;
+
+  EXPECT_EQ(setOptInt.end(), setOptInt.find(42));
+
+  setOptInt.insert(Optional<int>(3));
+  EXPECT_EQ(setOptInt.end(), setOptInt.find(42));
+  EXPECT_NE(setOptInt.end(), setOptInt.find(3));
+}
+
+} // namespace base
diff --git a/libchrome/base/os_compat_android.cc b/libchrome/base/os_compat_android.cc
new file mode 100644
index 0000000..1eb6536
--- /dev/null
+++ b/libchrome/base/os_compat_android.cc
@@ -0,0 +1,177 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/os_compat_android.h"
+
+#include <asm/unistd.h>
+#include <errno.h>
+#include <limits.h>
+#include <math.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+
+#if !defined(__LP64__)
+#include <time64.h>
+#endif
+
+#include "base/rand_util.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/stringprintf.h"
+
+extern "C" {
+// There is no futimes() available in Bionic, so we provide our own
+// implementation until it is there.
+int futimes(int fd, const struct timeval tv[2]) {
+  // NULL means "set to current time"; utimensat(2) with a NULL timespec
+  // array does exactly that.
+  if (tv == NULL)
+    return syscall(__NR_utimensat, fd, NULL, NULL, 0);
+
+  // Reject microsecond fields outside [0, 1000000), matching futimes(3).
+  if (tv[0].tv_usec < 0 || tv[0].tv_usec >= 1000000 ||
+      tv[1].tv_usec < 0 || tv[1].tv_usec >= 1000000) {
+    errno = EINVAL;
+    return -1;
+  }
+
+  // Convert timeval to timespec.
+  struct timespec ts[2];
+  ts[0].tv_sec = tv[0].tv_sec;
+  ts[0].tv_nsec = tv[0].tv_usec * 1000;
+  ts[1].tv_sec = tv[1].tv_sec;
+  ts[1].tv_nsec = tv[1].tv_usec * 1000;
+  return syscall(__NR_utimensat, fd, NULL, ts, 0);
+}
+
+#if !defined(__LP64__)
+// 32-bit Android has only timegm64() and not timegm().
+// We replicate the behaviour of timegm() when the result overflows time_t.
+time_t timegm(struct tm* const t) {
+  // time_t is signed on Android; derive its extreme values from the sign
+  // bit. NOTE(review): shifting 1L into the sign bit is technically
+  // undefined behaviour pre-C++14; std::numeric_limits<time_t> would be
+  // the safer spelling -- confirm before changing upstream code.
+  static const time_t kTimeMax = ~(1L << (sizeof(time_t) * CHAR_BIT - 1));
+  static const time_t kTimeMin = (1L << (sizeof(time_t) * CHAR_BIT - 1));
+  time64_t result = timegm64(t);
+  // Report out-of-range results as -1, mirroring timegm()'s overflow
+  // behaviour.
+  if (result < kTimeMin || result > kTimeMax)
+    return -1;
+  return result;
+}
+#endif
+
+// The following is only needed when building with GCC 4.6 or higher
+// (i.e. not with Android GCC 4.4.3, nor with Clang).
+//
+// GCC is now capable of optimizing successive calls to sin() and cos() into
+// a single call to sincos(). This means that source code that looks like:
+//
+//   double c, s;
+//   c = cos(angle);
+//   s = sin(angle);
+//
+// Will generate machine code that looks like:
+//
+//   double c, s;
+//   sincos(angle, &s, &c);
+//
+// Unfortunately, sincos() and friends are not part of the Android libm.so
+// library provided by the NDK for API level 9. When the optimization kicks
+// in, it makes the final build fail with a puzzling message (puzzling
+// because 'sincos' doesn't appear anywhere in the sources!).
+//
+// To solve this, we provide our own implementation of the sincos() function
+// and related friends. Note that we must also explicitly tell GCC to disable
+// optimizations when generating these. Otherwise, the generated machine code
+// for each function would simply end up calling itself, resulting in a
+// runtime crash due to stack overflow.
+//
+#if defined(__GNUC__) && !defined(__clang__) && \
+    !defined(ANDROID_SINCOS_PROVIDED)
+
+// For the record, Clang does not support the 'optimize' attribute.
+// In the unlikely event that it begins performing this optimization too,
+// we'll have to find a different way to achieve this. NOTE: Tested with O1
+// which still performs the optimization.
+//
+#define GCC_NO_OPTIMIZE __attribute__((optimize("O0")))
+
+// Computes sine and cosine of |angle| in one call; substitutes for the
+// missing libm sincos().
+GCC_NO_OPTIMIZE
+void sincos(double angle, double* s, double *c) {
+  *c = cos(angle);
+  *s = sin(angle);
+}
+
+// Single-precision variant of the above.
+GCC_NO_OPTIMIZE
+void sincosf(float angle, float* s, float* c) {
+  *c = cosf(angle);
+  *s = sinf(angle);
+}
+
+#endif // __GNUC__ && !__clang__
+
+// An implementation of mkdtemp, since it is not exposed by the NDK
+// for native API level 9 that we target.
+//
+// For any changes in the mkdtemp function, you should manually run the unittest
+// OsCompatAndroidTest.DISABLED_TestMkdTemp in your local machine to check if it
+// passes. Please don't enable it, since it creates a directory and may be
+// source of flakiness.
+char* mkdtemp(char* path) {
+  if (path == NULL) {
+    errno = EINVAL;
+    return NULL;
+  }
+
+  const int path_len = strlen(path);
+
+  // The last six characters of 'path' must be XXXXXX.
+  const base::StringPiece kSuffix("XXXXXX");
+  const int kSuffixLen = kSuffix.length();
+  if (!base::StringPiece(path, path_len).ends_with(kSuffix)) {
+    errno = EINVAL;
+    return NULL;
+  }
+
+  // If the path contains a directory, as in /tmp/foo/XXXXXXXX, make sure
+  // that /tmp/foo exists, otherwise we're going to loop a really long
+  // time for nothing below
+  char* dirsep = strrchr(path, '/');
+  if (dirsep != NULL) {
+    struct stat st;
+    int ret;
+
+    *dirsep = '\0';  // Terminating directory path temporarily
+
+    ret = stat(path, &st);
+
+    *dirsep = '/';  // Restoring directory separator
+    if (ret < 0)  // Directory probably does not exist
+      return NULL;
+    if (!S_ISDIR(st.st_mode)) {  // Not a directory
+      errno = ENOTDIR;
+      return NULL;
+    }
+  }
+
+  // Max number of tries using different random suffixes.
+  const int kMaxTries = 100;
+
+  // Now loop until we CAN create a directory by that name or we reach the max
+  // number of tries.
+  for (int i = 0; i < kMaxTries; ++i) {
+    // Fill the suffix XXXXXX with a random string composed of a-z chars.
+    for (int pos = 0; pos < kSuffixLen; ++pos) {
+      char rand_char = static_cast<char>(base::RandInt('a', 'z'));
+      path[path_len - kSuffixLen + pos] = rand_char;
+    }
+    if (mkdir(path, 0700) == 0) {
+      // We just created the directory successfully.
+      return path;
+    }
+    if (errno != EEXIST) {
+      // mkdir() failed for a reason other than the name already existing,
+      // so retrying with another suffix cannot help; give up.
+      return NULL;
+    }
+  }
+
+  // We reached the max number of tries.
+  return NULL;
+}
+
+} // extern "C"
diff --git a/libchrome/base/os_compat_android.h b/libchrome/base/os_compat_android.h
new file mode 100644
index 0000000..0f25444
--- /dev/null
+++ b/libchrome/base/os_compat_android.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_OS_COMPAT_ANDROID_H_
+#define BASE_OS_COMPAT_ANDROID_H_
+
+#include <fcntl.h>
+#include <sys/types.h>
+#include <utime.h>
+
+// Not implemented in Bionic.
+extern "C" int futimes(int fd, const struct timeval tv[2]);
+
+// Not exposed or implemented in Bionic.
+extern "C" char* mkdtemp(char* path);
+
+// Android has no timegm().
+extern "C" time_t timegm(struct tm* const t);
+
+// The lockf() function is not available on Android; we translate to flock().
+// Note that flock() locks the whole file, so lockf()'s byte-range argument
+// has no equivalent here and is intentionally ignored.
+#define F_LOCK LOCK_EX
+#define F_ULOCK LOCK_UN
+inline int lockf(int fd, int cmd, off_t ignored_len) {
+  return flock(fd, cmd);
+}
+
+#endif // BASE_OS_COMPAT_ANDROID_H_
diff --git a/libchrome/base/os_compat_android_unittest.cc b/libchrome/base/os_compat_android_unittest.cc
new file mode 100644
index 0000000..7fbdc6d
--- /dev/null
+++ b/libchrome/base/os_compat_android_unittest.cc
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/os_compat_android.h"
+
+#include "base/files/file_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+typedef testing::Test OsCompatAndroidTest;
+
+// Keep this Unittest DISABLED_ , because it actually creates a directory in the
+// device and it may be source of flakiness. For any changes in the mkdtemp
+// function, you should run this unittest in your local machine to check if it
+// passes.
+TEST_F(OsCompatAndroidTest, DISABLED_TestMkdTemp) {
+  FilePath tmp_dir;
+  EXPECT_TRUE(base::GetTempDir(&tmp_dir));
+
+  // Not six XXXXXX at the suffix of the path.
+  FilePath sub_dir = tmp_dir.Append("XX");
+  std::string sub_dir_string = sub_dir.value();
+  // this should be OK since mkdtemp just replaces characters in place
+  // NOTE(review): writing through c_str()'s pointer is only formally
+  // allowed since C++17; &sub_dir_string[0] would be the safer spelling.
+  char* buffer = const_cast<char*>(sub_dir_string.c_str());
+  EXPECT_EQ(NULL, mkdtemp(buffer));
+
+  // Directory does not exist
+  char invalid_path2[] = "doesntoexist/foobarXXXXXX";
+  EXPECT_EQ(NULL, mkdtemp(invalid_path2));
+
+  // Successfully create a tmp dir.
+  FilePath sub_dir2 = tmp_dir.Append("XXXXXX");
+  std::string sub_dir2_string = sub_dir2.value();
+  // this should be OK since mkdtemp just replaces characters in place
+  char* buffer2 = const_cast<char*>(sub_dir2_string.c_str());
+  EXPECT_TRUE(mkdtemp(buffer2) != NULL);
+}
+
+} // namespace base
diff --git a/libchrome/base/pending_task.cc b/libchrome/base/pending_task.cc
new file mode 100644
index 0000000..73834bd
--- /dev/null
+++ b/libchrome/base/pending_task.cc
@@ -0,0 +1,56 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/pending_task.h"
+
+#include "base/tracked_objects.h"
+
+namespace base {
+
+// Builds an immediate (non-delayed) task; the default TimeTicks() run time
+// means "run as soon as dequeued". Immediate tasks are always nestable.
+PendingTask::PendingTask(const tracked_objects::Location& posted_from,
+                         base::Closure task)
+    : base::TrackingInfo(posted_from, TimeTicks()),
+      task(std::move(task)),
+      posted_from(posted_from),
+      sequence_num(0),
+      nestable(true),
+      is_high_res(false) {
+}
+
+// Builds a delayed task that should not run before |delayed_run_time|;
+// |nestable| controls whether it may be dispatched from a nested loop.
+PendingTask::PendingTask(const tracked_objects::Location& posted_from,
+                         base::Closure task,
+                         TimeTicks delayed_run_time,
+                         bool nestable)
+    : base::TrackingInfo(posted_from, delayed_run_time),
+      task(std::move(task)),
+      posted_from(posted_from),
+      sequence_num(0),
+      nestable(nestable),
+      is_high_res(false) {
+}
+
+// PendingTask owns a Closure, so only move operations are provided; the
+// compiler-generated member-wise moves are sufficient.
+PendingTask::PendingTask(PendingTask&& other) = default;
+
+PendingTask::~PendingTask() {
+}
+
+PendingTask& PendingTask::operator=(PendingTask&& other) = default;
+
+// Ordering used by DelayedTaskQueue (a std::priority_queue, i.e. max-heap).
+bool PendingTask::operator<(const PendingTask& other) const {
+  // Since the top of a priority queue is defined as the "greatest" element, we
+  // need to invert the comparison here. We want the smaller time to be at the
+  // top of the heap.
+
+  if (delayed_run_time < other.delayed_run_time)
+    return false;
+
+  if (delayed_run_time > other.delayed_run_time)
+    return true;
+
+  // If the times happen to match, then we use the sequence number to decide.
+  // Compare the difference to support integer roll-over.
+  return (sequence_num - other.sequence_num) > 0;
+}
+
+} // namespace base
diff --git a/libchrome/base/pending_task.h b/libchrome/base/pending_task.h
new file mode 100644
index 0000000..5761653
--- /dev/null
+++ b/libchrome/base/pending_task.h
@@ -0,0 +1,58 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PENDING_TASK_H_
+#define BASE_PENDING_TASK_H_
+
+#include <queue>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/time/time.h"
+#include "base/tracking_info.h"
+
+namespace base {
+
+// Contains data about a pending task. Stored in TaskQueue and DelayedTaskQueue
+// for use by classes that queue and execute tasks.
+struct BASE_EXPORT PendingTask : public TrackingInfo {
+  // Immediate task: runs as soon as it is dequeued.
+  PendingTask(const tracked_objects::Location& posted_from,
+              Closure task);
+  // Delayed task: must not run before |delayed_run_time|.
+  PendingTask(const tracked_objects::Location& posted_from,
+              Closure task,
+              TimeTicks delayed_run_time,
+              bool nestable);
+  // Move-only: PendingTask owns its Closure.
+  PendingTask(PendingTask&& other);
+  ~PendingTask();
+
+  PendingTask& operator=(PendingTask&& other);
+
+  // Used to support sorting. Inverted so that the earliest run time is at
+  // the top of a (max-heap) priority queue; ties break on sequence_num.
+  bool operator<(const PendingTask& other) const;
+
+  // The task to run.
+  Closure task;
+
+  // The site this PendingTask was posted from.
+  tracked_objects::Location posted_from;
+
+  // Secondary sort key for run time.
+  int sequence_num;
+
+  // OK to dispatch from a nested loop.
+  bool nestable;
+
+  // Needs high resolution timers.
+  bool is_high_res;
+};
+
+using TaskQueue = std::queue<PendingTask>;
+
+// PendingTasks are sorted by their |delayed_run_time| property.
+using DelayedTaskQueue = std::priority_queue<base::PendingTask>;
+
+} // namespace base
+
+#endif // BASE_PENDING_TASK_H_
diff --git a/libchrome/base/pickle.cc b/libchrome/base/pickle.cc
new file mode 100644
index 0000000..4ef167b
--- /dev/null
+++ b/libchrome/base/pickle.cc
@@ -0,0 +1,483 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/pickle.h"
+
+#include <stdlib.h>
+
+#include <algorithm> // for max()
+#include <limits>
+
+#include "base/bits.h"
+#include "base/macros.h"
+#include "base/numerics/safe_conversions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// static
+const int Pickle::kPayloadUnit = 64;
+
+// Sentinel capacity marking a read-only Pickle (one wrapping a caller-owned
+// buffer that must never be resized or written).
+static const size_t kCapacityReadOnly = static_cast<size_t>(-1);
+
+// Positions the iterator at the start of |pickle|'s payload; |pickle| must
+// outlive this iterator.
+PickleIterator::PickleIterator(const Pickle& pickle)
+    : payload_(pickle.payload()),
+      read_index_(0),
+      end_index_(pickle.payload_size()) {
+}
+
+// Reads a fixed-size builtin value at the current position. Values wider
+// than uint32_t are memcpy'd because they may not be suitably aligned in
+// the payload; 32-bit-or-smaller values are read directly.
+template <typename Type>
+inline bool PickleIterator::ReadBuiltinType(Type* result) {
+  const char* read_from = GetReadPointerAndAdvance<Type>();
+  if (!read_from)
+    return false;
+  if (sizeof(Type) > sizeof(uint32_t))
+    memcpy(result, read_from, sizeof(*result));
+  else
+    *result = *reinterpret_cast<const Type*>(read_from);
+  return true;
+}
+
+// Advances past |size| bytes, rounded up to uint32_t alignment (the pickle
+// wire format pads every field to 4 bytes). Clamps to the end on overrun so
+// all subsequent reads fail.
+inline void PickleIterator::Advance(size_t size) {
+  size_t aligned_size = bits::Align(size, sizeof(uint32_t));
+  if (end_index_ - read_index_ < aligned_size) {
+    read_index_ = end_index_;
+  } else {
+    read_index_ += aligned_size;
+  }
+}
+
+// Returns a pointer to sizeof(Type) readable bytes and advances, or NULL
+// (poisoning the iterator) if not enough payload remains.
+template<typename Type>
+inline const char* PickleIterator::GetReadPointerAndAdvance() {
+  if (sizeof(Type) > end_index_ - read_index_) {
+    read_index_ = end_index_;
+    return NULL;
+  }
+  const char* current_read_ptr = payload_ + read_index_;
+  Advance(sizeof(Type));
+  return current_read_ptr;
+}
+
+// Variable-length variant; negative sizes are rejected as malformed input.
+const char* PickleIterator::GetReadPointerAndAdvance(int num_bytes) {
+  if (num_bytes < 0 ||
+      end_index_ - read_index_ < static_cast<size_t>(num_bytes)) {
+    read_index_ = end_index_;
+    return NULL;
+  }
+  const char* current_read_ptr = payload_ + read_index_;
+  Advance(num_bytes);
+  return current_read_ptr;
+}
+
+// Element-count variant: fails (returns NULL) if count * element size would
+// overflow the 32-bit byte count used on the wire.
+inline const char* PickleIterator::GetReadPointerAndAdvance(
+    int num_elements,
+    size_t size_element) {
+  // Check for int32_t overflow.
+  int64_t num_bytes = static_cast<int64_t>(num_elements) * size_element;
+  int num_bytes32 = static_cast<int>(num_bytes);
+  if (num_bytes != static_cast<int64_t>(num_bytes32))
+    return NULL;
+  return GetReadPointerAndAdvance(num_bytes32);
+}
+
+// The following Read* methods return false (and poison the iterator) when
+// the payload is exhausted or malformed.
+bool PickleIterator::ReadBool(bool* result) {
+  return ReadBuiltinType(result);
+}
+
+bool PickleIterator::ReadInt(int* result) {
+  return ReadBuiltinType(result);
+}
+
+bool PickleIterator::ReadLong(long* result) {
+  // Always read long as a 64-bit value to ensure compatibility between 32-bit
+  // and 64-bit processes.
+  int64_t result_int64 = 0;
+  if (!ReadBuiltinType(&result_int64))
+    return false;
+  // CHECK if the cast truncates the value so that we know to change this IPC
+  // parameter to use int64_t.
+  *result = base::checked_cast<long>(result_int64);
+  return true;
+}
+
+bool PickleIterator::ReadUInt16(uint16_t* result) {
+  return ReadBuiltinType(result);
+}
+
+bool PickleIterator::ReadUInt32(uint32_t* result) {
+  return ReadBuiltinType(result);
+}
+
+bool PickleIterator::ReadInt64(int64_t* result) {
+  return ReadBuiltinType(result);
+}
+
+bool PickleIterator::ReadUInt64(uint64_t* result) {
+  return ReadBuiltinType(result);
+}
+
+bool PickleIterator::ReadFloat(float* result) {
+  // crbug.com/315213
+  // The source data may not be properly aligned, and unaligned float reads
+  // cause SIGBUS on some ARM platforms, so force using memcpy to copy the data
+  // into the result.
+  const char* read_from = GetReadPointerAndAdvance<float>();
+  if (!read_from)
+    return false;
+  memcpy(result, read_from, sizeof(*result));
+  return true;
+}
+
+bool PickleIterator::ReadDouble(double* result) {
+  // crbug.com/315213
+  // The source data may not be properly aligned, and unaligned double reads
+  // cause SIGBUS on some ARM platforms, so force using memcpy to copy the data
+  // into the result.
+  const char* read_from = GetReadPointerAndAdvance<double>();
+  if (!read_from)
+    return false;
+  memcpy(result, read_from, sizeof(*result));
+  return true;
+}
+
+// Strings are serialized as a length-prefixed (int) run of bytes; this
+// copies the bytes into |result|.
+bool PickleIterator::ReadString(std::string* result) {
+  int len;
+  if (!ReadInt(&len))
+    return false;
+  const char* read_from = GetReadPointerAndAdvance(len);
+  if (!read_from)
+    return false;
+
+  result->assign(read_from, len);
+  return true;
+}
+
+// Zero-copy variant: |result| points into the pickle's own buffer, so it is
+// only valid for the lifetime of the underlying Pickle.
+bool PickleIterator::ReadStringPiece(StringPiece* result) {
+  int len;
+  if (!ReadInt(&len))
+    return false;
+  const char* read_from = GetReadPointerAndAdvance(len);
+  if (!read_from)
+    return false;
+
+  *result = StringPiece(read_from, len);
+  return true;
+}
+
+// UTF-16 strings are length-prefixed in char16 units, not bytes; the
+// two-argument GetReadPointerAndAdvance guards the count * size overflow.
+bool PickleIterator::ReadString16(string16* result) {
+  int len;
+  if (!ReadInt(&len))
+    return false;
+  const char* read_from = GetReadPointerAndAdvance(len, sizeof(char16));
+  if (!read_from)
+    return false;
+
+  result->assign(reinterpret_cast<const char16*>(read_from), len);
+  return true;
+}
+
+// Zero-copy UTF-16 variant; same lifetime caveat as ReadStringPiece.
+bool PickleIterator::ReadStringPiece16(StringPiece16* result) {
+  int len;
+  if (!ReadInt(&len))
+    return false;
+  const char* read_from = GetReadPointerAndAdvance(len, sizeof(char16));
+  if (!read_from)
+    return false;
+
+  *result = StringPiece16(reinterpret_cast<const char16*>(read_from), len);
+  return true;
+}
+
+// Reads a length-prefixed blob; |*data| points into the pickle's buffer.
+bool PickleIterator::ReadData(const char** data, int* length) {
+  *length = 0;
+  *data = 0;
+
+  if (!ReadInt(length))
+    return false;
+
+  return ReadBytes(data, *length);
+}
+
+// Reads exactly |length| raw bytes (no length prefix on the wire).
+bool PickleIterator::ReadBytes(const char** data, int length) {
+  const char* read_from = GetReadPointerAndAdvance(length);
+  if (!read_from)
+    return false;
+  *data = read_from;
+  return true;
+}
+
+// PickleSizer mirrors Pickle's Write* methods to precompute the payload
+// size without allocating; each Add* must match the corresponding Write*.
+PickleSizer::PickleSizer() {}
+
+PickleSizer::~PickleSizer() {}
+
+void PickleSizer::AddString(const StringPiece& value) {
+  AddInt();  // Length prefix.
+  AddBytes(static_cast<int>(value.size()));
+}
+
+void PickleSizer::AddString16(const StringPiece16& value) {
+  AddInt();  // Length prefix, counted in char16 units on the wire.
+  AddBytes(static_cast<int>(value.size() * sizeof(char16)));
+}
+
+void PickleSizer::AddData(int length) {
+  CHECK_GE(length, 0);
+  AddInt();  // Length prefix.
+  AddBytes(length);
+}
+
+void PickleSizer::AddBytes(int length) {
+  // Every field is padded to uint32_t alignment, matching Pickle's writer.
+  payload_size_ += bits::Align(length, sizeof(uint32_t));
+}
+
+void PickleSizer::AddAttachment() {
+  // From IPC::Message::WriteAttachment
+  AddBool();
+  AddInt();
+}
+
+template <size_t length> void PickleSizer::AddBytesStatic() {
+  DCHECK_LE(length, static_cast<size_t>(std::numeric_limits<int>::max()));
+  AddBytes(length);
+}
+
+// Explicit instantiations for the fixed widths used by the IPC layer.
+template void PickleSizer::AddBytesStatic<2>();
+template void PickleSizer::AddBytesStatic<4>();
+template void PickleSizer::AddBytesStatic<8>();
+
+Pickle::Attachment::Attachment() {}
+
+Pickle::Attachment::~Attachment() {}
+
+// Payload is uint32_t aligned.
+
+// Builds an empty writable Pickle with the default Header.
+Pickle::Pickle()
+    : header_(NULL),
+      header_size_(sizeof(Header)),
+      capacity_after_header_(0),
+      write_offset_(0) {
+  static_assert((Pickle::kPayloadUnit & (Pickle::kPayloadUnit - 1)) == 0,
+                "Pickle::kPayloadUnit must be a power of two");
+  Resize(kPayloadUnit);
+  header_->payload_size = 0;
+}
+
+// Builds an empty writable Pickle with a caller-extended header of
+// |header_size| bytes (rounded up to uint32_t alignment).
+Pickle::Pickle(int header_size)
+    : header_(NULL),
+      header_size_(bits::Align(header_size, sizeof(uint32_t))),
+      capacity_after_header_(0),
+      write_offset_(0) {
+  DCHECK_GE(static_cast<size_t>(header_size), sizeof(Header));
+  DCHECK_LE(header_size, kPayloadUnit);
+  Resize(kPayloadUnit);
+  header_->payload_size = 0;
+}
+
+// Wraps an existing serialized buffer read-only (no copy); the header size
+// is inferred as data_len - payload_size and validated. On any
+// inconsistency the Pickle is left with a NULL header, i.e. unusable.
+Pickle::Pickle(const char* data, int data_len)
+    : header_(reinterpret_cast<Header*>(const_cast<char*>(data))),
+      header_size_(0),
+      capacity_after_header_(kCapacityReadOnly),
+      write_offset_(0) {
+  if (data_len >= static_cast<int>(sizeof(Header)))
+    header_size_ = data_len - header_->payload_size;
+
+  if (header_size_ > static_cast<unsigned int>(data_len))
+    header_size_ = 0;
+
+  if (header_size_ != bits::Align(header_size_, sizeof(uint32_t)))
+    header_size_ = 0;
+
+  // If there is anything wrong with the data, we're not going to use it.
+  if (!header_size_)
+    header_ = NULL;
+}
+
+// Deep copy: allocates fresh storage and copies header plus payload, so the
+// copy is writable even when |other| wraps a read-only buffer.
+Pickle::Pickle(const Pickle& other)
+    : header_(NULL),
+      header_size_(other.header_size_),
+      capacity_after_header_(0),
+      write_offset_(other.write_offset_) {
+  Resize(other.header_->payload_size);
+  memcpy(header_, other.header_, header_size_ + other.header_->payload_size);
+}
+
+// Read-only pickles never own their buffer, so only writable ones free it.
+Pickle::~Pickle() {
+  if (capacity_after_header_ != kCapacityReadOnly)
+    free(header_);
+}
+
+// Deep-copy assignment. A read-only target drops its borrowed buffer and
+// becomes writable; mismatched header sizes force a fresh allocation.
+Pickle& Pickle::operator=(const Pickle& other) {
+  if (this == &other) {
+    NOTREACHED();
+    return *this;
+  }
+  if (capacity_after_header_ == kCapacityReadOnly) {
+    header_ = NULL;
+    capacity_after_header_ = 0;
+  }
+  if (header_size_ != other.header_size_) {
+    free(header_);
+    header_ = NULL;
+    header_size_ = other.header_size_;
+  }
+  Resize(other.header_->payload_size);
+  memcpy(header_, other.header_,
+         other.header_size_ + other.header_->payload_size);
+  write_offset_ = other.write_offset_;
+  return *this;
+}
+
+// Strings are written as an int length prefix followed by the raw bytes
+// (padded to 4-byte alignment by WriteBytes).
+bool Pickle::WriteString(const StringPiece& value) {
+  if (!WriteInt(static_cast<int>(value.size())))
+    return false;
+
+  return WriteBytes(value.data(), static_cast<int>(value.size()));
+}
+
+// UTF-16 strings use a length prefix in char16 units but a byte count for
+// the payload copy.
+bool Pickle::WriteString16(const StringPiece16& value) {
+  if (!WriteInt(static_cast<int>(value.size())))
+    return false;
+
+  return WriteBytes(value.data(),
+                    static_cast<int>(value.size()) * sizeof(char16));
+}
+
+// Length-prefixed blob; rejects negative lengths.
+bool Pickle::WriteData(const char* data, int length) {
+  return length >= 0 && WriteInt(length) && WriteBytes(data, length);
+}
+
+// Raw bytes with no length prefix; the reader must know the size.
+bool Pickle::WriteBytes(const void* data, int length) {
+  WriteBytesCommon(data, length);
+  return true;
+}
+
+// Pre-grows capacity so that |length| more aligned bytes fit without
+// another reallocation.
+void Pickle::Reserve(size_t length) {
+  size_t data_len = bits::Align(length, sizeof(uint32_t));
+  DCHECK_GE(data_len, length);
+#ifdef ARCH_CPU_64_BITS
+  DCHECK_LE(data_len, std::numeric_limits<uint32_t>::max());
+#endif
+  DCHECK_LE(write_offset_, std::numeric_limits<uint32_t>::max() - data_len);
+  size_t new_size = write_offset_ + data_len;
+  if (new_size > capacity_after_header_)
+    Resize(capacity_after_header_ * 2 + new_size);
+}
+
+// Attachments are unsupported by base Pickle; IPC::Message overrides these.
+bool Pickle::WriteAttachment(scoped_refptr<Attachment> /*attachment*/) {
+  return false;
+}
+
+bool Pickle::ReadAttachment(base::PickleIterator* /*iter*/,
+                            scoped_refptr<Attachment>* /*attachment*/) const {
+  return false;
+}
+
+bool Pickle::HasAttachments() const {
+  return false;
+}
+
+// Grows (never shrinks below request) the payload buffer to an aligned
+// capacity; CHECKs that the pickle is writable and the allocation succeeds.
+void Pickle::Resize(size_t new_capacity) {
+  CHECK_NE(capacity_after_header_, kCapacityReadOnly);
+  capacity_after_header_ = bits::Align(new_capacity, kPayloadUnit);
+  void* p = realloc(header_, GetTotalAllocatedSize());
+  CHECK(p);
+  header_ = reinterpret_cast<Header*>(p);
+}
+
+// Reserves |num_bytes| in the payload and returns a zero-initialized
+// pointer to them for the caller to fill in.
+void* Pickle::ClaimBytes(size_t num_bytes) {
+  void* p = ClaimUninitializedBytesInternal(num_bytes);
+  CHECK(p);
+  memset(p, 0, num_bytes);
+  return p;
+}
+
+// Read-only pickles own no memory, so they report zero allocated bytes.
+size_t Pickle::GetTotalAllocatedSize() const {
+  if (capacity_after_header_ == kCapacityReadOnly)
+    return 0;
+  return header_size_ + capacity_after_header_;
+}
+
+// static
+// Returns a pointer just past the next complete pickle in [start, end), or
+// NULL if the buffer does not yet contain a whole pickle.
+const char* Pickle::FindNext(size_t header_size,
+                             const char* start,
+                             const char* end) {
+  size_t pickle_size = 0;
+  if (!PeekNext(header_size, start, end, &pickle_size))
+    return NULL;
+
+  if (pickle_size > static_cast<size_t>(end - start))
+    return NULL;
+
+  return start + pickle_size;
+}
+
+// static
+// Reads just the header to report the total size of the next pickle without
+// requiring the full payload to be present yet.
+bool Pickle::PeekNext(size_t header_size,
+                      const char* start,
+                      const char* end,
+                      size_t* pickle_size) {
+  DCHECK_EQ(header_size, bits::Align(header_size, sizeof(uint32_t)));
+  DCHECK_GE(header_size, sizeof(Header));
+  DCHECK_LE(header_size, static_cast<size_t>(kPayloadUnit));
+
+  size_t length = static_cast<size_t>(end - start);
+  if (length < sizeof(Header))
+    return false;
+
+  const Header* hdr = reinterpret_cast<const Header*>(start);
+  if (length < header_size)
+    return false;
+
+  if (hdr->payload_size > std::numeric_limits<size_t>::max() - header_size) {
+    // If payload_size causes an overflow, we return maximum possible
+    // pickle size to indicate that.
+    *pickle_size = std::numeric_limits<size_t>::max();
+  } else {
+    *pickle_size = header_size + hdr->payload_size;
+  }
+  return true;
+}
+
+// Fixed-width write helper; instantiated below for the sizes the IPC layer
+// uses so the template body can live in this .cc file.
+template <size_t length> void Pickle::WriteBytesStatic(const void* data) {
+  WriteBytesCommon(data, length);
+}
+
+template void Pickle::WriteBytesStatic<2>(const void* data);
+template void Pickle::WriteBytesStatic<4>(const void* data);
+template void Pickle::WriteBytesStatic<8>(const void* data);
+
+// Reserves |length| bytes (padded to 4-byte alignment) at the write cursor,
+// growing the buffer geometrically if needed, and returns a pointer for the
+// caller to fill. Only the padding is initialized here.
+inline void* Pickle::ClaimUninitializedBytesInternal(size_t length) {
+  DCHECK_NE(kCapacityReadOnly, capacity_after_header_)
+      << "oops: pickle is readonly";
+  size_t data_len = bits::Align(length, sizeof(uint32_t));
+  DCHECK_GE(data_len, length);
+#ifdef ARCH_CPU_64_BITS
+  DCHECK_LE(data_len, std::numeric_limits<uint32_t>::max());
+#endif
+  DCHECK_LE(write_offset_, std::numeric_limits<uint32_t>::max() - data_len);
+  size_t new_size = write_offset_ + data_len;
+  if (new_size > capacity_after_header_) {
+    // Double the capacity, then round large buffers so the total allocation
+    // lands near a page boundary to reduce allocator waste.
+    size_t new_capacity = capacity_after_header_ * 2;
+    const size_t kPickleHeapAlign = 4096;
+    if (new_capacity > kPickleHeapAlign)
+      new_capacity = bits::Align(new_capacity, kPickleHeapAlign) - kPayloadUnit;
+    Resize(std::max(new_capacity, new_size));
+  }
+
+  char* write = mutable_payload() + write_offset_;
+  memset(write + length, 0, data_len - length);  // Always initialize padding
+  header_->payload_size = static_cast<uint32_t>(new_size);
+  write_offset_ = new_size;
+  return write;
+}
+
+// Copies |length| caller-provided bytes into freshly claimed payload space.
+inline void Pickle::WriteBytesCommon(const void* data, size_t length) {
+  DCHECK_NE(kCapacityReadOnly, capacity_after_header_)
+      << "oops: pickle is readonly";
+  MSAN_CHECK_MEM_IS_INITIALIZED(data, length);
+  void* write = ClaimUninitializedBytesInternal(length);
+  memcpy(write, data, length);
+}
+
+} // namespace base
diff --git a/libchrome/base/pickle.h b/libchrome/base/pickle.h
new file mode 100644
index 0000000..40f5d26
--- /dev/null
+++ b/libchrome/base/pickle.h
@@ -0,0 +1,385 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PICKLE_H_
+#define BASE_PICKLE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+
+#if defined(OS_POSIX)
+#include "base/files/file.h"
+#endif
+
+namespace base {
+
+class Pickle;
+
+// PickleIterator reads data from a Pickle. The Pickle object must remain valid
+// while the PickleIterator object is in use.
+class BASE_EXPORT PickleIterator {
+ public:
+  // A default-constructed iterator has an empty payload, so every read fails.
+  PickleIterator() : payload_(NULL), read_index_(0), end_index_(0) {}
+  explicit PickleIterator(const Pickle& pickle);
+
+  // Methods for reading the payload of the Pickle. To read from the start of
+  // the Pickle, create a PickleIterator from a Pickle. If successful, these
+  // methods return true. Otherwise, false is returned to indicate that the
+  // result could not be extracted. It is not possible to read from the iterator
+  // after that.
+  bool ReadBool(bool* result) WARN_UNUSED_RESULT;
+  bool ReadInt(int* result) WARN_UNUSED_RESULT;
+  bool ReadLong(long* result) WARN_UNUSED_RESULT;
+  bool ReadUInt16(uint16_t* result) WARN_UNUSED_RESULT;
+  bool ReadUInt32(uint32_t* result) WARN_UNUSED_RESULT;
+  bool ReadInt64(int64_t* result) WARN_UNUSED_RESULT;
+  bool ReadUInt64(uint64_t* result) WARN_UNUSED_RESULT;
+  bool ReadFloat(float* result) WARN_UNUSED_RESULT;
+  bool ReadDouble(double* result) WARN_UNUSED_RESULT;
+  bool ReadString(std::string* result) WARN_UNUSED_RESULT;
+  // The StringPiece data will only be valid for the lifetime of the message.
+  bool ReadStringPiece(StringPiece* result) WARN_UNUSED_RESULT;
+  bool ReadString16(string16* result) WARN_UNUSED_RESULT;
+  // The StringPiece16 data will only be valid for the lifetime of the message.
+  bool ReadStringPiece16(StringPiece16* result) WARN_UNUSED_RESULT;
+
+  // A pointer to the data will be placed in |*data|, and the length will be
+  // placed in |*length|. The pointer placed into |*data| points into the
+  // message's buffer so it will be scoped to the lifetime of the message (or
+  // until the message data is mutated). Do not keep the pointer around!
+  bool ReadData(const char** data, int* length) WARN_UNUSED_RESULT;
+
+  // A pointer to the data will be placed in |*data|. The caller specifies the
+  // number of bytes to read, and ReadBytes will validate this length. The
+  // pointer placed into |*data| points into the message's buffer so it will be
+  // scoped to the lifetime of the message (or until the message data is
+  // mutated). Do not keep the pointer around!
+  bool ReadBytes(const char** data, int length) WARN_UNUSED_RESULT;
+
+  // A safer version of ReadInt() that checks for the result not being negative.
+  // Use it for reading the object sizes.
+  bool ReadLength(int* result) WARN_UNUSED_RESULT {
+    return ReadInt(result) && *result >= 0;
+  }
+
+  // Skips bytes in the read buffer and returns true if there are at least
+  // num_bytes available. Otherwise, does nothing and returns false.
+  bool SkipBytes(int num_bytes) WARN_UNUSED_RESULT {
+    return !!GetReadPointerAndAdvance(num_bytes);
+  }
+
+ private:
+  // Read Type from Pickle.
+  template <typename Type>
+  bool ReadBuiltinType(Type* result);
+
+  // Advance read_index_ but do not allow it to exceed end_index_.
+  // Keeps read_index_ aligned.
+  void Advance(size_t size);
+
+  // Get read pointer for Type and advance read pointer.
+  template<typename Type>
+  const char* GetReadPointerAndAdvance();
+
+  // Get read pointer for |num_bytes| and advance read pointer. This method
+  // checks num_bytes for negativity and wrapping.
+  const char* GetReadPointerAndAdvance(int num_bytes);
+
+  // Get read pointer for (num_elements * size_element) bytes and advance read
+  // pointer. This method checks for int overflow, negativity and wrapping.
+  const char* GetReadPointerAndAdvance(int num_elements,
+                                       size_t size_element);
+
+  const char* payload_;  // Start of our pickle's payload.
+  size_t read_index_;  // Offset of the next readable byte in payload.
+  size_t end_index_;  // Payload size.
+
+  FRIEND_TEST_ALL_PREFIXES(PickleTest, GetReadPointerAndAdvance);
+};
+
+// This class provides an interface analogous to base::Pickle's WriteFoo()
+// methods and can be used to accurately compute the size of a hypothetical
+// Pickle's payload without having to reference the Pickle implementation.
+class BASE_EXPORT PickleSizer {
+ public:
+  PickleSizer();
+  ~PickleSizer();
+
+  // Returns the computed size of the payload.
+  size_t payload_size() const { return payload_size_; }
+
+  void AddBool() { return AddInt(); }
+  void AddInt() { AddPOD<int>(); }
+  // Sized as 64 bits to match Pickle::WriteLong(), which always serializes a
+  // long as int64_t for 32/64-bit process compatibility.
+  void AddLong() { AddPOD<uint64_t>(); }
+  void AddUInt16() { return AddPOD<uint16_t>(); }
+  void AddUInt32() { return AddPOD<uint32_t>(); }
+  void AddInt64() { return AddPOD<int64_t>(); }
+  void AddUInt64() { return AddPOD<uint64_t>(); }
+  void AddFloat() { return AddPOD<float>(); }
+  void AddDouble() { return AddPOD<double>(); }
+  void AddString(const StringPiece& value);
+  void AddString16(const StringPiece16& value);
+  void AddData(int length);
+  void AddBytes(int length);
+  void AddAttachment();
+
+ private:
+  // Just like AddBytes() but with a compile-time size for performance.
+  template<size_t length> void BASE_EXPORT AddBytesStatic();
+
+  // Accounts for a plain-old-data value by its compile-time size.
+  template <typename T>
+  void AddPOD() { AddBytesStatic<sizeof(T)>(); }
+
+  size_t payload_size_ = 0;
+};
+
+// This class provides facilities for basic binary value packing and unpacking.
+//
+// The Pickle class supports appending primitive values (ints, strings, etc.)
+// to a pickle instance. The Pickle instance grows its internal memory buffer
+// dynamically to hold the sequence of primitive values. The internal memory
+// buffer is exposed as the "data" of the Pickle. This "data" can be passed
+// to a Pickle object to initialize it for reading.
+//
+// When reading from a Pickle object, it is important for the consumer to know
+// what value types to read and in what order to read them as the Pickle does
+// not keep track of the type of data written to it.
+//
+// The Pickle's data has a header which contains the size of the Pickle's
+// payload. It can optionally support additional space in the header. That
+// space is controlled by the header_size parameter passed to the Pickle
+// constructor.
+//
+class BASE_EXPORT Pickle {
+ public:
+  // Auxiliary data attached to a Pickle. Pickle must be subclassed along with
+  // this interface in order to provide a concrete implementation of support
+  // for attachments. The base Pickle implementation does not accept
+  // attachments.
+  class BASE_EXPORT Attachment : public RefCountedThreadSafe<Attachment> {
+   public:
+    Attachment();
+
+   protected:
+    friend class RefCountedThreadSafe<Attachment>;
+    virtual ~Attachment();
+
+    DISALLOW_COPY_AND_ASSIGN(Attachment);
+  };
+
+  // Initialize a Pickle object using the default header size.
+  Pickle();
+
+  // Initialize a Pickle object with the specified header size in bytes, which
+  // must be greater-than-or-equal-to sizeof(Pickle::Header). The header size
+  // will be rounded up to ensure that the header size is 32bit-aligned.
+  explicit Pickle(int header_size);
+
+  // Initializes a Pickle from a const block of data. The data is not copied;
+  // instead the data is merely referenced by this Pickle. Only const methods
+  // should be used on the Pickle when initialized this way. The header
+  // padding size is deduced from the data length.
+  Pickle(const char* data, int data_len);
+
+  // Initializes a Pickle as a deep copy of another Pickle.
+  Pickle(const Pickle& other);
+
+  // Note: There are no virtual methods in this class. This destructor is
+  // virtual as an element of defensive coding. Other classes have derived from
+  // this class, and there is a *chance* that they will cast into this base
+  // class before destruction. At least one such class does have a virtual
+  // destructor, suggesting at least some need to call more derived destructors.
+  virtual ~Pickle();
+
+  // Performs a deep copy.
+  Pickle& operator=(const Pickle& other);
+
+  // Returns the number of bytes written in the Pickle, including the header.
+  size_t size() const { return header_size_ + header_->payload_size; }
+
+  // Returns the data for this Pickle: the header followed by the payload.
+  const void* data() const { return header_; }
+
+  // Returns the effective memory capacity of this Pickle, that is, the total
+  // number of bytes currently dynamically allocated or 0 in the case of a
+  // read-only Pickle. This should be used only for diagnostic / profiling
+  // purposes.
+  size_t GetTotalAllocatedSize() const;
+
+  // Methods for adding to the payload of the Pickle. These values are
+  // appended to the end of the Pickle's payload. When reading values from a
+  // Pickle, it is important to read them in the order in which they were added
+  // to the Pickle.
+
+  bool WriteBool(bool value) {
+    return WriteInt(value ? 1 : 0);
+  }
+  bool WriteInt(int value) {
+    return WritePOD(value);
+  }
+  bool WriteLong(long value) {
+    // Always write long as a 64-bit value to ensure compatibility between
+    // 32-bit and 64-bit processes.
+    return WritePOD(static_cast<int64_t>(value));
+  }
+  bool WriteUInt16(uint16_t value) { return WritePOD(value); }
+  bool WriteUInt32(uint32_t value) { return WritePOD(value); }
+  bool WriteInt64(int64_t value) { return WritePOD(value); }
+  bool WriteUInt64(uint64_t value) { return WritePOD(value); }
+  bool WriteFloat(float value) {
+    return WritePOD(value);
+  }
+  bool WriteDouble(double value) {
+    return WritePOD(value);
+  }
+  bool WriteString(const StringPiece& value);
+  bool WriteString16(const StringPiece16& value);
+  // "Data" is a blob with a length. When you read it out you will be given the
+  // length. See also WriteBytes.
+  bool WriteData(const char* data, int length);
+  // "Bytes" is a blob with no length. The caller must specify the length both
+  // when reading and writing. It is normally used to serialize PoD types of a
+  // known size. See also WriteData.
+  bool WriteBytes(const void* data, int length);
+
+  // WriteAttachment appends |attachment| to the pickle. It returns
+  // false iff the set is full or if the Pickle implementation does not support
+  // attachments.
+  virtual bool WriteAttachment(scoped_refptr<Attachment> attachment);
+
+  // ReadAttachment parses an attachment given the parsing state |iter| and
+  // writes it to |*attachment|. It returns true on success.
+  virtual bool ReadAttachment(base::PickleIterator* iter,
+                              scoped_refptr<Attachment>* attachment) const;
+
+  // Indicates whether the pickle has any attachments.
+  virtual bool HasAttachments() const;
+
+  // Reserves space for upcoming writes when multiple writes will be made and
+  // their sizes are computed in advance. It can be significantly faster to call
+  // Reserve() before calling WriteFoo() multiple times.
+  void Reserve(size_t additional_capacity);
+
+  // Payload follows after allocation of Header (header size is customizable).
+  struct Header {
+    uint32_t payload_size;  // Specifies the size of the payload.
+  };
+
+  // Returns the header, cast to a user-specified type T. The type T must be a
+  // subclass of Header and its size must correspond to the header_size passed
+  // to the Pickle constructor.
+  template <class T>
+  T* headerT() {
+    DCHECK_EQ(header_size_, sizeof(T));
+    return static_cast<T*>(header_);
+  }
+  template <class T>
+  const T* headerT() const {
+    DCHECK_EQ(header_size_, sizeof(T));
+    return static_cast<const T*>(header_);
+  }
+
+  // The payload is the pickle data immediately following the header.
+  size_t payload_size() const {
+    return header_ ? header_->payload_size : 0;
+  }
+
+  const char* payload() const {
+    return reinterpret_cast<const char*>(header_) + header_size_;
+  }
+
+  // Returns the address of the byte immediately following the currently valid
+  // header + payload.
+  const char* end_of_payload() const {
+    // This object may be invalid.
+    return header_ ? payload() + payload_size() : NULL;
+  }
+
+ protected:
+  char* mutable_payload() {
+    return reinterpret_cast<char*>(header_) + header_size_;
+  }
+
+  size_t capacity_after_header() const {
+    return capacity_after_header_;
+  }
+
+  // Resize the capacity, note that the input value should not include the size
+  // of the header.
+  void Resize(size_t new_capacity);
+
+  // Claims |num_bytes| bytes of payload. This is similar to Reserve() in that
+  // it may grow the capacity, but it also advances the write offset of the
+  // pickle by |num_bytes|. Claimed memory, including padding, is zeroed.
+  //
+  // Returns the address of the first byte claimed.
+  void* ClaimBytes(size_t num_bytes);
+
+  // Find the end of the pickled data that starts at range_start. Returns NULL
+  // if the entire Pickle is not found in the given data range.
+  static const char* FindNext(size_t header_size,
+                              const char* range_start,
+                              const char* range_end);
+
+  // Parse pickle header and return total size of the pickle. Data range
+  // doesn't need to contain entire pickle.
+  // Returns true if pickle header was found and parsed. Callers must check
+  // returned |pickle_size| for sanity (against maximum message size, etc).
+  // NOTE: when function successfully parses a header, but encounters an
+  // overflow during pickle size calculation, it sets |pickle_size| to the
+  // maximum size_t value and returns true.
+  static bool PeekNext(size_t header_size,
+                       const char* range_start,
+                       const char* range_end,
+                       size_t* pickle_size);
+
+  // The allocation granularity of the payload.
+  static const int kPayloadUnit;
+
+ private:
+  // PickleIterator reads directly out of this Pickle's buffer.
+  friend class PickleIterator;
+
+  // Header plus any subclass header data; the payload follows immediately
+  // after (see payload()).
+  Header* header_;
+  size_t header_size_;  // Supports extra data between header and payload.
+  // Allocation size of payload (or -1 if allocation is const). Note: this
+  // doesn't count the header.
+  size_t capacity_after_header_;
+  // The offset at which we will write the next field. Note: this doesn't count
+  // the header.
+  size_t write_offset_;
+
+  // Just like WriteBytes, but with a compile-time size, for performance.
+  template<size_t length> void BASE_EXPORT WriteBytesStatic(const void* data);
+
+  // Writes a POD by copying its bytes.
+  template <typename T> bool WritePOD(const T& data) {
+    WriteBytesStatic<sizeof(data)>(&data);
+    return true;
+  }
+
+  inline void* ClaimUninitializedBytesInternal(size_t num_bytes);
+  inline void WriteBytesCommon(const void* data, size_t length);
+
+  FRIEND_TEST_ALL_PREFIXES(PickleTest, DeepCopyResize);
+  FRIEND_TEST_ALL_PREFIXES(PickleTest, Resize);
+  FRIEND_TEST_ALL_PREFIXES(PickleTest, PeekNext);
+  FRIEND_TEST_ALL_PREFIXES(PickleTest, PeekNextOverflow);
+  FRIEND_TEST_ALL_PREFIXES(PickleTest, FindNext);
+  FRIEND_TEST_ALL_PREFIXES(PickleTest, FindNextWithIncompleteHeader);
+  FRIEND_TEST_ALL_PREFIXES(PickleTest, FindNextOverflow);
+};
+
+} // namespace base
+
+#endif // BASE_PICKLE_H_
diff --git a/libchrome/base/pickle_unittest.cc b/libchrome/base/pickle_unittest.cc
new file mode 100644
index 0000000..e00edd9
--- /dev/null
+++ b/libchrome/base/pickle_unittest.cc
@@ -0,0 +1,668 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/pickle.h"
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+#include <memory>
+#include <string>
+
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+const bool testbool1 = false;
+const bool testbool2 = true;
+const int testint = 2093847192;
+const long testlong = 1093847192;
+const uint16_t testuint16 = 32123;
+const uint32_t testuint32 = 1593847192;
+const int64_t testint64 = -0x7E8CA9253104BDFCLL;
+const uint64_t testuint64 = 0xCE8CA9253104BDF7ULL;
+const float testfloat = 3.1415926935f;
+const double testdouble = 2.71828182845904523;
+const std::string teststring("Hello world"); // note non-aligned string length
+const std::wstring testwstring(L"Hello, world");
+const string16 teststring16(ASCIIToUTF16("Hello, world"));
+const char testrawstring[] = "Hello new world"; // Test raw string writing
+// Test raw char16 writing, assumes UTF16 encoding is ANSI for alpha chars.
+const char16 testrawstring16[] = {'A', 'l', 'o', 'h', 'a', 0};
+const char testdata[] = "AAA\0BBB\0";
+const int testdatalen = arraysize(testdata) - 1;
+
+// Reads back, in write order, every value written by PickleTest.EncodeDecode
+// and checks each against the test constants above; finally verifies that a
+// read past the end of the payload fails.
+void VerifyResult(const Pickle& pickle) {
+  PickleIterator iter(pickle);
+
+  bool outbool;
+  EXPECT_TRUE(iter.ReadBool(&outbool));
+  EXPECT_FALSE(outbool);
+  EXPECT_TRUE(iter.ReadBool(&outbool));
+  EXPECT_TRUE(outbool);
+
+  int outint;
+  EXPECT_TRUE(iter.ReadInt(&outint));
+  EXPECT_EQ(testint, outint);
+
+  long outlong;
+  EXPECT_TRUE(iter.ReadLong(&outlong));
+  EXPECT_EQ(testlong, outlong);
+
+  uint16_t outuint16;
+  EXPECT_TRUE(iter.ReadUInt16(&outuint16));
+  EXPECT_EQ(testuint16, outuint16);
+
+  uint32_t outuint32;
+  EXPECT_TRUE(iter.ReadUInt32(&outuint32));
+  EXPECT_EQ(testuint32, outuint32);
+
+  int64_t outint64;
+  EXPECT_TRUE(iter.ReadInt64(&outint64));
+  EXPECT_EQ(testint64, outint64);
+
+  uint64_t outuint64;
+  EXPECT_TRUE(iter.ReadUInt64(&outuint64));
+  EXPECT_EQ(testuint64, outuint64);
+
+  float outfloat;
+  EXPECT_TRUE(iter.ReadFloat(&outfloat));
+  EXPECT_EQ(testfloat, outfloat);
+
+  double outdouble;
+  EXPECT_TRUE(iter.ReadDouble(&outdouble));
+  EXPECT_EQ(testdouble, outdouble);
+
+  std::string outstring;
+  EXPECT_TRUE(iter.ReadString(&outstring));
+  EXPECT_EQ(teststring, outstring);
+
+  string16 outstring16;
+  EXPECT_TRUE(iter.ReadString16(&outstring16));
+  EXPECT_EQ(teststring16, outstring16);
+
+  // StringPiece results point into the pickle's own buffer.
+  StringPiece outstringpiece;
+  EXPECT_TRUE(iter.ReadStringPiece(&outstringpiece));
+  EXPECT_EQ(testrawstring, outstringpiece);
+
+  StringPiece16 outstringpiece16;
+  EXPECT_TRUE(iter.ReadStringPiece16(&outstringpiece16));
+  EXPECT_EQ(testrawstring16, outstringpiece16);
+
+  const char* outdata;
+  int outdatalen;
+  EXPECT_TRUE(iter.ReadData(&outdata, &outdatalen));
+  EXPECT_EQ(testdatalen, outdatalen);
+  EXPECT_EQ(memcmp(testdata, outdata, outdatalen), 0);
+
+  // reads past the end should fail
+  EXPECT_FALSE(iter.ReadInt(&outint));
+}
+
+} // namespace
+
+TEST(PickleTest, EncodeDecode) {
+ Pickle pickle;
+
+ EXPECT_TRUE(pickle.WriteBool(testbool1));
+ EXPECT_TRUE(pickle.WriteBool(testbool2));
+ EXPECT_TRUE(pickle.WriteInt(testint));
+ EXPECT_TRUE(pickle.WriteLong(testlong));
+ EXPECT_TRUE(pickle.WriteUInt16(testuint16));
+ EXPECT_TRUE(pickle.WriteUInt32(testuint32));
+ EXPECT_TRUE(pickle.WriteInt64(testint64));
+ EXPECT_TRUE(pickle.WriteUInt64(testuint64));
+ EXPECT_TRUE(pickle.WriteFloat(testfloat));
+ EXPECT_TRUE(pickle.WriteDouble(testdouble));
+ EXPECT_TRUE(pickle.WriteString(teststring));
+ EXPECT_TRUE(pickle.WriteString16(teststring16));
+ EXPECT_TRUE(pickle.WriteString(testrawstring));
+ EXPECT_TRUE(pickle.WriteString16(testrawstring16));
+ EXPECT_TRUE(pickle.WriteData(testdata, testdatalen));
+ VerifyResult(pickle);
+
+ // test copy constructor
+ Pickle pickle2(pickle);
+ VerifyResult(pickle2);
+
+ // test operator=
+ Pickle pickle3;
+ pickle3 = pickle;
+ VerifyResult(pickle3);
+}
+
+// Tests that reading/writing a long works correctly when the source process
+// is 64-bit. We rely on having both 32- and 64-bit trybots to validate both
+// arms of the conditional in this test.
+TEST(PickleTest, LongFrom64Bit) {
+ Pickle pickle;
+ // Under the hood long is always written as a 64-bit value, so simulate a
+ // 64-bit long even on 32-bit architectures by explicitly writing an int64_t.
+ EXPECT_TRUE(pickle.WriteInt64(testint64));
+
+ PickleIterator iter(pickle);
+ long outlong;
+ if (sizeof(long) < sizeof(int64_t)) {
+ // ReadLong() should return false when the original written value can't be
+ // represented as a long.
+#if GTEST_HAS_DEATH_TEST
+ EXPECT_DEATH(ignore_result(iter.ReadLong(&outlong)), "");
+#endif
+ } else {
+ EXPECT_TRUE(iter.ReadLong(&outlong));
+ EXPECT_EQ(testint64, outlong);
+ }
+}
+
+// Tests that we can handle really small buffers.
+TEST(PickleTest, SmallBuffer) {
+ std::unique_ptr<char[]> buffer(new char[1]);
+
+ // We should not touch the buffer.
+ Pickle pickle(buffer.get(), 1);
+
+ PickleIterator iter(pickle);
+ int data;
+ EXPECT_FALSE(iter.ReadInt(&data));
+}
+
+// Tests that we can handle improper headers.
+TEST(PickleTest, BigSize) {
+ int buffer[] = { 0x56035200, 25, 40, 50 };
+
+ Pickle pickle(reinterpret_cast<char*>(buffer), sizeof(buffer));
+
+ PickleIterator iter(pickle);
+ int data;
+ EXPECT_FALSE(iter.ReadInt(&data));
+}
+
+TEST(PickleTest, UnalignedSize) {
+ int buffer[] = { 10, 25, 40, 50 };
+
+ Pickle pickle(reinterpret_cast<char*>(buffer), sizeof(buffer));
+
+ PickleIterator iter(pickle);
+ int data;
+ EXPECT_FALSE(iter.ReadInt(&data));
+}
+
+TEST(PickleTest, ZeroLenStr) {
+ Pickle pickle;
+ EXPECT_TRUE(pickle.WriteString(std::string()));
+
+ PickleIterator iter(pickle);
+ std::string outstr;
+ EXPECT_TRUE(iter.ReadString(&outstr));
+ EXPECT_EQ("", outstr);
+}
+
+TEST(PickleTest, ZeroLenStr16) {
+ Pickle pickle;
+ EXPECT_TRUE(pickle.WriteString16(string16()));
+
+ PickleIterator iter(pickle);
+ std::string outstr;
+ EXPECT_TRUE(iter.ReadString(&outstr));
+ EXPECT_EQ("", outstr);
+}
+
+TEST(PickleTest, BadLenStr) {
+ Pickle pickle;
+ EXPECT_TRUE(pickle.WriteInt(-2));
+
+ PickleIterator iter(pickle);
+ std::string outstr;
+ EXPECT_FALSE(iter.ReadString(&outstr));
+}
+
+TEST(PickleTest, BadLenStr16) {
+ Pickle pickle;
+ EXPECT_TRUE(pickle.WriteInt(-1));
+
+ PickleIterator iter(pickle);
+ string16 outstr;
+ EXPECT_FALSE(iter.ReadString16(&outstr));
+}
+
+TEST(PickleTest, PeekNext) {
+ struct CustomHeader : base::Pickle::Header {
+ int cookies[10];
+ };
+
+ Pickle pickle(sizeof(CustomHeader));
+
+ EXPECT_TRUE(pickle.WriteString("Goooooooooooogle"));
+
+ const char* pickle_data = static_cast<const char*>(pickle.data());
+
+ size_t pickle_size;
+
+ // Data range doesn't contain header
+ EXPECT_FALSE(Pickle::PeekNext(
+ sizeof(CustomHeader),
+ pickle_data,
+ pickle_data + sizeof(CustomHeader) - 1,
+ &pickle_size));
+
+ // Data range contains header
+ EXPECT_TRUE(Pickle::PeekNext(
+ sizeof(CustomHeader),
+ pickle_data,
+ pickle_data + sizeof(CustomHeader),
+ &pickle_size));
+ EXPECT_EQ(pickle_size, pickle.size());
+
+ // Data range contains header and some other data
+ EXPECT_TRUE(Pickle::PeekNext(
+ sizeof(CustomHeader),
+ pickle_data,
+ pickle_data + sizeof(CustomHeader) + 1,
+ &pickle_size));
+ EXPECT_EQ(pickle_size, pickle.size());
+
+ // Data range contains full pickle
+ EXPECT_TRUE(Pickle::PeekNext(
+ sizeof(CustomHeader),
+ pickle_data,
+ pickle_data + pickle.size(),
+ &pickle_size));
+ EXPECT_EQ(pickle_size, pickle.size());
+}
+
+TEST(PickleTest, PeekNextOverflow) {
+ struct CustomHeader : base::Pickle::Header {
+ int cookies[10];
+ };
+
+ CustomHeader header;
+
+ // Check if we can wrap around at all
+ if (sizeof(size_t) > sizeof(header.payload_size))
+ return;
+
+ const char* pickle_data = reinterpret_cast<const char*>(&header);
+
+ size_t pickle_size;
+
+ // Wrapping around is detected and reported as maximum size_t value
+ header.payload_size = static_cast<uint32_t>(
+ 1 - static_cast<int32_t>(sizeof(CustomHeader)));
+ EXPECT_TRUE(Pickle::PeekNext(
+ sizeof(CustomHeader),
+ pickle_data,
+ pickle_data + sizeof(CustomHeader),
+ &pickle_size));
+ EXPECT_EQ(pickle_size, std::numeric_limits<size_t>::max());
+
+ // Ridiculous pickle sizes are fine (callers are supposed to
+ // verify them)
+ header.payload_size =
+ std::numeric_limits<uint32_t>::max() / 2 - sizeof(CustomHeader);
+ EXPECT_TRUE(Pickle::PeekNext(
+ sizeof(CustomHeader),
+ pickle_data,
+ pickle_data + sizeof(CustomHeader),
+ &pickle_size));
+ EXPECT_EQ(pickle_size, std::numeric_limits<uint32_t>::max() / 2);
+}
+
+TEST(PickleTest, FindNext) {
+ Pickle pickle;
+ EXPECT_TRUE(pickle.WriteInt(1));
+ EXPECT_TRUE(pickle.WriteString("Domo"));
+
+ const char* start = reinterpret_cast<const char*>(pickle.data());
+ const char* end = start + pickle.size();
+
+ EXPECT_TRUE(end == Pickle::FindNext(pickle.header_size_, start, end));
+ EXPECT_TRUE(NULL == Pickle::FindNext(pickle.header_size_, start, end - 1));
+ EXPECT_TRUE(end == Pickle::FindNext(pickle.header_size_, start, end + 1));
+}
+
+TEST(PickleTest, FindNextWithIncompleteHeader) {
+ size_t header_size = sizeof(Pickle::Header);
+ std::unique_ptr<char[]> buffer(new char[header_size - 1]);
+ memset(buffer.get(), 0x1, header_size - 1);
+
+ const char* start = buffer.get();
+ const char* end = start + header_size - 1;
+
+ EXPECT_TRUE(NULL == Pickle::FindNext(header_size, start, end));
+}
+
+#if defined(COMPILER_MSVC)
+#pragma warning(push)
+#pragma warning(disable: 4146)
+#endif
+TEST(PickleTest, FindNextOverflow) {
+ size_t header_size = sizeof(Pickle::Header);
+ size_t header_size2 = 2 * header_size;
+ size_t payload_received = 100;
+ std::unique_ptr<char[]> buffer(new char[header_size2 + payload_received]);
+ const char* start = buffer.get();
+ Pickle::Header* header = reinterpret_cast<Pickle::Header*>(buffer.get());
+ const char* end = start + header_size2 + payload_received;
+ // It is impossible to construct an overflow test otherwise.
+ if (sizeof(size_t) > sizeof(header->payload_size) ||
+ sizeof(uintptr_t) > sizeof(header->payload_size))
+ return;
+
+ header->payload_size = -(reinterpret_cast<uintptr_t>(start) + header_size2);
+ EXPECT_TRUE(NULL == Pickle::FindNext(header_size2, start, end));
+
+ header->payload_size = -header_size2;
+ EXPECT_TRUE(NULL == Pickle::FindNext(header_size2, start, end));
+
+ header->payload_size = 0;
+ end = start + header_size;
+ EXPECT_TRUE(NULL == Pickle::FindNext(header_size2, start, end));
+}
+#if defined(COMPILER_MSVC)
+#pragma warning(pop)
+#endif
+
+TEST(PickleTest, GetReadPointerAndAdvance) {
+ Pickle pickle;
+
+ PickleIterator iter(pickle);
+ EXPECT_FALSE(iter.GetReadPointerAndAdvance(1));
+
+ EXPECT_TRUE(pickle.WriteInt(1));
+ EXPECT_TRUE(pickle.WriteInt(2));
+ int bytes = sizeof(int) * 2;
+
+ EXPECT_TRUE(PickleIterator(pickle).GetReadPointerAndAdvance(0));
+ EXPECT_TRUE(PickleIterator(pickle).GetReadPointerAndAdvance(1));
+ EXPECT_FALSE(PickleIterator(pickle).GetReadPointerAndAdvance(-1));
+ EXPECT_TRUE(PickleIterator(pickle).GetReadPointerAndAdvance(bytes));
+ EXPECT_FALSE(PickleIterator(pickle).GetReadPointerAndAdvance(bytes + 1));
+ EXPECT_FALSE(PickleIterator(pickle).GetReadPointerAndAdvance(INT_MAX));
+ EXPECT_FALSE(PickleIterator(pickle).GetReadPointerAndAdvance(INT_MIN));
+}
+
+TEST(PickleTest, Resize) {
+ size_t unit = Pickle::kPayloadUnit;
+ std::unique_ptr<char[]> data(new char[unit]);
+ char* data_ptr = data.get();
+ for (size_t i = 0; i < unit; i++)
+ data_ptr[i] = 'G';
+
+ // construct a message that will be exactly the size of one payload unit,
+ // note that any data will have a 4-byte header indicating the size
+ const size_t payload_size_after_header = unit - sizeof(uint32_t);
+ Pickle pickle;
+ pickle.WriteData(
+ data_ptr, static_cast<int>(payload_size_after_header - sizeof(uint32_t)));
+ size_t cur_payload = payload_size_after_header;
+
+ // note: we assume 'unit' is a power of 2
+ EXPECT_EQ(unit, pickle.capacity_after_header());
+ EXPECT_EQ(pickle.payload_size(), payload_size_after_header);
+
+ // fill out a full page (noting data header)
+ pickle.WriteData(data_ptr, static_cast<int>(unit - sizeof(uint32_t)));
+ cur_payload += unit;
+ EXPECT_EQ(unit * 2, pickle.capacity_after_header());
+ EXPECT_EQ(cur_payload, pickle.payload_size());
+
+ // one more byte should double the capacity
+ pickle.WriteData(data_ptr, 1);
+ cur_payload += 8;
+ EXPECT_EQ(unit * 4, pickle.capacity_after_header());
+ EXPECT_EQ(cur_payload, pickle.payload_size());
+}
+
+namespace {
+
+struct CustomHeader : Pickle::Header {
+ int blah;
+};
+
+} // namespace
+
+TEST(PickleTest, HeaderPadding) {
+ const uint32_t kMagic = 0x12345678;
+
+ Pickle pickle(sizeof(CustomHeader));
+ pickle.WriteInt(kMagic);
+
+ // this should not overwrite the 'int' payload
+ pickle.headerT<CustomHeader>()->blah = 10;
+
+ PickleIterator iter(pickle);
+ int result;
+ ASSERT_TRUE(iter.ReadInt(&result));
+
+ EXPECT_EQ(static_cast<uint32_t>(result), kMagic);
+}
+
+TEST(PickleTest, EqualsOperator) {
+ Pickle source;
+ source.WriteInt(1);
+
+ Pickle copy_refs_source_buffer(static_cast<const char*>(source.data()),
+ source.size());
+ Pickle copy;
+ copy = copy_refs_source_buffer;
+ ASSERT_EQ(source.size(), copy.size());
+}
+
+TEST(PickleTest, EvilLengths) {
+  Pickle source;
+  std::string str(100000, 'A');
+  EXPECT_TRUE(source.WriteData(str.c_str(), 100000));
+  // ReadString16 used to have its read buffer length calculation wrong leading
+  // to out-of-bounds reading.
+  PickleIterator iter(source);
+  string16 str16;
+  EXPECT_FALSE(iter.ReadString16(&str16));
+
+  // And check we didn't break ReadString16.
+  str16 = (wchar_t) 'A';
+  Pickle str16_pickle;
+  EXPECT_TRUE(str16_pickle.WriteString16(str16));
+  iter = PickleIterator(str16_pickle);
+  EXPECT_TRUE(iter.ReadString16(&str16));
+  EXPECT_EQ(1U, str16.length());
+
+  // Check we don't fail in a length check with invalid String16 size.
+  // INT_MIN * sizeof(char16) == 0 when truncated to 32 bits, so this length is
+  // particularly evil. Note: this used to be written as (1 << 31), but
+  // shifting into the sign bit of an int is undefined behavior; INT_MIN has
+  // the same bit pattern and is well-defined.
+  Pickle bad_len;
+  EXPECT_TRUE(bad_len.WriteInt(std::numeric_limits<int>::min()));
+  iter = PickleIterator(bad_len);
+  EXPECT_FALSE(iter.ReadString16(&str16));
+}
+
+// Check we can write zero bytes of data and 'data' can be NULL.
+TEST(PickleTest, ZeroLength) {
+ Pickle pickle;
+ EXPECT_TRUE(pickle.WriteData(NULL, 0));
+
+ PickleIterator iter(pickle);
+ const char* outdata;
+ int outdatalen;
+ EXPECT_TRUE(iter.ReadData(&outdata, &outdatalen));
+ EXPECT_EQ(0, outdatalen);
+ // We can't assert that outdata is NULL.
+}
+
+// Check that ReadBytes works properly with an iterator initialized to NULL.
+TEST(PickleTest, ReadBytes) {
+ Pickle pickle;
+ int data = 0x7abcd;
+ EXPECT_TRUE(pickle.WriteBytes(&data, sizeof(data)));
+
+ PickleIterator iter(pickle);
+ const char* outdata_char = NULL;
+ EXPECT_TRUE(iter.ReadBytes(&outdata_char, sizeof(data)));
+
+ int outdata;
+ memcpy(&outdata, outdata_char, sizeof(outdata));
+ EXPECT_EQ(data, outdata);
+}
+
+// Checks that when a pickle is deep-copied, the result is not larger than
+// needed.
+TEST(PickleTest, DeepCopyResize) {
+ Pickle pickle;
+ while (pickle.capacity_after_header() != pickle.payload_size())
+ pickle.WriteBool(true);
+
+ // Make a deep copy.
+ Pickle pickle2(pickle);
+
+ // Check that there isn't any extraneous capacity.
+ EXPECT_EQ(pickle.capacity_after_header(), pickle2.capacity_after_header());
+}
+
+namespace {
+
+// Publicly exposes the ClaimBytes interface for testing.
+class TestingPickle : public Pickle {
+ public:
+ TestingPickle() {}
+
+ void* ClaimBytes(size_t num_bytes) { return Pickle::ClaimBytes(num_bytes); }
+};
+
+} // namespace
+
+// Checks that claimed bytes are zero-initialized.
+TEST(PickleTest, ClaimBytesInitialization) {
+  // size_t (not int) so the loop comparison against |i| below is same-signed;
+  // the int version trips -Wsign-compare under this module's -Wall -Werror.
+  static const size_t kChunkSize = 64;
+  TestingPickle pickle;
+  const char* bytes = static_cast<const char*>(pickle.ClaimBytes(kChunkSize));
+  for (size_t i = 0; i < kChunkSize; ++i) {
+    EXPECT_EQ(0, bytes[i]);
+  }
+}
+
+// Checks that ClaimBytes properly advances the write offset.
+TEST(PickleTest, ClaimBytes) {
+ std::string data("Hello, world!");
+
+ TestingPickle pickle;
+ pickle.WriteUInt32(data.size());
+ void* bytes = pickle.ClaimBytes(data.size());
+ pickle.WriteInt(42);
+ memcpy(bytes, data.data(), data.size());
+
+ PickleIterator iter(pickle);
+ uint32_t out_data_length;
+ EXPECT_TRUE(iter.ReadUInt32(&out_data_length));
+ EXPECT_EQ(data.size(), out_data_length);
+
+ const char* out_data = nullptr;
+ EXPECT_TRUE(iter.ReadBytes(&out_data, out_data_length));
+ EXPECT_EQ(data, std::string(out_data, out_data_length));
+
+ int out_value;
+ EXPECT_TRUE(iter.ReadInt(&out_value));
+ EXPECT_EQ(42, out_value);
+}
+
+// Checks that PickleSizer and Pickle agree on the size of things.
+TEST(PickleTest, PickleSizer) {
+ {
+ TestingPickle pickle;
+ base::PickleSizer sizer;
+ pickle.WriteBool(true);
+ sizer.AddBool();
+ EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+ }
+ {
+ TestingPickle pickle;
+ base::PickleSizer sizer;
+ pickle.WriteInt(42);
+ sizer.AddInt();
+ EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+ }
+ {
+ TestingPickle pickle;
+ base::PickleSizer sizer;
+ pickle.WriteLong(42);
+ sizer.AddLong();
+ EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+ }
+ {
+ TestingPickle pickle;
+ base::PickleSizer sizer;
+ pickle.WriteUInt16(42);
+ sizer.AddUInt16();
+ EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+ }
+ {
+ TestingPickle pickle;
+ base::PickleSizer sizer;
+ pickle.WriteUInt32(42);
+ sizer.AddUInt32();
+ EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+ }
+ {
+ TestingPickle pickle;
+ base::PickleSizer sizer;
+ pickle.WriteInt64(42);
+ sizer.AddInt64();
+ EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+ }
+ {
+ TestingPickle pickle;
+ base::PickleSizer sizer;
+ pickle.WriteUInt64(42);
+ sizer.AddUInt64();
+ EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+ }
+ {
+ TestingPickle pickle;
+ base::PickleSizer sizer;
+ pickle.WriteFloat(42.0f);
+ sizer.AddFloat();
+ EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+ }
+ {
+ TestingPickle pickle;
+ base::PickleSizer sizer;
+ pickle.WriteDouble(42.0);
+ sizer.AddDouble();
+ EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+ }
+ {
+ TestingPickle pickle;
+ base::PickleSizer sizer;
+ pickle.WriteString(teststring);
+ sizer.AddString(teststring);
+ EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+ }
+ {
+ TestingPickle pickle;
+ base::PickleSizer sizer;
+ pickle.WriteString16(teststring16);
+ sizer.AddString16(teststring16);
+ EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+ }
+ {
+ TestingPickle pickle;
+ base::PickleSizer sizer;
+ pickle.WriteData(testdata, testdatalen);
+ sizer.AddData(testdatalen);
+ EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+ }
+ {
+ TestingPickle pickle;
+ base::PickleSizer sizer;
+ pickle.WriteBytes(testdata, testdatalen);
+ sizer.AddBytes(testdatalen);
+ EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+ }
+}
+
+} // namespace base
diff --git a/libchrome/base/posix/eintr_wrapper.h b/libchrome/base/posix/eintr_wrapper.h
new file mode 100644
index 0000000..5a5dc75
--- /dev/null
+++ b/libchrome/base/posix/eintr_wrapper.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This provides a wrapper around system calls which may be interrupted by a
+// signal and return EINTR. See man 7 signal.
+// To prevent long-lasting loops (which would likely be a bug, such as a signal
+// that should be masked) from going unnoticed, there is a limit after which
+// the caller will nonetheless see an EINTR in Debug builds.
+//
+// On Windows, this wrapper macro does nothing.
+//
+// Don't wrap close calls in HANDLE_EINTR. Use IGNORE_EINTR if the return
+// value of close is significant. See http://crbug.com/269623.
+
+#ifndef BASE_POSIX_EINTR_WRAPPER_H_
+#define BASE_POSIX_EINTR_WRAPPER_H_
+
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+
+#include <errno.h>
+
+#if defined(NDEBUG)
+
+#define HANDLE_EINTR(x) ({ \
+ decltype(x) eintr_wrapper_result; \
+ do { \
+ eintr_wrapper_result = (x); \
+ } while (eintr_wrapper_result == -1 && errno == EINTR); \
+ eintr_wrapper_result; \
+})
+
+#else
+
+#define HANDLE_EINTR(x) ({ \
+ int eintr_wrapper_counter = 0; \
+ decltype(x) eintr_wrapper_result; \
+ do { \
+ eintr_wrapper_result = (x); \
+ } while (eintr_wrapper_result == -1 && errno == EINTR && \
+ eintr_wrapper_counter++ < 100); \
+ eintr_wrapper_result; \
+})
+
+#endif // NDEBUG
+
+#define IGNORE_EINTR(x) ({ \
+ decltype(x) eintr_wrapper_result; \
+ do { \
+ eintr_wrapper_result = (x); \
+ if (eintr_wrapper_result == -1 && errno == EINTR) { \
+ eintr_wrapper_result = 0; \
+ } \
+ } while (0); \
+ eintr_wrapper_result; \
+})
+
+#else
+
+#define HANDLE_EINTR(x) (x)
+#define IGNORE_EINTR(x) (x)
+
+#endif // OS_POSIX
+
+#endif // BASE_POSIX_EINTR_WRAPPER_H_
diff --git a/libchrome/base/posix/file_descriptor_shuffle.cc b/libchrome/base/posix/file_descriptor_shuffle.cc
new file mode 100644
index 0000000..d2fd39a
--- /dev/null
+++ b/libchrome/base/posix/file_descriptor_shuffle.cc
@@ -0,0 +1,100 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/posix/file_descriptor_shuffle.h"
+
+#include <unistd.h>
+#include <stddef.h>
+#include <ostream>
+
+#include "base/posix/eintr_wrapper.h"
+#include "base/logging.h"
+
+namespace base {
+
+bool PerformInjectiveMultimapDestructive(
+ InjectiveMultimap* m, InjectionDelegate* delegate) {
+ static const size_t kMaxExtraFDs = 16;
+ int extra_fds[kMaxExtraFDs];
+ unsigned next_extra_fd = 0;
+
+ // DANGER: this function must not allocate or lock.
+ // Cannot use STL iterators here, since debug iterators use locks.
+
+ for (size_t i_index = 0; i_index < m->size(); ++i_index) {
+ InjectiveMultimap::value_type* i = &(*m)[i_index];
+ int temp_fd = -1;
+
+ // We DCHECK the injectiveness of the mapping.
+ for (size_t j_index = i_index + 1; j_index < m->size(); ++j_index) {
+ InjectiveMultimap::value_type* j = &(*m)[j_index];
+ DCHECK(i->dest != j->dest) << "Both fd " << i->source
+ << " and " << j->source << " map to " << i->dest;
+ }
+
+ const bool is_identity = i->source == i->dest;
+
+ for (size_t j_index = i_index + 1; j_index < m->size(); ++j_index) {
+ InjectiveMultimap::value_type* j = &(*m)[j_index];
+ if (!is_identity && i->dest == j->source) {
+ if (temp_fd == -1) {
+ if (!delegate->Duplicate(&temp_fd, i->dest))
+ return false;
+ if (next_extra_fd < kMaxExtraFDs) {
+ extra_fds[next_extra_fd++] = temp_fd;
+ } else {
+ RAW_LOG(ERROR, "PerformInjectiveMultimapDestructive overflowed "
+ "extra_fds. Leaking file descriptors!");
+ }
+ }
+
+ j->source = temp_fd;
+ j->close = false;
+ }
+
+ if (i->close && i->source == j->dest)
+ i->close = false;
+
+ if (i->close && i->source == j->source) {
+ i->close = false;
+ j->close = true;
+ }
+ }
+
+ if (!is_identity) {
+ if (!delegate->Move(i->source, i->dest))
+ return false;
+ }
+
+ if (!is_identity && i->close)
+ delegate->Close(i->source);
+ }
+
+ for (unsigned i = 0; i < next_extra_fd; i++)
+ delegate->Close(extra_fds[i]);
+
+ return true;
+}
+
+bool PerformInjectiveMultimap(const InjectiveMultimap& m_in,
+ InjectionDelegate* delegate) {
+ InjectiveMultimap m(m_in);
+ return PerformInjectiveMultimapDestructive(&m, delegate);
+}
+
+bool FileDescriptorTableInjection::Duplicate(int* result, int fd) {
+ *result = HANDLE_EINTR(dup(fd));
+ return *result >= 0;
+}
+
+bool FileDescriptorTableInjection::Move(int src, int dest) {
+ return HANDLE_EINTR(dup2(src, dest)) != -1;
+}
+
+void FileDescriptorTableInjection::Close(int fd) {
+ int ret = IGNORE_EINTR(close(fd));
+ DPCHECK(ret == 0);
+}
+
+} // namespace base
diff --git a/libchrome/base/posix/file_descriptor_shuffle.h b/libchrome/base/posix/file_descriptor_shuffle.h
new file mode 100644
index 0000000..78e3a7d
--- /dev/null
+++ b/libchrome/base/posix/file_descriptor_shuffle.h
@@ -0,0 +1,87 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POSIX_FILE_DESCRIPTOR_SHUFFLE_H_
+#define BASE_POSIX_FILE_DESCRIPTOR_SHUFFLE_H_
+
+// This code exists to shuffle file descriptors, which is commonly needed when
+// forking subprocesses. The naive approach (just call dup2 to set up the
+// desired descriptors) is very simple, but wrong: it won't handle edge cases
+// (like mapping 0 -> 1, 1 -> 0) correctly.
+//
+// In order to unittest this code, it's broken into the abstract action (an
+// injective multimap) and the concrete code for dealing with file descriptors.
+// Users should use the code like this:
+// base::InjectiveMultimap file_descriptor_map;
+// file_descriptor_map.push_back(base::InjectionArc(devnull, 0, true));
+// file_descriptor_map.push_back(base::InjectionArc(devnull, 2, true));
+// file_descriptor_map.push_back(base::InjectionArc(pipe[1], 1, true));
+// base::ShuffleFileDescriptors(file_descriptor_map);
+//
+// and trust that the Right Thing will get done.
+
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+
+namespace base {
+
+// A Delegate which performs the actions required to perform an injective
+// multimapping in place.
+class InjectionDelegate {
+ public:
+ // Duplicate |fd|, an element of the domain, and write a fresh element of the
+ // domain into |result|. Returns true iff successful.
+ virtual bool Duplicate(int* result, int fd) = 0;
+ // Destructively move |src| to |dest|, overwriting |dest|. Returns true iff
+ // successful.
+ virtual bool Move(int src, int dest) = 0;
+ // Delete an element of the domain.
+ virtual void Close(int fd) = 0;
+
+ protected:
+ virtual ~InjectionDelegate() {}
+};
+
+// An implementation of the InjectionDelegate interface using the file
+// descriptor table of the current process as the domain.
+class BASE_EXPORT FileDescriptorTableInjection : public InjectionDelegate {
+ bool Duplicate(int* result, int fd) override;
+ bool Move(int src, int dest) override;
+ void Close(int fd) override;
+};
+
+// A single arc of the directed graph which describes an injective multimapping.
+struct InjectionArc {
+ InjectionArc(int in_source, int in_dest, bool in_close)
+ : source(in_source),
+ dest(in_dest),
+ close(in_close) {
+ }
+
+ int source;
+ int dest;
+ bool close; // if true, delete the source element after performing the
+ // mapping.
+};
+
+typedef std::vector<InjectionArc> InjectiveMultimap;
+
+BASE_EXPORT bool PerformInjectiveMultimap(const InjectiveMultimap& map,
+ InjectionDelegate* delegate);
+
+BASE_EXPORT bool PerformInjectiveMultimapDestructive(
+ InjectiveMultimap* map,
+ InjectionDelegate* delegate);
+
+// This function will not call malloc but will mutate |map|
+static inline bool ShuffleFileDescriptors(InjectiveMultimap* map) {
+ FileDescriptorTableInjection delegate;
+ return PerformInjectiveMultimapDestructive(map, &delegate);
+}
+
+} // namespace base
+
+#endif // BASE_POSIX_FILE_DESCRIPTOR_SHUFFLE_H_
diff --git a/libchrome/base/posix/file_descriptor_shuffle_unittest.cc b/libchrome/base/posix/file_descriptor_shuffle_unittest.cc
new file mode 100644
index 0000000..3dfbf7e
--- /dev/null
+++ b/libchrome/base/posix/file_descriptor_shuffle_unittest.cc
@@ -0,0 +1,281 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/posix/file_descriptor_shuffle.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+// 'Duplicated' file descriptors start at this number
+const int kDuplicateBase = 1000;
+
+} // namespace
+
+namespace base {
+
+struct Action {
+ enum Type {
+ CLOSE,
+ MOVE,
+ DUPLICATE,
+ };
+
+ Action(Type in_type, int in_fd1, int in_fd2 = -1)
+ : type(in_type),
+ fd1(in_fd1),
+ fd2(in_fd2) {
+ }
+
+ bool operator==(const Action& other) const {
+ return other.type == type &&
+ other.fd1 == fd1 &&
+ other.fd2 == fd2;
+ }
+
+ Type type;
+ int fd1;
+ int fd2;
+};
+
+class InjectionTracer : public InjectionDelegate {
+ public:
+ InjectionTracer()
+ : next_duplicate_(kDuplicateBase) {
+ }
+
+ bool Duplicate(int* result, int fd) override {
+ *result = next_duplicate_++;
+ actions_.push_back(Action(Action::DUPLICATE, *result, fd));
+ return true;
+ }
+
+ bool Move(int src, int dest) override {
+ actions_.push_back(Action(Action::MOVE, src, dest));
+ return true;
+ }
+
+ void Close(int fd) override { actions_.push_back(Action(Action::CLOSE, fd)); }
+
+ const std::vector<Action>& actions() const { return actions_; }
+
+ private:
+ int next_duplicate_;
+ std::vector<Action> actions_;
+};
+
+TEST(FileDescriptorShuffleTest, Empty) {
+ InjectiveMultimap map;
+ InjectionTracer tracer;
+
+ EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+ EXPECT_EQ(0u, tracer.actions().size());
+}
+
+TEST(FileDescriptorShuffleTest, Noop) {
+ InjectiveMultimap map;
+ InjectionTracer tracer;
+ map.push_back(InjectionArc(0, 0, false));
+
+ EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+ EXPECT_EQ(0u, tracer.actions().size());
+}
+
+TEST(FileDescriptorShuffleTest, NoopAndClose) {
+ InjectiveMultimap map;
+ InjectionTracer tracer;
+ map.push_back(InjectionArc(0, 0, true));
+
+ EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+ EXPECT_EQ(0u, tracer.actions().size());
+}
+
+TEST(FileDescriptorShuffleTest, Simple1) {
+ InjectiveMultimap map;
+ InjectionTracer tracer;
+ map.push_back(InjectionArc(0, 1, false));
+
+ EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+ ASSERT_EQ(1u, tracer.actions().size());
+ EXPECT_TRUE(tracer.actions()[0] == Action(Action::MOVE, 0, 1));
+}
+
+TEST(FileDescriptorShuffleTest, Simple2) {
+ InjectiveMultimap map;
+ InjectionTracer tracer;
+ map.push_back(InjectionArc(0, 1, false));
+ map.push_back(InjectionArc(2, 3, false));
+
+ EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+ ASSERT_EQ(2u, tracer.actions().size());
+ EXPECT_TRUE(tracer.actions()[0] == Action(Action::MOVE, 0, 1));
+ EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 2, 3));
+}
+
+TEST(FileDescriptorShuffleTest, Simple3) {
+ InjectiveMultimap map;
+ InjectionTracer tracer;
+ map.push_back(InjectionArc(0, 1, true));
+
+ EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+ ASSERT_EQ(2u, tracer.actions().size());
+ EXPECT_TRUE(tracer.actions()[0] == Action(Action::MOVE, 0, 1));
+ EXPECT_TRUE(tracer.actions()[1] == Action(Action::CLOSE, 0));
+}
+
+TEST(FileDescriptorShuffleTest, Simple4) {
+ InjectiveMultimap map;
+ InjectionTracer tracer;
+ map.push_back(InjectionArc(10, 0, true));
+ map.push_back(InjectionArc(1, 1, true));
+
+ EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+ ASSERT_EQ(2u, tracer.actions().size());
+ EXPECT_TRUE(tracer.actions()[0] == Action(Action::MOVE, 10, 0));
+ EXPECT_TRUE(tracer.actions()[1] == Action(Action::CLOSE, 10));
+}
+
+TEST(FileDescriptorShuffleTest, Cycle) {
+ InjectiveMultimap map;
+ InjectionTracer tracer;
+ map.push_back(InjectionArc(0, 1, false));
+ map.push_back(InjectionArc(1, 0, false));
+
+ EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+ ASSERT_EQ(4u, tracer.actions().size());
+ EXPECT_TRUE(tracer.actions()[0] ==
+ Action(Action::DUPLICATE, kDuplicateBase, 1));
+ EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 0, 1));
+ EXPECT_TRUE(tracer.actions()[2] == Action(Action::MOVE, kDuplicateBase, 0));
+ EXPECT_TRUE(tracer.actions()[3] == Action(Action::CLOSE, kDuplicateBase));
+}
+
+TEST(FileDescriptorShuffleTest, CycleAndClose1) {
+ InjectiveMultimap map;
+ InjectionTracer tracer;
+ map.push_back(InjectionArc(0, 1, true));
+ map.push_back(InjectionArc(1, 0, false));
+
+ EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+ ASSERT_EQ(4u, tracer.actions().size());
+ EXPECT_TRUE(tracer.actions()[0] ==
+ Action(Action::DUPLICATE, kDuplicateBase, 1));
+ EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 0, 1));
+ EXPECT_TRUE(tracer.actions()[2] == Action(Action::MOVE, kDuplicateBase, 0));
+ EXPECT_TRUE(tracer.actions()[3] == Action(Action::CLOSE, kDuplicateBase));
+}
+
+TEST(FileDescriptorShuffleTest, CycleAndClose2) {
+ InjectiveMultimap map;
+ InjectionTracer tracer;
+ map.push_back(InjectionArc(0, 1, false));
+ map.push_back(InjectionArc(1, 0, true));
+
+ EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+ ASSERT_EQ(4u, tracer.actions().size());
+ EXPECT_TRUE(tracer.actions()[0] ==
+ Action(Action::DUPLICATE, kDuplicateBase, 1));
+ EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 0, 1));
+ EXPECT_TRUE(tracer.actions()[2] == Action(Action::MOVE, kDuplicateBase, 0));
+ EXPECT_TRUE(tracer.actions()[3] == Action(Action::CLOSE, kDuplicateBase));
+}
+
+TEST(FileDescriptorShuffleTest, CycleAndClose3) {
+ InjectiveMultimap map;
+ InjectionTracer tracer;
+ map.push_back(InjectionArc(0, 1, true));
+ map.push_back(InjectionArc(1, 0, true));
+
+ EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+ ASSERT_EQ(4u, tracer.actions().size());
+ EXPECT_TRUE(tracer.actions()[0] ==
+ Action(Action::DUPLICATE, kDuplicateBase, 1));
+ EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 0, 1));
+ EXPECT_TRUE(tracer.actions()[2] == Action(Action::MOVE, kDuplicateBase, 0));
+ EXPECT_TRUE(tracer.actions()[3] == Action(Action::CLOSE, kDuplicateBase));
+}
+
+TEST(FileDescriptorShuffleTest, Fanout) {
+ InjectiveMultimap map;
+ InjectionTracer tracer;
+ map.push_back(InjectionArc(0, 1, false));
+ map.push_back(InjectionArc(0, 2, false));
+
+ EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+ ASSERT_EQ(2u, tracer.actions().size());
+ EXPECT_TRUE(tracer.actions()[0] == Action(Action::MOVE, 0, 1));
+ EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 0, 2));
+}
+
+TEST(FileDescriptorShuffleTest, FanoutAndClose1) {
+ InjectiveMultimap map;
+ InjectionTracer tracer;
+ map.push_back(InjectionArc(0, 1, true));
+ map.push_back(InjectionArc(0, 2, false));
+
+ EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+ ASSERT_EQ(3u, tracer.actions().size());
+ EXPECT_TRUE(tracer.actions()[0] == Action(Action::MOVE, 0, 1));
+ EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 0, 2));
+ EXPECT_TRUE(tracer.actions()[2] == Action(Action::CLOSE, 0));
+}
+
+TEST(FileDescriptorShuffleTest, FanoutAndClose2) {
+ InjectiveMultimap map;
+ InjectionTracer tracer;
+ map.push_back(InjectionArc(0, 1, false));
+ map.push_back(InjectionArc(0, 2, true));
+
+ EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+ ASSERT_EQ(3u, tracer.actions().size());
+ EXPECT_TRUE(tracer.actions()[0] == Action(Action::MOVE, 0, 1));
+ EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 0, 2));
+ EXPECT_TRUE(tracer.actions()[2] == Action(Action::CLOSE, 0));
+}
+
+TEST(FileDescriptorShuffleTest, FanoutAndClose3) {
+ InjectiveMultimap map;
+ InjectionTracer tracer;
+ map.push_back(InjectionArc(0, 1, true));
+ map.push_back(InjectionArc(0, 2, true));
+
+ EXPECT_TRUE(PerformInjectiveMultimap(map, &tracer));
+ ASSERT_EQ(3u, tracer.actions().size());
+ EXPECT_TRUE(tracer.actions()[0] == Action(Action::MOVE, 0, 1));
+ EXPECT_TRUE(tracer.actions()[1] == Action(Action::MOVE, 0, 2));
+ EXPECT_TRUE(tracer.actions()[2] == Action(Action::CLOSE, 0));
+}
+
+class FailingDelegate : public InjectionDelegate {
+ public:
+ bool Duplicate(int* result, int fd) override { return false; }
+
+ bool Move(int src, int dest) override { return false; }
+
+ void Close(int fd) override {}
+};
+
+TEST(FileDescriptorShuffleTest, EmptyWithFailure) {
+ InjectiveMultimap map;
+ FailingDelegate failing;
+
+ EXPECT_TRUE(PerformInjectiveMultimap(map, &failing));
+}
+
+TEST(FileDescriptorShuffleTest, NoopWithFailure) {
+ InjectiveMultimap map;
+ FailingDelegate failing;
+ map.push_back(InjectionArc(0, 0, false));
+
+ EXPECT_TRUE(PerformInjectiveMultimap(map, &failing));
+}
+
+TEST(FileDescriptorShuffleTest, Simple1WithFailure) {
+ InjectiveMultimap map;
+ FailingDelegate failing;
+ map.push_back(InjectionArc(0, 1, false));
+
+ EXPECT_FALSE(PerformInjectiveMultimap(map, &failing));
+}
+
+} // namespace base
diff --git a/libchrome/base/posix/global_descriptors.cc b/libchrome/base/posix/global_descriptors.cc
new file mode 100644
index 0000000..6c18783
--- /dev/null
+++ b/libchrome/base/posix/global_descriptors.cc
@@ -0,0 +1,85 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/posix/global_descriptors.h"
+
+#include <vector>
+#include <utility>
+
+#include "base/logging.h"
+
+namespace base {
+
+GlobalDescriptors::Descriptor::Descriptor(Key key, int fd)
+ : key(key), fd(fd), region(base::MemoryMappedFile::Region::kWholeFile) {
+}
+
+GlobalDescriptors::Descriptor::Descriptor(Key key,
+ int fd,
+ base::MemoryMappedFile::Region region)
+ : key(key), fd(fd), region(region) {
+}
+
+// static
+GlobalDescriptors* GlobalDescriptors::GetInstance() {
+ typedef Singleton<base::GlobalDescriptors,
+ LeakySingletonTraits<base::GlobalDescriptors> >
+ GlobalDescriptorsSingleton;
+ return GlobalDescriptorsSingleton::get();
+}
+
+int GlobalDescriptors::Get(Key key) const {
+ const int ret = MaybeGet(key);
+
+ if (ret == -1)
+ DLOG(FATAL) << "Unknown global descriptor: " << key;
+ return ret;
+}
+
+int GlobalDescriptors::MaybeGet(Key key) const {
+ for (Mapping::const_iterator
+ i = descriptors_.begin(); i != descriptors_.end(); ++i) {
+ if (i->key == key)
+ return i->fd;
+ }
+
+ return -1;
+}
+
+void GlobalDescriptors::Set(Key key, int fd) {
+ Set(key, fd, base::MemoryMappedFile::Region::kWholeFile);
+}
+
+void GlobalDescriptors::Set(Key key,
+ int fd,
+ base::MemoryMappedFile::Region region) {
+ for (auto& i : descriptors_) {
+ if (i.key == key) {
+ i.fd = fd;
+ i.region = region;
+ return;
+ }
+ }
+
+ descriptors_.push_back(Descriptor(key, fd, region));
+}
+
+base::MemoryMappedFile::Region GlobalDescriptors::GetRegion(Key key) const {
+ for (const auto& i : descriptors_) {
+ if (i.key == key)
+ return i.region;
+ }
+ DLOG(FATAL) << "Unknown global descriptor: " << key;
+ return base::MemoryMappedFile::Region::kWholeFile;
+}
+
+void GlobalDescriptors::Reset(const Mapping& mapping) {
+ descriptors_ = mapping;
+}
+
+GlobalDescriptors::GlobalDescriptors() {}
+
+GlobalDescriptors::~GlobalDescriptors() {}
+
+} // namespace base
diff --git a/libchrome/base/posix/global_descriptors.h b/libchrome/base/posix/global_descriptors.h
new file mode 100644
index 0000000..edb299d
--- /dev/null
+++ b/libchrome/base/posix/global_descriptors.h
@@ -0,0 +1,99 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POSIX_GLOBAL_DESCRIPTORS_H_
+#define BASE_POSIX_GLOBAL_DESCRIPTORS_H_
+
+#include "build/build_config.h"
+
+#include <vector>
+#include <utility>
+
+#include <stdint.h>
+
+#include "base/files/memory_mapped_file.h"
+#include "base/memory/singleton.h"
+
+namespace base {
+
+// It's common practice to install file descriptors into well known slot
+// numbers before execing a child; stdin, stdout and stderr are ubiquitous
+// examples.
+//
+// However, when using a zygote model, this becomes troublesome. Since the
+// descriptors which need to be in these slots generally aren't known, any code
+// could open a resource and take one of the reserved descriptors. Simply
+// overwriting the slot isn't a viable solution.
+//
+// We could try to fill the reserved slots as soon as possible, but this is a
+// fragile solution since global constructors etc are able to open files.
+//
+// Instead, we retreat from the idea of installing descriptors in specific
+// slots and add a layer of indirection in the form of this singleton object.
+// It maps from an abstract key to a descriptor. If independent modules each
+// need to define keys, then values should be chosen randomly so as not to
+// collide.
+class BASE_EXPORT GlobalDescriptors {
+ public:
+ typedef uint32_t Key;
+ struct Descriptor {
+ Descriptor(Key key, int fd);
+ Descriptor(Key key, int fd, base::MemoryMappedFile::Region region);
+
+ // Globally unique key.
+ Key key;
+ // Actual FD.
+ int fd;
+ // Optional region, defaults to kWholeFile.
+ base::MemoryMappedFile::Region region;
+ };
+ typedef std::vector<Descriptor> Mapping;
+
+ // Often we want a canonical descriptor for a given Key. In this case, we add
+ // the following constant to the key value:
+#if !defined(OS_ANDROID)
+ static const int kBaseDescriptor = 3; // 0, 1, 2 are already taken.
+#else
+ // 3 used by __android_log_write().
+ // 4 used by... something important on Android M.
+ // 5 used by... something important on Android L... on low-end devices.
+// TODO(amistry): On Android, this mechanism is only used for tests since the
+ // content child launcher spawns a process by creating a new Activity using
+ // the Android APIs. For tests, come up with a way that doesn't require using
+ // a pre-defined fd.
+ static const int kBaseDescriptor = 6;
+#endif
+
+ // Return the singleton instance of GlobalDescriptors.
+ static GlobalDescriptors* GetInstance();
+
+ // Get a descriptor given a key. It is a fatal error if the key is not known.
+ int Get(Key key) const;
+
+ // Get a descriptor given a key. Returns -1 on error.
+ int MaybeGet(Key key) const;
+
+ // Get a region given a key. It is a fatal error if the key is not known.
+ base::MemoryMappedFile::Region GetRegion(Key key) const;
+
+ // Set the descriptor for the given |key|. This sets the region associated
+ // with |key| to kWholeFile.
+ void Set(Key key, int fd);
+
+ // Set the descriptor and |region| for the given |key|.
+ void Set(Key key, int fd, base::MemoryMappedFile::Region region);
+
+ void Reset(const Mapping& mapping);
+
+ private:
+ friend struct DefaultSingletonTraits<GlobalDescriptors>;
+ GlobalDescriptors();
+ ~GlobalDescriptors();
+
+ Mapping descriptors_;
+};
+
+} // namespace base
+
+#endif // BASE_POSIX_GLOBAL_DESCRIPTORS_H_
diff --git a/libchrome/base/posix/safe_strerror.cc b/libchrome/base/posix/safe_strerror.cc
new file mode 100644
index 0000000..798658e
--- /dev/null
+++ b/libchrome/base/posix/safe_strerror.cc
@@ -0,0 +1,128 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if defined(__ANDROID__)
+// Post-L versions of bionic define the GNU-specific strerror_r if _GNU_SOURCE
+// is defined, but the symbol is renamed to __gnu_strerror_r which only exists
+// on those later versions. To preserve ABI compatibility with older versions,
+// undefine _GNU_SOURCE and use the POSIX version.
+#undef _GNU_SOURCE
+#endif
+
+#include "base/posix/safe_strerror.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "build/build_config.h"
+
+namespace base {
+
+#if defined(__GLIBC__) || defined(OS_NACL)
+#define USE_HISTORICAL_STRERRO_R 1
+#else
+#define USE_HISTORICAL_STRERRO_R 0
+#endif
+
+#if USE_HISTORICAL_STRERRO_R && defined(__GNUC__)
+// GCC will complain about the unused second wrap function unless we tell it
+// that we meant for them to be potentially unused, which is exactly what this
+// attribute is for.
+#define POSSIBLY_UNUSED __attribute__((unused))
+#else
+#define POSSIBLY_UNUSED
+#endif
+
+#if USE_HISTORICAL_STRERRO_R
+// glibc has two strerror_r functions: a historical GNU-specific one that
+// returns type char *, and a POSIX.1-2001 compliant one available since 2.3.4
+// that returns int. This wraps the GNU-specific one.
+static void POSSIBLY_UNUSED wrap_posix_strerror_r(
+ char *(*strerror_r_ptr)(int, char *, size_t),
+ int err,
+ char *buf,
+ size_t len) {
+ // GNU version.
+ char *rc = (*strerror_r_ptr)(err, buf, len);
+ if (rc != buf) {
+ // glibc did not use buf and returned a static string instead. Copy it
+ // into buf.
+ buf[0] = '\0';
+ strncat(buf, rc, len - 1);
+ }
+ // The GNU version never fails. Unknown errors get an "unknown error" message.
+ // The result is always null terminated.
+}
+#endif // USE_HISTORICAL_STRERRO_R
+
+// Wrapper for strerror_r functions that implement the POSIX interface. POSIX
+// does not define the behaviour for some of the edge cases, so we wrap it to
+// guarantee that they are handled. This is compiled on all POSIX platforms, but
+// it will only be used on Linux if the POSIX strerror_r implementation is
+// being used (see below).
+static void POSSIBLY_UNUSED wrap_posix_strerror_r(
+ int (*strerror_r_ptr)(int, char *, size_t),
+ int err,
+ char *buf,
+ size_t len) {
+ int old_errno = errno;
+ // Have to cast since otherwise we get an error if this is the GNU version
+ // (but in such a scenario this function is never called). Sadly we can't use
+ // C++-style casts because the appropriate one is reinterpret_cast but it's
+ // considered illegal to reinterpret_cast a type to itself, so we get an
+ // error in the opposite case.
+ int result = (*strerror_r_ptr)(err, buf, len);
+ if (result == 0) {
+ // POSIX is vague about whether the string will be terminated, although
+ // it indirectly implies that typically ERANGE will be returned, instead
+ // of truncating the string. We play it safe by always terminating the
+ // string explicitly.
+ buf[len - 1] = '\0';
+ } else {
+ // Error. POSIX is vague about whether the return value is itself a system
+ // error code or something else. On Linux currently it is -1 and errno is
+ // set. On BSD-derived systems it is a system error and errno is unchanged.
+ // We try and detect which case it is so as to put as much useful info as
+ // we can into our message.
+ int strerror_error; // The error encountered in strerror
+ int new_errno = errno;
+ if (new_errno != old_errno) {
+ // errno was changed, so probably the return value is just -1 or something
+ // else that doesn't provide any info, and errno is the error.
+ strerror_error = new_errno;
+ } else {
+ // Either the error from strerror_r was the same as the previous value, or
+ // errno wasn't used. Assume the latter.
+ strerror_error = result;
+ }
+ // snprintf truncates and always null-terminates.
+ snprintf(buf,
+ len,
+ "Error %d while retrieving error %d",
+ strerror_error,
+ err);
+ }
+ errno = old_errno;
+}
+
+void safe_strerror_r(int err, char *buf, size_t len) {
+ if (buf == NULL || len <= 0) {
+ return;
+ }
+ // If using glibc (i.e., Linux), the compiler will automatically select the
+ // appropriate overloaded function based on the function type of strerror_r.
+ // The other one will be elided from the translation unit since both are
+ // static.
+ wrap_posix_strerror_r(&strerror_r, err, buf, len);
+}
+
+std::string safe_strerror(int err) {
+ const int buffer_size = 256;
+ char buf[buffer_size];
+ safe_strerror_r(err, buf, sizeof(buf));
+ return std::string(buf);
+}
+
+} // namespace base
diff --git a/libchrome/base/posix/safe_strerror.h b/libchrome/base/posix/safe_strerror.h
new file mode 100644
index 0000000..2945312
--- /dev/null
+++ b/libchrome/base/posix/safe_strerror.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POSIX_SAFE_STRERROR_H_
+#define BASE_POSIX_SAFE_STRERROR_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+
+// BEFORE using anything from this file, first look at PLOG and friends in
+// logging.h and use them instead if applicable.
+//
+// This file declares safe, portable alternatives to the POSIX strerror()
+// function. strerror() is inherently unsafe in multi-threaded apps and should
+// never be used. Doing so can cause crashes. Additionally, the thread-safe
+// alternative strerror_r varies in semantics across platforms. Use these
+// functions instead.
+
+// Thread-safe strerror function with dependable semantics that never fails.
+// It will write the string form of error "err" to buffer buf of length len.
+// If there is an error calling the OS's strerror_r() function then a message to
+// that effect will be printed into buf, truncating if necessary. The final
+// result is always null-terminated. The value of errno is never changed.
+//
+// Use this instead of strerror_r().
+BASE_EXPORT void safe_strerror_r(int err, char *buf, size_t len);
+
+// Calls safe_strerror_r with a buffer of suitable size and returns the result
+// in a C++ string.
+//
+// Use this instead of strerror(). Note though that safe_strerror_r will be
+// more robust in the case of heap corruption errors, since it doesn't need to
+// allocate a string.
+BASE_EXPORT std::string safe_strerror(int err);
+
+} // namespace base
+
+#endif // BASE_POSIX_SAFE_STRERROR_H_
diff --git a/libchrome/base/posix/unix_domain_socket_linux.cc b/libchrome/base/posix/unix_domain_socket_linux.cc
new file mode 100644
index 0000000..25ddb54
--- /dev/null
+++ b/libchrome/base/posix/unix_domain_socket_linux.cc
@@ -0,0 +1,247 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/posix/unix_domain_socket_linux.h"
+
+#include <errno.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <vector>
+
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/pickle.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/stl_util.h"
+#include "build/build_config.h"
+
+#if !defined(OS_NACL_NONSFI)
+#include <sys/uio.h>
+#endif
+
+namespace base {
+
+// Cap on how many SCM_RIGHTS descriptors RecvMsg*() will accept per message;
+// sized into the control buffer in RecvMsgWithFlags() below.
+const size_t UnixDomainSocket::kMaxFileDescriptors = 16;
+
+#if !defined(OS_NACL_NONSFI)
+// Creates a connected pair of UNIX-domain SOCK_SEQPACKET sockets, and passes
+// ownership of the newly allocated file descriptors to |one| and |two|.
+// Returns true on success. On failure |one| and |two| are left unmodified and
+// errno is set by socketpair(2).
+static bool CreateSocketPair(ScopedFD* one, ScopedFD* two) {
+  int raw_socks[2];
+  if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, raw_socks) == -1)
+    return false;
+  one->reset(raw_socks[0]);
+  two->reset(raw_socks[1]);
+  return true;
+}
+
+// static
+// Turns on SO_PASSCRED so that recvmsg() on |fd| delivers SCM_CREDENTIALS
+// control messages, which RecvMsgWithFlags() below reads to report the
+// sender's pid.
+bool UnixDomainSocket::EnableReceiveProcessId(int fd) {
+  const int enable = 1;
+  return setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &enable, sizeof(enable)) == 0;
+}
+#endif // !defined(OS_NACL_NONSFI)
+
+// static
+bool UnixDomainSocket::SendMsg(int fd,
+                               const void* buf,
+                               size_t length,
+                               const std::vector<int>& fds) {
+  struct msghdr msg;
+  memset(&msg, 0, sizeof(msg));
+  // sendmsg() takes a non-const iovec even though the buffer is only read.
+  struct iovec iov = { const_cast<void*>(buf), length };
+  msg.msg_iov = &iov;
+  msg.msg_iovlen = 1;
+
+  char* control_buffer = NULL;
+  if (fds.size()) {
+    // Attach all descriptors as a single SCM_RIGHTS control message.
+    const unsigned control_len = CMSG_SPACE(sizeof(int) * fds.size());
+    control_buffer = new char[control_len];
+
+    struct cmsghdr* cmsg;
+    msg.msg_control = control_buffer;
+    msg.msg_controllen = control_len;
+    cmsg = CMSG_FIRSTHDR(&msg);
+    cmsg->cmsg_level = SOL_SOCKET;
+    cmsg->cmsg_type = SCM_RIGHTS;
+    cmsg->cmsg_len = CMSG_LEN(sizeof(int) * fds.size());
+    memcpy(CMSG_DATA(cmsg), &fds[0], sizeof(int) * fds.size());
+    // Shrink to the exact data length; CMSG_SPACE() may include trailing
+    // alignment padding that CMSG_LEN() does not.
+    msg.msg_controllen = cmsg->cmsg_len;
+  }
+
+  // Avoid a SIGPIPE if the other end breaks the connection.
+  // Due to a bug in the Linux kernel (net/unix/af_unix.c) MSG_NOSIGNAL isn't
+  // regarded for SOCK_SEQPACKET in the AF_UNIX domain, but it is mandated by
+  // POSIX.
+  const int flags = MSG_NOSIGNAL;
+  const ssize_t r = HANDLE_EINTR(sendmsg(fd, &msg, flags));
+  // Success only if every byte of |buf| went out in this one datagram.
+  const bool ret = static_cast<ssize_t>(length) == r;
+  delete[] control_buffer;
+  return ret;
+}
+
+// static
+// Convenience overload: no recvmsg() flags and no sender-pid reporting.
+ssize_t UnixDomainSocket::RecvMsg(int fd,
+                                  void* buf,
+                                  size_t length,
+                                  std::vector<ScopedFD>* fds) {
+  return UnixDomainSocket::RecvMsgWithPid(fd, buf, length, fds, NULL);
+}
+
+// static
+// Convenience overload: same as RecvMsgWithFlags() with no extra flags.
+ssize_t UnixDomainSocket::RecvMsgWithPid(int fd,
+                                         void* buf,
+                                         size_t length,
+                                         std::vector<ScopedFD>* fds,
+                                         ProcessId* pid) {
+  return UnixDomainSocket::RecvMsgWithFlags(fd, buf, length, 0, fds, pid);
+}
+
+// static
+ssize_t UnixDomainSocket::RecvMsgWithFlags(int fd,
+                                           void* buf,
+                                           size_t length,
+                                           int flags,
+                                           std::vector<ScopedFD>* fds,
+                                           ProcessId* out_pid) {
+  fds->clear();
+
+  struct msghdr msg;
+  memset(&msg, 0, sizeof(msg));
+  struct iovec iov = { buf, length };
+  msg.msg_iov = &iov;
+  msg.msg_iovlen = 1;
+
+  // Reserve control space for up to kMaxFileDescriptors SCM_RIGHTS entries
+  // plus (where supported) one set of SCM_CREDENTIALS.
+  const size_t kControlBufferSize =
+      CMSG_SPACE(sizeof(int) * kMaxFileDescriptors)
+#if !defined(OS_NACL_NONSFI)
+      // The PNaCl toolchain for Non-SFI binary build does not support ucred.
+      + CMSG_SPACE(sizeof(struct ucred))
+#endif
+      ;
+  char control_buffer[kControlBufferSize];
+  msg.msg_control = control_buffer;
+  msg.msg_controllen = sizeof(control_buffer);
+
+  const ssize_t r = HANDLE_EINTR(recvmsg(fd, &msg, flags));
+  if (r == -1)
+    return -1;
+
+  // |wire_fds| points into |control_buffer|; the descriptors it names were
+  // already installed into this process by the kernel, so from here on we own
+  // them and must not leak them.
+  int* wire_fds = NULL;
+  unsigned wire_fds_len = 0;
+  ProcessId pid = -1;
+
+  if (msg.msg_controllen > 0) {
+    struct cmsghdr* cmsg;
+    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+      const unsigned payload_len = cmsg->cmsg_len - CMSG_LEN(0);
+      if (cmsg->cmsg_level == SOL_SOCKET &&
+          cmsg->cmsg_type == SCM_RIGHTS) {
+        DCHECK_EQ(payload_len % sizeof(int), 0u);
+        DCHECK_EQ(wire_fds, static_cast<void*>(nullptr));
+        wire_fds = reinterpret_cast<int*>(CMSG_DATA(cmsg));
+        wire_fds_len = payload_len / sizeof(int);
+      }
+#if !defined(OS_NACL_NONSFI)
+      // The PNaCl toolchain for Non-SFI binary build does not support
+      // SCM_CREDENTIALS.
+      if (cmsg->cmsg_level == SOL_SOCKET &&
+          cmsg->cmsg_type == SCM_CREDENTIALS) {
+        DCHECK_EQ(payload_len, sizeof(struct ucred));
+        DCHECK_EQ(pid, -1);
+        pid = reinterpret_cast<struct ucred*>(CMSG_DATA(cmsg))->pid;
+      }
+#endif
+    }
+  }
+
+  // If either the data or the control data was truncated, close any received
+  // descriptors so they are not leaked, and report the failure as EMSGSIZE.
+  if (msg.msg_flags & MSG_TRUNC || msg.msg_flags & MSG_CTRUNC) {
+    for (unsigned i = 0; i < wire_fds_len; ++i)
+      close(wire_fds[i]);
+    errno = EMSGSIZE;
+    return -1;
+  }
+
+  // Hand ownership of the received descriptors to the caller via ScopedFD.
+  if (wire_fds) {
+    for (unsigned i = 0; i < wire_fds_len; ++i)
+      fds->push_back(ScopedFD(wire_fds[i])); // TODO(mdempsky): emplace_back
+  }
+
+  if (out_pid) {
+    // |pid| will legitimately be -1 if we read EOF, so only DCHECK if we
+    // actually received a message. Unfortunately, Linux allows sending zero
+    // length messages, which are indistinguishable from EOF, so this check
+    // has false negatives.
+    if (r > 0 || msg.msg_controllen > 0)
+      DCHECK_GE(pid, 0);
+
+    *out_pid = pid;
+  }
+
+  return r;
+}
+
+#if !defined(OS_NACL_NONSFI)
+// static
+// Convenience wrapper around SendRecvMsgWithFlags() with no recvmsg() flags.
+ssize_t UnixDomainSocket::SendRecvMsg(int fd,
+                                      uint8_t* reply,
+                                      unsigned max_reply_len,
+                                      int* result_fd,
+                                      const Pickle& request) {
+  return UnixDomainSocket::SendRecvMsgWithFlags(fd, reply, max_reply_len,
+                                                0, /* recvmsg_flags */
+                                                result_fd, request);
+}
+
+// static
+ssize_t UnixDomainSocket::SendRecvMsgWithFlags(int fd,
+                                               uint8_t* reply,
+                                               unsigned max_reply_len,
+                                               int recvmsg_flags,
+                                               int* result_fd,
+                                               const Pickle& request) {
+  // This socketpair is only used for the IPC and is cleaned up before
+  // returning.
+  ScopedFD recv_sock, send_sock;
+  if (!CreateSocketPair(&recv_sock, &send_sock))
+    return -1;
+
+  {
+    // Send the request together with one end of the fresh pair; the peer is
+    // expected to write its reply to the descriptor we just handed it.
+    std::vector<int> send_fds;
+    send_fds.push_back(send_sock.get());
+    if (!SendMsg(fd, request.data(), request.size(), send_fds))
+      return -1;
+  }
+
+  // Close the sending end of the socket right away so that if our peer closes
+  // it before sending a response (e.g., from exiting), RecvMsgWithFlags() will
+  // return EOF instead of hanging.
+  send_sock.reset();
+
+  std::vector<ScopedFD> recv_fds;
+  // When porting to OSX keep in mind it doesn't support MSG_NOSIGNAL, so the
+  // sender might get a SIGPIPE.
+  const ssize_t reply_len = RecvMsgWithFlags(
+      recv_sock.get(), reply, max_reply_len, recvmsg_flags, &recv_fds, NULL);
+  recv_sock.reset();
+  if (reply_len == -1)
+    return -1;
+
+  // If we received more file descriptors than caller expected, then we treat
+  // that as an error.
+  if (recv_fds.size() > (result_fd != NULL ? 1 : 0)) {
+    NOTREACHED();
+    return -1;
+  }
+
+  // Transfer ownership of the (optional) reply descriptor to the caller.
+  if (result_fd)
+    *result_fd = recv_fds.empty() ? -1 : recv_fds[0].release();
+
+  return reply_len;
+}
+#endif // !defined(OS_NACL_NONSFI)
+
+} // namespace base
diff --git a/libchrome/base/posix/unix_domain_socket_linux.h b/libchrome/base/posix/unix_domain_socket_linux.h
new file mode 100644
index 0000000..2ba739e
--- /dev/null
+++ b/libchrome/base/posix/unix_domain_socket_linux.h
@@ -0,0 +1,104 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POSIX_UNIX_DOMAIN_SOCKET_LINUX_H_
+#define BASE_POSIX_UNIX_DOMAIN_SOCKET_LINUX_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/files/scoped_file.h"
+#include "base/process/process_handle.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class Pickle;
+
+// Helpers for sending and receiving datagrams — optionally carrying file
+// descriptors and (on Linux) sender credentials — over AF_UNIX sockets.
+class BASE_EXPORT UnixDomainSocket {
+ public:
+  // Maximum number of file descriptors that can be read by RecvMsg().
+  static const size_t kMaxFileDescriptors;
+
+#if !defined(OS_NACL_NONSFI)
+  // Use to enable receiving process IDs in RecvMsgWithPid. Should be called on
+  // the receiving socket (i.e., the socket passed to RecvMsgWithPid). Returns
+  // true if successful.
+  static bool EnableReceiveProcessId(int fd);
+#endif // !defined(OS_NACL_NONSFI)
+
+  // Use sendmsg to write the given msg and include a vector of file
+  // descriptors. Returns true if successful.
+  static bool SendMsg(int fd,
+                      const void* msg,
+                      size_t length,
+                      const std::vector<int>& fds);
+
+  // Use recvmsg to read a message and an array of file descriptors. Returns
+  // -1 on failure. Note: will read, at most, |kMaxFileDescriptors| descriptors.
+  static ssize_t RecvMsg(int fd,
+                         void* msg,
+                         size_t length,
+                         std::vector<ScopedFD>* fds);
+
+  // Same as RecvMsg above, but also returns the sender's process ID (as seen
+  // from the caller's namespace). However, before using this function to
+  // receive process IDs, EnableReceiveProcessId() should be called on the
+  // receiving socket.
+  static ssize_t RecvMsgWithPid(int fd,
+                                void* msg,
+                                size_t length,
+                                std::vector<ScopedFD>* fds,
+                                ProcessId* pid);
+
+#if !defined(OS_NACL_NONSFI)
+  // Perform a sendmsg/recvmsg pair.
+  // 1. This process creates a UNIX SEQPACKET socketpair. Using
+  //    connection-oriented sockets (SEQPACKET or STREAM) is critical here,
+  //    because if one of the ends closes the other one must be notified.
+  // 2. This process writes a request to |fd| with an SCM_RIGHTS control
+  //    message containing one end of the fresh socket pair.
+  // 3. This process blocks reading from the other end of the fresh
+  //    socketpair.
+  // 4. The target process receives the request, processes it and writes the
+  //    reply to the end of the socketpair contained in the request.
+  // 5. This process wakes up and continues.
+  //
+  //   fd: descriptor to send the request on
+  //   reply: buffer for the reply
+  //   reply_len: size of |reply|
+  //   result_fd: (may be NULL) the file descriptor returned in the reply
+  //              (if any)
+  //   request: the bytes to send in the request
+  static ssize_t SendRecvMsg(int fd,
+                             uint8_t* reply,
+                             unsigned reply_len,
+                             int* result_fd,
+                             const Pickle& request);
+
+  // Similar to SendRecvMsg(), but |recvmsg_flags| allows to control the flags
+  // of the recvmsg(2) call.
+  static ssize_t SendRecvMsgWithFlags(int fd,
+                                      uint8_t* reply,
+                                      unsigned reply_len,
+                                      int recvmsg_flags,
+                                      int* result_fd,
+                                      const Pickle& request);
+#endif // !defined(OS_NACL_NONSFI)
+ private:
+  // Similar to RecvMsg, but allows to specify |flags| for recvmsg(2).
+  static ssize_t RecvMsgWithFlags(int fd,
+                                  void* msg,
+                                  size_t length,
+                                  int flags,
+                                  std::vector<ScopedFD>* fds,
+                                  ProcessId* pid);
+};
+
+} // namespace base
+
+#endif // BASE_POSIX_UNIX_DOMAIN_SOCKET_LINUX_H_
diff --git a/libchrome/base/posix/unix_domain_socket_linux_unittest.cc b/libchrome/base/posix/unix_domain_socket_linux_unittest.cc
new file mode 100644
index 0000000..3f5173c
--- /dev/null
+++ b/libchrome/base/posix/unix_domain_socket_linux_unittest.cc
@@ -0,0 +1,163 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/location.h"
+#include "base/pickle.h"
+#include "base/posix/unix_domain_socket_linux.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+TEST(UnixDomainSocketTest, SendRecvMsgAbortOnReplyFDClose) {
+  Thread message_thread("UnixDomainSocketTest");
+  ASSERT_TRUE(message_thread.Start());
+
+  int fds[2];
+  ASSERT_EQ(0, socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds));
+  ScopedFD scoped_fd0(fds[0]);
+  ScopedFD scoped_fd1(fds[1]);
+
+  // Have the thread send a synchronous message via the socket.
+  Pickle request;
+  message_thread.task_runner()->PostTask(
+      FROM_HERE,
+      Bind(IgnoreResult(&UnixDomainSocket::SendRecvMsg), fds[1],
+           static_cast<uint8_t*>(NULL), 0U, static_cast<int*>(NULL), request));
+
+  // Receive the message. It carries the reply descriptor that SendRecvMsg()
+  // created on the worker thread.
+  std::vector<ScopedFD> message_fds;
+  uint8_t buffer[16];
+  ASSERT_EQ(static_cast<int>(request.size()),
+            UnixDomainSocket::RecvMsg(fds[0], buffer, sizeof(buffer),
+                                      &message_fds));
+  ASSERT_EQ(1U, message_fds.size());
+
+  // Close the reply FD. The blocked SendRecvMsg() on the worker thread should
+  // then observe EOF rather than hanging forever.
+  message_fds.clear();
+
+  // Check that the thread didn't get blocked.
+  WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+  message_thread.task_runner()->PostTask(
+      FROM_HERE, Bind(&WaitableEvent::Signal, Unretained(&event)));
+  ASSERT_TRUE(event.TimedWait(TimeDelta::FromMilliseconds(5000)));
+}
+
+TEST(UnixDomainSocketTest, SendRecvMsgAvoidsSIGPIPE) {
+  // Make sure SIGPIPE isn't being ignored.
+  struct sigaction act = {}, oldact;
+  act.sa_handler = SIG_DFL;
+  ASSERT_EQ(0, sigaction(SIGPIPE, &act, &oldact));
+  int fds[2];
+  ASSERT_EQ(0, socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds));
+  ScopedFD scoped_fd1(fds[1]);
+  // Close the receiving end so any send on fds[1] hits a broken connection.
+  ASSERT_EQ(0, IGNORE_EINTR(close(fds[0])));
+
+  // Have the thread send a synchronous message via the socket. Unless the
+  // message is sent with MSG_NOSIGNAL, this shall result in SIGPIPE.
+  Pickle request;
+  ASSERT_EQ(-1,
+            UnixDomainSocket::SendRecvMsg(fds[1], static_cast<uint8_t*>(NULL),
+                                          0U, static_cast<int*>(NULL), request));
+  // With MSG_NOSIGNAL the failure surfaces as EPIPE instead of a signal.
+  ASSERT_EQ(EPIPE, errno);
+  // Restore the SIGPIPE handler.
+  ASSERT_EQ(0, sigaction(SIGPIPE, &oldact, NULL));
+}
+
+// Simple sanity check within a single process that receiving PIDs works.
+TEST(UnixDomainSocketTest, RecvPid) {
+  int fds[2];
+  ASSERT_EQ(0, socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds));
+  ScopedFD recv_sock(fds[0]);
+  ScopedFD send_sock(fds[1]);
+
+  ASSERT_TRUE(UnixDomainSocket::EnableReceiveProcessId(recv_sock.get()));
+
+  // sizeof(kHello) includes the trailing NUL, so the whole literal is sent.
+  static const char kHello[] = "hello";
+  ASSERT_TRUE(UnixDomainSocket::SendMsg(
+      send_sock.get(), kHello, sizeof(kHello), std::vector<int>()));
+
+  // Extra receiving buffer space to make sure we really received only
+  // sizeof(kHello) bytes and it wasn't just truncated to fit the buffer.
+  char buf[sizeof(kHello) + 1];
+  ProcessId sender_pid;
+  std::vector<ScopedFD> fd_vec;
+  const ssize_t nread = UnixDomainSocket::RecvMsgWithPid(
+      recv_sock.get(), buf, sizeof(buf), &fd_vec, &sender_pid);
+  ASSERT_EQ(sizeof(kHello), static_cast<size_t>(nread));
+  ASSERT_EQ(0, memcmp(buf, kHello, sizeof(kHello)));
+  ASSERT_EQ(0U, fd_vec.size());
+
+  // Both ends live in this process, so the reported sender is ourselves.
+  ASSERT_EQ(getpid(), sender_pid);
+}
+
+// Same as above, but send the max number of file descriptors too.
+TEST(UnixDomainSocketTest, RecvPidWithMaxDescriptors) {
+  int fds[2];
+  ASSERT_EQ(0, socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds));
+  ScopedFD recv_sock(fds[0]);
+  ScopedFD send_sock(fds[1]);
+
+  ASSERT_TRUE(UnixDomainSocket::EnableReceiveProcessId(recv_sock.get()));
+
+  // Send kMaxFileDescriptors duplicates of the same descriptor alongside the
+  // payload; the receiver should get them all back as distinct ScopedFDs.
+  static const char kHello[] = "hello";
+  std::vector<int> send_fds(UnixDomainSocket::kMaxFileDescriptors,
+                            send_sock.get());
+  ASSERT_TRUE(UnixDomainSocket::SendMsg(
+      send_sock.get(), kHello, sizeof(kHello), send_fds));
+
+  // Extra receiving buffer space to make sure we really received only
+  // sizeof(kHello) bytes and it wasn't just truncated to fit the buffer.
+  char buf[sizeof(kHello) + 1];
+  ProcessId sender_pid;
+  std::vector<ScopedFD> recv_fds;
+  const ssize_t nread = UnixDomainSocket::RecvMsgWithPid(
+      recv_sock.get(), buf, sizeof(buf), &recv_fds, &sender_pid);
+  ASSERT_EQ(sizeof(kHello), static_cast<size_t>(nread));
+  ASSERT_EQ(0, memcmp(buf, kHello, sizeof(kHello)));
+  ASSERT_EQ(UnixDomainSocket::kMaxFileDescriptors, recv_fds.size());
+
+  ASSERT_EQ(getpid(), sender_pid);
+}
+
+// Check that RecvMsgWithPid doesn't DCHECK fail when reading EOF from a
+// disconnected socket.
+TEST(UnixDomainSocketTest, RecvPidDisconnectedSocket) {
+  int fds[2];
+  ASSERT_EQ(0, socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds));
+  ScopedFD recv_sock(fds[0]);
+  ScopedFD send_sock(fds[1]);
+
+  ASSERT_TRUE(UnixDomainSocket::EnableReceiveProcessId(recv_sock.get()));
+
+  // Close the sending end so the receiver immediately observes EOF.
+  send_sock.reset();
+
+  char ch;
+  ProcessId sender_pid;
+  std::vector<ScopedFD> recv_fds;
+  const ssize_t nread = UnixDomainSocket::RecvMsgWithPid(
+      recv_sock.get(), &ch, sizeof(ch), &recv_fds, &sender_pid);
+  // EOF: zero bytes read, no credentials (pid stays -1), no descriptors.
+  ASSERT_EQ(0, nread);
+  ASSERT_EQ(-1, sender_pid);
+  ASSERT_EQ(0U, recv_fds.size());
+}
+
+} // namespace
+
+} // namespace base
diff --git a/libchrome/base/power_monitor/power_monitor.h b/libchrome/base/power_monitor/power_monitor.h
new file mode 100644
index 0000000..e025b32
--- /dev/null
+++ b/libchrome/base/power_monitor/power_monitor.h
@@ -0,0 +1,55 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POWER_MONITOR_POWER_MONITOR_H_
+#define BASE_POWER_MONITOR_POWER_MONITOR_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/observer_list_threadsafe.h"
+#include "base/power_monitor/power_observer.h"
+
+namespace base {
+
+class PowerMonitorSource;
+
+// A class used to monitor the power state change and notify the observers about
+// the change event.
+class BASE_EXPORT PowerMonitor {
+ public:
+  // Takes ownership of |source|.
+  explicit PowerMonitor(std::unique_ptr<PowerMonitorSource> source);
+  ~PowerMonitor();
+
+  // Get the process-wide PowerMonitor (if not present, returns NULL).
+  // NOTE(review): presumably the constructor registers the instance returned
+  // here — the implementation is not visible in this header.
+  static PowerMonitor* Get();
+
+  // Add and remove an observer.
+  // Can be called from any thread.
+  // Must not be called from within a notification callback.
+  void AddObserver(PowerObserver* observer);
+  void RemoveObserver(PowerObserver* observer);
+
+  // Is the computer currently on battery power.
+  bool IsOnBatteryPower();
+
+ private:
+  friend class PowerMonitorSource;
+
+  PowerMonitorSource* Source();
+
+  // Entry points used by the friend PowerMonitorSource to report events to
+  // the registered observers.
+  void NotifyPowerStateChange(bool battery_in_use);
+  void NotifySuspend();
+  void NotifyResume();
+
+  // Thread-safe observer list, matching the any-thread contract of
+  // AddObserver()/RemoveObserver() above.
+  scoped_refptr<ObserverListThreadSafe<PowerObserver> > observers_;
+  std::unique_ptr<PowerMonitorSource> source_;
+
+  DISALLOW_COPY_AND_ASSIGN(PowerMonitor);
+};
+
+} // namespace base
+
+#endif // BASE_POWER_MONITOR_POWER_MONITOR_H_
diff --git a/libchrome/base/power_monitor/power_monitor_device_source.h b/libchrome/base/power_monitor/power_monitor_device_source.h
new file mode 100644
index 0000000..2dabac8
--- /dev/null
+++ b/libchrome/base/power_monitor/power_monitor_device_source.h
@@ -0,0 +1,118 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POWER_MONITOR_POWER_MONITOR_DEVICE_SOURCE_H_
+#define BASE_POWER_MONITOR_POWER_MONITOR_DEVICE_SOURCE_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/observer_list_threadsafe.h"
+#include "base/power_monitor/power_monitor_source.h"
+#include "base/power_monitor/power_observer.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+
+// Windows HiRes timers drain the battery faster so we need to know the battery
+// status. This isn't true for other platforms.
+#define ENABLE_BATTERY_MONITORING 1
+#else
+#undef ENABLE_BATTERY_MONITORING
+#endif // !OS_WIN
+
+#if defined(ENABLE_BATTERY_MONITORING)
+#include "base/timer/timer.h"
+#endif // defined(ENABLE_BATTERY_MONITORING)
+
+#if defined(OS_IOS)
+#include <objc/runtime.h>
+#endif // OS_IOS
+
+namespace base {
+
+// A class used to monitor the power state change and notify the observers about
+// the change event. Platform-specific PowerMonitorSource implementation that
+// reads power state from the underlying OS facilities.
+class BASE_EXPORT PowerMonitorDeviceSource : public PowerMonitorSource {
+ public:
+  PowerMonitorDeviceSource();
+  ~PowerMonitorDeviceSource() override;
+
+#if defined(OS_MACOSX)
+  // Allocate system resources needed by the PowerMonitor class.
+  //
+  // This function must be called before instantiating an instance of the class
+  // and before the Sandbox is initialized.
+#if !defined(OS_IOS)
+  static void AllocateSystemIOPorts();
+#else
+  static void AllocateSystemIOPorts() {}
+#endif // OS_IOS
+#endif // OS_MACOSX
+
+#if defined(OS_CHROMEOS)
+  // On Chrome OS, Chrome receives power-related events from powerd, the system
+  // power daemon, via D-Bus signals received on the UI thread. base can't
+  // directly depend on that code, so this class instead exposes static methods
+  // so that events can be passed in.
+  static void SetPowerSource(bool on_battery);
+  static void HandleSystemSuspending();
+  static void HandleSystemResumed();
+#endif
+
+ private:
+#if defined(OS_WIN)
+  // Represents a message-only window for power message handling on Windows.
+  // Only allow PowerMonitor to create it.
+  class PowerMessageWindow {
+   public:
+    PowerMessageWindow();
+    ~PowerMessageWindow();
+
+   private:
+    static LRESULT CALLBACK WndProcThunk(HWND hwnd,
+                                         UINT message,
+                                         WPARAM wparam,
+                                         LPARAM lparam);
+    // Instance of the module containing the window procedure.
+    HMODULE instance_;
+    // A hidden message-only window.
+    HWND message_hwnd_;
+  };
+#endif // OS_WIN
+
+#if defined(OS_MACOSX)
+  void PlatformInit();
+  void PlatformDestroy();
+#endif
+
+  // Platform-specific method to check whether the system is currently
+  // running on battery power. Returns true if running on batteries,
+  // false otherwise.
+  bool IsOnBatteryPowerImpl() override;
+
+  // Checks the battery status and notifies observers if the battery
+  // status has changed.
+  void BatteryCheck();
+
+#if defined(OS_IOS)
+  // Holds pointers to system event notification observers.
+  std::vector<id> notification_observers_;
+#endif
+
+#if defined(ENABLE_BATTERY_MONITORING)
+  // One-shot timer for a deferred battery status check.
+  // NOTE(review): assumed to fire BatteryCheck() — constructor not shown here.
+  base::OneShotTimer delayed_battery_check_;
+#endif
+
+#if defined(OS_WIN)
+  PowerMessageWindow power_message_window_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(PowerMonitorDeviceSource);
+};
+
+} // namespace base
+
+#endif // BASE_POWER_MONITOR_POWER_MONITOR_DEVICE_SOURCE_H_
diff --git a/libchrome/base/power_monitor/power_monitor_source.h b/libchrome/base/power_monitor/power_monitor_source.h
new file mode 100644
index 0000000..e63f4f8
--- /dev/null
+++ b/libchrome/base/power_monitor/power_monitor_source.h
@@ -0,0 +1,65 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POWER_MONITOR_POWER_MONITOR_SOURCE_H_
+#define BASE_POWER_MONITOR_POWER_MONITOR_SOURCE_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/observer_list_threadsafe.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+class PowerMonitor;
+
+// Communicates power state changes to the power monitor.
+class BASE_EXPORT PowerMonitorSource {
+ public:
+  PowerMonitorSource();
+  virtual ~PowerMonitorSource();
+
+  // Normalized list of power events.
+  enum PowerEvent {
+    POWER_STATE_EVENT, // The Power status of the system has changed.
+    SUSPEND_EVENT, // The system is being suspended.
+    RESUME_EVENT // The system is being resumed.
+  };
+
+  // Is the computer currently on battery power. Can be called on any thread.
+  bool IsOnBatteryPower();
+
+ protected:
+  friend class PowerMonitorTest;
+
+  // Friend function that is allowed to access the protected ProcessPowerEvent.
+  friend void ProcessPowerEventHelper(PowerEvent);
+
+  // Get the process-wide PowerMonitorSource (if not present, returns NULL).
+  static PowerMonitorSource* Get();
+
+  // ProcessPowerEvent should only be called from a single thread, most likely
+  // the UI thread or, in child processes, the IO thread.
+  static void ProcessPowerEvent(PowerEvent event_id);
+
+  // Platform-specific method to check whether the system is currently
+  // running on battery power. Returns true if running on batteries,
+  // false otherwise.
+  virtual bool IsOnBatteryPowerImpl() = 0;
+
+ private:
+  // Cached battery state; guarded by |battery_lock_| below.
+  bool on_battery_power_;
+  // NOTE(review): presumably tracks suspend/resume pairing; maintained by
+  // ProcessPowerEvent in the .cc, which is not visible here.
+  bool suspended_;
+
+  // This lock guards access to on_battery_power_, to ensure that
+  // IsOnBatteryPower can be called from any thread.
+  Lock battery_lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(PowerMonitorSource);
+};
+
+} // namespace base
+
+#endif // BASE_POWER_MONITOR_POWER_MONITOR_SOURCE_H_
diff --git a/libchrome/base/power_monitor/power_observer.h b/libchrome/base/power_monitor/power_observer.h
new file mode 100644
index 0000000..6be70bb
--- /dev/null
+++ b/libchrome/base/power_monitor/power_observer.h
@@ -0,0 +1,31 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POWER_MONITOR_POWER_OBSERVER_H_
+#define BASE_POWER_MONITOR_POWER_OBSERVER_H_
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+
+namespace base {
+
+// Interface for clients that want power-state notifications from
+// PowerMonitor. All callbacks default to no-ops so implementers may override
+// only the events they care about.
+class BASE_EXPORT PowerObserver {
+ public:
+  // Notification of a change in power status of the computer, such
+  // as from switching between battery and A/C power.
+  virtual void OnPowerStateChange(bool on_battery_power) {}
+
+  // Notification that the system is suspending.
+  virtual void OnSuspend() {}
+
+  // Notification that the system is resuming.
+  virtual void OnResume() {}
+
+ protected:
+  // Protected, so observers cannot be deleted through this interface.
+  virtual ~PowerObserver() {}
+};
+
+} // namespace base
+
+#endif // BASE_POWER_MONITOR_POWER_OBSERVER_H_
diff --git a/libchrome/base/process/internal_linux.cc b/libchrome/base/process/internal_linux.cc
new file mode 100644
index 0000000..d286f4e
--- /dev/null
+++ b/libchrome/base/process/internal_linux.cc
@@ -0,0 +1,189 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/internal_linux.h"
+
+#include <limits.h>
+#include <unistd.h>
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+// Root of the procfs mount; see declarations in internal_linux.h.
+const char kProcDir[] = "/proc";
+
+// Per-process stat file name, i.e. /proc/<pid>/stat.
+const char kStatFile[] = "stat";
+
+// Returns FilePath("/proc/<pid>"), e.g. "/proc/42" for pid 42.
+FilePath GetProcPidDir(pid_t pid) {
+  return FilePath(kProcDir).Append(IntToString(pid));
+}
+
+pid_t ProcDirSlotToPid(const char* d_name) {
+  // Reject entries that are not pure ASCII digits (e.g. "self", "cpuinfo"):
+  // only all-numeric names are per-process directories.
+  int i;
+  for (i = 0; i < NAME_MAX && d_name[i]; ++i) {
+    if (!IsAsciiDigit(d_name[i])) {
+      return 0;
+    }
+  }
+  if (i == NAME_MAX)
+    return 0;
+
+  // The name is all digits and shorter than NAME_MAX; convert it to a pid.
+  pid_t pid;
+  std::string pid_string(d_name);
+  if (!StringToInt(pid_string, &pid)) {
+    NOTREACHED();
+    return 0;
+  }
+  return pid;
+}
+
+// Reads |file| into |buffer|. Returns true only if the read succeeded AND the
+// file was non-empty (an empty read is treated as failure below).
+bool ReadProcFile(const FilePath& file, std::string* buffer) {
+  buffer->clear();
+  // Synchronously reading files in /proc is safe.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  if (!ReadFileToString(file, buffer)) {
+    DLOG(WARNING) << "Failed to read " << file.MaybeAsASCII();
+    return false;
+  }
+  return !buffer->empty();
+}
+
+// Reads /proc/<pid>/stat into |buffer|; see ReadProcFile() for failure
+// semantics.
+bool ReadProcStats(pid_t pid, std::string* buffer) {
+  FilePath stat_file = internal::GetProcPidDir(pid).Append(kStatFile);
+  return ReadProcFile(stat_file, buffer);
+}
+
+bool ParseProcStats(const std::string& stats_data,
+                    std::vector<std::string>* proc_stats) {
+  // |stats_data| may be empty if the process disappeared somehow.
+  // e.g. http://crbug.com/145811
+  if (stats_data.empty())
+    return false;
+
+  // The stat file is formatted as:
+  //   pid (process name) data1 data2 .... dataN
+  // Look for the closing paren by scanning backwards, to avoid being fooled by
+  // processes with ')' in the name.
+  size_t open_parens_idx = stats_data.find(" (");
+  size_t close_parens_idx = stats_data.rfind(") ");
+  if (open_parens_idx == std::string::npos ||
+      close_parens_idx == std::string::npos ||
+      open_parens_idx > close_parens_idx) {
+    DLOG(WARNING) << "Failed to find matched parens in '" << stats_data << "'";
+    NOTREACHED();
+    return false;
+  }
+  // Advance from the space to the '(' itself.
+  open_parens_idx++;
+
+  proc_stats->clear();
+  // PID. (Note: this substring retains the space preceding the '('.)
+  proc_stats->push_back(stats_data.substr(0, open_parens_idx));
+  // Process name without parentheses.
+  proc_stats->push_back(
+      stats_data.substr(open_parens_idx + 1,
+                        close_parens_idx - (open_parens_idx + 1)));
+
+  // Split the rest.
+  std::vector<std::string> other_stats = SplitString(
+      stats_data.substr(close_parens_idx + 2), " ",
+      base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  for (size_t i = 0; i < other_stats.size(); ++i)
+    proc_stats->push_back(other_stats[i]);
+  return true;
+}
+
+typedef std::map<std::string, std::string> ProcStatMap;
+// Parses "key value(s)" lines (as in /proc/stat) into |output|. Note that
+// map::insert keeps the first occurrence if a key appears more than once.
+void ParseProcStat(const std::string& contents, ProcStatMap* output) {
+  StringPairs key_value_pairs;
+  SplitStringIntoKeyValuePairs(contents, ' ', '\n', &key_value_pairs);
+  for (size_t i = 0; i < key_value_pairs.size(); ++i) {
+    output->insert(key_value_pairs[i]);
+  }
+}
+
+// Returns the requested field of a ParseProcStats() result as an int64_t, or
+// 0 if the field does not parse as an integer.
+int64_t GetProcStatsFieldAsInt64(const std::vector<std::string>& proc_stats,
+                                 ProcStatsFields field_num) {
+  DCHECK_GE(field_num, VM_PPID);
+  CHECK_LT(static_cast<size_t>(field_num), proc_stats.size());
+
+  int64_t value;
+  return StringToInt64(proc_stats[field_num], &value) ? value : 0;
+}
+
+// size_t variant of GetProcStatsFieldAsInt64() above; 0 on parse failure.
+size_t GetProcStatsFieldAsSizeT(const std::vector<std::string>& proc_stats,
+                                ProcStatsFields field_num) {
+  DCHECK_GE(field_num, VM_PPID);
+  CHECK_LT(static_cast<size_t>(field_num), proc_stats.size());
+
+  size_t value;
+  return StringToSizeT(proc_stats[field_num], &value) ? value : 0;
+}
+
+// Convenience: read + parse /proc/<pid>/stat, then extract one field.
+// Returns 0 on any read/parse failure.
+int64_t ReadProcStatsAndGetFieldAsInt64(pid_t pid, ProcStatsFields field_num) {
+  std::string stats_data;
+  if (!ReadProcStats(pid, &stats_data))
+    return 0;
+  std::vector<std::string> proc_stats;
+  if (!ParseProcStats(stats_data, &proc_stats))
+    return 0;
+  return GetProcStatsFieldAsInt64(proc_stats, field_num);
+}
+
+// size_t variant of ReadProcStatsAndGetFieldAsInt64() above; 0 on failure.
+size_t ReadProcStatsAndGetFieldAsSizeT(pid_t pid,
+                                       ProcStatsFields field_num) {
+  std::string stats_data;
+  if (!ReadProcStats(pid, &stats_data))
+    return 0;
+  std::vector<std::string> proc_stats;
+  if (!ParseProcStats(stats_data, &proc_stats))
+    return 0;
+  return GetProcStatsFieldAsSizeT(proc_stats, field_num);
+}
+
+// Returns the system boot time from the "btime" line of /proc/stat (seconds
+// since the epoch), or a default-constructed Time on any failure.
+Time GetBootTime() {
+  FilePath path("/proc/stat");
+  std::string contents;
+  if (!ReadProcFile(path, &contents))
+    return Time();
+  ProcStatMap proc_stat;
+  ParseProcStat(contents, &proc_stat);
+  ProcStatMap::const_iterator btime_it = proc_stat.find("btime");
+  if (btime_it == proc_stat.end())
+    return Time();
+  int btime;
+  if (!StringToInt(btime_it->second, &btime))
+    return Time();
+  return Time::FromTimeT(btime);
+}
+
+TimeDelta ClockTicksToTimeDelta(int clock_ticks) {
+  // This queries the /proc-specific scaling factor which is
+  // conceptually the system hertz. To dump this value on another
+  // system, try
+  //   od -t dL /proc/self/auxv
+  // and look for the number after 17 in the output; mine is
+  //   0000040 17 100 3 134512692
+  // which means the answer is 100.
+  // It may be the case that this value is always 100.
+  static const int kHertz = sysconf(_SC_CLK_TCK);
+
+  // NOTE(review): sysconf() can return -1 on error, which would make the
+  // division below misbehave; assumed never to happen for _SC_CLK_TCK.
+  return TimeDelta::FromMicroseconds(
+      Time::kMicrosecondsPerSecond * clock_ticks / kHertz);
+}
+
+} // namespace internal
+} // namespace base
diff --git a/libchrome/base/process/internal_linux.h b/libchrome/base/process/internal_linux.h
new file mode 100644
index 0000000..ba793f7
--- /dev/null
+++ b/libchrome/base/process/internal_linux.h
@@ -0,0 +1,92 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains internal routines that are called by other files in
+// base/process/.
+
+#ifndef BASE_PROCESS_INTERNAL_LINUX_H_
+#define BASE_PROCESS_INTERNAL_LINUX_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+
+// The declarations below use std::string and std::vector, so the header must
+// include them itself to be self-contained.
+#include <string>
+#include <vector>
+
+#include "base/files/file_path.h"
+
+namespace base {
+
+class Time;
+class TimeDelta;
+
+namespace internal {
+
+// "/proc"
+extern const char kProcDir[];
+
+// "stat"
+extern const char kStatFile[];
+
+// Returns a FilePath to "/proc/pid".
+base::FilePath GetProcPidDir(pid_t pid);
+
+// Take a /proc directory entry named |d_name|, and if it is the directory for
+// a process, convert it to a pid_t.
+// Returns 0 on failure.
+// e.g. /proc/self/ will return 0, whereas /proc/1234 will return 1234.
+pid_t ProcDirSlotToPid(const char* d_name);
+
+// Reads /proc/<pid>/stat into |buffer|. Returns true if the file can be read
+// and is non-empty.
+bool ReadProcStats(pid_t pid, std::string* buffer);
+
+// Takes |stats_data| and populates |proc_stats| with the values split by
+// spaces. Taking into account the 2nd field may, in itself, contain spaces.
+// Returns true if successful.
+bool ParseProcStats(const std::string& stats_data,
+                    std::vector<std::string>* proc_stats);
+
+// Fields from /proc/<pid>/stat, 0-based. See man 5 proc.
+// If the ordering ever changes, carefully review functions that use these
+// values.
+enum ProcStatsFields {
+  VM_COMM           = 1,   // Filename of executable, without parentheses.
+  VM_STATE          = 2,   // Letter indicating the state of the process.
+  VM_PPID           = 3,   // PID of the parent.
+  VM_PGRP           = 4,   // Process group id.
+  VM_UTIME          = 13,  // Time scheduled in user mode in clock ticks.
+  VM_STIME          = 14,  // Time scheduled in kernel mode in clock ticks.
+  VM_NUMTHREADS     = 19,  // Number of threads.
+  VM_STARTTIME      = 21,  // The time the process started in clock ticks.
+  VM_VSIZE          = 22,  // Virtual memory size in bytes.
+  VM_RSS            = 23,  // Resident Set Size in pages.
+};
+
+// Reads the |field_num|th field from |proc_stats|. Returns 0 on failure.
+// This version does not handle the first 3 values, since the first value is
+// simply |pid|, and the next two values are strings.
+int64_t GetProcStatsFieldAsInt64(const std::vector<std::string>& proc_stats,
+                                 ProcStatsFields field_num);
+
+// Same as GetProcStatsFieldAsInt64(), but for size_t values.
+size_t GetProcStatsFieldAsSizeT(const std::vector<std::string>& proc_stats,
+                                ProcStatsFields field_num);
+
+// Convenience wrapper around GetProcStatsFieldAsInt64(), ParseProcStats() and
+// ReadProcStats(). See GetProcStatsFieldAsInt64() for details.
+int64_t ReadProcStatsAndGetFieldAsInt64(pid_t pid, ProcStatsFields field_num);
+
+// Same as ReadProcStatsAndGetFieldAsInt64() but for size_t values.
+size_t ReadProcStatsAndGetFieldAsSizeT(pid_t pid,
+                                       ProcStatsFields field_num);
+
+// Returns the time that the OS started. Clock ticks are relative to this.
+Time GetBootTime();
+
+// Converts Linux clock ticks to a wall time delta.
+TimeDelta ClockTicksToTimeDelta(int clock_ticks);
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_PROCESS_INTERNAL_LINUX_H_
diff --git a/libchrome/base/process/kill.cc b/libchrome/base/process/kill.cc
new file mode 100644
index 0000000..5d8ba6a
--- /dev/null
+++ b/libchrome/base/process/kill.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/kill.h"
+
+#include "base/process/process_iterator.h"
+
+namespace base {
+
+// Terminates with |exit_code| every running process launched from
+// |executable_name| that passes |filter| (if non-null). Returns true only if
+// every matched process could be terminated.
+bool KillProcesses(const FilePath::StringType& executable_name,
+                   int exit_code,
+                   const ProcessFilter* filter) {
+  bool all_killed = true;
+  NamedProcessIterator iter(executable_name, filter);
+  for (const ProcessEntry* entry = iter.NextProcessEntry(); entry != NULL;
+       entry = iter.NextProcessEntry()) {
+    Process process = Process::Open(entry->pid());
+    if (!process.Terminate(exit_code, true))
+      all_killed = false;
+  }
+  return all_killed;
+}
+
+} // namespace base
diff --git a/libchrome/base/process/kill.h b/libchrome/base/process/kill.h
new file mode 100644
index 0000000..c664f33
--- /dev/null
+++ b/libchrome/base/process/kill.h
@@ -0,0 +1,135 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains routines to kill processes and get the exit code and
+// termination status.
+
+#ifndef BASE_PROCESS_KILL_H_
+#define BASE_PROCESS_KILL_H_
+
+#include "base/files/file_path.h"
+#include "base/process/process.h"
+#include "base/process/process_handle.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class ProcessFilter;
+
+// Return status values from GetTerminationStatus. Don't use these as
+// exit code arguments to KillProcess*(), use platform/application
+// specific values instead.
+enum TerminationStatus {
+  TERMINATION_STATUS_NORMAL_TERMINATION,    // zero exit status
+  TERMINATION_STATUS_ABNORMAL_TERMINATION,  // non-zero exit status
+  TERMINATION_STATUS_PROCESS_WAS_KILLED,    // e.g. SIGKILL or task manager kill
+  TERMINATION_STATUS_PROCESS_CRASHED,       // e.g. Segmentation fault
+  TERMINATION_STATUS_STILL_RUNNING,         // child hasn't exited yet
+#if defined(OS_CHROMEOS)
+  // Used for the case when oom-killer kills a process on ChromeOS.
+  TERMINATION_STATUS_PROCESS_WAS_KILLED_BY_OOM,
+#endif
+#if defined(OS_ANDROID)
+  // On Android processes are spawned from the system Zygote and we do not get
+  // the termination status. We can't know if the termination was a crash or an
+  // oom kill for sure, but we can use status of the strong process bindings as
+  // a hint.
+  TERMINATION_STATUS_OOM_PROTECTED,         // child was protected from oom kill
+#endif
+  TERMINATION_STATUS_LAUNCH_FAILED,         // child process never launched
+  TERMINATION_STATUS_MAX_ENUM               // Sentinel, not a real status;
+                                            // keep last.
+};
+
+// Attempts to kill all the processes on the current machine that were launched
+// from the given executable name, ending them with the given exit code. If
+// filter is non-null, then only processes selected by the filter are killed.
+// Returns true if all processes were able to be killed off, false if at least
+// one couldn't be killed.
+BASE_EXPORT bool KillProcesses(const FilePath::StringType& executable_name,
+ int exit_code,
+ const ProcessFilter* filter);
+
+#if defined(OS_POSIX)
+// Attempts to kill the process group identified by |process_group_id|. Returns
+// true on success.
+BASE_EXPORT bool KillProcessGroup(ProcessHandle process_group_id);
+#endif // defined(OS_POSIX)
+
+// Get the termination status of the process by interpreting the
+// circumstances of the child process' death. |exit_code| is set to
+// the status returned by waitpid() on POSIX, and from
+// GetExitCodeProcess() on Windows. |exit_code| may be NULL if the
+// caller is not interested in it. Note that on Linux, this function
+// will only return a useful result the first time it is called after
+// the child exits (because it will reap the child and the information
+// will no longer be available).
+BASE_EXPORT TerminationStatus GetTerminationStatus(ProcessHandle handle,
+ int* exit_code);
+
+#if defined(OS_POSIX)
+// Send a kill signal to the process and then wait for the process to exit
+// and get the termination status.
+//
+// This is used in situations where it is believed that the process is dead
+// or dying (because communication with the child process has been cut).
+// In order to avoid erroneously returning that the process is still running
+// because the kernel is still cleaning it up, this will wait for the process
+// to terminate. In order to avoid the risk of hanging while waiting for the
+// process to terminate, send a SIGKILL to the process before waiting for the
+// termination status.
+//
+// Note that it is not an option to call WaitForExitCode and then
+// GetTerminationStatus as the child will be reaped when WaitForExitCode
+// returns, and this information will be lost.
+//
+BASE_EXPORT TerminationStatus GetKnownDeadTerminationStatus(
+ ProcessHandle handle, int* exit_code);
+#endif // defined(OS_POSIX)
+
+// Wait for all the processes based on the named executable to exit. If filter
+// is non-null, then only processes selected by the filter are waited on.
+// Returns after all processes have exited or wait_milliseconds have expired.
+// Returns true if all the processes exited, false otherwise.
+BASE_EXPORT bool WaitForProcessesToExit(
+ const FilePath::StringType& executable_name,
+ base::TimeDelta wait,
+ const ProcessFilter* filter);
+
+// Waits a certain amount of time (can be 0) for all the processes with a given
+// executable name to exit, then kills off any of them that are still around.
+// If filter is non-null, then only processes selected by the filter are waited
+// on. Killed processes are ended with the given exit code. Returns false if
+// any processes needed to be killed, true if they all exited cleanly within
+// the wait_milliseconds delay.
+BASE_EXPORT bool CleanupProcesses(const FilePath::StringType& executable_name,
+ base::TimeDelta wait,
+ int exit_code,
+ const ProcessFilter* filter);
+
+// This method ensures that the specified process eventually terminates, and
+// then it closes the given process handle.
+//
+// It assumes that the process has already been signalled to exit, and it
+// begins by waiting a small amount of time for it to exit. If the process
+// does not appear to have exited, then this function starts to become
+// aggressive about ensuring that the process terminates.
+//
+// On Linux this method does not block the calling thread.
+// On OS X this method may block for up to 2 seconds.
+//
+// NOTE: The process must have been opened with the PROCESS_TERMINATE and
+// SYNCHRONIZE permissions.
+//
+BASE_EXPORT void EnsureProcessTerminated(Process process);
+
+#if defined(OS_POSIX) && !defined(OS_MACOSX)
+// The nicer version of EnsureProcessTerminated() that is patient and will
+// wait for |pid| to finish and then reap it.
+BASE_EXPORT void EnsureProcessGetsReaped(ProcessId pid);
+#endif
+
+} // namespace base
+
+#endif // BASE_PROCESS_KILL_H_
diff --git a/libchrome/base/process/kill_posix.cc b/libchrome/base/process/kill_posix.cc
new file mode 100644
index 0000000..85470e0
--- /dev/null
+++ b/libchrome/base/process/kill_posix.cc
@@ -0,0 +1,232 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/kill.h"
+
+#include <errno.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/process_iterator.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+// Waits on |handle| (blocking iff |can_block|) and maps the waitpid() status
+// to a TerminationStatus. On waitpid() failure the process is reported as
+// normally terminated; if the child has not exited yet (non-blocking case)
+// TERMINATION_STATUS_STILL_RUNNING is returned. |exit_code|, if non-null,
+// receives the raw waitpid() status word, not the decoded exit code.
+TerminationStatus GetTerminationStatusImpl(ProcessHandle handle,
+                                           bool can_block,
+                                           int* exit_code) {
+  int status = 0;
+  const pid_t result = HANDLE_EINTR(waitpid(handle, &status,
+                                            can_block ? 0 : WNOHANG));
+  if (result == -1) {
+    DPLOG(ERROR) << "waitpid(" << handle << ")";
+    if (exit_code)
+      *exit_code = 0;
+    return TERMINATION_STATUS_NORMAL_TERMINATION;
+  } else if (result == 0) {
+    // The child hasn't exited yet.
+    if (exit_code)
+      *exit_code = 0;
+    return TERMINATION_STATUS_STILL_RUNNING;
+  }
+
+  if (exit_code)
+    *exit_code = status;
+
+  if (WIFSIGNALED(status)) {
+    switch (WTERMSIG(status)) {
+      // Signals that indicate a crash.
+      case SIGABRT:
+      case SIGBUS:
+      case SIGFPE:
+      case SIGILL:
+      case SIGSEGV:
+        return TERMINATION_STATUS_PROCESS_CRASHED;
+      case SIGKILL:
+#if defined(OS_CHROMEOS)
+        // On ChromeOS, the only way a process gets killed by SIGKILL
+        // is by the oom-killer.
+        return TERMINATION_STATUS_PROCESS_WAS_KILLED_BY_OOM;
+#endif
+        // On other platforms SIGKILL deliberately falls through and is
+        // reported as a plain kill, like SIGINT/SIGTERM.
+      case SIGINT:
+      case SIGTERM:
+        return TERMINATION_STATUS_PROCESS_WAS_KILLED;
+      default:
+        break;
+    }
+  }
+
+  if (WIFEXITED(status) && WEXITSTATUS(status) != 0)
+    return TERMINATION_STATUS_ABNORMAL_TERMINATION;
+
+  return TERMINATION_STATUS_NORMAL_TERMINATION;
+}
+
+} // namespace
+
+#if !defined(OS_NACL_NONSFI)
+// Sends SIGKILL to the whole process group |process_group_id| (passing a
+// negative pid to kill(2) targets the group). Returns true iff the signal
+// was delivered.
+bool KillProcessGroup(ProcessHandle process_group_id) {
+  if (kill(-process_group_id, SIGKILL) == 0)
+    return true;
+  DPLOG(ERROR) << "Unable to terminate process group " << process_group_id;
+  return false;
+}
+#endif // !defined(OS_NACL_NONSFI)
+
+// Public non-blocking variant; see GetTerminationStatusImpl() for the
+// status-mapping rules.
+TerminationStatus GetTerminationStatus(ProcessHandle handle, int* exit_code) {
+  return GetTerminationStatusImpl(handle, false /* can_block */, exit_code);
+}
+
+// For a process believed dead or dying: sends SIGKILL to make sure, then
+// blocks until the child can be reaped so its status is not lost to a
+// later non-blocking query.
+TerminationStatus GetKnownDeadTerminationStatus(ProcessHandle handle,
+                                                int* exit_code) {
+  bool result = kill(handle, SIGKILL) == 0;
+
+  if (!result)
+    DPLOG(ERROR) << "Unable to terminate process " << handle;
+
+  return GetTerminationStatusImpl(handle, true /* can_block */, exit_code);
+}
+
+#if !defined(OS_NACL_NONSFI)
+// Polls the process table every 100ms until no process matching
+// |executable_name| (and |filter|) remains, or until |wait| has elapsed.
+// Returns true if all matching processes exited within the deadline.
+bool WaitForProcessesToExit(const FilePath::StringType& executable_name,
+                            TimeDelta wait,
+                            const ProcessFilter* filter) {
+  // TODO(port): This is inefficient, but works if there are multiple procs.
+  // TODO(port): use waitpid to avoid leaving zombies around
+  const TimeTicks deadline = TimeTicks::Now() + wait;
+  for (;;) {
+    NamedProcessIterator iter(executable_name, filter);
+    if (!iter.NextProcessEntry())
+      return true;
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+    if ((deadline - TimeTicks::Now()) <= TimeDelta())
+      return false;
+  }
+}
+
+// Waits up to |wait| for all processes matching |executable_name| (and
+// |filter|) to exit, then force-kills any stragglers with |exit_code|.
+// Returns true iff no process had to be killed.
+bool CleanupProcesses(const FilePath::StringType& executable_name,
+                      TimeDelta wait,
+                      int exit_code,
+                      const ProcessFilter* filter) {
+  if (WaitForProcessesToExit(executable_name, wait, filter))
+    return true;
+  KillProcesses(executable_name, exit_code, filter);
+  return false;
+}
+
+#if !defined(OS_MACOSX)
+
+namespace {
+
+// Return true if the given child is dead. This will also reap the process.
+// Doesn't block.
+static bool IsChildDead(pid_t child) {
+ const pid_t result = HANDLE_EINTR(waitpid(child, NULL, WNOHANG));
+ if (result == -1) {
+ DPLOG(ERROR) << "waitpid(" << child << ")";
+ NOTREACHED();
+ } else if (result > 0) {
+ // The child has died.
+ return true;
+ }
+
+ return false;
+}
+
+// A thread class which waits for the given child to exit and reaps it.
+// If the child doesn't exit within a couple of seconds, kill it.
+// A thread class which waits for the given child to exit and reaps it.
+// If the child doesn't exit within a couple of seconds, kill it.
+class BackgroundReaper : public PlatformThread::Delegate {
+ public:
+  // |timeout| is in seconds; 0 means wait forever and never kill.
+  BackgroundReaper(pid_t child, unsigned timeout)
+      : child_(child),
+        timeout_(timeout) {
+  }
+
+  // Overridden from PlatformThread::Delegate:
+  void ThreadMain() override {
+    WaitForChildToDie();
+    // The thread is created non-joinable (see callers below), so the object
+    // owns itself and self-destructs when done.
+    delete this;
+  }
+
+  void WaitForChildToDie() {
+    // Wait forever case.
+    if (timeout_ == 0) {
+      pid_t r = HANDLE_EINTR(waitpid(child_, NULL, 0));
+      if (r != child_) {
+        DPLOG(ERROR) << "While waiting for " << child_
+                     << " to terminate, we got the following result: " << r;
+      }
+      return;
+    }
+
+    // There's no good way to wait for a specific child to exit in a timed
+    // fashion. (No kqueue on Linux), so we just loop and sleep.
+
+    // Wait for 2 * timeout_ 500 milliseconds intervals.
+    for (unsigned i = 0; i < 2 * timeout_; ++i) {
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(500));
+      if (IsChildDead(child_))
+        return;
+    }
+
+    // The child outlived the timeout; escalate to SIGKILL.
+    if (kill(child_, SIGKILL) == 0) {
+      // SIGKILL is uncatchable. Since the signal was delivered, we can
+      // just wait for the process to die now in a blocking manner.
+      if (HANDLE_EINTR(waitpid(child_, NULL, 0)) < 0)
+        DPLOG(WARNING) << "waitpid";
+    } else {
+      DLOG(ERROR) << "While waiting for " << child_ << " to terminate we"
+                  << " failed to deliver a SIGKILL signal (" << errno << ").";
+    }
+  }
+
+ private:
+  const pid_t child_;
+  // Number of seconds to wait, if 0 then wait forever and do not attempt to
+  // kill |child_|.
+  const unsigned timeout_;
+
+  DISALLOW_COPY_AND_ASSIGN(BackgroundReaper);
+};
+
+} // namespace
+
+// Ensures |process| eventually terminates and is reaped: if it is not
+// already dead, hands it to a detached BackgroundReaper that waits a couple
+// of seconds and then SIGKILLs it.
+void EnsureProcessTerminated(Process process) {
+  // If the child is already dead, then there's nothing to do.
+  if (IsChildDead(process.Pid()))
+    return;
+
+  const unsigned kTimeoutSeconds = 2;
+  PlatformThread::CreateNonJoinable(
+      0, new BackgroundReaper(process.Pid(), kTimeoutSeconds));
+}
+
+// Like EnsureProcessTerminated(), but patient: the background thread waits
+// indefinitely (timeout 0) and only reaps |pid|, never killing it.
+void EnsureProcessGetsReaped(ProcessId pid) {
+  if (IsChildDead(pid))
+    return;  // Already reaped; nothing to do.
+
+  PlatformThread::CreateNonJoinable(0, new BackgroundReaper(pid, 0));
+}
+
+#endif // !defined(OS_MACOSX)
+#endif // !defined(OS_NACL_NONSFI)
+
+} // namespace base
diff --git a/libchrome/base/process/launch.cc b/libchrome/base/process/launch.cc
new file mode 100644
index 0000000..3ca5155
--- /dev/null
+++ b/libchrome/base/process/launch.cc
@@ -0,0 +1,59 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/launch.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Default constructor: every flag off, every handle/pointer null, and
+// platform-specific fields at their inert values. The #ifdef structure
+// mirrors the member declarations in launch.h.
+LaunchOptions::LaunchOptions()
+    : wait(false),
+#if defined(OS_WIN)
+      start_hidden(false),
+      handles_to_inherit(NULL),
+      inherit_handles(false),
+      as_user(NULL),
+      empty_desktop_name(false),
+      job_handle(NULL),
+      stdin_handle(NULL),
+      stdout_handle(NULL),
+      stderr_handle(NULL),
+      force_breakaway_from_job_(false)
+#else
+      clear_environ(false),
+      fds_to_remap(NULL),
+      maximize_rlimits(NULL),
+      new_process_group(false)
+#if defined(OS_LINUX)
+      , clone_flags(0)
+      , allow_new_privs(false)
+      , kill_on_parent_death(false)
+#endif  // OS_LINUX
+#if defined(OS_POSIX)
+      , pre_exec_delegate(NULL)
+#endif  // OS_POSIX
+#if defined(OS_CHROMEOS)
+      , ctrl_terminal_fd(-1)
+#endif  // OS_CHROMEOS
+#endif  // !defined(OS_WIN)
+    {
+}
+
+// LaunchOptions is a plain options bag, so member-wise copying is correct.
+LaunchOptions::LaunchOptions(const LaunchOptions& other) = default;
+
+// Intentionally empty; defined out of line.
+LaunchOptions::~LaunchOptions() {
+}
+
+// Returns default LaunchOptions, except that on Linux the child is allowed
+// to gain new privileges. For test binaries only.
+LaunchOptions LaunchOptionsForTest() {
+  LaunchOptions options;
+#if defined(OS_LINUX)
+  // To prevent accidental privilege sharing to an untrusted child, processes
+  // are started with PR_SET_NO_NEW_PRIVS. Do not set that here, since this
+  // new child will be used for testing only.
+  options.allow_new_privs = true;
+#endif
+  return options;
+}
+
+} // namespace base
diff --git a/libchrome/base/process/launch.h b/libchrome/base/process/launch.h
new file mode 100644
index 0000000..b8c0259
--- /dev/null
+++ b/libchrome/base/process/launch.h
@@ -0,0 +1,308 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains functions for launching subprocesses.
+
+#ifndef BASE_PROCESS_LAUNCH_H_
+#define BASE_PROCESS_LAUNCH_H_
+
+#include <stddef.h>
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/environment.h"
+#include "base/macros.h"
+#include "base/process/process.h"
+#include "base/process/process_handle.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+#include "base/posix/file_descriptor_shuffle.h"
+#elif defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+
+class CommandLine;
+
+#if defined(OS_WIN)
+typedef std::vector<HANDLE> HandlesToInheritVector;
+#endif
+// TODO(viettrungluu): Only define this on POSIX?
+typedef std::vector<std::pair<int, int> > FileHandleMappingVector;
+
+// Options for launching a subprocess that are passed to LaunchProcess().
+// The default constructor constructs the object with default options.
+struct BASE_EXPORT LaunchOptions {
+#if defined(OS_POSIX)
+  // Delegate to be run in between fork and exec in the subprocess (see
+  // pre_exec_delegate below)
+  class BASE_EXPORT PreExecDelegate {
+   public:
+    PreExecDelegate() {}
+    virtual ~PreExecDelegate() {}
+
+    // Since this is to be run between fork and exec, and fork may have happened
+    // while multiple threads were running, this function needs to be async
+    // safe.
+    virtual void RunAsyncSafe() = 0;
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(PreExecDelegate);
+  };
+#endif  // defined(OS_POSIX)
+
+  LaunchOptions();
+  LaunchOptions(const LaunchOptions&);
+  ~LaunchOptions();
+
+  // If true, wait for the process to complete.
+  bool wait;
+
+  // If not empty, change to this directory before executing the new process.
+  base::FilePath current_directory;
+
+#if defined(OS_WIN)
+  bool start_hidden;
+
+  // If non-null, inherit exactly the list of handles in this vector (these
+  // handles must be inheritable).
+  HandlesToInheritVector* handles_to_inherit;
+
+  // If true, the new process inherits handles from the parent. In production
+  // code this flag should be used only when running short-lived, trusted
+  // binaries, because open handles from other libraries and subsystems will
+  // leak to the child process, causing errors such as open socket hangs.
+  // Note: If |handles_to_inherit| is non-null, this flag is ignored and only
+  // those handles will be inherited.
+  bool inherit_handles;
+
+  // If non-null, runs as if the user represented by the token had launched it.
+  // Whether the application is visible on the interactive desktop depends on
+  // the token belonging to an interactive logon session.
+  //
+  // To avoid hard to diagnose problems, when specified this loads the
+  // environment variables associated with the user and if this operation fails
+  // the entire call fails as well.
+  UserTokenHandle as_user;
+
+  // If true, use an empty string for the desktop name.
+  bool empty_desktop_name;
+
+  // If non-null, launches the application in that job object. The process will
+  // be terminated immediately and LaunchProcess() will fail if assignment to
+  // the job object fails.
+  HANDLE job_handle;
+
+  // Handles for the redirection of stdin, stdout and stderr. The handles must
+  // be inheritable. Caller should either set all three of them or none (i.e.
+  // there is no way to redirect stderr without redirecting stdin). The
+  // |inherit_handles| flag must be set to true when redirecting stdio stream.
+  HANDLE stdin_handle;
+  HANDLE stdout_handle;
+  HANDLE stderr_handle;
+
+  // If set to true, ensures that the child process is launched with the
+  // CREATE_BREAKAWAY_FROM_JOB flag which allows it to breakout of the parent
+  // job if any.
+  // NOTE(review): the trailing underscore is normally reserved for private
+  // class members; kept as-is here for compatibility with existing callers.
+  bool force_breakaway_from_job_;
+#else
+  // Set/unset environment variables. These are applied on top of the parent
+  // process environment. Empty (the default) means to inherit the same
+  // environment. See AlterEnvironment().
+  EnvironmentMap environ;
+
+  // Clear the environment for the new process before processing changes from
+  // |environ|.
+  bool clear_environ;
+
+  // If non-null, remap file descriptors according to the mapping of
+  // src fd->dest fd to propagate FDs into the child process.
+  // This pointer is owned by the caller and must live through the
+  // call to LaunchProcess().
+  const FileHandleMappingVector* fds_to_remap;
+
+  // Each element is an RLIMIT_* constant that should be raised to its
+  // rlim_max. This pointer is owned by the caller and must live through
+  // the call to LaunchProcess().
+  const std::vector<int>* maximize_rlimits;
+
+  // If true, start the process in a new process group, instead of
+  // inheriting the parent's process group. The pgid of the child process
+  // will be the same as its pid.
+  bool new_process_group;
+
+#if defined(OS_LINUX)
+  // If non-zero, start the process using clone(), using flags as provided.
+  // Unlike in clone, clone_flags may not contain a custom termination signal
+  // that is sent to the parent when the child dies. The termination signal will
+  // always be set to SIGCHLD.
+  int clone_flags;
+
+  // By default, child processes will have the PR_SET_NO_NEW_PRIVS bit set. If
+  // true, then this bit will not be set in the new child process.
+  bool allow_new_privs;
+
+  // Sets parent process death signal to SIGKILL.
+  bool kill_on_parent_death;
+#endif  // defined(OS_LINUX)
+
+#if defined(OS_POSIX)
+  // If non-null, a delegate to be run immediately prior to executing the new
+  // program in the child process.
+  //
+  // WARNING: If LaunchProcess is called in the presence of multiple threads,
+  // code running in this delegate essentially needs to be async-signal safe
+  // (see man 7 signal for a list of allowed functions).
+  PreExecDelegate* pre_exec_delegate;
+#endif  // defined(OS_POSIX)
+
+#if defined(OS_CHROMEOS)
+  // If non-negative, the specified file descriptor will be set as the launched
+  // process' controlling terminal.
+  int ctrl_terminal_fd;
+#endif  // defined(OS_CHROMEOS)
+#endif  // !defined(OS_WIN)
+};
+
+// Launch a process via the command line |cmdline|.
+// See the documentation of LaunchOptions for details on |options|.
+//
+// Returns a valid Process upon success.
+//
+// Unix-specific notes:
+// - All file descriptors open in the parent process will be closed in the
+// child process except for any preserved by options::fds_to_remap, and
+// stdin, stdout, and stderr. If not remapped by options::fds_to_remap,
+// stdin is reopened as /dev/null, and the child is allowed to inherit its
+// parent's stdout and stderr.
+// - If the first argument on the command line does not contain a slash,
+// PATH will be searched. (See man execvp.)
+BASE_EXPORT Process LaunchProcess(const CommandLine& cmdline,
+ const LaunchOptions& options);
+
+#if defined(OS_WIN)
+// Windows-specific LaunchProcess that takes the command line as a
+// string. Useful for situations where you need to control the
+// command line arguments directly, but prefer the CommandLine version
+// if launching Chrome itself.
+//
+// The first command line argument should be the path to the process,
+// and don't forget to quote it.
+//
+// Example (including literal quotes)
+// cmdline = "c:\windows\explorer.exe" -foo "c:\bar\"
+BASE_EXPORT Process LaunchProcess(const string16& cmdline,
+ const LaunchOptions& options);
+
+// Launches a process with elevated privileges. This does not behave exactly
+// like LaunchProcess as it uses ShellExecuteEx instead of CreateProcess to
+// create the process. This means the process will have elevated privileges
+// and thus some common operations like OpenProcess will fail. Currently the
+// only supported LaunchOptions are |start_hidden| and |wait|.
+BASE_EXPORT Process LaunchElevatedProcess(const CommandLine& cmdline,
+ const LaunchOptions& options);
+
+#elif defined(OS_POSIX)
+// A POSIX-specific version of LaunchProcess that takes an argv array
+// instead of a CommandLine. Useful for situations where you need to
+// control the command line arguments directly, but prefer the
+// CommandLine version if launching Chrome itself.
+BASE_EXPORT Process LaunchProcess(const std::vector<std::string>& argv,
+ const LaunchOptions& options);
+
+// Close all file descriptors, except those which are a destination in the
+// given multimap. Only call this function in a child process where you know
+// that there aren't any other threads.
+BASE_EXPORT void CloseSuperfluousFds(const InjectiveMultimap& saved_map);
+#endif // defined(OS_POSIX)
+
+#if defined(OS_WIN)
+// Set |job_object|'s JOBOBJECT_EXTENDED_LIMIT_INFORMATION
+// BasicLimitInformation.LimitFlags to |limit_flags|.
+BASE_EXPORT bool SetJobObjectLimitFlags(HANDLE job_object, DWORD limit_flags);
+
+// Output multi-process printf, cout, cerr, etc to the cmd.exe console that ran
+// chrome. This is not thread-safe: only call from main thread.
+BASE_EXPORT void RouteStdioToConsole(bool create_console_if_not_found);
+#endif // defined(OS_WIN)
+
+// Executes the application specified by |cl| and wait for it to exit. Stores
+// the output (stdout) in |output|. Redirects stderr to /dev/null. Returns true
+// on success (application launched and exited cleanly, with exit code
+// indicating success).
+BASE_EXPORT bool GetAppOutput(const CommandLine& cl, std::string* output);
+
+// Like GetAppOutput, but also includes stderr.
+BASE_EXPORT bool GetAppOutputAndError(const CommandLine& cl,
+ std::string* output);
+
+#if defined(OS_WIN)
+// A Windows-specific version of GetAppOutput that takes a command line string
+// instead of a CommandLine object. Useful for situations where you need to
+// control the command line arguments directly.
+BASE_EXPORT bool GetAppOutput(const StringPiece16& cl, std::string* output);
+#endif
+
+#if defined(OS_POSIX)
+// A POSIX-specific version of GetAppOutput that takes an argv array
+// instead of a CommandLine. Useful for situations where you need to
+// control the command line arguments directly.
+BASE_EXPORT bool GetAppOutput(const std::vector<std::string>& argv,
+ std::string* output);
+
+// A version of |GetAppOutput()| which also returns the exit code of the
+// executed command. Returns true if the application runs and exits cleanly. If
+// this is the case the exit code of the application is available in
+// |*exit_code|.
+BASE_EXPORT bool GetAppOutputWithExitCode(const CommandLine& cl,
+ std::string* output, int* exit_code);
+#endif // defined(OS_POSIX)
+
+// If supported on the platform, and the user has sufficent rights, increase
+// the current process's scheduling priority to a high priority.
+BASE_EXPORT void RaiseProcessToHighPriority();
+
+#if defined(OS_MACOSX)
+// Restore the default exception handler, setting it to Apple Crash Reporter
+// (ReportCrash). When forking and execing a new process, the child will
+// inherit the parent's exception ports, which may be set to the Breakpad
+// instance running inside the parent. The parent's Breakpad instance should
+// not handle the child's exceptions. Calling RestoreDefaultExceptionHandler
+// in the child after forking will restore the standard exception handler.
+// See http://crbug.com/20371/ for more details.
+void RestoreDefaultExceptionHandler();
+#endif // defined(OS_MACOSX)
+
+// Creates a LaunchOptions object suitable for launching processes in a test
+// binary. This should not be called in production/released code.
+BASE_EXPORT LaunchOptions LaunchOptionsForTest();
+
+#if defined(OS_LINUX) || defined(OS_NACL_NONSFI)
+// A wrapper for clone with fork-like behavior, meaning that it returns the
+// child's pid in the parent and 0 in the child. |flags|, |ptid|, and |ctid| are
+// as in the clone system call (the CLONE_VM flag is not supported).
+//
+// This function uses the libc clone wrapper (which updates libc's pid cache)
+// internally, so callers may expect things like getpid() to work correctly
+// after in both the child and parent. An exception is when this code is run
+// under Valgrind. Valgrind does not support the libc clone wrapper, so the libc
+// pid cache may be incorrect after this function is called under Valgrind.
+//
+// As with fork(), callers should be extremely careful when calling this while
+// multiple threads are running, since at the time the fork happened, the
+// threads could have been in any state (potentially holding locks, etc.).
+// Callers should most likely call execve() in the child soon after calling
+// this.
+BASE_EXPORT pid_t ForkWithFlags(unsigned long flags, pid_t* ptid, pid_t* ctid);
+#endif
+
+} // namespace base
+
+#endif // BASE_PROCESS_LAUNCH_H_
diff --git a/libchrome/base/process/launch_mac.cc b/libchrome/base/process/launch_mac.cc
new file mode 100644
index 0000000..5895eae
--- /dev/null
+++ b/libchrome/base/process/launch_mac.cc
@@ -0,0 +1,31 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/launch.h"
+
+#include <mach/mach.h>
+#include <servers/bootstrap.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+void RestoreDefaultExceptionHandler() {
+ // This function is tailored to remove the Breakpad exception handler.
+ // exception_mask matches s_exception_mask in
+ // breakpad/src/client/mac/handler/exception_handler.cc
+ const exception_mask_t exception_mask = EXC_MASK_BAD_ACCESS |
+ EXC_MASK_BAD_INSTRUCTION |
+ EXC_MASK_ARITHMETIC |
+ EXC_MASK_BREAKPOINT;
+
+ // Setting the exception port to MACH_PORT_NULL may not be entirely
+ // kosher to restore the default exception handler, but in practice,
+ // it results in the exception port being set to Apple Crash Reporter,
+ // the desired behavior.
+ task_set_exception_ports(mach_task_self(), exception_mask, MACH_PORT_NULL,
+ EXCEPTION_DEFAULT, THREAD_STATE_NONE);
+}
+
+} // namespace base
diff --git a/libchrome/base/process/launch_posix.cc b/libchrome/base/process/launch_posix.cc
new file mode 100644
index 0000000..4fb1018
--- /dev/null
+++ b/libchrome/base/process/launch_posix.cc
@@ -0,0 +1,753 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/launch.h"
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <setjmp.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <set>
+
+#include "base/command_line.h"
+#include "base/compiler_specific.h"
+#include "base/debug/debugger.h"
+#include "base/debug/stack_trace.h"
+#include "base/files/dir_reader_posix.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/process.h"
+#include "base/process/process_metrics.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/third_party/valgrind/valgrind.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+#if defined(OS_LINUX)
+#include <sys/prctl.h>
+#endif
+
+#if defined(OS_CHROMEOS)
+#include <sys/ioctl.h>
+#endif
+
+#if defined(OS_FREEBSD)
+#include <sys/event.h>
+#include <sys/ucontext.h>
+#endif
+
+#if defined(OS_MACOSX)
+#include <crt_externs.h>
+#include <sys/event.h>
+#else
+extern char** environ;
+#endif
+
+namespace base {
+
+#if !defined(OS_NACL_NONSFI)
+
+namespace {
+
+// Get the process's "environment" (i.e. the thing that setenv/getenv
+// work with).
+char** GetEnvironment() {
+#if defined(OS_MACOSX)
+ return *_NSGetEnviron();
+#else
+ return environ;
+#endif
+}
+
+// Set the process's "environment" (i.e. the thing that setenv/getenv
+// work with).
+void SetEnvironment(char** env) {
+#if defined(OS_MACOSX)
+ *_NSGetEnviron() = env;
+#else
+ environ = env;
+#endif
+}
+
+// Set the calling thread's signal mask to new_sigmask and return
+// the previous signal mask.
+sigset_t SetSignalMask(const sigset_t& new_sigmask) {
+ sigset_t old_sigmask;
+#if defined(OS_ANDROID)
+ // POSIX says pthread_sigmask() must be used in multi-threaded processes,
+ // but Android's pthread_sigmask() was broken until 4.1:
+ // https://code.google.com/p/android/issues/detail?id=15337
+ // http://stackoverflow.com/questions/13777109/pthread-sigmask-on-android-not-working
+ RAW_CHECK(sigprocmask(SIG_SETMASK, &new_sigmask, &old_sigmask) == 0);
+#else
+ RAW_CHECK(pthread_sigmask(SIG_SETMASK, &new_sigmask, &old_sigmask) == 0);
+#endif
+ return old_sigmask;
+}
+
+#if !defined(OS_LINUX) || \
+ (!defined(__i386__) && !defined(__x86_64__) && !defined(__arm__))
+void ResetChildSignalHandlersToDefaults() {
+ // The previous signal handlers are likely to be meaningless in the child's
+ // context so we reset them to the defaults for now. http://crbug.com/44953
+ // These signal handlers are set up at least in browser_main_posix.cc:
+ // BrowserMainPartsPosix::PreEarlyInitialization and stack_trace_posix.cc:
+ // EnableInProcessStackDumping.
+ signal(SIGHUP, SIG_DFL);
+ signal(SIGINT, SIG_DFL);
+ signal(SIGILL, SIG_DFL);
+ signal(SIGABRT, SIG_DFL);
+ signal(SIGFPE, SIG_DFL);
+ signal(SIGBUS, SIG_DFL);
+ signal(SIGSEGV, SIG_DFL);
+ signal(SIGSYS, SIG_DFL);
+ signal(SIGTERM, SIG_DFL);
+}
+
+#else
+
+// TODO(jln): remove the Linux special case once kernels are fixed.
+
+// Internally the kernel makes sigset_t an array of long large enough to have
+// one bit per signal.
+typedef uint64_t kernel_sigset_t;
+
+// This is what struct sigaction looks like to the kernel at least on X86 and
+// ARM. MIPS, for instance, is very different.
+struct kernel_sigaction {
+ void* k_sa_handler; // For this usage it only needs to be a generic pointer.
+ unsigned long k_sa_flags;
+ void* k_sa_restorer; // For this usage it only needs to be a generic pointer.
+ kernel_sigset_t k_sa_mask;
+};
+
+// glibc's sigaction() will prevent access to sa_restorer, so we need to roll
+// our own.
+int sys_rt_sigaction(int sig, const struct kernel_sigaction* act,
+ struct kernel_sigaction* oact) {
+ return syscall(SYS_rt_sigaction, sig, act, oact, sizeof(kernel_sigset_t));
+}
+
+// This function is intended to be used in between fork() and execve() and will
+// reset all signal handlers to the default.
+// The motivation for going through all of them is that sa_restorer can leak
+// from parents and help defeat ASLR on buggy kernels. We reset it to null.
+// See crbug.com/177956.
+void ResetChildSignalHandlersToDefaults(void) {
+ for (int signum = 1; ; ++signum) {
+#if defined(ANDROID)
+ struct kernel_sigaction act;
+ memset(&act, 0, sizeof(act));
+#else
+ struct kernel_sigaction act = {0};
+#endif
+ int sigaction_get_ret = sys_rt_sigaction(signum, nullptr, &act);
+ if (sigaction_get_ret && errno == EINVAL) {
+#if !defined(NDEBUG)
+ // Linux supports 32 real-time signals from 33 to 64.
+ // If the number of signals in the Linux kernel changes, someone should
+ // look at this code.
+ const int kNumberOfSignals = 64;
+ RAW_CHECK(signum == kNumberOfSignals + 1);
+#endif // !defined(NDEBUG)
+ break;
+ }
+ // All other failures are fatal.
+ if (sigaction_get_ret) {
+ RAW_LOG(FATAL, "sigaction (get) failed.");
+ }
+
+    // The kernel won't allow us to re-set SIGKILL or SIGSTOP.
+ if (signum != SIGSTOP && signum != SIGKILL) {
+ act.k_sa_handler = reinterpret_cast<void*>(SIG_DFL);
+ act.k_sa_restorer = nullptr;
+ if (sys_rt_sigaction(signum, &act, nullptr)) {
+ RAW_LOG(FATAL, "sigaction (set) failed.");
+ }
+ }
+#if !defined(NDEBUG)
+ // Now ask the kernel again and check that no restorer will leak.
+ if (sys_rt_sigaction(signum, nullptr, &act) || act.k_sa_restorer) {
+ RAW_LOG(FATAL, "Cound not fix sa_restorer.");
+ }
+#endif // !defined(NDEBUG)
+ }
+}
+#endif // !defined(OS_LINUX) ||
+ // (!defined(__i386__) && !defined(__x86_64__) && !defined(__arm__))
+} // anonymous namespace
+
+// Functor for |ScopedDIR| (below).
+struct ScopedDIRClose {
+ inline void operator()(DIR* x) const {
+ if (x)
+ closedir(x);
+ }
+};
+
+// Automatically closes |DIR*|s.
+typedef std::unique_ptr<DIR, ScopedDIRClose> ScopedDIR;
+
+#if defined(OS_LINUX)
+static const char kFDDir[] = "/proc/self/fd";
+#elif defined(OS_MACOSX)
+static const char kFDDir[] = "/dev/fd";
+#elif defined(OS_SOLARIS)
+static const char kFDDir[] = "/dev/fd";
+#elif defined(OS_FREEBSD)
+static const char kFDDir[] = "/dev/fd";
+#elif defined(OS_OPENBSD)
+static const char kFDDir[] = "/dev/fd";
+#elif defined(OS_ANDROID)
+static const char kFDDir[] = "/proc/self/fd";
+#endif
+
+void CloseSuperfluousFds(const base::InjectiveMultimap& saved_mapping) {
+ // DANGER: no calls to malloc or locks are allowed from now on:
+ // http://crbug.com/36678
+
+ // Get the maximum number of FDs possible.
+ size_t max_fds = GetMaxFds();
+
+ DirReaderPosix fd_dir(kFDDir);
+ if (!fd_dir.IsValid()) {
+ // Fallback case: Try every possible fd.
+ for (size_t i = 0; i < max_fds; ++i) {
+ const int fd = static_cast<int>(i);
+ if (fd == STDIN_FILENO || fd == STDOUT_FILENO || fd == STDERR_FILENO)
+ continue;
+ // Cannot use STL iterators here, since debug iterators use locks.
+ size_t j;
+ for (j = 0; j < saved_mapping.size(); j++) {
+ if (fd == saved_mapping[j].dest)
+ break;
+ }
+ if (j < saved_mapping.size())
+ continue;
+
+ // Since we're just trying to close anything we can find,
+ // ignore any error return values of close().
+ close(fd);
+ }
+ return;
+ }
+
+ const int dir_fd = fd_dir.fd();
+
+ for ( ; fd_dir.Next(); ) {
+ // Skip . and .. entries.
+ if (fd_dir.name()[0] == '.')
+ continue;
+
+ char *endptr;
+ errno = 0;
+ const long int fd = strtol(fd_dir.name(), &endptr, 10);
+ if (fd_dir.name()[0] == 0 || *endptr || fd < 0 || errno)
+ continue;
+ if (fd == STDIN_FILENO || fd == STDOUT_FILENO || fd == STDERR_FILENO)
+ continue;
+ // Cannot use STL iterators here, since debug iterators use locks.
+ size_t i;
+ for (i = 0; i < saved_mapping.size(); i++) {
+ if (fd == saved_mapping[i].dest)
+ break;
+ }
+ if (i < saved_mapping.size())
+ continue;
+ if (fd == dir_fd)
+ continue;
+
+ // When running under Valgrind, Valgrind opens several FDs for its
+ // own use and will complain if we try to close them. All of
+ // these FDs are >= |max_fds|, so we can check against that here
+ // before closing. See https://bugs.kde.org/show_bug.cgi?id=191758
+ if (fd < static_cast<int>(max_fds)) {
+ int ret = IGNORE_EINTR(close(fd));
+ DPCHECK(ret == 0);
+ }
+ }
+}
+
+Process LaunchProcess(const CommandLine& cmdline,
+ const LaunchOptions& options) {
+ return LaunchProcess(cmdline.argv(), options);
+}
+
+Process LaunchProcess(const std::vector<std::string>& argv,
+ const LaunchOptions& options) {
+ size_t fd_shuffle_size = 0;
+ if (options.fds_to_remap) {
+ fd_shuffle_size = options.fds_to_remap->size();
+ }
+
+ InjectiveMultimap fd_shuffle1;
+ InjectiveMultimap fd_shuffle2;
+ fd_shuffle1.reserve(fd_shuffle_size);
+ fd_shuffle2.reserve(fd_shuffle_size);
+
+ std::unique_ptr<char* []> argv_cstr(new char*[argv.size() + 1]);
+ for (size_t i = 0; i < argv.size(); i++) {
+ argv_cstr[i] = const_cast<char*>(argv[i].c_str());
+ }
+ argv_cstr[argv.size()] = nullptr;
+
+ std::unique_ptr<char* []> new_environ;
+ char* const empty_environ = nullptr;
+ char* const* old_environ = GetEnvironment();
+ if (options.clear_environ)
+ old_environ = &empty_environ;
+ if (!options.environ.empty())
+ new_environ = AlterEnvironment(old_environ, options.environ);
+
+ sigset_t full_sigset;
+ sigfillset(&full_sigset);
+ const sigset_t orig_sigmask = SetSignalMask(full_sigset);
+
+ const char* current_directory = nullptr;
+ if (!options.current_directory.empty()) {
+ current_directory = options.current_directory.value().c_str();
+ }
+
+ pid_t pid;
+#if defined(OS_LINUX)
+ if (options.clone_flags) {
+ // Signal handling in this function assumes the creation of a new
+ // process, so we check that a thread is not being created by mistake
+ // and that signal handling follows the process-creation rules.
+ RAW_CHECK(
+ !(options.clone_flags & (CLONE_SIGHAND | CLONE_THREAD | CLONE_VM)));
+
+ // We specify a null ptid and ctid.
+ RAW_CHECK(
+ !(options.clone_flags &
+ (CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT_SETTID)));
+
+ // Since we use waitpid, we do not support custom termination signals in the
+ // clone flags.
+ RAW_CHECK((options.clone_flags & 0xff) == 0);
+
+ pid = ForkWithFlags(options.clone_flags | SIGCHLD, nullptr, nullptr);
+ } else
+#endif
+ {
+ pid = fork();
+ }
+
+ // Always restore the original signal mask in the parent.
+ if (pid != 0) {
+ SetSignalMask(orig_sigmask);
+ }
+
+ if (pid < 0) {
+ DPLOG(ERROR) << "fork";
+ return Process();
+ } else if (pid == 0) {
+ // Child process
+
+ // DANGER: no calls to malloc or locks are allowed from now on:
+ // http://crbug.com/36678
+
+ // DANGER: fork() rule: in the child, if you don't end up doing exec*(),
+ // you call _exit() instead of exit(). This is because _exit() does not
+ // call any previously-registered (in the parent) exit handlers, which
+ // might do things like block waiting for threads that don't even exist
+ // in the child.
+
+    // If a child process uses the readline library, the process may block forever.
+ // In BSD like OSes including OS X it is safe to assign /dev/null as stdin.
+ // See http://crbug.com/56596.
+ base::ScopedFD null_fd(HANDLE_EINTR(open("/dev/null", O_RDONLY)));
+ if (!null_fd.is_valid()) {
+ RAW_LOG(ERROR, "Failed to open /dev/null");
+ _exit(127);
+ }
+
+ int new_fd = HANDLE_EINTR(dup2(null_fd.get(), STDIN_FILENO));
+ if (new_fd != STDIN_FILENO) {
+ RAW_LOG(ERROR, "Failed to dup /dev/null for stdin");
+ _exit(127);
+ }
+
+ if (options.new_process_group) {
+ // Instead of inheriting the process group ID of the parent, the child
+ // starts off a new process group with pgid equal to its process ID.
+ if (setpgid(0, 0) < 0) {
+ RAW_LOG(ERROR, "setpgid failed");
+ _exit(127);
+ }
+ }
+
+ if (options.maximize_rlimits) {
+ // Some resource limits need to be maximal in this child.
+ for (size_t i = 0; i < options.maximize_rlimits->size(); ++i) {
+ const int resource = (*options.maximize_rlimits)[i];
+ struct rlimit limit;
+ if (getrlimit(resource, &limit) < 0) {
+ RAW_LOG(WARNING, "getrlimit failed");
+ } else if (limit.rlim_cur < limit.rlim_max) {
+ limit.rlim_cur = limit.rlim_max;
+ if (setrlimit(resource, &limit) < 0) {
+ RAW_LOG(WARNING, "setrlimit failed");
+ }
+ }
+ }
+ }
+
+#if defined(OS_MACOSX)
+ RestoreDefaultExceptionHandler();
+#endif // defined(OS_MACOSX)
+
+ ResetChildSignalHandlersToDefaults();
+ SetSignalMask(orig_sigmask);
+
+#if 0
+ // When debugging it can be helpful to check that we really aren't making
+ // any hidden calls to malloc.
+ void *malloc_thunk =
+ reinterpret_cast<void*>(reinterpret_cast<intptr_t>(malloc) & ~4095);
+ mprotect(malloc_thunk, 4096, PROT_READ | PROT_WRITE | PROT_EXEC);
+ memset(reinterpret_cast<void*>(malloc), 0xff, 8);
+#endif // 0
+
+#if defined(OS_CHROMEOS)
+ if (options.ctrl_terminal_fd >= 0) {
+ // Set process' controlling terminal.
+ if (HANDLE_EINTR(setsid()) != -1) {
+ if (HANDLE_EINTR(
+ ioctl(options.ctrl_terminal_fd, TIOCSCTTY, nullptr)) == -1) {
+ RAW_LOG(WARNING, "ioctl(TIOCSCTTY), ctrl terminal not set");
+ }
+ } else {
+ RAW_LOG(WARNING, "setsid failed, ctrl terminal not set");
+ }
+ }
+#endif // defined(OS_CHROMEOS)
+
+ if (options.fds_to_remap) {
+ // Cannot use STL iterators here, since debug iterators use locks.
+ for (size_t i = 0; i < options.fds_to_remap->size(); ++i) {
+ const FileHandleMappingVector::value_type& value =
+ (*options.fds_to_remap)[i];
+ fd_shuffle1.push_back(InjectionArc(value.first, value.second, false));
+ fd_shuffle2.push_back(InjectionArc(value.first, value.second, false));
+ }
+ }
+
+ if (!options.environ.empty() || options.clear_environ)
+ SetEnvironment(new_environ.get());
+
+ // fd_shuffle1 is mutated by this call because it cannot malloc.
+ if (!ShuffleFileDescriptors(&fd_shuffle1))
+ _exit(127);
+
+ CloseSuperfluousFds(fd_shuffle2);
+
+ // Set NO_NEW_PRIVS by default. Since NO_NEW_PRIVS only exists in kernel
+ // 3.5+, do not check the return value of prctl here.
+#if defined(OS_LINUX)
+#ifndef PR_SET_NO_NEW_PRIVS
+#define PR_SET_NO_NEW_PRIVS 38
+#endif
+ if (!options.allow_new_privs) {
+ if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) && errno != EINVAL) {
+ // Only log if the error is not EINVAL (i.e. not supported).
+ RAW_LOG(FATAL, "prctl(PR_SET_NO_NEW_PRIVS) failed");
+ }
+ }
+
+ if (options.kill_on_parent_death) {
+ if (prctl(PR_SET_PDEATHSIG, SIGKILL) != 0) {
+ RAW_LOG(ERROR, "prctl(PR_SET_PDEATHSIG) failed");
+ _exit(127);
+ }
+ }
+#endif
+
+ if (current_directory != nullptr) {
+ RAW_CHECK(chdir(current_directory) == 0);
+ }
+
+ if (options.pre_exec_delegate != nullptr) {
+ options.pre_exec_delegate->RunAsyncSafe();
+ }
+
+ execvp(argv_cstr[0], argv_cstr.get());
+
+ RAW_LOG(ERROR, "LaunchProcess: failed to execvp:");
+ RAW_LOG(ERROR, argv_cstr[0]);
+ _exit(127);
+ } else {
+ // Parent process
+ if (options.wait) {
+ // While this isn't strictly disk IO, waiting for another process to
+ // finish is the sort of thing ThreadRestrictions is trying to prevent.
+ base::ThreadRestrictions::AssertIOAllowed();
+ pid_t ret = HANDLE_EINTR(waitpid(pid, 0, 0));
+ DPCHECK(ret > 0);
+ }
+ }
+
+ return Process(pid);
+}
+
+void RaiseProcessToHighPriority() {
+ // On POSIX, we don't actually do anything here. We could try to nice() or
+ // setpriority() or sched_getscheduler, but these all require extra rights.
+}
+
+// Executes the application specified by |argv| and waits for it to exit. Stores
+// the output (stdout) in |output|. If |do_search_path| is set, it searches the
+// path for the application; in that case, |envp| must be null, and it will use
+// the current environment. If |do_search_path| is false, |argv[0]| should fully
+// specify the path of the application, and |envp| will be used as the
+// environment. If |include_stderr| is true, includes stderr otherwise redirects
+// it to /dev/null.
+// The return value of the function indicates success or failure. In the case of
+// success, the application exit code will be returned in |*exit_code|, which
+// should be checked to determine if the application ran successfully.
+static bool GetAppOutputInternal(
+ const std::vector<std::string>& argv,
+ char* const envp[],
+ bool include_stderr,
+ std::string* output,
+ bool do_search_path,
+ int* exit_code) {
+ // Doing a blocking wait for another command to finish counts as IO.
+ base::ThreadRestrictions::AssertIOAllowed();
+ // exit_code must be supplied so calling function can determine success.
+ DCHECK(exit_code);
+ *exit_code = EXIT_FAILURE;
+
+ int pipe_fd[2];
+ pid_t pid;
+ InjectiveMultimap fd_shuffle1, fd_shuffle2;
+ std::unique_ptr<char* []> argv_cstr(new char*[argv.size() + 1]);
+
+ fd_shuffle1.reserve(3);
+ fd_shuffle2.reserve(3);
+
+ // Either |do_search_path| should be false or |envp| should be null, but not
+ // both.
+ DCHECK(!do_search_path ^ !envp);
+
+ if (pipe(pipe_fd) < 0)
+ return false;
+
+ switch (pid = fork()) {
+ case -1: // error
+ close(pipe_fd[0]);
+ close(pipe_fd[1]);
+ return false;
+ case 0: // child
+ {
+ // DANGER: no calls to malloc or locks are allowed from now on:
+ // http://crbug.com/36678
+
+#if defined(OS_MACOSX)
+ RestoreDefaultExceptionHandler();
+#endif
+
+ // Obscure fork() rule: in the child, if you don't end up doing exec*(),
+ // you call _exit() instead of exit(). This is because _exit() does not
+ // call any previously-registered (in the parent) exit handlers, which
+ // might do things like block waiting for threads that don't even exist
+ // in the child.
+ int dev_null = open("/dev/null", O_WRONLY);
+ if (dev_null < 0)
+ _exit(127);
+
+ fd_shuffle1.push_back(InjectionArc(pipe_fd[1], STDOUT_FILENO, true));
+ fd_shuffle1.push_back(InjectionArc(
+ include_stderr ? pipe_fd[1] : dev_null,
+ STDERR_FILENO, true));
+ fd_shuffle1.push_back(InjectionArc(dev_null, STDIN_FILENO, true));
+        // Adding another element here? Remember to increase the argument to
+ // reserve(), above.
+
+ for (size_t i = 0; i < fd_shuffle1.size(); ++i)
+ fd_shuffle2.push_back(fd_shuffle1[i]);
+
+ if (!ShuffleFileDescriptors(&fd_shuffle1))
+ _exit(127);
+
+ CloseSuperfluousFds(fd_shuffle2);
+
+ for (size_t i = 0; i < argv.size(); i++)
+ argv_cstr[i] = const_cast<char*>(argv[i].c_str());
+ argv_cstr[argv.size()] = nullptr;
+ if (do_search_path)
+ execvp(argv_cstr[0], argv_cstr.get());
+ else
+ execve(argv_cstr[0], argv_cstr.get(), envp);
+ _exit(127);
+ }
+ default: // parent
+ {
+ // Close our writing end of pipe now. Otherwise later read would not
+ // be able to detect end of child's output (in theory we could still
+ // write to the pipe).
+ close(pipe_fd[1]);
+
+ output->clear();
+
+ while (true) {
+ char buffer[256];
+ ssize_t bytes_read =
+ HANDLE_EINTR(read(pipe_fd[0], buffer, sizeof(buffer)));
+ if (bytes_read <= 0)
+ break;
+ output->append(buffer, bytes_read);
+ }
+ close(pipe_fd[0]);
+
+ // Always wait for exit code (even if we know we'll declare
+ // GOT_MAX_OUTPUT).
+ Process process(pid);
+ return process.WaitForExit(exit_code);
+ }
+ }
+}
+
+bool GetAppOutput(const CommandLine& cl, std::string* output) {
+ return GetAppOutput(cl.argv(), output);
+}
+
+bool GetAppOutput(const std::vector<std::string>& argv, std::string* output) {
+ // Run |execve()| with the current environment.
+ int exit_code;
+ bool result =
+ GetAppOutputInternal(argv, nullptr, false, output, true, &exit_code);
+ return result && exit_code == EXIT_SUCCESS;
+}
+
+bool GetAppOutputAndError(const CommandLine& cl, std::string* output) {
+ // Run |execve()| with the current environment.
+ int exit_code;
+ bool result =
+ GetAppOutputInternal(cl.argv(), nullptr, true, output, true, &exit_code);
+ return result && exit_code == EXIT_SUCCESS;
+}
+
+bool GetAppOutputWithExitCode(const CommandLine& cl,
+ std::string* output,
+ int* exit_code) {
+ // Run |execve()| with the current environment.
+ return GetAppOutputInternal(cl.argv(), nullptr, false, output, true,
+ exit_code);
+}
+
+#endif // !defined(OS_NACL_NONSFI)
+
+#if defined(OS_LINUX) || defined(OS_NACL_NONSFI)
+namespace {
+
+bool IsRunningOnValgrind() {
+ return RUNNING_ON_VALGRIND;
+}
+
+// This function runs on the stack specified on the clone call. It uses longjmp
+// to switch back to the original stack so the child can return from sys_clone.
+int CloneHelper(void* arg) {
+ jmp_buf* env_ptr = reinterpret_cast<jmp_buf*>(arg);
+ longjmp(*env_ptr, 1);
+
+ // Should not be reached.
+ RAW_CHECK(false);
+ return 1;
+}
+
+// This function is noinline to ensure that stack_buf is below the stack pointer
+// that is saved when setjmp is called below. This is needed because when
+// compiled with FORTIFY_SOURCE, glibc's longjmp checks that the stack is moved
+// upwards. See crbug.com/442912 for more details.
+#if defined(ADDRESS_SANITIZER)
+// Disable AddressSanitizer instrumentation for this function to make sure
+// |stack_buf| is allocated on thread stack instead of ASan's fake stack.
+// Under ASan longjmp() will attempt to clean up the area between the old and
+// new stack pointers and print a warning that may confuse the user.
+__attribute__((no_sanitize_address))
+#endif
+NOINLINE pid_t CloneAndLongjmpInChild(unsigned long flags,
+ pid_t* ptid,
+ pid_t* ctid,
+ jmp_buf* env) {
+ // We use the libc clone wrapper instead of making the syscall
+ // directly because making the syscall may fail to update the libc's
+ // internal pid cache. The libc interface unfortunately requires
+ // specifying a new stack, so we use setjmp/longjmp to emulate
+ // fork-like behavior.
+ char stack_buf[PTHREAD_STACK_MIN] ALIGNAS(16);
+#if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \
+ defined(ARCH_CPU_MIPS_FAMILY)
+ // The stack grows downward.
+ void* stack = stack_buf + sizeof(stack_buf);
+#else
+#error "Unsupported architecture"
+#endif
+ return clone(&CloneHelper, stack, flags, env, ptid, nullptr, ctid);
+}
+
+} // anonymous namespace
+
+pid_t ForkWithFlags(unsigned long flags, pid_t* ptid, pid_t* ctid) {
+ const bool clone_tls_used = flags & CLONE_SETTLS;
+ const bool invalid_ctid =
+ (flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) && !ctid;
+ const bool invalid_ptid = (flags & CLONE_PARENT_SETTID) && !ptid;
+
+ // We do not support CLONE_VM.
+ const bool clone_vm_used = flags & CLONE_VM;
+
+ if (clone_tls_used || invalid_ctid || invalid_ptid || clone_vm_used) {
+ RAW_LOG(FATAL, "Invalid usage of ForkWithFlags");
+ }
+
+  // Valgrind's clone implementation does not support specifying a child_stack
+ // without CLONE_VM, so we cannot use libc's clone wrapper when running under
+ // Valgrind. As a result, the libc pid cache may be incorrect under Valgrind.
+ // See crbug.com/442817 for more details.
+ if (IsRunningOnValgrind()) {
+ // See kernel/fork.c in Linux. There is different ordering of sys_clone
+ // parameters depending on CONFIG_CLONE_BACKWARDS* configuration options.
+#if defined(ARCH_CPU_X86_64)
+ return syscall(__NR_clone, flags, nullptr, ptid, ctid, nullptr);
+#elif defined(ARCH_CPU_X86) || defined(ARCH_CPU_ARM_FAMILY) || \
+ defined(ARCH_CPU_MIPS_FAMILY)
+ // CONFIG_CLONE_BACKWARDS defined.
+ return syscall(__NR_clone, flags, nullptr, ptid, nullptr, ctid);
+#else
+#error "Unsupported architecture"
+#endif
+ }
+
+ jmp_buf env;
+ if (setjmp(env) == 0) {
+ return CloneAndLongjmpInChild(flags, ptid, ctid, &env);
+ }
+
+ return 0;
+}
+#endif // defined(OS_LINUX) || defined(OS_NACL_NONSFI)
+
+} // namespace base
diff --git a/libchrome/base/process/port_provider_mac.cc b/libchrome/base/process/port_provider_mac.cc
new file mode 100644
index 0000000..ac13949
--- /dev/null
+++ b/libchrome/base/process/port_provider_mac.cc
@@ -0,0 +1,27 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/port_provider_mac.h"
+
+namespace base {
+
+PortProvider::PortProvider() : lock_(), observer_list_() {}
+PortProvider::~PortProvider() {}
+
+void PortProvider::AddObserver(Observer* observer) {
+ base::AutoLock l(lock_);
+ observer_list_.AddObserver(observer);
+}
+
+void PortProvider::RemoveObserver(Observer* observer) {
+ base::AutoLock l(lock_);
+ observer_list_.RemoveObserver(observer);
+}
+
+void PortProvider::NotifyObservers(ProcessHandle process) {
+ base::AutoLock l(lock_);
+ FOR_EACH_OBSERVER(Observer, observer_list_, OnReceivedTaskPort(process));
+}
+
+} // namespace base
diff --git a/libchrome/base/process/port_provider_mac.h b/libchrome/base/process/port_provider_mac.h
new file mode 100644
index 0000000..2f40297
--- /dev/null
+++ b/libchrome/base/process/port_provider_mac.h
@@ -0,0 +1,61 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROCESS_PORT_PROVIDER_MAC_H_
+#define BASE_PROCESS_PORT_PROVIDER_MAC_H_
+
+#include <mach/mach.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/observer_list.h"
+#include "base/process/process_handle.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+// Abstract base class that provides a mapping from ProcessHandle (pid_t) to the
+// Mach task port. This replicates task_for_pid(), which requires root
+// privileges.
+class BASE_EXPORT PortProvider {
+ public:
+ PortProvider();
+ virtual ~PortProvider();
+
+ class Observer {
+ public:
+ virtual ~Observer() {};
+ // Called by the PortProvider to notify observers that the task port was
+ // received for a given process.
+ // No guarantees are made about the thread on which this notification will
+ // be sent.
+ // Observers must not call AddObserver() or RemoveObserver() in this
+ // callback, as doing so will deadlock.
+ virtual void OnReceivedTaskPort(ProcessHandle process) = 0;
+ };
+
+ // Returns the mach task port for |process| if possible, or else
+ // |MACH_PORT_NULL|.
+ virtual mach_port_t TaskForPid(ProcessHandle process) const = 0;
+
+ // Observer interface.
+ void AddObserver(Observer* observer);
+ void RemoveObserver(Observer* observer);
+
+ protected:
+ // Called by subclasses to send a notification to observers.
+ void NotifyObservers(ProcessHandle process);
+
+ private:
+ // ObserverList is not thread-safe, so |lock_| ensures consistency of
+ // |observer_list_|.
+ base::Lock lock_;
+ base::ObserverList<Observer> observer_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(PortProvider);
+};
+
+} // namespace base
+
+#endif // BASE_PROCESS_PORT_PROVIDER_MAC_H_
diff --git a/libchrome/base/process/process.h b/libchrome/base/process/process.h
new file mode 100644
index 0000000..70c8260
--- /dev/null
+++ b/libchrome/base/process/process.h
@@ -0,0 +1,151 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROCESS_PROCESS_H_
+#define BASE_PROCESS_PROCESS_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#endif
+
+namespace base {
+
+// Provides a move-only encapsulation of a process.
+//
+// This object is not tied to the lifetime of the underlying process: the
+// process may be killed and this object may still be around, and it will still
+// claim to be valid. The actual behavior in that case is OS dependent like so:
+//
+// Windows: The underlying ProcessHandle will be valid after the process dies
+// and can be used to gather some information about that process, but most
+// methods will obviously fail.
+//
+// POSIX: The underlying ProcessHandle is not guaranteed to remain valid after
+// the process dies, and it may be reused by the system, which means that it may
+// end up pointing to the wrong process.
+class BASE_EXPORT Process {
+ public:
+ explicit Process(ProcessHandle handle = kNullProcessHandle);
+
+ Process(Process&& other);
+
+ // The destructor does not terminate the process.
+ ~Process();
+
+ Process& operator=(Process&& other);
+
+ // Returns an object for the current process.
+ static Process Current();
+
+ // Returns a Process for the given |pid|.
+ static Process Open(ProcessId pid);
+
+ // Returns a Process for the given |pid|. On Windows the handle is opened
+ // with more access rights and must only be used by trusted code (can read the
+ // address space and duplicate handles).
+ static Process OpenWithExtraPrivileges(ProcessId pid);
+
+#if defined(OS_WIN)
+ // Returns a Process for the given |pid|, using some |desired_access|.
+ // See ::OpenProcess documentation for valid |desired_access|.
+ static Process OpenWithAccess(ProcessId pid, DWORD desired_access);
+#endif
+
+ // Creates an object from a |handle| owned by someone else.
+ // Don't use this for new code. It is only intended to ease the migration to
+ // a strict ownership model.
+ // TODO(rvargas) crbug.com/417532: Remove this code.
+ static Process DeprecatedGetProcessFromHandle(ProcessHandle handle);
+
+ // Returns true if processes can be backgrounded.
+ static bool CanBackgroundProcesses();
+
+  // Returns true if this object represents a valid process.
+ bool IsValid() const;
+
+ // Returns a handle for this process. There is no guarantee about when that
+ // handle becomes invalid because this object retains ownership.
+ ProcessHandle Handle() const;
+
+ // Returns a second object that represents this process.
+ Process Duplicate() const;
+
+ // Get the PID for this process.
+ ProcessId Pid() const;
+
+ // Returns true if this process is the current process.
+ bool is_current() const;
+
+ // Close the process handle. This will not terminate the process.
+ void Close();
+
+ // Terminates the process with extreme prejudice. The given |exit_code| will
+ // be the exit code of the process. If |wait| is true, this method will wait
+ // for up to one minute for the process to actually terminate.
+ // Returns true if the process terminates within the allowed time.
+ // NOTE: On POSIX |exit_code| is ignored.
+ bool Terminate(int exit_code, bool wait) const;
+
+ // Waits for the process to exit. Returns true on success.
+ // On POSIX, if the process has been signaled then |exit_code| is set to -1.
+ // On Linux this must be a child process, however on Mac and Windows it can be
+ // any process.
+ // NOTE: |exit_code| is optional, nullptr can be passed if the exit code is
+ // not required.
+ bool WaitForExit(int* exit_code);
+
+ // Same as WaitForExit() but only waits for up to |timeout|.
+ // NOTE: |exit_code| is optional, nullptr can be passed if the exit code
+ // is not required.
+ bool WaitForExitWithTimeout(TimeDelta timeout, int* exit_code);
+
+  // A process is backgrounded when its priority is lower than normal.
+ // Return true if this process is backgrounded, false otherwise.
+ bool IsProcessBackgrounded() const;
+
+ // Set a process as backgrounded. If value is true, the priority of the
+ // process will be lowered. If value is false, the priority of the process
+ // will be made "normal" - equivalent to default process priority.
+ // Returns true if the priority was changed, false otherwise.
+ bool SetProcessBackgrounded(bool value);
+
+ // Returns an integer representing the priority of a process. The meaning
+ // of this value is OS dependent.
+ int GetPriority() const;
+
+#if defined(OS_CHROMEOS)
+ // Get the PID in its PID namespace.
+ // If the process is not in a PID namespace or /proc/<pid>/status does not
+ // report NSpid, kNullProcessId is returned.
+ ProcessId GetPidInNamespace() const;
+#endif
+
+ private:
+#if defined(OS_WIN)
+ bool is_current_process_;
+ win::ScopedHandle process_;
+#else
+ ProcessHandle process_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(Process);
+};
+
+#if defined(OS_CHROMEOS)
+// Exposed for testing.
+// Given the contents of the /proc/<pid>/cgroup file, determine whether the
+// process is backgrounded or not.
+BASE_EXPORT bool IsProcessBackgroundedCGroup(
+ const StringPiece& cgroup_contents);
+#endif // defined(OS_CHROMEOS)
+
+} // namespace base
+
+#endif // BASE_PROCESS_PROCESS_H_
diff --git a/libchrome/base/process/process_handle.cc b/libchrome/base/process/process_handle.cc
new file mode 100644
index 0000000..1f22b93
--- /dev/null
+++ b/libchrome/base/process/process_handle.cc
@@ -0,0 +1,52 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "base/process/process_handle.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+bool g_have_unique_id = false;
+uint32_t g_unique_id;
+
+// The process which set |g_unique_id|.
+ProcessId g_procid;
+
+// Mangle IDs so that they are not accidentally used as PIDs, e.g. as an
+// argument to kill or waitpid.
+uint32_t MangleProcessId(ProcessId process_id) {
+  // Add a large power of 10 so that the pid is still readable
+  // inside the mangled id.
+ return static_cast<uint32_t>(process_id) + 1000000000U;
+}
+
+} // namespace
+
+uint32_t GetUniqueIdForProcess() {
+ if (!g_have_unique_id) {
+ return MangleProcessId(GetCurrentProcId());
+ }
+
+ // Make sure we are the same process that set |g_procid|. This check may have
+ // false negatives (if a process ID was reused) but should have no false
+ // positives.
+ DCHECK_EQ(GetCurrentProcId(), g_procid);
+ return g_unique_id;
+}
+
+#if defined(OS_LINUX)
+
+void InitUniqueIdForProcessInPidNamespace(ProcessId pid_outside_of_namespace) {
+ g_unique_id = MangleProcessId(pid_outside_of_namespace);
+ g_procid = GetCurrentProcId();
+ g_have_unique_id = true;
+}
+
+#endif
+
+} // namespace base
diff --git a/libchrome/base/process/process_handle.h b/libchrome/base/process/process_handle.h
new file mode 100644
index 0000000..ef7a602
--- /dev/null
+++ b/libchrome/base/process/process_handle.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROCESS_PROCESS_HANDLE_H_
+#define BASE_PROCESS_PROCESS_HANDLE_H_
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+
+// ProcessHandle is a platform specific type which represents the underlying OS
+// handle to a process.
+// ProcessId is a number which identifies the process in the OS.
+#if defined(OS_WIN)
+typedef HANDLE ProcessHandle;
+typedef DWORD ProcessId;
+typedef HANDLE UserTokenHandle;
+const ProcessHandle kNullProcessHandle = NULL;
+const ProcessId kNullProcessId = 0;
+#elif defined(OS_POSIX)
+// On POSIX, our ProcessHandle will just be the PID.
+typedef pid_t ProcessHandle;
+typedef pid_t ProcessId;
+const ProcessHandle kNullProcessHandle = 0;
+const ProcessId kNullProcessId = 0;
+#endif // defined(OS_WIN)
+
+// Returns the id of the current process.
+// Note that on some platforms, this is not guaranteed to be unique across
+// processes (use GetUniqueIdForProcess if uniqueness is required).
+BASE_EXPORT ProcessId GetCurrentProcId();
+
+// Returns a unique ID for the current process. The ID will be unique across all
+// currently running processes within the chrome session, but IDs of terminated
+// processes may be reused. This returns an opaque value that is different from
+// a process's PID.
+BASE_EXPORT uint32_t GetUniqueIdForProcess();
+
+#if defined(OS_LINUX)
+// When a process is started in a different PID namespace from the browser
+// process, this function must be called with the process's PID in the browser's
+// PID namespace in order to initialize its unique ID. Not thread safe.
+// WARNING: To avoid inconsistent results from GetUniqueIdForProcess, this
+// should only be called very early after process startup - ideally as soon
+// after process creation as possible.
+BASE_EXPORT void InitUniqueIdForProcessInPidNamespace(
+ ProcessId pid_outside_of_namespace);
+#endif
+
+// Returns the ProcessHandle of the current process.
+BASE_EXPORT ProcessHandle GetCurrentProcessHandle();
+
+// Returns the process ID for the specified process. This is functionally the
+// same as Windows' GetProcessId(), but works on versions of Windows before Win
+// XP SP1 as well.
+// DEPRECATED. New code should be using Process::Pid() instead.
+// Note that on some platforms, this is not guaranteed to be unique across
+// processes.
+BASE_EXPORT ProcessId GetProcId(ProcessHandle process);
+
+// Returns the ID for the parent of the given process.
+BASE_EXPORT ProcessId GetParentProcessId(ProcessHandle process);
+
+#if defined(OS_POSIX)
+// Returns the path to the executable of the given process.
+BASE_EXPORT FilePath GetProcessExecutablePath(ProcessHandle process);
+#endif
+
+} // namespace base
+
+#endif // BASE_PROCESS_PROCESS_HANDLE_H_
diff --git a/libchrome/base/process/process_handle_linux.cc b/libchrome/base/process/process_handle_linux.cc
new file mode 100644
index 0000000..950b888
--- /dev/null
+++ b/libchrome/base/process/process_handle_linux.cc
@@ -0,0 +1,30 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_handle.h"
+
+#include "base/files/file_util.h"
+#include "base/process/internal_linux.h"
+
+namespace base {
+
+ProcessId GetParentProcessId(ProcessHandle process) {
+ ProcessId pid =
+ internal::ReadProcStatsAndGetFieldAsInt64(process, internal::VM_PPID);
+ if (pid)
+ return pid;
+ return -1;
+}
+
+FilePath GetProcessExecutablePath(ProcessHandle process) {
+ FilePath stat_file = internal::GetProcPidDir(process).Append("exe");
+ FilePath exe_name;
+ if (!ReadSymbolicLink(stat_file, &exe_name)) {
+ // No such process. Happens frequently in e.g. TerminateAllChromeProcesses
+ return FilePath();
+ }
+ return exe_name;
+}
+
+} // namespace base
diff --git a/libchrome/base/process/process_handle_mac.cc b/libchrome/base/process/process_handle_mac.cc
new file mode 100644
index 0000000..d9d22f7
--- /dev/null
+++ b/libchrome/base/process/process_handle_mac.cc
@@ -0,0 +1,37 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_handle.h"
+
+#include <libproc.h>
+#include <stddef.h>
+#include <sys/sysctl.h>
+#include <sys/types.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+ProcessId GetParentProcessId(ProcessHandle process) {
+ struct kinfo_proc info;
+ size_t length = sizeof(struct kinfo_proc);
+ int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process };
+ if (sysctl(mib, 4, &info, &length, NULL, 0) < 0) {
+ DPLOG(ERROR) << "sysctl";
+ return -1;
+ }
+ if (length == 0)
+ return -1;
+ return info.kp_eproc.e_ppid;
+}
+
+FilePath GetProcessExecutablePath(ProcessHandle process) {
+ char pathbuf[PROC_PIDPATHINFO_MAXSIZE];
+ if (!proc_pidpath(process, pathbuf, sizeof(pathbuf)))
+ return FilePath();
+
+ return FilePath(pathbuf);
+}
+
+} // namespace base
diff --git a/libchrome/base/process/process_handle_posix.cc b/libchrome/base/process/process_handle_posix.cc
new file mode 100644
index 0000000..4e332df
--- /dev/null
+++ b/libchrome/base/process/process_handle_posix.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_handle.h"
+
+#include <unistd.h>
+
+namespace base {
+
+ProcessId GetCurrentProcId() {
+ return getpid();
+}
+
+ProcessHandle GetCurrentProcessHandle() {
+ return GetCurrentProcId();
+}
+
+ProcessId GetProcId(ProcessHandle process) {
+ return process;
+}
+
+} // namespace base
diff --git a/libchrome/base/process/process_info.h b/libchrome/base/process/process_info.h
new file mode 100644
index 0000000..1d76f42
--- /dev/null
+++ b/libchrome/base/process/process_info.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROCESS_PROCESS_INFO_H_
+#define BASE_PROCESS_PROCESS_INFO_H_
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class Time;
+
+// Vends information about the current process.
+class BASE_EXPORT CurrentProcessInfo {
+ public:
+ // Returns the time at which the process was launched. May be empty if an
+ // error occurred retrieving the information.
+ static const Time CreationTime();
+};
+
+#if defined(OS_WIN)
+
+enum IntegrityLevel {
+ INTEGRITY_UNKNOWN,
+ LOW_INTEGRITY,
+ MEDIUM_INTEGRITY,
+ HIGH_INTEGRITY,
+};
+
+// Returns the integrity level of the process. Returns INTEGRITY_UNKNOWN if the
+// system does not support integrity levels (pre-Vista) or in the case of an
+// underlying system failure.
+BASE_EXPORT IntegrityLevel GetCurrentProcessIntegrityLevel();
+
+#endif // defined(OS_WIN)
+
+
+
+} // namespace base
+
+#endif // BASE_PROCESS_PROCESS_INFO_H_
diff --git a/libchrome/base/process/process_iterator.cc b/libchrome/base/process/process_iterator.cc
new file mode 100644
index 0000000..d4024d9
--- /dev/null
+++ b/libchrome/base/process/process_iterator.cc
@@ -0,0 +1,67 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_iterator.h"
+#include "build/build_config.h"
+
+namespace base {
+
+#if defined(OS_POSIX)
+ProcessEntry::ProcessEntry() : pid_(0), ppid_(0), gid_(0) {}
+ProcessEntry::ProcessEntry(const ProcessEntry& other) = default;
+ProcessEntry::~ProcessEntry() {}
+#endif
+
+const ProcessEntry* ProcessIterator::NextProcessEntry() {
+ bool result = false;
+ do {
+ result = CheckForNextProcess();
+ } while (result && !IncludeEntry());
+ if (result)
+ return &entry_;
+ return NULL;
+}
+
+ProcessIterator::ProcessEntries ProcessIterator::Snapshot() {
+ ProcessEntries found;
+ while (const ProcessEntry* process_entry = NextProcessEntry()) {
+ found.push_back(*process_entry);
+ }
+ return found;
+}
+
+bool ProcessIterator::IncludeEntry() {
+ return !filter_ || filter_->Includes(entry_);
+}
+
+NamedProcessIterator::NamedProcessIterator(
+ const FilePath::StringType& executable_name,
+ const ProcessFilter* filter) : ProcessIterator(filter),
+ executable_name_(executable_name) {
+#if defined(OS_ANDROID)
+  // On Android, the process name is truncated to the last 15 characters; it
+  // appears in /proc/<pid>/stat as the string between the open and close
+  // parentheses. Please see ProcessIterator::CheckForNextProcess for details.
+ // Now if the length of input process name is greater than 15, only save the
+ // last 15 characters.
+ if (executable_name_.size() > 15) {
+ executable_name_ = FilePath::StringType(executable_name_,
+ executable_name_.size() - 15, 15);
+ }
+#endif
+}
+
+NamedProcessIterator::~NamedProcessIterator() {
+}
+
+int GetProcessCount(const FilePath::StringType& executable_name,
+ const ProcessFilter* filter) {
+ int count = 0;
+ NamedProcessIterator iter(executable_name, filter);
+ while (iter.NextProcessEntry())
+ ++count;
+ return count;
+}
+
+} // namespace base
diff --git a/libchrome/base/process/process_iterator.h b/libchrome/base/process/process_iterator.h
new file mode 100644
index 0000000..0d1f1a6
--- /dev/null
+++ b/libchrome/base/process/process_iterator.h
@@ -0,0 +1,151 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains methods to iterate over processes on the system.
+
+#ifndef BASE_PROCESS_PROCESS_ITERATOR_H_
+#define BASE_PROCESS_PROCESS_ITERATOR_H_
+
+#include <stddef.h>
+
+#include <list>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "base/process/process.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include <tlhelp32.h>
+#elif defined(OS_MACOSX) || defined(OS_OPENBSD)
+#include <sys/sysctl.h>
+#elif defined(OS_FREEBSD)
+#include <sys/user.h>
+#elif defined(OS_POSIX)
+#include <dirent.h>
+#endif
+
+namespace base {
+
+#if defined(OS_WIN)
+struct ProcessEntry : public PROCESSENTRY32 {
+ ProcessId pid() const { return th32ProcessID; }
+ ProcessId parent_pid() const { return th32ParentProcessID; }
+ const wchar_t* exe_file() const { return szExeFile; }
+};
+#elif defined(OS_POSIX)
+struct BASE_EXPORT ProcessEntry {
+ ProcessEntry();
+ ProcessEntry(const ProcessEntry& other);
+ ~ProcessEntry();
+
+ ProcessId pid() const { return pid_; }
+ ProcessId parent_pid() const { return ppid_; }
+ ProcessId gid() const { return gid_; }
+ const char* exe_file() const { return exe_file_.c_str(); }
+ const std::vector<std::string>& cmd_line_args() const {
+ return cmd_line_args_;
+ }
+
+ ProcessId pid_;
+ ProcessId ppid_;
+ ProcessId gid_;
+ std::string exe_file_;
+ std::vector<std::string> cmd_line_args_;
+};
+#endif // defined(OS_POSIX)
+
+// Used to filter processes by process ID.
+class ProcessFilter {
+ public:
+ // Returns true to indicate set-inclusion and false otherwise. This method
+ // should not have side-effects and should be idempotent.
+ virtual bool Includes(const ProcessEntry& entry) const = 0;
+
+ protected:
+ virtual ~ProcessFilter() {}
+};
+
+// This class provides a way to iterate through a list of processes on the
+// current machine with a specified filter.
+// To use, create an instance and then call NextProcessEntry() until it returns
+// false.
+class BASE_EXPORT ProcessIterator {
+ public:
+ typedef std::list<ProcessEntry> ProcessEntries;
+
+ explicit ProcessIterator(const ProcessFilter* filter);
+ virtual ~ProcessIterator();
+
+ // If there's another process that matches the given executable name,
+ // returns a const pointer to the corresponding PROCESSENTRY32.
+ // If there are no more matching processes, returns NULL.
+ // The returned pointer will remain valid until NextProcessEntry()
+ // is called again or this NamedProcessIterator goes out of scope.
+ const ProcessEntry* NextProcessEntry();
+
+ // Takes a snapshot of all the ProcessEntry found.
+ ProcessEntries Snapshot();
+
+ protected:
+ virtual bool IncludeEntry();
+ const ProcessEntry& entry() { return entry_; }
+
+ private:
+ // Determines whether there's another process (regardless of executable)
+ // left in the list of all processes. Returns true and sets entry_ to
+ // that process's info if there is one, false otherwise.
+ bool CheckForNextProcess();
+
+ // Initializes a PROCESSENTRY32 data structure so that it's ready for
+ // use with Process32First/Process32Next.
+ void InitProcessEntry(ProcessEntry* entry);
+
+#if defined(OS_WIN)
+ HANDLE snapshot_;
+ bool started_iteration_;
+#elif defined(OS_MACOSX) || defined(OS_BSD)
+ std::vector<kinfo_proc> kinfo_procs_;
+ size_t index_of_kinfo_proc_;
+#elif defined(OS_POSIX)
+ DIR* procfs_dir_;
+#endif
+ ProcessEntry entry_;
+ const ProcessFilter* filter_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProcessIterator);
+};
+
+// This class provides a way to iterate through the list of processes
+// on the current machine that were started from the given executable
+// name. To use, create an instance and then call NextProcessEntry()
+// until it returns false.
+class BASE_EXPORT NamedProcessIterator : public ProcessIterator {
+ public:
+ NamedProcessIterator(const FilePath::StringType& executable_name,
+ const ProcessFilter* filter);
+ ~NamedProcessIterator() override;
+
+ protected:
+ bool IncludeEntry() override;
+
+ private:
+ FilePath::StringType executable_name_;
+
+ DISALLOW_COPY_AND_ASSIGN(NamedProcessIterator);
+};
+
+// Returns the number of processes on the machine that are running from the
+// given executable name. If filter is non-null, then only processes selected
+// by the filter will be counted.
+BASE_EXPORT int GetProcessCount(const FilePath::StringType& executable_name,
+ const ProcessFilter* filter);
+
+} // namespace base
+
+#endif // BASE_PROCESS_PROCESS_ITERATOR_H_
diff --git a/libchrome/base/process/process_iterator_linux.cc b/libchrome/base/process/process_iterator_linux.cc
new file mode 100644
index 0000000..421565f
--- /dev/null
+++ b/libchrome/base/process/process_iterator_linux.cc
@@ -0,0 +1,151 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_iterator.h"
+
+#include <stddef.h>
+
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/process/internal_linux.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+namespace {
+
+// Reads the |field_num|th field from |proc_stats|.
+// Returns an empty string on failure.
+// This version only handles VM_COMM and VM_STATE, which are the only fields
+// that are strings.
+std::string GetProcStatsFieldAsString(
+    const std::vector<std::string>& proc_stats,
+    internal::ProcStatsFields field_num) {
+  if (field_num < internal::VM_COMM || field_num > internal::VM_STATE) {
+    NOTREACHED();
+    return std::string();
+  }
+
+  if (proc_stats.size() > static_cast<size_t>(field_num))
+    return proc_stats[field_num];
+
+  NOTREACHED();
+  return std::string();  // |return 0| would build std::string from nullptr.
+}
+
+// Reads /proc/<pid>/cmdline and populates |proc_cmd_line_args| with the command
+// line arguments. Returns true if successful.
+// Note: /proc/<pid>/cmdline contains command line arguments separated by single
+// null characters. We tokenize it into a vector of strings using '\0' as a
+// delimiter.
+bool GetProcCmdline(pid_t pid, std::vector<std::string>* proc_cmd_line_args) {
+ // Synchronously reading files in /proc is safe.
+ ThreadRestrictions::ScopedAllowIO allow_io;
+
+ FilePath cmd_line_file = internal::GetProcPidDir(pid).Append("cmdline");
+ std::string cmd_line;
+ if (!ReadFileToString(cmd_line_file, &cmd_line))
+ return false;
+ std::string delimiters;
+ delimiters.push_back('\0');
+ *proc_cmd_line_args = SplitString(cmd_line, delimiters, KEEP_WHITESPACE,
+ SPLIT_WANT_NONEMPTY);
+ return true;
+}
+
+} // namespace
+
+ProcessIterator::ProcessIterator(const ProcessFilter* filter)
+ : filter_(filter) {
+ procfs_dir_ = opendir(internal::kProcDir);
+ if (!procfs_dir_) {
+ // On Android, SELinux may prevent reading /proc. See
+ // https://crbug.com/581517 for details.
+ PLOG(ERROR) << "opendir " << internal::kProcDir;
+ }
+}
+
+ProcessIterator::~ProcessIterator() {
+ if (procfs_dir_) {
+ closedir(procfs_dir_);
+ procfs_dir_ = nullptr;
+ }
+}
+
+bool ProcessIterator::CheckForNextProcess() {
+ // TODO(port): skip processes owned by different UID
+
+ if (!procfs_dir_) {
+ DLOG(ERROR) << "Skipping CheckForNextProcess(), no procfs_dir_";
+ return false;
+ }
+
+ pid_t pid = kNullProcessId;
+ std::vector<std::string> cmd_line_args;
+ std::string stats_data;
+ std::vector<std::string> proc_stats;
+
+ // Arbitrarily guess that there will never be more than 200 non-process
+ // files in /proc. Hardy has 53 and Lucid has 61.
+ int skipped = 0;
+ const int kSkipLimit = 200;
+ while (skipped < kSkipLimit) {
+ dirent* slot = readdir(procfs_dir_);
+ // all done looking through /proc?
+ if (!slot)
+ return false;
+
+ // If not a process, keep looking for one.
+ pid = internal::ProcDirSlotToPid(slot->d_name);
+ if (!pid) {
+ skipped++;
+ continue;
+ }
+
+ if (!GetProcCmdline(pid, &cmd_line_args))
+ continue;
+
+ if (!internal::ReadProcStats(pid, &stats_data))
+ continue;
+ if (!internal::ParseProcStats(stats_data, &proc_stats))
+ continue;
+
+ std::string runstate =
+ GetProcStatsFieldAsString(proc_stats, internal::VM_STATE);
+ if (runstate.size() != 1) {
+ NOTREACHED();
+ continue;
+ }
+
+ // Is the process in 'Zombie' state, i.e. dead but waiting to be reaped?
+ // Allowed values: D R S T Z
+ if (runstate[0] != 'Z')
+ break;
+
+ // Nope, it's a zombie; somebody isn't cleaning up after their children.
+ // (e.g. WaitForProcessesToExit doesn't clean up after dead children yet.)
+ // There could be a lot of zombies, can't really decrement i here.
+ }
+ if (skipped >= kSkipLimit) {
+ NOTREACHED();
+ return false;
+ }
+
+ entry_.pid_ = pid;
+ entry_.ppid_ = GetProcStatsFieldAsInt64(proc_stats, internal::VM_PPID);
+ entry_.gid_ = GetProcStatsFieldAsInt64(proc_stats, internal::VM_PGRP);
+ entry_.cmd_line_args_.assign(cmd_line_args.begin(), cmd_line_args.end());
+ entry_.exe_file_ = GetProcessExecutablePath(pid).BaseName().value();
+ return true;
+}
+
+bool NamedProcessIterator::IncludeEntry() {
+ if (executable_name_ != entry().exe_file())
+ return false;
+ return ProcessIterator::IncludeEntry();
+}
+
+} // namespace base
diff --git a/libchrome/base/process/process_iterator_mac.cc b/libchrome/base/process/process_iterator_mac.cc
new file mode 100644
index 0000000..3d61698
--- /dev/null
+++ b/libchrome/base/process/process_iterator_mac.cc
@@ -0,0 +1,140 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_iterator.h"
+
+#include <errno.h>
+#include <stddef.h>
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+ProcessIterator::ProcessIterator(const ProcessFilter* filter)
+ : index_of_kinfo_proc_(0),
+ filter_(filter) {
+  // Get a snapshot of all of my processes (yes, as we loop it can go stale,
+  // but trying to find where we were in a constantly changing list is
+  // basically impossible).
+
+ int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_UID,
+ static_cast<int>(geteuid()) };
+
+ // Since more processes could start between when we get the size and when
+ // we get the list, we do a loop to keep trying until we get it.
+ bool done = false;
+ int try_num = 1;
+ const int max_tries = 10;
+ do {
+ // Get the size of the buffer
+ size_t len = 0;
+ if (sysctl(mib, arraysize(mib), NULL, &len, NULL, 0) < 0) {
+ DLOG(ERROR) << "failed to get the size needed for the process list";
+ kinfo_procs_.resize(0);
+ done = true;
+ } else {
+ size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
+ // Leave some spare room for process table growth (more could show up
+ // between when we check and now)
+ num_of_kinfo_proc += 16;
+ kinfo_procs_.resize(num_of_kinfo_proc);
+ len = num_of_kinfo_proc * sizeof(struct kinfo_proc);
+ // Load the list of processes
+ if (sysctl(mib, arraysize(mib), &kinfo_procs_[0], &len, NULL, 0) < 0) {
+ // If we get a mem error, it just means we need a bigger buffer, so
+ // loop around again. Anything else is a real error and give up.
+ if (errno != ENOMEM) {
+ DLOG(ERROR) << "failed to get the process list";
+ kinfo_procs_.resize(0);
+ done = true;
+ }
+ } else {
+ // Got the list, just make sure we're sized exactly right
+ size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
+ kinfo_procs_.resize(num_of_kinfo_proc);
+ done = true;
+ }
+ }
+ } while (!done && (try_num++ < max_tries));
+
+ if (!done) {
+ DLOG(ERROR) << "failed to collect the process list in a few tries";
+ kinfo_procs_.resize(0);
+ }
+}
+
+ProcessIterator::~ProcessIterator() {
+}
+
+bool ProcessIterator::CheckForNextProcess() {
+ std::string data;
+ for (; index_of_kinfo_proc_ < kinfo_procs_.size(); ++index_of_kinfo_proc_) {
+ kinfo_proc& kinfo = kinfo_procs_[index_of_kinfo_proc_];
+
+ // Skip processes just awaiting collection
+ if ((kinfo.kp_proc.p_pid > 0) && (kinfo.kp_proc.p_stat == SZOMB))
+ continue;
+
+ int mib[] = { CTL_KERN, KERN_PROCARGS, kinfo.kp_proc.p_pid };
+
+ // Find out what size buffer we need.
+ size_t data_len = 0;
+ if (sysctl(mib, arraysize(mib), NULL, &data_len, NULL, 0) < 0) {
+ DVPLOG(1) << "failed to figure out the buffer size for a commandline";
+ continue;
+ }
+
+ data.resize(data_len);
+ if (sysctl(mib, arraysize(mib), &data[0], &data_len, NULL, 0) < 0) {
+ DVPLOG(1) << "failed to fetch a commandline";
+ continue;
+ }
+
+ // |data| contains all the command line parameters of the process, separated
+ // by blocks of one or more null characters. We tokenize |data| into a
+ // vector of strings using '\0' as a delimiter and populate
+ // |entry_.cmd_line_args_|.
+ std::string delimiters;
+ delimiters.push_back('\0');
+ entry_.cmd_line_args_ = SplitString(data, delimiters,
+ KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
+
+ // |data| starts with the full executable path followed by a null character.
+ // We search for the first instance of '\0' and extract everything before it
+ // to populate |entry_.exe_file_|.
+ size_t exec_name_end = data.find('\0');
+ if (exec_name_end == std::string::npos) {
+ DLOG(ERROR) << "command line data didn't match expected format";
+ continue;
+ }
+
+ entry_.pid_ = kinfo.kp_proc.p_pid;
+ entry_.ppid_ = kinfo.kp_eproc.e_ppid;
+ entry_.gid_ = kinfo.kp_eproc.e_pgid;
+ size_t last_slash = data.rfind('/', exec_name_end);
+ if (last_slash == std::string::npos)
+ entry_.exe_file_.assign(data, 0, exec_name_end);
+ else
+ entry_.exe_file_.assign(data, last_slash + 1,
+ exec_name_end - last_slash - 1);
+ // Start w/ the next entry next time through
+ ++index_of_kinfo_proc_;
+ // Done
+ return true;
+ }
+ return false;
+}
+
+bool NamedProcessIterator::IncludeEntry() {
+ return (executable_name_ == entry().exe_file() &&
+ ProcessIterator::IncludeEntry());
+}
+
+} // namespace base
diff --git a/libchrome/base/process/process_metrics.cc b/libchrome/base/process/process_metrics.cc
new file mode 100644
index 0000000..0b38726
--- /dev/null
+++ b/libchrome/base/process/process_metrics.cc
@@ -0,0 +1,98 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_metrics.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/values.h"
+#include "build/build_config.h"
+
+namespace base {
+
+SystemMetrics::SystemMetrics() {
+ committed_memory_ = 0;
+}
+
+SystemMetrics SystemMetrics::Sample() {
+ SystemMetrics system_metrics;
+
+ system_metrics.committed_memory_ = GetSystemCommitCharge();
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+ GetSystemMemoryInfo(&system_metrics.memory_info_);
+ GetSystemDiskInfo(&system_metrics.disk_info_);
+#endif
+#if defined(OS_CHROMEOS)
+ GetSwapInfo(&system_metrics.swap_info_);
+#endif
+
+ return system_metrics;
+}
+
+std::unique_ptr<Value> SystemMetrics::ToValue() const {
+ std::unique_ptr<DictionaryValue> res(new DictionaryValue());
+
+ res->SetInteger("committed_memory", static_cast<int>(committed_memory_));
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+ res->Set("meminfo", memory_info_.ToValue());
+ res->Set("diskinfo", disk_info_.ToValue());
+#endif
+#if defined(OS_CHROMEOS)
+ res->Set("swapinfo", swap_info_.ToValue());
+#endif
+
+ return std::move(res);
+}
+
+ProcessMetrics* ProcessMetrics::CreateCurrentProcessMetrics() {
+#if !defined(OS_MACOSX) || defined(OS_IOS)
+ return CreateProcessMetrics(base::GetCurrentProcessHandle());
+#else
+ return CreateProcessMetrics(base::GetCurrentProcessHandle(), nullptr);
+#endif // !defined(OS_MACOSX) || defined(OS_IOS)
+}
+
+double ProcessMetrics::GetPlatformIndependentCPUUsage() {
+#if defined(OS_WIN)
+ return GetCPUUsage() * processor_count_;
+#else
+ return GetCPUUsage();
+#endif
+}
+
+#if defined(OS_MACOSX) || defined(OS_LINUX)
+int ProcessMetrics::CalculateIdleWakeupsPerSecond(
+ uint64_t absolute_idle_wakeups) {
+ TimeTicks time = TimeTicks::Now();
+
+ if (last_absolute_idle_wakeups_ == 0) {
+ // First call, just set the last values.
+ last_idle_wakeups_time_ = time;
+ last_absolute_idle_wakeups_ = absolute_idle_wakeups;
+ return 0;
+ }
+
+ int64_t wakeups_delta = absolute_idle_wakeups - last_absolute_idle_wakeups_;
+ int64_t time_delta = (time - last_idle_wakeups_time_).InMicroseconds();
+ if (time_delta == 0) {
+ NOTREACHED();
+ return 0;
+ }
+
+ last_idle_wakeups_time_ = time;
+ last_absolute_idle_wakeups_ = absolute_idle_wakeups;
+
+ // Round to average wakeups per second.
+ int64_t wakeups_delta_for_ms = wakeups_delta * Time::kMicrosecondsPerSecond;
+ return (wakeups_delta_for_ms + time_delta / 2) / time_delta;
+}
+#else
+int ProcessMetrics::GetIdleWakeupsPerSecond() {
+ NOTIMPLEMENTED(); // http://crbug.com/120488
+ return 0;
+}
+#endif // defined(OS_MACOSX) || defined(OS_LINUX)
+
+} // namespace base
diff --git a/libchrome/base/process/process_metrics.h b/libchrome/base/process/process_metrics.h
new file mode 100644
index 0000000..e67b663
--- /dev/null
+++ b/libchrome/base/process/process_metrics.h
@@ -0,0 +1,462 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains routines for gathering resource statistics for processes
+// running on the system.
+
+#ifndef BASE_PROCESS_PROCESS_METRICS_H_
+#define BASE_PROCESS_PROCESS_METRICS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <functional>
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#include "base/time/time.h"
+#include "base/values.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include <mach/mach.h>
+#include "base/process/port_provider_mac.h"
+#endif
+
+namespace base {
+
+#if defined(OS_WIN)
+// On Windows, reuse the fields of the native IO_COUNTERS structure.
+struct IoCounters : public IO_COUNTERS {
+};
+#elif defined(OS_POSIX)
+// POSIX mirror of Windows' IO_COUNTERS: cumulative per-process I/O
+// operation and byte-transfer counts.
+struct IoCounters {
+  uint64_t ReadOperationCount;
+  uint64_t WriteOperationCount;
+  uint64_t OtherOperationCount;
+  uint64_t ReadTransferCount;
+  uint64_t WriteTransferCount;
+  uint64_t OtherTransferCount;
+};
+#endif
+
+// Working Set (resident) memory usage broken down by
+//
+// On Windows:
+// priv (private): These pages (kbytes) cannot be shared with any other process.
+// shareable: These pages (kbytes) can be shared with other processes under
+// the right circumstances.
+// shared : These pages (kbytes) are currently shared with at least one
+// other process.
+//
+// On Linux:
+// priv: Pages mapped only by this process.
+// shared: PSS or 0 if the kernel doesn't support this.
+// shareable: 0
+
+// On ChromeOS:
+// priv: Pages mapped only by this process.
+// shared: PSS or 0 if the kernel doesn't support this.
+// shareable: 0
+// swapped Pages swapped out to zram.
+//
+// On OS X: TODO(thakis): Revise.
+// priv: Memory.
+// shared: 0
+// shareable: 0
+//
+// Working-set sizes, all in kilobytes; see the per-platform notes above for
+// what each field means on each OS.
+struct WorkingSetKBytes {
+#if defined(OS_CHROMEOS)
+  // Fix: |swapped| was previously left uninitialized by the constructor.
+  WorkingSetKBytes() : priv(0), shareable(0), shared(0), swapped(0) {}
+#else
+  WorkingSetKBytes() : priv(0), shareable(0), shared(0) {}
+#endif
+  size_t priv;
+  size_t shareable;
+  size_t shared;
+#if defined(OS_CHROMEOS)
+  size_t swapped;
+#endif
+};
+
+// Committed (resident + paged) memory usage broken down by
+// private: These pages cannot be shared with any other process.
+// mapped: These pages are mapped into the view of a section (backed by
+// pagefile.sys)
+// image: These pages are mapped into the view of an image section (backed by
+// file system)
+struct CommittedKBytes {
+  CommittedKBytes() : priv(0), mapped(0), image(0) {}
+  size_t priv;    // Private commit, in kB.
+  size_t mapped;  // Section-backed (pagefile) commit, in kB.
+  size_t image;   // Image-section-backed commit, in kB.
+};
+
+// Convert a POSIX timeval to microseconds.
+BASE_EXPORT int64_t TimeValToMicroseconds(const struct timeval& tv);
+
+// A callback to filter threads of a process (when counting CPU time)
+using ThreadFilterCb = std::function<bool(const std::string &stat)>;
+
+// Provides performance metrics for a specified process (CPU usage, memory and
+// IO counters). Use CreateCurrentProcessMetrics() to get an instance for the
+// current process, or CreateProcessMetrics() to get an instance for an
+// arbitrary process. Then, access the information with the different get
+// methods.
+class BASE_EXPORT ProcessMetrics {
+ public:
+  ~ProcessMetrics();
+
+  // Creates a ProcessMetrics for the specified process.
+  // The caller owns the returned object.
+#if !defined(OS_MACOSX) || defined(OS_IOS)
+  static ProcessMetrics* CreateProcessMetrics(ProcessHandle process);
+
+  // Like CreateProcessMetrics(), but CPU accounting only includes threads
+  // for which |thread_filter| returns true (the filter receives the thread's
+  // /proc stat contents on Linux).
+  static ProcessMetrics* CreateProcessMetricsWithFilter(ProcessHandle process,
+      ThreadFilterCb thread_filter);
+#else
+
+  // The port provider needs to outlive the ProcessMetrics object returned by
+  // this function. If NULL is passed as provider, the returned object
+  // only returns valid metrics if |process| is the current process.
+  static ProcessMetrics* CreateProcessMetrics(ProcessHandle process,
+                                              PortProvider* port_provider);
+#endif  // !defined(OS_MACOSX) || defined(OS_IOS)
+
+  // Creates a ProcessMetrics for the current process. This is a cross-platform
+  // convenience wrapper for CreateProcessMetrics().
+  // The caller owns the returned object.
+  static ProcessMetrics* CreateCurrentProcessMetrics();
+
+  // Returns the current space allocated for the pagefile, in bytes (these
+  // pages may or may not be in memory). On Linux, this returns the total
+  // virtual memory size.
+  size_t GetPagefileUsage() const;
+  // Returns the peak space allocated for the pagefile, in bytes.
+  size_t GetPeakPagefileUsage() const;
+  // Returns the current working set size, in bytes. On Linux, this returns
+  // the resident set size.
+  size_t GetWorkingSetSize() const;
+  // Returns the peak working set size, in bytes.
+  size_t GetPeakWorkingSetSize() const;
+  // Returns private and shared usage, in bytes. Private bytes is the amount
+  // of memory currently allocated to a process that cannot be shared. Returns
+  // false on platform specific error conditions. Note: |private_bytes|
+  // returns 0 on unsupported OSes: prior to XP SP2.
+  bool GetMemoryBytes(size_t* private_bytes,
+                      size_t* shared_bytes);
+  // Fills a CommittedKBytes with both resident and paged
+  // memory usage as per definition of CommittedBytes.
+  void GetCommittedKBytes(CommittedKBytes* usage) const;
+  // Fills a WorkingSetKBytes containing resident private and shared memory
+  // usage in bytes, as per definition of WorkingSetBytes. Note that this
+  // function is somewhat expensive on Windows (a few ms per process).
+  bool GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const;
+
+#if defined(OS_MACOSX)
+  // Fills both CommittedKBytes and WorkingSetKBytes in a single operation.
+  // This is more efficient on Mac OS X, as the two can be retrieved with a
+  // single system call.
+  bool GetCommittedAndWorkingSetKBytes(CommittedKBytes* usage,
+                                       WorkingSetKBytes* ws_usage) const;
+#endif
+
+  // Returns the CPU usage in percent since the last time this method or
+  // GetPlatformIndependentCPUUsage() was called. The first time this method
+  // is called it returns 0 and will return the actual CPU info on subsequent
+  // calls. On Windows, the CPU usage value is for all CPUs. So if you have
+  // 2 CPUs and your process is using all the cycles of 1 CPU and not the other
+  // CPU, this method returns 50.
+  double GetCPUUsage();
+
+  // Returns the number of average idle cpu wakeups per second since the last
+  // call.
+  int GetIdleWakeupsPerSecond();
+
+  // Same as GetCPUUsage(), but will return consistent values on all platforms
+  // (cancelling the Windows exception mentioned above) by returning a value in
+  // the range of 0 to (100 * numCPUCores) everywhere.
+  double GetPlatformIndependentCPUUsage();
+
+  // Retrieves accounting information for all I/O operations performed by the
+  // process.
+  // If IO information is retrieved successfully, the function returns true
+  // and fills in the IO_COUNTERS passed in. The function returns false
+  // otherwise.
+  bool GetIOCounters(IoCounters* io_counters) const;
+
+#if defined(OS_LINUX)
+  // Returns the number of file descriptors currently open by the process, or
+  // -1 on error.
+  int GetOpenFdCount() const;
+#endif  // defined(OS_LINUX)
+
+ private:
+#if !defined(OS_MACOSX) || defined(OS_IOS)
+  explicit ProcessMetrics(ProcessHandle process, ThreadFilterCb thread_filter);
+#else
+  ProcessMetrics(ProcessHandle process, PortProvider* port_provider);
+#endif  // !defined(OS_MACOSX) || defined(OS_IOS)
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  bool GetWorkingSetKBytesStatm(WorkingSetKBytes* ws_usage) const;
+#endif
+
+#if defined(OS_CHROMEOS)
+  bool GetWorkingSetKBytesTotmaps(WorkingSetKBytes *ws_usage) const;
+#endif
+
+#if defined(OS_MACOSX) || defined(OS_LINUX)
+  // Converts an absolute idle-wakeup counter into a per-second rate relative
+  // to the previous call.
+  int CalculateIdleWakeupsPerSecond(uint64_t absolute_idle_wakeups);
+#endif
+
+  ProcessHandle process_;
+
+  int processor_count_;
+
+  // Used to store the previous times and CPU usage counts so we can
+  // compute the CPU usage between calls.
+  TimeTicks last_cpu_time_;
+  int64_t last_system_time_;
+
+#if defined(OS_MACOSX) || defined(OS_LINUX)
+  // Same thing for idle wakeups.
+  TimeTicks last_idle_wakeups_time_;
+  uint64_t last_absolute_idle_wakeups_;
+#endif
+
+#if !defined(OS_IOS)
+#if defined(OS_MACOSX)
+  // Queries the port provider if it's set.
+  mach_port_t TaskForPid(ProcessHandle process) const;
+
+  PortProvider* port_provider_;
+#elif defined(OS_POSIX)
+  // Jiffie count at the last_cpu_time_ we updated.
+  int last_cpu_;
+#endif  // defined(OS_POSIX)
+#endif  // !defined(OS_IOS)
+
+  // Optional per-thread filter used when summing CPU time; an empty callback
+  // means all threads are counted.
+  ThreadFilterCb thread_filter_;
+
+  DISALLOW_COPY_AND_ASSIGN(ProcessMetrics);
+};
+
+// Returns the memory committed by the system in KBytes.
+// Returns 0 if it can't compute the commit charge.
+BASE_EXPORT size_t GetSystemCommitCharge();
+
+// Returns the number of bytes in a memory page. Do not use this to compute
+// the number of pages in a block of memory for calling mincore(). On some
+// platforms, e.g. iOS, mincore() uses a different page size from what is
+// returned by GetPageSize().
+BASE_EXPORT size_t GetPageSize();
+
+#if defined(OS_POSIX)
+// Returns the maximum number of file descriptors that can be open by a process
+// at once. If the number is unavailable, a conservative best guess is returned.
+BASE_EXPORT size_t GetMaxFds();
+
+// Sets the file descriptor soft limit to |max_descriptors| or the OS hard
+// limit, whichever is lower.
+BASE_EXPORT void SetFdLimit(unsigned int max_descriptors);
+#endif // defined(OS_POSIX)
+
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
+ defined(OS_ANDROID)
+// Data about system-wide memory consumption. Values are in KB. Available on
+// Windows, Mac, Linux, Android and Chrome OS.
+//
+// Total/free memory are available on all platforms that implement
+// GetSystemMemoryInfo(). Total/free swap memory are available on all platforms
+// except on Mac. Buffers/cached/active_anon/inactive_anon/active_file/
+// inactive_file/dirty/pswpin/pswpout/pgmajfault are available on
+// Linux/Android/Chrome OS. Shmem/slab/gem_objects/gem_size are Chrome OS only.
+struct BASE_EXPORT SystemMemoryInfoKB {
+  SystemMemoryInfoKB();
+  SystemMemoryInfoKB(const SystemMemoryInfoKB& other);
+
+  // Serializes the platform specific fields to value.
+  std::unique_ptr<Value> ToValue() const;
+
+  // Total and free physical memory, in kB.
+  int total;
+  int free;
+
+#if defined(OS_LINUX)
+  // This provides an estimate of available memory as described here:
+  // https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
+  // NOTE: this is ONLY valid in kernels 3.14 and up. Its value will always
+  // be 0 in earlier kernel versions.
+  int available;
+#endif
+
+#if !defined(OS_MACOSX)
+  int swap_total;
+  int swap_free;
+#endif
+
+#if defined(OS_ANDROID) || defined(OS_LINUX)
+  // Fields parsed from /proc/meminfo, in kB.
+  int buffers;
+  int cached;
+  int active_anon;
+  int inactive_anon;
+  int active_file;
+  int inactive_file;
+  int dirty;
+
+  // vmstats data.
+  int pswpin;
+  int pswpout;
+  int pgmajfault;
+#endif  // defined(OS_ANDROID) || defined(OS_LINUX)
+
+#if defined(OS_CHROMEOS)
+  int shmem;
+  int slab;
+  // Gem data will be -1 if not supported.
+  int gem_objects;
+  long long gem_size;
+#endif  // defined(OS_CHROMEOS)
+};
+
+// On Linux/Android/Chrome OS, system-wide memory consumption data is parsed
+// from /proc/meminfo and /proc/vmstat. On Windows/Mac, it is obtained using
+// system API calls.
+//
+// Fills in the provided |meminfo| structure. Returns true on success.
+// Exposed for memory debugging widget.
+BASE_EXPORT bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo);
+
+#endif // defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) ||
+ // defined(OS_ANDROID)
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+// Parse the data found in /proc/<pid>/stat and return the sum of the
+// CPU-related ticks. Returns -1 on parse error.
+// Exposed for testing.
+BASE_EXPORT int ParseProcStatCPU(const std::string& input);
+
+// Get the number of threads of |process| as available in /proc/<pid>/stat.
+// This should be used with care as no synchronization with running threads is
+// done. This is mostly useful to guarantee being single-threaded.
+// Returns 0 on failure.
+BASE_EXPORT int GetNumberOfThreads(ProcessHandle process);
+
+// /proc/self/exe refers to the current executable.
+BASE_EXPORT extern const char kProcSelfExe[];
+
+// Parses a string containing the contents of /proc/meminfo
+// returns true on success or false for a parsing error
+BASE_EXPORT bool ParseProcMeminfo(const std::string& input,
+ SystemMemoryInfoKB* meminfo);
+
+// Parses a string containing the contents of /proc/vmstat
+// returns true on success or false for a parsing error
+BASE_EXPORT bool ParseProcVmstat(const std::string& input,
+ SystemMemoryInfoKB* meminfo);
+
+// Data from /proc/diskstats about system-wide disk I/O.
+struct BASE_EXPORT SystemDiskInfo {
+  SystemDiskInfo();
+  SystemDiskInfo(const SystemDiskInfo& other);
+
+  // Serializes the platform specific fields to value.
+  std::unique_ptr<Value> ToValue() const;
+
+  // Aggregated /proc/diskstats counters; see the kernel's
+  // Documentation/iostats.txt for the precise semantics of each field.
+  uint64_t reads;
+  uint64_t reads_merged;
+  uint64_t sectors_read;
+  uint64_t read_time;
+  uint64_t writes;
+  uint64_t writes_merged;
+  uint64_t sectors_written;
+  uint64_t write_time;
+  uint64_t io;
+  uint64_t io_time;
+  uint64_t weighted_io_time;
+};
+
+// Checks whether the candidate string is a valid disk name, [hsv]d[a-z]+
+// for a generic disk or mmcblk[0-9]+ for the MMC case.
+// Names of disk partitions (e.g. sda1) are not valid.
+BASE_EXPORT bool IsValidDiskName(const std::string& candidate);
+
+// Retrieves data from /proc/diskstats about system-wide disk I/O.
+// Fills in the provided |diskinfo| structure. Returns true on success.
+BASE_EXPORT bool GetSystemDiskInfo(SystemDiskInfo* diskinfo);
+#endif // defined(OS_LINUX) || defined(OS_ANDROID)
+
+#if defined(OS_CHROMEOS)
+// Data from files in directory /sys/block/zram0 about ZRAM usage.
+struct BASE_EXPORT SwapInfo {
+  SwapInfo()
+      : num_reads(0),
+        num_writes(0),
+        compr_data_size(0),
+        orig_data_size(0),
+        mem_used_total(0) {
+  }
+
+  // Serializes the platform specific fields to value.
+  std::unique_ptr<Value> ToValue() const;
+
+  // Cumulative counters read from /sys/block/zram0 by GetSwapInfo().
+  uint64_t num_reads;
+  uint64_t num_writes;
+  uint64_t compr_data_size;
+  uint64_t orig_data_size;
+  uint64_t mem_used_total;
+};
+
+// In ChromeOS, reads files from /sys/block/zram0 that contain ZRAM usage data.
+// Fills in the provided |swap_data| structure.
+BASE_EXPORT void GetSwapInfo(SwapInfo* swap_info);
+#endif // defined(OS_CHROMEOS)
+
+// Measures system-wide (all processes) CPU usage between successive calls.
+class SystemCpuMetrics {
+ public:
+  SystemCpuMetrics();
+
+  // Returns the system-wide CPU usage in percent since the last time this
+  // method was called. The first call only primes the internal baseline and
+  // returns 0; subsequent calls return the usage over the elapsed interval.
+  double GetCPUUsage();
+
+ private:
+  // Used to store the previous times and CPU usage counts so we can
+  // compute the CPU usage between calls.
+  TimeTicks last_cpu_time_;
+
+  // Jiffie count at the last_cpu_time_ we updated.
+  int last_cpu_;
+
+};
+
+// Collects and holds performance metrics for system memory and disk.
+// Provides functionality to retrieve the data on various platforms and
+// to serialize the stored data.
+class SystemMetrics {
+ public:
+  SystemMetrics();
+
+  // Captures a snapshot of the current system metrics (committed memory,
+  // plus platform-specific memory/disk/swap info).
+  static SystemMetrics Sample();
+
+  // Serializes the system metrics to value.
+  std::unique_ptr<Value> ToValue() const;
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(SystemMetricsTest, SystemMetrics);
+
+  size_t committed_memory_;
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  SystemMemoryInfoKB memory_info_;
+  SystemDiskInfo disk_info_;
+#endif
+#if defined(OS_CHROMEOS)
+  SwapInfo swap_info_;
+#endif
+};
+
+} // namespace base
+
+#endif // BASE_PROCESS_PROCESS_METRICS_H_
diff --git a/libchrome/base/process/process_metrics_linux.cc b/libchrome/base/process/process_metrics_linux.cc
new file mode 100644
index 0000000..146bbc5
--- /dev/null
+++ b/libchrome/base/process/process_metrics_linux.cc
@@ -0,0 +1,1072 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_metrics.h"
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <utility>
+
+#include "base/files/dir_reader_posix.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/process/internal_linux.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/string_util.h"
+#include "base/sys_info.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+// Strips leading/trailing ASCII whitespace from every key and value in
+// |pairs|, in place.
+void TrimKeyValuePairs(StringPairs* pairs) {
+  DCHECK(pairs);
+  for (auto& pair : *pairs) {
+    TrimWhitespaceASCII(pair.first, TRIM_ALL, &pair.first);
+    TrimWhitespaceASCII(pair.second, TRIM_ALL, &pair.second);
+  }
+}
+
+#if defined(OS_CHROMEOS)
+// Reads a file expected to hold a single numeric string and returns the value
+// parsed as a uint64_t, or 0 if the file cannot be read or parsed.
+// Takes the path by const reference: the original signature took FilePath by
+// value, forcing a needless copy on every call.
+static uint64_t ReadFileToUint64(const FilePath& file) {
+  std::string file_as_string;
+  if (!ReadFileToString(file, &file_as_string))
+    return 0;
+  TrimWhitespaceASCII(file_as_string, TRIM_ALL, &file_as_string);
+  uint64_t file_as_uint64 = 0;
+  if (!StringToUint64(file_as_string, &file_as_uint64))
+    return 0;
+  return file_as_uint64;
+}
+#endif
+
+// Read /proc/<pid>/status and return the value for |field|, or 0 on failure.
+// Only works for fields in the form of "Field: value kB"; the returned number
+// is therefore in kilobytes.
+size_t ReadProcStatusAndGetFieldAsSizeT(pid_t pid, const std::string& field) {
+  std::string status;
+  {
+    // Synchronously reading files in /proc does not hit the disk.
+    ThreadRestrictions::ScopedAllowIO allow_io;
+    FilePath stat_file = internal::GetProcPidDir(pid).Append("status");
+    if (!ReadFileToString(stat_file, &status))
+      return 0;
+  }
+
+  StringPairs pairs;
+  SplitStringIntoKeyValuePairs(status, ':', '\n', &pairs);
+  TrimKeyValuePairs(&pairs);
+  for (size_t i = 0; i < pairs.size(); ++i) {
+    const std::string& key = pairs[i].first;
+    const std::string& value_str = pairs[i].second;
+    if (key == field) {
+      // The value must be exactly "<number> kB".
+      std::vector<StringPiece> split_value_str = SplitStringPiece(
+          value_str, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+      if (split_value_str.size() != 2 || split_value_str[1] != "kB") {
+        NOTREACHED();
+        return 0;
+      }
+      size_t value;
+      if (!StringToSizeT(split_value_str[0], &value)) {
+        NOTREACHED();
+        return 0;
+      }
+      return value;
+    }
+  }
+  // This can be reached if the process dies when proc is read -- in that case,
+  // the kernel can return missing fields.
+  return 0;
+}
+
+#if defined(OS_LINUX)
+// Read /proc/<pid>/sched and look for |field|. On success, return true and
+// write the value for |field| into |result|.
+// Only works for fields in the form of "field : uint_value".
+bool ReadProcSchedAndGetFieldAsUint64(pid_t pid,
+                                      const std::string& field,
+                                      uint64_t* result) {
+  std::string sched_data;
+  {
+    // Synchronously reading files in /proc does not hit the disk.
+    ThreadRestrictions::ScopedAllowIO allow_io;
+    FilePath sched_file = internal::GetProcPidDir(pid).Append("sched");
+    if (!ReadFileToString(sched_file, &sched_data))
+      return false;
+  }
+
+  StringPairs pairs;
+  SplitStringIntoKeyValuePairs(sched_data, ':', '\n', &pairs);
+  TrimKeyValuePairs(&pairs);
+  for (size_t i = 0; i < pairs.size(); ++i) {
+    const std::string& key = pairs[i].first;
+    const std::string& value_str = pairs[i].second;
+    if (key == field) {
+      uint64_t value;
+      if (!StringToUint64(value_str, &value))
+        return false;
+      *result = value;
+      return true;
+    }
+  }
+  // Field not present (or the process disappeared mid-read).
+  return false;
+}
+#endif  // defined(OS_LINUX)
+
+typedef std::map<std::string, std::string> ProcStatMap;
+
+// Reads |file| (a /proc entry) into |buffer|, clearing it first. Returns true
+// only when the read succeeded and produced non-empty contents.
+bool ReadProcFile(const FilePath& file, std::string* buffer) {
+  buffer->clear();
+  // Synchronously reading files in /proc is safe.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  bool read_ok = ReadFileToString(file, buffer);
+  if (!read_ok) {
+    DLOG(WARNING) << "Failed to read " << file.MaybeAsASCII();
+    return false;
+  }
+  return !buffer->empty();
+}
+
+// Splits the contents of /proc/stat into a map keyed by each line's first
+// token (e.g. "cpu", "intr").
+void ParseProcStat(const std::string& contents, ProcStatMap* output) {
+  StringPairs key_value_pairs;
+  SplitStringIntoKeyValuePairs(contents, ' ', '\n', &key_value_pairs);
+  for (const auto& pair : key_value_pairs)
+    output->insert(pair);
+}
+
+// Returns the total user + nice + system CPU time consumed system-wide since
+// boot, in clock ticks, as parsed from the "cpu" line of /proc/stat.
+// Returns 0 on any read or parse failure.
+int GetSystemCpuTimeSinceBoot() {
+  FilePath path("/proc/stat");
+  std::string contents;
+  if (!ReadProcFile(path, &contents))
+    return 0;
+
+  ProcStatMap proc_stat;
+  ParseProcStat(contents, &proc_stat);
+  ProcStatMap::const_iterator cpu_it = proc_stat.find("cpu");
+  if (cpu_it == proc_stat.end())
+    return 0;
+
+  std::vector<std::string> cpu = SplitString(
+      cpu_it->second, kWhitespaceASCII, TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+
+  // Fix: guard against a malformed "cpu" line; the indexing below would
+  // otherwise read out of bounds.
+  if (cpu.size() < 3)
+    return 0;
+
+  uint64_t user;
+  uint64_t nice;
+  uint64_t system;
+  if (!StringToUint64(cpu[0], &user) || !StringToUint64(cpu[1], &nice) ||
+      !StringToUint64(cpu[2], &system))
+    return 0;
+
+  return user + nice + system;
+}
+
+// Get the total CPU of a single process. Return value is number of jiffies
+// on success or -1 on error.
+int GetProcessCPU(pid_t pid, ThreadFilterCb thread_filter) {
+  // Use /proc/<pid>/task to find all threads and parse their /stat file.
+  FilePath task_path = internal::GetProcPidDir(pid).Append("task");
+
+  DIR* dir = opendir(task_path.value().c_str());
+  if (!dir) {
+    DPLOG(ERROR) << "opendir(" << task_path.value() << ")";
+    return -1;
+  }
+
+  int total_cpu = 0;
+  while (struct dirent* ent = readdir(dir)) {
+    // Skip directory entries that are not thread IDs ("." and "..").
+    pid_t tid = internal::ProcDirSlotToPid(ent->d_name);
+    if (!tid)
+      continue;
+
+    // Synchronously reading files in /proc does not hit the disk.
+    ThreadRestrictions::ScopedAllowIO allow_io;
+
+    std::string stat;
+    FilePath stat_path =
+        task_path.Append(ent->d_name).Append(internal::kStatFile);
+    if (ReadFileToString(stat_path, &stat)) {
+      // Only include threads for which the filter returns true.
+      if (thread_filter && !thread_filter(stat)) {
+        continue;
+      }
+
+      // ParseProcStatCPU() returns -1 on error; skip those entries so a
+      // single unreadable thread does not poison the total.
+      int cpu = ParseProcStatCPU(stat);
+      if (cpu > 0)
+        total_cpu += cpu;
+    }
+  }
+  closedir(dir);
+
+  return total_cpu;
+}
+
+} // namespace
+
+// static
+ProcessMetrics* ProcessMetrics::CreateProcessMetrics(ProcessHandle process) {
+  // Empty filter: CPU time is accumulated for every thread of the process.
+  return new ProcessMetrics(process, ThreadFilterCb());
+}
+
+// static
+ProcessMetrics* ProcessMetrics::CreateProcessMetricsWithFilter(
+    ProcessHandle process, ThreadFilterCb thread_filter)
+{
+  return new ProcessMetrics(process, thread_filter);
+}
+
+// On linux, we return vsize.
+size_t ProcessMetrics::GetPagefileUsage() const {
+  return internal::ReadProcStatsAndGetFieldAsSizeT(process_,
+                                                   internal::VM_VSIZE);
+}
+
+// On linux, we return the high water mark of vsize (VmPeak, reported in kB
+// and converted here to bytes).
+size_t ProcessMetrics::GetPeakPagefileUsage() const {
+  return ReadProcStatusAndGetFieldAsSizeT(process_, "VmPeak") * 1024;
+}
+
+// On linux, we return RSS (page count scaled to bytes).
+size_t ProcessMetrics::GetWorkingSetSize() const {
+  return internal::ReadProcStatsAndGetFieldAsSizeT(process_, internal::VM_RSS) *
+      getpagesize();
+}
+
+// On linux, we return the high water mark of RSS (VmHWM, reported in kB and
+// converted here to bytes).
+size_t ProcessMetrics::GetPeakWorkingSetSize() const {
+  return ReadProcStatusAndGetFieldAsSizeT(process_, "VmHWM") * 1024;
+}
+
+// Fills |private_bytes| and/or |shared_bytes| (either may be null) with the
+// process's private and shared working-set sizes, in bytes. Returns false if
+// the underlying working-set query fails.
+bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
+                                    size_t* shared_bytes) {
+  WorkingSetKBytes ws_usage;
+  if (!GetWorkingSetKBytes(&ws_usage))
+    return false;
+
+  if (private_bytes != nullptr)
+    *private_bytes = ws_usage.priv * 1024;
+  if (shared_bytes != nullptr)
+    *shared_bytes = ws_usage.shared * 1024;
+
+  return true;
+}
+
+// Fills |ws_usage| with working-set sizes in kB. On Chrome OS the
+// totmaps-based reader is tried first (it can attribute swapped pages);
+// statm is the portable fallback.
+bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
+#if defined(OS_CHROMEOS)
+  if (GetWorkingSetKBytesTotmaps(ws_usage))
+    return true;
+#endif
+  return GetWorkingSetKBytesStatm(ws_usage);
+}
+
+// Initializes the sampler. The first GetCPUUsage() call will only prime the
+// baseline and return 0.
+SystemCpuMetrics::SystemCpuMetrics() : last_cpu_(0) {}
+
+// Returns system-wide CPU usage in percent over the interval since the
+// previous call, computed from the cumulative jiffy count in /proc/stat.
+// The first call only primes the baseline and returns 0. This mirrors the
+// per-process logic in ProcessMetrics::GetCPUUsage() below.
+double SystemCpuMetrics::GetCPUUsage() {
+  TimeTicks time = TimeTicks::Now();
+
+  if (last_cpu_ == 0) {
+    // First call, just set the last values.
+    last_cpu_time_ = time;
+    last_cpu_ = GetSystemCpuTimeSinceBoot();
+    return 0.0;
+  }
+
+  TimeDelta time_delta = time - last_cpu_time_;
+  if (time_delta.is_zero()) {
+    NOTREACHED();
+    return 0.0;
+  }
+
+  int cpu = GetSystemCpuTimeSinceBoot();
+
+  // We have the number of jiffies in the time period. Convert to percentage.
+  // Note this means we will go *over* 100 in the case where multiple threads
+  // are together adding to more than one CPU's worth.
+  TimeDelta cpu_time = internal::ClockTicksToTimeDelta(cpu);
+  TimeDelta last_cpu_time = internal::ClockTicksToTimeDelta(last_cpu_);
+
+  // If the number of threads running in the process has decreased since the
+  // last time this function was called, |last_cpu_time| will be greater than
+  // |cpu_time| which will result in a negative value in the below percentage
+  // calculation. We prevent this by clamping to 0. crbug.com/546565.
+  // This computation is known to be shaky when threads are destroyed between
+  // "last" and "now", but for our current purposes, it's all right.
+  double percentage = 0.0;
+  if (last_cpu_time < cpu_time) {
+    percentage = 100.0 * (cpu_time - last_cpu_time).InSecondsF() /
+        time_delta.InSecondsF();
+  }
+
+  last_cpu_time_ = time;
+  last_cpu_ = cpu;
+
+  return percentage;
+}
+
+// Returns this process's CPU usage in percent over the interval since the
+// previous call, by diffing the summed per-thread jiffy counts from
+// /proc/<pid>/task/*/stat. The first call only primes the baseline and
+// returns 0.
+double ProcessMetrics::GetCPUUsage() {
+  TimeTicks time = TimeTicks::Now();
+
+  if (last_cpu_ == 0) {
+    // First call, just set the last values.
+    last_cpu_time_ = time;
+    last_cpu_ = GetProcessCPU(process_, thread_filter_);
+    return 0.0;
+  }
+
+  TimeDelta time_delta = time - last_cpu_time_;
+  if (time_delta.is_zero()) {
+    NOTREACHED();
+    return 0.0;
+  }
+
+  int cpu = GetProcessCPU(process_, thread_filter_);
+
+  // We have the number of jiffies in the time period. Convert to percentage.
+  // Note this means we will go *over* 100 in the case where multiple threads
+  // are together adding to more than one CPU's worth.
+  TimeDelta cpu_time = internal::ClockTicksToTimeDelta(cpu);
+  TimeDelta last_cpu_time = internal::ClockTicksToTimeDelta(last_cpu_);
+
+  // If the number of threads running in the process has decreased since the
+  // last time this function was called, |last_cpu_time| will be greater than
+  // |cpu_time| which will result in a negative value in the below percentage
+  // calculation. We prevent this by clamping to 0. crbug.com/546565.
+  // This computation is known to be shaky when threads are destroyed between
+  // "last" and "now", but for our current purposes, it's all right.
+  double percentage = 0.0;
+  if (last_cpu_time < cpu_time) {
+    percentage = 100.0 * (cpu_time - last_cpu_time).InSecondsF() /
+        time_delta.InSecondsF();
+  }
+
+  last_cpu_time_ = time;
+  last_cpu_ = cpu;
+
+  return percentage;
+}
+
+// To have /proc/self/io file you must enable CONFIG_TASK_IO_ACCOUNTING
+// in your kernel configuration.
+// Fills |io_counters| from /proc/<pid>/io. Returns false if the file cannot
+// be read (e.g. accounting disabled, or the process has exited).
+bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
+  // Synchronously reading files in /proc does not hit the disk.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  std::string proc_io_contents;
+  FilePath io_file = internal::GetProcPidDir(process_).Append("io");
+  if (!ReadFileToString(io_file, &proc_io_contents))
+    return false;
+
+  // Fix: zero the whole struct up front. Previously only the Other* fields
+  // were cleared, so the read/write counters were left uninitialized when the
+  // kernel omitted a field (e.g. if the process died mid-read).
+  *io_counters = IoCounters();
+
+  StringPairs pairs;
+  SplitStringIntoKeyValuePairs(proc_io_contents, ':', '\n', &pairs);
+  TrimKeyValuePairs(&pairs);
+  for (size_t i = 0; i < pairs.size(); ++i) {
+    const std::string& key = pairs[i].first;
+    const std::string& value_str = pairs[i].second;
+    uint64_t* target_counter = nullptr;
+    if (key == "syscr")
+      target_counter = &io_counters->ReadOperationCount;
+    else if (key == "syscw")
+      target_counter = &io_counters->WriteOperationCount;
+    else if (key == "rchar")
+      target_counter = &io_counters->ReadTransferCount;
+    else if (key == "wchar")
+      target_counter = &io_counters->WriteTransferCount;
+    if (!target_counter)
+      continue;
+    bool converted = StringToUint64(value_str, target_counter);
+    DCHECK(converted);
+  }
+  return true;
+}
+
+#if defined(OS_LINUX)
+// Counts the entries of /proc/<pid>/fd, excluding "." and "..". Returns -1
+// when the directory cannot be opened (e.g. insufficient permissions or the
+// process has exited).
+int ProcessMetrics::GetOpenFdCount() const {
+  // Use /proc/<pid>/fd to count the number of entries there.
+  FilePath fd_path = internal::GetProcPidDir(process_).Append("fd");
+
+  DirReaderPosix dir_reader(fd_path.value().c_str());
+  if (!dir_reader.IsValid())
+    return -1;
+
+  int total_count = 0;
+  while (dir_reader.Next()) {
+    const char* name = dir_reader.name();
+    bool is_dot_entry = strcmp(name, ".") == 0 || strcmp(name, "..") == 0;
+    if (!is_dot_entry)
+      ++total_count;
+  }
+
+  return total_count;
+}
+#endif  // defined(OS_LINUX)
+
+// Private constructor; use the Create*() factories. An empty |thread_filter|
+// means CPU time is accumulated for all threads.
+ProcessMetrics::ProcessMetrics(ProcessHandle process,
+                               ThreadFilterCb thread_filter)
+    : process_(process),
+      last_system_time_(0),
+#if defined(OS_LINUX)
+      last_absolute_idle_wakeups_(0),
+#endif
+      last_cpu_(0),
+      thread_filter_(thread_filter) {
+  processor_count_ = SysInfo::NumberOfProcessors();
+}
+
+#if defined(OS_CHROMEOS)
+// Private, Shared and Proportional working set sizes are obtained from
+// /proc/<pid>/totmaps. Returns false if the file cannot be read or does not
+// match the expected layout.
+bool ProcessMetrics::GetWorkingSetKBytesTotmaps(WorkingSetKBytes *ws_usage)
+    const {
+  // The format of /proc/<pid>/totmaps is:
+  //
+  // Rss:                6120 kB
+  // Pss:                3335 kB
+  // Shared_Clean:       1008 kB
+  // Shared_Dirty:       4012 kB
+  // Private_Clean:         4 kB
+  // Private_Dirty:      1096 kB
+  // Referenced:          XXX kB
+  // Anonymous:           XXX kB
+  // AnonHugePages:       XXX kB
+  // Swap:                XXX kB
+  // Locked:              XXX kB
+  const size_t kPssIndex = (1 * 3) + 1;
+  const size_t kPrivate_CleanIndex = (4 * 3) + 1;
+  const size_t kPrivate_DirtyIndex = (5 * 3) + 1;
+  const size_t kSwapIndex = (9 * 3) + 1;
+
+  std::string totmaps_data;
+  {
+    FilePath totmaps_file = internal::GetProcPidDir(process_).Append("totmaps");
+    ThreadRestrictions::ScopedAllowIO allow_io;
+    bool ret = ReadFileToString(totmaps_file, &totmaps_data);
+    if (!ret || totmaps_data.length() == 0)
+      return false;
+  }
+
+  std::vector<std::string> totmaps_fields = SplitString(
+      totmaps_data, base::kWhitespaceASCII, base::KEEP_WHITESPACE,
+      base::SPLIT_WANT_NONEMPTY);
+
+  // Fix: bail out rather than index out of bounds when the file has fewer
+  // fields than expected (process died mid-read, or kernel format change).
+  if (totmaps_fields.size() <= kSwapIndex)
+    return false;
+
+  DCHECK_EQ("Pss:", totmaps_fields[kPssIndex-1]);
+  DCHECK_EQ("Private_Clean:", totmaps_fields[kPrivate_CleanIndex - 1]);
+  DCHECK_EQ("Private_Dirty:", totmaps_fields[kPrivate_DirtyIndex - 1]);
+  DCHECK_EQ("Swap:", totmaps_fields[kSwapIndex-1]);
+
+  int pss = 0;
+  int private_clean = 0;
+  int private_dirty = 0;
+  int swap = 0;
+  bool ret = true;
+  ret &= StringToInt(totmaps_fields[kPssIndex], &pss);
+  ret &= StringToInt(totmaps_fields[kPrivate_CleanIndex], &private_clean);
+  ret &= StringToInt(totmaps_fields[kPrivate_DirtyIndex], &private_dirty);
+  ret &= StringToInt(totmaps_fields[kSwapIndex], &swap);
+
+  // On ChromeOS swap is to zram. We count this as private / shared, as
+  // increased swap decreases available RAM to user processes, which would
+  // otherwise create surprising results.
+  ws_usage->priv = private_clean + private_dirty + swap;
+  ws_usage->shared = pss + swap;
+  ws_usage->shareable = 0;
+  ws_usage->swapped = swap;
+  return ret;
+}
+#endif
+
+// Private and Shared working set sizes are obtained from /proc/<pid>/statm.
+bool ProcessMetrics::GetWorkingSetKBytesStatm(WorkingSetKBytes* ws_usage)
+    const {
+  // Use statm instead of smaps because smaps is:
+  // a) Large and slow to parse.
+  // b) Unavailable in the SUID sandbox.
+
+  // First we need to get the page size, since everything is measured in pages.
+  // For details, see: man 5 proc.
+  const int page_size_kb = getpagesize() / 1024;
+  if (page_size_kb <= 0)
+    return false;
+
+  std::string statm;
+  {
+    FilePath statm_file = internal::GetProcPidDir(process_).Append("statm");
+    // Synchronously reading files in /proc does not hit the disk.
+    ThreadRestrictions::ScopedAllowIO allow_io;
+    bool ret = ReadFileToString(statm_file, &statm);
+    if (!ret || statm.length() == 0)
+      return false;
+  }
+
+  std::vector<StringPiece> statm_vec = SplitStringPiece(
+      statm, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  if (statm_vec.size() != 7)
+    return false;  // Not the format we expect.
+
+  // Field 1 is resident pages, field 2 is resident shared pages.
+  int statm_rss, statm_shared;
+  bool ret = true;
+  ret &= StringToInt(statm_vec[1], &statm_rss);
+  ret &= StringToInt(statm_vec[2], &statm_shared);
+
+  // Private = resident minus shared, scaled from pages to kB.
+  ws_usage->priv = (statm_rss - statm_shared) * page_size_kb;
+  ws_usage->shared = statm_shared * page_size_kb;
+
+  // Sharable is not calculated, as it does not provide interesting data.
+  ws_usage->shareable = 0;
+
+#if defined(OS_CHROMEOS)
+  // Can't get swapped memory from statm.
+  ws_usage->swapped = 0;
+#endif
+
+  return ret;
+}
+
+// Approximates the system commit charge (KB) from /proc/meminfo as
+// total - free - buffers - cached.
+size_t GetSystemCommitCharge() {
+  SystemMemoryInfoKB meminfo;
+  if (!GetSystemMemoryInfo(&meminfo))
+    return 0;  // 0 doubles as the error value; callers cannot distinguish.
+  return meminfo.total - meminfo.free - meminfo.buffers - meminfo.cached;
+}
+
+// Parses a /proc/<pid>/stat line and returns utime + stime (in clock ticks),
+// or -1 if |input| cannot be parsed.
+int ParseProcStatCPU(const std::string& input) {
+  // |input| may be empty if the process disappeared somehow.
+  // e.g. http://crbug.com/145811.
+  if (input.empty())
+    return -1;
+
+  // The comm field is parenthesized and may itself contain spaces, so scan
+  // from the last ')'; everything after it is strictly space-separated.
+  size_t start = input.find_last_of(')');
+  if (start == input.npos)
+    return -1;
+
+  // Number of spaces remaining until reaching utime's index starting after the
+  // last ')'.
+  int num_spaces_remaining = internal::VM_UTIME - 1;
+
+  size_t i = start;
+  while ((i = input.find(' ', i + 1)) != input.npos) {
+    // Validate the assumption that there aren't any contiguous spaces
+    // in |input| before utime.
+    DCHECK_NE(input[i - 1], ' ');
+    if (--num_spaces_remaining == 0) {
+      int utime = 0;
+      int stime = 0;
+      if (sscanf(&input.data()[i], "%d %d", &utime, &stime) != 2)
+        return -1;
+
+      return utime + stime;
+    }
+  }
+
+  return -1;
+}
+
+// Symlink to the current executable, readable via readlink().
+const char kProcSelfExe[] = "/proc/self/exe";
+
+// Returns the thread count of |process| from the NumThreads field of
+// /proc/<pid>/stat (the int64 field value easily fits in an int).
+int GetNumberOfThreads(ProcessHandle process) {
+  return internal::ReadProcStatsAndGetFieldAsInt64(process,
+                                                   internal::VM_NUMTHREADS);
+}
+
+namespace {
+
+// The format of /proc/diskstats is:
+// Device major number
+// Device minor number
+// Device name
+// Field 1 -- # of reads completed
+// This is the total number of reads completed successfully.
+// Field 2 -- # of reads merged, field 6 -- # of writes merged
+// Reads and writes which are adjacent to each other may be merged for
+// efficiency. Thus two 4K reads may become one 8K read before it is
+// ultimately handed to the disk, and so it will be counted (and queued)
+// as only one I/O. This field lets you know how often this was done.
+// Field 3 -- # of sectors read
+// This is the total number of sectors read successfully.
+// Field 4 -- # of milliseconds spent reading
+// This is the total number of milliseconds spent by all reads (as
+// measured from __make_request() to end_that_request_last()).
+// Field 5 -- # of writes completed
+// This is the total number of writes completed successfully.
+// Field 6 -- # of writes merged
+// See the description of field 2.
+// Field 7 -- # of sectors written
+// This is the total number of sectors written successfully.
+// Field 8 -- # of milliseconds spent writing
+// This is the total number of milliseconds spent by all writes (as
+// measured from __make_request() to end_that_request_last()).
+// Field 9 -- # of I/Os currently in progress
+// The only field that should go to zero. Incremented as requests are
+// given to appropriate struct request_queue and decremented as they
+// finish.
+// Field 10 -- # of milliseconds spent doing I/Os
+// This field increases so long as field 9 is nonzero.
+// Field 11 -- weighted # of milliseconds spent doing I/Os
+// This field is incremented at each I/O start, I/O completion, I/O
+// merge, or read of these stats by the number of I/Os in progress
+// (field 9) times the number of milliseconds spent doing I/O since the
+// last update of this field. This can provide an easy measure of both
+// I/O completion time and the backlog that may be accumulating.
+
+// Zero-based indices into a whitespace-split /proc/diskstats line; indices
+// 0 and 1 hold the device major and minor numbers (see the field list above).
+const size_t kDiskDriveName = 2;
+const size_t kDiskReads = 3;
+const size_t kDiskReadsMerged = 4;
+const size_t kDiskSectorsRead = 5;
+const size_t kDiskReadTime = 6;
+const size_t kDiskWrites = 7;
+const size_t kDiskWritesMerged = 8;
+const size_t kDiskSectorsWritten = 9;
+const size_t kDiskWriteTime = 10;
+const size_t kDiskIO = 11;
+const size_t kDiskIOTime = 12;
+const size_t kDiskWeightedIOTime = 13;
+
+} // namespace
+
+// Zero-initializes all counters. The Chrome OS GEM fields default to -1
+// ("not available") because 0 is a legitimate reading for them.
+SystemMemoryInfoKB::SystemMemoryInfoKB() {
+  total = 0;
+  free = 0;
+#if defined(OS_LINUX)
+  available = 0;
+#endif
+  buffers = 0;
+  cached = 0;
+  active_anon = 0;
+  inactive_anon = 0;
+  active_file = 0;
+  inactive_file = 0;
+  swap_total = 0;
+  swap_free = 0;
+  dirty = 0;
+
+  pswpin = 0;
+  pswpout = 0;
+  pgmajfault = 0;
+
+// Use #if defined() for consistency with every other conditional in this
+// file (was #ifdef).
+#if defined(OS_CHROMEOS)
+  shmem = 0;
+  slab = 0;
+  gem_objects = -1;
+  gem_size = -1;
+#endif
+}
+
+SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
+ default;
+
+// Serializes all counters into a DictionaryValue, adding a derived
+// "swap_used" (swap_total - swap_free) entry.
+std::unique_ptr<Value> SystemMemoryInfoKB::ToValue() const {
+  std::unique_ptr<DictionaryValue> res(new DictionaryValue());
+
+  res->SetInteger("total", total);
+  res->SetInteger("free", free);
+#if defined(OS_LINUX)
+  res->SetInteger("available", available);
+#endif
+  res->SetInteger("buffers", buffers);
+  res->SetInteger("cached", cached);
+  res->SetInteger("active_anon", active_anon);
+  res->SetInteger("inactive_anon", inactive_anon);
+  res->SetInteger("active_file", active_file);
+  res->SetInteger("inactive_file", inactive_file);
+  res->SetInteger("swap_total", swap_total);
+  res->SetInteger("swap_free", swap_free);
+  res->SetInteger("swap_used", swap_total - swap_free);
+  res->SetInteger("dirty", dirty);
+  res->SetInteger("pswpin", pswpin);
+  res->SetInteger("pswpout", pswpout);
+  res->SetInteger("pgmajfault", pgmajfault);
+// Use #if defined() for consistency with every other conditional in this
+// file (was #ifdef).
+#if defined(OS_CHROMEOS)
+  res->SetInteger("shmem", shmem);
+  res->SetInteger("slab", slab);
+  res->SetInteger("gem_objects", gem_objects);
+  res->SetInteger("gem_size", gem_size);
+#endif
+
+  // std::move is required: the return type converts unique_ptr<DictionaryValue>
+  // to unique_ptr<Value>, which blocks the implicit return-value move here.
+  return std::move(res);
+}
+
+// exposed for testing
+// Parses /proc/meminfo contents into |meminfo|. Returns true iff a nonzero
+// MemTotal was found; unrecognized lines are ignored and absent fields keep
+// their previous values.
+bool ParseProcMeminfo(const std::string& meminfo_data,
+                      SystemMemoryInfoKB* meminfo) {
+  // The format of /proc/meminfo is:
+  //
+  // MemTotal: 8235324 kB
+  // MemFree: 1628304 kB
+  // Buffers: 429596 kB
+  // Cached: 4728232 kB
+  // ...
+  // There is no guarantee on the ordering or position
+  // though it doesn't appear to change very often
+
+  // As a basic sanity check, let's make sure we at least get non-zero
+  // MemTotal value
+  meminfo->total = 0;
+
+  for (const StringPiece& line : SplitStringPiece(
+           meminfo_data, "\n", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY)) {
+    std::vector<StringPiece> tokens = SplitStringPiece(
+        line, kWhitespaceASCII, TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+    // HugePages_* only has a number and no suffix so we can't rely on
+    // there being exactly 3 tokens.
+    if (tokens.size() <= 1) {
+      DLOG(WARNING) << "meminfo: tokens: " << tokens.size()
+                    << " malformed line: " << line.as_string();
+      continue;
+    }
+
+    // Map the label (tokens[0]) to the destination field, then parse the
+    // value (tokens[1], in KB) into it below.
+    int* target = NULL;
+    if (tokens[0] == "MemTotal:")
+      target = &meminfo->total;
+    else if (tokens[0] == "MemFree:")
+      target = &meminfo->free;
+#if defined(OS_LINUX)
+    else if (tokens[0] == "MemAvailable:")
+      target = &meminfo->available;
+#endif
+    else if (tokens[0] == "Buffers:")
+      target = &meminfo->buffers;
+    else if (tokens[0] == "Cached:")
+      target = &meminfo->cached;
+    else if (tokens[0] == "Active(anon):")
+      target = &meminfo->active_anon;
+    else if (tokens[0] == "Inactive(anon):")
+      target = &meminfo->inactive_anon;
+    else if (tokens[0] == "Active(file):")
+      target = &meminfo->active_file;
+    else if (tokens[0] == "Inactive(file):")
+      target = &meminfo->inactive_file;
+    else if (tokens[0] == "SwapTotal:")
+      target = &meminfo->swap_total;
+    else if (tokens[0] == "SwapFree:")
+      target = &meminfo->swap_free;
+    else if (tokens[0] == "Dirty:")
+      target = &meminfo->dirty;
+#if defined(OS_CHROMEOS)
+    // Chrome OS has a tweaked kernel that allows us to query Shmem, which is
+    // usually video memory otherwise invisible to the OS.
+    else if (tokens[0] == "Shmem:")
+      target = &meminfo->shmem;
+    else if (tokens[0] == "Slab:")
+      target = &meminfo->slab;
+#endif
+    if (target)
+      StringToInt(tokens[1], target);
+  }
+
+  // Make sure we got a valid MemTotal.
+  return meminfo->total > 0;
+}
+
+// exposed for testing
+// Parses /proc/vmstat contents, extracting only pswpin, pswpout and
+// pgmajfault into |meminfo|. Always returns true; missing fields keep their
+// previous values.
+bool ParseProcVmstat(const std::string& vmstat_data,
+                     SystemMemoryInfoKB* meminfo) {
+  // The format of /proc/vmstat is:
+  //
+  // nr_free_pages 299878
+  // nr_inactive_anon 239863
+  // nr_active_anon 1318966
+  // nr_inactive_file 2015629
+  // ...
+  //
+  // We iterate through the whole file because the position of the
+  // fields are dependent on the kernel version and configuration.
+
+  for (const StringPiece& line : SplitStringPiece(
+           vmstat_data, "\n", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY)) {
+    std::vector<StringPiece> tokens = SplitStringPiece(
+        line, " ", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
+    if (tokens.size() != 2)
+      continue;
+
+    if (tokens[0] == "pswpin") {
+      StringToInt(tokens[1], &meminfo->pswpin);
+    } else if (tokens[0] == "pswpout") {
+      StringToInt(tokens[1], &meminfo->pswpout);
+    } else if (tokens[0] == "pgmajfault") {
+      StringToInt(tokens[1], &meminfo->pgmajfault);
+    }
+  }
+
+  return true;
+}
+
+// Fills |meminfo| from /proc/meminfo and /proc/vmstat; on Chrome OS also
+// folds in GEM (and, on ARM, Mali) graphics memory from debugfs. Returns
+// false if either proc file cannot be read or parsed; the graphics readings
+// are best-effort and never cause failure.
+bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
+  // Synchronously reading files in /proc and /sys are safe.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  // Used memory is: total - free - buffers - caches
+  FilePath meminfo_file("/proc/meminfo");
+  std::string meminfo_data;
+  if (!ReadFileToString(meminfo_file, &meminfo_data)) {
+    DLOG(WARNING) << "Failed to open " << meminfo_file.value();
+    return false;
+  }
+
+  if (!ParseProcMeminfo(meminfo_data, meminfo)) {
+    DLOG(WARNING) << "Failed to parse " << meminfo_file.value();
+    return false;
+  }
+
+#if defined(OS_CHROMEOS)
+  // Report on Chrome OS GEM object graphics memory. /run/debugfs_gpu is a
+  // bind mount into /sys/kernel/debug and synchronously reading the in-memory
+  // files in /sys is fast.
+#if defined(ARCH_CPU_ARM_FAMILY)
+  FilePath geminfo_file("/run/debugfs_gpu/exynos_gem_objects");
+#else
+  FilePath geminfo_file("/run/debugfs_gpu/i915_gem_objects");
+#endif
+  std::string geminfo_data;
+  // -1 means "not available" (0 is a valid reading).
+  meminfo->gem_objects = -1;
+  meminfo->gem_size = -1;
+  if (ReadFileToString(geminfo_file, &geminfo_data)) {
+    int gem_objects = -1;
+    long long gem_size = -1;
+    int num_res = sscanf(geminfo_data.c_str(),
+                         "%d objects, %lld bytes",
+                         &gem_objects, &gem_size);
+    if (num_res == 2) {
+      meminfo->gem_objects = gem_objects;
+      meminfo->gem_size = gem_size;
+    }
+  }
+
+#if defined(ARCH_CPU_ARM_FAMILY)
+  // Incorporate Mali graphics memory if present.
+  FilePath mali_memory_file("/sys/class/misc/mali0/device/memory");
+  std::string mali_memory_data;
+  if (ReadFileToString(mali_memory_file, &mali_memory_data)) {
+    long long mali_size = -1;
+    int num_res = sscanf(mali_memory_data.c_str(), "%lld bytes", &mali_size);
+    if (num_res == 1)
+      meminfo->gem_size += mali_size;
+  }
+#endif  // defined(ARCH_CPU_ARM_FAMILY)
+#endif  // defined(OS_CHROMEOS)
+
+  FilePath vmstat_file("/proc/vmstat");
+  std::string vmstat_data;
+  if (!ReadFileToString(vmstat_file, &vmstat_data)) {
+    DLOG(WARNING) << "Failed to open " << vmstat_file.value();
+    return false;
+  }
+  if (!ParseProcVmstat(vmstat_data, meminfo)) {
+    DLOG(WARNING) << "Failed to parse " << vmstat_file.value();
+    return false;
+  }
+
+  return true;
+}
+
+// Zero-initializes all disk counters.
+SystemDiskInfo::SystemDiskInfo() {
+  reads = 0;
+  reads_merged = 0;
+  sectors_read = 0;
+  read_time = 0;
+  writes = 0;
+  writes_merged = 0;
+  sectors_written = 0;
+  write_time = 0;
+  io = 0;
+  io_time = 0;
+  weighted_io_time = 0;
+}
+
+SystemDiskInfo::SystemDiskInfo(const SystemDiskInfo& other) = default;
+
+// Serializes all disk counters to a DictionaryValue.
+std::unique_ptr<Value> SystemDiskInfo::ToValue() const {
+  std::unique_ptr<DictionaryValue> res(new DictionaryValue());
+
+  // Write out uint64_t variables as doubles.
+  // Note: this may discard some precision, but for JS there's no other option.
+  res->SetDouble("reads", static_cast<double>(reads));
+  res->SetDouble("reads_merged", static_cast<double>(reads_merged));
+  res->SetDouble("sectors_read", static_cast<double>(sectors_read));
+  res->SetDouble("read_time", static_cast<double>(read_time));
+  res->SetDouble("writes", static_cast<double>(writes));
+  res->SetDouble("writes_merged", static_cast<double>(writes_merged));
+  res->SetDouble("sectors_written", static_cast<double>(sectors_written));
+  res->SetDouble("write_time", static_cast<double>(write_time));
+  res->SetDouble("io", static_cast<double>(io));
+  res->SetDouble("io_time", static_cast<double>(io_time));
+  res->SetDouble("weighted_io_time", static_cast<double>(weighted_io_time));
+
+  // std::move is required for the unique_ptr<DictionaryValue> -> Value
+  // converting return.
+  return std::move(res);
+}
+
+// Returns true if |candidate| names a whole-disk device worth aggregating:
+// [hsv]d[a-z]+ (IDE/SCSI/virtio disks) or mmcblk[0-9]+ (eMMC/SD). Partition
+// names such as "sda1" are rejected so their counters are not double-counted
+// with the parent disk's.
+bool IsValidDiskName(const std::string& candidate) {
+  if (candidate.length() < 3)
+    return false;
+  if (candidate[1] == 'd' &&
+      (candidate[0] == 'h' || candidate[0] == 's' || candidate[0] == 'v')) {
+    // [hsv]d[a-z]+ case
+    for (size_t i = 2; i < candidate.length(); ++i) {
+      // Cast to unsigned char: passing a negative char (non-ASCII byte) to
+      // islower() is undefined behavior.
+      if (!islower(static_cast<unsigned char>(candidate[i])))
+        return false;
+    }
+    return true;
+  }
+
+  const char kMMCName[] = "mmcblk";
+  const size_t kMMCNameLen = strlen(kMMCName);
+  if (candidate.length() < kMMCNameLen + 1)
+    return false;
+  if (candidate.compare(0, kMMCNameLen, kMMCName) != 0)
+    return false;
+
+  // mmcblk[0-9]+ case
+  for (size_t i = kMMCNameLen; i < candidate.length(); ++i) {
+    // Same unsigned-char cast rationale as above.
+    if (!isdigit(static_cast<unsigned char>(candidate[i])))
+      return false;
+  }
+  return true;
+}
+
+// Aggregates /proc/diskstats counters over all whole-disk devices (per
+// IsValidDiskName) into |diskinfo|. Returns false if the file cannot be read
+// or is empty.
+bool GetSystemDiskInfo(SystemDiskInfo* diskinfo) {
+  // Synchronously reading files in /proc does not hit the disk.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  FilePath diskinfo_file("/proc/diskstats");
+  std::string diskinfo_data;
+  if (!ReadFileToString(diskinfo_file, &diskinfo_data)) {
+    DLOG(WARNING) << "Failed to open " << diskinfo_file.value();
+    return false;
+  }
+
+  std::vector<StringPiece> diskinfo_lines = SplitStringPiece(
+      diskinfo_data, "\n", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
+  if (diskinfo_lines.size() == 0) {
+    DLOG(WARNING) << "No lines found";
+    return false;
+  }
+
+  diskinfo->reads = 0;
+  diskinfo->reads_merged = 0;
+  diskinfo->sectors_read = 0;
+  diskinfo->read_time = 0;
+  diskinfo->writes = 0;
+  diskinfo->writes_merged = 0;
+  diskinfo->sectors_written = 0;
+  diskinfo->write_time = 0;
+  diskinfo->io = 0;
+  diskinfo->io_time = 0;
+  diskinfo->weighted_io_time = 0;
+
+  uint64_t reads = 0;
+  uint64_t reads_merged = 0;
+  uint64_t sectors_read = 0;
+  uint64_t read_time = 0;
+  uint64_t writes = 0;
+  uint64_t writes_merged = 0;
+  uint64_t sectors_written = 0;
+  uint64_t write_time = 0;
+  uint64_t io = 0;
+  uint64_t io_time = 0;
+  uint64_t weighted_io_time = 0;
+
+  for (const StringPiece& line : diskinfo_lines) {
+    std::vector<StringPiece> disk_fields = SplitStringPiece(
+        line, kWhitespaceASCII, TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+
+    // Skip truncated lines so the field indexing below stays in bounds.
+    if (disk_fields.size() <= kDiskWeightedIOTime)
+      continue;
+
+    // Fields may have overflowed and reset to zero.
+    if (IsValidDiskName(disk_fields[kDiskDriveName].as_string())) {
+      // Was "§ors_read" / "§ors_written": HTML-entity corruption of
+      // "&sect" in "&sectors_*"; restored the intended address-of arguments.
+      StringToUint64(disk_fields[kDiskReads], &reads);
+      StringToUint64(disk_fields[kDiskReadsMerged], &reads_merged);
+      StringToUint64(disk_fields[kDiskSectorsRead], &sectors_read);
+      StringToUint64(disk_fields[kDiskReadTime], &read_time);
+      StringToUint64(disk_fields[kDiskWrites], &writes);
+      StringToUint64(disk_fields[kDiskWritesMerged], &writes_merged);
+      StringToUint64(disk_fields[kDiskSectorsWritten], &sectors_written);
+      StringToUint64(disk_fields[kDiskWriteTime], &write_time);
+      StringToUint64(disk_fields[kDiskIO], &io);
+      StringToUint64(disk_fields[kDiskIOTime], &io_time);
+      StringToUint64(disk_fields[kDiskWeightedIOTime], &weighted_io_time);
+
+      diskinfo->reads += reads;
+      diskinfo->reads_merged += reads_merged;
+      diskinfo->sectors_read += sectors_read;
+      diskinfo->read_time += read_time;
+      diskinfo->writes += writes;
+      diskinfo->writes_merged += writes_merged;
+      diskinfo->sectors_written += sectors_written;
+      diskinfo->write_time += write_time;
+      diskinfo->io += io;
+      diskinfo->io_time += io_time;
+      diskinfo->weighted_io_time += weighted_io_time;
+    }
+  }
+
+  return true;
+}
+
+#if defined(OS_CHROMEOS)
+// Serializes zram swap counters to a DictionaryValue, adding a derived
+// "compression_ratio" (orig/compr, or 0 when no data is compressed).
+std::unique_ptr<Value> SwapInfo::ToValue() const {
+  std::unique_ptr<DictionaryValue> res(new DictionaryValue());
+
+  // Write out uint64_t variables as doubles.
+  // Note: this may discard some precision, but for JS there's no other option.
+  res->SetDouble("num_reads", static_cast<double>(num_reads));
+  res->SetDouble("num_writes", static_cast<double>(num_writes));
+  res->SetDouble("orig_data_size", static_cast<double>(orig_data_size));
+  res->SetDouble("compr_data_size", static_cast<double>(compr_data_size));
+  res->SetDouble("mem_used_total", static_cast<double>(mem_used_total));
+  if (compr_data_size > 0)
+    res->SetDouble("compression_ratio", static_cast<double>(orig_data_size) /
+                                        static_cast<double>(compr_data_size));
+  else
+    res->SetDouble("compression_ratio", 0);
+
+  // std::move is required for the unique_ptr<DictionaryValue> -> Value
+  // converting return.
+  return std::move(res);
+}
+
+// Fills |swap_info| from the zram sysfs counters. Reports all-zero values
+// when only the startup page has been compressed (see below).
+void GetSwapInfo(SwapInfo* swap_info) {
+  // Synchronously reading files in /sys/block/zram0 does not hit the disk.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  FilePath zram_path("/sys/block/zram0");
+  uint64_t orig_data_size =
+      ReadFileToUint64(zram_path.Append("orig_data_size"));
+  if (orig_data_size <= 4096) {
+    // A single page is compressed at startup, and has a high compression
+    // ratio. We ignore this as it doesn't indicate any real swapping.
+    swap_info->orig_data_size = 0;
+    swap_info->num_reads = 0;
+    swap_info->num_writes = 0;
+    swap_info->compr_data_size = 0;
+    swap_info->mem_used_total = 0;
+    return;
+  }
+  swap_info->orig_data_size = orig_data_size;
+  swap_info->num_reads = ReadFileToUint64(zram_path.Append("num_reads"));
+  swap_info->num_writes = ReadFileToUint64(zram_path.Append("num_writes"));
+  swap_info->compr_data_size =
+      ReadFileToUint64(zram_path.Append("compr_data_size"));
+  swap_info->mem_used_total =
+      ReadFileToUint64(zram_path.Append("mem_used_total"));
+}
+#endif // defined(OS_CHROMEOS)
+
+#if defined(OS_LINUX)
+// Reads the scheduler wakeup counter from /proc/<pid>/sched and converts it
+// to a per-second rate relative to the previous call; returns 0 if the stat
+// cannot be read.
+int ProcessMetrics::GetIdleWakeupsPerSecond() {
+  uint64_t wake_ups;
+  const char kWakeupStat[] = "se.statistics.nr_wakeups";
+  return ReadProcSchedAndGetFieldAsUint64(process_, kWakeupStat, &wake_ups) ?
+      CalculateIdleWakeupsPerSecond(wake_ups) : 0;
+}
+#endif // defined(OS_LINUX)
+
+} // namespace base
diff --git a/libchrome/base/process/process_metrics_mac.cc b/libchrome/base/process/process_metrics_mac.cc
new file mode 100644
index 0000000..8b5d564
--- /dev/null
+++ b/libchrome/base/process/process_metrics_mac.cc
@@ -0,0 +1,399 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_metrics.h"
+
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#include <mach/shared_region.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/sysctl.h>
+
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/mac/mach_logging.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/sys_info.h"
+
+#if !defined(TASK_POWER_INFO)
+// Doesn't exist in the 10.6 or 10.7 SDKs.
+// Local fallback declarations mirroring the 10.8+ <mach/task_info.h> layout
+// so GetIdleWakeupsPerSecond() compiles against older SDKs.
+#define TASK_POWER_INFO 21
+struct task_power_info {
+  uint64_t total_user;
+  uint64_t total_system;
+  uint64_t task_interrupt_wakeups;
+  uint64_t task_platform_idle_wakeups;
+  uint64_t task_timer_wakeups_bin_1;
+  uint64_t task_timer_wakeups_bin_2;
+};
+typedef struct task_power_info task_power_info_data_t;
+typedef struct task_power_info *task_power_info_t;
+#define TASK_POWER_INFO_COUNT ((mach_msg_type_number_t) \
+    (sizeof (task_power_info_data_t) / sizeof (natural_t)))
+#endif
+
+namespace base {
+
+namespace {
+
+// Fills |task_info_data| for |task| via task_info(TASK_BASIC_INFO_64).
+// Returns false for MACH_PORT_NULL or when the kernel call fails.
+bool GetTaskInfo(mach_port_t task, task_basic_info_64* task_info_data) {
+  if (task == MACH_PORT_NULL)
+    return false;
+  mach_msg_type_number_t count = TASK_BASIC_INFO_64_COUNT;
+  kern_return_t kr = task_info(task,
+                               TASK_BASIC_INFO_64,
+                               reinterpret_cast<task_info_t>(task_info_data),
+                               &count);
+  // Most likely cause for failure: |task| is a zombie.
+  return kr == KERN_SUCCESS;
+}
+
+// Fetches the CPU type via the "sysctl.proc_cputype" sysctl. The pid
+// parameter is unused: the sysctl reports for the calling process.
+bool GetCPUTypeForProcess(pid_t /* pid */, cpu_type_t* cpu_type) {
+  size_t len = sizeof(*cpu_type);
+  int result = sysctlbyname("sysctl.proc_cputype",
+                            cpu_type,
+                            &len,
+                            NULL,
+                            0);
+  if (result != 0) {
+    // The previous adjacent literals ""sysctl.proc_cputype"" concatenated
+    // into an unquoted name; escape the quotes so the log reads as intended.
+    DPLOG(ERROR) << "sysctlbyname(\"sysctl.proc_cputype\")";
+    return false;
+  }
+
+  return true;
+}
+
+// Returns true if |addr| falls within the dyld shared-cache region for the
+// given CPU type; only i386 and x86_64 are recognized.
+bool IsAddressInSharedRegion(mach_vm_address_t addr, cpu_type_t type) {
+  if (type == CPU_TYPE_I386) {
+    return addr >= SHARED_REGION_BASE_I386 &&
+           addr < (SHARED_REGION_BASE_I386 + SHARED_REGION_SIZE_I386);
+  } else if (type == CPU_TYPE_X86_64) {
+    return addr >= SHARED_REGION_BASE_X86_64 &&
+           addr < (SHARED_REGION_BASE_X86_64 + SHARED_REGION_SIZE_X86_64);
+  } else {
+    return false;
+  }
+}
+
+} // namespace
+
+// On Mac only total and free are populated (see GetSystemMemoryInfo below).
+SystemMemoryInfoKB::SystemMemoryInfoKB() {
+  total = 0;
+  free = 0;
+}
+
+SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
+ default;
+
+// Getting a mach task from a pid for another process requires permissions in
+// general, so there doesn't really seem to be a way to do these (and spinning
+// up ps to fetch each stats seems dangerous to put in a base api for anyone to
+// call). Child processes ipc their port, so return something if available,
+// otherwise return 0.
+
+// static
+// Creates a metrics object for |process|; |port_provider| supplies the Mach
+// task port for processes other than the current one. Caller owns the result.
+ProcessMetrics* ProcessMetrics::CreateProcessMetrics(
+    ProcessHandle process,
+    PortProvider* port_provider) {
+  return new ProcessMetrics(process, port_provider);
+}
+
+// Returns the task's virtual address space size in bytes, or 0 on failure.
+size_t ProcessMetrics::GetPagefileUsage() const {
+  task_basic_info_64 task_info_data;
+  if (!GetTaskInfo(TaskForPid(process_), &task_info_data))
+    return 0;
+  return task_info_data.virtual_size;
+}
+
+// Peak usage is not tracked on Mac; always returns 0.
+size_t ProcessMetrics::GetPeakPagefileUsage() const {
+  return 0;
+}
+
+// Returns the task's resident set size in bytes, or 0 on failure.
+size_t ProcessMetrics::GetWorkingSetSize() const {
+  task_basic_info_64 task_info_data;
+  if (!GetTaskInfo(TaskForPid(process_), &task_info_data))
+    return 0;
+  return task_info_data.resident_size;
+}
+
+// Peak usage is not tracked on Mac; always returns 0.
+size_t ProcessMetrics::GetPeakWorkingSetSize() const {
+  return 0;
+}
+
+// This is a rough approximation of the algorithm that libtop uses.
+// private_bytes is the size of private resident memory.
+// shared_bytes is the size of shared resident memory.
+// Either out-parameter may be null; returns false if the task port or CPU
+// type cannot be obtained or a VM region query fails.
+bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
+                                    size_t* shared_bytes) {
+  size_t private_pages_count = 0;
+  size_t shared_pages_count = 0;
+
+  // Nothing requested: trivially succeed without walking the address space.
+  if (!private_bytes && !shared_bytes)
+    return true;
+
+  mach_port_t task = TaskForPid(process_);
+  if (task == MACH_PORT_NULL) {
+    DLOG(ERROR) << "Invalid process";
+    return false;
+  }
+
+  cpu_type_t cpu_type;
+  if (!GetCPUTypeForProcess(process_, &cpu_type))
+    return false;
+
+  // The same region can be referenced multiple times. To avoid double counting
+  // we need to keep track of which regions we've already counted.
+  base::hash_set<int> seen_objects;
+
+  // We iterate through each VM region in the task's address map. For shared
+  // memory we add up all the pages that are marked as shared. Like libtop we
+  // try to avoid counting pages that are also referenced by other tasks. Since
+  // we don't have access to the VM regions of other tasks the only hint we have
+  // is if the address is in the shared region area.
+  //
+  // Private memory is much simpler. We simply count the pages that are marked
+  // as private or copy on write (COW).
+  //
+  // See libtop_update_vm_regions in
+  // http://www.opensource.apple.com/source/top/top-67/libtop.c
+  mach_vm_size_t size = 0;
+  for (mach_vm_address_t address = MACH_VM_MIN_ADDRESS;; address += size) {
+    vm_region_top_info_data_t info;
+    mach_msg_type_number_t info_count = VM_REGION_TOP_INFO_COUNT;
+    mach_port_t object_name;
+    kern_return_t kr = mach_vm_region(task,
+                                      &address,
+                                      &size,
+                                      VM_REGION_TOP_INFO,
+                                      reinterpret_cast<vm_region_info_t>(&info),
+                                      &info_count,
+                                      &object_name);
+    if (kr == KERN_INVALID_ADDRESS) {
+      // We're at the end of the address space.
+      break;
+    } else if (kr != KERN_SUCCESS) {
+      MACH_DLOG(ERROR, kr) << "mach_vm_region";
+      return false;
+    }
+
+    // The kernel always returns a null object for VM_REGION_TOP_INFO, but
+    // balance it with a deallocate in case this ever changes. See 10.9.2
+    // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
+    mach_port_deallocate(mach_task_self(), object_name);
+
+    // Skip shared-cache mappings that other tasks likely also reference.
+    if (IsAddressInSharedRegion(address, cpu_type) &&
+        info.share_mode != SM_PRIVATE)
+      continue;
+
+    // A COW region with a single reference is effectively private.
+    if (info.share_mode == SM_COW && info.ref_count == 1)
+      info.share_mode = SM_PRIVATE;
+
+    switch (info.share_mode) {
+      case SM_PRIVATE:
+        private_pages_count += info.private_pages_resident;
+        private_pages_count += info.shared_pages_resident;
+        break;
+      case SM_COW:
+        private_pages_count += info.private_pages_resident;
+        // Fall through
+      case SM_SHARED:
+        if (seen_objects.count(info.obj_id) == 0) {
+          // Only count the first reference to this region.
+          seen_objects.insert(info.obj_id);
+          shared_pages_count += info.shared_pages_resident;
+        }
+        break;
+      default:
+        break;
+    }
+  }
+
+  if (private_bytes)
+    *private_bytes = private_pages_count * PAGE_SIZE;
+  if (shared_bytes)
+    *shared_bytes = shared_pages_count * PAGE_SIZE;
+
+  return true;
+}
+
+// Fills |usage| with committed-memory figures; resets it to zeros on failure.
+void ProcessMetrics::GetCommittedKBytes(CommittedKBytes* usage) const {
+  WorkingSetKBytes unused;
+  if (!GetCommittedAndWorkingSetKBytes(usage, &unused)) {
+    *usage = CommittedKBytes();
+  }
+}
+
+// Fills |ws_usage| with working-set figures; returns false on failure.
+bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
+  CommittedKBytes unused;
+  return GetCommittedAndWorkingSetKBytes(&unused, ws_usage);
+}
+
+// Derives both committed and working-set figures (in KB) from a single
+// TASK_BASIC_INFO_64 query. The mapped/image/shareable/shared breakdowns are
+// not available from this API and are reported as 0.
+bool ProcessMetrics::GetCommittedAndWorkingSetKBytes(
+    CommittedKBytes* usage,
+    WorkingSetKBytes* ws_usage) const {
+  task_basic_info_64 task_info_data;
+  if (!GetTaskInfo(TaskForPid(process_), &task_info_data))
+    return false;
+
+  usage->priv = task_info_data.virtual_size / 1024;
+  usage->mapped = 0;
+  usage->image = 0;
+
+  ws_usage->priv = task_info_data.resident_size / 1024;
+  ws_usage->shareable = 0;
+  ws_usage->shared = 0;
+
+  return true;
+}
+
+// Converts a Mach time_value_t (a) into a struct timeval (r).
+#define TIME_VALUE_TO_TIMEVAL(a, r) do {  \
+  (r)->tv_sec = (a)->seconds;             \
+  (r)->tv_usec = (a)->microseconds;       \
+} while (0)
+
+// Returns the CPU usage percentage since the previous call (may exceed 100
+// on multi-core machines). The first call only primes the last-sample state
+// and returns 0.
+double ProcessMetrics::GetCPUUsage() {
+  mach_port_t task = TaskForPid(process_);
+  if (task == MACH_PORT_NULL)
+    return 0;
+
+  // Libtop explicitly loops over the threads (libtop_pinfo_update_cpu_usage()
+  // in libtop.c), but this is more concise and gives the same results:
+  task_thread_times_info thread_info_data;
+  mach_msg_type_number_t thread_info_count = TASK_THREAD_TIMES_INFO_COUNT;
+  kern_return_t kr = task_info(task,
+                               TASK_THREAD_TIMES_INFO,
+                               reinterpret_cast<task_info_t>(&thread_info_data),
+                               &thread_info_count);
+  if (kr != KERN_SUCCESS) {
+    // Most likely cause: |task| is a zombie.
+    return 0;
+  }
+
+  task_basic_info_64 task_info_data;
+  if (!GetTaskInfo(task, &task_info_data))
+    return 0;
+
+  /* Set total_time. */
+  // thread info contains live time...
+  struct timeval user_timeval, system_timeval, task_timeval;
+  TIME_VALUE_TO_TIMEVAL(&thread_info_data.user_time, &user_timeval);
+  TIME_VALUE_TO_TIMEVAL(&thread_info_data.system_time, &system_timeval);
+  timeradd(&user_timeval, &system_timeval, &task_timeval);
+
+  // ... task info contains terminated time.
+  TIME_VALUE_TO_TIMEVAL(&task_info_data.user_time, &user_timeval);
+  TIME_VALUE_TO_TIMEVAL(&task_info_data.system_time, &system_timeval);
+  timeradd(&user_timeval, &task_timeval, &task_timeval);
+  timeradd(&system_timeval, &task_timeval, &task_timeval);
+
+  TimeTicks time = TimeTicks::Now();
+  int64_t task_time = TimeValToMicroseconds(task_timeval);
+
+  if (last_system_time_ == 0) {
+    // First call, just set the last values.
+    last_cpu_time_ = time;
+    last_system_time_ = task_time;
+    return 0;
+  }
+
+  int64_t system_time_delta = task_time - last_system_time_;
+  int64_t time_delta = (time - last_cpu_time_).InMicroseconds();
+  // NOTE(review): 0U vs. int64_t mixes signedness in this comparison; the
+  // runtime check below keeps release builds safe regardless.
+  DCHECK_NE(0U, time_delta);
+  if (time_delta == 0)
+    return 0;
+
+  last_cpu_time_ = time;
+  last_system_time_ = task_time;
+
+  return static_cast<double>(system_time_delta * 100.0) / time_delta;
+}
+
+// Converts the task's platform idle wakeup counter into a per-second rate
+// relative to the previous call; returns 0 when the counter is unavailable.
+int ProcessMetrics::GetIdleWakeupsPerSecond() {
+  mach_port_t task = TaskForPid(process_);
+  if (task == MACH_PORT_NULL)
+    return 0;
+
+  task_power_info power_info_data;
+  mach_msg_type_number_t power_info_count = TASK_POWER_INFO_COUNT;
+  kern_return_t kr = task_info(task,
+                               TASK_POWER_INFO,
+                               reinterpret_cast<task_info_t>(&power_info_data),
+                               &power_info_count);
+  if (kr != KERN_SUCCESS) {
+    // Most likely cause: |task| is a zombie, or this is on a pre-10.8.4 system
+    // where TASK_POWER_INFO isn't supported yet.
+    return 0;
+  }
+  return CalculateIdleWakeupsPerSecond(
+      power_info_data.task_platform_idle_wakeups);
+}
+
+// I/O counters are not available on Mac; always reports failure.
+bool ProcessMetrics::GetIOCounters(IoCounters* /* io_counters */) const {
+  return false;
+}
+
+// |port_provider| may be null; TaskForPid() then only works for the current
+// process.
+ProcessMetrics::ProcessMetrics(ProcessHandle process,
+                               PortProvider* port_provider)
+    : process_(process),
+      last_system_time_(0),
+      last_absolute_idle_wakeups_(0),
+      port_provider_(port_provider) {
+  processor_count_ = SysInfo::NumberOfProcessors();
+}
+
+// Resolves the member |process_| (the parameter is unused) to a Mach task
+// port: asks the port provider first, then falls back to mach_task_self()
+// when targeting the current process. Returns MACH_PORT_NULL otherwise.
+mach_port_t ProcessMetrics::TaskForPid(ProcessHandle /* process */) const {
+  mach_port_t task = MACH_PORT_NULL;
+  if (port_provider_)
+    task = port_provider_->TaskForPid(process_);
+  if (task == MACH_PORT_NULL && process_ == getpid())
+    task = mach_task_self();
+  return task;
+}
+
+// Bytes committed by the system.
+// Returns the value in KB, or 0 on failure. Note it counts only active
+// pages — presumably as an approximation of commit charge; confirm against
+// callers' expectations if precision matters.
+size_t GetSystemCommitCharge() {
+  base::mac::ScopedMachSendRight host(mach_host_self());
+  mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
+  vm_statistics_data_t data;
+  kern_return_t kr = host_statistics(host.get(), HOST_VM_INFO,
+                                     reinterpret_cast<host_info_t>(&data),
+                                     &count);
+  if (kr != KERN_SUCCESS) {
+    MACH_DLOG(WARNING, kr) << "host_statistics";
+    return 0;
+  }
+
+  return (data.active_count * PAGE_SIZE) / 1024;
+}
+
+// On Mac, We only get total memory and free memory from the system.
+// Values are reported in KB; "free" excludes speculative (prefetched but
+// reclaimable) pages. Returns false if either host query fails.
+bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
+  struct host_basic_info hostinfo;
+  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+  base::mac::ScopedMachSendRight host(mach_host_self());
+  int result = host_info(host.get(), HOST_BASIC_INFO,
+                         reinterpret_cast<host_info_t>(&hostinfo), &count);
+  if (result != KERN_SUCCESS)
+    return false;
+
+  DCHECK_EQ(HOST_BASIC_INFO_COUNT, count);
+  meminfo->total = static_cast<int>(hostinfo.max_mem / 1024);
+
+  vm_statistics_data_t vm_info;
+  count = HOST_VM_INFO_COUNT;
+
+  if (host_statistics(host.get(), HOST_VM_INFO,
+                      reinterpret_cast<host_info_t>(&vm_info),
+                      &count) != KERN_SUCCESS) {
+    return false;
+  }
+
+  meminfo->free = static_cast<int>(
+      (vm_info.free_count - vm_info.speculative_count) * PAGE_SIZE / 1024);
+
+  return true;
+}
+
+} // namespace base
diff --git a/libchrome/base/process/process_metrics_posix.cc b/libchrome/base/process/process_metrics_posix.cc
new file mode 100644
index 0000000..fad581e
--- /dev/null
+++ b/libchrome/base/process/process_metrics_posix.cc
@@ -0,0 +1,80 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_metrics.h"
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Converts a struct timeval to microseconds, widening to int64_t before the
+// multiply so the seconds conversion cannot overflow.
+int64_t TimeValToMicroseconds(const struct timeval& tv) {
+  int64_t ret = tv.tv_sec;  // Avoid (int * int) integer overflow.
+  ret *= Time::kMicrosecondsPerSecond;
+  ret += tv.tv_usec;
+  return ret;
+}
+
+ProcessMetrics::~ProcessMetrics() { }
+
+// Best-guess per-platform NOFILE soft limits, used by GetMaxFds() when
+// getrlimit() fails.
+#if defined(OS_LINUX)
+static const rlim_t kSystemDefaultMaxFds = 8192;
+#elif defined(OS_MACOSX)
+static const rlim_t kSystemDefaultMaxFds = 256;
+#elif defined(OS_SOLARIS)
+static const rlim_t kSystemDefaultMaxFds = 8192;
+#elif defined(OS_FREEBSD)
+static const rlim_t kSystemDefaultMaxFds = 8192;
+#elif defined(OS_OPENBSD)
+static const rlim_t kSystemDefaultMaxFds = 256;
+#elif defined(OS_ANDROID)
+static const rlim_t kSystemDefaultMaxFds = 1024;
+#endif
+
+// Returns the soft limit on open file descriptors (RLIMIT_NOFILE), clamped
+// to INT_MAX, falling back to a per-platform guess if getrlimit() fails.
+size_t GetMaxFds() {
+  rlim_t max_fds;
+  struct rlimit nofile;
+  if (getrlimit(RLIMIT_NOFILE, &nofile)) {
+    // getrlimit failed. Take a best guess.
+    max_fds = kSystemDefaultMaxFds;
+    RAW_LOG(ERROR, "getrlimit(RLIMIT_NOFILE) failed");
+  } else {
+    max_fds = nofile.rlim_cur;
+  }
+
+  // Clamp so callers can safely treat the count as an int.
+  if (max_fds > INT_MAX)
+    max_fds = INT_MAX;
+
+  return static_cast<size_t>(max_fds);
+}
+
+
+// Sets the RLIMIT_NOFILE soft limit to |max_descriptors|, capped at the
+// current hard limit. Best-effort: failures are only logged.
+void SetFdLimit(unsigned int max_descriptors) {
+  struct rlimit limits;
+  if (getrlimit(RLIMIT_NOFILE, &limits) == 0) {
+    unsigned int new_limit = max_descriptors;
+    // An unprivileged process cannot raise the soft limit past rlim_max.
+    if (limits.rlim_max > 0 && limits.rlim_max < max_descriptors) {
+      new_limit = limits.rlim_max;
+    }
+    limits.rlim_cur = new_limit;
+    if (setrlimit(RLIMIT_NOFILE, &limits) != 0) {
+      PLOG(INFO) << "Failed to set file descriptor limit";
+    }
+  } else {
+    PLOG(INFO) << "Failed to get file descriptor limit";
+  }
+}
+
+// Returns the system VM page size in bytes.
+size_t GetPageSize() {
+  return getpagesize();
+}
+
+} // namespace base
diff --git a/libchrome/base/process/process_metrics_unittest.cc b/libchrome/base/process/process_metrics_unittest.cc
new file mode 100644
index 0000000..94a2ffe
--- /dev/null
+++ b/libchrome/base/process/process_metrics_unittest.cc
@@ -0,0 +1,519 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_metrics.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <sstream>
+#include <string>
+
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/files/file.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/macros.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/test/multiprocess_test.h"
+#include "base/threading/thread.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace base {
+namespace debug {
+
+#if defined(OS_LINUX) || defined(OS_CHROMEOS)
+namespace {
+
+void BusyWork(std::vector<std::string>* vec) {  // Burns CPU so a worker thread accrues measurable time.
+  int64_t test_value = 0;
+  for (int i = 0; i < 100000; ++i) {
+    ++test_value;
+    vec->push_back(Int64ToString(test_value));  // Allocation keeps the loop from being optimized away.
+  }
+}
+
+} // namespace
+#endif // defined(OS_LINUX) || defined(OS_CHROMEOS)
+
+// Tests for SystemMetrics.
+// Exists as a class so it can be a friend of SystemMetrics.
+class SystemMetricsTest : public testing::Test {  // Empty fixture; exists for SystemMetrics friendship.
+ public:
+  SystemMetricsTest() {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SystemMetricsTest);
+};
+
+/////////////////////////////////////////////////////////////////////////////
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+TEST_F(SystemMetricsTest, IsValidDiskName) {  // Whole-disk names: sd*/hd* + letters, mmcblk + digits.
+  std::string invalid_input1 = "";
+  std::string invalid_input2 = "s";
+  std::string invalid_input3 = "sdz+";  // Trailing non-letter garbage.
+  std::string invalid_input4 = "hda0";  // Digits are not valid after an sd/hd prefix.
+  std::string invalid_input5 = "mmcbl";
+  std::string invalid_input6 = "mmcblka";  // Letters are not valid after "mmcblk".
+  std::string invalid_input7 = "mmcblkb";
+  std::string invalid_input8 = "mmmblk0";
+
+  EXPECT_FALSE(IsValidDiskName(invalid_input1));
+  EXPECT_FALSE(IsValidDiskName(invalid_input2));
+  EXPECT_FALSE(IsValidDiskName(invalid_input3));
+  EXPECT_FALSE(IsValidDiskName(invalid_input4));
+  EXPECT_FALSE(IsValidDiskName(invalid_input5));
+  EXPECT_FALSE(IsValidDiskName(invalid_input6));
+  EXPECT_FALSE(IsValidDiskName(invalid_input7));
+  EXPECT_FALSE(IsValidDiskName(invalid_input8));
+
+  std::string valid_input1 = "sda";
+  std::string valid_input2 = "sdaaaa";
+  std::string valid_input3 = "hdz";
+  std::string valid_input4 = "mmcblk0";
+  std::string valid_input5 = "mmcblk999";
+
+  EXPECT_TRUE(IsValidDiskName(valid_input1));
+  EXPECT_TRUE(IsValidDiskName(valid_input2));
+  EXPECT_TRUE(IsValidDiskName(valid_input3));
+  EXPECT_TRUE(IsValidDiskName(valid_input4));
+  EXPECT_TRUE(IsValidDiskName(valid_input5));
+}
+
+TEST_F(SystemMetricsTest, ParseMeminfo) {  // Fixtures captured from real /proc/meminfo files.
+  struct SystemMemoryInfoKB meminfo;
+  std::string invalid_input1 = "abc";
+  std::string invalid_input2 = "MemTotal:";  // Key present but value missing.
+  // Partial file with no MemTotal
+  std::string invalid_input3 =
+      "MemFree: 3913968 kB\n"
+      "Buffers: 2348340 kB\n"
+      "Cached: 49071596 kB\n"
+      "SwapCached: 12 kB\n"
+      "Active: 36393900 kB\n"
+      "Inactive: 21221496 kB\n"
+      "Active(anon): 5674352 kB\n"
+      "Inactive(anon): 633992 kB\n";
+  EXPECT_FALSE(ParseProcMeminfo(invalid_input1, &meminfo));
+  EXPECT_FALSE(ParseProcMeminfo(invalid_input2, &meminfo));
+  EXPECT_FALSE(ParseProcMeminfo(invalid_input3, &meminfo));
+
+  std::string valid_input1 =  // Modern kernel format with anon/file breakdown.
+      "MemTotal: 3981504 kB\n"
+      "MemFree: 140764 kB\n"
+      "Buffers: 116480 kB\n"
+      "Cached: 406160 kB\n"
+      "SwapCached: 21304 kB\n"
+      "Active: 3152040 kB\n"
+      "Inactive: 472856 kB\n"
+      "Active(anon): 2972352 kB\n"
+      "Inactive(anon): 270108 kB\n"
+      "Active(file): 179688 kB\n"
+      "Inactive(file): 202748 kB\n"
+      "Unevictable: 0 kB\n"
+      "Mlocked: 0 kB\n"
+      "SwapTotal: 5832280 kB\n"
+      "SwapFree: 3672368 kB\n"
+      "Dirty: 184 kB\n"
+      "Writeback: 0 kB\n"
+      "AnonPages: 3101224 kB\n"
+      "Mapped: 142296 kB\n"
+      "Shmem: 140204 kB\n"
+      "Slab: 54212 kB\n"
+      "SReclaimable: 30936 kB\n"
+      "SUnreclaim: 23276 kB\n"
+      "KernelStack: 2464 kB\n"
+      "PageTables: 24812 kB\n"
+      "NFS_Unstable: 0 kB\n"
+      "Bounce: 0 kB\n"
+      "WritebackTmp: 0 kB\n"
+      "CommitLimit: 7823032 kB\n"
+      "Committed_AS: 7973536 kB\n"
+      "VmallocTotal: 34359738367 kB\n"
+      "VmallocUsed: 375940 kB\n"
+      "VmallocChunk: 34359361127 kB\n"
+      "DirectMap4k: 72448 kB\n"
+      "DirectMap2M: 4061184 kB\n";
+  // output from a much older kernel where the Active and Inactive aren't
+  // broken down into anon and file and Huge Pages are enabled
+  std::string valid_input2 =
+      "MemTotal: 255908 kB\n"
+      "MemFree: 69936 kB\n"
+      "Buffers: 15812 kB\n"
+      "Cached: 115124 kB\n"
+      "SwapCached: 0 kB\n"
+      "Active: 92700 kB\n"
+      "Inactive: 63792 kB\n"
+      "HighTotal: 0 kB\n"
+      "HighFree: 0 kB\n"
+      "LowTotal: 255908 kB\n"
+      "LowFree: 69936 kB\n"
+      "SwapTotal: 524280 kB\n"
+      "SwapFree: 524200 kB\n"
+      "Dirty: 4 kB\n"
+      "Writeback: 0 kB\n"
+      "Mapped: 42236 kB\n"
+      "Slab: 25912 kB\n"
+      "Committed_AS: 118680 kB\n"
+      "PageTables: 1236 kB\n"
+      "VmallocTotal: 3874808 kB\n"
+      "VmallocUsed: 1416 kB\n"
+      "VmallocChunk: 3872908 kB\n"
+      "HugePages_Total: 0\n"
+      "HugePages_Free: 0\n"
+      "Hugepagesize: 4096 kB\n";
+
+  EXPECT_TRUE(ParseProcMeminfo(valid_input1, &meminfo));
+  EXPECT_EQ(meminfo.total, 3981504);
+  EXPECT_EQ(meminfo.free, 140764);
+  EXPECT_EQ(meminfo.buffers, 116480);
+  EXPECT_EQ(meminfo.cached, 406160);
+  EXPECT_EQ(meminfo.active_anon, 2972352);
+  EXPECT_EQ(meminfo.active_file, 179688);
+  EXPECT_EQ(meminfo.inactive_anon, 270108);
+  EXPECT_EQ(meminfo.inactive_file, 202748);
+  EXPECT_EQ(meminfo.swap_total, 5832280);
+  EXPECT_EQ(meminfo.swap_free, 3672368);
+  EXPECT_EQ(meminfo.dirty, 184);
+#if defined(OS_CHROMEOS)  // shmem/slab fields are only validated on Chrome OS builds.
+  EXPECT_EQ(meminfo.shmem, 140204);
+  EXPECT_EQ(meminfo.slab, 54212);
+#endif
+  EXPECT_TRUE(ParseProcMeminfo(valid_input2, &meminfo));
+  EXPECT_EQ(meminfo.total, 255908);
+  EXPECT_EQ(meminfo.free, 69936);
+  EXPECT_EQ(meminfo.buffers, 15812);
+  EXPECT_EQ(meminfo.cached, 115124);
+  EXPECT_EQ(meminfo.swap_total, 524280);
+  EXPECT_EQ(meminfo.swap_free, 524200);
+  EXPECT_EQ(meminfo.dirty, 4);
+}
+
+TEST_F(SystemMetricsTest, ParseVmstat) {  // Verifies pswpin/pswpout/pgmajfault extraction from /proc/vmstat captures.
+  struct SystemMemoryInfoKB meminfo;
+  // part of vmstat from a 3.2 kernel with numa enabled
+  std::string valid_input1 =
+      "nr_free_pages 905104\n"
+      "nr_inactive_anon 142478\n"  // Fix: was missing "\n", fusing this stat with the next line.
+      "nr_active_anon 1520046\n"
+      "nr_inactive_file 4481001\n"
+      "nr_active_file 8313439\n"
+      "nr_unevictable 5044\n"
+      "nr_mlock 5044\n"
+      "nr_anon_pages 1633780\n"
+      "nr_mapped 104742\n"
+      "nr_file_pages 12828218\n"
+      "nr_dirty 245\n"
+      "nr_writeback 0\n"
+      "nr_slab_reclaimable 831609\n"
+      "nr_slab_unreclaimable 41164\n"
+      "nr_page_table_pages 31470\n"
+      "nr_kernel_stack 1735\n"
+      "nr_unstable 0\n"
+      "nr_bounce 0\n"
+      "nr_vmscan_write 406\n"
+      "nr_vmscan_immediate_reclaim 281\n"
+      "nr_writeback_temp 0\n"
+      "nr_isolated_anon 0\n"
+      "nr_isolated_file 0\n"
+      "nr_shmem 28820\n"
+      "nr_dirtied 84674644\n"
+      "nr_written 75307109\n"
+      "nr_anon_transparent_hugepages 0\n"
+      "nr_dirty_threshold 1536206\n"
+      "nr_dirty_background_threshold 768103\n"
+      "pgpgin 30777108\n"
+      "pgpgout 319023278\n"
+      "pswpin 179\n"
+      "pswpout 406\n"
+      "pgalloc_dma 0\n"
+      "pgalloc_dma32 20833399\n"
+      "pgalloc_normal 1622609290\n"
+      "pgalloc_movable 0\n"
+      "pgfree 1644355583\n"
+      "pgactivate 75391882\n"
+      "pgdeactivate 4121019\n"
+      "pgfault 2542879679\n"
+      "pgmajfault 487192\n";
+  std::string valid_input2 =  // Older 32-bit kernel with highmem zones.
+      "nr_free_pages 180125\n"
+      "nr_inactive_anon 51\n"
+      "nr_active_anon 38832\n"
+      "nr_inactive_file 50171\n"
+      "nr_active_file 47510\n"
+      "nr_unevictable 0\n"
+      "nr_mlock 0\n"
+      "nr_anon_pages 38825\n"
+      "nr_mapped 24043\n"
+      "nr_file_pages 97733\n"
+      "nr_dirty 0\n"
+      "nr_writeback 0\n"
+      "nr_slab_reclaimable 4032\n"
+      "nr_slab_unreclaimable 2848\n"
+      "nr_page_table_pages 1505\n"
+      "nr_kernel_stack 626\n"
+      "nr_unstable 0\n"
+      "nr_bounce 0\n"
+      "nr_vmscan_write 0\n"
+      "nr_vmscan_immediate_reclaim 0\n"
+      "nr_writeback_temp 0\n"
+      "nr_isolated_anon 0\n"
+      "nr_isolated_file 0\n"
+      "nr_shmem 58\n"
+      "nr_dirtied 435358\n"
+      "nr_written 401258\n"
+      "nr_anon_transparent_hugepages 0\n"
+      "nr_dirty_threshold 18566\n"
+      "nr_dirty_background_threshold 4641\n"
+      "pgpgin 299464\n"
+      "pgpgout 2437788\n"
+      "pswpin 12\n"
+      "pswpout 901\n"
+      "pgalloc_normal 144213030\n"
+      "pgalloc_high 164501274\n"
+      "pgalloc_movable 0\n"
+      "pgfree 308894908\n"
+      "pgactivate 239320\n"
+      "pgdeactivate 1\n"
+      "pgfault 716044601\n"
+      "pgmajfault 2023\n"
+      "pgrefill_normal 0\n"
+      "pgrefill_high 0\n"
+      "pgrefill_movable 0\n";
+  EXPECT_TRUE(ParseProcVmstat(valid_input1, &meminfo));
+  EXPECT_EQ(meminfo.pswpin, 179);
+  EXPECT_EQ(meminfo.pswpout, 406);
+  EXPECT_EQ(meminfo.pgmajfault, 487192);
+  EXPECT_TRUE(ParseProcVmstat(valid_input2, &meminfo));
+  EXPECT_EQ(meminfo.pswpin, 12);
+  EXPECT_EQ(meminfo.pswpout, 901);
+  EXPECT_EQ(meminfo.pgmajfault, 2023);
+}
+#endif // defined(OS_LINUX) || defined(OS_ANDROID)
+
+#if defined(OS_LINUX) || defined(OS_CHROMEOS)
+
+// Test that ProcessMetrics::GetCPUUsage() doesn't return negative values when
+// the number of threads running on the process decreases between two successive
+// calls to it.
+TEST_F(SystemMetricsTest, TestNoNegativeCpuUsage) {  // GetCPUUsage must stay >= 0 as threads die.
+  ProcessHandle handle = GetCurrentProcessHandle();
+  std::unique_ptr<ProcessMetrics> metrics(
+      ProcessMetrics::CreateProcessMetrics(handle));
+
+  EXPECT_GE(metrics->GetCPUUsage(), 0.0);  // Baseline reading before any worker threads exist.
+  Thread thread1("thread1");
+  Thread thread2("thread2");
+  Thread thread3("thread3");
+
+  thread1.StartAndWaitForTesting();
+  thread2.StartAndWaitForTesting();
+  thread3.StartAndWaitForTesting();
+
+  ASSERT_TRUE(thread1.IsRunning());
+  ASSERT_TRUE(thread2.IsRunning());
+  ASSERT_TRUE(thread3.IsRunning());
+
+  std::vector<std::string> vec1;
+  std::vector<std::string> vec2;
+  std::vector<std::string> vec3;
+
+  thread1.task_runner()->PostTask(FROM_HERE, Bind(&BusyWork, &vec1));  // Accrue per-thread CPU time.
+  thread2.task_runner()->PostTask(FROM_HERE, Bind(&BusyWork, &vec2));
+  thread3.task_runner()->PostTask(FROM_HERE, Bind(&BusyWork, &vec3));
+
+  EXPECT_GE(metrics->GetCPUUsage(), 0.0);
+
+  thread1.Stop();  // Each Stop() shrinks the thread count; usage must never go negative.
+  EXPECT_GE(metrics->GetCPUUsage(), 0.0);
+
+  thread2.Stop();
+  EXPECT_GE(metrics->GetCPUUsage(), 0.0);
+
+  thread3.Stop();
+  EXPECT_GE(metrics->GetCPUUsage(), 0.0);
+}
+
+#endif // defined(OS_LINUX) || defined(OS_CHROMEOS)
+
+#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS)) || \
+ defined(OS_LINUX) || defined(OS_ANDROID)
+TEST(SystemMetrics2Test, GetSystemMemoryInfo) {  // Sanity-checks live values, not exact numbers.
+  SystemMemoryInfoKB info;
+  EXPECT_TRUE(GetSystemMemoryInfo(&info));
+
+  // Ensure each field received a value.
+  EXPECT_GT(info.total, 0);
+  EXPECT_GT(info.free, 0);
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  EXPECT_GT(info.buffers, 0);
+  EXPECT_GT(info.cached, 0);
+  EXPECT_GT(info.active_anon, 0);
+  EXPECT_GT(info.inactive_anon, 0);
+  EXPECT_GT(info.active_file, 0);
+  EXPECT_GT(info.inactive_file, 0);
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+
+  // All the values should be less than the total amount of memory.
+  EXPECT_LT(info.free, info.total);
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  EXPECT_LT(info.buffers, info.total);
+  EXPECT_LT(info.cached, info.total);
+  EXPECT_LT(info.active_anon, info.total);
+  EXPECT_LT(info.inactive_anon, info.total);
+  EXPECT_LT(info.active_file, info.total);
+  EXPECT_LT(info.inactive_file, info.total);
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+
+#if defined(OS_CHROMEOS)
+  // Chrome OS exposes shmem.
+  EXPECT_GT(info.shmem, 0);
+  EXPECT_LT(info.shmem, info.total);
+  // Chrome unit tests are not run on actual Chrome OS hardware, so gem_objects
+  // and gem_size cannot be tested here.
+#endif
+}
+#endif // defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS)) ||
+ // defined(OS_LINUX) || defined(OS_ANDROID)
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+TEST(ProcessMetricsTest, ParseProcStatCPU) {  // Fields 14-15 (utime/stime per proc(5)) are summed.
+  // /proc/self/stat for a process running "top".
+  const char kTopStat[] = "960 (top) S 16230 960 16230 34818 960 "
+      "4202496 471 0 0 0 "
+      "12 16 0 0 " // <- These are the goods.
+      "20 0 1 0 121946157 15077376 314 18446744073709551615 4194304 "
+      "4246868 140733983044336 18446744073709551615 140244213071219 "
+      "0 0 0 138047495 0 0 0 17 1 0 0 0 0 0";
+  EXPECT_EQ(12 + 16, ParseProcStatCPU(kTopStat));  // utime + stime, in clock ticks.
+
+  // cat /proc/self/stat on a random other machine I have.
+  const char kSelfStat[] = "5364 (cat) R 5354 5364 5354 34819 5364 "
+      "0 142 0 0 0 "
+      "0 0 0 0 " // <- No CPU, apparently.
+      "16 0 1 0 1676099790 2957312 114 4294967295 134512640 134528148 "
+      "3221224832 3221224344 3086339742 0 0 0 0 0 0 0 17 0 0 0";
+
+  EXPECT_EQ(0, ParseProcStatCPU(kSelfStat));
+
+  // Some weird long-running process with a weird name that I created for the
+  // purposes of this test.
+  const char kWeirdNameStat[] = "26115 (Hello) You ())) ) R 24614 26115 24614"
+      " 34839 26115 4218880 227 0 0 0 "
+      "5186 11 0 0 "
+      "20 0 1 0 36933953 4296704 90 18446744073709551615 4194304 4196116 "
+      "140735857761568 140735857761160 4195644 0 0 0 0 0 0 0 17 14 0 0 0 0 0 "
+      "6295056 6295616 16519168 140735857770710 140735857770737 "
+      "140735857770737 140735857774557 0";  // Parens/spaces in comm must not break field counting.
+  EXPECT_EQ(5186 + 11, ParseProcStatCPU(kWeirdNameStat));
+}
+#endif // defined(OS_LINUX) || defined(OS_ANDROID)
+
+// Disable on Android because base_unittests runs inside a Dalvik VM that
+// starts and stop threads (crbug.com/175563).
+#if defined(OS_LINUX)
+// http://crbug.com/396455
+TEST(ProcessMetricsTest, DISABLED_GetNumberOfThreads) {  // Disabled: see crbug.com/396455 above.
+  const ProcessHandle current = GetCurrentProcessHandle();
+  const int initial_threads = GetNumberOfThreads(current);
+  ASSERT_GT(initial_threads, 0);
+  const int kNumAdditionalThreads = 10;
+  {
+    std::unique_ptr<Thread> my_threads[kNumAdditionalThreads];
+    for (int i = 0; i < kNumAdditionalThreads; ++i) {
+      my_threads[i].reset(new Thread("GetNumberOfThreadsTest"));
+      my_threads[i]->Start();
+      ASSERT_EQ(GetNumberOfThreads(current), initial_threads + 1 + i);  // Count grows by one each start.
+    }
+  }
+  // The Thread destructor will stop them.
+  ASSERT_EQ(initial_threads, GetNumberOfThreads(current));
+}
+#endif // defined(OS_LINUX)
+
+#if defined(OS_LINUX)
+namespace {
+
+// Keep these in sync so the GetOpenFdCount test can refer to correct test main.
+#define ChildMain ChildFdCount
+#define ChildMainString "ChildFdCount"
+
+// Command line flag name and file name used for synchronization.
+const char kTempDirFlag[] = "temp-dir";
+const char kSignalClosed[] = "closed";
+
+bool SignalEvent(const FilePath& signal_dir, const char* signal_file) {  // Signal = create a marker file.
+  File file(signal_dir.AppendASCII(signal_file),
+            File::FLAG_CREATE | File::FLAG_WRITE);
+  return file.IsValid();  // Creation succeeded => event is signaled.
+}
+
+// Check whether an event was signaled (i.e. its marker file exists).
+bool CheckEvent(const FilePath& signal_dir, const char* signal_file) {
+  File file(signal_dir.AppendASCII(signal_file),
+            File::FLAG_OPEN | File::FLAG_READ);
+  return file.IsValid();  // Open succeeds only if SignalEvent() already ran.
+}
+
+// Busy-wait for an event to be signaled.
+void WaitForEvent(const FilePath& signal_dir, const char* signal_file) {
+  while (!CheckEvent(signal_dir, signal_file))
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(10));  // Poll; avoids any fs-watcher dependency.
+}
+
+// Subprocess to test the number of open file descriptors.
+MULTIPROCESS_TEST_MAIN(ChildMain) {  // Child half of the GetOpenFdCount test.
+  CommandLine* command_line = CommandLine::ForCurrentProcess();
+  const FilePath temp_path = command_line->GetSwitchValuePath(kTempDirFlag);
+  CHECK(DirectoryExists(temp_path));
+
+  // Try to close all the file descriptors, so the open count goes to 0.
+  for (size_t i = 0; i < 1000; ++i)
+    close(i);  // EBADF for never-opened fds is deliberately ignored.
+  CHECK(SignalEvent(temp_path, kSignalClosed));
+
+  // Wait to be terminated.
+  while (true)
+    PlatformThread::Sleep(TimeDelta::FromSeconds(1));
+  return 0;  // Unreachable; present to satisfy the signature.
+}
+
+} // namespace
+
+TEST(ProcessMetricsTest, GetOpenFdCount) {  // Parent half: spawns ChildFdCount and counts its fds.
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  const FilePath temp_path = temp_dir.path();
+  CommandLine child_command_line(GetMultiProcessTestChildBaseCommandLine());
+  child_command_line.AppendSwitchPath(kTempDirFlag, temp_path);  // Tells the child where to signal.
+  Process child = SpawnMultiProcessTestChild(
+      ChildMainString, child_command_line, LaunchOptions());
+  ASSERT_TRUE(child.IsValid());
+  WaitForEvent(temp_path, kSignalClosed);  // Child has closed fds 0..999 once this fires.
+
+  std::unique_ptr<ProcessMetrics> metrics(
+      ProcessMetrics::CreateProcessMetrics(child.Handle()));
+  // Try a couple times to observe the child with 0 fds open.
+  // Sometimes we've seen that the child can have 1 remaining
+  // fd shortly after receiving the signal. Potentially this
+  // is actually the signal file still open in the child.
+  int open_fds = -1;
+  for (int tries = 0; tries < 5; ++tries) {
+    open_fds = metrics->GetOpenFdCount();
+    if (!open_fds) {
+      break;
+    }
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
+  }
+  EXPECT_EQ(0, open_fds);
+  ASSERT_TRUE(child.Terminate(0, true));  // Reap the child so no zombie is left behind.
+}
+#endif // defined(OS_LINUX)
+
+} // namespace debug
+} // namespace base
diff --git a/libchrome/base/process/process_posix.cc b/libchrome/base/process/process_posix.cc
new file mode 100644
index 0000000..ba9b544
--- /dev/null
+++ b/libchrome/base/process/process_posix.cc
@@ -0,0 +1,385 @@
+// Copyright 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process.h"
+
+#include <errno.h>
+#include <stdint.h>
+#include <sys/resource.h>
+#include <sys/wait.h>
+
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/kill.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include <sys/event.h>
+#endif
+
+namespace {
+
+#if !defined(OS_NACL_NONSFI)
+
+bool WaitpidWithTimeout(base::ProcessHandle handle,
+                        int* status,
+                        base::TimeDelta wait) {
+  // This POSIX version of this function only guarantees that we wait no less
+  // than |wait| for the process to exit. The child process may
+  // exit sometime before the timeout has ended but we may still block for up
+  // to 256 milliseconds after the fact.
+  //
+  // waitpid() has no direct support on POSIX for specifying a timeout, you can
+  // either ask it to block indefinitely or return immediately (WNOHANG).
+  // When a child process terminates a SIGCHLD signal is sent to the parent.
+  // Catching this signal would involve installing a signal handler which may
+  // affect other parts of the application and would be difficult to debug.
+  //
+  // Our strategy is to call waitpid() once up front to check if the process
+  // has already exited, otherwise to loop for |wait|, sleeping for
+  // at most 256 milliseconds each time using usleep() and then calling
+  // waitpid(). The amount of time we sleep starts out at 1 milliseconds, and
+  // we double it every 4 sleep cycles.
+  //
+  // usleep() is speced to exit if a signal is received for which a handler
+  // has been installed. This means that when a SIGCHLD is sent, it will exit
+  // depending on behavior external to this function.
+  //
+  // This function is used primarily for unit tests, if we want to use it in
+  // the application itself it would probably be best to examine other routes.
+
+  if (wait == base::TimeDelta::Max()) {  // Infinite wait: just block in waitpid().
+    return HANDLE_EINTR(waitpid(handle, status, 0)) > 0;
+  }
+
+  pid_t ret_pid = HANDLE_EINTR(waitpid(handle, status, WNOHANG));  // Up-front non-blocking check.
+  static const int64_t kMaxSleepInMicroseconds = 1 << 18;  // ~256 milliseconds.
+  int64_t max_sleep_time_usecs = 1 << 10;  // ~1 milliseconds.
+  int64_t double_sleep_time = 0;  // Counts iterations to pace the backoff doubling.
+
+  // If the process hasn't exited yet, then sleep and try again.
+  base::TimeTicks wakeup_time = base::TimeTicks::Now() + wait;
+  while (ret_pid == 0) {  // 0 => child still running.
+    base::TimeTicks now = base::TimeTicks::Now();
+    if (now > wakeup_time)
+      break;
+    // Guaranteed to be non-negative!
+    int64_t sleep_time_usecs = (wakeup_time - now).InMicroseconds();
+    // Sleep for a bit while we wait for the process to finish.
+    if (sleep_time_usecs > max_sleep_time_usecs)
+      sleep_time_usecs = max_sleep_time_usecs;
+
+    // usleep() will return 0 and set errno to EINTR on receipt of a signal
+    // such as SIGCHLD.
+    usleep(sleep_time_usecs);
+    ret_pid = HANDLE_EINTR(waitpid(handle, status, WNOHANG));
+
+    if ((max_sleep_time_usecs < kMaxSleepInMicroseconds) &&
+        (double_sleep_time++ % 4 == 0)) {
+      max_sleep_time_usecs *= 2;  // Exponential backoff, doubled every 4th cycle.
+    }
+  }
+
+  return ret_pid > 0;  // >0 => reaped; 0 => timed out; <0 => waitpid error.
+}
+
+#if defined(OS_MACOSX)
+// Using kqueue on Mac so that we can wait on non-child processes.
+// We can't use kqueues on child processes because we need to reap
+// our own children using wait.
+static bool WaitForSingleNonChildProcess(base::ProcessHandle handle,
+                                         base::TimeDelta wait) {
+  DCHECK_GT(handle, 0);
+  DCHECK_GT(wait, base::TimeDelta());
+
+  base::ScopedFD kq(kqueue());  // RAII: closed automatically on every return path.
+  if (!kq.is_valid()) {
+    DPLOG(ERROR) << "kqueue";
+    return false;
+  }
+
+#if defined(ANDROID)
+  struct kevent change;  // Android's headers reject the {0} aggregate form.
+  memset(&change, 0, sizeof(change));
+#else
+  struct kevent change = {0};
+#endif
+  EV_SET(&change, handle, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);  // Watch for process exit.
+  int result = HANDLE_EINTR(kevent(kq.get(), &change, 1, NULL, 0, NULL));
+  if (result == -1) {
+    if (errno == ESRCH) {
+      // If the process wasn't found, it must be dead.
+      return true;
+    }
+
+    DPLOG(ERROR) << "kevent (setup " << handle << ")";
+    return false;
+  }
+
+  // Keep track of the elapsed time to be able to restart kevent if it's
+  // interrupted.
+  bool wait_forever = (wait == base::TimeDelta::Max());
+  base::TimeDelta remaining_delta;
+  base::TimeTicks deadline;
+  if (!wait_forever) {
+    remaining_delta = wait;
+    deadline = base::TimeTicks::Now() + remaining_delta;
+  }
+
+  result = -1;
+#if defined(ANDROID)
+  struct kevent event;
+  memset(&event, 0, sizeof(event));
+#else
+  struct kevent event = {0};
+#endif
+
+  while (wait_forever || remaining_delta > base::TimeDelta()) {
+    struct timespec remaining_timespec;
+    struct timespec* remaining_timespec_ptr;
+    if (wait_forever) {
+      remaining_timespec_ptr = NULL;  // NULL timeout => kevent blocks indefinitely.
+    } else {
+      remaining_timespec = remaining_delta.ToTimeSpec();
+      remaining_timespec_ptr = &remaining_timespec;
+    }
+
+    result = kevent(kq.get(), NULL, 0, &event, 1, remaining_timespec_ptr);
+
+    if (result == -1 && errno == EINTR) {  // Interrupted: recompute remaining time and retry.
+      if (!wait_forever) {
+        remaining_delta = deadline - base::TimeTicks::Now();
+      }
+      result = 0;
+    } else {
+      break;
+    }
+  }
+
+  if (result < 0) {
+    DPLOG(ERROR) << "kevent (wait " << handle << ")";
+    return false;
+  } else if (result > 1) {
+    DLOG(ERROR) << "kevent (wait " << handle << "): unexpected result "
+                << result;
+    return false;
+  } else if (result == 0) {
+    // Timed out.
+    return false;
+  }
+
+  DCHECK_EQ(result, 1);
+
+  if (event.filter != EVFILT_PROC ||
+      (event.fflags & NOTE_EXIT) == 0 ||
+      event.ident != static_cast<uintptr_t>(handle)) {  // Paranoia: confirm the event is ours.
+    DLOG(ERROR) << "kevent (wait " << handle
+                << "): unexpected event: filter=" << event.filter
+                << ", fflags=" << event.fflags
+                << ", ident=" << event.ident;
+    return false;
+  }
+
+  return true;
+}
+#endif // OS_MACOSX
+
+bool WaitForExitWithTimeoutImpl(base::ProcessHandle handle,
+                                int* exit_code,
+                                base::TimeDelta timeout) {
+  base::ProcessHandle parent_pid = base::GetParentProcessId(handle);
+  base::ProcessHandle our_pid = base::GetCurrentProcessHandle();
+  if (parent_pid != our_pid) {  // |handle| is not our direct child.
+#if defined(OS_MACOSX)
+    // On Mac we can wait on non child processes.
+    return WaitForSingleNonChildProcess(handle, timeout);
+#else
+    // Currently on Linux we can't handle non child processes.
+    NOTIMPLEMENTED();
+    // NOTE(review): execution falls through to waitpid() below, which fails for
+    // non-children (ECHILD per POSIX), so this path returns false — confirm intended.
+#endif  // OS_MACOSX
+  }
+
+  int status;
+  if (!WaitpidWithTimeout(handle, &status, timeout))
+    return false;
+  if (WIFSIGNALED(status)) {
+    if (exit_code)
+      *exit_code = -1;  // Killed by a signal: no meaningful exit code.
+    return true;
+  }
+  if (WIFEXITED(status)) {
+    if (exit_code)
+      *exit_code = WEXITSTATUS(status);
+    return true;
+  }
+  return false;  // Stopped/continued states are not treated as an exit.
+}
+#endif // !defined(OS_NACL_NONSFI)
+
+} // namespace
+
+namespace base {
+
+Process::Process(ProcessHandle handle) : process_(handle) {  // Wraps a pid; takes no OS resource.
+}
+
+Process::~Process() {  // Nothing to release: Close() only forgets the pid.
+}
+
+Process::Process(Process&& other) : process_(other.process_) {
+  other.Close();  // Transfer: the source no longer refers to the pid.
+}
+
+Process& Process::operator=(Process&& other) {
+  DCHECK_NE(this, &other);
+  process_ = other.process_;
+  other.Close();  // Null out the source so only one wrapper refers to the pid.
+  return *this;
+}
+
+// static
+Process Process::Current() {
+  return Process(GetCurrentProcessHandle());  // Wraps our own pid.
+}
+
+// static
+Process Process::Open(ProcessId pid) {
+  if (pid == GetCurrentProcId())
+    return Current();
+
+  // On POSIX process handles are the same as PIDs.
+  return Process(pid);
+}
+
+// static
+Process Process::OpenWithExtraPrivileges(ProcessId pid) {
+  // On POSIX there are no privileges to set.
+  return Open(pid);
+}
+
+// static
+Process Process::DeprecatedGetProcessFromHandle(ProcessHandle handle) {
+  DCHECK_NE(handle, GetCurrentProcessHandle());  // Use Current() for our own process.
+  return Process(handle);
+}
+
+#if !defined(OS_LINUX)
+// static
+bool Process::CanBackgroundProcesses() {
+  return false;  // Only the Linux implementation (elsewhere) supports backgrounding.
+}
+#endif  // !defined(OS_LINUX)
+
+bool Process::IsValid() const {
+  return process_ != kNullProcessHandle;  // Valid iff a pid is currently held.
+}
+
+ProcessHandle Process::Handle() const {
+  return process_;
+}
+
+Process Process::Duplicate() const {
+  if (is_current())
+    return Current();
+
+  return Process(process_);  // PIDs copy freely on POSIX; no handle duplication needed.
+}
+
+ProcessId Process::Pid() const {
+  DCHECK(IsValid());
+  return GetProcId(process_);
+}
+
+bool Process::is_current() const {
+  return process_ == GetCurrentProcessHandle();
+}
+
+void Process::Close() {
+  process_ = kNullProcessHandle;  // Merely forgets the pid; does not reap or kill.
+  // if the process wasn't terminated (so we waited) or the state
+  // wasn't already collected w/ a wait from process_utils, we're gonna
+  // end up w/ a zombie when it does finally exit.
+}
+
+#if !defined(OS_NACL_NONSFI)
+bool Process::Terminate(int /*exit_code*/, bool wait) const {  // SIGTERM, then SIGKILL if |wait| and it lingers.
+  // exit_code isn't supportable.
+  DCHECK(IsValid());
+  CHECK_GT(process_, 0);  // Guard against kill(0/-1, ...) signalling a whole group.
+
+  bool result = kill(process_, SIGTERM) == 0;
+  if (result && wait) {
+    int tries = 60;
+
+    unsigned sleep_ms = 4;
+
+    // The process may not end immediately due to pending I/O
+    bool exited = false;
+    while (tries-- > 0) {
+      pid_t pid = HANDLE_EINTR(waitpid(process_, NULL, WNOHANG));
+      if (pid == process_) {  // Reaped: the child is gone.
+        exited = true;
+        break;
+      }
+      if (pid == -1) {
+        if (errno == ECHILD) {
+          // The wait may fail with ECHILD if another process also waited for
+          // the same pid, causing the process state to get cleaned up.
+          exited = true;
+          break;
+        }
+        DPLOG(ERROR) << "Error waiting for process " << process_;
+      }
+
+      usleep(sleep_ms * 1000);
+      const unsigned kMaxSleepMs = 1000;
+      if (sleep_ms < kMaxSleepMs)
+        sleep_ms *= 2;  // Exponential backoff between polls, capped at 1s.
+    }
+
+    // If we're waiting and the child hasn't died by now, force it
+    // with a SIGKILL.
+    if (!exited)
+      result = kill(process_, SIGKILL) == 0;
+  }
+
+  if (!result)
+    DPLOG(ERROR) << "Unable to terminate process " << process_;
+
+  return result;
+}
+#endif // !defined(OS_NACL_NONSFI)
+
+bool Process::WaitForExit(int* exit_code) {
+  return WaitForExitWithTimeout(TimeDelta::Max(), exit_code);  // Max() => block indefinitely.
+}
+
+bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) {
+  return WaitForExitWithTimeoutImpl(Handle(), exit_code, timeout);  // Thin forwarder to the shared impl.
+}
+
+#if !defined(OS_LINUX)
+bool Process::IsProcessBackgrounded() const {
+  // See SetProcessBackgrounded().
+  DCHECK(IsValid());
+  return false;  // Backgrounding is never applied here, so always report false.
+}
+
+bool Process::SetProcessBackgrounded(bool /*value*/) {
+  // Not implemented for POSIX systems other than Linux. With POSIX, if we were
+  // to lower the process priority we wouldn't be able to raise it back to its
+  // initial priority.
+  NOTIMPLEMENTED();
+  return false;
+}
+#endif  // !defined(OS_LINUX)
+
+int Process::GetPriority() const {
+  DCHECK(IsValid());
+  // NOTE(review): getpriority() may legitimately return -1; callers cannot
+  // distinguish that from an error without clearing and checking errno.
+  return getpriority(PRIO_PROCESS, process_);  // Raw nice value; lower means higher priority.
+}
+
+} // namespace base
diff --git a/libchrome/base/profiler/scoped_profile.cc b/libchrome/base/profiler/scoped_profile.cc
new file mode 100644
index 0000000..f06a8c6
--- /dev/null
+++ b/libchrome/base/profiler/scoped_profile.cc
@@ -0,0 +1,34 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/profiler/scoped_profile.h"
+
+#include "base/location.h"
+#include "base/tracked_objects.h"
+
+
+namespace tracked_objects {
+
+
+ScopedProfile::ScopedProfile(const Location& location, Mode mode)
+    : birth_(NULL) {
+  if (mode == DISABLED)
+    return;  // No-op mode: stopwatch never started, dtor will do nothing.
+
+  birth_ = ThreadData::TallyABirthIfActive(location);
+  if (!birth_)
+    return;  // Tracking inactive for this thread; nothing to time.
+
+  stopwatch_.Start();
+}
+
+ScopedProfile::~ScopedProfile() {
+  if (!birth_)
+    return;  // Construction was disabled or tracking inactive; stopwatch never ran.
+
+  stopwatch_.Stop();
+  ThreadData::TallyRunInAScopedRegionIfTracking(birth_, stopwatch_);  // Record the scoped run.
+}
+
+} // namespace tracked_objects
diff --git a/libchrome/base/profiler/scoped_profile.h b/libchrome/base/profiler/scoped_profile.h
new file mode 100644
index 0000000..657150a
--- /dev/null
+++ b/libchrome/base/profiler/scoped_profile.h
@@ -0,0 +1,66 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#ifndef BASE_PROFILER_SCOPED_PROFILE_H_
+#define BASE_PROFILER_SCOPED_PROFILE_H_
+
+//------------------------------------------------------------------------------
+// ScopedProfile provides basic helper functions for profiling a short
+// region of code within a scope. It is separate from the related ThreadData
+// class so that it can be included without much other cruft, and provide the
+// macros listed below.
+
+#include "base/base_export.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/profiler/tracked_time.h"
+#include "base/tracked_objects.h"
+
+#define PASTE_LINE_NUMBER_ON_NAME(name, line) name##line
+
+#define LINE_BASED_VARIABLE_NAME_FOR_PROFILING \
+ PASTE_LINE_NUMBER_ON_NAME(some_profiler_variable_, __LINE__)
+
+// Defines the containing scope as a profiled region. This allows developers to
+// profile their code and see results on their about:profiler page, as well as
+// on the UMA dashboard.
+#define TRACK_RUN_IN_THIS_SCOPED_REGION(dispatch_function_name) \
+ ::tracked_objects::ScopedProfile LINE_BASED_VARIABLE_NAME_FOR_PROFILING( \
+ FROM_HERE_WITH_EXPLICIT_FUNCTION(#dispatch_function_name), \
+ ::tracked_objects::ScopedProfile::ENABLED)
+
+// Same as TRACK_RUN_IN_THIS_SCOPED_REGION except that there's an extra param
+// which is concatenated with the function name for better filtering.
+#define TRACK_SCOPED_REGION(category_name, dispatch_function_name) \
+ ::tracked_objects::ScopedProfile LINE_BASED_VARIABLE_NAME_FOR_PROFILING( \
+ FROM_HERE_WITH_EXPLICIT_FUNCTION( \
+ "[" category_name "]" dispatch_function_name), \
+ ::tracked_objects::ScopedProfile::ENABLED)
+
+namespace tracked_objects {
+class Births;
+
+class BASE_EXPORT ScopedProfile {
+ public:
+  // Mode of operation. Specifies whether ScopedProfile should be a no-op or
+  // needs to create and tally a task.
+  enum Mode {
+    DISABLED,  // Do nothing.
+    ENABLED    // Create and tally a task.
+  };
+
+  ScopedProfile(const Location& location, Mode mode);  // Starts timing if enabled and tracking is active.
+  ~ScopedProfile();  // Stops timing and tallies the scoped run.
+
+ private:
+  Births* birth_;  // Place in code where tracking started.
+  TaskStopwatch stopwatch_;  // Times the scoped region; only started when birth_ is set.
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedProfile);
+};
+
+} // namespace tracked_objects
+
+#endif // BASE_PROFILER_SCOPED_PROFILE_H_
diff --git a/libchrome/base/profiler/scoped_tracker.cc b/libchrome/base/profiler/scoped_tracker.cc
new file mode 100644
index 0000000..d15b7de
--- /dev/null
+++ b/libchrome/base/profiler/scoped_tracker.cc
@@ -0,0 +1,26 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/profiler/scoped_tracker.h"
+
+#include "base/bind.h"
+
+namespace tracked_objects {
+
+namespace {
+
+ScopedProfile::Mode g_scoped_profile_mode = ScopedProfile::DISABLED;
+
+} // namespace
+
+// static
+void ScopedTracker::Enable() {
+  g_scoped_profile_mode = ScopedProfile::ENABLED;  // Sticky for the rest of the process' life.
+}
+
+ScopedTracker::ScopedTracker(const Location& location)
+    : scoped_profile_(location, g_scoped_profile_mode) {  // Mode is sampled at construction time.
+}
+
+} // namespace tracked_objects
diff --git a/libchrome/base/profiler/scoped_tracker.h b/libchrome/base/profiler/scoped_tracker.h
new file mode 100644
index 0000000..a61de91
--- /dev/null
+++ b/libchrome/base/profiler/scoped_tracker.h
@@ -0,0 +1,77 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROFILER_SCOPED_TRACKER_H_
+#define BASE_PROFILER_SCOPED_TRACKER_H_
+
+//------------------------------------------------------------------------------
+// Utilities for temporarily instrumenting code to dig into issues that were
+// found using profiler data.
+
+#include "base/base_export.h"
+#include "base/bind.h"
+#include "base/callback_forward.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/profiler/scoped_profile.h"
+
+namespace tracked_objects {
+
+// ScopedTracker instruments a region within the code if the instrumentation is
+// enabled. It can be used, for example, to find out if a source of jankiness is
+// inside the instrumented code region.
+// Details:
+// 1. This class creates a task (like ones created by PostTask calls or IPC
+// message handlers). This task can be seen in chrome://profiler and is sent as
+// a part of profiler data to the UMA server. See profiler_event.proto.
+// 2. That task's lifetime is same as the lifetime of the ScopedTracker
+// instance.
+// 3. The execution time associated with the task is the wallclock time between
+// its constructor and destructor, minus wallclock times of directly nested
+// tasks.
+// 4. Task creation that this class utilizes is highly optimized.
+// 5. The class doesn't create a task unless this was enabled for the current
+// process. Search for ScopedTracker::Enable for the current list of processes
+// and channels where it's activated.
+// 6. The class is designed for temporarily instrumenting code to find
+// performance problems, after which the instrumentation must be removed.
+class BASE_EXPORT ScopedTracker {
+ public:
+ ScopedTracker(const Location& location);
+
+ // Enables instrumentation for the remainder of the current process' life. If
+ // this function is not called, all profiler instrumentations are no-ops.
+ static void Enable();
+
+ // Augments a |callback| with provided |location|. This is useful for
+ // instrumenting cases when we know that a jank is in a callback and there are
+ // many possible callbacks, but they come from a relatively small number of
+ // places. We can instrument these few places and at least know which one
+ // passes the janky callback.
+ template <typename P1>
+ static base::Callback<void(P1)> TrackCallback(
+ const Location& location,
+ const base::Callback<void(P1)>& callback) {
+ return base::Bind(&ScopedTracker::ExecuteAndTrackCallback<P1>, location,
+ callback);
+ }
+
+ private:
+ // Executes |callback|, augmenting it with provided |location|.
+ template <typename P1>
+ static void ExecuteAndTrackCallback(const Location& location,
+ const base::Callback<void(P1)>& callback,
+ P1 p1) {
+ ScopedTracker tracking_profile(location);
+ callback.Run(p1);
+ }
+
+ const ScopedProfile scoped_profile_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedTracker);
+};
+
+} // namespace tracked_objects
+
+#endif // BASE_PROFILER_SCOPED_TRACKER_H_
diff --git a/libchrome/base/profiler/tracked_time.cc b/libchrome/base/profiler/tracked_time.cc
new file mode 100644
index 0000000..7e0040c
--- /dev/null
+++ b/libchrome/base/profiler/tracked_time.cc
@@ -0,0 +1,68 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/profiler/tracked_time.h"
+
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <mmsystem.h> // Declare timeGetTime()... after including build_config.
+#endif
+
+namespace tracked_objects {
+
+Duration::Duration() : ms_(0) {}
+Duration::Duration(int32_t duration) : ms_(duration) {}
+
+Duration& Duration::operator+=(const Duration& other) {
+ ms_ += other.ms_;
+ return *this;
+}
+
+Duration Duration::operator+(const Duration& other) const {
+ return Duration(ms_ + other.ms_);
+}
+
+bool Duration::operator==(const Duration& other) const {
+ return ms_ == other.ms_;
+}
+
+bool Duration::operator!=(const Duration& other) const {
+ return ms_ != other.ms_;
+}
+
+bool Duration::operator>(const Duration& other) const {
+ return ms_ > other.ms_;
+}
+
+// static
+Duration Duration::FromMilliseconds(int ms) { return Duration(ms); }
+
+int32_t Duration::InMilliseconds() const {
+ return ms_;
+}
+
+//------------------------------------------------------------------------------
+
+TrackedTime::TrackedTime() : ms_(0) {}
+TrackedTime::TrackedTime(int32_t ms) : ms_(ms) {}
+TrackedTime::TrackedTime(const base::TimeTicks& time)
+ : ms_(static_cast<int32_t>((time - base::TimeTicks()).InMilliseconds())) {}
+
+// static
+TrackedTime TrackedTime::Now() {
+ return TrackedTime(base::TimeTicks::Now());
+}
+
+Duration TrackedTime::operator-(const TrackedTime& other) const {
+ return Duration(ms_ - other.ms_);
+}
+
+TrackedTime TrackedTime::operator+(const Duration& other) const {
+ return TrackedTime(ms_ + other.ms_);
+}
+
+bool TrackedTime::is_null() const { return ms_ == 0; }
+
+} // namespace tracked_objects
diff --git a/libchrome/base/profiler/tracked_time.h b/libchrome/base/profiler/tracked_time.h
new file mode 100644
index 0000000..b32f41b
--- /dev/null
+++ b/libchrome/base/profiler/tracked_time.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROFILER_TRACKED_TIME_H_
+#define BASE_PROFILER_TRACKED_TIME_H_
+
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/time/time.h"
+
+namespace tracked_objects {
+
+//------------------------------------------------------------------------------
+
+// TimeTicks maintains a wasteful 64 bits of data (we need less than 32), and on
+// windows, a 64 bit timer is expensive to even obtain. We use a simple
+// millisecond counter for most of our time values, as well as millisecond units
+// of duration between those values. This means we can only handle durations
+// up to 49 days (range), or 24 days (non-negative time durations).
+// We only define enough methods to service the needs of the tracking classes,
+// and our interfaces are modeled after what TimeTicks and TimeDelta use (so we
+// can swap them into place if we want to use the "real" classes).
+
+class BASE_EXPORT Duration { // Similar to base::TimeDelta.
+ public:
+ Duration();
+
+ Duration& operator+=(const Duration& other);
+ Duration operator+(const Duration& other) const;
+
+ bool operator==(const Duration& other) const;
+ bool operator!=(const Duration& other) const;
+ bool operator>(const Duration& other) const;
+
+ static Duration FromMilliseconds(int ms);
+
+ int32_t InMilliseconds() const;
+
+ private:
+ friend class TrackedTime;
+ explicit Duration(int32_t duration);
+
+ // Internal duration is stored directly in milliseconds.
+ int32_t ms_;
+};
+
+class BASE_EXPORT TrackedTime { // Similar to base::TimeTicks.
+ public:
+ TrackedTime();
+ explicit TrackedTime(const base::TimeTicks& time);
+
+ static TrackedTime Now();
+ Duration operator-(const TrackedTime& other) const;
+ TrackedTime operator+(const Duration& other) const;
+ bool is_null() const;
+
+ static TrackedTime FromMilliseconds(int32_t ms) { return TrackedTime(ms); }
+
+ private:
+ friend class Duration;
+ explicit TrackedTime(int32_t ms);
+
+ // Internal time is stored directly in milliseconds.
+ uint32_t ms_;
+};
+
+} // namespace tracked_objects
+
+#endif // BASE_PROFILER_TRACKED_TIME_H_
diff --git a/libchrome/base/profiler/tracked_time_unittest.cc b/libchrome/base/profiler/tracked_time_unittest.cc
new file mode 100644
index 0000000..f6d35ba
--- /dev/null
+++ b/libchrome/base/profiler/tracked_time_unittest.cc
@@ -0,0 +1,105 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test of classes in tracked_time.cc
+
+#include <stdint.h>
+
+#include "base/profiler/tracked_time.h"
+#include "base/time/time.h"
+#include "base/tracked_objects.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace tracked_objects {
+
+TEST(TrackedTimeTest, TrackedTimerMilliseconds) {
+ // First make sure we basically transfer simple milliseconds values as
+ // expected. Most critically, things should not become null.
+ int32_t kSomeMilliseconds = 243; // Some example times.
+ int64_t kReallyBigMilliseconds = (1LL << 35) + kSomeMilliseconds;
+
+ TrackedTime some = TrackedTime() +
+ Duration::FromMilliseconds(kSomeMilliseconds);
+ EXPECT_EQ(kSomeMilliseconds, (some - TrackedTime()).InMilliseconds());
+ EXPECT_FALSE(some.is_null());
+
+ // Now create a big time, to check that it is wrapped modulo 2^32.
+ base::TimeTicks big = base::TimeTicks() +
+ base::TimeDelta::FromMilliseconds(kReallyBigMilliseconds);
+ EXPECT_EQ(kReallyBigMilliseconds, (big - base::TimeTicks()).InMilliseconds());
+
+ TrackedTime wrapped_big(big);
+ // Expect wrapping at 32 bits.
+ EXPECT_EQ(kSomeMilliseconds, (wrapped_big - TrackedTime()).InMilliseconds());
+}
+
+TEST(TrackedTimeTest, TrackedTimerDuration) {
+ int kFirstMilliseconds = 793;
+ int kSecondMilliseconds = 14889;
+
+ Duration first = Duration::FromMilliseconds(kFirstMilliseconds);
+ Duration second = Duration::FromMilliseconds(kSecondMilliseconds);
+
+ EXPECT_EQ(kFirstMilliseconds, first.InMilliseconds());
+ EXPECT_EQ(kSecondMilliseconds, second.InMilliseconds());
+
+ Duration sum = first + second;
+ EXPECT_EQ(kFirstMilliseconds + kSecondMilliseconds, sum.InMilliseconds());
+}
+
+TEST(TrackedTimeTest, TrackedTimerVsTimeTicks) {
+ // Make sure that our 32 bit timer is aligned with the TimeTicks() timer.
+
+ // First get a 64 bit timer (which should not be null).
+ base::TimeTicks ticks_before = base::TimeTicks::Now();
+ EXPECT_FALSE(ticks_before.is_null());
+
+ // Then get a 32 bit timer that can be null when it wraps.
+ TrackedTime now = TrackedTime::Now();
+
+ // Then get a bracketing time.
+ base::TimeTicks ticks_after = base::TimeTicks::Now();
+ EXPECT_FALSE(ticks_after.is_null());
+
+ // Now make sure that we bracketed our tracked time nicely.
+ Duration before = now - TrackedTime(ticks_before);
+ EXPECT_LE(0, before.InMilliseconds());
+ Duration after = now - TrackedTime(ticks_after);
+ EXPECT_GE(0, after.InMilliseconds());
+}
+
+TEST(TrackedTimeTest, TrackedTimerDisabled) {
+ // Check to be sure disabling the collection of data induces a null time
+ // (which we know will return much faster).
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::DEACTIVATED);
+ // Since we disabled tracking, we should get a null response.
+ TrackedTime track_now = ThreadData::Now();
+ EXPECT_TRUE(track_now.is_null());
+}
+
+TEST(TrackedTimeTest, TrackedTimerEnabled) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+ // Make sure that when we enable tracking, we get a real timer result.
+
+ // First get a 64 bit timer (which should not be null).
+ base::TimeTicks ticks_before = base::TimeTicks::Now();
+ EXPECT_FALSE(ticks_before.is_null());
+
+ // Then get a 32 bit timer that can be null when it wraps.
+ // Critical difference from the TrackedTimerVsTimeTicks test is that we use
+ // ThreadData::Now(). It can sometimes return the null time.
+ TrackedTime now = ThreadData::Now();
+
+ // Then get a bracketing time.
+ base::TimeTicks ticks_after = base::TimeTicks::Now();
+ EXPECT_FALSE(ticks_after.is_null());
+
+ // Now make sure that we bracketed our tracked time nicely.
+ Duration before = now - TrackedTime(ticks_before);
+ EXPECT_LE(0, before.InMilliseconds());
+ Duration after = now - TrackedTime(ticks_after);
+ EXPECT_GE(0, after.InMilliseconds());
+}
+
+} // namespace tracked_objects
diff --git a/libchrome/base/rand_util.cc b/libchrome/base/rand_util.cc
new file mode 100644
index 0000000..fab6c66
--- /dev/null
+++ b/libchrome/base/rand_util.cc
@@ -0,0 +1,76 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/rand_util.h"
+
+#include <limits.h>
+#include <math.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <limits>
+
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+int RandInt(int min, int max) {
+ DCHECK_LE(min, max);
+
+ uint64_t range = static_cast<uint64_t>(max) - min + 1;
+ // |range| is at most UINT_MAX + 1, so the result of RandGenerator(range)
+ // is at most UINT_MAX. Hence it's safe to cast it from uint64_t to int64_t.
+ int result =
+ static_cast<int>(min + static_cast<int64_t>(base::RandGenerator(range)));
+ DCHECK_GE(result, min);
+ DCHECK_LE(result, max);
+ return result;
+}
+
+double RandDouble() {
+ return BitsToOpenEndedUnitInterval(base::RandUint64());
+}
+
+double BitsToOpenEndedUnitInterval(uint64_t bits) {
+ // We try to get maximum precision by masking out as many bits as will fit
+ // in the target type's mantissa, and raising it to an appropriate power to
+ // produce output in the range [0, 1). For IEEE 754 doubles, the mantissa
+ // is expected to accommodate 53 bits.
+
+ static_assert(std::numeric_limits<double>::radix == 2,
+ "otherwise use scalbn");
+ static const int kBits = std::numeric_limits<double>::digits;
+ uint64_t random_bits = bits & ((UINT64_C(1) << kBits) - 1);
+ double result = ldexp(static_cast<double>(random_bits), -1 * kBits);
+ DCHECK_GE(result, 0.0);
+ DCHECK_LT(result, 1.0);
+ return result;
+}
+
+uint64_t RandGenerator(uint64_t range) {
+ DCHECK_GT(range, 0u);
+ // We must discard random results above this number, as they would
+ // make the random generator non-uniform (consider e.g. if
+ // MAX_UINT64 was 7 and |range| was 5, then a result of 1 would be twice
+ // as likely as a result of 3 or 4).
+ uint64_t max_acceptable_value =
+ (std::numeric_limits<uint64_t>::max() / range) * range - 1;
+
+ uint64_t value;
+ do {
+ value = base::RandUint64();
+ } while (value > max_acceptable_value);
+
+ return value % range;
+}
+
+std::string RandBytesAsString(size_t length) {
+ DCHECK_GT(length, 0u);
+ std::string result;
+ RandBytes(WriteInto(&result, length + 1), length);
+ return result;
+}
+
+} // namespace base
diff --git a/libchrome/base/rand_util.h b/libchrome/base/rand_util.h
new file mode 100644
index 0000000..881dbd5
--- /dev/null
+++ b/libchrome/base/rand_util.h
@@ -0,0 +1,62 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_RAND_UTIL_H_
+#define BASE_RAND_UTIL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Returns a random number in range [0, UINT64_MAX]. Thread-safe.
+BASE_EXPORT uint64_t RandUint64();
+
+// Returns a random number between min and max (inclusive). Thread-safe.
+BASE_EXPORT int RandInt(int min, int max);
+
+// Returns a random number in range [0, range). Thread-safe.
+//
+// Note that this can be used as an adapter for std::random_shuffle():
+// Given a pre-populated |std::vector<int> myvector|, shuffle it as
+// std::random_shuffle(myvector.begin(), myvector.end(), base::RandGenerator);
+BASE_EXPORT uint64_t RandGenerator(uint64_t range);
+
+// Returns a random double in range [0, 1). Thread-safe.
+BASE_EXPORT double RandDouble();
+
+// Given input |bits|, convert with maximum precision to a double in
+// the range [0, 1). Thread-safe.
+BASE_EXPORT double BitsToOpenEndedUnitInterval(uint64_t bits);
+
+// Fills |output_length| bytes of |output| with random data.
+//
+// WARNING:
+// Do not use for security-sensitive purposes.
+// See crypto/ for cryptographically secure random number generation APIs.
+BASE_EXPORT void RandBytes(void* output, size_t output_length);
+
+// Fills a string of length |length| with random data and returns it.
+// |length| should be nonzero.
+//
+// Note that this is a variation of |RandBytes| with a different return type.
+// The returned string is likely not ASCII/UTF-8. Use with care.
+//
+// WARNING:
+// Do not use for security-sensitive purposes.
+// See crypto/ for cryptographically secure random number generation APIs.
+BASE_EXPORT std::string RandBytesAsString(size_t length);
+
+#if defined(OS_POSIX)
+BASE_EXPORT int GetUrandomFD();
+#endif
+
+} // namespace base
+
+#endif // BASE_RAND_UTIL_H_
diff --git a/libchrome/base/rand_util_posix.cc b/libchrome/base/rand_util_posix.cc
new file mode 100644
index 0000000..6a6e05a
--- /dev/null
+++ b/libchrome/base/rand_util_posix.cc
@@ -0,0 +1,61 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/rand_util.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#include "base/files/file_util.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+
+namespace {
+
+// We keep the file descriptor for /dev/urandom around so we don't need to
+// reopen it (which is expensive), and since we may not even be able to reopen
+// it if we are later put in a sandbox. This class wraps the file descriptor so
+// we can use LazyInstance to handle opening it on the first access.
+class URandomFd {
+ public:
+ URandomFd() : fd_(open("/dev/urandom", O_RDONLY)) {
+ DCHECK_GE(fd_, 0) << "Cannot open /dev/urandom: " << errno;
+ }
+
+ ~URandomFd() { close(fd_); }
+
+ int fd() const { return fd_; }
+
+ private:
+ const int fd_;
+};
+
+base::LazyInstance<URandomFd>::Leaky g_urandom_fd = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+namespace base {
+
+// NOTE: This function must be cryptographically secure. http://crbug.com/140076
+uint64_t RandUint64() {
+ uint64_t number;
+ RandBytes(&number, sizeof(number));
+ return number;
+}
+
+void RandBytes(void* output, size_t output_length) {
+ const int urandom_fd = g_urandom_fd.Pointer()->fd();
+ const bool success =
+ ReadFromFD(urandom_fd, static_cast<char*>(output), output_length);
+ CHECK(success);
+}
+
+int GetUrandomFD(void) {
+ return g_urandom_fd.Pointer()->fd();
+}
+
+} // namespace base
diff --git a/libchrome/base/rand_util_unittest.cc b/libchrome/base/rand_util_unittest.cc
new file mode 100644
index 0000000..4f46b80
--- /dev/null
+++ b/libchrome/base/rand_util_unittest.cc
@@ -0,0 +1,154 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/rand_util.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+
+#include "base/logging.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+const int kIntMin = std::numeric_limits<int>::min();
+const int kIntMax = std::numeric_limits<int>::max();
+
+} // namespace
+
+TEST(RandUtilTest, RandInt) {
+ EXPECT_EQ(base::RandInt(0, 0), 0);
+ EXPECT_EQ(base::RandInt(kIntMin, kIntMin), kIntMin);
+ EXPECT_EQ(base::RandInt(kIntMax, kIntMax), kIntMax);
+
+ // Check that the DCHECKS in RandInt() don't fire due to internal overflow.
+ // There was a 50% chance of that happening, so calling it 40 times means
+ // the chances of this passing by accident are tiny (9e-13).
+ for (int i = 0; i < 40; ++i)
+ base::RandInt(kIntMin, kIntMax);
+}
+
+TEST(RandUtilTest, RandDouble) {
+ // Force 64-bit precision, making sure we're not in a 80-bit FPU register.
+ volatile double number = base::RandDouble();
+ EXPECT_GT(1.0, number);
+ EXPECT_LE(0.0, number);
+}
+
+TEST(RandUtilTest, RandBytes) {
+ const size_t buffer_size = 50;
+ char buffer[buffer_size];
+ memset(buffer, 0, buffer_size);
+ base::RandBytes(buffer, buffer_size);
+ std::sort(buffer, buffer + buffer_size);
+ // Probability of occurrence of less than 25 unique bytes in 50 random bytes
+ // is below 10^-25.
+ EXPECT_GT(std::unique(buffer, buffer + buffer_size) - buffer, 25);
+}
+
+TEST(RandUtilTest, RandBytesAsString) {
+ std::string random_string = base::RandBytesAsString(1);
+ EXPECT_EQ(1U, random_string.size());
+ random_string = base::RandBytesAsString(145);
+ EXPECT_EQ(145U, random_string.size());
+ char accumulator = 0;
+ for (size_t i = 0; i < random_string.size(); ++i)
+ accumulator |= random_string[i];
+ // In theory this test can fail, but it won't before the universe dies of
+ // heat death.
+ EXPECT_NE(0, accumulator);
+}
+
+// Make sure that it is still appropriate to use RandGenerator in conjunction
+// with std::random_shuffle().
+TEST(RandUtilTest, RandGeneratorForRandomShuffle) {
+ EXPECT_EQ(base::RandGenerator(1), 0U);
+ EXPECT_LE(std::numeric_limits<ptrdiff_t>::max(),
+ std::numeric_limits<int64_t>::max());
+}
+
+TEST(RandUtilTest, RandGeneratorIsUniform) {
+ // Verify that RandGenerator has a uniform distribution. This is a
+ // regression test that consistently failed when RandGenerator was
+ // implemented this way:
+ //
+ // return base::RandUint64() % max;
+ //
+ // A degenerate case for such an implementation is e.g. a top of
+ // range that is 2/3rds of the way to MAX_UINT64, in which case the
+ // bottom half of the range would be twice as likely to occur as the
+ // top half. A bit of calculus care of jar@ shows that the largest
+ // measurable delta is when the top of the range is 3/4ths of the
+ // way, so that's what we use in the test.
+ const uint64_t kTopOfRange =
+ (std::numeric_limits<uint64_t>::max() / 4ULL) * 3ULL;
+ const uint64_t kExpectedAverage = kTopOfRange / 2ULL;
+ const uint64_t kAllowedVariance = kExpectedAverage / 50ULL; // +/- 2%
+ const int kMinAttempts = 1000;
+ const int kMaxAttempts = 1000000;
+
+ double cumulative_average = 0.0;
+ int count = 0;
+ while (count < kMaxAttempts) {
+ uint64_t value = base::RandGenerator(kTopOfRange);
+ cumulative_average = (count * cumulative_average + value) / (count + 1);
+
+ // Don't quit too quickly for things to start converging, or we may have
+ // a false positive.
+ if (count > kMinAttempts &&
+ kExpectedAverage - kAllowedVariance < cumulative_average &&
+ cumulative_average < kExpectedAverage + kAllowedVariance) {
+ break;
+ }
+
+ ++count;
+ }
+
+ ASSERT_LT(count, kMaxAttempts) << "Expected average was " <<
+ kExpectedAverage << ", average ended at " << cumulative_average;
+}
+
+TEST(RandUtilTest, RandUint64ProducesBothValuesOfAllBits) {
+ // This tests to see that our underlying random generator is good
+ // enough, for some value of good enough.
+ uint64_t kAllZeros = 0ULL;
+ uint64_t kAllOnes = ~kAllZeros;
+ uint64_t found_ones = kAllZeros;
+ uint64_t found_zeros = kAllOnes;
+
+ for (size_t i = 0; i < 1000; ++i) {
+ uint64_t value = base::RandUint64();
+ found_ones |= value;
+ found_zeros &= value;
+
+ if (found_zeros == kAllZeros && found_ones == kAllOnes)
+ return;
+ }
+
+ FAIL() << "Didn't achieve all bit values in maximum number of tries.";
+}
+
+// Benchmark test for RandBytes(). Disabled since it's intentionally slow and
+// does not test anything that isn't already tested by the existing RandBytes()
+// tests.
+TEST(RandUtilTest, DISABLED_RandBytesPerf) {
+ // Benchmark the performance of |kTestIterations| of RandBytes() using a
+ // buffer size of |kTestBufferSize|.
+ const int kTestIterations = 10;
+ const size_t kTestBufferSize = 1 * 1024 * 1024;
+
+ std::unique_ptr<uint8_t[]> buffer(new uint8_t[kTestBufferSize]);
+ const base::TimeTicks now = base::TimeTicks::Now();
+ for (int i = 0; i < kTestIterations; ++i)
+ base::RandBytes(buffer.get(), kTestBufferSize);
+ const base::TimeTicks end = base::TimeTicks::Now();
+
+ LOG(INFO) << "RandBytes(" << kTestBufferSize << ") took: "
+ << (end - now).InMicroseconds() << "µs";
+}
diff --git a/libchrome/base/run_loop.cc b/libchrome/base/run_loop.cc
new file mode 100644
index 0000000..a2322f8
--- /dev/null
+++ b/libchrome/base/run_loop.cc
@@ -0,0 +1,97 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/run_loop.h"
+
+#include "base/bind.h"
+#include "base/tracked_objects.h"
+#include "build/build_config.h"
+
+namespace base {
+
+RunLoop::RunLoop()
+ : loop_(MessageLoop::current()),
+ previous_run_loop_(NULL),
+ run_depth_(0),
+ run_called_(false),
+ quit_called_(false),
+ running_(false),
+ quit_when_idle_received_(false),
+ weak_factory_(this) {
+}
+
+RunLoop::~RunLoop() {
+}
+
+void RunLoop::Run() {
+ if (!BeforeRun())
+ return;
+
+ // Use task stopwatch to exclude the loop run time from the current task, if
+ // any.
+ tracked_objects::TaskStopwatch stopwatch;
+ stopwatch.Start();
+ loop_->RunHandler();
+ stopwatch.Stop();
+
+ AfterRun();
+}
+
+void RunLoop::RunUntilIdle() {
+ quit_when_idle_received_ = true;
+ Run();
+}
+
+void RunLoop::Quit() {
+ quit_called_ = true;
+ if (running_ && loop_->run_loop_ == this) {
+ // This is the inner-most RunLoop, so quit now.
+ loop_->QuitNow();
+ }
+}
+
+void RunLoop::QuitWhenIdle() {
+ quit_when_idle_received_ = true;
+}
+
+base::Closure RunLoop::QuitClosure() {
+ return base::Bind(&RunLoop::Quit, weak_factory_.GetWeakPtr());
+}
+
+base::Closure RunLoop::QuitWhenIdleClosure() {
+ return base::Bind(&RunLoop::QuitWhenIdle, weak_factory_.GetWeakPtr());
+}
+
+bool RunLoop::BeforeRun() {
+ DCHECK(!run_called_);
+ run_called_ = true;
+
+ // Allow Quit to be called before Run.
+ if (quit_called_)
+ return false;
+
+ // Push RunLoop stack:
+ previous_run_loop_ = loop_->run_loop_;
+ run_depth_ = previous_run_loop_? previous_run_loop_->run_depth_ + 1 : 1;
+ loop_->run_loop_ = this;
+
+ if (run_depth_ > 1)
+ loop_->NotifyBeginNestedLoop();
+
+ running_ = true;
+ return true;
+}
+
+void RunLoop::AfterRun() {
+ running_ = false;
+
+ // Pop RunLoop stack:
+ loop_->run_loop_ = previous_run_loop_;
+
+ // Execute deferred QuitNow, if any:
+ if (previous_run_loop_ && previous_run_loop_->quit_called_)
+ loop_->QuitNow();
+}
+
+} // namespace base
diff --git a/libchrome/base/run_loop.h b/libchrome/base/run_loop.h
new file mode 100644
index 0000000..635018f
--- /dev/null
+++ b/libchrome/base/run_loop.h
@@ -0,0 +1,116 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_RUN_LOOP_H_
+#define BASE_RUN_LOOP_H_
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "build/build_config.h"
+
+namespace base {
+#if defined(OS_ANDROID)
+class MessagePumpForUI;
+#endif
+
+#if defined(OS_IOS)
+class MessagePumpUIApplication;
+#endif
+
+// Helper class to Run a nested MessageLoop. Please do not use nested
+// MessageLoops in production code! If you must, use this class instead of
+// calling MessageLoop::Run/Quit directly. RunLoop::Run can only be called once
+// per RunLoop lifetime. Create a RunLoop on the stack and call Run/Quit to run
+// a nested MessageLoop.
+class BASE_EXPORT RunLoop {
+ public:
+ RunLoop();
+ ~RunLoop();
+
+ // Run the current MessageLoop. This blocks until Quit is called. Before
+ // calling Run, be sure to grab the QuitClosure in order to stop the
+ // MessageLoop asynchronously. MessageLoop::QuitWhenIdle and QuitNow will also
+ // trigger a return from Run, but those are deprecated.
+ void Run();
+
+ // Run the current MessageLoop until it doesn't find any tasks or messages in
+ // the queue (it goes idle). WARNING: This may never return! Only use this
+ // when repeating tasks such as animated web pages have been shut down.
+ void RunUntilIdle();
+
+ bool running() const { return running_; }
+
+ // Quit() quits an earlier call to Run() immediately. QuitWhenIdle() quits an
+ // earlier call to Run() when there aren't any tasks or messages in the queue.
+ //
+ // There can be other nested RunLoops servicing the same task queue
+ // (MessageLoop); Quitting one RunLoop has no bearing on the others. Quit()
+ // and QuitWhenIdle() can be called before, during or after Run(). If called
+ // before Run(), Run() will return immediately when called. Calling Quit() or
+ // QuitWhenIdle() after the RunLoop has already finished running has no
+ // effect.
+ //
+ // WARNING: You must NEVER assume that a call to Quit() or QuitWhenIdle() will
+ // terminate the targeted message loop. If a nested message loop continues
+ // running, the target may NEVER terminate. It is very easy to livelock (run
+ // forever) in such a case.
+ void Quit();
+ void QuitWhenIdle();
+
+ // Convenience methods to get a closure that safely calls Quit() or
+ // QuitWhenIdle() (has no effect if the RunLoop instance is gone).
+ //
+ // Example:
+ // RunLoop run_loop;
+ // PostTask(run_loop.QuitClosure());
+ // run_loop.Run();
+ base::Closure QuitClosure();
+ base::Closure QuitWhenIdleClosure();
+
+ private:
+ friend class MessageLoop;
+#if defined(OS_ANDROID)
+ // Android doesn't support the blocking MessageLoop::Run, so it calls
+ // BeforeRun and AfterRun directly.
+ friend class base::MessagePumpForUI;
+#endif
+
+#if defined(OS_IOS)
+ // iOS doesn't support the blocking MessageLoop::Run, so it calls
+ // BeforeRun directly.
+ friend class base::MessagePumpUIApplication;
+#endif
+
+ // Return false to abort the Run.
+ bool BeforeRun();
+ void AfterRun();
+
+ MessageLoop* loop_;
+
+ // Parent RunLoop or NULL if this is the top-most RunLoop.
+ RunLoop* previous_run_loop_;
+
+ // Used to count how many nested Run() invocations are on the stack.
+ int run_depth_;
+
+ bool run_called_;
+ bool quit_called_;
+ bool running_;
+
+ // Used to record that QuitWhenIdle() was called on the MessageLoop, meaning
+ // that we should quit Run once it becomes idle.
+ bool quit_when_idle_received_;
+
+ // WeakPtrFactory for QuitClosure safety.
+ base::WeakPtrFactory<RunLoop> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(RunLoop);
+};
+
+} // namespace base
+
+#endif // BASE_RUN_LOOP_H_
diff --git a/libchrome/base/scoped_clear_errno.h b/libchrome/base/scoped_clear_errno.h
new file mode 100644
index 0000000..585f6f7
--- /dev/null
+++ b/libchrome/base/scoped_clear_errno.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SCOPED_CLEAR_ERRNO_H_
+#define BASE_SCOPED_CLEAR_ERRNO_H_
+
+#include <errno.h>
+
+#include "base/macros.h"
+
+namespace base {
+
+// Simple scoper that saves the current value of errno, resets it to 0, and on
+// destruction puts the old value back.
+class ScopedClearErrno {
+ public:
+ ScopedClearErrno() : old_errno_(errno) {
+ errno = 0;
+ }
+ ~ScopedClearErrno() {
+ if (errno == 0)
+ errno = old_errno_;
+ }
+
+ private:
+ const int old_errno_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedClearErrno);
+};
+
+} // namespace base
+
+#endif // BASE_SCOPED_CLEAR_ERRNO_H_
diff --git a/libchrome/base/scoped_clear_errno_unittest.cc b/libchrome/base/scoped_clear_errno_unittest.cc
new file mode 100644
index 0000000..8afb33e
--- /dev/null
+++ b/libchrome/base/scoped_clear_errno_unittest.cc
@@ -0,0 +1,30 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <errno.h>
+
+#include "base/scoped_clear_errno.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(ScopedClearErrno, TestNoError) {
+ errno = 1;
+ {
+ ScopedClearErrno clear_error;
+ EXPECT_EQ(0, errno);
+ }
+ EXPECT_EQ(1, errno);
+}
+
+TEST(ScopedClearErrno, TestError) {
+ errno = 1;
+ {
+ ScopedClearErrno clear_error;
+ errno = 2;
+ }
+ EXPECT_EQ(2, errno);
+}
+
+} // namespace base
diff --git a/libchrome/base/scoped_generic.h b/libchrome/base/scoped_generic.h
new file mode 100644
index 0000000..84de6b7
--- /dev/null
+++ b/libchrome/base/scoped_generic.h
@@ -0,0 +1,182 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SCOPED_GENERIC_H_
+#define BASE_SCOPED_GENERIC_H_
+
+#include <stdlib.h>
+
+#include <algorithm>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+
+namespace base {
+
+// This class acts like ScopedPtr with a custom deleter (although is slightly
+// less fancy in some of the more esoteric respects) except that it keeps a
+// copy of the object rather than a pointer, and we require that the contained
+// object has some kind of "invalid" value.
+//
+// Defining a scoper based on this class allows you to get a scoper for
+// non-pointer types without having to write custom code for set, reset, and
+// move, etc. and get almost identical semantics that people are used to from
+// scoped_ptr.
+//
+// It is intended that you will typedef this class with an appropriate deleter
+// to implement clean up tasks for objects that act like pointers from a
+// resource management standpoint but aren't, such as file descriptors and
+// various types of operating system handles. Using scoped_ptr for these
+// things requires that you keep a pointer to the handle valid for the lifetime
+// of the scoper (which is easy to mess up).
+//
+// For an object to be able to be put into a ScopedGeneric, it must support
+// standard copyable semantics and have a specific "invalid" value. The traits
+// must define a free function and also the invalid value to assign for
+// default-constructed and released objects.
+//
+// struct FooScopedTraits {
+// // It's assumed that this is a fast inline function with little-to-no
+// // penalty for duplicate calls. This must be a static function even
+// // for stateful traits.
+// static int InvalidValue() {
+// return 0;
+// }
+//
+// // This free function will not be called if f == InvalidValue()!
+// static void Free(int f) {
+// ::FreeFoo(f);
+// }
+// };
+//
+// typedef ScopedGeneric<int, FooScopedTraits> ScopedFoo;
+template<typename T, typename Traits>
+class ScopedGeneric {
+ private:
+ // This must be first since it's used inline below.
+ //
+ // Use the empty base class optimization to allow us to have a D
+ // member, while avoiding any space overhead for it when D is an
+ // empty class. See e.g. http://www.cantrip.org/emptyopt.html for a good
+ // discussion of this technique.
+ struct Data : public Traits {
+ explicit Data(const T& in) : generic(in) {}
+ Data(const T& in, const Traits& other) : Traits(other), generic(in) {}
+ T generic;
+ };
+
+ public:
+ typedef T element_type;
+ typedef Traits traits_type;
+
+ ScopedGeneric() : data_(traits_type::InvalidValue()) {}
+
+ // Constructor. Takes responsibility for freeing the resource associated with
+ // the object T.
+ explicit ScopedGeneric(const element_type& value) : data_(value) {}
+
+ // Constructor. Allows initialization of a stateful traits object.
+ ScopedGeneric(const element_type& value, const traits_type& traits)
+ : data_(value, traits) {
+ }
+
+ // Move constructor. Allows initialization from a ScopedGeneric rvalue.
+ ScopedGeneric(ScopedGeneric<T, Traits>&& rvalue)
+ : data_(rvalue.release(), rvalue.get_traits()) {
+ }
+
+ ~ScopedGeneric() {
+ FreeIfNecessary();
+ }
+
+ // operator=. Allows assignment from a ScopedGeneric rvalue.
+ ScopedGeneric& operator=(ScopedGeneric<T, Traits>&& rvalue) {
+ reset(rvalue.release());
+ return *this;
+ }
+
+ // Frees the currently owned object, if any. Then takes ownership of a new
+  // object, if given. Self-resets are not allowed, just as on scoped_ptr. See
+ // http://crbug.com/162971
+ void reset(const element_type& value = traits_type::InvalidValue()) {
+ if (data_.generic != traits_type::InvalidValue() && data_.generic == value)
+ abort();
+ FreeIfNecessary();
+ data_.generic = value;
+ }
+
+ void swap(ScopedGeneric& other) {
+ // Standard swap idiom: 'using std::swap' ensures that std::swap is
+ // present in the overload set, but we call swap unqualified so that
+ // any more-specific overloads can be used, if available.
+ using std::swap;
+ swap(static_cast<Traits&>(data_), static_cast<Traits&>(other.data_));
+ swap(data_.generic, other.data_.generic);
+ }
+
+ // Release the object. The return value is the current object held by this
+ // object. After this operation, this object will hold a null value, and
+ // will not own the object any more.
+ element_type release() WARN_UNUSED_RESULT {
+ element_type old_generic = data_.generic;
+ data_.generic = traits_type::InvalidValue();
+ return old_generic;
+ }
+
+ const element_type& get() const { return data_.generic; }
+
+ // Returns true if this object doesn't hold the special null value for the
+ // associated data type.
+ bool is_valid() const { return data_.generic != traits_type::InvalidValue(); }
+
+ bool operator==(const element_type& value) const {
+ return data_.generic == value;
+ }
+ bool operator!=(const element_type& value) const {
+ return data_.generic != value;
+ }
+
+ Traits& get_traits() { return data_; }
+ const Traits& get_traits() const { return data_; }
+
+ private:
+ void FreeIfNecessary() {
+ if (data_.generic != traits_type::InvalidValue()) {
+ data_.Free(data_.generic);
+ data_.generic = traits_type::InvalidValue();
+ }
+ }
+
+ // Forbid comparison. If U != T, it totally doesn't make sense, and if U ==
+ // T, it still doesn't make sense because you should never have the same
+ // object owned by two different ScopedGenerics.
+ template <typename T2, typename Traits2> bool operator==(
+ const ScopedGeneric<T2, Traits2>& p2) const;
+ template <typename T2, typename Traits2> bool operator!=(
+ const ScopedGeneric<T2, Traits2>& p2) const;
+
+ Data data_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedGeneric);
+};
+// Swaps two scopers; takes non-const references because the member swap()
+// that this forwards to mutates both operands.
+template<class T, class Traits>
+void swap(ScopedGeneric<T, Traits>& a,
+          ScopedGeneric<T, Traits>& b) {
+  a.swap(b);
+}
+
+template<class T, class Traits>
+bool operator==(const T& value, const ScopedGeneric<T, Traits>& scoped) {
+ return value == scoped.get();
+}
+
+template<class T, class Traits>
+bool operator!=(const T& value, const ScopedGeneric<T, Traits>& scoped) {
+ return value != scoped.get();
+}
+
+} // namespace base
+
+#endif // BASE_SCOPED_GENERIC_H_
diff --git a/libchrome/base/scoped_generic_unittest.cc b/libchrome/base/scoped_generic_unittest.cc
new file mode 100644
index 0000000..5a6abfb
--- /dev/null
+++ b/libchrome/base/scoped_generic_unittest.cc
@@ -0,0 +1,172 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/scoped_generic.h"
+
+#include <utility>
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+struct IntTraits {
+ IntTraits(std::vector<int>* freed) : freed_ints(freed) {}
+
+ static int InvalidValue() {
+ return -1;
+ }
+ void Free(int value) {
+ freed_ints->push_back(value);
+ }
+
+ std::vector<int>* freed_ints;
+};
+
+typedef ScopedGeneric<int, IntTraits> ScopedInt;
+
+} // namespace
+
+TEST(ScopedGenericTest, ScopedGeneric) {
+ std::vector<int> values_freed;
+ IntTraits traits(&values_freed);
+
+ // Invalid case, delete should not be called.
+ {
+ ScopedInt a(IntTraits::InvalidValue(), traits);
+ }
+ EXPECT_TRUE(values_freed.empty());
+
+ // Simple deleting case.
+ static const int kFirst = 0;
+ {
+ ScopedInt a(kFirst, traits);
+ }
+ ASSERT_EQ(1u, values_freed.size());
+ ASSERT_EQ(kFirst, values_freed[0]);
+ values_freed.clear();
+
+ // Release should return the right value and leave the object empty.
+ {
+ ScopedInt a(kFirst, traits);
+ EXPECT_EQ(kFirst, a.release());
+
+ ScopedInt b(IntTraits::InvalidValue(), traits);
+ EXPECT_EQ(IntTraits::InvalidValue(), b.release());
+ }
+ ASSERT_TRUE(values_freed.empty());
+
+ // Reset should free the old value, then the new one should go away when
+ // it goes out of scope.
+ static const int kSecond = 1;
+ {
+ ScopedInt b(kFirst, traits);
+ b.reset(kSecond);
+ ASSERT_EQ(1u, values_freed.size());
+ ASSERT_EQ(kFirst, values_freed[0]);
+ }
+ ASSERT_EQ(2u, values_freed.size());
+ ASSERT_EQ(kSecond, values_freed[1]);
+ values_freed.clear();
+
+ // Swap.
+ {
+ ScopedInt a(kFirst, traits);
+ ScopedInt b(kSecond, traits);
+ a.swap(b);
+ EXPECT_TRUE(values_freed.empty()); // Nothing should be freed.
+ EXPECT_EQ(kSecond, a.get());
+ EXPECT_EQ(kFirst, b.get());
+ }
+ // Values should be deleted in the opposite order.
+ ASSERT_EQ(2u, values_freed.size());
+ EXPECT_EQ(kFirst, values_freed[0]);
+ EXPECT_EQ(kSecond, values_freed[1]);
+ values_freed.clear();
+
+ // Move constructor.
+ {
+ ScopedInt a(kFirst, traits);
+ ScopedInt b(std::move(a));
+ EXPECT_TRUE(values_freed.empty()); // Nothing should be freed.
+ ASSERT_EQ(IntTraits::InvalidValue(), a.get());
+ ASSERT_EQ(kFirst, b.get());
+ }
+
+ ASSERT_EQ(1u, values_freed.size());
+ ASSERT_EQ(kFirst, values_freed[0]);
+ values_freed.clear();
+
+ // Move assign.
+ {
+ ScopedInt a(kFirst, traits);
+ ScopedInt b(kSecond, traits);
+ b = std::move(a);
+ ASSERT_EQ(1u, values_freed.size());
+ EXPECT_EQ(kSecond, values_freed[0]);
+ ASSERT_EQ(IntTraits::InvalidValue(), a.get());
+ ASSERT_EQ(kFirst, b.get());
+ }
+
+ ASSERT_EQ(2u, values_freed.size());
+ EXPECT_EQ(kFirst, values_freed[1]);
+ values_freed.clear();
+}
+
+TEST(ScopedGenericTest, Operators) {
+ std::vector<int> values_freed;
+ IntTraits traits(&values_freed);
+
+ static const int kFirst = 0;
+ static const int kSecond = 1;
+ {
+ ScopedInt a(kFirst, traits);
+ EXPECT_TRUE(a == kFirst);
+ EXPECT_FALSE(a != kFirst);
+ EXPECT_FALSE(a == kSecond);
+ EXPECT_TRUE(a != kSecond);
+
+ EXPECT_TRUE(kFirst == a);
+ EXPECT_FALSE(kFirst != a);
+ EXPECT_FALSE(kSecond == a);
+ EXPECT_TRUE(kSecond != a);
+ }
+
+ // is_valid().
+ {
+ ScopedInt a(kFirst, traits);
+ EXPECT_TRUE(a.is_valid());
+ a.reset();
+ EXPECT_FALSE(a.is_valid());
+ }
+}
+
+// Cheesy manual "no compile" test for manually validating changes.
+#if 0
+TEST(ScopedGenericTest, NoCompile) {
+ // Assignment shouldn't work.
+ /*{
+ ScopedInt a(kFirst, traits);
+ ScopedInt b(a);
+ }*/
+
+ // Comparison shouldn't work.
+ /*{
+ ScopedInt a(kFirst, traits);
+ ScopedInt b(kFirst, traits);
+ if (a == b) {
+ }
+ }*/
+
+ // Implicit conversion to bool shouldn't work.
+ /*{
+ ScopedInt a(kFirst, traits);
+ bool result = a;
+ }*/
+}
+#endif
+
+} // namespace base
diff --git a/libchrome/base/scoped_observer.h b/libchrome/base/scoped_observer.h
new file mode 100644
index 0000000..13d7ca8
--- /dev/null
+++ b/libchrome/base/scoped_observer.h
@@ -0,0 +1,63 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SCOPED_OBSERVER_H_
+#define BASE_SCOPED_OBSERVER_H_
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/stl_util.h"
+
+// ScopedObserver is used to keep track of the set of sources an object has
+// attached itself to as an observer. When ScopedObserver is destroyed it
+// removes the object as an observer from all sources it has been added to.
+template <class Source, class Observer>
+class ScopedObserver {
+ public:
+ explicit ScopedObserver(Observer* observer) : observer_(observer) {}
+
+ ~ScopedObserver() {
+ RemoveAll();
+ }
+
+ // Adds the object passed to the constructor as an observer on |source|.
+ void Add(Source* source) {
+ sources_.push_back(source);
+ source->AddObserver(observer_);
+ }
+
+  // Removes the object passed to the constructor as an observer from |source|.
+ void Remove(Source* source) {
+ auto it = std::find(sources_.begin(), sources_.end(), source);
+ DCHECK(it != sources_.end());
+ sources_.erase(it);
+ source->RemoveObserver(observer_);
+ }
+
+ void RemoveAll() {
+ for (size_t i = 0; i < sources_.size(); ++i)
+ sources_[i]->RemoveObserver(observer_);
+ sources_.clear();
+ }
+
+ bool IsObserving(Source* source) const {
+ return ContainsValue(sources_, source);
+ }
+
+ bool IsObservingSources() const { return !sources_.empty(); }
+
+ private:
+ Observer* observer_;
+
+ std::vector<Source*> sources_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedObserver);
+};
+
+#endif // BASE_SCOPED_OBSERVER_H_
diff --git a/libchrome/base/security_unittest.cc b/libchrome/base/security_unittest.cc
new file mode 100644
index 0000000..af9d2bf
--- /dev/null
+++ b/libchrome/base/security_unittest.cc
@@ -0,0 +1,187 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/memory/free_deleter.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_POSIX)
+#include <sys/mman.h>
+#include <unistd.h>
+#endif
+
+using std::nothrow;
+using std::numeric_limits;
+
+namespace {
+
+// This function acts as a compiler optimization barrier. We use it to
+// prevent the compiler from making an expression a compile-time constant.
+// We also use it so that the compiler doesn't discard certain return values
+// as something we don't need (see the comment with calloc below).
+template <typename Type>
+NOINLINE Type HideValueFromCompiler(volatile Type value) {
+#if defined(__GNUC__)
+ // In a GCC compatible compiler (GCC or Clang), make this compiler barrier
+ // more robust than merely using "volatile".
+ __asm__ volatile ("" : "+r" (value));
+#endif // __GNUC__
+ return value;
+}
+
+// Tcmalloc and Windows allocator shim support setting malloc limits.
+// - NO_TCMALLOC (should be defined if compiled with use_allocator!="tcmalloc")
+// - ADDRESS_SANITIZER and SYZYASAN because they have their own memory allocator
+// - IOS does not use tcmalloc
+// - OS_MACOSX does not use tcmalloc
+// - Windows allocator shim defines ALLOCATOR_SHIM
+#if (!defined(NO_TCMALLOC) || defined(ALLOCATOR_SHIM)) && \
+ !defined(ADDRESS_SANITIZER) && !defined(OS_IOS) && !defined(OS_MACOSX) && \
+ !defined(SYZYASAN)
+#define MALLOC_OVERFLOW_TEST(function) function
+#else
+#define MALLOC_OVERFLOW_TEST(function) DISABLED_##function
+#endif
+
+#if defined(OS_LINUX) && defined(__x86_64__)
+// Detect runtime TCMalloc bypasses.
+bool IsTcMallocBypassed() {
+ // This should detect a TCMalloc bypass from Valgrind.
+ char* g_slice = getenv("G_SLICE");
+ if (g_slice && !strcmp(g_slice, "always-malloc"))
+ return true;
+ return false;
+}
+#endif
+
+// There are platforms where these tests are known to fail. We would like to
+// be able to easily check the status on the bots, but marking tests as
+// FAILS_ is too clunky.
+void OverflowTestsSoftExpectTrue(bool overflow_detected) {
+ if (!overflow_detected) {
+#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_MACOSX)
+ // Sadly, on Linux, Android, and OSX we don't have a good story yet. Don't
+ // fail the test, but report.
+ printf("Platform has overflow: %s\n",
+ !overflow_detected ? "yes." : "no.");
+#else
+ // Otherwise, fail the test. (Note: EXPECT are ok in subfunctions, ASSERT
+ // aren't).
+ EXPECT_TRUE(overflow_detected);
+#endif
+ }
+}
+
+#if defined(OS_IOS) || defined(OS_WIN) || defined(OS_LINUX)
+#define MAYBE_NewOverflow DISABLED_NewOverflow
+#else
+#define MAYBE_NewOverflow NewOverflow
+#endif
+// Test array[TooBig][X] and array[X][TooBig] allocations for int overflows.
+// IOS doesn't honor nothrow, so disable the test there.
+// Crashes on Windows Dbg builds, disable there as well.
+// Disabled on Linux because failing Linux Valgrind bot, and Valgrind exclusions
+// are not currently read. See http://crbug.com/582398
+TEST(SecurityTest, MAYBE_NewOverflow) {
+ const size_t kArraySize = 4096;
+ // We want something "dynamic" here, so that the compiler doesn't
+ // immediately reject crazy arrays.
+ const size_t kDynamicArraySize = HideValueFromCompiler(kArraySize);
+ // numeric_limits are still not constexpr until we switch to C++11, so we
+ // use an ugly cast.
+ const size_t kMaxSizeT = ~static_cast<size_t>(0);
+ ASSERT_EQ(numeric_limits<size_t>::max(), kMaxSizeT);
+ const size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
+ const size_t kDynamicArraySize2 = HideValueFromCompiler(kArraySize2);
+ {
+ std::unique_ptr<char[][kArraySize]> array_pointer(
+ new (nothrow) char[kDynamicArraySize2][kArraySize]);
+ OverflowTestsSoftExpectTrue(!array_pointer);
+ }
+ // On windows, the compiler prevents static array sizes of more than
+ // 0x7fffffff (error C2148).
+#if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
+ ALLOW_UNUSED_LOCAL(kDynamicArraySize);
+#else
+ {
+ std::unique_ptr<char[][kArraySize2]> array_pointer(
+ new (nothrow) char[kDynamicArraySize][kArraySize2]);
+ OverflowTestsSoftExpectTrue(!array_pointer);
+ }
+#endif // !defined(OS_WIN) || !defined(ARCH_CPU_64_BITS)
+}
+
+#if defined(OS_LINUX) && defined(__x86_64__)
+// Check if ptr1 and ptr2 are separated by less than size chars.
+bool ArePointersToSameArea(void* ptr1, void* ptr2, size_t size) {
+ ptrdiff_t ptr_diff = reinterpret_cast<char*>(std::max(ptr1, ptr2)) -
+ reinterpret_cast<char*>(std::min(ptr1, ptr2));
+ return static_cast<size_t>(ptr_diff) <= size;
+}
+
+// Check if TCMalloc uses an underlying random memory allocator.
+TEST(SecurityTest, MALLOC_OVERFLOW_TEST(RandomMemoryAllocations)) {
+ if (IsTcMallocBypassed())
+ return;
+ size_t kPageSize = 4096; // We support x86_64 only.
+ // Check that malloc() returns an address that is neither the kernel's
+ // un-hinted mmap area, nor the current brk() area. The first malloc() may
+ // not be at a random address because TCMalloc will first exhaust any memory
+ // that it has allocated early on, before starting the sophisticated
+ // allocators.
+ void* default_mmap_heap_address =
+ mmap(0, kPageSize, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(default_mmap_heap_address,
+ static_cast<void*>(MAP_FAILED));
+ ASSERT_EQ(munmap(default_mmap_heap_address, kPageSize), 0);
+ void* brk_heap_address = sbrk(0);
+ ASSERT_NE(brk_heap_address, reinterpret_cast<void*>(-1));
+ ASSERT_TRUE(brk_heap_address != NULL);
+ // 1 MB should get us past what TCMalloc pre-allocated before initializing
+ // the sophisticated allocators.
+ size_t kAllocSize = 1<<20;
+ std::unique_ptr<char, base::FreeDeleter> ptr(
+ static_cast<char*>(malloc(kAllocSize)));
+ ASSERT_TRUE(ptr != NULL);
+ // If two pointers are separated by less than 512MB, they are considered
+ // to be in the same area.
+ // Our random pointer could be anywhere within 0x3fffffffffff (46bits),
+  // and we are checking that it's not within 1GB (30 bits) from two
+ // addresses (brk and mmap heap). We have roughly one chance out of
+ // 2^15 to flake.
+ const size_t kAreaRadius = 1<<29;
+ bool in_default_mmap_heap = ArePointersToSameArea(
+ ptr.get(), default_mmap_heap_address, kAreaRadius);
+ EXPECT_FALSE(in_default_mmap_heap);
+
+ bool in_default_brk_heap = ArePointersToSameArea(
+ ptr.get(), brk_heap_address, kAreaRadius);
+ EXPECT_FALSE(in_default_brk_heap);
+
+ // In the implementation, we always mask our random addresses with
+ // kRandomMask, so we use it as an additional detection mechanism.
+ const uintptr_t kRandomMask = 0x3fffffffffffULL;
+ bool impossible_random_address =
+ reinterpret_cast<uintptr_t>(ptr.get()) & ~kRandomMask;
+ EXPECT_FALSE(impossible_random_address);
+}
+
+#endif // defined(OS_LINUX) && defined(__x86_64__)
+
+} // namespace
diff --git a/libchrome/base/sequence_checker.h b/libchrome/base/sequence_checker.h
new file mode 100644
index 0000000..ad01828
--- /dev/null
+++ b/libchrome/base/sequence_checker.h
@@ -0,0 +1,62 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SEQUENCE_CHECKER_H_
+#define BASE_SEQUENCE_CHECKER_H_
+
+// See comments for the similar block in thread_checker.h.
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
+#define ENABLE_SEQUENCE_CHECKER 1
+#else
+#define ENABLE_SEQUENCE_CHECKER 0
+#endif
+
+#include "base/sequence_checker_impl.h"
+
+namespace base {
+
+// Do nothing implementation, for use in release mode.
+//
+// Note: You should almost always use the SequenceChecker class to get
+// the right version for your build configuration.
+class SequenceCheckerDoNothing {
+ public:
+ bool CalledOnValidSequencedThread() const {
+ return true;
+ }
+
+ void DetachFromSequence() {}
+};
+
+// SequenceChecker is a helper class used to help verify that some
+// methods of a class are called in sequence -- that is, called from
+// the same SequencedTaskRunner. It is a generalization of
+// ThreadChecker; see comments in sequence_checker_impl.h for details.
+//
+// Example:
+// class MyClass {
+// public:
+// void Foo() {
+// DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+// ... (do stuff) ...
+// }
+//
+// private:
+// SequenceChecker sequence_checker_;
+// }
+//
+// In Release mode, CalledOnValidSequencedThread() will always return true.
+#if ENABLE_SEQUENCE_CHECKER
+class SequenceChecker : public SequenceCheckerImpl {
+};
+#else
+class SequenceChecker : public SequenceCheckerDoNothing {
+};
+#endif // ENABLE_SEQUENCE_CHECKER
+
+#undef ENABLE_SEQUENCE_CHECKER
+
+} // namespace base
+
+#endif // BASE_SEQUENCE_CHECKER_H_
diff --git a/libchrome/base/sequence_checker_impl.cc b/libchrome/base/sequence_checker_impl.cc
new file mode 100644
index 0000000..e95b8ee
--- /dev/null
+++ b/libchrome/base/sequence_checker_impl.cc
@@ -0,0 +1,46 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sequence_checker_impl.h"
+
+namespace base {
+
+SequenceCheckerImpl::SequenceCheckerImpl()
+ : sequence_token_assigned_(false) {
+ AutoLock auto_lock(lock_);
+ EnsureSequenceTokenAssigned();
+}
+
+SequenceCheckerImpl::~SequenceCheckerImpl() {}
+
+bool SequenceCheckerImpl::CalledOnValidSequencedThread() const {
+ AutoLock auto_lock(lock_);
+ EnsureSequenceTokenAssigned();
+
+ // If this thread is not associated with a SequencedWorkerPool,
+ // SequenceChecker behaves as a ThreadChecker. See header for details.
+ if (!sequence_token_.IsValid())
+ return thread_checker_.CalledOnValidThread();
+
+ return sequence_token_.Equals(
+ SequencedWorkerPool::GetSequenceTokenForCurrentThread());
+}
+
+void SequenceCheckerImpl::DetachFromSequence() {
+ AutoLock auto_lock(lock_);
+ thread_checker_.DetachFromThread();
+ sequence_token_assigned_ = false;
+ sequence_token_ = SequencedWorkerPool::SequenceToken();
+}
+
+void SequenceCheckerImpl::EnsureSequenceTokenAssigned() const {
+ lock_.AssertAcquired();
+ if (sequence_token_assigned_)
+ return;
+
+ sequence_token_assigned_ = true;
+ sequence_token_ = SequencedWorkerPool::GetSequenceTokenForCurrentThread();
+}
+
+} // namespace base
diff --git a/libchrome/base/sequence_checker_impl.h b/libchrome/base/sequence_checker_impl.h
new file mode 100644
index 0000000..e3c5fed
--- /dev/null
+++ b/libchrome/base/sequence_checker_impl.h
@@ -0,0 +1,52 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SEQUENCE_CHECKER_IMPL_H_
+#define BASE_SEQUENCE_CHECKER_IMPL_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/sequenced_worker_pool.h"
+#include "base/threading/thread_checker_impl.h"
+
+namespace base {
+
+// SequenceCheckerImpl is used to help verify that some methods of a
+// class are called in sequence -- that is, called from the same
+// SequencedTaskRunner. It is a generalization of ThreadChecker; in
+// particular, it behaves exactly like ThreadChecker if constructed
+// on a thread that is not part of a SequencedWorkerPool.
+class BASE_EXPORT SequenceCheckerImpl {
+ public:
+ SequenceCheckerImpl();
+ ~SequenceCheckerImpl();
+
+  // Returns whether we are being called on the same sequence token
+ // as previous calls. If there is no associated sequence, then returns
+ // whether we are being called on the underlying ThreadChecker's thread.
+ bool CalledOnValidSequencedThread() const;
+
+ // Unbinds the checker from the currently associated sequence. The
+  // checker will be re-bound on the next CalledOnValidSequencedThread() call.
+ void DetachFromSequence();
+
+ private:
+ void EnsureSequenceTokenAssigned() const;
+
+ // Guards all variables below.
+ mutable Lock lock_;
+
+ // Used if |sequence_token_| is not valid.
+ ThreadCheckerImpl thread_checker_;
+ mutable bool sequence_token_assigned_;
+
+ mutable SequencedWorkerPool::SequenceToken sequence_token_;
+
+ DISALLOW_COPY_AND_ASSIGN(SequenceCheckerImpl);
+};
+
+} // namespace base
+
+#endif // BASE_SEQUENCE_CHECKER_IMPL_H_
diff --git a/libchrome/base/sequence_checker_unittest.cc b/libchrome/base/sequence_checker_unittest.cc
new file mode 100644
index 0000000..196bb1c
--- /dev/null
+++ b/libchrome/base/sequence_checker_unittest.cc
@@ -0,0 +1,335 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sequence_checker.h"
+
+#include <stddef.h>
+
+#include <memory>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
+#include "base/test/sequenced_worker_pool_owner.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Duplicated from base/sequence_checker.h so that we can be good citizens
+// there and undef the macro.
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
+#define ENABLE_SEQUENCE_CHECKER 1
+#else
+#define ENABLE_SEQUENCE_CHECKER 0
+#endif
+
+namespace base {
+
+namespace {
+
+const size_t kNumWorkerThreads = 3;
+
+// Simple class to exercise the basics of SequenceChecker.
+// DoStuff should verify that it's called on a valid sequenced thread.
+// SequenceCheckedObject can be destroyed on any thread (like WeakPtr).
+class SequenceCheckedObject {
+ public:
+ SequenceCheckedObject() {}
+ ~SequenceCheckedObject() {}
+
+ // Verifies that it was called on the same thread as the constructor.
+ void DoStuff() {
+ DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+ }
+
+ void DetachFromSequence() {
+ sequence_checker_.DetachFromSequence();
+ }
+
+ private:
+ SequenceChecker sequence_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(SequenceCheckedObject);
+};
+
+class SequenceCheckerTest : public testing::Test {
+ public:
+ SequenceCheckerTest() : other_thread_("sequence_checker_test_other_thread") {}
+
+ void SetUp() override {
+ other_thread_.Start();
+ ResetPool();
+ }
+
+ void TearDown() override {
+ other_thread_.Stop();
+ }
+
+ protected:
+ base::Thread* other_thread() { return &other_thread_; }
+
+ const scoped_refptr<SequencedWorkerPool>& pool() {
+ return pool_owner_->pool();
+ }
+
+ void PostDoStuffToWorkerPool(SequenceCheckedObject* sequence_checked_object,
+ const std::string& token_name) {
+ pool()->PostNamedSequencedWorkerTask(
+ token_name,
+ FROM_HERE,
+ base::Bind(&SequenceCheckedObject::DoStuff,
+ base::Unretained(sequence_checked_object)));
+ }
+
+ void PostDoStuffToOtherThread(
+ SequenceCheckedObject* sequence_checked_object) {
+ other_thread()->task_runner()->PostTask(
+ FROM_HERE, base::Bind(&SequenceCheckedObject::DoStuff,
+ base::Unretained(sequence_checked_object)));
+ }
+
+ void PostDeleteToOtherThread(
+ std::unique_ptr<SequenceCheckedObject> sequence_checked_object) {
+ other_thread()->message_loop()->task_runner()->DeleteSoon(
+ FROM_HERE, sequence_checked_object.release());
+ }
+
+ // Destroys the SequencedWorkerPool instance, blocking until it is fully shut
+ // down, and creates a new instance.
+ void ResetPool() {
+ pool_owner_.reset(new SequencedWorkerPoolOwner(kNumWorkerThreads, "test"));
+ }
+
+ void MethodOnDifferentThreadDeathTest();
+ void DetachThenCallFromDifferentThreadDeathTest();
+ void DifferentSequenceTokensDeathTest();
+ void WorkerPoolAndSimpleThreadDeathTest();
+ void TwoDifferentWorkerPoolsDeathTest();
+
+ private:
+ MessageLoop message_loop_; // Needed by SequencedWorkerPool to function.
+ base::Thread other_thread_;
+ std::unique_ptr<SequencedWorkerPoolOwner> pool_owner_;
+};
+
+TEST_F(SequenceCheckerTest, CallsAllowedOnSameThread) {
+ std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
+ new SequenceCheckedObject);
+
+ // Verify that DoStuff doesn't assert.
+ sequence_checked_object->DoStuff();
+
+ // Verify that the destructor doesn't assert.
+ sequence_checked_object.reset();
+}
+
+TEST_F(SequenceCheckerTest, DestructorAllowedOnDifferentThread) {
+ std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
+ new SequenceCheckedObject);
+
+ // Verify the destructor doesn't assert when called on a different thread.
+ PostDeleteToOtherThread(std::move(sequence_checked_object));
+ other_thread()->Stop();
+}
+
+TEST_F(SequenceCheckerTest, DetachFromSequence) {
+ std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
+ new SequenceCheckedObject);
+
+ // Verify that DoStuff doesn't assert when called on a different thread after
+ // a call to DetachFromSequence.
+ sequence_checked_object->DetachFromSequence();
+
+ PostDoStuffToOtherThread(sequence_checked_object.get());
+ other_thread()->Stop();
+}
+
+TEST_F(SequenceCheckerTest, SameSequenceTokenValid) {
+ std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
+ new SequenceCheckedObject);
+
+ sequence_checked_object->DetachFromSequence();
+ PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+ PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+ PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+ PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+ pool()->FlushForTesting();
+
+ PostDeleteToOtherThread(std::move(sequence_checked_object));
+ other_thread()->Stop();
+}
+
+TEST_F(SequenceCheckerTest, DetachSequenceTokenValid) {
+ std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
+ new SequenceCheckedObject);
+
+ sequence_checked_object->DetachFromSequence();
+ PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+ PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+ pool()->FlushForTesting();
+
+ sequence_checked_object->DetachFromSequence();
+ PostDoStuffToWorkerPool(sequence_checked_object.get(), "B");
+ PostDoStuffToWorkerPool(sequence_checked_object.get(), "B");
+ pool()->FlushForTesting();
+
+ PostDeleteToOtherThread(std::move(sequence_checked_object));
+ other_thread()->Stop();
+}
+
+#if GTEST_HAS_DEATH_TEST || !ENABLE_SEQUENCE_CHECKER
+
// Calls DoStuff() on a thread other than the one the object was created
// (and thus bound) on; expected to assert when the checker is enabled.
void SequenceCheckerTest::MethodOnDifferentThreadDeathTest() {
  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
      new SequenceCheckedObject);

  // DoStuff should assert in debug builds only when called on a
  // different thread.
  PostDoStuffToOtherThread(sequence_checked_object.get());
  other_thread()->Stop();
}

#if ENABLE_SEQUENCE_CHECKER
TEST_F(SequenceCheckerTest, MethodNotAllowedOnDifferentThreadDeathTestInDebug) {
  // The default style "fast" does not support multi-threaded tests.
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  ASSERT_DEATH({
    MethodOnDifferentThreadDeathTest();
  }, "");
}
#else
// With the checker compiled out, the same scenario must run to completion.
TEST_F(SequenceCheckerTest, MethodAllowedOnDifferentThreadDeathTestInRelease) {
  MethodOnDifferentThreadDeathTest();
}
#endif  // ENABLE_SEQUENCE_CHECKER
+
// Detaching lets the first call on a new thread rebind the object; a later
// call back on the original thread is then expected to assert when the
// checker is enabled.
void SequenceCheckerTest::DetachThenCallFromDifferentThreadDeathTest() {
  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
      new SequenceCheckedObject);

  // DoStuff doesn't assert when called on a different thread
  // after a call to DetachFromSequence.
  sequence_checked_object->DetachFromSequence();
  PostDoStuffToOtherThread(sequence_checked_object.get());
  other_thread()->Stop();

  // DoStuff should assert in debug builds only after moving to
  // another thread.
  sequence_checked_object->DoStuff();
}

#if ENABLE_SEQUENCE_CHECKER
TEST_F(SequenceCheckerTest, DetachFromSequenceDeathTestInDebug) {
  // The default style "fast" does not support multi-threaded tests.
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  ASSERT_DEATH({
    DetachThenCallFromDifferentThreadDeathTest();
  }, "");
}
#else
// With the checker compiled out, the same scenario must run to completion.
TEST_F(SequenceCheckerTest, DetachFromThreadDeathTestInRelease) {
  DetachThenCallFromDifferentThreadDeathTest();
}
#endif  // ENABLE_SEQUENCE_CHECKER
+
// Posts to two different sequence tokens ("A" and "B") without an
// intervening DetachFromSequence(); the second sequence is expected to
// trigger the assert when the checker is enabled.
void SequenceCheckerTest::DifferentSequenceTokensDeathTest() {
  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
      new SequenceCheckedObject);

  sequence_checked_object->DetachFromSequence();
  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
  PostDoStuffToWorkerPool(sequence_checked_object.get(), "B");
  PostDoStuffToWorkerPool(sequence_checked_object.get(), "B");
  pool()->FlushForTesting();

  PostDeleteToOtherThread(std::move(sequence_checked_object));
  other_thread()->Stop();
}

#if ENABLE_SEQUENCE_CHECKER
TEST_F(SequenceCheckerTest, DifferentSequenceTokensDeathTestInDebug) {
  // The default style "fast" does not support multi-threaded tests.
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  ASSERT_DEATH({
    DifferentSequenceTokensDeathTest();
  }, "");
}
#else
// With the checker compiled out, the same scenario must run to completion.
TEST_F(SequenceCheckerTest, DifferentSequenceTokensDeathTestInRelease) {
  DifferentSequenceTokensDeathTest();
}
#endif  // ENABLE_SEQUENCE_CHECKER
+
// Binds the object to worker-pool sequence "A", then calls DoStuff() from a
// plain thread without detaching; expected to assert when the checker is
// enabled.
void SequenceCheckerTest::WorkerPoolAndSimpleThreadDeathTest() {
  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
      new SequenceCheckedObject);

  sequence_checked_object->DetachFromSequence();
  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
  pool()->FlushForTesting();

  PostDoStuffToOtherThread(sequence_checked_object.get());
  other_thread()->Stop();
}

#if ENABLE_SEQUENCE_CHECKER
TEST_F(SequenceCheckerTest, WorkerPoolAndSimpleThreadDeathTestInDebug) {
  // The default style "fast" does not support multi-threaded tests.
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  ASSERT_DEATH({
    WorkerPoolAndSimpleThreadDeathTest();
  }, "");
}
#else
// With the checker compiled out, the same scenario must run to completion.
TEST_F(SequenceCheckerTest, WorkerPoolAndSimpleThreadDeathTestInRelease) {
  WorkerPoolAndSimpleThreadDeathTest();
}
#endif  // ENABLE_SEQUENCE_CHECKER
+
// Posts tasks under the same sequence name ("A") to two *different* worker
// pools; the wrapped death test below expects this to assert when the
// checker is enabled (same-named sequences in different pools are not the
// same sequence).
void SequenceCheckerTest::TwoDifferentWorkerPoolsDeathTest() {
  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
      new SequenceCheckedObject);

  sequence_checked_object->DetachFromSequence();
  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
  pool()->FlushForTesting();

  SequencedWorkerPoolOwner second_pool_owner(kNumWorkerThreads, "test2");
  second_pool_owner.pool()->PostNamedSequencedWorkerTask(
      "A",
      FROM_HERE,
      base::Bind(&SequenceCheckedObject::DoStuff,
                 base::Unretained(sequence_checked_object.get())));
  second_pool_owner.pool()->FlushForTesting();
}

#if ENABLE_SEQUENCE_CHECKER
TEST_F(SequenceCheckerTest, TwoDifferentWorkerPoolsDeathTestInDebug) {
  // The default style "fast" does not support multi-threaded tests.
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  ASSERT_DEATH({
    TwoDifferentWorkerPoolsDeathTest();
  }, "");
}
#else
// With the checker compiled out, the same scenario must run to completion.
TEST_F(SequenceCheckerTest, TwoDifferentWorkerPoolsDeathTestInRelease) {
  TwoDifferentWorkerPoolsDeathTest();
}
#endif  // ENABLE_SEQUENCE_CHECKER
+
+#endif // GTEST_HAS_DEATH_TEST || !ENABLE_SEQUENCE_CHECKER
+
+} // namespace
+
+} // namespace base
+
+// Just in case we ever get lumped together with other compilation units.
+#undef ENABLE_SEQUENCE_CHECKER
diff --git a/libchrome/base/sequenced_task_runner.cc b/libchrome/base/sequenced_task_runner.cc
new file mode 100644
index 0000000..00d4048
--- /dev/null
+++ b/libchrome/base/sequenced_task_runner.cc
@@ -0,0 +1,31 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sequenced_task_runner.h"
+
+#include "base/bind.h"
+
+namespace base {
+
// A non-nestable task with no delay is a non-nestable delayed task with a
// zero delay.
bool SequencedTaskRunner::PostNonNestableTask(
    const tracked_objects::Location& from_here,
    const Closure& task) {
  return PostNonNestableDelayedTask(from_here, task, base::TimeDelta());
}

// Type-erased deletion: |deleter| is DeleteHelper<T>::DoDelete, which casts
// |object| back to T* before deleting it (sequenced_task_runner_helpers.h).
bool SequencedTaskRunner::DeleteSoonInternal(
    const tracked_objects::Location& from_here,
    void(*deleter)(const void*),
    const void* object) {
  return PostNonNestableTask(from_here, Bind(deleter, object));
}

// Type-erased release: |releaser| is ReleaseHelper<T>::DoRelease, which calls
// Release() on |object| after casting it back to T*.
bool SequencedTaskRunner::ReleaseSoonInternal(
    const tracked_objects::Location& from_here,
    void(*releaser)(const void*),
    const void* object) {
  return PostNonNestableTask(from_here, Bind(releaser, object));
}
+
+} // namespace base
diff --git a/libchrome/base/sequenced_task_runner.h b/libchrome/base/sequenced_task_runner.h
new file mode 100644
index 0000000..6bb3f2b
--- /dev/null
+++ b/libchrome/base/sequenced_task_runner.h
@@ -0,0 +1,159 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SEQUENCED_TASK_RUNNER_H_
+#define BASE_SEQUENCED_TASK_RUNNER_H_
+
+#include "base/base_export.h"
+#include "base/sequenced_task_runner_helpers.h"
+#include "base/task_runner.h"
+
+namespace base {
+
+// A SequencedTaskRunner is a subclass of TaskRunner that provides
+// additional guarantees on the order that tasks are started, as well
+// as guarantees on when tasks are in sequence, i.e. one task finishes
+// before the other one starts.
+//
+// Summary
+// -------
+// Non-nested tasks with the same delay will run one by one in FIFO
+// order.
+//
+// Detailed guarantees
+// -------------------
+//
+// SequencedTaskRunner also adds additional methods for posting
+// non-nestable tasks. In general, an implementation of TaskRunner
+// may expose task-running methods which are themselves callable from
+// within tasks. A non-nestable task is one that is guaranteed to not
+// be run from within an already-running task. Conversely, a nestable
+// task (the default) is a task that can be run from within an
+// already-running task.
+//
+// The guarantees of SequencedTaskRunner are as follows:
+//
+// - Given two tasks T2 and T1, T2 will start after T1 starts if:
+//
+// * T2 is posted after T1; and
+// * T2 has equal or higher delay than T1; and
+// * T2 is non-nestable or T1 is nestable.
+//
+// - If T2 will start after T1 starts by the above guarantee, then
+// T2 will start after T1 finishes and is destroyed if:
+//
+// * T2 is non-nestable, or
+// * T1 doesn't call any task-running methods.
+//
+// - If T2 will start after T1 finishes by the above guarantee, then
+// all memory changes in T1 and T1's destruction will be visible
+// to T2.
+//
+// - If T2 runs nested within T1 via a call to the task-running
+// method M, then all memory changes in T1 up to the call to M
+// will be visible to T2, and all memory changes in T2 will be
+// visible to T1 from the return from M.
+//
+// Note that SequencedTaskRunner does not guarantee that tasks are run
+// on a single dedicated thread, although the above guarantees provide
+// most (but not all) of the same guarantees. If you do need to
+// guarantee that tasks are run on a single dedicated thread, see
+// SingleThreadTaskRunner (in single_thread_task_runner.h).
+//
+// Some corollaries to the above guarantees, assuming the tasks in
+// question don't call any task-running methods:
+//
+// - Tasks posted via PostTask are run in FIFO order.
+//
+// - Tasks posted via PostNonNestableTask are run in FIFO order.
+//
+// - Tasks posted with the same delay and the same nestable state
+// are run in FIFO order.
+//
+// - A list of tasks with the same nestable state posted in order of
+// non-decreasing delay is run in FIFO order.
+//
+// - A list of tasks posted in order of non-decreasing delay with at
+// most a single change in nestable state from nestable to
+// non-nestable is run in FIFO order. (This is equivalent to the
+// statement of the first guarantee above.)
+//
+// Some theoretical implementations of SequencedTaskRunner:
+//
+// - A SequencedTaskRunner that wraps a regular TaskRunner but makes
+// sure that only one task at a time is posted to the TaskRunner,
+// with appropriate memory barriers in between tasks.
+//
+// - A SequencedTaskRunner that, for each task, spawns a joinable
+// thread to run that task and immediately quit, and then
+// immediately joins that thread.
+//
+// - A SequencedTaskRunner that stores the list of posted tasks and
+// has a method Run() that runs each runnable task in FIFO order
+// that can be called from any thread, but only if another
+// (non-nested) Run() call isn't already happening.
class BASE_EXPORT SequencedTaskRunner : public TaskRunner {
 public:
  // The two PostNonNestable*Task methods below are like their
  // nestable equivalents in TaskRunner, but they guarantee that the
  // posted task will not run nested within an already-running task.
  //
  // A simple corollary is that posting a task as non-nestable can
  // only delay when the task gets run. That is, posting a task as
  // non-nestable may not affect when the task gets run, or it could
  // make it run later than it normally would, but it won't make it
  // run earlier than it normally would.

  // TODO(akalin): Get rid of the boolean return value for the methods
  // below.

  // Posts |task| with zero delay; implemented in terms of
  // PostNonNestableDelayedTask().
  bool PostNonNestableTask(const tracked_objects::Location& from_here,
                           const Closure& task);

  virtual bool PostNonNestableDelayedTask(
      const tracked_objects::Location& from_here,
      const Closure& task,
      base::TimeDelta delay) = 0;

  // Submits a non-nestable task to delete the given object. Returns
  // true if the object may be deleted at some point in the future,
  // and false if the object definitely will not be deleted.
  template <class T>
  bool DeleteSoon(const tracked_objects::Location& from_here,
                  const T* object) {
    return
        subtle::DeleteHelperInternal<T, bool>::DeleteViaSequencedTaskRunner(
            this, from_here, object);
  }

  // Submits a non-nestable task to release (i.e. call Release() on) the
  // given object. Returns true if the object may be released at some point
  // in the future, and false if the object definitely will not be released.
  template <class T>
  bool ReleaseSoon(const tracked_objects::Location& from_here,
                   T* object) {
    return
        subtle::ReleaseHelperInternal<T, bool>::ReleaseViaSequencedTaskRunner(
            this, from_here, object);
  }

 protected:
  // Not publicly destructible.
  ~SequencedTaskRunner() override {}

 private:
  // The type-erasure helpers call the *Internal methods below.
  template <class T, class R> friend class subtle::DeleteHelperInternal;
  template <class T, class R> friend class subtle::ReleaseHelperInternal;

  bool DeleteSoonInternal(const tracked_objects::Location& from_here,
                          void(*deleter)(const void*),
                          const void* object);

  bool ReleaseSoonInternal(const tracked_objects::Location& from_here,
                           void(*releaser)(const void*),
                           const void* object);
};
+
+} // namespace base
+
+#endif // BASE_SEQUENCED_TASK_RUNNER_H_
diff --git a/libchrome/base/sequenced_task_runner_helpers.h b/libchrome/base/sequenced_task_runner_helpers.h
new file mode 100644
index 0000000..7980b46
--- /dev/null
+++ b/libchrome/base/sequenced_task_runner_helpers.h
@@ -0,0 +1,113 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SEQUENCED_TASK_RUNNER_HELPERS_H_
+#define BASE_SEQUENCED_TASK_RUNNER_HELPERS_H_
+
+#include "base/debug/alias.h"
+#include "base/macros.h"
+
+// TODO(akalin): Investigate whether it's possible to just have
+// SequencedTaskRunner use these helpers (instead of MessageLoop).
+// Then we can just move these to sequenced_task_runner.h.
+
+namespace tracked_objects {
+class Location;
+}
+
+namespace base {
+
+namespace subtle {
+template <class T, class R> class DeleteHelperInternal;
+template <class T, class R> class ReleaseHelperInternal;
+}
+
+// Template helpers which use function indirection to erase T from the
+// function signature while still remembering it so we can call the
+// correct destructor/release function.
+//
+// We use this trick so we don't need to include bind.h in a header
+// file like sequenced_task_runner.h. We also wrap the helpers in a
+// templated class to make it easier for users of DeleteSoon to
+// declare the helper as a friend.
template <class T>
class DeleteHelper {
 private:
  // Only DeleteHelperInternal may reach DoDelete, which guarantees the
  // void* passed in really points to a T.
  template <class T2, class R> friend class subtle::DeleteHelperInternal;

  // Casts |object| back to its true type and deletes it.
  static void DoDelete(const void* object) {
    delete reinterpret_cast<const T*>(object);
  }

  DISALLOW_COPY_AND_ASSIGN(DeleteHelper);
};
+
template <class T>
class ReleaseHelper {
 private:
  // Only ReleaseHelperInternal may reach DoRelease, which guarantees the
  // void* passed in really points to a T.
  template <class T2, class R> friend class subtle::ReleaseHelperInternal;

  // Casts |object| back to its true type and calls Release() on it.
  static void DoRelease(const void* object) {
    reinterpret_cast<const T*>(object)->Release();
  }

  DISALLOW_COPY_AND_ASSIGN(ReleaseHelper);
};
+
+namespace subtle {
+
+// An internal SequencedTaskRunner-like class helper for DeleteHelper
+// and ReleaseHelper. We don't want to expose the Do*() functions
// directly since the void* argument makes it possible to
// pass an object of the wrong type to delete. Instead, we force
+// callers to go through these internal helpers for type
+// safety. SequencedTaskRunner-like classes which expose DeleteSoon or
+// ReleaseSoon methods should friend the appropriate helper and
+// implement a corresponding *Internal method with the following
+// signature:
+//
+// bool(const tracked_objects::Location&,
+// void(*function)(const void*),
+// void* object)
+//
+// An implementation of this function should simply create a
+// base::Closure from (function, object) and return the result of
+// posting the task.
template <class T, class ReturnType>
class DeleteHelperInternal {
 public:
  // Posts a non-nestable task to |sequenced_task_runner| that deletes
  // |object|. Templated on the runner type so any class exposing a
  // DeleteSoonInternal() method can be used.
  template <class SequencedTaskRunnerType>
  static ReturnType DeleteViaSequencedTaskRunner(
      SequencedTaskRunnerType* sequenced_task_runner,
      const tracked_objects::Location& from_here,
      const T* object) {
    return sequenced_task_runner->DeleteSoonInternal(
        from_here, &DeleteHelper<T>::DoDelete, object);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(DeleteHelperInternal);
};
+
template <class T, class ReturnType>
class ReleaseHelperInternal {
 public:
  // Posts a non-nestable task to |sequenced_task_runner| that calls
  // Release() on |object|. Templated on the runner type so any class
  // exposing a ReleaseSoonInternal() method can be used.
  template <class SequencedTaskRunnerType>
  static ReturnType ReleaseViaSequencedTaskRunner(
      SequencedTaskRunnerType* sequenced_task_runner,
      const tracked_objects::Location& from_here,
      const T* object) {
    return sequenced_task_runner->ReleaseSoonInternal(
        from_here, &ReleaseHelper<T>::DoRelease, object);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(ReleaseHelperInternal);
};
+
+} // namespace subtle
+
+} // namespace base
+
+#endif // BASE_SEQUENCED_TASK_RUNNER_HELPERS_H_
diff --git a/libchrome/base/sha1.h b/libchrome/base/sha1.h
new file mode 100644
index 0000000..902e301
--- /dev/null
+++ b/libchrome/base/sha1.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SHA1_H_
+#define BASE_SHA1_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+
+// These functions perform SHA-1 operations.
+
+static const size_t kSHA1Length = 20; // Length in bytes of a SHA-1 hash.
+
+// Computes the SHA-1 hash of the input string |str| and returns the full
+// hash.
+BASE_EXPORT std::string SHA1HashString(const std::string& str);
+
+// Computes the SHA-1 hash of the |len| bytes in |data| and puts the hash
+// in |hash|. |hash| must be kSHA1Length bytes long.
+BASE_EXPORT void SHA1HashBytes(const unsigned char* data, size_t len,
+ unsigned char* hash);
+
+} // namespace base
+
+#endif // BASE_SHA1_H_
diff --git a/libchrome/base/sha1_portable.cc b/libchrome/base/sha1_portable.cc
new file mode 100644
index 0000000..dd2ab6f
--- /dev/null
+++ b/libchrome/base/sha1_portable.cc
@@ -0,0 +1,217 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sha1.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+
+namespace base {
+
+// Implementation of SHA-1. Only handles data in byte-sized blocks,
+// which simplifies the code a fair bit.
+
+// Identifier names follow notation in FIPS PUB 180-3, where you'll
+// also find a description of the algorithm:
+// http://csrc.nist.gov/publications/fips/fips180-3/fips180-3_final.pdf
+
+// Usage example:
+//
+// SecureHashAlgorithm sha;
+// while(there is data to hash)
+// sha.Update(moredata, size of data);
+// sha.Final();
+// memcpy(somewhere, sha.Digest(), 20);
+//
+// to reuse the instance of sha, call sha.Init();
+
+// TODO(jhawkins): Replace this implementation with a per-platform
+// implementation using each platform's crypto library. See
+// http://crbug.com/47218
+
class SecureHashAlgorithm {
 public:
  SecureHashAlgorithm() { Init(); }

  // Size of the resulting digest, in bytes (20 for SHA-1).
  static const int kDigestSizeBytes;

  // Resets all state; call to reuse an instance for a new message.
  void Init();
  // Appends |nbytes| bytes at |data| to the message being hashed.
  void Update(const void* data, size_t nbytes);
  // Finishes the computation; afterwards Digest() returns the hash.
  void Final();

  // 20 bytes of message digest.
  const unsigned char* Digest() const {
    return reinterpret_cast<const unsigned char*>(H);
  }

 private:
  void Pad();
  void Process();

  // Working variables for one 80-round compression (FIPS 180-3 notation).
  uint32_t A, B, C, D, E;

  // Intermediate hash value; holds the digest after Final().
  uint32_t H[5];

  // One 64-byte message block, viewable either as raw bytes (M, filled by
  // Update/Pad) or as 32-bit words (W, the message schedule expanded and
  // consumed by Process).
  union {
    uint32_t W[80];
    uint8_t M[64];
  };

  // Number of bytes currently buffered in M.
  uint32_t cursor;
  // Total message length so far, in bits (see Update).
  uint64_t l;
};
+
// Round-dependent logic function f(t, B, C, D) from FIPS 180-3 section
// 4.1.1: rounds 0-19 use Ch, rounds 40-59 use Maj, and all remaining
// rounds use Parity.
static inline uint32_t f(uint32_t t, uint32_t B, uint32_t C, uint32_t D) {
  switch (t / 20) {
    case 0:
      return (B & C) | ((~B) & D);         // Ch(B, C, D)
    case 2:
      return (B & C) | (B & D) | (C & D);  // Maj(B, C, D)
    default:
      return B ^ C ^ D;                    // Parity(B, C, D)
  }
}
+
// Circular left rotation of |X| by |n| bits (ROTL^n in FIPS 180-3).
// Callers pass 0 < n < 32, so the complementary shift is well defined.
static inline uint32_t S(uint32_t n, uint32_t X) {
  const uint32_t shifted_out = X >> (32 - n);
  return (X << n) | shifted_out;
}
+
// Round constant K(t) from FIPS 180-3 section 4.2.1: one constant per
// 20-round band, with the last band covering every round from 60 up.
static inline uint32_t K(uint32_t t) {
  static const uint32_t kRoundConstants[4] = {
      0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6};
  const uint32_t band = t / 20;
  return kRoundConstants[band < 3 ? band : 3];
}
+
// Reverses the byte order of the 32-bit word at |t| (big <-> little endian).
static inline void swapends(uint32_t* t) {
  const uint32_t v = *t;
  const uint32_t byte0 = v >> 24;
  const uint32_t byte1 = (v >> 8) & 0xff00;
  const uint32_t byte2 = (v & 0xff00) << 8;
  const uint32_t byte3 = v << 24;
  *t = byte0 | byte1 | byte2 | byte3;
}
+
+const int SecureHashAlgorithm::kDigestSizeBytes = 20;
+
void SecureHashAlgorithm::Init() {
  A = 0;
  B = 0;
  C = 0;
  D = 0;
  E = 0;
  cursor = 0;
  l = 0;
  // Initial hash value H(0) from FIPS 180-3 section 5.3.1.
  H[0] = 0x67452301;
  H[1] = 0xefcdab89;
  H[2] = 0x98badcfe;
  H[3] = 0x10325476;
  H[4] = 0xc3d2e1f0;
}
+
void SecureHashAlgorithm::Final() {
  // Append padding and the length field, then compress the final block.
  Pad();
  Process();

  // The digest is defined in big-endian byte order; byte-swap each word of
  // H. NOTE(review): the unconditional swap assumes a little-endian host.
  for (int t = 0; t < 5; ++t)
    swapends(&H[t]);
}
+
void SecureHashAlgorithm::Update(const void* data, size_t nbytes) {
  const uint8_t* d = reinterpret_cast<const uint8_t*>(data);
  while (nbytes--) {
    M[cursor++] = *d++;
    // Process() resets cursor to 0 once a full 64-byte block is buffered.
    if (cursor >= 64)
      Process();
    l += 8;  // Total message length is tracked in bits.
  }
}
+
void SecureHashAlgorithm::Pad() {
  // Append the mandatory 0x80 terminator byte (a 1 bit then zeros).
  M[cursor++] = 0x80;

  if (cursor > 64-8) {
    // No room left in this block for the 8-byte length field:
    // pad out to next block
    while (cursor < 64)
      M[cursor++] = 0;

    Process();
  }

  // Zero-fill up to the length field.
  while (cursor < 64-8)
    M[cursor++] = 0;

  // Append the message length in bits as a 64-bit big-endian integer.
  M[cursor++] = (l >> 56) & 0xff;
  M[cursor++] = (l >> 48) & 0xff;
  M[cursor++] = (l >> 40) & 0xff;
  M[cursor++] = (l >> 32) & 0xff;
  M[cursor++] = (l >> 24) & 0xff;
  M[cursor++] = (l >> 16) & 0xff;
  M[cursor++] = (l >> 8) & 0xff;
  M[cursor++] = l & 0xff;
}
+
// Runs the SHA-1 compression function on the 64-byte block buffered in M/W,
// folding the result into H, and resets cursor for the next block.
void SecureHashAlgorithm::Process() {
  uint32_t t;

  // Each a...e corresponds to a section in the FIPS 180-3 algorithm.

  // a.
  //
  // W and M are in a union, so no need to memcpy.
  // memcpy(W, M, sizeof(M));
  //
  // The message bytes are big-endian words; swap them into host order.
  for (t = 0; t < 16; ++t)
    swapends(&W[t]);

  // b. Expand the 16 message words into the full 80-word schedule.
  for (t = 16; t < 80; ++t)
    W[t] = S(1, W[t - 3] ^ W[t - 8] ^ W[t - 14] ^ W[t - 16]);

  // c. Load the working variables from the current hash value.
  A = H[0];
  B = H[1];
  C = H[2];
  D = H[3];
  E = H[4];

  // d. The 80 rounds of the compression function.
  for (t = 0; t < 80; ++t) {
    uint32_t TEMP = S(5, A) + f(t, B, C, D) + E + W[t] + K(t);
    E = D;
    D = C;
    C = S(30, B);
    B = A;
    A = TEMP;
  }

  // e. Add this block's result into the intermediate hash value.
  H[0] += A;
  H[1] += B;
  H[2] += C;
  H[3] += D;
  H[4] += E;

  cursor = 0;
}
+
// Convenience wrapper: hashes |str| and returns the 20-byte digest as a
// std::string (raw bytes, not hex).
std::string SHA1HashString(const std::string& str) {
  char hash[SecureHashAlgorithm::kDigestSizeBytes];
  SHA1HashBytes(reinterpret_cast<const unsigned char*>(str.c_str()),
                str.length(), reinterpret_cast<unsigned char*>(hash));
  return std::string(hash, SecureHashAlgorithm::kDigestSizeBytes);
}
+
// Streams |len| bytes of |data| through a SecureHashAlgorithm and copies
// the resulting 20-byte digest into |hash| (which the header requires to be
// at least kSHA1Length bytes).
void SHA1HashBytes(const unsigned char* data, size_t len,
                   unsigned char* hash) {
  SecureHashAlgorithm sha;
  sha.Update(data, len);
  sha.Final();

  memcpy(hash, sha.Digest(), SecureHashAlgorithm::kDigestSizeBytes);
}
+
+} // namespace base
diff --git a/libchrome/base/sha1_unittest.cc b/libchrome/base/sha1_unittest.cc
new file mode 100644
index 0000000..ea9cf63
--- /dev/null
+++ b/libchrome/base/sha1_unittest.cc
@@ -0,0 +1,109 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sha1.h"
+
+#include <stddef.h>
+
+#include <string>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
// Known-answer tests: the expected digests below are the SHA-1 examples
// from FIPS 180-2, exercising both the std::string and the raw-bytes entry
// points.
TEST(SHA1Test, Test1) {
  // Example A.1 from FIPS 180-2: one-block message.
  std::string input = "abc";

  int expected[] = { 0xa9, 0x99, 0x3e, 0x36,
                     0x47, 0x06, 0x81, 0x6a,
                     0xba, 0x3e, 0x25, 0x71,
                     0x78, 0x50, 0xc2, 0x6c,
                     0x9c, 0xd0, 0xd8, 0x9d };

  std::string output = base::SHA1HashString(input);
  // Mask to avoid sign extension of (possibly signed) char.
  for (size_t i = 0; i < base::kSHA1Length; i++)
    EXPECT_EQ(expected[i], output[i] & 0xFF);
}

TEST(SHA1Test, Test2) {
  // Example A.2 from FIPS 180-2: multi-block message.
  std::string input =
      "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq";

  int expected[] = { 0x84, 0x98, 0x3e, 0x44,
                     0x1c, 0x3b, 0xd2, 0x6e,
                     0xba, 0xae, 0x4a, 0xa1,
                     0xf9, 0x51, 0x29, 0xe5,
                     0xe5, 0x46, 0x70, 0xf1 };

  std::string output = base::SHA1HashString(input);
  for (size_t i = 0; i < base::kSHA1Length; i++)
    EXPECT_EQ(expected[i], output[i] & 0xFF);
}

TEST(SHA1Test, Test3) {
  // Example A.3 from FIPS 180-2: long message (10^6 repetitions of 'a').
  std::string input(1000000, 'a');

  int expected[] = { 0x34, 0xaa, 0x97, 0x3c,
                     0xd4, 0xc4, 0xda, 0xa4,
                     0xf6, 0x1e, 0xeb, 0x2b,
                     0xdb, 0xad, 0x27, 0x31,
                     0x65, 0x34, 0x01, 0x6f };

  std::string output = base::SHA1HashString(input);
  for (size_t i = 0; i < base::kSHA1Length; i++)
    EXPECT_EQ(expected[i], output[i] & 0xFF);
}

// The *Bytes variants repeat the three examples above through
// base::SHA1HashBytes, which writes into a caller-provided buffer.
TEST(SHA1Test, Test1Bytes) {
  // Example A.1 from FIPS 180-2: one-block message.
  std::string input = "abc";
  unsigned char output[base::kSHA1Length];

  unsigned char expected[] = { 0xa9, 0x99, 0x3e, 0x36,
                               0x47, 0x06, 0x81, 0x6a,
                               0xba, 0x3e, 0x25, 0x71,
                               0x78, 0x50, 0xc2, 0x6c,
                               0x9c, 0xd0, 0xd8, 0x9d };

  base::SHA1HashBytes(reinterpret_cast<const unsigned char*>(input.c_str()),
                      input.length(), output);
  for (size_t i = 0; i < base::kSHA1Length; i++)
    EXPECT_EQ(expected[i], output[i]);
}

TEST(SHA1Test, Test2Bytes) {
  // Example A.2 from FIPS 180-2: multi-block message.
  std::string input =
      "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq";
  unsigned char output[base::kSHA1Length];

  unsigned char expected[] = { 0x84, 0x98, 0x3e, 0x44,
                               0x1c, 0x3b, 0xd2, 0x6e,
                               0xba, 0xae, 0x4a, 0xa1,
                               0xf9, 0x51, 0x29, 0xe5,
                               0xe5, 0x46, 0x70, 0xf1 };

  base::SHA1HashBytes(reinterpret_cast<const unsigned char*>(input.c_str()),
                      input.length(), output);
  for (size_t i = 0; i < base::kSHA1Length; i++)
    EXPECT_EQ(expected[i], output[i]);
}

TEST(SHA1Test, Test3Bytes) {
  // Example A.3 from FIPS 180-2: long message (10^6 repetitions of 'a').
  std::string input(1000000, 'a');
  unsigned char output[base::kSHA1Length];

  unsigned char expected[] = { 0x34, 0xaa, 0x97, 0x3c,
                               0xd4, 0xc4, 0xda, 0xa4,
                               0xf6, 0x1e, 0xeb, 0x2b,
                               0xdb, 0xad, 0x27, 0x31,
                               0x65, 0x34, 0x01, 0x6f };

  base::SHA1HashBytes(reinterpret_cast<const unsigned char*>(input.c_str()),
                      input.length(), output);
  for (size_t i = 0; i < base::kSHA1Length; i++)
    EXPECT_EQ(expected[i], output[i]);
}
diff --git a/libchrome/base/single_thread_task_runner.h b/libchrome/base/single_thread_task_runner.h
new file mode 100644
index 0000000..6e93193
--- /dev/null
+++ b/libchrome/base/single_thread_task_runner.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SINGLE_THREAD_TASK_RUNNER_H_
+#define BASE_SINGLE_THREAD_TASK_RUNNER_H_
+
+#include "base/base_export.h"
+#include "base/sequenced_task_runner.h"
+
+namespace base {
+
+// A SingleThreadTaskRunner is a SequencedTaskRunner with one more
+// guarantee; namely, that all tasks are run on a single dedicated
+// thread. Most use cases require only a SequencedTaskRunner, unless
+// there is a specific need to run tasks on only a single thread.
+//
+// SingleThreadTaskRunner implementations might:
+// - Post tasks to an existing thread's MessageLoop (see
+// MessageLoop::task_runner()).
+// - Create their own worker thread and MessageLoop to post tasks to.
+// - Add tasks to a FIFO and signal to a non-MessageLoop thread for them to
+// be processed. This allows TaskRunner-oriented code run on threads
+// running other kinds of message loop, e.g. Jingle threads.
class BASE_EXPORT SingleThreadTaskRunner : public SequencedTaskRunner {
 public:
  // A more explicit alias to RunsTasksOnCurrentThread().
  bool BelongsToCurrentThread() const {
    return RunsTasksOnCurrentThread();
  }

 protected:
  // Not publicly destructible.
  ~SingleThreadTaskRunner() override {}
};
+
+} // namespace base
+
+#endif // BASE_SINGLE_THREAD_TASK_RUNNER_H_
diff --git a/libchrome/base/stl_util.h b/libchrome/base/stl_util.h
new file mode 100644
index 0000000..12e226a
--- /dev/null
+++ b/libchrome/base/stl_util.h
@@ -0,0 +1,262 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Derived from google3/util/gtl/stl_util.h
+
+#ifndef BASE_STL_UTIL_H_
+#define BASE_STL_UTIL_H_
+
+#include <algorithm>
+#include <functional>
+#include <iterator>
+#include <string>
+#include <vector>
+
+#include "base/logging.h"
+
+// Clears internal memory of an STL object.
+// STL clear()/reserve(0) does not always free internal memory allocated
+// This function uses swap/destructor to ensure the internal memory is freed.
// Clears internal memory of an STL object.
// STL clear()/reserve(0) does not always free internal memory allocated,
// so swap with a fresh default-constructed instance whose destructor will
// release the old buffers.
template<class T>
void STLClearObject(T* obj) {
  T empty;
  empty.swap(*obj);
  // Sometimes "T empty" allocates objects with memory (arena
  // implementation?). Hence the additional reserve(0), even though it
  // doesn't always work.
  obj->reserve(0);
}
+
+// For a range within a container of pointers, calls delete (non-array version)
+// on these pointers.
+// NOTE: for these three functions, we could just implement a DeleteObject
+// functor and then call for_each() on the range and functor, but this
+// requires us to pull in all of algorithm.h, which seems expensive.
+// For hash_[multi]set, it is important that this deletes behind the iterator
+// because the hash_set may call the hash function on the iterator when it is
// advanced, which could result in the hash function trying to dereference a
+// stale pointer.
// Calls delete (non-array version) on every pointer in [begin, end).
// The iterator is advanced *before* its element is deleted: hash-based
// containers may hash the current element on increment, so the pointee
// must still be alive at that point.
template <class ForwardIterator>
void STLDeleteContainerPointers(ForwardIterator begin, ForwardIterator end) {
  for (ForwardIterator it = begin; it != end;) {
    ForwardIterator doomed = it++;
    delete *doomed;
  }
}
+
+// For a range within a container of pairs, calls delete (non-array version) on
+// BOTH items in the pairs.
+// NOTE: Like STLDeleteContainerPointers, it is important that this deletes
+// behind the iterator because if both the key and value are deleted, the
+// container may call the hash function on the iterator when it is advanced,
+// which could result in the hash function trying to dereference a stale
+// pointer.
// Calls delete (non-array version) on BOTH members of every pair in
// [begin, end). The iterator is advanced before the deletions because the
// container may inspect the current element while advancing, so it must
// still be valid at that point.
template <class ForwardIterator>
void STLDeleteContainerPairPointers(ForwardIterator begin,
                                    ForwardIterator end) {
  for (ForwardIterator it = begin; it != end;) {
    ForwardIterator doomed = it++;
    delete doomed->first;
    delete doomed->second;
  }
}
+
+// For a range within a container of pairs, calls delete (non-array version) on
+// the FIRST item in the pairs.
+// NOTE: Like STLDeleteContainerPointers, deleting behind the iterator.
// Calls delete (non-array version) on the FIRST member of every pair in
// [begin, end), advancing the iterator before each deletion (see
// STLDeleteContainerPointers).
template <class ForwardIterator>
void STLDeleteContainerPairFirstPointers(ForwardIterator begin,
                                         ForwardIterator end) {
  for (ForwardIterator it = begin; it != end;) {
    ForwardIterator doomed = it++;
    delete doomed->first;
  }
}
+
+// For a range within a container of pairs, calls delete.
+// NOTE: Like STLDeleteContainerPointers, deleting behind the iterator.
+// Deleting the value does not always invalidate the iterator, but it may
+// do so if the key is a pointer into the value object.
// Calls delete (non-array version) on the SECOND member of every pair in
// [begin, end). Advances before deleting: deleting the value does not
// always invalidate the iterator, but it may do so if the key is a pointer
// into the value object.
template <class ForwardIterator>
void STLDeleteContainerPairSecondPointers(ForwardIterator begin,
                                          ForwardIterator end) {
  for (ForwardIterator it = begin; it != end;) {
    ForwardIterator doomed = it++;
    delete doomed->second;
  }
}
+
+// Counts the number of instances of val in a container.
// Counts the number of instances of |val| in |container|.
template <typename Container, typename T>
typename std::iterator_traits<
    typename Container::const_iterator>::difference_type
STLCount(const Container& container, const T& val) {
  return std::count(std::begin(container), std::end(container), val);
}
+
+// Return a mutable char* pointing to a string's internal buffer,
+// which may not be null-terminated. Writing through this pointer will
+// modify the string.
+//
+// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
+// next call to a string method that invalidates iterators.
+//
+// As of 2006-04, there is no standard-blessed way of getting a
+// mutable reference to a string's internal buffer. However, issue 530
+// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-active.html#530)
+// proposes this as the method. According to Matt Austern, this should
+// already work on all current implementations.
// Returns a mutable char* pointing at |str|'s internal buffer (which may
// not be null-terminated); writes through it modify the string. Returns
// NULL for an empty string. Valid for indices [0, str->size()) until the
// next iterator-invalidating string operation.
inline char* string_as_array(std::string* str) {
  // DO NOT USE const_cast<char*>(str->data())
  if (str->empty())
    return NULL;
  return &(*str)[0];
}
+
+// The following functions are useful for cleaning up STL containers whose
+// elements point to allocated memory.
+
+// STLDeleteElements() deletes all the elements in an STL container and clears
+// the container. This function is suitable for use with a vector, set,
+// hash_set, or any other STL container which defines sensible begin(), end(),
+// and clear() methods.
+//
+// If container is NULL, this function is a no-op.
+//
+// As an alternative to calling STLDeleteElements() directly, consider
+// STLElementDeleter (defined below), which ensures that your container's
+// elements are deleted when the STLElementDeleter goes out of scope.
// Deletes every element of |*container| and clears it. No-op when given
// NULL. Suitable for any container with begin(), end() and clear().
// Deletion happens behind the iterator (the same discipline as
// STLDeleteContainerPointers) so hash-based containers never touch a
// freed element while advancing.
template <class T>
void STLDeleteElements(T* container) {
  if (!container)
    return;
  typename T::iterator it = container->begin();
  while (it != container->end()) {
    typename T::iterator doomed = it;
    ++it;
    delete *doomed;
  }
  container->clear();
}
+
+// Given an STL container consisting of (key, value) pairs, STLDeleteValues
+// deletes all the "value" components and clears the container. Does nothing
+// in the case it's given a NULL pointer.
// For a container of (key, value) pairs, deletes every "value" component
// and clears the container. No-op when given NULL. Deletion happens behind
// the iterator so the container never touches a freed value while
// advancing.
template <class T>
void STLDeleteValues(T* container) {
  if (!container)
    return;
  typename T::iterator it = container->begin();
  while (it != container->end()) {
    typename T::iterator doomed = it;
    ++it;
    delete doomed->second;
  }
  container->clear();
}
+
+
+// The following classes provide a convenient way to delete all elements or
// values from STL containers when they go out of scope. This greatly
+// simplifies code that creates temporary objects and has multiple return
+// statements. Example:
+//
+// vector<MyProto *> tmp_proto;
+// STLElementDeleter<vector<MyProto *> > d(&tmp_proto);
+// if (...) return false;
+// ...
+// return success;
+
+// Given a pointer to an STL container this class will delete all the element
+// pointers when it goes out of scope.
// Given a pointer to an STL container, this class will delete all the
// element pointers when it goes out of scope (via STLDeleteElements).
// |container| must outlive this object.
//
// Fix: the constructor/destructor were declared as STLElementDeleter<T>/
// ~STLElementDeleter<T>; the simple-template-id form is redundant and
// ill-formed in C++20, so the plain injected-class-name is used instead.
template<class T>
class STLElementDeleter {
 public:
  STLElementDeleter(T* container) : container_(container) {}
  ~STLElementDeleter() { STLDeleteElements(container_); }

 private:
  T* container_;
};
+
+// Given a pointer to an STL container this class will delete all the value
+// pointers when it goes out of scope.
// Given a pointer to an STL container of (key, value) pairs, this class
// will delete all the value pointers when it goes out of scope (via
// STLDeleteValues). |container| must outlive this object.
//
// Fix: the constructor/destructor were declared as STLValueDeleter<T>/
// ~STLValueDeleter<T>; the simple-template-id form is redundant and
// ill-formed in C++20, so the plain injected-class-name is used instead.
template<class T>
class STLValueDeleter {
 public:
  STLValueDeleter(T* container) : container_(container) {}
  ~STLValueDeleter() { STLDeleteValues(container_); }

 private:
  T* container_;
};
+
+// Test to see if a set, map, hash_set or hash_map contains a particular key.
+// Returns true if the key is in the collection.
// Returns true if |collection| (a set/map/hash_set/hash_map style
// container) contains |key|.
template <typename Collection, typename Key>
bool ContainsKey(const Collection& collection, const Key& key) {
  const typename Collection::const_iterator it = collection.find(key);
  return it != collection.end();
}
+
+// Test to see if a collection like a vector contains a particular value.
+// Returns true if the value is in the collection.
// Returns true if |collection| (a vector-like sequence) contains |value|,
// found by linear search.
template <typename Collection, typename Value>
bool ContainsValue(const Collection& collection, const Value& value) {
  const typename Collection::const_iterator it =
      std::find(collection.begin(), collection.end(), value);
  return it != collection.end();
}
+
+namespace base {
+
+// Returns true if the container is sorted.
// Returns true if the container is sorted in non-decreasing order.
template <typename Container>
bool STLIsSorted(const Container& cont) {
  // Scan in reverse with std::less so that value_type only has to
  // implement operator<; any adjacent out-of-order pair means unsorted.
  typedef typename Container::value_type ValueType;
  return std::adjacent_find(cont.rbegin(), cont.rend(),
                            std::less<ValueType>()) == cont.rend();
}
+
+// Returns a new ResultType containing the difference of two sorted containers.
// Returns a new ResultType containing the difference of two sorted
// containers (elements of |a1| not present in |a2|).
template <typename ResultType, typename Arg1, typename Arg2>
ResultType STLSetDifference(const Arg1& a1, const Arg2& a2) {
  DCHECK(STLIsSorted(a1));
  DCHECK(STLIsSorted(a2));
  ResultType result;
  std::set_difference(a1.begin(), a1.end(), a2.begin(), a2.end(),
                      std::inserter(result, result.end()));
  return result;
}
+
+// Returns a new ResultType containing the union of two sorted containers.
// Returns a new ResultType containing the union of two sorted containers.
template <typename ResultType, typename Arg1, typename Arg2>
ResultType STLSetUnion(const Arg1& a1, const Arg2& a2) {
  DCHECK(STLIsSorted(a1));
  DCHECK(STLIsSorted(a2));
  ResultType merged;
  std::set_union(a1.begin(), a1.end(), a2.begin(), a2.end(),
                 std::inserter(merged, merged.end()));
  return merged;
}
+
+// Returns a new ResultType containing the intersection of two sorted
+// containers.
// Returns a new ResultType containing the intersection of two sorted
// containers.
template <typename ResultType, typename Arg1, typename Arg2>
ResultType STLSetIntersection(const Arg1& a1, const Arg2& a2) {
  DCHECK(STLIsSorted(a1));
  DCHECK(STLIsSorted(a2));
  ResultType common;
  std::set_intersection(a1.begin(), a1.end(), a2.begin(), a2.end(),
                        std::inserter(common, common.end()));
  return common;
}
+
+// Returns true if the sorted container |a1| contains all elements of the sorted
+// container |a2|.
// Returns true if the sorted container |a1| contains every element of the
// sorted container |a2|.
template <typename Arg1, typename Arg2>
bool STLIncludes(const Arg1& a1, const Arg2& a2) {
  DCHECK(STLIsSorted(a1));
  DCHECK(STLIsSorted(a2));
  return std::includes(a1.begin(), a1.end(), a2.begin(), a2.end());
}
+
+} // namespace base
+
+#endif // BASE_STL_UTIL_H_
diff --git a/libchrome/base/stl_util_unittest.cc b/libchrome/base/stl_util_unittest.cc
new file mode 100644
index 0000000..42004eb
--- /dev/null
+++ b/libchrome/base/stl_util_unittest.cc
@@ -0,0 +1,267 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/stl_util.h"
+
+#include <set>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+// Used as test case to ensure the various base::STLXxx functions don't require
+// more than operators "<" and "==" on values stored in containers.
+class ComparableValue {  // Deliberately provides neither operator> nor operator!=.
+ public:
+  explicit ComparableValue(int value) : value_(value) {}
+
+  bool operator==(const ComparableValue& rhs) const {
+    return value_ == rhs.value_;
+  }
+
+  bool operator<(const ComparableValue& rhs) const {
+    return value_ < rhs.value_;  // The only ordering operation containers may use.
+  }
+
+ private:
+  int value_;
+};
+
+} // namespace
+
+namespace base {
+namespace {
+
+TEST(STLUtilTest, STLIsSorted) {  // Sorted-ness check on set (always sorted) and vector.
+  {
+    std::set<int> set;  // std::set keeps elements sorted regardless of insert order.
+    set.insert(24);
+    set.insert(1);
+    set.insert(12);
+    EXPECT_TRUE(STLIsSorted(set));
+  }
+
+  {
+    std::set<ComparableValue> set;  // Compiles with only operator< / operator==.
+    set.insert(ComparableValue(24));
+    set.insert(ComparableValue(1));
+    set.insert(ComparableValue(12));
+    EXPECT_TRUE(STLIsSorted(set));
+  }
+
+  {
+    std::vector<int> vector;  // Built in ascending order, including a duplicate.
+    vector.push_back(1);
+    vector.push_back(1);
+    vector.push_back(4);
+    vector.push_back(64);
+    vector.push_back(12432);
+    EXPECT_TRUE(STLIsSorted(vector));
+    vector.back() = 1;  // Break the ordering at the tail.
+    EXPECT_FALSE(STLIsSorted(vector));
+  }
+}
+
+TEST(STLUtilTest, STLSetDifference) {  // a1 = {1..4}, a2 = {3..7}; checks both directions.
+  std::set<int> a1;
+  a1.insert(1);
+  a1.insert(2);
+  a1.insert(3);
+  a1.insert(4);
+
+  std::set<int> a2;
+  a2.insert(3);
+  a2.insert(4);
+  a2.insert(5);
+  a2.insert(6);
+  a2.insert(7);
+
+  {
+    std::set<int> difference;  // a1 \ a2
+    difference.insert(1);
+    difference.insert(2);
+    EXPECT_EQ(difference, STLSetDifference<std::set<int> >(a1, a2));
+  }
+
+  {
+    std::set<int> difference;  // a2 \ a1
+    difference.insert(5);
+    difference.insert(6);
+    difference.insert(7);
+    EXPECT_EQ(difference, STLSetDifference<std::set<int> >(a2, a1));
+  }
+
+  {
+    std::vector<int> difference;  // Same result via a vector ResultType; order is ascending.
+    difference.push_back(1);
+    difference.push_back(2);
+    EXPECT_EQ(difference, STLSetDifference<std::vector<int> >(a1, a2));
+  }
+
+  {
+    std::vector<int> difference;
+    difference.push_back(5);
+    difference.push_back(6);
+    difference.push_back(7);
+    EXPECT_EQ(difference, STLSetDifference<std::vector<int> >(a2, a1));
+  }
+}
+
+TEST(STLUtilTest, STLSetUnion) {  // a1 = {1..4}, a2 = {3..7}; union is {1..7} either way.
+  std::set<int> a1;
+  a1.insert(1);
+  a1.insert(2);
+  a1.insert(3);
+  a1.insert(4);
+
+  std::set<int> a2;
+  a2.insert(3);
+  a2.insert(4);
+  a2.insert(5);
+  a2.insert(6);
+  a2.insert(7);
+
+  {
+    std::set<int> result;  // a1 ∪ a2
+    result.insert(1);
+    result.insert(2);
+    result.insert(3);
+    result.insert(4);
+    result.insert(5);
+    result.insert(6);
+    result.insert(7);
+    EXPECT_EQ(result, STLSetUnion<std::set<int> >(a1, a2));
+  }
+
+  {
+    std::set<int> result;  // Union is symmetric: swapping args gives the same set.
+    result.insert(1);
+    result.insert(2);
+    result.insert(3);
+    result.insert(4);
+    result.insert(5);
+    result.insert(6);
+    result.insert(7);
+    EXPECT_EQ(result, STLSetUnion<std::set<int> >(a2, a1));
+  }
+
+  {
+    std::vector<int> result;  // Vector ResultType: duplicates (3, 4) appear once, ascending.
+    result.push_back(1);
+    result.push_back(2);
+    result.push_back(3);
+    result.push_back(4);
+    result.push_back(5);
+    result.push_back(6);
+    result.push_back(7);
+    EXPECT_EQ(result, STLSetUnion<std::vector<int> >(a1, a2));
+  }
+
+  {
+    std::vector<int> result;
+    result.push_back(1);
+    result.push_back(2);
+    result.push_back(3);
+    result.push_back(4);
+    result.push_back(5);
+    result.push_back(6);
+    result.push_back(7);
+    EXPECT_EQ(result, STLSetUnion<std::vector<int> >(a2, a1));
+  }
+}
+
+TEST(STLUtilTest, STLSetIntersection) {  // a1 = {1..4}, a2 = {3..7}; intersection is {3, 4}.
+  std::set<int> a1;
+  a1.insert(1);
+  a1.insert(2);
+  a1.insert(3);
+  a1.insert(4);
+
+  std::set<int> a2;
+  a2.insert(3);
+  a2.insert(4);
+  a2.insert(5);
+  a2.insert(6);
+  a2.insert(7);
+
+  {
+    std::set<int> result;  // a1 ∩ a2
+    result.insert(3);
+    result.insert(4);
+    EXPECT_EQ(result, STLSetIntersection<std::set<int> >(a1, a2));
+  }
+
+  {
+    std::set<int> result;  // Intersection is symmetric.
+    result.insert(3);
+    result.insert(4);
+    EXPECT_EQ(result, STLSetIntersection<std::set<int> >(a2, a1));
+  }
+
+  {
+    std::vector<int> result;  // Vector ResultType preserves ascending order.
+    result.push_back(3);
+    result.push_back(4);
+    EXPECT_EQ(result, STLSetIntersection<std::vector<int> >(a1, a2));
+  }
+
+  {
+    std::vector<int> result;
+    result.push_back(3);
+    result.push_back(4);
+    EXPECT_EQ(result, STLSetIntersection<std::vector<int> >(a2, a1));
+  }
+}
+
+TEST(STLUtilTest, STLIncludes) {  // Subset checks across three overlapping sets.
+  std::set<int> a1;  // {1, 2, 3, 4}
+  a1.insert(1);
+  a1.insert(2);
+  a1.insert(3);
+  a1.insert(4);
+
+  std::set<int> a2;  // {3, 4} — subset of both a1 and a3.
+  a2.insert(3);
+  a2.insert(4);
+
+  std::set<int> a3;  // {3, 4, 5} — 5 is in neither a1 nor a2.
+  a3.insert(3);
+  a3.insert(4);
+  a3.insert(5);
+
+  EXPECT_TRUE(STLIncludes<std::set<int> >(a1, a2));
+  EXPECT_FALSE(STLIncludes<std::set<int> >(a1, a3));
+  EXPECT_FALSE(STLIncludes<std::set<int> >(a2, a1));
+  EXPECT_FALSE(STLIncludes<std::set<int> >(a2, a3));
+  EXPECT_FALSE(STLIncludes<std::set<int> >(a3, a1));
+  EXPECT_TRUE(STLIncludes<std::set<int> >(a3, a2));
+}
+
+TEST(StringAsArrayTest, Empty) {  // Contract: empty string yields nullptr, not "".
+  std::string empty;
+  EXPECT_EQ(nullptr, string_as_array(&empty));
+}
+
+TEST(StringAsArrayTest, NullTerminated) {
+  // If any std::string implementation is not null-terminated, this should
+  // fail. All compilers we use return a null-terminated buffer, but please do
+  // not rely on this fact in your code.
+  std::string str("abcde");
+  str.resize(3);  // Shrink to "abc"; the NUL must move to index 3.
+  EXPECT_STREQ("abc", string_as_array(&str));
+}
+
+TEST(StringAsArrayTest, WriteCopy) {
+  // With a COW implementation, this test will fail if
+  // string_as_array(&str) is implemented as
+  // const_cast<char*>(str->data()).
+  std::string s1("abc");
+  const std::string s2(s1);  // Shares storage under copy-on-write libraries.
+  string_as_array(&s1)[1] = 'x';  // Must trigger an unshare before writing.
+  EXPECT_EQ("axc", s1);
+  EXPECT_EQ("abc", s2);  // The copy must remain untouched.
+}
+
+} // namespace
+} // namespace base
diff --git a/libchrome/base/strings/OWNERS b/libchrome/base/strings/OWNERS
new file mode 100644
index 0000000..5381872
--- /dev/null
+++ b/libchrome/base/strings/OWNERS
@@ -0,0 +1,2 @@
+per-file safe_sprintf*=jln@chromium.org
+per-file safe_sprintf*=mdempsky@chromium.org
diff --git a/libchrome/base/strings/nullable_string16.cc b/libchrome/base/strings/nullable_string16.cc
new file mode 100644
index 0000000..07f81d4
--- /dev/null
+++ b/libchrome/base/strings/nullable_string16.cc
@@ -0,0 +1,17 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/nullable_string16.h"
+
+#include <ostream>
+
+#include "base/strings/utf_string_conversions.h"
+
+namespace base {
+
+std::ostream& operator<<(std::ostream& out, const NullableString16& value) {  // For logging/gtest output.
+  return value.is_null() ? out << "(null)" : out << UTF16ToUTF8(value.string());
+}
+
+} // namespace base
diff --git a/libchrome/base/strings/nullable_string16.h b/libchrome/base/strings/nullable_string16.h
new file mode 100644
index 0000000..016c25c
--- /dev/null
+++ b/libchrome/base/strings/nullable_string16.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_NULLABLE_STRING16_H_
+#define BASE_STRINGS_NULLABLE_STRING16_H_
+
+#include <iosfwd>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+
+namespace base {
+
+// This class is a simple wrapper for string16 which also contains a null
+// state. This should be used only where the difference between null and
+// empty is meaningful.
+class NullableString16 {
+ public:
+  NullableString16() : is_null_(true) { }  // Default: null, with an empty backing string.
+  NullableString16(const string16& string, bool is_null)
+      : string_(string), is_null_(is_null) {
+  }
+
+  const string16& string() const { return string_; }  // Returned even when null.
+  bool is_null() const { return is_null_; }
+
+ private:
+  string16 string_;
+  bool is_null_;
+};
+
+inline bool operator==(const NullableString16& a, const NullableString16& b) {
+  // NOTE(review): compares the backing string even when both sides are null, so
+  // two nulls with different backing strings are unequal — presumably nulls are
+  // always built with an empty string; confirm against callers.
+  return a.is_null() == b.is_null() && a.string() == b.string();
+}
+
+inline bool operator!=(const NullableString16& a, const NullableString16& b) {
+  return !(a == b);  // Defined in terms of operator== for consistency.
+}
+
+BASE_EXPORT std::ostream& operator<<(std::ostream& out,
+ const NullableString16& value);
+
+} // namespace base
+
+#endif // BASE_STRINGS_NULLABLE_STRING16_H_
diff --git a/libchrome/base/strings/nullable_string16_unittest.cc b/libchrome/base/strings/nullable_string16_unittest.cc
new file mode 100644
index 0000000..f02fdce
--- /dev/null
+++ b/libchrome/base/strings/nullable_string16_unittest.cc
@@ -0,0 +1,35 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/nullable_string16.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(NullableString16Test, DefaultConstructor) {  // Default is null AND empty.
+  NullableString16 s;
+  EXPECT_TRUE(s.is_null());
+  EXPECT_EQ(string16(), s.string());
+}
+
+TEST(NullableString16Test, Equals) {  // Same text, both non-null -> equal.
+  NullableString16 a(ASCIIToUTF16("hello"), false);
+  NullableString16 b(ASCIIToUTF16("hello"), false);
+  EXPECT_EQ(a, b);
+}
+
+TEST(NullableString16Test, NotEquals) {  // Different text, both non-null -> unequal.
+  NullableString16 a(ASCIIToUTF16("hello"), false);
+  NullableString16 b(ASCIIToUTF16("world"), false);
+  EXPECT_NE(a, b);
+}
+
+TEST(NullableString16Test, NotEqualsNull) {  // Null differs from any non-null value.
+  NullableString16 a(ASCIIToUTF16("hello"), false);
+  NullableString16 b;
+  EXPECT_NE(a, b);
+}
+
+} // namespace base
diff --git a/libchrome/base/strings/pattern.cc b/libchrome/base/strings/pattern.cc
new file mode 100644
index 0000000..af30aab
--- /dev/null
+++ b/libchrome/base/strings/pattern.cc
@@ -0,0 +1,169 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/pattern.h"
+
+#include "base/third_party/icu/icu_utf.h"
+
+namespace base {
+
+namespace {
+
+static bool IsWildcard(base_icu::UChar32 character) {  // The two pattern metacharacters.
+  return character == '*' || character == '?';
+}
+
+// Move the string pointers to the point where they start to differ.
+// NEXT is a functor that decodes one code point and advances the pointer
+// (NextCharUTF8 / NextCharUTF16 below).
+template <typename CHAR, typename NEXT>
+static void EatSameChars(const CHAR** pattern, const CHAR* pattern_end,
+                         const CHAR** string, const CHAR* string_end,
+                         NEXT next) {
+  const CHAR* escape = NULL;  // Position of a pending '\\', or NULL.
+  while (*pattern != pattern_end && *string != string_end) {
+    if (!escape && IsWildcard(**pattern)) {
+      // We don't want to match wildcard here, except if it's escaped.
+      return;
+    }
+
+    // Check if the escapement char is found. If so, skip it and move to the
+    // next character.
+    if (!escape && **pattern == '\\') {
+      escape = *pattern;
+      next(pattern, pattern_end);  // Consume the backslash only.
+      continue;
+    }
+
+    // Check if the chars match, if so, increment the ptrs.
+    const CHAR* pattern_next = *pattern;
+    const CHAR* string_next = *string;
+    base_icu::UChar32 pattern_char = next(&pattern_next, pattern_end);
+    if (pattern_char == next(&string_next, string_end) &&
+        pattern_char != CBU_SENTINEL) {  // Sentinel marks an invalid sequence; never matches.
+      *pattern = pattern_next;
+      *string = string_next;
+    } else {
+      // Uh oh, it did not match, we are done. If the last char was an
+      // escapement, that means that it was an error to advance the ptr here,
+      // let's put it back where it was. This also means that the MatchPattern
+      // function will return false because if we can't match an escape char
+      // here, then no one will.
+      if (escape) {
+        *pattern = escape;
+      }
+      return;
+    }
+
+    escape = NULL;  // The escape only applies to the single following char.
+  }
+}
+
+template <typename CHAR, typename NEXT>
+static void EatWildcard(const CHAR** pattern, const CHAR* end, NEXT next) {  // Skip a run of '*'/'?'.
+  while (*pattern != end) {
+    if (!IsWildcard(**pattern))
+      return;
+    next(pattern, end);
+  }
+}
+
+template <typename CHAR, typename NEXT>
+static bool MatchPatternT(const CHAR* eval, const CHAR* eval_end,
+                          const CHAR* pattern, const CHAR* pattern_end,
+                          int depth,
+                          NEXT next) {
+  const int kMaxDepth = 16;  // Bounds recursion; at most 16 wildcards are honored.
+  if (depth > kMaxDepth)
+    return false;
+
+  // Eat all the matching chars.
+  EatSameChars(&pattern, pattern_end, &eval, eval_end, next);
+
+  // If the string is empty, then the pattern must be empty too, or contains
+  // only wildcards.
+  if (eval == eval_end) {
+    EatWildcard(&pattern, pattern_end, next);
+    return pattern == pattern_end;
+  }
+
+  // Pattern is empty but not string, this is not a match.
+  if (pattern == pattern_end)
+    return false;
+
+  // If this is a question mark, then we need to compare the rest with
+  // the current string or the string with one character eaten.
+  const CHAR* next_pattern = pattern;  // Pattern with the leading wildcard consumed.
+  next(&next_pattern, pattern_end);
+  if (pattern[0] == '?') {  // '?' matches zero or one character.
+    if (MatchPatternT(eval, eval_end, next_pattern, pattern_end,
+                      depth + 1, next))
+      return true;
+    const CHAR* next_eval = eval;
+    next(&next_eval, eval_end);
+    if (MatchPatternT(next_eval, eval_end, next_pattern, pattern_end,
+                      depth + 1, next))
+      return true;
+  }
+
+  // This is a *, try to match all the possible substrings with the remainder
+  // of the pattern.
+  if (pattern[0] == '*') {
+    // Collapse duplicate wild cards (********** into *) so that the
+    // method does not recurse unnecessarily. http://crbug.com/52839
+    EatWildcard(&next_pattern, pattern_end, next);
+
+    while (eval != eval_end) {
+      if (MatchPatternT(eval, eval_end, next_pattern, pattern_end,
+                        depth + 1, next))
+        return true;
+      eval++;  // NOTE(review): advances one code unit, not one code point; a mid-sequence start presumably decodes to the sentinel and cannot match — confirm.
+    }
+
+    // We reached the end of the string, let's see if the pattern contains only
+    // wildcards.
+    if (eval == eval_end) {  // Always true here: the loop above ran to completion.
+      EatWildcard(&pattern, pattern_end, next);
+      if (pattern != pattern_end)
+        return false;
+      return true;
+    }
+  }
+
+  return false;
+}
+
+struct NextCharUTF8 {  // Decodes one UTF-8 code point and advances |*p|.
+  base_icu::UChar32 operator()(const char** p, const char* end) {
+    base_icu::UChar32 c;
+    int offset = 0;
+    CBU8_NEXT(*p, offset, end - *p, c);  // c becomes negative (sentinel) on an invalid sequence.
+    *p += offset;
+    return c;
+  }
+};
+
+struct NextCharUTF16 {  // Decodes one UTF-16 code point (incl. surrogate pairs) and advances |*p|.
+  base_icu::UChar32 operator()(const char16** p, const char16* end) {
+    base_icu::UChar32 c;
+    int offset = 0;
+    CBU16_NEXT(*p, offset, end - *p, c);
+    *p += offset;
+    return c;
+  }
+};
+
+} // namespace
+
+bool MatchPattern(const StringPiece& eval, const StringPiece& pattern) {  // UTF-8 entry point.
+  return MatchPatternT(eval.data(), eval.data() + eval.size(),
+                       pattern.data(), pattern.data() + pattern.size(),
+                       0, NextCharUTF8());  // depth starts at 0
+}
+
+bool MatchPattern(const StringPiece16& eval, const StringPiece16& pattern) {  // UTF-16 entry point.
+  return MatchPatternT(eval.data(), eval.data() + eval.size(),
+                       pattern.data(), pattern.data() + pattern.size(),
+                       0, NextCharUTF16());  // depth starts at 0
+}
+
+} // namespace base
diff --git a/libchrome/base/strings/pattern.h b/libchrome/base/strings/pattern.h
new file mode 100644
index 0000000..b698207
--- /dev/null
+++ b/libchrome/base/strings/pattern.h
@@ -0,0 +1,26 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_PATTERN_H_
+#define BASE_STRINGS_PATTERN_H_
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// Returns true if the string passed in matches the pattern. The pattern
+// string can contain wildcards like * and ?
+//
+// The backslash character (\) is an escape character for * and ?
+// We limit the patterns to having a max of 16 * or ? characters.
+// ? matches 0 or 1 character, while * matches 0 or more characters.
+BASE_EXPORT bool MatchPattern(const StringPiece& string,
+ const StringPiece& pattern);
+BASE_EXPORT bool MatchPattern(const StringPiece16& string,
+ const StringPiece16& pattern);
+
+} // namespace base
+
+#endif // BASE_STRINGS_PATTERN_H_
diff --git a/libchrome/base/strings/pattern_unittest.cc b/libchrome/base/strings/pattern_unittest.cc
new file mode 100644
index 0000000..9e82b3c
--- /dev/null
+++ b/libchrome/base/strings/pattern_unittest.cc
@@ -0,0 +1,50 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/pattern.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(StringUtilTest, MatchPatternTest) {  // Covers wildcards, escapes, depth limit, UTF-8/16.
+  EXPECT_TRUE(MatchPattern("www.google.com", "*.com"));
+  EXPECT_TRUE(MatchPattern("www.google.com", "*"));
+  EXPECT_FALSE(MatchPattern("www.google.com", "www*.g*.org"));
+  EXPECT_TRUE(MatchPattern("Hello", "H?l?o"));
+  EXPECT_FALSE(MatchPattern("www.google.com", "http://*)"));
+  EXPECT_FALSE(MatchPattern("www.msn.com", "*.COM"));  // Matching is case-sensitive.
+  EXPECT_TRUE(MatchPattern("Hello*1234", "He??o\\*1*"));  // "\\*" matches a literal '*'.
+  EXPECT_FALSE(MatchPattern("", "*.*"));
+  EXPECT_TRUE(MatchPattern("", "*"));
+  EXPECT_TRUE(MatchPattern("", "?"));  // '?' matches zero or one character.
+  EXPECT_TRUE(MatchPattern("", ""));
+  EXPECT_FALSE(MatchPattern("Hello", ""));
+  EXPECT_TRUE(MatchPattern("Hello*", "Hello*"));
+  // Stop after a certain recursion depth.
+  EXPECT_FALSE(MatchPattern("123456789012345678", "?????????????????*"));
+
+  // Test UTF8 matching.
+  EXPECT_TRUE(MatchPattern("heart: \xe2\x99\xa0", "*\xe2\x99\xa0"));
+  EXPECT_TRUE(MatchPattern("heart: \xe2\x99\xa0.", "heart: ?."));
+  EXPECT_TRUE(MatchPattern("hearts: \xe2\x99\xa0\xe2\x99\xa0", "*"));
+  // Invalid sequences should be handled as a single invalid character.
+  EXPECT_TRUE(MatchPattern("invalid: \xef\xbf\xbe", "invalid: ?"));
+  // If the pattern has invalid characters, it shouldn't match anything.
+  EXPECT_FALSE(MatchPattern("\xf4\x90\x80\x80", "\xf4\x90\x80\x80"));
+
+  // Test UTF16 character matching.
+  EXPECT_TRUE(MatchPattern(UTF8ToUTF16("www.google.com"),
+                           UTF8ToUTF16("*.com")));
+  EXPECT_TRUE(MatchPattern(UTF8ToUTF16("Hello*1234"),
+                           UTF8ToUTF16("He??o\\*1*")));
+
+  // This test verifies that consecutive wild cards are collapsed into 1
+  // wildcard (when this doesn't occur, MatchPattern reaches its maximum
+  // recursion depth).
+  EXPECT_TRUE(MatchPattern(UTF8ToUTF16("Hello"),
+                           UTF8ToUTF16("He********************************o")));
+}
+
+} // namespace base
diff --git a/libchrome/base/strings/safe_sprintf.cc b/libchrome/base/strings/safe_sprintf.cc
new file mode 100644
index 0000000..a51c778
--- /dev/null
+++ b/libchrome/base/strings/safe_sprintf.cc
@@ -0,0 +1,686 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/safe_sprintf.h"
+
+#include <errno.h>
+#include <string.h>
+
+#include <limits>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if !defined(NDEBUG)
+// In debug builds, we use RAW_CHECK() to print useful error messages, if
+// SafeSPrintf() is called with broken arguments.
+// As our contract promises that SafeSPrintf() can be called from any
+// restricted run-time context, it is not actually safe to call logging
+// functions from it; and we only ever do so for debug builds and hope for the
+// best. We should _never_ call any logging function other than RAW_CHECK(),
+// and we should _never_ include any logging code that is active in production
+// builds. Most notably, we should not include these logging functions in
+// unofficial release builds, even though those builds would otherwise have
+// DCHECKS() enabled.
+// In other words; please do not remove the #ifdef around this #include.
+// Instead, in production builds we opt for returning a degraded result,
+// whenever an error is encountered.
+// E.g. The broken function call
+// SafeSPrintf("errno = %d (%x)", errno, strerror(errno))
+// will print something like
+// errno = 13, (%x)
+// instead of
+// errno = 13 (Access denied)
+// In most of the anticipated use cases, that's probably the preferred
+// behavior.
+#include "base/logging.h"
+#define DEBUG_CHECK RAW_CHECK
+#else
+#define DEBUG_CHECK(x) do { if (x) { } } while (0)
+#endif
+
+namespace base {
+namespace strings {
+
+// The code in this file is extremely careful to be async-signal-safe.
+//
+// Most obviously, we avoid calling any code that could dynamically allocate
+// memory. Doing so would almost certainly result in bugs and dead-locks.
+// We also avoid calling any other STL functions that could have unintended
+// side-effects involving memory allocation or access to other shared
+// resources.
+//
+// But on top of that, we also avoid calling other library functions, as many
+// of them have the side-effect of calling getenv() (in order to deal with
+// localization) or accessing errno. The latter sounds benign, but there are
+// several execution contexts where it isn't even possible to safely read let
+// alone write errno.
+//
+// The stated design goal of the SafeSPrintf() function is that it can be
+// called from any context that can safely call C or C++ code (i.e. anything
+// that doesn't require assembly code).
+//
+// For a brief overview of some but not all of the issues with async-signal-
+// safety, refer to:
+// http://pubs.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html
+
+namespace {
+const size_t kSSizeMaxConst = ((size_t)(ssize_t)-1) >> 1;
+
+const char kUpCaseHexDigits[] = "0123456789ABCDEF";
+const char kDownCaseHexDigits[] = "0123456789abcdef";
+}
+
+#if defined(NDEBUG)
+// We would like to define kSSizeMax as std::numeric_limits<ssize_t>::max(),
+// but C++ doesn't allow us to do that for constants. Instead, we have to
+// use careful casting and shifting. We later use a static_assert to
+// verify that this worked correctly.
+namespace {
+const size_t kSSizeMax = kSSizeMaxConst;
+}
+#else // defined(NDEBUG)
+// For efficiency, we really need kSSizeMax to be a constant. But for unit
+// tests, it should be adjustable. This allows us to verify edge cases without
+// having to fill the entire available address space. As a compromise, we make
+// kSSizeMax adjustable in debug builds, and then only compile that particular
+// part of the unit test in debug builds.
+namespace {
+static size_t kSSizeMax = kSSizeMaxConst;
+}
+
+namespace internal {
+void SetSafeSPrintfSSizeMaxForTest(size_t max) {  // Test-only: shrink the clamp to probe edge cases.
+  kSSizeMax = max;
+}
+
+size_t GetSafeSPrintfSSizeMaxForTest() {  // Test-only accessor; debug builds only (see #else above).
+  return kSSizeMax;
+}
+}
+#endif // defined(NDEBUG)
+
+namespace {
+class Buffer {  // Async-signal-safe output accumulator; tracks would-be length past truncation.
+ public:
+  // |buffer| is caller-allocated storage that SafeSPrintf() writes to. It
+  // has |size| bytes of writable storage. It is the caller's responsibility
+  // to ensure that the buffer is at least one byte in size, so that it fits
+  // the trailing NUL that will be added by the destructor. The buffer also
+  // must be smaller or equal to kSSizeMax in size.
+  Buffer(char* buffer, size_t size)
+      : buffer_(buffer),
+        size_(size - 1), // Account for trailing NUL byte (size == 0 would wrap; checked below)
+        count_(0) {
+// MSVS2013's standard library doesn't mark max() as constexpr yet. cl.exe
+// supports static_cast but doesn't really implement constexpr yet so it doesn't
+// complain, but clang does.
+#if __cplusplus >= 201103 && !(defined(__clang__) && defined(OS_WIN))
+    static_assert(kSSizeMaxConst ==
+                      static_cast<size_t>(std::numeric_limits<ssize_t>::max()),
+                  "kSSizeMaxConst should be the max value of an ssize_t");
+#endif
+    DEBUG_CHECK(size > 0);
+    DEBUG_CHECK(size <= kSSizeMax);
+  }
+
+  ~Buffer() {
+    // The code calling the constructor guaranteed that there was enough space
+    // to store a trailing NUL -- and in debug builds, we are actually
+    // verifying this with DEBUG_CHECK()s in the constructor. So, we can
+    // always unconditionally write the NUL byte in the destructor. We do not
+    // need to adjust the count_, as SafeSPrintf() copies snprintf() in not
+    // including the NUL byte in its return code.
+    *GetInsertionPoint() = '\000';
+  }
+
+  // Returns true, iff the buffer is filled all the way to |kSSizeMax-1|. The
+  // caller can now stop adding more data, as GetCount() has reached its
+  // maximum possible value.
+  inline bool OutOfAddressableSpace() const {
+    return count_ == static_cast<size_t>(kSSizeMax - 1);
+  }
+
+  // Returns the number of bytes that would have been emitted to |buffer_|
+  // if it was sized sufficiently large. This number can be larger than
+  // |size_|, if the caller provided an insufficiently large output buffer.
+  // But it will never be bigger than |kSSizeMax-1|.
+  inline ssize_t GetCount() const {
+    DEBUG_CHECK(count_ < kSSizeMax);
+    return static_cast<ssize_t>(count_);
+  }
+
+  // Emits one |ch| character into the |buffer_| and updates the |count_| of
+  // characters that are currently supposed to be in the buffer.
+  // Returns "false", iff the buffer was already full.
+  // N.B. |count_| increases even if no characters have been written. This is
+  // needed so that GetCount() can return the number of bytes that should
+  // have been allocated for the |buffer_|.
+  inline bool Out(char ch) {
+    if (size_ >= 1 && count_ < size_) {  // size_ >= 1 guards the size == 1 (size_ == 0) case.
+      buffer_[count_] = ch;
+      return IncrementCountByOne();
+    }
+    // |count_| still needs to be updated, even if the buffer has been
+    // filled completely. This allows SafeSPrintf() to return the number of
+    // bytes that should have been emitted.
+    IncrementCountByOne();
+    return false;
+  }
+
+  // Inserts |padding|-|len| bytes worth of padding into the |buffer_|.
+  // |count_| will also be incremented by the number of bytes that were meant
+  // to be emitted. The |pad| character is typically either a ' ' space
+  // or a '0' zero, but other non-NUL values are legal.
+  // Returns "false", iff the |buffer_| filled up (i.e. |count_|
+  // overflowed |size_|) at any time during padding.
+  inline bool Pad(char pad, size_t padding, size_t len) {
+    DEBUG_CHECK(pad);
+    DEBUG_CHECK(padding <= kSSizeMax);
+    for (; padding > len; --padding) {
+      if (!Out(pad)) {
+        if (--padding) {  // Account for the remaining, never-emitted pad bytes in one step.
+          IncrementCount(padding-len);
+        }
+        return false;
+      }
+    }
+    return true;
+  }
+
+  // POSIX doesn't define any async-signal-safe function for converting
+  // an integer to ASCII. Define our own version.
+  //
+  // This also gives us the ability to make the function a little more
+  // powerful and have it deal with |padding|, with truncation, and with
+  // predicting the length of the untruncated output.
+  //
+  // IToASCII() converts an integer |i| to ASCII.
+  //
+  // Unlike similar functions in the standard C library, it never appends a
+  // NUL character. This is left for the caller to do.
+  //
+  // While the function signature takes a signed int64_t, the code decides at
+  // run-time whether to treat the argument as signed (int64_t) or as unsigned
+  // (uint64_t) based on the value of |sign|.
+  //
+  // It supports |base|s 2 through 16. Only a |base| of 10 is allowed to have
+  // a |sign|. Otherwise, |i| is treated as unsigned.
+  //
+  // For bases larger than 10, |upcase| decides whether lower-case or upper-
+  // case letters should be used to designate digits greater than 10.
+  //
+  // Padding can be done with either '0' zeros or ' ' spaces. Padding has to
+  // be positive and will always be applied to the left of the output.
+  //
+  // Prepends a |prefix| to the number (e.g. "0x"). This prefix goes to
+  // the left of |padding|, if |pad| is '0'; and to the right of |padding|
+  // if |pad| is ' '.
+  //
+  // Returns "false", if the |buffer_| overflowed at any time.
+  bool IToASCII(bool sign, bool upcase, int64_t i, int base,
+                char pad, size_t padding, const char* prefix);
+
+ private:
+  // Increments |count_| by |inc| unless this would cause |count_| to
+  // overflow |kSSizeMax-1|. Returns "false", iff an overflow was detected;
+  // it then clamps |count_| to |kSSizeMax-1|.
+  inline bool IncrementCount(size_t inc) {
+    // "inc" is either 1 or a "padding" value. Padding is clamped at
+    // run-time to at most kSSizeMax-1. So, we know that "inc" is always in
+    // the range 1..kSSizeMax-1.
+    // This allows us to compute "kSSizeMax - 1 - inc" without incurring any
+    // integer overflows.
+    DEBUG_CHECK(inc <= kSSizeMax - 1);
+    if (count_ > kSSizeMax - 1 - inc) {
+      count_ = kSSizeMax - 1;
+      return false;
+    } else {
+      count_ += inc;
+      return true;
+    }
+  }
+
+  // Convenience method for the common case of incrementing |count_| by one.
+  inline bool IncrementCountByOne() {
+    return IncrementCount(1);
+  }
+
+  // Return the current insertion point into the buffer. This is typically
+  // at |buffer_| + |count_|, but could be before that if truncation
+  // happened. It always points to one byte past the last byte that was
+  // successfully placed into the |buffer_|.
+  inline char* GetInsertionPoint() const {
+    size_t idx = count_;
+    if (idx > size_) {
+      idx = size_;
+    }
+    return buffer_ + idx;
+  }
+
+  // User-provided buffer that will receive the fully formatted output string.
+  char* buffer_;
+
+  // Number of bytes that are available in the buffer excluding the trailing
+  // NUL byte that will be added by the destructor.
+  const size_t size_;
+
+  // Number of bytes that would have been emitted to the buffer, if the buffer
+  // was sufficiently big. This number always excludes the trailing NUL byte
+  // and it is guaranteed to never grow bigger than kSSizeMax-1.
+  size_t count_;
+
+  DISALLOW_COPY_AND_ASSIGN(Buffer);
+};
+
+
+bool Buffer::IToASCII(bool sign, bool upcase, int64_t i, int base,
+                      char pad, size_t padding, const char* prefix) {
+  // Sanity check for parameters. None of these should ever fail, but see
+  // above for the rationale why we can't call CHECK().
+  DEBUG_CHECK(base >= 2);
+  DEBUG_CHECK(base <= 16);
+  DEBUG_CHECK(!sign || base == 10);
+  DEBUG_CHECK(pad == '0' || pad == ' ');
+  DEBUG_CHECK(padding <= kSSizeMax);
+  DEBUG_CHECK(!(sign && prefix && *prefix));
+
+  // Handle negative numbers, if the caller indicated that |i| should be
+  // treated as a signed number; otherwise treat |i| as unsigned (even if the
+  // MSB is set!)
+  // Details are tricky, because of limited data-types, but equivalent pseudo-
+  // code would look like:
+  //   if (sign && i < 0)
+  //     prefix = "-";
+  //   num = abs(i);
+  int minint = 0;  // 1 iff |i| is INT64_MIN; added back to the last digit below.
+  uint64_t num;
+  if (sign && i < 0) {
+    prefix = "-";
+
+    // Turn our number positive.
+    if (i == std::numeric_limits<int64_t>::min()) {
+      // The most negative integer needs special treatment.
+      minint = 1;
+      num = static_cast<uint64_t>(-(i + 1));  // -i would overflow; negate i+1 instead.
+    } else {
+      // "Normal" negative numbers are easy.
+      num = static_cast<uint64_t>(-i);
+    }
+  } else {
+    num = static_cast<uint64_t>(i);
+  }
+
+  // If padding with '0' zero, emit the prefix or '-' character now. Otherwise,
+  // make the prefix accessible in reverse order, so that we can later output
+  // it right between padding and the number.
+  // We cannot choose the easier approach of just reversing the number, as that
+  // fails in situations where we need to truncate numbers that have padding
+  // and/or prefixes.
+  const char* reverse_prefix = NULL;
+  if (prefix && *prefix) {
+    if (pad == '0') {
+      while (*prefix) {
+        if (padding) {
+          --padding;  // The emitted prefix consumes part of the requested width.
+        }
+        Out(*prefix++);
+      }
+      prefix = NULL;
+    } else {
+      for (reverse_prefix = prefix; *reverse_prefix; ++reverse_prefix) {  // Find end of prefix.
+      }
+    }
+  } else
+    prefix = NULL;
+  const size_t prefix_length = reverse_prefix - prefix;  // 0 unless the pad-with-space path ran.
+
+  // Loop until we have converted the entire number. Output at least one
+  // character (i.e. '0').
+  size_t start = count_;
+  size_t discarded = 0;  // Digits dropped due to an undersized output buffer.
+  bool started = false;
+  do {
+    // Make sure there is still enough space left in our output buffer.
+    if (count_ >= size_) {
+      if (start < size_) {
+        // It is rare that we need to output a partial number. But if asked
+        // to do so, we will still make sure we output the correct number of
+        // leading digits.
+        // Since we are generating the digits in reverse order, we actually
+        // have to discard digits in the order that we have already emitted
+        // them. This is essentially equivalent to:
+        //   memmove(buffer_ + start, buffer_ + start + 1, size_ - start - 1)
+        for (char* move = buffer_ + start, *end = buffer_ + size_ - 1;
+             move < end;
+             ++move) {
+          *move = move[1];
+        }
+        ++discarded;
+        --count_;
+      } else if (count_ - size_ > 1) {
+        // Need to increment either |count_| or |discarded| to make progress.
+        // The latter is more efficient, as it eventually triggers fast
+        // handling of padding. But we have to ensure we don't accidentally
+        // change the overall state (i.e. switch the state-machine from
+        // discarding to non-discarding). |count_| needs to always stay
+        // bigger than |size_|.
+        --count_;
+        ++discarded;
+      }
+    }
+
+    // Output the next digit and (if necessary) compensate for the most
+    // negative integer needing special treatment. This works because,
+    // no matter the bit width of the integer, the lowest-most decimal
+    // integer always ends in 2, 4, 6, or 8.
+    if (!num && started) {
+      if (reverse_prefix > prefix) {
+        Out(*--reverse_prefix);  // Emit prefix chars right-to-left, adjacent to the digits.
+      } else {
+        Out(pad);
+      }
+    } else {
+      started = true;
+      Out((upcase ? kUpCaseHexDigits : kDownCaseHexDigits)[num%base + minint]);
+    }
+
+    minint = 0;  // Only the very first (lowest) digit needs the correction.
+    num /= base;
+
+    // Add padding, if requested.
+    if (padding > 0) {
+      --padding;
+
+      // Performance optimization for when we are asked to output excessive
+      // padding, but our output buffer is limited in size.  Even if we output
+      // a 64bit number in binary, we would never write more than 64 plus
+      // prefix non-padding characters. So, once this limit has been passed,
+      // any further state change can be computed arithmetically; we know that
+      // by this time, our entire final output consists of padding characters
+      // that have all already been output.
+      if (discarded > 8*sizeof(num) + prefix_length) {
+        IncrementCount(padding);
+        padding = 0;
+      }
+    }
+  } while (num || padding || (reverse_prefix > prefix));
+
+  // Conversion to ASCII actually resulted in the digits being in reverse
+  // order. We can't easily generate them in forward order, as we can't tell
+  // the number of characters needed until we are done converting.
+  // So, now, we reverse the string (except for the possible '-' sign).
+  char* front = buffer_ + start;
+  char* back = GetInsertionPoint();
+  while (--back > front) {  // Classic in-place two-pointer reversal.
+    char ch = *back;
+    *back = *front;
+    *front++ = ch;
+  }
+
+  IncrementCount(discarded);
+  return !discarded;  // false iff any digit had to be dropped (truncation).
+}
+
+} // anonymous namespace
+
+namespace internal {
+
+// Formats |fmt| into |buf| (capacity |sz| bytes), expanding at most
+// |max_args| arguments from |args|. Returns the number of bytes needed to
+// store the untruncated output -- excluding the terminating NUL -- or -1 if
+// |sz| is invalid (zero or not representable as a positive ssize_t).
+// Implemented as a goto-based state machine so that it remains fully
+// async-signal-safe: no library calls, no allocation, no errno.
+ssize_t SafeSNPrintf(char* buf, size_t sz, const char* fmt, const Arg* args,
+                     const size_t max_args) {
+  // Make sure that at least one NUL byte can be written, and that the buffer
+  // never overflows kSSizeMax. Not only does that use up most or all of the
+  // address space, it also would result in a return code that cannot be
+  // represented.
+  if (static_cast<ssize_t>(sz) < 1) {
+    return -1;
+  } else if (sz > kSSizeMax) {
+    sz = kSSizeMax;
+  }
+
+  // Iterate over format string and interpret '%' arguments as they are
+  // encountered.
+  Buffer buffer(buf, sz);
+  size_t padding;  // Field width requested for the current conversion.
+  char pad;        // Padding character for the current conversion: ' ' or '0'.
+  for (unsigned int cur_arg = 0; *fmt && !buffer.OutOfAddressableSpace(); ) {
+    if (*fmt++ == '%') {
+      padding = 0;
+      pad = ' ';
+      char ch = *fmt++;
+      // Jump target for re-dispatching after an (optional) width parameter
+      // has been consumed and |ch| holds the actual format character.
+    format_character_found:
+      switch (ch) {
+      case '0': case '1': case '2': case '3': case '4':
+      case '5': case '6': case '7': case '8': case '9':
+        // Found a width parameter. Convert to an integer value and store in
+        // "padding". If the leading digit is a zero, change the padding
+        // character from a space ' ' to a zero '0'.
+        pad = ch == '0' ? '0' : ' ';
+        for (;;) {
+          // The maximum allowed padding fills all the available address
+          // space and leaves just enough space to insert the trailing NUL.
+          const size_t max_padding = kSSizeMax - 1;
+          if (padding > max_padding/10 ||
+              10*padding > max_padding - (ch - '0')) {
+            DEBUG_CHECK(padding <= max_padding/10 &&
+                        10*padding <= max_padding - (ch - '0'));
+            // Integer overflow detected. Skip the rest of the width until
+            // we find the format character, then do the normal error handling.
+          padding_overflow:
+            padding = max_padding;
+            while ((ch = *fmt++) >= '0' && ch <= '9') {
+            }
+            // Consume the argument that belonged to this conversion so that
+            // subsequent conversions stay in sync with |args|.
+            if (cur_arg < max_args) {
+              ++cur_arg;
+            }
+            goto fail_to_expand;
+          }
+          padding = 10*padding + ch - '0';
+          if (padding > max_padding) {
+            // This doesn't happen for "sane" values of kSSizeMax. But once
+            // kSSizeMax gets smaller than about 10, our earlier range checks
+            // are incomplete. Unittests do trigger this artificial corner
+            // case.
+            DEBUG_CHECK(padding <= max_padding);
+            goto padding_overflow;
+          }
+          ch = *fmt++;
+          if (ch < '0' || ch > '9') {
+            // Reached the end of the width parameter. This is where the format
+            // character is found.
+            goto format_character_found;
+          }
+        }
+        break;
+      case 'c': {  // Output an ASCII character.
+        // Check that there are arguments left to be inserted.
+        if (cur_arg >= max_args) {
+          DEBUG_CHECK(cur_arg < max_args);
+          goto fail_to_expand;
+        }
+
+        // Check that the argument has the expected type.
+        const Arg& arg = args[cur_arg++];
+        if (arg.type != Arg::INT && arg.type != Arg::UINT) {
+          DEBUG_CHECK(arg.type == Arg::INT || arg.type == Arg::UINT);
+          goto fail_to_expand;
+        }
+
+        // Apply padding, if needed.
+        buffer.Pad(' ', padding, 1);
+
+        // Convert the argument to an ASCII character and output it.
+        char as_char = static_cast<char>(arg.integer.i);
+        if (!as_char) {
+          // A NUL character would prematurely terminate the output string;
+          // treat it as the end of the buffer instead.
+          goto end_of_output_buffer;
+        }
+        buffer.Out(as_char);
+        break; }
+      case 'd':    // Output a possibly signed decimal value.
+      case 'o':    // Output an unsigned octal value.
+      case 'x':    // Output an unsigned hexadecimal value.
+      case 'X':
+      case 'p': {  // Output a pointer value.
+        // Check that there are arguments left to be inserted.
+        if (cur_arg >= max_args) {
+          DEBUG_CHECK(cur_arg < max_args);
+          goto fail_to_expand;
+        }
+
+        const Arg& arg = args[cur_arg++];
+        int64_t i;
+        const char* prefix = NULL;
+        if (ch != 'p') {
+          // Check that the argument has the expected type.
+          if (arg.type != Arg::INT && arg.type != Arg::UINT) {
+            DEBUG_CHECK(arg.type == Arg::INT || arg.type == Arg::UINT);
+            goto fail_to_expand;
+          }
+          i = arg.integer.i;
+
+          if (ch != 'd') {
+            // The Arg() constructor automatically performed sign expansion on
+            // signed parameters. This is great when outputting a %d decimal
+            // number, but can result in unexpected leading 0xFF bytes when
+            // outputting a %x hexadecimal number. Mask bits, if necessary.
+            // We have to do this here, instead of in the Arg() constructor, as
+            // the Arg() constructor cannot tell whether we will output a %d
+            // or a %x. Only the latter should experience masking.
+            if (arg.integer.width < sizeof(int64_t)) {
+              i &= (1LL << (8*arg.integer.width)) - 1;
+            }
+          }
+        } else {
+          // Pointer values require an actual pointer or a string.
+          if (arg.type == Arg::POINTER) {
+            i = reinterpret_cast<uintptr_t>(arg.ptr);
+          } else if (arg.type == Arg::STRING) {
+            i = reinterpret_cast<uintptr_t>(arg.str);
+          } else if (arg.type == Arg::INT &&
+                     arg.integer.width == sizeof(NULL) &&
+                     arg.integer.i == 0) {  // Allow C++'s version of NULL
+            i = 0;
+          } else {
+            DEBUG_CHECK(arg.type == Arg::POINTER || arg.type == Arg::STRING);
+            goto fail_to_expand;
+          }
+
+          // Pointers always include the "0x" prefix.
+          prefix = "0x";
+        }
+
+        // Use IToASCII() to convert to ASCII representation. For decimal
+        // numbers, optionally print a sign. For hexadecimal numbers,
+        // distinguish between upper and lower case. %p addresses are always
+        // printed as upcase. Supports base 8, 10, and 16. Prints padding
+        // and/or prefixes, if so requested.
+        buffer.IToASCII(ch == 'd' && arg.type == Arg::INT,
+                        ch != 'x', i,
+                        ch == 'o' ? 8 : ch == 'd' ? 10 : 16,
+                        pad, padding, prefix);
+        break; }
+      case 's': {
+        // Check that there are arguments left to be inserted.
+        if (cur_arg >= max_args) {
+          DEBUG_CHECK(cur_arg < max_args);
+          goto fail_to_expand;
+        }
+
+        // Check that the argument has the expected type.
+        const Arg& arg = args[cur_arg++];
+        const char *s;
+        if (arg.type == Arg::STRING) {
+          s = arg.str ? arg.str : "<NULL>";
+        } else if (arg.type == Arg::INT && arg.integer.width == sizeof(NULL) &&
+                   arg.integer.i == 0) {  // Allow C++'s version of NULL
+          s = "<NULL>";
+        } else {
+          DEBUG_CHECK(arg.type == Arg::STRING);
+          goto fail_to_expand;
+        }
+
+        // Apply padding, if needed. This requires us to first check the
+        // length of the string that we are outputting.
+        if (padding) {
+          size_t len = 0;
+          for (const char* src = s; *src++; ) {
+            ++len;
+          }
+          buffer.Pad(' ', padding, len);
+        }
+
+        // Printing a string involves nothing more than copying it into the
+        // output buffer and making sure we don't output more bytes than
+        // available space; Out() takes care of doing that.
+        for (const char* src = s; *src; ) {
+          buffer.Out(*src++);
+        }
+        break; }
+      case '%':
+        // Quoted percent '%' character.
+        goto copy_verbatim;
+      fail_to_expand:
+        // C++ gives us tools to do type checking -- something that snprintf()
+        // could never really do. So, whenever we see arguments that don't
+        // match up with the format string, we refuse to output them. But
+        // since we have to be extremely conservative about being async-
+        // signal-safe, we are limited in the type of error handling that we
+        // can do in production builds (in debug builds we can use
+        // DEBUG_CHECK() and hope for the best). So, all we do is pass the
+        // format string unchanged. That should eventually get the user's
+        // attention; and in the meantime, it hopefully doesn't lose too much
+        // data.
+      default:
+        // Unknown or unsupported format character. Just copy verbatim to
+        // output.
+        buffer.Out('%');
+        DEBUG_CHECK(ch);
+        if (!ch) {
+          goto end_of_format_string;
+        }
+        buffer.Out(ch);
+        break;
+      }
+    } else {
+  copy_verbatim:
+      // Plain character (or the second '%' of a quoted pair); note that
+      // |fmt| has already been advanced past it.
+      buffer.Out(fmt[-1]);
+    }
+  }
+ end_of_format_string:
+ end_of_output_buffer:
+  return buffer.GetCount();
+}
+
+} // namespace internal
+
+// Fast path for format strings without any arguments to expand.
+ssize_t SafeSNPrintf(char* buf, size_t sz, const char* fmt) {
+  // Reject buffers that cannot hold even the terminating NUL, and clamp
+  // oversized buffers to kSSizeMax so that the byte count we return is
+  // always representable as a (signed) ssize_t.
+  if (static_cast<ssize_t>(sz) < 1) {
+    return -1;
+  } else if (sz > kSSizeMax) {
+    sz = kSSizeMax;
+  }
+
+  Buffer buffer(buf, sz);
+
+  // With no arguments to expand, this degenerates into a bounded string copy
+  // that collapses "%%" pairs into a single '%'. Any other use of '%' in the
+  // format string is a programming error that debug builds flag loudly.
+  const char* src = fmt;
+  while (*src) {
+    buffer.Out(*src);
+    DEBUG_CHECK(src[0] != '%' || src[1] == '%');
+    if (src[0] == '%' && src[1] == '%') {
+      ++src;  // Skip the second '%' of the quoted pair.
+    }
+    ++src;
+  }
+  return buffer.GetCount();
+}
+
+} // namespace strings
+} // namespace base
diff --git a/libchrome/base/strings/safe_sprintf.h b/libchrome/base/strings/safe_sprintf.h
new file mode 100644
index 0000000..65524a5
--- /dev/null
+++ b/libchrome/base/strings/safe_sprintf.h
@@ -0,0 +1,246 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_SAFE_SPRINTF_H_
+#define BASE_STRINGS_SAFE_SPRINTF_H_
+
+#include "build/build_config.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#if defined(OS_POSIX)
+// For ssize_t
+#include <unistd.h>
+#endif
+
+#include "base/base_export.h"
+
+namespace base {
+namespace strings {
+
+#if defined(_MSC_VER)
+// Define ssize_t inside of our namespace.
+#if defined(_WIN64)
+typedef __int64 ssize_t;
+#else
+typedef long ssize_t;
+#endif
+#endif
+
+// SafeSPrintf() is a type-safe and completely self-contained version of
+// snprintf().
+//
+// SafeSNPrintf() is an alternative function signature that can be used when
+// not dealing with fixed-sized buffers. When possible, SafeSPrintf() should
+// always be used instead of SafeSNPrintf().
+//
+// These functions allow for formatting complicated messages from contexts that
+// require strict async-signal-safety. In fact, it is safe to call them from
+// any low-level execution context, as they are guaranteed to make no library
+// or system calls. It deliberately never touches "errno", either.
+//
+// The only exception to this rule is that in debug builds the code calls
+// RAW_CHECK() to help diagnose problems when the format string does not
+// match the rest of the arguments. In release builds, no CHECK()s are used,
+// and SafeSPrintf() instead returns an output string that expands only
+// those arguments that match their format characters. Mismatched arguments
+// are ignored.
+//
+// The code currently only supports a subset of format characters:
+// %c, %o, %d, %x, %X, %p, and %s.
+//
+// SafeSPrintf() aims to be as liberal as reasonably possible. Integer-like
+// values of arbitrary width can be passed to all of the format characters
+// that expect integers. Thus, it is explicitly legal to pass an "int" to
+// "%c", and output will automatically look at the LSB only. It is also
+// explicitly legal to pass either signed or unsigned values, and the format
+// characters will automatically interpret the arguments accordingly.
+//
+// It is still not legal to mix-and-match integer-like values with pointer
+// values. For instance, you cannot pass a pointer to %x, nor can you pass an
+// integer to %p.
+//
+// The one exception is "0" zero being accepted by "%p". This works-around
+// the problem of C++ defining NULL as an integer-like value.
+//
+// All format characters take an optional width parameter. This must be a
+// positive integer. For %d, %o, %x, %X and %p, if the width starts with
+// a leading '0', padding is done with '0' instead of ' ' characters.
+//
+// There are a few features of snprintf()-style format strings, that
+// SafeSPrintf() does not support at this time.
+//
+// If an actual user showed up, there is no particularly strong reason they
+// couldn't be added. But that assumes that the trade-offs between complexity
+// and utility are favorable.
+//
+// For example, adding support for negative padding widths, and for %n are all
+// likely to be viewed positively. They are all clearly useful, low-risk, easy
+// to test, don't jeopardize the async-signal-safety of the code, and overall
+// have little impact on other parts of SafeSPrintf() function.
+//
+// On the other hand, adding support for alternate forms, positional
+// arguments, grouping, wide characters, localization or floating point numbers
+// are all unlikely to ever be added.
+//
+// SafeSPrintf() and SafeSNPrintf() mimic the behavior of snprintf() and they
+// return the number of bytes needed to store the untruncated output. This
+// does *not* include the terminating NUL byte.
+//
+// They return -1, iff a fatal error happened. This typically can only happen,
+// if the buffer size is a) negative, or b) zero (i.e. not even the NUL byte
+// can be written). The return value can never be larger than SSIZE_MAX-1.
+// This ensures that the caller can always add one to the signed return code
+// in order to determine the amount of storage that needs to be allocated.
+//
+// While the code supports type checking and while it is generally very careful
+// to avoid printing incorrect values, it tends to be conservative in printing
+// as much as possible, even when given incorrect parameters. Typically, in
+// case of an error, the format string will not be expanded. (i.e. something
+// like SafeSPrintf(buf, "%p %d", 1, 2) results in "%p 2"). See above for
+// the use of RAW_CHECK() in debug builds, though.
+//
+// Basic example:
+// char buf[20];
+// base::strings::SafeSPrintf(buf, "The answer: %2d", 42);
+//
+// Example with dynamically sized buffer (async-signal-safe). This code won't
+// work on Visual studio, as it requires dynamically allocating arrays on the
+// stack. Consider picking a smaller value for |kMaxSize| if stack size is
+// limited and known. On the other hand, if the parameters to SafeSNPrintf()
+// are trusted and not controllable by the user, you can consider eliminating
+// the check for |kMaxSize| altogether. The current value of SSIZE_MAX is
+// essentially a no-op that just illustrates how to implement an upper bound:
+// const size_t kInitialSize = 128;
+// const size_t kMaxSize = std::numeric_limits<ssize_t>::max();
+// size_t size = kInitialSize;
+// for (;;) {
+// char buf[size];
+// size = SafeSNPrintf(buf, size, "Error message \"%s\"\n", err) + 1;
+// if (sizeof(buf) < kMaxSize && size > kMaxSize) {
+// size = kMaxSize;
+// continue;
+// } else if (size > sizeof(buf))
+// continue;
+// write(2, buf, size-1);
+// break;
+// }
+
+namespace internal {
+// Helpers that use C++ overloading, templates, and specializations to deduce
+// and record type information from function arguments. This allows us to
+// later write a type-safe version of snprintf().
+
+// Captures one SafeSPrintf() argument together with enough type information
+// to let the formatter perform run-time type checking.
+struct Arg {
+  enum Type { INT, UINT, STRING, POINTER };
+
+  // Integer-like values. Each constructor records the byte width of the
+  // original type, so that the formatter can later undo unwanted sign
+  // extension (e.g. when printing a small negative value with %x).
+  Arg(signed char value) : type(INT) {
+    integer.i = value;
+    integer.width = sizeof(char);
+  }
+  Arg(unsigned char value) : type(UINT) {
+    integer.i = value;
+    integer.width = sizeof(char);
+  }
+  Arg(signed short value) : type(INT) {
+    integer.i = value;
+    integer.width = sizeof(short);
+  }
+  Arg(unsigned short value) : type(UINT) {
+    integer.i = value;
+    integer.width = sizeof(short);
+  }
+  Arg(signed int value) : type(INT) {
+    integer.i = value;
+    integer.width = sizeof(int);
+  }
+  Arg(unsigned int value) : type(UINT) {
+    integer.i = value;
+    integer.width = sizeof(int);
+  }
+  Arg(signed long value) : type(INT) {
+    integer.i = value;
+    integer.width = sizeof(long);
+  }
+  Arg(unsigned long value) : type(UINT) {
+    integer.i = value;
+    integer.width = sizeof(long);
+  }
+  Arg(signed long long value) : type(INT) {
+    integer.i = value;
+    integer.width = sizeof(long long);
+  }
+  Arg(unsigned long long value) : type(UINT) {
+    integer.i = value;
+    integer.width = sizeof(long long);
+  }
+
+  // A C-style text string (NUL-terminated; not owned by Arg).
+  Arg(const char* s) : str(s), type(STRING) { }
+  Arg(char* s) : str(s), type(STRING) { }
+
+  // Any pointer value that can be cast to a "void*". The C-style cast is
+  // deliberate: unlike static_cast, it also accepts pointers to
+  // const-qualified objects.
+  template<class T> Arg(T* p) : ptr((void*)p), type(POINTER) { }
+
+  union {
+    // An integer-like value, plus the byte width of its original type.
+    struct {
+      int64_t i;
+      unsigned char width;
+    } integer;
+
+    // A C-style text string.
+    const char* str;
+
+    // A pointer to an arbitrary object.
+    const void* ptr;
+  };
+  const enum Type type;
+};
+
+// This is the internal function that performs the actual formatting of
+// an snprintf()-style format string.
+BASE_EXPORT ssize_t SafeSNPrintf(char* buf, size_t sz, const char* fmt,
+ const Arg* args, size_t max_args);
+
+#if !defined(NDEBUG)
+// In debug builds, allow unit tests to artificially lower the kSSizeMax
+// constant that is used as a hard upper-bound for all buffers. In normal
+// use, this constant should always be std::numeric_limits<ssize_t>::max().
+BASE_EXPORT void SetSafeSPrintfSSizeMaxForTest(size_t max);
+BASE_EXPORT size_t GetSafeSPrintfSSizeMaxForTest();
+#endif
+
+} // namespace internal
+
+// Type-safe snprintf() replacement for arbitrary buffer pointers; |N| is the
+// usable buffer size in bytes.
+template<typename... Args>
+ssize_t SafeSNPrintf(char* buf, size_t N, const char* fmt, Args... args) {
+  // Wrap every argument in an Arg() record -- which captures its value and
+  // type information -- and hand the resulting array to the non-templated
+  // implementation.
+  const internal::Arg wrapped_args[] = { args... };
+  return internal::SafeSNPrintf(buf, N, fmt, wrapped_args, sizeof...(args));
+}
+
+// Type-safe sprintf() replacement for fixed-size character arrays; the
+// buffer size is deduced from the array type, so it can never be wrong.
+template<size_t N, typename... Args>
+ssize_t SafeSPrintf(char (&buf)[N], const char* fmt, Args... args) {
+  // Wrap every argument in an Arg() record -- which captures its value and
+  // type information -- and hand the resulting array to the non-templated
+  // implementation.
+  const internal::Arg wrapped_args[] = { args... };
+  return internal::SafeSNPrintf(buf, N, fmt, wrapped_args, sizeof...(args));
+}
+
+// Fast-path when we don't actually need to substitute any arguments.
+BASE_EXPORT ssize_t SafeSNPrintf(char* buf, size_t N, const char* fmt);
+// Argument-free overload; the buffer size is deduced from the array type of
+// |buf| and forwarded to the fast-path SafeSNPrintf() above.
+template<size_t N>
+inline ssize_t SafeSPrintf(char (&buf)[N], const char* fmt) {
+  return SafeSNPrintf(buf, N, fmt);
+}
+
+} // namespace strings
+} // namespace base
+
+#endif // BASE_STRINGS_SAFE_SPRINTF_H_
diff --git a/libchrome/base/strings/safe_sprintf_unittest.cc b/libchrome/base/strings/safe_sprintf_unittest.cc
new file mode 100644
index 0000000..1a21728
--- /dev/null
+++ b/libchrome/base/strings/safe_sprintf_unittest.cc
@@ -0,0 +1,763 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/safe_sprintf.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <limits>
+#include <memory>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Death tests on Android are currently very flaky. No need to add more flaky
+// tests, as they just make it hard to spot real problems.
+// TODO(markus): See if the restrictions on Android can eventually be lifted.
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+#define ALLOW_DEATH_TEST
+#endif
+
+namespace base {
+namespace strings {
+
+// Verifies behavior for degenerate buffer sizes and an empty format string.
+// The 'X' sentinels prove that bytes past the written NUL are never touched.
+TEST(SafeSPrintfTest, Empty) {
+  char buf[2] = { 'X', 'X' };
+
+  // Negative buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), ""));
+  EXPECT_EQ('X', buf[0]);
+  EXPECT_EQ('X', buf[1]);
+
+  // Zero buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, ""));
+  EXPECT_EQ('X', buf[0]);
+  EXPECT_EQ('X', buf[1]);
+
+  // A one-byte buffer should always print a single NUL byte.
+  EXPECT_EQ(0, SafeSNPrintf(buf, 1, ""));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_EQ('X', buf[1]);
+  buf[0] = 'X';  // Restore the sentinel for the next sub-test.
+
+  // A larger buffer should leave the trailing bytes unchanged.
+  EXPECT_EQ(0, SafeSNPrintf(buf, 2, ""));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_EQ('X', buf[1]);
+  buf[0] = 'X';
+
+  // The same test using SafeSPrintf() instead of SafeSNPrintf().
+  EXPECT_EQ(0, SafeSPrintf(buf, ""));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_EQ('X', buf[1]);
+  buf[0] = 'X';
+}
+
+TEST(SafeSPrintfTest, NoArguments) {
+  // Output a text message that doesn't require any substitutions. This
+  // is roughly equivalent to calling strncpy() (but unlike strncpy(), it does
+  // always add a trailing NUL; it always deduplicates '%' characters).
+  static const char text[] = "hello world";
+  char ref[20], buf[20];
+  memset(ref, 'X', sizeof(ref));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A negative buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), text));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // Zero buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, text));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // A one-byte buffer should always print a single NUL byte.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSNPrintf(buf, 1, text));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_TRUE(!memcmp(buf+1, ref+1, sizeof(buf)-1));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A larger (but limited) buffer should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSNPrintf(buf, 2, text));
+  EXPECT_EQ(text[0], buf[0]);
+  EXPECT_EQ(0, buf[1]);
+  EXPECT_TRUE(!memcmp(buf+2, ref+2, sizeof(buf)-2));
+  memcpy(buf, ref, sizeof(buf));
+
+  // An unrestricted buffer length should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+            SafeSNPrintf(buf, sizeof(buf), text));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // The same test using SafeSPrintf() instead of SafeSNPrintf().
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSPrintf(buf, text));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // Check for deduplication of '%' percent characters.
+  EXPECT_EQ(1, SafeSPrintf(buf, "%%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%X"));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%%X"));
+#if defined(NDEBUG)
+  // Release builds copy a dangling '%' through unexpanded.
+  EXPECT_EQ(1, SafeSPrintf(buf, "%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%X"));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%X"));
+#elif defined(ALLOW_DEATH_TEST)
+  // Debug builds trip the DEBUG_CHECK() in the argument-free fast path; the
+  // regex matches the stringified check expression "src[1] == '%'".
+  EXPECT_DEATH(SafeSPrintf(buf, "%"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%X"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%X"), "src.1. == '%'");
+#endif
+}
+
+// Verifies single-argument %c substitution across the same buffer-size edge
+// cases that NoArguments covers.
+TEST(SafeSPrintfTest, OneArgument) {
+  // Test basic single-argument single-character substitution.
+  const char text[] = "hello world";
+  const char fmt[] = "hello%cworld";
+  char ref[20], buf[20];
+  // Fill |ref| based on its own size (previously sizeof(buf), which only
+  // worked because both arrays happen to be 20 bytes). Matching the operand
+  // to the destination keeps this correct if either size ever changes, and
+  // is consistent with the NoArguments test above.
+  memset(ref, 'X', sizeof(ref));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A negative buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), fmt, ' '));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // Zero buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, fmt, ' '));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // A one-byte buffer should always print a single NUL byte.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+            SafeSNPrintf(buf, 1, fmt, ' '));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_TRUE(!memcmp(buf+1, ref+1, sizeof(buf)-1));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A larger (but limited) buffer should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+            SafeSNPrintf(buf, 2, fmt, ' '));
+  EXPECT_EQ(text[0], buf[0]);
+  EXPECT_EQ(0, buf[1]);
+  EXPECT_TRUE(!memcmp(buf+2, ref+2, sizeof(buf)-2));
+  memcpy(buf, ref, sizeof(buf));
+
+  // An unrestricted buffer length should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+            SafeSNPrintf(buf, sizeof(buf), fmt, ' '));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // The same test using SafeSPrintf() instead of SafeSNPrintf().
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSPrintf(buf, fmt, ' '));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // Check for deduplication of '%' percent characters.
+  EXPECT_EQ(1, SafeSPrintf(buf, "%%", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%%", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%Y", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%Y", 0));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%Y", 0));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%%Y", 0));
+#if defined(NDEBUG)
+  // Release builds copy a dangling '%' through unexpanded.
+  EXPECT_EQ(1, SafeSPrintf(buf, "%", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%", 0));
+#elif defined(ALLOW_DEATH_TEST)
+  // Debug builds trip the DEBUG_CHECK(ch) in the argument-expanding path.
+  EXPECT_DEATH(SafeSPrintf(buf, "%", 0), "ch");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%", 0), "ch");
+#endif
+}
+
+// Verifies that a format string with more conversions than arguments stops
+// expanding at the missing argument: release builds emit the remaining
+// format text verbatim, debug builds trigger the DEBUG_CHECK.
+TEST(SafeSPrintfTest, MissingArg) {
+#if defined(NDEBUG)
+  char buf[20];
+  EXPECT_EQ(3, SafeSPrintf(buf, "%c%c", 'A'));
+  EXPECT_EQ("A%c", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  char buf[20];
+  EXPECT_DEATH(SafeSPrintf(buf, "%c%c", 'A'), "cur_arg < max_args");
+#endif
+}
+
+TEST(SafeSPrintfTest, ASANFriendlyBufferTest) {
+  // Print into a buffer that is sized exactly to size. ASAN can verify that
+  // nobody attempts to write past the end of the buffer.
+  // There is a more complicated test in PrintLongString() that covers a lot
+  // more edge cases, but it is also harder to debug in case of a failure.
+  const char kTestString[] = "This is a test";
+  std::unique_ptr<char[]> buf(new char[sizeof(kTestString)]);
+  // Exercise both the argument-free fast path and the "%s" expansion path.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(kTestString) - 1),
+            SafeSNPrintf(buf.get(), sizeof(kTestString), kTestString));
+  EXPECT_EQ(std::string(kTestString), std::string(buf.get()));
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(kTestString) - 1),
+            SafeSNPrintf(buf.get(), sizeof(kTestString), "%s", kTestString));
+  EXPECT_EQ(std::string(kTestString), std::string(buf.get()));
+}
+
+TEST(SafeSPrintfTest, NArgs) {
+  // Pre-C++11 compilers have a different code path, that can only print
+  // up to ten distinct arguments.
+  // We test both SafeSPrintf() and SafeSNPrintf(). This makes sure we don't
+  // have typos in the copy-n-pasted code that is needed to deal with various
+  // numbers of arguments.
+  char buf[12];
+  EXPECT_EQ(1, SafeSPrintf(buf, "%c", 1));
+  EXPECT_EQ("\1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%c%c", 1, 2));
+  EXPECT_EQ("\1\2", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%c%c%c", 1, 2, 3));
+  EXPECT_EQ("\1\2\3", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%c%c%c%c", 1, 2, 3, 4));
+  EXPECT_EQ("\1\2\3\4", std::string(buf));
+  EXPECT_EQ(5, SafeSPrintf(buf, "%c%c%c%c%c", 1, 2, 3, 4, 5));
+  EXPECT_EQ("\1\2\3\4\5", std::string(buf));
+  EXPECT_EQ(6, SafeSPrintf(buf, "%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6));
+  EXPECT_EQ("\1\2\3\4\5\6", std::string(buf));
+  EXPECT_EQ(7, SafeSPrintf(buf, "%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7));
+  EXPECT_EQ("\1\2\3\4\5\6\7", std::string(buf));
+  EXPECT_EQ(8, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7, 8));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10", std::string(buf));
+  EXPECT_EQ(9, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c",
+                           1, 2, 3, 4, 5, 6, 7, 8, 9));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11", std::string(buf));
+  EXPECT_EQ(10, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c%c",
+                            1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12", std::string(buf));
+
+  // Repeat all the tests with SafeSNPrintf() instead of SafeSPrintf().
+  EXPECT_EQ(1, SafeSNPrintf(buf, 11, "%c", 1));
+  EXPECT_EQ("\1", std::string(buf));
+  EXPECT_EQ(2, SafeSNPrintf(buf, 11, "%c%c", 1, 2));
+  EXPECT_EQ("\1\2", std::string(buf));
+  EXPECT_EQ(3, SafeSNPrintf(buf, 11, "%c%c%c", 1, 2, 3));
+  EXPECT_EQ("\1\2\3", std::string(buf));
+  EXPECT_EQ(4, SafeSNPrintf(buf, 11, "%c%c%c%c", 1, 2, 3, 4));
+  EXPECT_EQ("\1\2\3\4", std::string(buf));
+  EXPECT_EQ(5, SafeSNPrintf(buf, 11, "%c%c%c%c%c", 1, 2, 3, 4, 5));
+  EXPECT_EQ("\1\2\3\4\5", std::string(buf));
+  EXPECT_EQ(6, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6));
+  EXPECT_EQ("\1\2\3\4\5\6", std::string(buf));
+  EXPECT_EQ(7, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7));
+  EXPECT_EQ("\1\2\3\4\5\6\7", std::string(buf));
+  EXPECT_EQ(8, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c",
+                            1, 2, 3, 4, 5, 6, 7, 8));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10", std::string(buf));
+  EXPECT_EQ(9, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c%c",
+                            1, 2, 3, 4, 5, 6, 7, 8, 9));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11", std::string(buf));
+  EXPECT_EQ(10, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c%c%c",
+                             1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12", std::string(buf));
+
+  // Eleven arguments exercise the variadic-template code path.
+  EXPECT_EQ(11, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c%c%c",
+                            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12\13", std::string(buf));
+  EXPECT_EQ(11, SafeSNPrintf(buf, 12, "%c%c%c%c%c%c%c%c%c%c%c",
+                             1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12\13", std::string(buf));
+}
+
+TEST(SafeSPrintfTest, DataTypes) {
+ char buf[40];
+
+ // Bytes
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint8_t)1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%d", (uint8_t)-1));
+ EXPECT_EQ("255", std::string(buf));
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int8_t)1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int8_t)-1));
+ EXPECT_EQ("-1", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%d", (int8_t)-128));
+ EXPECT_EQ("-128", std::string(buf));
+
+ // Half-words
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint16_t)1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(5, SafeSPrintf(buf, "%d", (uint16_t)-1));
+ EXPECT_EQ("65535", std::string(buf));
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int16_t)1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int16_t)-1));
+ EXPECT_EQ("-1", std::string(buf));
+ EXPECT_EQ(6, SafeSPrintf(buf, "%d", (int16_t)-32768));
+ EXPECT_EQ("-32768", std::string(buf));
+
+ // Words
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint32_t)1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(10, SafeSPrintf(buf, "%d", (uint32_t)-1));
+ EXPECT_EQ("4294967295", std::string(buf));
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int32_t)1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int32_t)-1));
+ EXPECT_EQ("-1", std::string(buf));
+  // Work-around for a limitation of C90
+ EXPECT_EQ(11, SafeSPrintf(buf, "%d", (int32_t)-2147483647-1));
+ EXPECT_EQ("-2147483648", std::string(buf));
+
+ // Quads
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint64_t)1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(20, SafeSPrintf(buf, "%d", (uint64_t)-1));
+ EXPECT_EQ("18446744073709551615", std::string(buf));
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int64_t)1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int64_t)-1));
+ EXPECT_EQ("-1", std::string(buf));
+  // Work-around for a limitation of C90
+ EXPECT_EQ(20, SafeSPrintf(buf, "%d", (int64_t)-9223372036854775807LL-1));
+ EXPECT_EQ("-9223372036854775808", std::string(buf));
+
+ // Strings (both const and mutable).
+ EXPECT_EQ(4, SafeSPrintf(buf, "test"));
+ EXPECT_EQ("test", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, buf));
+ EXPECT_EQ("test", std::string(buf));
+
+ // Pointer
+ char addr[20];
+ sprintf(addr, "0x%llX", (unsigned long long)(uintptr_t)buf);
+ SafeSPrintf(buf, "%p", buf);
+ EXPECT_EQ(std::string(addr), std::string(buf));
+ SafeSPrintf(buf, "%p", (const char *)buf);
+ EXPECT_EQ(std::string(addr), std::string(buf));
+ sprintf(addr, "0x%llX", (unsigned long long)(uintptr_t)sprintf);
+ SafeSPrintf(buf, "%p", sprintf);
+ EXPECT_EQ(std::string(addr), std::string(buf));
+
+ // Padding for pointers is a little more complicated because of the "0x"
+ // prefix. Padding with '0' zeros is relatively straight-forward, but
+ // padding with ' ' spaces requires more effort.
+ sprintf(addr, "0x%017llX", (unsigned long long)(uintptr_t)buf);
+ SafeSPrintf(buf, "%019p", buf);
+ EXPECT_EQ(std::string(addr), std::string(buf));
+ sprintf(addr, "0x%llX", (unsigned long long)(uintptr_t)buf);
+ memset(addr, ' ',
+ (char*)memmove(addr + sizeof(addr) - strlen(addr) - 1,
+ addr, strlen(addr)+1) - addr);
+ SafeSPrintf(buf, "%19p", buf);
+ EXPECT_EQ(std::string(addr), std::string(buf));
+}
+
+namespace {
+void PrintLongString(char* buf, size_t sz) {
+ // Output a reasonably complex expression into a limited-size buffer.
+ // At least one byte is available for writing the NUL character.
+ CHECK_GT(sz, static_cast<size_t>(0));
+
+ // Allocate slightly more space, so that we can verify that SafeSPrintf()
+ // never writes past the end of the buffer.
+ std::unique_ptr<char[]> tmp(new char[sz + 2]);
+ memset(tmp.get(), 'X', sz+2);
+
+ // Use SafeSPrintf() to output a complex list of arguments:
+ // - test padding and truncating %c single characters.
+ // - test truncating %s simple strings.
+ // - test mismatching arguments and truncating (for %d != %s).
+ // - test zero-padding and truncating %x hexadecimal numbers.
+ // - test outputting and truncating %d MININT.
+ // - test outputting and truncating %p arbitrary pointer values.
+ // - test outputting, padding and truncating NULL-pointer %s strings.
+ char* out = tmp.get();
+ size_t out_sz = sz;
+ size_t len;
+ for (std::unique_ptr<char[]> perfect_buf;;) {
+ size_t needed = SafeSNPrintf(out, out_sz,
+#if defined(NDEBUG)
+ "A%2cong %s: %d %010X %d %p%7s", 'l', "string", "",
+#else
+ "A%2cong %s: %%d %010X %d %p%7s", 'l', "string",
+#endif
+ 0xDEADBEEF, std::numeric_limits<intptr_t>::min(),
+ PrintLongString, static_cast<char*>(NULL)) + 1;
+
+ // Various sanity checks:
+  // The number of characters needed to print the full string should always
+ // be bigger or equal to the bytes that have actually been output.
+ len = strlen(tmp.get());
+ CHECK_GE(needed, len+1);
+
+ // The number of characters output should always fit into the buffer that
+ // was passed into SafeSPrintf().
+ CHECK_LT(len, out_sz);
+
+ // The output is always terminated with a NUL byte (actually, this test is
+ // always going to pass, as strlen() already verified this)
+ EXPECT_FALSE(tmp[len]);
+
+ // ASAN can check that we are not overwriting buffers, iff we make sure the
+ // buffer is exactly the size that we are expecting to be written. After
+ // running SafeSNPrintf() the first time, it is possible to compute the
+ // correct buffer size for this test. So, allocate a second buffer and run
+ // the exact same SafeSNPrintf() command again.
+ if (!perfect_buf.get()) {
+ out_sz = std::min(needed, sz);
+ out = new char[out_sz];
+ perfect_buf.reset(out);
+ } else {
+ break;
+ }
+ }
+
+ // All trailing bytes are unchanged.
+ for (size_t i = len+1; i < sz+2; ++i)
+ EXPECT_EQ('X', tmp[i]);
+
+ // The text that was generated by SafeSPrintf() should always match the
+ // equivalent text generated by sprintf(). Please note that the format
+ // string for sprintf() is not complicated, as it does not have the
+ // benefit of getting type information from the C++ compiler.
+ //
+ // N.B.: It would be so much cleaner to use snprintf(). But unfortunately,
+ // Visual Studio doesn't support this function, and the work-arounds
+ // are all really awkward.
+ char ref[256];
+ CHECK_LE(sz, sizeof(ref));
+ sprintf(ref, "A long string: %%d 00DEADBEEF %lld 0x%llX <NULL>",
+ static_cast<long long>(std::numeric_limits<intptr_t>::min()),
+ static_cast<unsigned long long>(
+ reinterpret_cast<uintptr_t>(PrintLongString)));
+ ref[sz-1] = '\000';
+
+#if defined(NDEBUG)
+ const size_t kSSizeMax = std::numeric_limits<ssize_t>::max();
+#else
+ const size_t kSSizeMax = internal::GetSafeSPrintfSSizeMaxForTest();
+#endif
+
+ // Compare the output from SafeSPrintf() to the one from sprintf().
+ EXPECT_EQ(std::string(ref).substr(0, kSSizeMax-1), std::string(tmp.get()));
+
+ // We allocated a slightly larger buffer, so that we could perform some
+ // extra sanity checks. Now that the tests have all passed, we copy the
+ // data to the output buffer that the caller provided.
+ memcpy(buf, tmp.get(), len+1);
+}
+
+#if !defined(NDEBUG)
+class ScopedSafeSPrintfSSizeMaxSetter {
+ public:
+ ScopedSafeSPrintfSSizeMaxSetter(size_t sz) {
+ old_ssize_max_ = internal::GetSafeSPrintfSSizeMaxForTest();
+ internal::SetSafeSPrintfSSizeMaxForTest(sz);
+ }
+
+ ~ScopedSafeSPrintfSSizeMaxSetter() {
+ internal::SetSafeSPrintfSSizeMaxForTest(old_ssize_max_);
+ }
+
+ private:
+ size_t old_ssize_max_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedSafeSPrintfSSizeMaxSetter);
+};
+#endif
+
+} // anonymous namespace
+
+TEST(SafeSPrintfTest, Truncation) {
+ // We use PrintLongString() to print a complex long string and then
+ // truncate to all possible lengths. This ends up exercising a lot of
+ // different code paths in SafeSPrintf() and IToASCII(), as truncation can
+ // happen in a lot of different states.
+ char ref[256];
+ PrintLongString(ref, sizeof(ref));
+ for (size_t i = strlen(ref)+1; i; --i) {
+ char buf[sizeof(ref)];
+ PrintLongString(buf, i);
+ EXPECT_EQ(std::string(ref, i - 1), std::string(buf));
+ }
+
+ // When compiling in debug mode, we have the ability to fake a small
+ // upper limit for the maximum value that can be stored in an ssize_t.
+ // SafeSPrintf() uses this upper limit to determine how many bytes it will
+ // write to the buffer, even if the caller claimed a bigger buffer size.
+ // Repeat the truncation test and verify that this other code path in
+ // SafeSPrintf() works correctly, too.
+#if !defined(NDEBUG)
+ for (size_t i = strlen(ref)+1; i > 1; --i) {
+ ScopedSafeSPrintfSSizeMaxSetter ssize_max_setter(i);
+ char buf[sizeof(ref)];
+ PrintLongString(buf, sizeof(buf));
+ EXPECT_EQ(std::string(ref, i - 1), std::string(buf));
+ }
+
+ // kSSizeMax is also used to constrain the maximum amount of padding, before
+ // SafeSPrintf() detects an error in the format string.
+ ScopedSafeSPrintfSSizeMaxSetter ssize_max_setter(100);
+ char buf[256];
+ EXPECT_EQ(99, SafeSPrintf(buf, "%99c", ' '));
+ EXPECT_EQ(std::string(99, ' '), std::string(buf));
+ *buf = '\000';
+#if defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, "%100c", ' '), "padding <= max_padding");
+#endif
+ EXPECT_EQ(0, *buf);
+#endif
+}
+
+TEST(SafeSPrintfTest, Padding) {
+ char buf[40], fmt[40];
+
+ // Chars %c
+ EXPECT_EQ(1, SafeSPrintf(buf, "%c", 'A'));
+ EXPECT_EQ("A", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%2c", 'A'));
+ EXPECT_EQ(" A", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%02c", 'A'));
+ EXPECT_EQ(" A", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%-2c", 'A'));
+ EXPECT_EQ("%-2c", std::string(buf));
+ SafeSPrintf(fmt, "%%%dc", std::numeric_limits<ssize_t>::max() - 1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1, SafeSPrintf(buf, fmt, 'A'));
+ SafeSPrintf(fmt, "%%%dc",
+ static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+ EXPECT_EQ(2, SafeSPrintf(buf, fmt, 'A'));
+ EXPECT_EQ("%c", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, fmt, 'A'), "padding <= max_padding");
+#endif
+
+ // Octal %o
+ EXPECT_EQ(1, SafeSPrintf(buf, "%o", 1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%2o", 1));
+ EXPECT_EQ(" 1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%02o", 1));
+ EXPECT_EQ("01", std::string(buf));
+ EXPECT_EQ(12, SafeSPrintf(buf, "%12o", -1));
+ EXPECT_EQ(" 37777777777", std::string(buf));
+ EXPECT_EQ(12, SafeSPrintf(buf, "%012o", -1));
+ EXPECT_EQ("037777777777", std::string(buf));
+ EXPECT_EQ(23, SafeSPrintf(buf, "%23o", -1LL));
+ EXPECT_EQ(" 1777777777777777777777", std::string(buf));
+ EXPECT_EQ(23, SafeSPrintf(buf, "%023o", -1LL));
+ EXPECT_EQ("01777777777777777777777", std::string(buf));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%2o", 0111));
+ EXPECT_EQ("111", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%-2o", 1));
+ EXPECT_EQ("%-2o", std::string(buf));
+ SafeSPrintf(fmt, "%%%do", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, 1));
+ EXPECT_EQ(" ", std::string(buf));
+ SafeSPrintf(fmt, "%%0%do", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, 1));
+ EXPECT_EQ("000", std::string(buf));
+ SafeSPrintf(fmt, "%%%do",
+ static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+ EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+ EXPECT_EQ("%o", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+ // Decimals %d
+ EXPECT_EQ(1, SafeSPrintf(buf, "%d", 1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%2d", 1));
+ EXPECT_EQ(" 1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%02d", 1));
+ EXPECT_EQ("01", std::string(buf));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%3d", -1));
+ EXPECT_EQ(" -1", std::string(buf));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%03d", -1));
+ EXPECT_EQ("-01", std::string(buf));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%2d", 111));
+ EXPECT_EQ("111", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%2d", -111));
+ EXPECT_EQ("-111", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%-2d", 1));
+ EXPECT_EQ("%-2d", std::string(buf));
+ SafeSPrintf(fmt, "%%%dd", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, 1));
+ EXPECT_EQ(" ", std::string(buf));
+ SafeSPrintf(fmt, "%%0%dd", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, 1));
+ EXPECT_EQ("000", std::string(buf));
+ SafeSPrintf(fmt, "%%%dd",
+ static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+ EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+ EXPECT_EQ("%d", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+ // Hex %X
+ EXPECT_EQ(1, SafeSPrintf(buf, "%X", 1));
+ EXPECT_EQ("1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%2X", 1));
+ EXPECT_EQ(" 1", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%02X", 1));
+ EXPECT_EQ("01", std::string(buf));
+ EXPECT_EQ(9, SafeSPrintf(buf, "%9X", -1));
+ EXPECT_EQ(" FFFFFFFF", std::string(buf));
+ EXPECT_EQ(9, SafeSPrintf(buf, "%09X", -1));
+ EXPECT_EQ("0FFFFFFFF", std::string(buf));
+ EXPECT_EQ(17, SafeSPrintf(buf, "%17X", -1LL));
+ EXPECT_EQ(" FFFFFFFFFFFFFFFF", std::string(buf));
+ EXPECT_EQ(17, SafeSPrintf(buf, "%017X", -1LL));
+ EXPECT_EQ("0FFFFFFFFFFFFFFFF", std::string(buf));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%2X", 0x111));
+ EXPECT_EQ("111", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%-2X", 1));
+ EXPECT_EQ("%-2X", std::string(buf));
+ SafeSPrintf(fmt, "%%%dX", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, 1));
+ EXPECT_EQ(" ", std::string(buf));
+ SafeSPrintf(fmt, "%%0%dX", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, 1));
+ EXPECT_EQ("000", std::string(buf));
+ SafeSPrintf(fmt, "%%%dX",
+ static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+ EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+ EXPECT_EQ("%X", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+ // Pointer %p
+ EXPECT_EQ(3, SafeSPrintf(buf, "%p", (void*)1));
+ EXPECT_EQ("0x1", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%4p", (void*)1));
+ EXPECT_EQ(" 0x1", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%04p", (void*)1));
+ EXPECT_EQ("0x01", std::string(buf));
+ EXPECT_EQ(5, SafeSPrintf(buf, "%4p", (void*)0x111));
+ EXPECT_EQ("0x111", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%-2p", (void*)1));
+ EXPECT_EQ("%-2p", std::string(buf));
+ SafeSPrintf(fmt, "%%%dp", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, (void*)1));
+ EXPECT_EQ(" ", std::string(buf));
+ SafeSPrintf(fmt, "%%0%dp", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, (void*)1));
+ EXPECT_EQ("0x0", std::string(buf));
+ SafeSPrintf(fmt, "%%%dp",
+ static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+ EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+ EXPECT_EQ("%p", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+ // String
+ EXPECT_EQ(1, SafeSPrintf(buf, "%s", "A"));
+ EXPECT_EQ("A", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%2s", "A"));
+ EXPECT_EQ(" A", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%02s", "A"));
+ EXPECT_EQ(" A", std::string(buf));
+ EXPECT_EQ(3, SafeSPrintf(buf, "%2s", "AAA"));
+ EXPECT_EQ("AAA", std::string(buf));
+ EXPECT_EQ(4, SafeSPrintf(buf, "%-2s", "A"));
+ EXPECT_EQ("%-2s", std::string(buf));
+ SafeSPrintf(fmt, "%%%ds", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, "A"));
+ EXPECT_EQ(" ", std::string(buf));
+ SafeSPrintf(fmt, "%%0%ds", std::numeric_limits<ssize_t>::max()-1);
+ EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+ SafeSNPrintf(buf, 4, fmt, "A"));
+ EXPECT_EQ(" ", std::string(buf));
+ SafeSPrintf(fmt, "%%%ds",
+ static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+ EXPECT_EQ(2, SafeSPrintf(buf, fmt, "A"));
+ EXPECT_EQ("%s", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, fmt, "A"), "padding <= max_padding");
+#endif
+}
+
+TEST(SafeSPrintfTest, EmbeddedNul) {
+ char buf[] = { 'X', 'X', 'X', 'X' };
+ EXPECT_EQ(2, SafeSPrintf(buf, "%3c", 0));
+ EXPECT_EQ(' ', buf[0]);
+ EXPECT_EQ(' ', buf[1]);
+ EXPECT_EQ(0, buf[2]);
+ EXPECT_EQ('X', buf[3]);
+
+ // Check handling of a NUL format character. N.B. this takes two different
+ // code paths depending on whether we are actually passing arguments. If
+ // we don't have any arguments, we are running in the fast-path code, that
+ // looks (almost) like a strncpy().
+#if defined(NDEBUG)
+ EXPECT_EQ(2, SafeSPrintf(buf, "%%%"));
+ EXPECT_EQ("%%", std::string(buf));
+ EXPECT_EQ(2, SafeSPrintf(buf, "%%%", 0));
+ EXPECT_EQ("%%", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+ EXPECT_DEATH(SafeSPrintf(buf, "%%%"), "src.1. == '%'");
+ EXPECT_DEATH(SafeSPrintf(buf, "%%%", 0), "ch");
+#endif
+}
+
+TEST(SafeSPrintfTest, EmitNULL) {
+  char buf[40];
+#if defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wconversion-null"
+#endif
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", NULL));
+  EXPECT_EQ("0", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%p", NULL));
+  EXPECT_EQ("0x0", std::string(buf));
+  EXPECT_EQ(6, SafeSPrintf(buf, "%s", NULL));
+  EXPECT_EQ("<NULL>", std::string(buf));
+#if defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+TEST(SafeSPrintfTest, PointerSize) {
+ // The internal data representation is a 64bit value, independent of the
+ // native word size. We want to perform sign-extension for signed integers,
+ // but we want to avoid doing so for pointer types. This could be a
+  // problem on systems where pointers are only 32bit. This test verifies
+ // that there is no such problem.
+ char *str = reinterpret_cast<char *>(0x80000000u);
+ void *ptr = str;
+ char buf[40];
+ EXPECT_EQ(10, SafeSPrintf(buf, "%p", str));
+ EXPECT_EQ("0x80000000", std::string(buf));
+ EXPECT_EQ(10, SafeSPrintf(buf, "%p", ptr));
+ EXPECT_EQ("0x80000000", std::string(buf));
+}
+
+} // namespace strings
+} // namespace base
diff --git a/libchrome/base/strings/string16.cc b/libchrome/base/strings/string16.cc
new file mode 100644
index 0000000..f4c8cf7
--- /dev/null
+++ b/libchrome/base/strings/string16.cc
@@ -0,0 +1,82 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string16.h"
+
+#if defined(WCHAR_T_IS_UTF16)
+
+#error This file should not be used on 2-byte wchar_t systems
+// If this winds up being needed on 2-byte wchar_t systems, either the
+// definitions below can be used, or the host system's wide character
+// functions like wmemcmp can be wrapped.
+
+#elif defined(WCHAR_T_IS_UTF32)
+
+#include <ostream>
+
+#include "base/strings/utf_string_conversions.h"
+
+namespace base {
+
+int c16memcmp(const char16* s1, const char16* s2, size_t n) {
+ // We cannot call memcmp because that changes the semantics.
+ while (n-- > 0) {
+ if (*s1 != *s2) {
+ // We cannot use (*s1 - *s2) because char16 is unsigned.
+ return ((*s1 < *s2) ? -1 : 1);
+ }
+ ++s1;
+ ++s2;
+ }
+ return 0;
+}
+
+size_t c16len(const char16* s) {
+ const char16 *s_orig = s;
+ while (*s) {
+ ++s;
+ }
+ return s - s_orig;
+}
+
+const char16* c16memchr(const char16* s, char16 c, size_t n) {
+ while (n-- > 0) {
+ if (*s == c) {
+ return s;
+ }
+ ++s;
+ }
+ return 0;
+}
+
+char16* c16memmove(char16* s1, const char16* s2, size_t n) {
+ return static_cast<char16*>(memmove(s1, s2, n * sizeof(char16)));
+}
+
+char16* c16memcpy(char16* s1, const char16* s2, size_t n) {
+ return static_cast<char16*>(memcpy(s1, s2, n * sizeof(char16)));
+}
+
+char16* c16memset(char16* s, char16 c, size_t n) {
+ char16 *s_orig = s;
+ while (n-- > 0) {
+ *s = c;
+ ++s;
+ }
+ return s_orig;
+}
+
+std::ostream& operator<<(std::ostream& out, const string16& str) {
+ return out << UTF16ToUTF8(str);
+}
+
+void PrintTo(const string16& str, std::ostream* out) {
+ *out << str;
+}
+
+} // namespace base
+
+template class std::basic_string<base::char16, base::string16_char_traits>;
+
+#endif // WCHAR_T_IS_UTF32
diff --git a/libchrome/base/strings/string16.h b/libchrome/base/strings/string16.h
new file mode 100644
index 0000000..30f4e3e
--- /dev/null
+++ b/libchrome/base/strings/string16.h
@@ -0,0 +1,206 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING16_H_
+#define BASE_STRINGS_STRING16_H_
+
+// WHAT:
+// A version of std::basic_string that provides 2-byte characters even when
+// wchar_t is not implemented as a 2-byte type. You can access this class as
+// string16. We also define char16, which string16 is based upon.
+//
+// WHY:
+// On Windows, wchar_t is 2 bytes, and it can conveniently handle UTF-16/UCS-2
+// data. Plenty of existing code operates on strings encoded as UTF-16.
+//
+// On many other platforms, sizeof(wchar_t) is 4 bytes by default. We can make
+// it 2 bytes by using the GCC flag -fshort-wchar. But then std::wstring fails
+// at run time, because it calls some functions (like wcslen) that come from
+// the system's native C library -- which was built with a 4-byte wchar_t!
+// It's wasteful to use 4-byte wchar_t strings to carry UTF-16 data, and it's
+// entirely improper on those systems where the encoding of wchar_t is defined
+// as UTF-32.
+//
+// Here, we define string16, which is similar to std::wstring but replaces all
+// libc functions with custom, 2-byte-char compatible routines. It is capable
+// of carrying UTF-16-encoded data.
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <functional>
+#include <string>
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+#if defined(WCHAR_T_IS_UTF16)
+
+namespace base {
+
+typedef wchar_t char16;
+typedef std::wstring string16;
+typedef std::char_traits<wchar_t> string16_char_traits;
+
+} // namespace base
+
+#elif defined(WCHAR_T_IS_UTF32)
+
+#include <wchar.h> // for mbstate_t
+
+namespace base {
+
+typedef uint16_t char16;
+
+// char16 versions of the functions required by string16_char_traits; these
+// are based on the wide character functions of similar names ("w" or "wcs"
+// instead of "c16").
+BASE_EXPORT int c16memcmp(const char16* s1, const char16* s2, size_t n);
+BASE_EXPORT size_t c16len(const char16* s);
+BASE_EXPORT const char16* c16memchr(const char16* s, char16 c, size_t n);
+BASE_EXPORT char16* c16memmove(char16* s1, const char16* s2, size_t n);
+BASE_EXPORT char16* c16memcpy(char16* s1, const char16* s2, size_t n);
+BASE_EXPORT char16* c16memset(char16* s, char16 c, size_t n);
+
+struct string16_char_traits {
+ typedef char16 char_type;
+ typedef int int_type;
+
+ // int_type needs to be able to hold each possible value of char_type, and in
+ // addition, the distinct value of eof().
+ static_assert(sizeof(int_type) > sizeof(char_type),
+ "int must be larger than 16 bits wide");
+
+ typedef std::streamoff off_type;
+ typedef mbstate_t state_type;
+ typedef std::fpos<state_type> pos_type;
+
+ static void assign(char_type& c1, const char_type& c2) {
+ c1 = c2;
+ }
+
+ static bool eq(const char_type& c1, const char_type& c2) {
+ return c1 == c2;
+ }
+ static bool lt(const char_type& c1, const char_type& c2) {
+ return c1 < c2;
+ }
+
+ static int compare(const char_type* s1, const char_type* s2, size_t n) {
+ return c16memcmp(s1, s2, n);
+ }
+
+ static size_t length(const char_type* s) {
+ return c16len(s);
+ }
+
+ static const char_type* find(const char_type* s, size_t n,
+ const char_type& a) {
+ return c16memchr(s, a, n);
+ }
+
+ static char_type* move(char_type* s1, const char_type* s2, size_t n) {
+ return c16memmove(s1, s2, n);
+ }
+
+ static char_type* copy(char_type* s1, const char_type* s2, size_t n) {
+ return c16memcpy(s1, s2, n);
+ }
+
+ static char_type* assign(char_type* s, size_t n, char_type a) {
+ return c16memset(s, a, n);
+ }
+
+ static int_type not_eof(const int_type& c) {
+ return eq_int_type(c, eof()) ? 0 : c;
+ }
+
+ static char_type to_char_type(const int_type& c) {
+ return char_type(c);
+ }
+
+ static int_type to_int_type(const char_type& c) {
+ return int_type(c);
+ }
+
+ static bool eq_int_type(const int_type& c1, const int_type& c2) {
+ return c1 == c2;
+ }
+
+ static int_type eof() {
+ return static_cast<int_type>(EOF);
+ }
+};
+
+typedef std::basic_string<char16, base::string16_char_traits> string16;
+
+BASE_EXPORT extern std::ostream& operator<<(std::ostream& out,
+ const string16& str);
+
+// This is required by googletest to print a readable output on test failures.
+BASE_EXPORT extern void PrintTo(const string16& str, std::ostream* out);
+
+} // namespace base
+
+// The string class will be explicitly instantiated only once, in string16.cc.
+//
+// std::basic_string<> in GNU libstdc++ contains a static data member,
+// _S_empty_rep_storage, to represent empty strings. When an operation such
+// as assignment or destruction is performed on a string, causing its existing
+// data member to be invalidated, it must not be freed if this static data
+// member is being used. Otherwise, it counts as an attempt to free static
+// (and not allocated) data, which is a memory error.
+//
+// Generally, due to C++ template magic, _S_empty_rep_storage will be marked
+// as a coalesced symbol, meaning that the linker will combine multiple
+// instances into a single one when generating output.
+//
+// If a string class is used by multiple shared libraries, a problem occurs.
+// Each library will get its own copy of _S_empty_rep_storage. When strings
+// are passed across a library boundary for alteration or destruction, memory
+// errors will result. GNU libstdc++ contains a configuration option,
+// --enable-fully-dynamic-string (_GLIBCXX_FULLY_DYNAMIC_STRING), which
+// disables the static data member optimization, but it's a good optimization
+// and non-STL code is generally at the mercy of the system's STL
+// configuration. Fully-dynamic strings are not the default for GNU
+// libstdc++ itself or for the libstdc++ installations on the systems we care
+// about, such as Mac OS X and relevant flavors of Linux.
+//
+// See also http://gcc.gnu.org/bugzilla/show_bug.cgi?id=24196 .
+//
+// To avoid problems, string classes need to be explicitly instantiated only
+// once, in exactly one library. All other string users see it via an "extern"
+// declaration. This is precisely how GNU libstdc++ handles
+// std::basic_string<char> (string) and std::basic_string<wchar_t> (wstring).
+//
+// This also works around a Mac OS X linker bug in ld64-85.2.1 (Xcode 3.1.2),
+// in which the linker does not fully coalesce symbols when dead code
+// stripping is enabled. This bug causes the memory errors described above
+// to occur even when a std::basic_string<> does not cross shared library
+// boundaries, such as in statically-linked executables.
+//
+// TODO(mark): File this bug with Apple and update this note with a bug number.
+
+extern template
+class BASE_EXPORT std::basic_string<base::char16, base::string16_char_traits>;
+
+// Specialize std::hash for base::string16. Although the style guide forbids
+// this in general, it is necessary for consistency with WCHAR_T_IS_UTF16
+// platforms, where base::string16 is a type alias for std::wstring.
+namespace std {
+template <>
+struct hash<base::string16> {
+ std::size_t operator()(const base::string16& s) const {
+ std::size_t result = 0;
+ for (base::char16 c : s)
+ result = (result * 131) + c;
+ return result;
+ }
+};
+} // namespace std
+
+#endif // WCHAR_T_IS_UTF32
+
+#endif // BASE_STRINGS_STRING16_H_
diff --git a/libchrome/base/strings/string16_unittest.cc b/libchrome/base/strings/string16_unittest.cc
new file mode 100644
index 0000000..0d2ca80
--- /dev/null
+++ b/libchrome/base/strings/string16_unittest.cc
@@ -0,0 +1,66 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <sstream>
+#include <unordered_set>
+
+#include "base/strings/string16.h"
+
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// We define a custom operator<< for string16 so we can use it with logging.
+// This tests that conversion.
+TEST(String16Test, OutputStream) {
+ // Basic stream test.
+ {
+ std::ostringstream stream;
+ stream << "Empty '" << string16() << "' standard '"
+ << string16(ASCIIToUTF16("Hello, world")) << "'";
+ EXPECT_STREQ("Empty '' standard 'Hello, world'",
+ stream.str().c_str());
+ }
+
+ // Interesting edge cases.
+ {
+ // These should each get converted to the invalid character: EF BF BD.
+ string16 initial_surrogate;
+ initial_surrogate.push_back(0xd800);
+ string16 final_surrogate;
+ final_surrogate.push_back(0xdc00);
+
+ // Old italic A = U+10300, will get converted to: F0 90 8C 80 'z'.
+ string16 surrogate_pair;
+ surrogate_pair.push_back(0xd800);
+ surrogate_pair.push_back(0xdf00);
+ surrogate_pair.push_back('z');
+
+ // Will get converted to the invalid char + 's': EF BF BD 's'.
+ string16 unterminated_surrogate;
+ unterminated_surrogate.push_back(0xd800);
+ unterminated_surrogate.push_back('s');
+
+ std::ostringstream stream;
+ stream << initial_surrogate << "," << final_surrogate << ","
+ << surrogate_pair << "," << unterminated_surrogate;
+
+ EXPECT_STREQ("\xef\xbf\xbd,\xef\xbf\xbd,\xf0\x90\x8c\x80z,\xef\xbf\xbds",
+ stream.str().c_str());
+ }
+}
+
+TEST(String16Test, Hash) {
+ string16 str1 = ASCIIToUTF16("hello");
+ string16 str2 = ASCIIToUTF16("world");
+
+ std::unordered_set<string16> set;
+
+ set.insert(str1);
+ EXPECT_EQ(1u, set.count(str1));
+ EXPECT_EQ(0u, set.count(str2));
+}
+
+} // namespace base
diff --git a/libchrome/base/strings/string_number_conversions.cc b/libchrome/base/strings/string_number_conversions.cc
new file mode 100644
index 0000000..09aeb44
--- /dev/null
+++ b/libchrome/base/strings/string_number_conversions.cc
@@ -0,0 +1,488 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_number_conversions.h"
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <wctype.h>
+
+#include <limits>
+
+#include "base/logging.h"
+#include "base/numerics/safe_math.h"
+#include "base/scoped_clear_errno.h"
+
+namespace base {
+
+namespace {
+
+template <typename STR, typename INT>
+struct IntToStringT {
+ static STR IntToString(INT value) {
+ // log10(2) ~= 0.3 bytes needed per bit or per byte log10(2**8) ~= 2.4.
+ // So round up to allocate 3 output characters per byte, plus 1 for '-'.
+ const size_t kOutputBufSize =
+ 3 * sizeof(INT) + std::numeric_limits<INT>::is_signed;
+
+ // Create the string in a temporary buffer, write it back to front, and
+ // then return the substr of what we ended up using.
+ using CHR = typename STR::value_type;
+ CHR outbuf[kOutputBufSize];
+
+ // The ValueOrDie call below can never fail, because UnsignedAbs is valid
+ // for all valid inputs.
+ auto res = CheckedNumeric<INT>(value).UnsignedAbs().ValueOrDie();
+
+ CHR* end = outbuf + kOutputBufSize;
+ CHR* i = end;
+ do {
+ --i;
+ DCHECK(i != outbuf);
+ *i = static_cast<CHR>((res % 10) + '0');
+ res /= 10;
+ } while (res != 0);
+ if (IsValueNegative(value)) {
+ --i;
+ DCHECK(i != outbuf);
+ *i = static_cast<CHR>('-');
+ }
+ return STR(i, end);
+ }
+};
+
+// Utility to convert a character to a digit in a given base
+template<typename CHAR, int BASE, bool BASE_LTE_10> class BaseCharToDigit {
+};
+
+// Faster specialization for bases <= 10
+template<typename CHAR, int BASE> class BaseCharToDigit<CHAR, BASE, true> {
+ public:
+ static bool Convert(CHAR c, uint8_t* digit) {
+ if (c >= '0' && c < '0' + BASE) {
+ *digit = static_cast<uint8_t>(c - '0');
+ return true;
+ }
+ return false;
+ }
+};
+
+// Specialization for bases where 10 < base <= 36
+template<typename CHAR, int BASE> class BaseCharToDigit<CHAR, BASE, false> {
+ public:
+ static bool Convert(CHAR c, uint8_t* digit) {
+ if (c >= '0' && c <= '9') {
+ *digit = c - '0';
+ } else if (c >= 'a' && c < 'a' + BASE - 10) {
+ *digit = c - 'a' + 10;
+ } else if (c >= 'A' && c < 'A' + BASE - 10) {
+ *digit = c - 'A' + 10;
+ } else {
+ return false;
+ }
+ return true;
+ }
+};
+
+template <int BASE, typename CHAR>
+bool CharToDigit(CHAR c, uint8_t* digit) {
+ return BaseCharToDigit<CHAR, BASE, BASE <= 10>::Convert(c, digit);
+}
+
+// There is an IsUnicodeWhitespace for wchars defined in string_util.h, but it
+// is locale independent, whereas the functions we are replacing were
+// locale-dependent. TBD what is desired, but for the moment let's not
+// introduce a change in behaviour.
+template<typename CHAR> class WhitespaceHelper {
+};
+
+template<> class WhitespaceHelper<char> {
+ public:
+ static bool Invoke(char c) {
+ return 0 != isspace(static_cast<unsigned char>(c));
+ }
+};
+
+template<> class WhitespaceHelper<char16> {
+ public:
+ static bool Invoke(char16 c) {
+ return 0 != iswspace(c);
+ }
+};
+
+template<typename CHAR> bool LocalIsWhitespace(CHAR c) {
+ return WhitespaceHelper<CHAR>::Invoke(c);
+}
+
+// IteratorRangeToNumberTraits should provide:
+// - a typedef for iterator_type, the iterator type used as input.
+// - a typedef for value_type, the target numeric type.
+// - static functions min, max (returning the minimum and maximum permitted
+// values)
+// - constant kBase, the base in which to interpret the input
+template<typename IteratorRangeToNumberTraits>
+class IteratorRangeToNumber {
+ public:
+ typedef IteratorRangeToNumberTraits traits;
+ typedef typename traits::iterator_type const_iterator;
+ typedef typename traits::value_type value_type;
+
+ // Generalized iterator-range-to-number conversion.
+ //
+ static bool Invoke(const_iterator begin,
+ const_iterator end,
+ value_type* output) {
+ bool valid = true;
+
+ while (begin != end && LocalIsWhitespace(*begin)) {
+ valid = false;
+ ++begin;
+ }
+
+ if (begin != end && *begin == '-') {
+ if (!std::numeric_limits<value_type>::is_signed) {
+ *output = 0;
+ valid = false;
+ } else if (!Negative::Invoke(begin + 1, end, output)) {
+ valid = false;
+ }
+ } else {
+ if (begin != end && *begin == '+') {
+ ++begin;
+ }
+ if (!Positive::Invoke(begin, end, output)) {
+ valid = false;
+ }
+ }
+
+ return valid;
+ }
+
+ private:
+ // Sign provides:
+ // - a static function, CheckBounds, that determines whether the next digit
+ // causes an overflow/underflow
+ // - a static function, Increment, that appends the next digit appropriately
+ // according to the sign of the number being parsed.
+ template<typename Sign>
+ class Base {
+ public:
+ static bool Invoke(const_iterator begin, const_iterator end,
+ typename traits::value_type* output) {
+ *output = 0;
+
+ if (begin == end) {
+ return false;
+ }
+
+ // Note: no performance difference was found when using template
+ // specialization to remove this check in bases other than 16
+ if (traits::kBase == 16 && end - begin > 2 && *begin == '0' &&
+ (*(begin + 1) == 'x' || *(begin + 1) == 'X')) {
+ begin += 2;
+ }
+
+ for (const_iterator current = begin; current != end; ++current) {
+ uint8_t new_digit = 0;
+
+ if (!CharToDigit<traits::kBase>(*current, &new_digit)) {
+ return false;
+ }
+
+ if (current != begin) {
+ if (!Sign::CheckBounds(output, new_digit)) {
+ return false;
+ }
+ *output *= traits::kBase;
+ }
+
+ Sign::Increment(new_digit, output);
+ }
+ return true;
+ }
+ };
+
+ class Positive : public Base<Positive> {
+ public:
+ static bool CheckBounds(value_type* output, uint8_t new_digit) {
+ if (*output > static_cast<value_type>(traits::max() / traits::kBase) ||
+ (*output == static_cast<value_type>(traits::max() / traits::kBase) &&
+ new_digit > traits::max() % traits::kBase)) {
+ *output = traits::max();
+ return false;
+ }
+ return true;
+ }
+ static void Increment(uint8_t increment, value_type* output) {
+ *output += increment;
+ }
+ };
+
+ class Negative : public Base<Negative> {
+ public:
+ static bool CheckBounds(value_type* output, uint8_t new_digit) {
+ if (*output < traits::min() / traits::kBase ||
+ (*output == traits::min() / traits::kBase &&
+ new_digit > 0 - traits::min() % traits::kBase)) {
+ *output = traits::min();
+ return false;
+ }
+ return true;
+ }
+ static void Increment(uint8_t increment, value_type* output) {
+ *output -= increment;
+ }
+ };
+};
+
+template<typename ITERATOR, typename VALUE, int BASE>
+class BaseIteratorRangeToNumberTraits {
+ public:
+ typedef ITERATOR iterator_type;
+ typedef VALUE value_type;
+ static value_type min() {
+ return std::numeric_limits<value_type>::min();
+ }
+ static value_type max() {
+ return std::numeric_limits<value_type>::max();
+ }
+ static const int kBase = BASE;
+};
+
+template<typename ITERATOR>
+class BaseHexIteratorRangeToIntTraits
+ : public BaseIteratorRangeToNumberTraits<ITERATOR, int, 16> {
+};
+
+template <typename ITERATOR>
+class BaseHexIteratorRangeToUIntTraits
+ : public BaseIteratorRangeToNumberTraits<ITERATOR, uint32_t, 16> {};
+
+template <typename ITERATOR>
+class BaseHexIteratorRangeToInt64Traits
+ : public BaseIteratorRangeToNumberTraits<ITERATOR, int64_t, 16> {};
+
+template <typename ITERATOR>
+class BaseHexIteratorRangeToUInt64Traits
+ : public BaseIteratorRangeToNumberTraits<ITERATOR, uint64_t, 16> {};
+
+typedef BaseHexIteratorRangeToIntTraits<StringPiece::const_iterator>
+ HexIteratorRangeToIntTraits;
+
+typedef BaseHexIteratorRangeToUIntTraits<StringPiece::const_iterator>
+ HexIteratorRangeToUIntTraits;
+
+typedef BaseHexIteratorRangeToInt64Traits<StringPiece::const_iterator>
+ HexIteratorRangeToInt64Traits;
+
+typedef BaseHexIteratorRangeToUInt64Traits<StringPiece::const_iterator>
+ HexIteratorRangeToUInt64Traits;
+
+template <typename STR>
+bool HexStringToBytesT(const STR& input, std::vector<uint8_t>* output) {
+ DCHECK_EQ(output->size(), 0u);
+ size_t count = input.size();
+ if (count == 0 || (count % 2) != 0)
+ return false;
+ for (uintptr_t i = 0; i < count / 2; ++i) {
+ uint8_t msb = 0; // most significant 4 bits
+ uint8_t lsb = 0; // least significant 4 bits
+ if (!CharToDigit<16>(input[i * 2], &msb) ||
+ !CharToDigit<16>(input[i * 2 + 1], &lsb))
+ return false;
+ output->push_back((msb << 4) | lsb);
+ }
+ return true;
+}
+
+template <typename VALUE, int BASE>
+class StringPieceToNumberTraits
+ : public BaseIteratorRangeToNumberTraits<StringPiece::const_iterator,
+ VALUE,
+ BASE> {
+};
+
+template <typename VALUE>
+bool StringToIntImpl(const StringPiece& input, VALUE* output) {
+ return IteratorRangeToNumber<StringPieceToNumberTraits<VALUE, 10> >::Invoke(
+ input.begin(), input.end(), output);
+}
+
+template <typename VALUE, int BASE>
+class StringPiece16ToNumberTraits
+ : public BaseIteratorRangeToNumberTraits<StringPiece16::const_iterator,
+ VALUE,
+ BASE> {
+};
+
+template <typename VALUE>
+bool String16ToIntImpl(const StringPiece16& input, VALUE* output) {
+ return IteratorRangeToNumber<StringPiece16ToNumberTraits<VALUE, 10> >::Invoke(
+ input.begin(), input.end(), output);
+}
+
+} // namespace
+
+std::string IntToString(int value) {
+ return IntToStringT<std::string, int>::IntToString(value);
+}
+
+string16 IntToString16(int value) {
+ return IntToStringT<string16, int>::IntToString(value);
+}
+
+std::string UintToString(unsigned int value) {
+ return IntToStringT<std::string, unsigned int>::IntToString(value);
+}
+
+string16 UintToString16(unsigned int value) {
+ return IntToStringT<string16, unsigned int>::IntToString(value);
+}
+
+std::string Int64ToString(int64_t value) {
+ return IntToStringT<std::string, int64_t>::IntToString(value);
+}
+
+string16 Int64ToString16(int64_t value) {
+ return IntToStringT<string16, int64_t>::IntToString(value);
+}
+
+std::string Uint64ToString(uint64_t value) {
+ return IntToStringT<std::string, uint64_t>::IntToString(value);
+}
+
+string16 Uint64ToString16(uint64_t value) {
+ return IntToStringT<string16, uint64_t>::IntToString(value);
+}
+
+std::string SizeTToString(size_t value) {
+ return IntToStringT<std::string, size_t>::IntToString(value);
+}
+
+string16 SizeTToString16(size_t value) {
+ return IntToStringT<string16, size_t>::IntToString(value);
+}
+
+std::string DoubleToString(double value) {
+ auto ret = std::to_string(value);
+ // If this returned an integer, don't do anything.
+ if (ret.find('.') == std::string::npos) {
+ return ret;
+ }
+ // Otherwise, it has an annoying tendency to leave trailing zeros.
+ size_t len = ret.size();
+ while (len >= 2 && ret[len - 1] == '0' && ret[len - 2] != '.') {
+ --len;
+ }
+ ret.erase(len);
+ return ret;
+}
+
+bool StringToInt(const StringPiece& input, int* output) {
+ return StringToIntImpl(input, output);
+}
+
+bool StringToInt(const StringPiece16& input, int* output) {
+ return String16ToIntImpl(input, output);
+}
+
+bool StringToUint(const StringPiece& input, unsigned* output) {
+ return StringToIntImpl(input, output);
+}
+
+bool StringToUint(const StringPiece16& input, unsigned* output) {
+ return String16ToIntImpl(input, output);
+}
+
+bool StringToInt64(const StringPiece& input, int64_t* output) {
+ return StringToIntImpl(input, output);
+}
+
+bool StringToInt64(const StringPiece16& input, int64_t* output) {
+ return String16ToIntImpl(input, output);
+}
+
+bool StringToUint64(const StringPiece& input, uint64_t* output) {
+ return StringToIntImpl(input, output);
+}
+
+bool StringToUint64(const StringPiece16& input, uint64_t* output) {
+ return String16ToIntImpl(input, output);
+}
+
+bool StringToSizeT(const StringPiece& input, size_t* output) {
+ return StringToIntImpl(input, output);
+}
+
+bool StringToSizeT(const StringPiece16& input, size_t* output) {
+ return String16ToIntImpl(input, output);
+}
+
+bool StringToDouble(const std::string& input, double* output) {
+ char* endptr = nullptr;
+ *output = strtod(input.c_str(), &endptr);
+
+ // Cases to return false:
+ // - If the input string is empty, there was nothing to parse.
+ // - If endptr does not point to the end of the string, there are either
+ // characters remaining in the string after a parsed number, or the string
+ // does not begin with a parseable number. endptr is compared to the
+ // expected end given the string's stated length to correctly catch cases
+ // where the string contains embedded NUL characters.
+ // - If the first character is a space, there was leading whitespace
+ return !input.empty() &&
+ input.c_str() + input.length() == endptr &&
+ !isspace(input[0]) &&
+ *output != std::numeric_limits<double>::infinity() &&
+ *output != -std::numeric_limits<double>::infinity();
+}
+
+// Note: if you need to add String16ToDouble, first ask yourself if it's
+// really necessary. If it is, probably the best implementation here is to
+// convert to 8-bit and then use the 8-bit version.
+
+// Note: if you need to add an iterator range version of StringToDouble, first
+// ask yourself if it's really necessary. If it is, probably the best
+// implementation here is to instantiate a string and use the string version.
+
+std::string HexEncode(const void* bytes, size_t size) {
+ static const char kHexChars[] = "0123456789ABCDEF";
+
+ // Each input byte creates two output hex characters.
+ std::string ret(size * 2, '\0');
+
+ for (size_t i = 0; i < size; ++i) {
+ char b = reinterpret_cast<const char*>(bytes)[i];
+ ret[(i * 2)] = kHexChars[(b >> 4) & 0xf];
+ ret[(i * 2) + 1] = kHexChars[b & 0xf];
+ }
+ return ret;
+}
+
+bool HexStringToInt(const StringPiece& input, int* output) {
+ return IteratorRangeToNumber<HexIteratorRangeToIntTraits>::Invoke(
+ input.begin(), input.end(), output);
+}
+
+bool HexStringToUInt(const StringPiece& input, uint32_t* output) {
+ return IteratorRangeToNumber<HexIteratorRangeToUIntTraits>::Invoke(
+ input.begin(), input.end(), output);
+}
+
+bool HexStringToInt64(const StringPiece& input, int64_t* output) {
+ return IteratorRangeToNumber<HexIteratorRangeToInt64Traits>::Invoke(
+ input.begin(), input.end(), output);
+}
+
+bool HexStringToUInt64(const StringPiece& input, uint64_t* output) {
+ return IteratorRangeToNumber<HexIteratorRangeToUInt64Traits>::Invoke(
+ input.begin(), input.end(), output);
+}
+
+bool HexStringToBytes(const std::string& input, std::vector<uint8_t>* output) {
+ return HexStringToBytesT(input, output);
+}
+
+} // namespace base
diff --git a/libchrome/base/strings/string_number_conversions.h b/libchrome/base/strings/string_number_conversions.h
new file mode 100644
index 0000000..a95544e
--- /dev/null
+++ b/libchrome/base/strings/string_number_conversions.h
@@ -0,0 +1,147 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_NUMBER_CONVERSIONS_H_
+#define BASE_STRINGS_STRING_NUMBER_CONVERSIONS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+
+// ----------------------------------------------------------------------------
+// IMPORTANT MESSAGE FROM YOUR SPONSOR
+//
+// This file contains no "wstring" variants. New code should use string16. If
+// you need to make old code work, use the UTF8 version and convert. Please do
+// not add wstring variants.
+//
+// Please do not add "convenience" functions for converting strings to integers
+// that return the value and ignore success/failure. That encourages people to
+// write code that doesn't properly handle the error conditions.
+//
+// DO NOT use these functions in any UI unless it's NOT localized on purpose.
+// Instead, use base::MessageFormatter for a complex message with numbers
+// (integer, float, double) embedded or base::Format{Number,Double,Percent} to
+// just format a single number/percent. Note that some languages use native
+// digits instead of ASCII digits while others use a group separator or decimal
+// point different from ',' and '.'. Using these functions in the UI would lead
+// numbers to be formatted in a non-native way.
+// ----------------------------------------------------------------------------
+
+namespace base {
+
+// Number -> string conversions ------------------------------------------------
+
+BASE_EXPORT std::string IntToString(int value);
+BASE_EXPORT string16 IntToString16(int value);
+
+BASE_EXPORT std::string UintToString(unsigned value);
+BASE_EXPORT string16 UintToString16(unsigned value);
+
+BASE_EXPORT std::string Int64ToString(int64_t value);
+BASE_EXPORT string16 Int64ToString16(int64_t value);
+
+BASE_EXPORT std::string Uint64ToString(uint64_t value);
+BASE_EXPORT string16 Uint64ToString16(uint64_t value);
+
+BASE_EXPORT std::string SizeTToString(size_t value);
+BASE_EXPORT string16 SizeTToString16(size_t value);
+
+// Deprecated: prefer std::to_string(double) instead.
+// DoubleToString converts the double to a string format that ignores the
+// locale. If you want to use locale specific formatting, use ICU.
+BASE_EXPORT std::string DoubleToString(double value);
+
+// String -> number conversions ------------------------------------------------
+
+// Perform a best-effort conversion of the input string to a numeric type,
+// setting |*output| to the result of the conversion. Returns true for
+// "perfect" conversions; returns false in the following cases:
+// - Overflow. |*output| will be set to the maximum value supported
+// by the data type.
+// - Underflow. |*output| will be set to the minimum value supported
+// by the data type.
+// - Trailing characters in the string after parsing the number. |*output|
+// will be set to the value of the number that was parsed.
+// - Leading whitespace in the string before parsing the number. |*output| will
+// be set to the value of the number that was parsed.
+// - No characters parseable as a number at the beginning of the string.
+// |*output| will be set to 0.
+// - Empty string. |*output| will be set to 0.
+// WARNING: Will write to |output| even when returning false.
+// Read the comments above carefully.
+BASE_EXPORT bool StringToInt(const StringPiece& input, int* output);
+BASE_EXPORT bool StringToInt(const StringPiece16& input, int* output);
+
+BASE_EXPORT bool StringToUint(const StringPiece& input, unsigned* output);
+BASE_EXPORT bool StringToUint(const StringPiece16& input, unsigned* output);
+
+BASE_EXPORT bool StringToInt64(const StringPiece& input, int64_t* output);
+BASE_EXPORT bool StringToInt64(const StringPiece16& input, int64_t* output);
+
+BASE_EXPORT bool StringToUint64(const StringPiece& input, uint64_t* output);
+BASE_EXPORT bool StringToUint64(const StringPiece16& input, uint64_t* output);
+
+BASE_EXPORT bool StringToSizeT(const StringPiece& input, size_t* output);
+BASE_EXPORT bool StringToSizeT(const StringPiece16& input, size_t* output);
+
+// Deprecated: prefer std::stod() instead.
+// For floating-point conversions, only conversions of input strings in decimal
+// form are defined to work. Behavior with strings representing floating-point
+// numbers in hexadecimal, and strings representing non-finite values (such as
+// NaN and inf) is undefined. Otherwise, these behave the same as the integral
+// variants. This expects the input string to NOT be specific to the locale.
+// If your input is locale specific, use ICU to read the number.
+// WARNING: Will write to |output| even when returning false.
+// Read the comments here and above StringToInt() carefully.
+BASE_EXPORT bool StringToDouble(const std::string& input, double* output);
+
+// Hex encoding ----------------------------------------------------------------
+
+// Returns a hex string representation of a binary buffer. The returned hex
+// string will be in upper case. This function does not check if |size| is
+// within reasonable limits since it's written with trusted data in mind. If
+// you suspect that the data you want to format might be large, the absolute
+// max size for |size| should be
+// std::numeric_limits<size_t>::max() / 2
+BASE_EXPORT std::string HexEncode(const void* bytes, size_t size);
+
+// Best effort conversion, see StringToInt above for restrictions.
+// Will only successfully parse hex values that will fit into |output|, i.e.
+// -0x80000000 < |input| < 0x7FFFFFFF.
+BASE_EXPORT bool HexStringToInt(const StringPiece& input, int* output);
+
+// Best effort conversion, see StringToInt above for restrictions.
+// Will only successfully parse hex values that will fit into |output|, i.e.
+// 0x00000000 < |input| < 0xFFFFFFFF.
+// The string is not required to start with 0x.
+BASE_EXPORT bool HexStringToUInt(const StringPiece& input, uint32_t* output);
+
+// Best effort conversion, see StringToInt above for restrictions.
+// Will only successfully parse hex values that will fit into |output|, i.e.
+// -0x8000000000000000 < |input| < 0x7FFFFFFFFFFFFFFF.
+BASE_EXPORT bool HexStringToInt64(const StringPiece& input, int64_t* output);
+
+// Best effort conversion, see StringToInt above for restrictions.
+// Will only successfully parse hex values that will fit into |output|, i.e.
+// 0x0000000000000000 < |input| < 0xFFFFFFFFFFFFFFFF.
+// The string is not required to start with 0x.
+BASE_EXPORT bool HexStringToUInt64(const StringPiece& input, uint64_t* output);
+
+// Similar to the previous functions, except that output is a vector of bytes.
+// |*output| will contain as many bytes as were successfully parsed prior to the
+// error. There is no overflow, but input.size() must be evenly divisible by 2.
+// Leading 0x or +/- are not allowed.
+BASE_EXPORT bool HexStringToBytes(const std::string& input,
+ std::vector<uint8_t>* output);
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRING_NUMBER_CONVERSIONS_H_
diff --git a/libchrome/base/strings/string_number_conversions_unittest.cc b/libchrome/base/strings/string_number_conversions_unittest.cc
new file mode 100644
index 0000000..91191e0
--- /dev/null
+++ b/libchrome/base/strings/string_number_conversions_unittest.cc
@@ -0,0 +1,812 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_number_conversions.h"
+
+#include <errno.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <cmath>
+#include <limits>
+
+#include "base/bit_cast.h"
+#include "base/format_macros.h"
+#include "base/macros.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+template <typename INT>
+struct IntToStringTest {
+ INT num;
+ const char* sexpected;
+ const char* uexpected;
+};
+
+} // namespace
+
+TEST(StringNumberConversionsTest, IntToString) {
+ static const IntToStringTest<int> int_tests[] = {
+ { 0, "0", "0" },
+ { -1, "-1", "4294967295" },
+ { std::numeric_limits<int>::max(), "2147483647", "2147483647" },
+ { std::numeric_limits<int>::min(), "-2147483648", "2147483648" },
+ };
+ static const IntToStringTest<int64_t> int64_tests[] = {
+ {0, "0", "0"},
+ {-1, "-1", "18446744073709551615"},
+ {
+ std::numeric_limits<int64_t>::max(), "9223372036854775807",
+ "9223372036854775807",
+ },
+ {std::numeric_limits<int64_t>::min(), "-9223372036854775808",
+ "9223372036854775808"},
+ };
+
+ for (size_t i = 0; i < arraysize(int_tests); ++i) {
+ const IntToStringTest<int>* test = &int_tests[i];
+ EXPECT_EQ(IntToString(test->num), test->sexpected);
+ EXPECT_EQ(IntToString16(test->num), UTF8ToUTF16(test->sexpected));
+ EXPECT_EQ(UintToString(test->num), test->uexpected);
+ EXPECT_EQ(UintToString16(test->num), UTF8ToUTF16(test->uexpected));
+ }
+ for (size_t i = 0; i < arraysize(int64_tests); ++i) {
+ const IntToStringTest<int64_t>* test = &int64_tests[i];
+ EXPECT_EQ(Int64ToString(test->num), test->sexpected);
+ EXPECT_EQ(Int64ToString16(test->num), UTF8ToUTF16(test->sexpected));
+ EXPECT_EQ(Uint64ToString(test->num), test->uexpected);
+ EXPECT_EQ(Uint64ToString16(test->num), UTF8ToUTF16(test->uexpected));
+ }
+}
+
+TEST(StringNumberConversionsTest, Uint64ToString) {
+ static const struct {
+ uint64_t input;
+ std::string output;
+ } cases[] = {
+ {0, "0"},
+ {42, "42"},
+ {INT_MAX, "2147483647"},
+ {std::numeric_limits<uint64_t>::max(), "18446744073709551615"},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i)
+ EXPECT_EQ(cases[i].output, Uint64ToString(cases[i].input));
+}
+
+TEST(StringNumberConversionsTest, SizeTToString) {
+ size_t size_t_max = std::numeric_limits<size_t>::max();
+ std::string size_t_max_string = StringPrintf("%" PRIuS, size_t_max);
+
+ static const struct {
+ size_t input;
+ std::string output;
+ } cases[] = {
+ {0, "0"},
+ {9, "9"},
+ {42, "42"},
+ {INT_MAX, "2147483647"},
+ {2147483648U, "2147483648"},
+#if SIZE_MAX > 4294967295U
+ {99999999999U, "99999999999"},
+#endif
+ {size_t_max, size_t_max_string},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i)
+ EXPECT_EQ(cases[i].output, Uint64ToString(cases[i].input));
+}
+
+TEST(StringNumberConversionsTest, StringToInt) {
+ static const struct {
+ std::string input;
+ int output;
+ bool success;
+ } cases[] = {
+ {"0", 0, true},
+ {"42", 42, true},
+ {"42\x99", 42, false},
+ {"\x99" "42\x99", 0, false},
+ {"-2147483648", INT_MIN, true},
+ {"2147483647", INT_MAX, true},
+ {"", 0, false},
+ {" 42", 42, false},
+ {"42 ", 42, false},
+ {"\t\n\v\f\r 42", 42, false},
+ {"blah42", 0, false},
+ {"42blah", 42, false},
+ {"blah42blah", 0, false},
+ {"-273.15", -273, false},
+ {"+98.6", 98, false},
+ {"--123", 0, false},
+ {"++123", 0, false},
+ {"-+123", 0, false},
+ {"+-123", 0, false},
+ {"-", 0, false},
+ {"-2147483649", INT_MIN, false},
+ {"-99999999999", INT_MIN, false},
+ {"2147483648", INT_MAX, false},
+ {"99999999999", INT_MAX, false},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ int output = cases[i].output ^ 1; // Ensure StringToInt wrote something.
+ EXPECT_EQ(cases[i].success, StringToInt(cases[i].input, &output));
+ EXPECT_EQ(cases[i].output, output);
+
+ string16 utf16_input = UTF8ToUTF16(cases[i].input);
+ output = cases[i].output ^ 1; // Ensure StringToInt wrote something.
+ EXPECT_EQ(cases[i].success, StringToInt(utf16_input, &output));
+ EXPECT_EQ(cases[i].output, output);
+ }
+
+  // One additional test to verify that conversion of numbers in strings
+  // with embedded NUL characters works. The NUL and extra data after it
+  // should be interpreted as junk after the number.
+ const char input[] = "6\06";
+ std::string input_string(input, arraysize(input) - 1);
+ int output;
+ EXPECT_FALSE(StringToInt(input_string, &output));
+ EXPECT_EQ(6, output);
+
+ string16 utf16_input = UTF8ToUTF16(input_string);
+ output = 0;
+ EXPECT_FALSE(StringToInt(utf16_input, &output));
+ EXPECT_EQ(6, output);
+
+ output = 0;
+ const char16 negative_wide_input[] = { 0xFF4D, '4', '2', 0};
+ EXPECT_FALSE(StringToInt(string16(negative_wide_input), &output));
+ EXPECT_EQ(0, output);
+}
+
+TEST(StringNumberConversionsTest, StringToUint) {
+ static const struct {
+ std::string input;
+ unsigned output;
+ bool success;
+ } cases[] = {
+ {"0", 0, true},
+ {"42", 42, true},
+ {"42\x99", 42, false},
+ {"\x99" "42\x99", 0, false},
+ {"-2147483648", 0, false},
+ {"2147483647", INT_MAX, true},
+ {"", 0, false},
+ {" 42", 42, false},
+ {"42 ", 42, false},
+ {"\t\n\v\f\r 42", 42, false},
+ {"blah42", 0, false},
+ {"42blah", 42, false},
+ {"blah42blah", 0, false},
+ {"-273.15", 0, false},
+ {"+98.6", 98, false},
+ {"--123", 0, false},
+ {"++123", 0, false},
+ {"-+123", 0, false},
+ {"+-123", 0, false},
+ {"-", 0, false},
+ {"-2147483649", 0, false},
+ {"-99999999999", 0, false},
+ {"4294967295", UINT_MAX, true},
+ {"4294967296", UINT_MAX, false},
+ {"99999999999", UINT_MAX, false},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ unsigned output =
+ cases[i].output ^ 1; // Ensure StringToUint wrote something.
+ EXPECT_EQ(cases[i].success, StringToUint(cases[i].input, &output));
+ EXPECT_EQ(cases[i].output, output);
+
+ string16 utf16_input = UTF8ToUTF16(cases[i].input);
+ output = cases[i].output ^ 1; // Ensure StringToUint wrote something.
+ EXPECT_EQ(cases[i].success, StringToUint(utf16_input, &output));
+ EXPECT_EQ(cases[i].output, output);
+ }
+
+  // One additional test to verify that conversion of numbers in strings
+  // with embedded NUL characters works. The NUL and extra data after it
+  // should be interpreted as junk after the number.
+ const char input[] = "6\06";
+ std::string input_string(input, arraysize(input) - 1);
+ unsigned output;
+ EXPECT_FALSE(StringToUint(input_string, &output));
+ EXPECT_EQ(6U, output);
+
+ string16 utf16_input = UTF8ToUTF16(input_string);
+ output = 0;
+ EXPECT_FALSE(StringToUint(utf16_input, &output));
+ EXPECT_EQ(6U, output);
+
+ output = 0;
+ const char16 negative_wide_input[] = { 0xFF4D, '4', '2', 0};
+ EXPECT_FALSE(StringToUint(string16(negative_wide_input), &output));
+ EXPECT_EQ(0U, output);
+}
+
+TEST(StringNumberConversionsTest, StringToInt64) {
+ static const struct {
+ std::string input;
+ int64_t output;
+ bool success;
+ } cases[] = {
+ {"0", 0, true},
+ {"42", 42, true},
+ {"-2147483648", INT_MIN, true},
+ {"2147483647", INT_MAX, true},
+ {"-2147483649", INT64_C(-2147483649), true},
+ {"-99999999999", INT64_C(-99999999999), true},
+ {"2147483648", INT64_C(2147483648), true},
+ {"99999999999", INT64_C(99999999999), true},
+ {"9223372036854775807", std::numeric_limits<int64_t>::max(), true},
+ {"-9223372036854775808", std::numeric_limits<int64_t>::min(), true},
+ {"09", 9, true},
+ {"-09", -9, true},
+ {"", 0, false},
+ {" 42", 42, false},
+ {"42 ", 42, false},
+ {"0x42", 0, false},
+ {"\t\n\v\f\r 42", 42, false},
+ {"blah42", 0, false},
+ {"42blah", 42, false},
+ {"blah42blah", 0, false},
+ {"-273.15", -273, false},
+ {"+98.6", 98, false},
+ {"--123", 0, false},
+ {"++123", 0, false},
+ {"-+123", 0, false},
+ {"+-123", 0, false},
+ {"-", 0, false},
+ {"-9223372036854775809", std::numeric_limits<int64_t>::min(), false},
+ {"-99999999999999999999", std::numeric_limits<int64_t>::min(), false},
+ {"9223372036854775808", std::numeric_limits<int64_t>::max(), false},
+ {"99999999999999999999", std::numeric_limits<int64_t>::max(), false},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ int64_t output = 0;
+ EXPECT_EQ(cases[i].success, StringToInt64(cases[i].input, &output));
+ EXPECT_EQ(cases[i].output, output);
+
+ string16 utf16_input = UTF8ToUTF16(cases[i].input);
+ output = 0;
+ EXPECT_EQ(cases[i].success, StringToInt64(utf16_input, &output));
+ EXPECT_EQ(cases[i].output, output);
+ }
+
+  // One additional test to verify that conversion of numbers in strings
+  // with embedded NUL characters works. The NUL and extra data after it
+  // should be interpreted as junk after the number.
+ const char input[] = "6\06";
+ std::string input_string(input, arraysize(input) - 1);
+ int64_t output;
+ EXPECT_FALSE(StringToInt64(input_string, &output));
+ EXPECT_EQ(6, output);
+
+ string16 utf16_input = UTF8ToUTF16(input_string);
+ output = 0;
+ EXPECT_FALSE(StringToInt64(utf16_input, &output));
+ EXPECT_EQ(6, output);
+}
+
+TEST(StringNumberConversionsTest, StringToUint64) {
+ static const struct {
+ std::string input;
+ uint64_t output;
+ bool success;
+ } cases[] = {
+ {"0", 0, true},
+ {"42", 42, true},
+ {"-2147483648", 0, false},
+ {"2147483647", INT_MAX, true},
+ {"-2147483649", 0, false},
+ {"-99999999999", 0, false},
+ {"2147483648", UINT64_C(2147483648), true},
+ {"99999999999", UINT64_C(99999999999), true},
+ {"9223372036854775807", std::numeric_limits<int64_t>::max(), true},
+ {"-9223372036854775808", 0, false},
+ {"09", 9, true},
+ {"-09", 0, false},
+ {"", 0, false},
+ {" 42", 42, false},
+ {"42 ", 42, false},
+ {"0x42", 0, false},
+ {"\t\n\v\f\r 42", 42, false},
+ {"blah42", 0, false},
+ {"42blah", 42, false},
+ {"blah42blah", 0, false},
+ {"-273.15", 0, false},
+ {"+98.6", 98, false},
+ {"--123", 0, false},
+ {"++123", 0, false},
+ {"-+123", 0, false},
+ {"+-123", 0, false},
+ {"-", 0, false},
+ {"-9223372036854775809", 0, false},
+ {"-99999999999999999999", 0, false},
+ {"9223372036854775808", UINT64_C(9223372036854775808), true},
+ {"99999999999999999999", std::numeric_limits<uint64_t>::max(), false},
+ {"18446744073709551615", std::numeric_limits<uint64_t>::max(), true},
+ {"18446744073709551616", std::numeric_limits<uint64_t>::max(), false},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ uint64_t output = 0;
+ EXPECT_EQ(cases[i].success, StringToUint64(cases[i].input, &output));
+ EXPECT_EQ(cases[i].output, output);
+
+ string16 utf16_input = UTF8ToUTF16(cases[i].input);
+ output = 0;
+ EXPECT_EQ(cases[i].success, StringToUint64(utf16_input, &output));
+ EXPECT_EQ(cases[i].output, output);
+ }
+
+ // One additional test to verify that conversion of numbers in strings with
+ // embedded NUL characters. The NUL and extra data after it should be
+ // interpreted as junk after the number.
+ const char input[] = "6\06";
+ std::string input_string(input, arraysize(input) - 1);
+ uint64_t output;
+ EXPECT_FALSE(StringToUint64(input_string, &output));
+ EXPECT_EQ(6U, output);
+
+ string16 utf16_input = UTF8ToUTF16(input_string);
+ output = 0;
+ EXPECT_FALSE(StringToUint64(utf16_input, &output));
+ EXPECT_EQ(6U, output);
+}
+
+TEST(StringNumberConversionsTest, StringToSizeT) {
+ size_t size_t_max = std::numeric_limits<size_t>::max();
+ std::string size_t_max_string = StringPrintf("%" PRIuS, size_t_max);
+
+ static const struct {
+ std::string input;
+ size_t output;
+ bool success;
+ } cases[] = {
+ {"0", 0, true},
+ {"42", 42, true},
+ {"-2147483648", 0, false},
+ {"2147483647", INT_MAX, true},
+ {"-2147483649", 0, false},
+ {"-99999999999", 0, false},
+ {"2147483648", 2147483648U, true},
+#if SIZE_MAX > 4294967295U
+ {"99999999999", 99999999999U, true},
+#endif
+ {"-9223372036854775808", 0, false},
+ {"09", 9, true},
+ {"-09", 0, false},
+ {"", 0, false},
+ {" 42", 42, false},
+ {"42 ", 42, false},
+ {"0x42", 0, false},
+ {"\t\n\v\f\r 42", 42, false},
+ {"blah42", 0, false},
+ {"42blah", 42, false},
+ {"blah42blah", 0, false},
+ {"-273.15", 0, false},
+ {"+98.6", 98, false},
+ {"--123", 0, false},
+ {"++123", 0, false},
+ {"-+123", 0, false},
+ {"+-123", 0, false},
+ {"-", 0, false},
+ {"-9223372036854775809", 0, false},
+ {"-99999999999999999999", 0, false},
+ {"999999999999999999999999", size_t_max, false},
+ {size_t_max_string, size_t_max, true},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ size_t output = 0;
+ EXPECT_EQ(cases[i].success, StringToSizeT(cases[i].input, &output));
+ EXPECT_EQ(cases[i].output, output);
+
+ string16 utf16_input = UTF8ToUTF16(cases[i].input);
+ output = 0;
+ EXPECT_EQ(cases[i].success, StringToSizeT(utf16_input, &output));
+ EXPECT_EQ(cases[i].output, output);
+ }
+
+ // One additional test to verify that conversion of numbers in strings with
+ // embedded NUL characters. The NUL and extra data after it should be
+ // interpreted as junk after the number.
+ const char input[] = "6\06";
+ std::string input_string(input, arraysize(input) - 1);
+ size_t output;
+ EXPECT_FALSE(StringToSizeT(input_string, &output));
+ EXPECT_EQ(6U, output);
+
+ string16 utf16_input = UTF8ToUTF16(input_string);
+ output = 0;
+ EXPECT_FALSE(StringToSizeT(utf16_input, &output));
+ EXPECT_EQ(6U, output);
+}
+
+TEST(StringNumberConversionsTest, HexStringToInt) {
+ static const struct {
+ std::string input;
+ int64_t output;
+ bool success;
+ } cases[] = {
+ {"0", 0, true},
+ {"42", 66, true},
+ {"-42", -66, true},
+ {"+42", 66, true},
+ {"7fffffff", INT_MAX, true},
+ {"-80000000", INT_MIN, true},
+ {"80000000", INT_MAX, false}, // Overflow test.
+ {"-80000001", INT_MIN, false}, // Underflow test.
+ {"0x42", 66, true},
+ {"-0x42", -66, true},
+ {"+0x42", 66, true},
+ {"0x7fffffff", INT_MAX, true},
+ {"-0x80000000", INT_MIN, true},
+ {"-80000000", INT_MIN, true},
+ {"80000000", INT_MAX, false}, // Overflow test.
+ {"-80000001", INT_MIN, false}, // Underflow test.
+ {"0x0f", 15, true},
+ {"0f", 15, true},
+ {" 45", 0x45, false},
+ {"\t\n\v\f\r 0x45", 0x45, false},
+ {" 45", 0x45, false},
+ {"45 ", 0x45, false},
+ {"45:", 0x45, false},
+ {"efgh", 0xef, false},
+ {"0xefgh", 0xef, false},
+ {"hgfe", 0, false},
+ {"-", 0, false},
+ {"", 0, false},
+ {"0x", 0, false},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ int output = 0;
+ EXPECT_EQ(cases[i].success, HexStringToInt(cases[i].input, &output));
+ EXPECT_EQ(cases[i].output, output);
+ }
+ // One additional test to verify that conversion of numbers in strings with
+ // embedded NUL characters. The NUL and extra data after it should be
+ // interpreted as junk after the number.
+ const char input[] = "0xc0ffee\0" "9";
+ std::string input_string(input, arraysize(input) - 1);
+ int output;
+ EXPECT_FALSE(HexStringToInt(input_string, &output));
+ EXPECT_EQ(0xc0ffee, output);
+}
+
+TEST(StringNumberConversionsTest, HexStringToUInt) {
+ static const struct {
+ std::string input;
+ uint32_t output;
+ bool success;
+ } cases[] = {
+ {"0", 0, true},
+ {"42", 0x42, true},
+ {"-42", 0, false},
+ {"+42", 0x42, true},
+ {"7fffffff", INT_MAX, true},
+ {"-80000000", 0, false},
+ {"ffffffff", 0xffffffff, true},
+ {"DeadBeef", 0xdeadbeef, true},
+ {"0x42", 0x42, true},
+ {"-0x42", 0, false},
+ {"+0x42", 0x42, true},
+ {"0x7fffffff", INT_MAX, true},
+ {"-0x80000000", 0, false},
+ {"0xffffffff", std::numeric_limits<uint32_t>::max(), true},
+ {"0XDeadBeef", 0xdeadbeef, true},
+ {"0x7fffffffffffffff", std::numeric_limits<uint32_t>::max(),
+ false}, // Overflow test.
+ {"-0x8000000000000000", 0, false},
+ {"0x8000000000000000", std::numeric_limits<uint32_t>::max(),
+ false}, // Overflow test.
+ {"-0x8000000000000001", 0, false},
+ {"0xFFFFFFFFFFFFFFFF", std::numeric_limits<uint32_t>::max(),
+ false}, // Overflow test.
+ {"FFFFFFFFFFFFFFFF", std::numeric_limits<uint32_t>::max(),
+ false}, // Overflow test.
+ {"0x0000000000000000", 0, true},
+ {"0000000000000000", 0, true},
+ {"1FFFFFFFFFFFFFFFF", std::numeric_limits<uint32_t>::max(),
+ false}, // Overflow test.
+ {"0x0f", 0x0f, true},
+ {"0f", 0x0f, true},
+ {" 45", 0x45, false},
+ {"\t\n\v\f\r 0x45", 0x45, false},
+ {" 45", 0x45, false},
+ {"45 ", 0x45, false},
+ {"45:", 0x45, false},
+ {"efgh", 0xef, false},
+ {"0xefgh", 0xef, false},
+ {"hgfe", 0, false},
+ {"-", 0, false},
+ {"", 0, false},
+ {"0x", 0, false},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ uint32_t output = 0;
+ EXPECT_EQ(cases[i].success, HexStringToUInt(cases[i].input, &output));
+ EXPECT_EQ(cases[i].output, output);
+ }
+ // One additional test to verify that conversion of numbers in strings with
+ // embedded NUL characters. The NUL and extra data after it should be
+ // interpreted as junk after the number.
+ const char input[] = "0xc0ffee\0" "9";
+ std::string input_string(input, arraysize(input) - 1);
+ uint32_t output;
+ EXPECT_FALSE(HexStringToUInt(input_string, &output));
+ EXPECT_EQ(0xc0ffeeU, output);
+}
+
+TEST(StringNumberConversionsTest, HexStringToInt64) {
+ static const struct {
+ std::string input;
+ int64_t output;
+ bool success;
+ } cases[] = {
+ {"0", 0, true},
+ {"42", 66, true},
+ {"-42", -66, true},
+ {"+42", 66, true},
+ {"40acd88557b", INT64_C(4444444448123), true},
+ {"7fffffff", INT_MAX, true},
+ {"-80000000", INT_MIN, true},
+ {"ffffffff", 0xffffffff, true},
+ {"DeadBeef", 0xdeadbeef, true},
+ {"0x42", 66, true},
+ {"-0x42", -66, true},
+ {"+0x42", 66, true},
+ {"0x40acd88557b", INT64_C(4444444448123), true},
+ {"0x7fffffff", INT_MAX, true},
+ {"-0x80000000", INT_MIN, true},
+ {"0xffffffff", 0xffffffff, true},
+ {"0XDeadBeef", 0xdeadbeef, true},
+ {"0x7fffffffffffffff", std::numeric_limits<int64_t>::max(), true},
+ {"-0x8000000000000000", std::numeric_limits<int64_t>::min(), true},
+ {"0x8000000000000000", std::numeric_limits<int64_t>::max(),
+ false}, // Overflow test.
+ {"-0x8000000000000001", std::numeric_limits<int64_t>::min(),
+ false}, // Underflow test.
+ {"0x0f", 15, true},
+ {"0f", 15, true},
+ {" 45", 0x45, false},
+ {"\t\n\v\f\r 0x45", 0x45, false},
+ {" 45", 0x45, false},
+ {"45 ", 0x45, false},
+ {"45:", 0x45, false},
+ {"efgh", 0xef, false},
+ {"0xefgh", 0xef, false},
+ {"hgfe", 0, false},
+ {"-", 0, false},
+ {"", 0, false},
+ {"0x", 0, false},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ int64_t output = 0;
+ EXPECT_EQ(cases[i].success, HexStringToInt64(cases[i].input, &output));
+ EXPECT_EQ(cases[i].output, output);
+ }
+ // One additional test to verify that conversion of numbers in strings with
+ // embedded NUL characters. The NUL and extra data after it should be
+ // interpreted as junk after the number.
+ const char input[] = "0xc0ffee\0" "9";
+ std::string input_string(input, arraysize(input) - 1);
+ int64_t output;
+ EXPECT_FALSE(HexStringToInt64(input_string, &output));
+ EXPECT_EQ(0xc0ffee, output);
+}
+
+TEST(StringNumberConversionsTest, HexStringToUInt64) {
+ static const struct {
+ std::string input;
+ uint64_t output;
+ bool success;
+ } cases[] = {
+ {"0", 0, true},
+ {"42", 66, true},
+ {"-42", 0, false},
+ {"+42", 66, true},
+ {"40acd88557b", INT64_C(4444444448123), true},
+ {"7fffffff", INT_MAX, true},
+ {"-80000000", 0, false},
+ {"ffffffff", 0xffffffff, true},
+ {"DeadBeef", 0xdeadbeef, true},
+ {"0x42", 66, true},
+ {"-0x42", 0, false},
+ {"+0x42", 66, true},
+ {"0x40acd88557b", INT64_C(4444444448123), true},
+ {"0x7fffffff", INT_MAX, true},
+ {"-0x80000000", 0, false},
+ {"0xffffffff", 0xffffffff, true},
+ {"0XDeadBeef", 0xdeadbeef, true},
+ {"0x7fffffffffffffff", std::numeric_limits<int64_t>::max(), true},
+ {"-0x8000000000000000", 0, false},
+ {"0x8000000000000000", UINT64_C(0x8000000000000000), true},
+ {"-0x8000000000000001", 0, false},
+ {"0xFFFFFFFFFFFFFFFF", std::numeric_limits<uint64_t>::max(), true},
+ {"FFFFFFFFFFFFFFFF", std::numeric_limits<uint64_t>::max(), true},
+ {"0x0000000000000000", 0, true},
+ {"0000000000000000", 0, true},
+ {"1FFFFFFFFFFFFFFFF", std::numeric_limits<uint64_t>::max(),
+ false}, // Overflow test.
+ {"0x0f", 15, true},
+ {"0f", 15, true},
+ {" 45", 0x45, false},
+ {"\t\n\v\f\r 0x45", 0x45, false},
+ {" 45", 0x45, false},
+ {"45 ", 0x45, false},
+ {"45:", 0x45, false},
+ {"efgh", 0xef, false},
+ {"0xefgh", 0xef, false},
+ {"hgfe", 0, false},
+ {"-", 0, false},
+ {"", 0, false},
+ {"0x", 0, false},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ uint64_t output = 0;
+ EXPECT_EQ(cases[i].success, HexStringToUInt64(cases[i].input, &output));
+ EXPECT_EQ(cases[i].output, output);
+ }
+ // One additional test to verify that conversion of numbers in strings with
+ // embedded NUL characters. The NUL and extra data after it should be
+ // interpreted as junk after the number.
+ const char input[] = "0xc0ffee\0" "9";
+ std::string input_string(input, arraysize(input) - 1);
+ uint64_t output;
+ EXPECT_FALSE(HexStringToUInt64(input_string, &output));
+ EXPECT_EQ(0xc0ffeeU, output);
+}
+
+TEST(StringNumberConversionsTest, HexStringToBytes) {
+ static const struct {
+ const std::string input;
+ const char* output;
+ size_t output_len;
+ bool success;
+ } cases[] = {
+ {"0", "", 0, false}, // odd number of characters fails
+ {"00", "\0", 1, true},
+ {"42", "\x42", 1, true},
+ {"-42", "", 0, false}, // any non-hex value fails
+ {"+42", "", 0, false},
+ {"7fffffff", "\x7f\xff\xff\xff", 4, true},
+ {"80000000", "\x80\0\0\0", 4, true},
+ {"deadbeef", "\xde\xad\xbe\xef", 4, true},
+ {"DeadBeef", "\xde\xad\xbe\xef", 4, true},
+ {"0x42", "", 0, false}, // leading 0x fails (x is not hex)
+ {"0f", "\xf", 1, true},
+ {"45 ", "\x45", 1, false},
+ {"efgh", "\xef", 1, false},
+ {"", "", 0, false},
+ {"0123456789ABCDEF", "\x01\x23\x45\x67\x89\xAB\xCD\xEF", 8, true},
+ {"0123456789ABCDEF012345",
+ "\x01\x23\x45\x67\x89\xAB\xCD\xEF\x01\x23\x45", 11, true},
+ };
+
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ std::vector<uint8_t> output;
+ std::vector<uint8_t> compare;
+ EXPECT_EQ(cases[i].success, HexStringToBytes(cases[i].input, &output)) <<
+ i << ": " << cases[i].input;
+ for (size_t j = 0; j < cases[i].output_len; ++j)
+ compare.push_back(static_cast<uint8_t>(cases[i].output[j]));
+ ASSERT_EQ(output.size(), compare.size()) << i << ": " << cases[i].input;
+ EXPECT_TRUE(std::equal(output.begin(), output.end(), compare.begin())) <<
+ i << ": " << cases[i].input;
+ }
+}
+
+TEST(StringNumberConversionsTest, StringToDouble) {
+ static const struct {
+ std::string input;
+ double output;
+ bool success;
+ } cases[] = {
+ {"0", 0.0, true},
+ {"42", 42.0, true},
+ {"-42", -42.0, true},
+ {"123.45", 123.45, true},
+ {"-123.45", -123.45, true},
+ {"+123.45", 123.45, true},
+ {"2.99792458e8", 299792458.0, true},
+ {"149597870.691E+3", 149597870691.0, true},
+ {"6.", 6.0, true},
+ {"9e99999999999999999999", std::numeric_limits<double>::infinity(),
+ false},
+ {"-9e99999999999999999999", -std::numeric_limits<double>::infinity(),
+ false},
+ {"1e-2", 0.01, true},
+ {"42 ", 42.0, false},
+ {" 1e-2", 0.01, false},
+ {"1e-2 ", 0.01, false},
+ {"-1E-7", -0.0000001, true},
+ {"01e02", 100, true},
+ {"2.3e15", 2.3e15, true},
+ {"\t\n\v\f\r -123.45e2", -12345.0, false},
+ {"+123 e4", 123.0, false},
+ {"123e ", 123.0, false},
+ {"123e", 123.0, false},
+ {" 2.99", 2.99, false},
+ {"1e3.4", 1000.0, false},
+ {"nothing", 0.0, false},
+ {"-", 0.0, false},
+ {"+", 0.0, false},
+ {"", 0.0, false},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ double output;
+ errno = 1;
+ EXPECT_EQ(cases[i].success, StringToDouble(cases[i].input, &output))
+ << "for input=" << cases[i].input << "got output=" << output;
+ if (cases[i].success)
+ EXPECT_EQ(1, errno) << i; // confirm that errno is unchanged.
+ EXPECT_DOUBLE_EQ(cases[i].output, output);
+ }
+
+ // One additional test to verify that conversion of numbers in strings with
+ // embedded NUL characters. The NUL and extra data after it should be
+ // interpreted as junk after the number.
+ const char input[] = "3.14\0" "159";
+ std::string input_string(input, arraysize(input) - 1);
+ double output;
+ EXPECT_FALSE(StringToDouble(input_string, &output));
+ EXPECT_DOUBLE_EQ(3.14, output);
+}
+
+TEST(StringNumberConversionsTest, DoubleToString) {
+ static const struct {
+ double input;
+ const char* expected;
+ } cases[] = {
+ {0.0, "0.0"},
+ {1.25, "1.25"},
+ {1.33518e+012, "1335180000000.0"},
+ {1.33489e+012, "1334890000000.0"},
+ {1.33505e+012, "1335050000000.0"},
+ {1.33545e+009, "1335450000.0"},
+ {1.33503e+009, "1335030000.0"},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ EXPECT_EQ(cases[i].expected, DoubleToString(cases[i].input));
+ }
+
+ // The following two values were seen in crashes in the wild.
+ const char input_bytes[8] = {0, 0, 0, 0, '\xee', '\x6d', '\x73', '\x42'};
+ double input = 0;
+ memcpy(&input, input_bytes, arraysize(input_bytes));
+ EXPECT_EQ("1335179083776.0", DoubleToString(input));
+ const char input_bytes2[8] =
+ {0, 0, 0, '\xa0', '\xda', '\x6c', '\x73', '\x42'};
+ input = 0;
+ memcpy(&input, input_bytes2, arraysize(input_bytes2));
+ EXPECT_EQ("1334890332160.0", DoubleToString(input));
+}
+
+TEST(StringNumberConversionsTest, HexEncode) {
+ std::string hex(HexEncode(NULL, 0));
+ EXPECT_EQ(hex.length(), 0U);
+ unsigned char bytes[] = {0x01, 0xff, 0x02, 0xfe, 0x03, 0x80, 0x81};
+ hex = HexEncode(bytes, sizeof(bytes));
+ EXPECT_EQ(hex.compare("01FF02FE038081"), 0);
+}
+
+} // namespace base
diff --git a/libchrome/base/strings/string_piece.cc b/libchrome/base/strings/string_piece.cc
new file mode 100644
index 0000000..c26bb36
--- /dev/null
+++ b/libchrome/base/strings/string_piece.cc
@@ -0,0 +1,452 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Copied from strings/stringpiece.cc with modifications
+
+#include "base/strings/string_piece.h"
+
+#include <limits.h>
+
+#include <algorithm>
+#include <ostream>
+
+#include "base/logging.h"
+
+namespace base {
+namespace {
+
+// For each character in characters_wanted, sets the index corresponding
+// to the ASCII code of that character to 1 in table. This is used by
+// the find_.*_of methods below to tell whether or not a character is in
+// the lookup table in constant time.
+// The argument `table' must be an array that is large enough to hold all
+// the possible values of an unsigned char. Thus it should be be declared
+// as follows:
+// bool table[UCHAR_MAX + 1]
+inline void BuildLookupTable(const StringPiece& characters_wanted,
+ bool* table) {
+ const size_t length = characters_wanted.length();
+ const char* const data = characters_wanted.data();
+ for (size_t i = 0; i < length; ++i) {
+ table[static_cast<unsigned char>(data[i])] = true;
+ }
+}
+
+} // namespace
+
+// MSVC doesn't like complex extern templates and DLLs.
+#if !defined(COMPILER_MSVC)
+template class BasicStringPiece<std::string>;
+template class BasicStringPiece<string16>;
+#endif
+
+bool operator==(const StringPiece& x, const StringPiece& y) {
+ if (x.size() != y.size())
+ return false;
+
+ return StringPiece::wordmemcmp(x.data(), y.data(), x.size()) == 0;
+}
+
+std::ostream& operator<<(std::ostream& o, const StringPiece& piece) {
+ o.write(piece.data(), static_cast<std::streamsize>(piece.size()));
+ return o;
+}
+
+namespace internal {
+
+template<typename STR>
+void CopyToStringT(const BasicStringPiece<STR>& self, STR* target) {
+ if (self.empty())
+ target->clear();
+ else
+ target->assign(self.data(), self.size());
+}
+
+void CopyToString(const StringPiece& self, std::string* target) {
+ CopyToStringT(self, target);
+}
+
+void CopyToString(const StringPiece16& self, string16* target) {
+ CopyToStringT(self, target);
+}
+
+template<typename STR>
+void AppendToStringT(const BasicStringPiece<STR>& self, STR* target) {
+ if (!self.empty())
+ target->append(self.data(), self.size());
+}
+
+void AppendToString(const StringPiece& self, std::string* target) {
+ AppendToStringT(self, target);
+}
+
+void AppendToString(const StringPiece16& self, string16* target) {
+ AppendToStringT(self, target);
+}
+
+template<typename STR>
+size_t copyT(const BasicStringPiece<STR>& self,
+ typename STR::value_type* buf,
+ size_t n,
+ size_t pos) {
+ size_t ret = std::min(self.size() - pos, n);
+ memcpy(buf, self.data() + pos, ret * sizeof(typename STR::value_type));
+ return ret;
+}
+
+size_t copy(const StringPiece& self, char* buf, size_t n, size_t pos) {
+ return copyT(self, buf, n, pos);
+}
+
+size_t copy(const StringPiece16& self, char16* buf, size_t n, size_t pos) {
+ return copyT(self, buf, n, pos);
+}
+
+template<typename STR>
+size_t findT(const BasicStringPiece<STR>& self,
+ const BasicStringPiece<STR>& s,
+ size_t pos) {
+ if (pos > self.size())
+ return BasicStringPiece<STR>::npos;
+
+ typename BasicStringPiece<STR>::const_iterator result =
+ std::search(self.begin() + pos, self.end(), s.begin(), s.end());
+ const size_t xpos =
+ static_cast<size_t>(result - self.begin());
+ return xpos + s.size() <= self.size() ? xpos : BasicStringPiece<STR>::npos;
+}
+
+size_t find(const StringPiece& self, const StringPiece& s, size_t pos) {
+ return findT(self, s, pos);
+}
+
+size_t find(const StringPiece16& self, const StringPiece16& s, size_t pos) {
+ return findT(self, s, pos);
+}
+
+template<typename STR>
+size_t findT(const BasicStringPiece<STR>& self,
+ typename STR::value_type c,
+ size_t pos) {
+ if (pos >= self.size())
+ return BasicStringPiece<STR>::npos;
+
+ typename BasicStringPiece<STR>::const_iterator result =
+ std::find(self.begin() + pos, self.end(), c);
+ return result != self.end() ?
+ static_cast<size_t>(result - self.begin()) : BasicStringPiece<STR>::npos;
+}
+
+size_t find(const StringPiece& self, char c, size_t pos) {
+ return findT(self, c, pos);
+}
+
+size_t find(const StringPiece16& self, char16 c, size_t pos) {
+ return findT(self, c, pos);
+}
+
+template<typename STR>
+size_t rfindT(const BasicStringPiece<STR>& self,
+ const BasicStringPiece<STR>& s,
+ size_t pos) {
+ if (self.size() < s.size())
+ return BasicStringPiece<STR>::npos;
+
+ if (s.empty())
+ return std::min(self.size(), pos);
+
+ typename BasicStringPiece<STR>::const_iterator last =
+ self.begin() + std::min(self.size() - s.size(), pos) + s.size();
+ typename BasicStringPiece<STR>::const_iterator result =
+ std::find_end(self.begin(), last, s.begin(), s.end());
+ return result != last ?
+ static_cast<size_t>(result - self.begin()) : BasicStringPiece<STR>::npos;
+}
+
+size_t rfind(const StringPiece& self, const StringPiece& s, size_t pos) {
+ return rfindT(self, s, pos);
+}
+
+size_t rfind(const StringPiece16& self, const StringPiece16& s, size_t pos) {
+ return rfindT(self, s, pos);
+}
+
+template<typename STR>
+size_t rfindT(const BasicStringPiece<STR>& self,
+ typename STR::value_type c,
+ size_t pos) {
+ if (self.size() == 0)
+ return BasicStringPiece<STR>::npos;
+
+ for (size_t i = std::min(pos, self.size() - 1); ;
+ --i) {
+ if (self.data()[i] == c)
+ return i;
+ if (i == 0)
+ break;
+ }
+ return BasicStringPiece<STR>::npos;
+}
+
+size_t rfind(const StringPiece& self, char c, size_t pos) {
+ return rfindT(self, c, pos);
+}
+
+size_t rfind(const StringPiece16& self, char16 c, size_t pos) {
+ return rfindT(self, c, pos);
+}
+
+// 8-bit version using lookup table.
+size_t find_first_of(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos) {
+ if (self.size() == 0 || s.size() == 0)
+ return StringPiece::npos;
+
+ // Avoid the cost of BuildLookupTable() for a single-character search.
+ if (s.size() == 1)
+ return find(self, s.data()[0], pos);
+
+ bool lookup[UCHAR_MAX + 1] = { false };
+ BuildLookupTable(s, lookup);
+ for (size_t i = pos; i < self.size(); ++i) {
+ if (lookup[static_cast<unsigned char>(self.data()[i])]) {
+ return i;
+ }
+ }
+ return StringPiece::npos;
+}
+
+// 16-bit brute force version.
+size_t find_first_of(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos) {
+ StringPiece16::const_iterator found =
+ std::find_first_of(self.begin() + pos, self.end(), s.begin(), s.end());
+ if (found == self.end())
+ return StringPiece16::npos;
+ return found - self.begin();
+}
+
+// 8-bit version using lookup table.
+size_t find_first_not_of(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos) {
+ if (self.size() == 0)
+ return StringPiece::npos;
+
+ if (s.size() == 0)
+ return 0;
+
+ // Avoid the cost of BuildLookupTable() for a single-character search.
+ if (s.size() == 1)
+ return find_first_not_of(self, s.data()[0], pos);
+
+ bool lookup[UCHAR_MAX + 1] = { false };
+ BuildLookupTable(s, lookup);
+ for (size_t i = pos; i < self.size(); ++i) {
+ if (!lookup[static_cast<unsigned char>(self.data()[i])]) {
+ return i;
+ }
+ }
+ return StringPiece::npos;
+}
+
+// 16-bit brute-force version.
+BASE_EXPORT size_t find_first_not_of(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos) {
+ if (self.size() == 0)
+ return StringPiece16::npos;
+
+ for (size_t self_i = pos; self_i < self.size(); ++self_i) {
+ bool found = false;
+ for (size_t s_i = 0; s_i < s.size(); ++s_i) {
+ if (self[self_i] == s[s_i]) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return self_i;
+ }
+ return StringPiece16::npos;
+}
+
+template<typename STR>
+size_t find_first_not_ofT(const BasicStringPiece<STR>& self,
+ typename STR::value_type c,
+ size_t pos) {
+ if (self.size() == 0)
+ return BasicStringPiece<STR>::npos;
+
+ for (; pos < self.size(); ++pos) {
+ if (self.data()[pos] != c) {
+ return pos;
+ }
+ }
+ return BasicStringPiece<STR>::npos;
+}
+
+size_t find_first_not_of(const StringPiece& self,
+ char c,
+ size_t pos) {
+ return find_first_not_ofT(self, c, pos);
+}
+
+size_t find_first_not_of(const StringPiece16& self,
+ char16 c,
+ size_t pos) {
+ return find_first_not_ofT(self, c, pos);
+}
+
+// 8-bit version using lookup table.
+size_t find_last_of(const StringPiece& self, const StringPiece& s, size_t pos) {
+ if (self.size() == 0 || s.size() == 0)
+ return StringPiece::npos;
+
+ // Avoid the cost of BuildLookupTable() for a single-character search.
+ if (s.size() == 1)
+ return rfind(self, s.data()[0], pos);
+
+ bool lookup[UCHAR_MAX + 1] = { false };
+ BuildLookupTable(s, lookup);
+ for (size_t i = std::min(pos, self.size() - 1); ; --i) {
+ if (lookup[static_cast<unsigned char>(self.data()[i])])
+ return i;
+ if (i == 0)
+ break;
+ }
+ return StringPiece::npos;
+}
+
+// 16-bit brute-force version.
+size_t find_last_of(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos) {
+ if (self.size() == 0)
+ return StringPiece16::npos;
+
+ for (size_t self_i = std::min(pos, self.size() - 1); ;
+ --self_i) {
+ for (size_t s_i = 0; s_i < s.size(); s_i++) {
+ if (self.data()[self_i] == s[s_i])
+ return self_i;
+ }
+ if (self_i == 0)
+ break;
+ }
+ return StringPiece16::npos;
+}
+
+// 8-bit version using lookup table.
+size_t find_last_not_of(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos) {
+ if (self.size() == 0)
+ return StringPiece::npos;
+
+ size_t i = std::min(pos, self.size() - 1);
+ if (s.size() == 0)
+ return i;
+
+ // Avoid the cost of BuildLookupTable() for a single-character search.
+ if (s.size() == 1)
+ return find_last_not_of(self, s.data()[0], pos);
+
+ bool lookup[UCHAR_MAX + 1] = { false };
+ BuildLookupTable(s, lookup);
+ for (; ; --i) {
+ if (!lookup[static_cast<unsigned char>(self.data()[i])])
+ return i;
+ if (i == 0)
+ break;
+ }
+ return StringPiece::npos;
+}
+
+// 16-bit brute-force version.
+size_t find_last_not_of(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos) {
+ if (self.size() == 0)
+ return StringPiece::npos;
+
+ for (size_t self_i = std::min(pos, self.size() - 1); ; --self_i) {
+ bool found = false;
+ for (size_t s_i = 0; s_i < s.size(); s_i++) {
+ if (self.data()[self_i] == s[s_i]) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return self_i;
+ if (self_i == 0)
+ break;
+ }
+ return StringPiece16::npos;
+}
+
+template<typename STR>
+size_t find_last_not_ofT(const BasicStringPiece<STR>& self,
+ typename STR::value_type c,
+ size_t pos) {
+ if (self.size() == 0)
+ return BasicStringPiece<STR>::npos;
+
+ for (size_t i = std::min(pos, self.size() - 1); ; --i) {
+ if (self.data()[i] != c)
+ return i;
+ if (i == 0)
+ break;
+ }
+ return BasicStringPiece<STR>::npos;
+}
+
+size_t find_last_not_of(const StringPiece& self,
+ char c,
+ size_t pos) {
+ return find_last_not_ofT(self, c, pos);
+}
+
+size_t find_last_not_of(const StringPiece16& self,
+ char16 c,
+ size_t pos) {
+ return find_last_not_ofT(self, c, pos);
+}
+
+template<typename STR>
+BasicStringPiece<STR> substrT(const BasicStringPiece<STR>& self,
+ size_t pos,
+ size_t n) {
+ if (pos > self.size()) pos = self.size();
+ if (n > self.size() - pos) n = self.size() - pos;
+ return BasicStringPiece<STR>(self.data() + pos, n);
+}
+
+StringPiece substr(const StringPiece& self,
+ size_t pos,
+ size_t n) {
+ return substrT(self, pos, n);
+}
+
+StringPiece16 substr(const StringPiece16& self,
+ size_t pos,
+ size_t n) {
+ return substrT(self, pos, n);
+}
+
+#if DCHECK_IS_ON()
+void AssertIteratorsInOrder(std::string::const_iterator begin,
+ std::string::const_iterator end) {
+ DCHECK(begin <= end) << "StringPiece iterators swapped or invalid.";
+}
+void AssertIteratorsInOrder(string16::const_iterator begin,
+ string16::const_iterator end) {
+ DCHECK(begin <= end) << "StringPiece iterators swapped or invalid.";
+}
+#endif
+
+} // namespace internal
+} // namespace base
diff --git a/libchrome/base/strings/string_piece.h b/libchrome/base/strings/string_piece.h
new file mode 100644
index 0000000..eaec14d
--- /dev/null
+++ b/libchrome/base/strings/string_piece.h
@@ -0,0 +1,464 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Copied from strings/stringpiece.h with modifications
+//
+// A string-like object that points to a sized piece of memory.
+//
+// You can use StringPiece as a function or method parameter. A StringPiece
+// parameter can receive a double-quoted string literal argument, a "const
+// char*" argument, a string argument, or a StringPiece argument with no data
+// copying. Systematic use of StringPiece for arguments reduces data
+// copies and strlen() calls.
+//
+// Prefer passing StringPieces by value:
+// void MyFunction(StringPiece arg);
+// If circumstances require, you may also pass by const reference:
+// void MyFunction(const StringPiece& arg); // not preferred
+// Both of these have the same lifetime semantics. Passing by value
+// generates slightly smaller code. For more discussion, Googlers can see
+// the thread go/stringpiecebyvalue on c-users.
+
+#ifndef BASE_STRINGS_STRING_PIECE_H_
+#define BASE_STRINGS_STRING_PIECE_H_
+
+#include <stddef.h>
+
+#include <iosfwd>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/strings/string16.h"
+
+namespace base {
+
+template <typename STRING_TYPE> class BasicStringPiece;
+typedef BasicStringPiece<std::string> StringPiece;
+typedef BasicStringPiece<string16> StringPiece16;
+
+// internal --------------------------------------------------------------------
+
+// Many of the StringPiece functions use different implementations for the
+// 8-bit and 16-bit versions, and we don't want lots of template expansions in
+// this (very common) header that will slow down compilation.
+//
+// So here we define overloaded functions called by the StringPiece template.
+// For those that share an implementation, the two versions will expand to a
+// template internal to the .cc file.
+namespace internal {
+
+BASE_EXPORT void CopyToString(const StringPiece& self, std::string* target);
+BASE_EXPORT void CopyToString(const StringPiece16& self, string16* target);
+
+BASE_EXPORT void AppendToString(const StringPiece& self, std::string* target);
+BASE_EXPORT void AppendToString(const StringPiece16& self, string16* target);
+
+BASE_EXPORT size_t copy(const StringPiece& self,
+ char* buf,
+ size_t n,
+ size_t pos);
+BASE_EXPORT size_t copy(const StringPiece16& self,
+ char16* buf,
+ size_t n,
+ size_t pos);
+
+BASE_EXPORT size_t find(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos);
+BASE_EXPORT size_t find(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos);
+BASE_EXPORT size_t find(const StringPiece& self,
+ char c,
+ size_t pos);
+BASE_EXPORT size_t find(const StringPiece16& self,
+ char16 c,
+ size_t pos);
+
+BASE_EXPORT size_t rfind(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos);
+BASE_EXPORT size_t rfind(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos);
+BASE_EXPORT size_t rfind(const StringPiece& self,
+ char c,
+ size_t pos);
+BASE_EXPORT size_t rfind(const StringPiece16& self,
+ char16 c,
+ size_t pos);
+
+BASE_EXPORT size_t find_first_of(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos);
+BASE_EXPORT size_t find_first_of(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos);
+
+BASE_EXPORT size_t find_first_not_of(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos);
+BASE_EXPORT size_t find_first_not_of(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos);
+BASE_EXPORT size_t find_first_not_of(const StringPiece& self,
+ char c,
+ size_t pos);
+BASE_EXPORT size_t find_first_not_of(const StringPiece16& self,
+ char16 c,
+ size_t pos);
+
+BASE_EXPORT size_t find_last_of(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos);
+BASE_EXPORT size_t find_last_of(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos);
+BASE_EXPORT size_t find_last_of(const StringPiece& self,
+ char c,
+ size_t pos);
+BASE_EXPORT size_t find_last_of(const StringPiece16& self,
+ char16 c,
+ size_t pos);
+
+BASE_EXPORT size_t find_last_not_of(const StringPiece& self,
+ const StringPiece& s,
+ size_t pos);
+BASE_EXPORT size_t find_last_not_of(const StringPiece16& self,
+ const StringPiece16& s,
+ size_t pos);
+BASE_EXPORT size_t find_last_not_of(const StringPiece16& self,
+ char16 c,
+ size_t pos);
+BASE_EXPORT size_t find_last_not_of(const StringPiece& self,
+ char c,
+ size_t pos);
+
+BASE_EXPORT StringPiece substr(const StringPiece& self,
+ size_t pos,
+ size_t n);
+BASE_EXPORT StringPiece16 substr(const StringPiece16& self,
+ size_t pos,
+ size_t n);
+
+#if DCHECK_IS_ON()
+// Asserts that begin <= end to catch some errors with iterator usage.
+BASE_EXPORT void AssertIteratorsInOrder(std::string::const_iterator begin,
+ std::string::const_iterator end);
+BASE_EXPORT void AssertIteratorsInOrder(string16::const_iterator begin,
+ string16::const_iterator end);
+#endif
+
+} // namespace internal
+
+// BasicStringPiece ------------------------------------------------------------
+
+// Defines the types, methods, operators, and data members common to both
+// StringPiece and StringPiece16. Do not refer to this class directly, but
+// rather to BasicStringPiece, StringPiece, or StringPiece16.
+//
+// This is templatized by string class type rather than character type, so
+// BasicStringPiece<std::string> or BasicStringPiece<base::string16>.
+template <typename STRING_TYPE> class BasicStringPiece {
+ public:
+ // Standard STL container boilerplate.
+ typedef size_t size_type;
+ typedef typename STRING_TYPE::value_type value_type;
+ typedef const value_type* pointer;
+ typedef const value_type& reference;
+ typedef const value_type& const_reference;
+ typedef ptrdiff_t difference_type;
+ typedef const value_type* const_iterator;
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+
+ static const size_type npos;
+
+ public:
+ // We provide non-explicit singleton constructors so users can pass
+ // in a "const char*" or a "string" wherever a "StringPiece" is
+ // expected (likewise for char16, string16, StringPiece16).
+ BasicStringPiece() : ptr_(NULL), length_(0) {}
+ BasicStringPiece(const value_type* str)
+ : ptr_(str),
+ length_((str == NULL) ? 0 : STRING_TYPE::traits_type::length(str)) {}
+ BasicStringPiece(const STRING_TYPE& str)
+ : ptr_(str.data()), length_(str.size()) {}
+ BasicStringPiece(const value_type* offset, size_type len)
+ : ptr_(offset), length_(len) {}
+ BasicStringPiece(const typename STRING_TYPE::const_iterator& begin,
+ const typename STRING_TYPE::const_iterator& end) {
+#if DCHECK_IS_ON()
+ // This assertion is done out-of-line to avoid bringing in logging.h and
+ // instantiating logging macros for every instantiation.
+ internal::AssertIteratorsInOrder(begin, end);
+#endif
+ length_ = static_cast<size_t>(std::distance(begin, end));
+
+ // The length test before assignment is to avoid dereferencing an iterator
+ // that may point to the end() of a string.
+ ptr_ = length_ > 0 ? &*begin : nullptr;
+ }
+
+ // data() may return a pointer to a buffer with embedded NULs, and the
+ // returned buffer may or may not be null terminated. Therefore it is
+ // typically a mistake to pass data() to a routine that expects a NUL
+ // terminated string.
+ const value_type* data() const { return ptr_; }
+ size_type size() const { return length_; }
+ size_type length() const { return length_; }
+ bool empty() const { return length_ == 0; }
+
+ void clear() {
+ ptr_ = NULL;
+ length_ = 0;
+ }
+ void set(const value_type* data, size_type len) {
+ ptr_ = data;
+ length_ = len;
+ }
+ void set(const value_type* str) {
+ ptr_ = str;
+ length_ = str ? STRING_TYPE::traits_type::length(str) : 0;
+ }
+
+ value_type operator[](size_type i) const { return ptr_[i]; }
+ value_type front() const { return ptr_[0]; }
+ value_type back() const { return ptr_[length_ - 1]; }
+
+ void remove_prefix(size_type n) {
+ ptr_ += n;
+ length_ -= n;
+ }
+
+ void remove_suffix(size_type n) {
+ length_ -= n;
+ }
+
+ int compare(const BasicStringPiece<STRING_TYPE>& x) const {
+ int r = wordmemcmp(
+ ptr_, x.ptr_, (length_ < x.length_ ? length_ : x.length_));
+ if (r == 0) {
+ if (length_ < x.length_) r = -1;
+ else if (length_ > x.length_) r = +1;
+ }
+ return r;
+ }
+
+ STRING_TYPE as_string() const {
+ // std::string doesn't like to take a NULL pointer even with a 0 size.
+ return empty() ? STRING_TYPE() : STRING_TYPE(data(), size());
+ }
+
+ const_iterator begin() const { return ptr_; }
+ const_iterator end() const { return ptr_ + length_; }
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(ptr_ + length_);
+ }
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(ptr_);
+ }
+
+ size_type max_size() const { return length_; }
+ size_type capacity() const { return length_; }
+
+ static int wordmemcmp(const value_type* p,
+ const value_type* p2,
+ size_type N) {
+ return STRING_TYPE::traits_type::compare(p, p2, N);
+ }
+
+ // Sets the value of the given string target type to be the current string.
+ // This saves a temporary over doing |a = b.as_string()|
+ void CopyToString(STRING_TYPE* target) const {
+ internal::CopyToString(*this, target);
+ }
+
+ void AppendToString(STRING_TYPE* target) const {
+ internal::AppendToString(*this, target);
+ }
+
+ size_type copy(value_type* buf, size_type n, size_type pos = 0) const {
+ return internal::copy(*this, buf, n, pos);
+ }
+
+ // Does "this" start with "x"
+ bool starts_with(const BasicStringPiece& x) const {
+ return ((this->length_ >= x.length_) &&
+ (wordmemcmp(this->ptr_, x.ptr_, x.length_) == 0));
+ }
+
+ // Does "this" end with "x"
+ bool ends_with(const BasicStringPiece& x) const {
+ return ((this->length_ >= x.length_) &&
+ (wordmemcmp(this->ptr_ + (this->length_-x.length_),
+ x.ptr_, x.length_) == 0));
+ }
+
+ // find: Search for a character or substring at a given offset.
+ size_type find(const BasicStringPiece<STRING_TYPE>& s,
+ size_type pos = 0) const {
+ return internal::find(*this, s, pos);
+ }
+ size_type find(value_type c, size_type pos = 0) const {
+ return internal::find(*this, c, pos);
+ }
+
+ // rfind: Reverse find.
+ size_type rfind(const BasicStringPiece& s,
+ size_type pos = BasicStringPiece::npos) const {
+ return internal::rfind(*this, s, pos);
+ }
+ size_type rfind(value_type c, size_type pos = BasicStringPiece::npos) const {
+ return internal::rfind(*this, c, pos);
+ }
+
+  // find_first_of: Find the first occurrence of one of a set of characters.
+ size_type find_first_of(const BasicStringPiece& s,
+ size_type pos = 0) const {
+ return internal::find_first_of(*this, s, pos);
+ }
+ size_type find_first_of(value_type c, size_type pos = 0) const {
+ return find(c, pos);
+ }
+
+  // find_first_not_of: Find the first occurrence not of a set of characters.
+ size_type find_first_not_of(const BasicStringPiece& s,
+ size_type pos = 0) const {
+ return internal::find_first_not_of(*this, s, pos);
+ }
+ size_type find_first_not_of(value_type c, size_type pos = 0) const {
+ return internal::find_first_not_of(*this, c, pos);
+ }
+
+  // find_last_of: Find the last occurrence of one of a set of characters.
+ size_type find_last_of(const BasicStringPiece& s,
+ size_type pos = BasicStringPiece::npos) const {
+ return internal::find_last_of(*this, s, pos);
+ }
+ size_type find_last_of(value_type c,
+ size_type pos = BasicStringPiece::npos) const {
+ return rfind(c, pos);
+ }
+
+  // find_last_not_of: Find the last occurrence not of a set of characters.
+ size_type find_last_not_of(const BasicStringPiece& s,
+ size_type pos = BasicStringPiece::npos) const {
+ return internal::find_last_not_of(*this, s, pos);
+ }
+ size_type find_last_not_of(value_type c,
+ size_type pos = BasicStringPiece::npos) const {
+ return internal::find_last_not_of(*this, c, pos);
+ }
+
+ // substr.
+ BasicStringPiece substr(size_type pos,
+ size_type n = BasicStringPiece::npos) const {
+ return internal::substr(*this, pos, n);
+ }
+
+ protected:
+ const value_type* ptr_;
+ size_type length_;
+};
+
+template <typename STRING_TYPE>
+const typename BasicStringPiece<STRING_TYPE>::size_type
+BasicStringPiece<STRING_TYPE>::npos =
+ typename BasicStringPiece<STRING_TYPE>::size_type(-1);
+
+// MSVC doesn't like complex extern templates and DLLs.
+#if !defined(COMPILER_MSVC)
+extern template class BASE_EXPORT BasicStringPiece<std::string>;
+extern template class BASE_EXPORT BasicStringPiece<string16>;
+#endif
+
+// StringPiece operators --------------------------------------------------------
+
+BASE_EXPORT bool operator==(const StringPiece& x, const StringPiece& y);
+
+inline bool operator!=(const StringPiece& x, const StringPiece& y) {
+ return !(x == y);
+}
+
+inline bool operator<(const StringPiece& x, const StringPiece& y) {
+ const int r = StringPiece::wordmemcmp(
+ x.data(), y.data(), (x.size() < y.size() ? x.size() : y.size()));
+ return ((r < 0) || ((r == 0) && (x.size() < y.size())));
+}
+
+inline bool operator>(const StringPiece& x, const StringPiece& y) {
+ return y < x;
+}
+
+inline bool operator<=(const StringPiece& x, const StringPiece& y) {
+ return !(x > y);
+}
+
+inline bool operator>=(const StringPiece& x, const StringPiece& y) {
+ return !(x < y);
+}
+
+// StringPiece16 operators -----------------------------------------------------
+
+inline bool operator==(const StringPiece16& x, const StringPiece16& y) {
+ if (x.size() != y.size())
+ return false;
+
+ return StringPiece16::wordmemcmp(x.data(), y.data(), x.size()) == 0;
+}
+
+inline bool operator!=(const StringPiece16& x, const StringPiece16& y) {
+ return !(x == y);
+}
+
+inline bool operator<(const StringPiece16& x, const StringPiece16& y) {
+ const int r = StringPiece16::wordmemcmp(
+ x.data(), y.data(), (x.size() < y.size() ? x.size() : y.size()));
+ return ((r < 0) || ((r == 0) && (x.size() < y.size())));
+}
+
+inline bool operator>(const StringPiece16& x, const StringPiece16& y) {
+ return y < x;
+}
+
+inline bool operator<=(const StringPiece16& x, const StringPiece16& y) {
+ return !(x > y);
+}
+
+inline bool operator>=(const StringPiece16& x, const StringPiece16& y) {
+ return !(x < y);
+}
+
+BASE_EXPORT std::ostream& operator<<(std::ostream& o,
+ const StringPiece& piece);
+
+// Hashing ---------------------------------------------------------------------
+
+// We provide appropriate hash functions so StringPiece and StringPiece16 can
+// be used as keys in hash sets and maps.
+
+// This hash function is copied from base/strings/string16.h. We don't use the
+// ones already defined for string and string16 directly because it would
+// require the string constructors to be called, which we don't want.
+#define HASH_STRING_PIECE(StringPieceType, string_piece) \
+ std::size_t result = 0; \
+ for (StringPieceType::const_iterator i = string_piece.begin(); \
+ i != string_piece.end(); ++i) \
+ result = (result * 131) + *i; \
+ return result;
+
+struct StringPieceHash {
+ std::size_t operator()(const StringPiece& sp) const {
+ HASH_STRING_PIECE(StringPiece, sp);
+ }
+};
+struct StringPiece16Hash {
+ std::size_t operator()(const StringPiece16& sp16) const {
+ HASH_STRING_PIECE(StringPiece16, sp16);
+ }
+};
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRING_PIECE_H_
diff --git a/libchrome/base/strings/string_piece_unittest.cc b/libchrome/base/strings/string_piece_unittest.cc
new file mode 100644
index 0000000..f05aa15
--- /dev/null
+++ b/libchrome/base/strings/string_piece_unittest.cc
@@ -0,0 +1,691 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+template <typename T>
+class CommonStringPieceTest : public ::testing::Test {
+ public:
+ static const T as_string(const char* input) {
+ return T(input);
+ }
+ static const T& as_string(const T& input) {
+ return input;
+ }
+};
+
+template <>
+class CommonStringPieceTest<string16> : public ::testing::Test {
+ public:
+ static const string16 as_string(const char* input) {
+ return ASCIIToUTF16(input);
+ }
+ static const string16 as_string(const std::string& input) {
+ return ASCIIToUTF16(input);
+ }
+};
+
+typedef ::testing::Types<std::string, string16> SupportedStringTypes;
+
+TYPED_TEST_CASE(CommonStringPieceTest, SupportedStringTypes);
+
+TYPED_TEST(CommonStringPieceTest, CheckComparisonOperators) {
+#define CMP_Y(op, x, y) \
+ { \
+ TypeParam lhs(TestFixture::as_string(x)); \
+ TypeParam rhs(TestFixture::as_string(y)); \
+ ASSERT_TRUE( (BasicStringPiece<TypeParam>((lhs.c_str())) op \
+ BasicStringPiece<TypeParam>((rhs.c_str())))); \
+ ASSERT_TRUE( (BasicStringPiece<TypeParam>((lhs.c_str())).compare( \
+ BasicStringPiece<TypeParam>((rhs.c_str()))) op 0)); \
+ }
+
+#define CMP_N(op, x, y) \
+ { \
+ TypeParam lhs(TestFixture::as_string(x)); \
+ TypeParam rhs(TestFixture::as_string(y)); \
+ ASSERT_FALSE( (BasicStringPiece<TypeParam>((lhs.c_str())) op \
+ BasicStringPiece<TypeParam>((rhs.c_str())))); \
+ ASSERT_FALSE( (BasicStringPiece<TypeParam>((lhs.c_str())).compare( \
+ BasicStringPiece<TypeParam>((rhs.c_str()))) op 0)); \
+ }
+
+ CMP_Y(==, "", "");
+ CMP_Y(==, "a", "a");
+ CMP_Y(==, "aa", "aa");
+ CMP_N(==, "a", "");
+ CMP_N(==, "", "a");
+ CMP_N(==, "a", "b");
+ CMP_N(==, "a", "aa");
+ CMP_N(==, "aa", "a");
+
+ CMP_N(!=, "", "");
+ CMP_N(!=, "a", "a");
+ CMP_N(!=, "aa", "aa");
+ CMP_Y(!=, "a", "");
+ CMP_Y(!=, "", "a");
+ CMP_Y(!=, "a", "b");
+ CMP_Y(!=, "a", "aa");
+ CMP_Y(!=, "aa", "a");
+
+ CMP_Y(<, "a", "b");
+ CMP_Y(<, "a", "aa");
+ CMP_Y(<, "aa", "b");
+ CMP_Y(<, "aa", "bb");
+ CMP_N(<, "a", "a");
+ CMP_N(<, "b", "a");
+ CMP_N(<, "aa", "a");
+ CMP_N(<, "b", "aa");
+ CMP_N(<, "bb", "aa");
+
+ CMP_Y(<=, "a", "a");
+ CMP_Y(<=, "a", "b");
+ CMP_Y(<=, "a", "aa");
+ CMP_Y(<=, "aa", "b");
+ CMP_Y(<=, "aa", "bb");
+ CMP_N(<=, "b", "a");
+ CMP_N(<=, "aa", "a");
+ CMP_N(<=, "b", "aa");
+ CMP_N(<=, "bb", "aa");
+
+ CMP_N(>=, "a", "b");
+ CMP_N(>=, "a", "aa");
+ CMP_N(>=, "aa", "b");
+ CMP_N(>=, "aa", "bb");
+ CMP_Y(>=, "a", "a");
+ CMP_Y(>=, "b", "a");
+ CMP_Y(>=, "aa", "a");
+ CMP_Y(>=, "b", "aa");
+ CMP_Y(>=, "bb", "aa");
+
+ CMP_N(>, "a", "a");
+ CMP_N(>, "a", "b");
+ CMP_N(>, "a", "aa");
+ CMP_N(>, "aa", "b");
+ CMP_N(>, "aa", "bb");
+ CMP_Y(>, "b", "a");
+ CMP_Y(>, "aa", "a");
+ CMP_Y(>, "b", "aa");
+ CMP_Y(>, "bb", "aa");
+
+ std::string x;
+ for (int i = 0; i < 256; i++) {
+ x += 'a';
+ std::string y = x;
+ CMP_Y(==, x, y);
+ for (int j = 0; j < i; j++) {
+ std::string z = x;
+ z[j] = 'b'; // Differs in position 'j'
+ CMP_N(==, x, z);
+ }
+ }
+
+#undef CMP_Y
+#undef CMP_N
+}
+
+TYPED_TEST(CommonStringPieceTest, CheckSTL) {
+ TypeParam alphabet(TestFixture::as_string("abcdefghijklmnopqrstuvwxyz"));
+ TypeParam abc(TestFixture::as_string("abc"));
+ TypeParam xyz(TestFixture::as_string("xyz"));
+ TypeParam foobar(TestFixture::as_string("foobar"));
+
+ BasicStringPiece<TypeParam> a(alphabet);
+ BasicStringPiece<TypeParam> b(abc);
+ BasicStringPiece<TypeParam> c(xyz);
+ BasicStringPiece<TypeParam> d(foobar);
+ BasicStringPiece<TypeParam> e;
+ TypeParam temp(TestFixture::as_string("123"));
+ temp += static_cast<typename TypeParam::value_type>(0);
+ temp += TestFixture::as_string("456");
+ BasicStringPiece<TypeParam> f(temp);
+
+ ASSERT_EQ(a[6], static_cast<typename TypeParam::value_type>('g'));
+ ASSERT_EQ(b[0], static_cast<typename TypeParam::value_type>('a'));
+ ASSERT_EQ(c[2], static_cast<typename TypeParam::value_type>('z'));
+ ASSERT_EQ(f[3], static_cast<typename TypeParam::value_type>('\0'));
+ ASSERT_EQ(f[5], static_cast<typename TypeParam::value_type>('5'));
+
+ ASSERT_EQ(*d.data(), static_cast<typename TypeParam::value_type>('f'));
+ ASSERT_EQ(d.data()[5], static_cast<typename TypeParam::value_type>('r'));
+ ASSERT_TRUE(e.data() == NULL);
+
+ ASSERT_EQ(*a.begin(), static_cast<typename TypeParam::value_type>('a'));
+ ASSERT_EQ(*(b.begin() + 2), static_cast<typename TypeParam::value_type>('c'));
+ ASSERT_EQ(*(c.end() - 1), static_cast<typename TypeParam::value_type>('z'));
+
+ ASSERT_EQ(*a.rbegin(), static_cast<typename TypeParam::value_type>('z'));
+ ASSERT_EQ(*(b.rbegin() + 2),
+ static_cast<typename TypeParam::value_type>('a'));
+ ASSERT_EQ(*(c.rend() - 1), static_cast<typename TypeParam::value_type>('x'));
+ ASSERT_TRUE(a.rbegin() + 26 == a.rend());
+
+ ASSERT_EQ(a.size(), 26U);
+ ASSERT_EQ(b.size(), 3U);
+ ASSERT_EQ(c.size(), 3U);
+ ASSERT_EQ(d.size(), 6U);
+ ASSERT_EQ(e.size(), 0U);
+ ASSERT_EQ(f.size(), 7U);
+
+ ASSERT_TRUE(!d.empty());
+ ASSERT_TRUE(d.begin() != d.end());
+ ASSERT_TRUE(d.begin() + 6 == d.end());
+
+ ASSERT_TRUE(e.empty());
+ ASSERT_TRUE(e.begin() == e.end());
+
+ d.clear();
+ ASSERT_EQ(d.size(), 0U);
+ ASSERT_TRUE(d.empty());
+ ASSERT_TRUE(d.data() == NULL);
+ ASSERT_TRUE(d.begin() == d.end());
+
+ ASSERT_GE(a.max_size(), a.capacity());
+ ASSERT_GE(a.capacity(), a.size());
+}
+
+TYPED_TEST(CommonStringPieceTest, CheckFind) {
+ typedef BasicStringPiece<TypeParam> Piece;
+
+ TypeParam alphabet(TestFixture::as_string("abcdefghijklmnopqrstuvwxyz"));
+ TypeParam abc(TestFixture::as_string("abc"));
+ TypeParam xyz(TestFixture::as_string("xyz"));
+ TypeParam foobar(TestFixture::as_string("foobar"));
+
+ BasicStringPiece<TypeParam> a(alphabet);
+ BasicStringPiece<TypeParam> b(abc);
+ BasicStringPiece<TypeParam> c(xyz);
+ BasicStringPiece<TypeParam> d(foobar);
+
+ d.clear();
+ Piece e;
+ TypeParam temp(TestFixture::as_string("123"));
+ temp.push_back('\0');
+ temp += TestFixture::as_string("456");
+ Piece f(temp);
+
+ typename TypeParam::value_type buf[4] = { '%', '%', '%', '%' };
+ ASSERT_EQ(a.copy(buf, 4), 4U);
+ ASSERT_EQ(buf[0], a[0]);
+ ASSERT_EQ(buf[1], a[1]);
+ ASSERT_EQ(buf[2], a[2]);
+ ASSERT_EQ(buf[3], a[3]);
+ ASSERT_EQ(a.copy(buf, 3, 7), 3U);
+ ASSERT_EQ(buf[0], a[7]);
+ ASSERT_EQ(buf[1], a[8]);
+ ASSERT_EQ(buf[2], a[9]);
+ ASSERT_EQ(buf[3], a[3]);
+ ASSERT_EQ(c.copy(buf, 99), 3U);
+ ASSERT_EQ(buf[0], c[0]);
+ ASSERT_EQ(buf[1], c[1]);
+ ASSERT_EQ(buf[2], c[2]);
+ ASSERT_EQ(buf[3], a[3]);
+
+ ASSERT_EQ(Piece::npos, TypeParam::npos);
+
+ ASSERT_EQ(a.find(b), 0U);
+ ASSERT_EQ(a.find(b, 1), Piece::npos);
+ ASSERT_EQ(a.find(c), 23U);
+ ASSERT_EQ(a.find(c, 9), 23U);
+ ASSERT_EQ(a.find(c, Piece::npos), Piece::npos);
+ ASSERT_EQ(b.find(c), Piece::npos);
+ ASSERT_EQ(b.find(c, Piece::npos), Piece::npos);
+ ASSERT_EQ(a.find(d), 0U);
+ ASSERT_EQ(a.find(e), 0U);
+ ASSERT_EQ(a.find(d, 12), 12U);
+ ASSERT_EQ(a.find(e, 17), 17U);
+ TypeParam not_found(TestFixture::as_string("xx not found bb"));
+ Piece g(not_found);
+ ASSERT_EQ(a.find(g), Piece::npos);
+ // empty string nonsense
+ ASSERT_EQ(d.find(b), Piece::npos);
+ ASSERT_EQ(e.find(b), Piece::npos);
+ ASSERT_EQ(d.find(b, 4), Piece::npos);
+ ASSERT_EQ(e.find(b, 7), Piece::npos);
+
+ size_t empty_search_pos = TypeParam().find(TypeParam());
+ ASSERT_EQ(d.find(d), empty_search_pos);
+ ASSERT_EQ(d.find(e), empty_search_pos);
+ ASSERT_EQ(e.find(d), empty_search_pos);
+ ASSERT_EQ(e.find(e), empty_search_pos);
+ ASSERT_EQ(d.find(d, 4), std::string().find(std::string(), 4));
+ ASSERT_EQ(d.find(e, 4), std::string().find(std::string(), 4));
+ ASSERT_EQ(e.find(d, 4), std::string().find(std::string(), 4));
+ ASSERT_EQ(e.find(e, 4), std::string().find(std::string(), 4));
+
+ ASSERT_EQ(a.find('a'), 0U);
+ ASSERT_EQ(a.find('c'), 2U);
+ ASSERT_EQ(a.find('z'), 25U);
+ ASSERT_EQ(a.find('$'), Piece::npos);
+ ASSERT_EQ(a.find('\0'), Piece::npos);
+ ASSERT_EQ(f.find('\0'), 3U);
+ ASSERT_EQ(f.find('3'), 2U);
+ ASSERT_EQ(f.find('5'), 5U);
+ ASSERT_EQ(g.find('o'), 4U);
+ ASSERT_EQ(g.find('o', 4), 4U);
+ ASSERT_EQ(g.find('o', 5), 8U);
+ ASSERT_EQ(a.find('b', 5), Piece::npos);
+ // empty string nonsense
+ ASSERT_EQ(d.find('\0'), Piece::npos);
+ ASSERT_EQ(e.find('\0'), Piece::npos);
+ ASSERT_EQ(d.find('\0', 4), Piece::npos);
+ ASSERT_EQ(e.find('\0', 7), Piece::npos);
+ ASSERT_EQ(d.find('x'), Piece::npos);
+ ASSERT_EQ(e.find('x'), Piece::npos);
+ ASSERT_EQ(d.find('x', 4), Piece::npos);
+ ASSERT_EQ(e.find('x', 7), Piece::npos);
+
+ ASSERT_EQ(a.rfind(b), 0U);
+ ASSERT_EQ(a.rfind(b, 1), 0U);
+ ASSERT_EQ(a.rfind(c), 23U);
+ ASSERT_EQ(a.rfind(c, 22U), Piece::npos);
+ ASSERT_EQ(a.rfind(c, 1U), Piece::npos);
+ ASSERT_EQ(a.rfind(c, 0U), Piece::npos);
+ ASSERT_EQ(b.rfind(c), Piece::npos);
+ ASSERT_EQ(b.rfind(c, 0U), Piece::npos);
+ ASSERT_EQ(a.rfind(d), static_cast<size_t>(a.as_string().rfind(TypeParam())));
+ ASSERT_EQ(a.rfind(e), a.as_string().rfind(TypeParam()));
+ ASSERT_EQ(a.rfind(d, 12), 12U);
+ ASSERT_EQ(a.rfind(e, 17), 17U);
+ ASSERT_EQ(a.rfind(g), Piece::npos);
+ ASSERT_EQ(d.rfind(b), Piece::npos);
+ ASSERT_EQ(e.rfind(b), Piece::npos);
+ ASSERT_EQ(d.rfind(b, 4), Piece::npos);
+ ASSERT_EQ(e.rfind(b, 7), Piece::npos);
+ // empty string nonsense
+ ASSERT_EQ(d.rfind(d, 4), std::string().rfind(std::string()));
+ ASSERT_EQ(e.rfind(d, 7), std::string().rfind(std::string()));
+ ASSERT_EQ(d.rfind(e, 4), std::string().rfind(std::string()));
+ ASSERT_EQ(e.rfind(e, 7), std::string().rfind(std::string()));
+ ASSERT_EQ(d.rfind(d), std::string().rfind(std::string()));
+ ASSERT_EQ(e.rfind(d), std::string().rfind(std::string()));
+ ASSERT_EQ(d.rfind(e), std::string().rfind(std::string()));
+ ASSERT_EQ(e.rfind(e), std::string().rfind(std::string()));
+
+ ASSERT_EQ(g.rfind('o'), 8U);
+ ASSERT_EQ(g.rfind('q'), Piece::npos);
+ ASSERT_EQ(g.rfind('o', 8), 8U);
+ ASSERT_EQ(g.rfind('o', 7), 4U);
+ ASSERT_EQ(g.rfind('o', 3), Piece::npos);
+ ASSERT_EQ(f.rfind('\0'), 3U);
+ ASSERT_EQ(f.rfind('\0', 12), 3U);
+ ASSERT_EQ(f.rfind('3'), 2U);
+ ASSERT_EQ(f.rfind('5'), 5U);
+ // empty string nonsense
+ ASSERT_EQ(d.rfind('o'), Piece::npos);
+ ASSERT_EQ(e.rfind('o'), Piece::npos);
+ ASSERT_EQ(d.rfind('o', 4), Piece::npos);
+ ASSERT_EQ(e.rfind('o', 7), Piece::npos);
+
+ TypeParam one_two_three_four(TestFixture::as_string("one,two:three;four"));
+ TypeParam comma_colon(TestFixture::as_string(",:"));
+ ASSERT_EQ(3U, Piece(one_two_three_four).find_first_of(comma_colon));
+ ASSERT_EQ(a.find_first_of(b), 0U);
+ ASSERT_EQ(a.find_first_of(b, 0), 0U);
+ ASSERT_EQ(a.find_first_of(b, 1), 1U);
+ ASSERT_EQ(a.find_first_of(b, 2), 2U);
+ ASSERT_EQ(a.find_first_of(b, 3), Piece::npos);
+ ASSERT_EQ(a.find_first_of(c), 23U);
+ ASSERT_EQ(a.find_first_of(c, 23), 23U);
+ ASSERT_EQ(a.find_first_of(c, 24), 24U);
+ ASSERT_EQ(a.find_first_of(c, 25), 25U);
+ ASSERT_EQ(a.find_first_of(c, 26), Piece::npos);
+ ASSERT_EQ(g.find_first_of(b), 13U);
+ ASSERT_EQ(g.find_first_of(c), 0U);
+ ASSERT_EQ(a.find_first_of(f), Piece::npos);
+ ASSERT_EQ(f.find_first_of(a), Piece::npos);
+ // empty string nonsense
+ ASSERT_EQ(a.find_first_of(d), Piece::npos);
+ ASSERT_EQ(a.find_first_of(e), Piece::npos);
+ ASSERT_EQ(d.find_first_of(b), Piece::npos);
+ ASSERT_EQ(e.find_first_of(b), Piece::npos);
+ ASSERT_EQ(d.find_first_of(d), Piece::npos);
+ ASSERT_EQ(e.find_first_of(d), Piece::npos);
+ ASSERT_EQ(d.find_first_of(e), Piece::npos);
+ ASSERT_EQ(e.find_first_of(e), Piece::npos);
+
+ ASSERT_EQ(a.find_first_not_of(b), 3U);
+ ASSERT_EQ(a.find_first_not_of(c), 0U);
+ ASSERT_EQ(b.find_first_not_of(a), Piece::npos);
+ ASSERT_EQ(c.find_first_not_of(a), Piece::npos);
+ ASSERT_EQ(f.find_first_not_of(a), 0U);
+ ASSERT_EQ(a.find_first_not_of(f), 0U);
+ ASSERT_EQ(a.find_first_not_of(d), 0U);
+ ASSERT_EQ(a.find_first_not_of(e), 0U);
+ // empty string nonsense
+ ASSERT_EQ(d.find_first_not_of(a), Piece::npos);
+ ASSERT_EQ(e.find_first_not_of(a), Piece::npos);
+ ASSERT_EQ(d.find_first_not_of(d), Piece::npos);
+ ASSERT_EQ(e.find_first_not_of(d), Piece::npos);
+ ASSERT_EQ(d.find_first_not_of(e), Piece::npos);
+ ASSERT_EQ(e.find_first_not_of(e), Piece::npos);
+
+ TypeParam equals(TestFixture::as_string("===="));
+ Piece h(equals);
+ ASSERT_EQ(h.find_first_not_of('='), Piece::npos);
+ ASSERT_EQ(h.find_first_not_of('=', 3), Piece::npos);
+ ASSERT_EQ(h.find_first_not_of('\0'), 0U);
+ ASSERT_EQ(g.find_first_not_of('x'), 2U);
+ ASSERT_EQ(f.find_first_not_of('\0'), 0U);
+ ASSERT_EQ(f.find_first_not_of('\0', 3), 4U);
+ ASSERT_EQ(f.find_first_not_of('\0', 2), 2U);
+ // empty string nonsense
+ ASSERT_EQ(d.find_first_not_of('x'), Piece::npos);
+ ASSERT_EQ(e.find_first_not_of('x'), Piece::npos);
+ ASSERT_EQ(d.find_first_not_of('\0'), Piece::npos);
+ ASSERT_EQ(e.find_first_not_of('\0'), Piece::npos);
+
+ // Piece g("xx not found bb");
+ TypeParam fifty_six(TestFixture::as_string("56"));
+ Piece i(fifty_six);
+ ASSERT_EQ(h.find_last_of(a), Piece::npos);
+ ASSERT_EQ(g.find_last_of(a), g.size()-1);
+ ASSERT_EQ(a.find_last_of(b), 2U);
+ ASSERT_EQ(a.find_last_of(c), a.size()-1);
+ ASSERT_EQ(f.find_last_of(i), 6U);
+ ASSERT_EQ(a.find_last_of('a'), 0U);
+ ASSERT_EQ(a.find_last_of('b'), 1U);
+ ASSERT_EQ(a.find_last_of('z'), 25U);
+ ASSERT_EQ(a.find_last_of('a', 5), 0U);
+ ASSERT_EQ(a.find_last_of('b', 5), 1U);
+ ASSERT_EQ(a.find_last_of('b', 0), Piece::npos);
+ ASSERT_EQ(a.find_last_of('z', 25), 25U);
+ ASSERT_EQ(a.find_last_of('z', 24), Piece::npos);
+ ASSERT_EQ(f.find_last_of(i, 5), 5U);
+ ASSERT_EQ(f.find_last_of(i, 6), 6U);
+ ASSERT_EQ(f.find_last_of(a, 4), Piece::npos);
+ // empty string nonsense
+ ASSERT_EQ(f.find_last_of(d), Piece::npos);
+ ASSERT_EQ(f.find_last_of(e), Piece::npos);
+ ASSERT_EQ(f.find_last_of(d, 4), Piece::npos);
+ ASSERT_EQ(f.find_last_of(e, 4), Piece::npos);
+ ASSERT_EQ(d.find_last_of(d), Piece::npos);
+ ASSERT_EQ(d.find_last_of(e), Piece::npos);
+ ASSERT_EQ(e.find_last_of(d), Piece::npos);
+ ASSERT_EQ(e.find_last_of(e), Piece::npos);
+ ASSERT_EQ(d.find_last_of(f), Piece::npos);
+ ASSERT_EQ(e.find_last_of(f), Piece::npos);
+ ASSERT_EQ(d.find_last_of(d, 4), Piece::npos);
+ ASSERT_EQ(d.find_last_of(e, 4), Piece::npos);
+ ASSERT_EQ(e.find_last_of(d, 4), Piece::npos);
+ ASSERT_EQ(e.find_last_of(e, 4), Piece::npos);
+ ASSERT_EQ(d.find_last_of(f, 4), Piece::npos);
+ ASSERT_EQ(e.find_last_of(f, 4), Piece::npos);
+
+ ASSERT_EQ(a.find_last_not_of(b), a.size()-1);
+ ASSERT_EQ(a.find_last_not_of(c), 22U);
+ ASSERT_EQ(b.find_last_not_of(a), Piece::npos);
+ ASSERT_EQ(b.find_last_not_of(b), Piece::npos);
+ ASSERT_EQ(f.find_last_not_of(i), 4U);
+ ASSERT_EQ(a.find_last_not_of(c, 24), 22U);
+ ASSERT_EQ(a.find_last_not_of(b, 3), 3U);
+ ASSERT_EQ(a.find_last_not_of(b, 2), Piece::npos);
+ // empty string nonsense
+ ASSERT_EQ(f.find_last_not_of(d), f.size()-1);
+ ASSERT_EQ(f.find_last_not_of(e), f.size()-1);
+ ASSERT_EQ(f.find_last_not_of(d, 4), 4U);
+ ASSERT_EQ(f.find_last_not_of(e, 4), 4U);
+ ASSERT_EQ(d.find_last_not_of(d), Piece::npos);
+ ASSERT_EQ(d.find_last_not_of(e), Piece::npos);
+ ASSERT_EQ(e.find_last_not_of(d), Piece::npos);
+ ASSERT_EQ(e.find_last_not_of(e), Piece::npos);
+ ASSERT_EQ(d.find_last_not_of(f), Piece::npos);
+ ASSERT_EQ(e.find_last_not_of(f), Piece::npos);
+ ASSERT_EQ(d.find_last_not_of(d, 4), Piece::npos);
+ ASSERT_EQ(d.find_last_not_of(e, 4), Piece::npos);
+ ASSERT_EQ(e.find_last_not_of(d, 4), Piece::npos);
+ ASSERT_EQ(e.find_last_not_of(e, 4), Piece::npos);
+ ASSERT_EQ(d.find_last_not_of(f, 4), Piece::npos);
+ ASSERT_EQ(e.find_last_not_of(f, 4), Piece::npos);
+
+ ASSERT_EQ(h.find_last_not_of('x'), h.size() - 1);
+ ASSERT_EQ(h.find_last_not_of('='), Piece::npos);
+ ASSERT_EQ(b.find_last_not_of('c'), 1U);
+ ASSERT_EQ(h.find_last_not_of('x', 2), 2U);
+ ASSERT_EQ(h.find_last_not_of('=', 2), Piece::npos);
+ ASSERT_EQ(b.find_last_not_of('b', 1), 0U);
+ // empty string nonsense
+ ASSERT_EQ(d.find_last_not_of('x'), Piece::npos);
+ ASSERT_EQ(e.find_last_not_of('x'), Piece::npos);
+ ASSERT_EQ(d.find_last_not_of('\0'), Piece::npos);
+ ASSERT_EQ(e.find_last_not_of('\0'), Piece::npos);
+
+ ASSERT_EQ(a.substr(0, 3), b);
+ ASSERT_EQ(a.substr(23), c);
+ ASSERT_EQ(a.substr(23, 3), c);
+ ASSERT_EQ(a.substr(23, 99), c);
+ ASSERT_EQ(a.substr(0), a);
+ ASSERT_EQ(a.substr(3, 2), TestFixture::as_string("de"));
+ // empty string nonsense
+ ASSERT_EQ(a.substr(99, 2), e);
+ ASSERT_EQ(d.substr(99), e);
+ ASSERT_EQ(d.substr(0, 99), e);
+ ASSERT_EQ(d.substr(99, 99), e);
+}
+
+// Exercises StringPiece operations that std::string does not provide:
+// remove_prefix/remove_suffix, set(), and as_string(), including data
+// containing an embedded NUL character.
+TYPED_TEST(CommonStringPieceTest, CheckCustom) {
+  TypeParam foobar(TestFixture::as_string("foobar"));
+  BasicStringPiece<TypeParam> a(foobar);
+  TypeParam s1(TestFixture::as_string("123"));
+  s1 += static_cast<typename TypeParam::value_type>('\0');
+  s1 += TestFixture::as_string("456");
+  BasicStringPiece<TypeParam> b(s1);
+  BasicStringPiece<TypeParam> e;
+  TypeParam s2;
+
+  // remove_prefix
+  BasicStringPiece<TypeParam> c(a);
+  c.remove_prefix(3);
+  ASSERT_EQ(c, TestFixture::as_string("bar"));
+  c = a;
+  c.remove_prefix(0);
+  ASSERT_EQ(c, a);
+  // Removing the full length must leave the piece equal to an empty piece.
+  c.remove_prefix(c.size());
+  ASSERT_EQ(c, e);
+
+  // remove_suffix
+  c = a;
+  c.remove_suffix(3);
+  ASSERT_EQ(c, TestFixture::as_string("foo"));
+  c = a;
+  c.remove_suffix(0);
+  ASSERT_EQ(c, a);
+  c.remove_suffix(c.size());
+  ASSERT_EQ(c, e);
+
+  // set
+  c.set(foobar.c_str());
+  ASSERT_EQ(c, a);
+  c.set(foobar.c_str(), 6);
+  ASSERT_EQ(c, a);
+  c.set(foobar.c_str(), 0);
+  ASSERT_EQ(c, e);
+  c.set(foobar.c_str(), 7); // Note, has an embedded NULL
+  ASSERT_NE(c, a);
+
+  // as_string
+  TypeParam s3(a.as_string().c_str(), 7); // Note, has an embedded NULL
+  ASSERT_TRUE(c == s3);
+  TypeParam s4(e.as_string());
+  ASSERT_TRUE(s4.empty());
+}
+
+// Exercises the std::string-flavored StringPiece extensions: CopyToString,
+// AppendToString, starts_with/ends_with, and set(), including a string with
+// an embedded NUL character.
+TEST(StringPieceTest, CheckCustom) {
+  StringPiece a("foobar");
+  std::string s1("123");
+  s1 += '\0';
+  s1 += "456";
+  StringPiece b(s1);
+  StringPiece e;
+  std::string s2;
+
+  // CopyToString
+  a.CopyToString(&s2);
+  ASSERT_EQ(s2.size(), 6U);
+  ASSERT_EQ(s2, "foobar");
+  b.CopyToString(&s2);
+  // The embedded NUL must be copied, not treated as a terminator.
+  ASSERT_EQ(s2.size(), 7U);
+  ASSERT_EQ(s1, s2);
+  e.CopyToString(&s2);
+  ASSERT_TRUE(s2.empty());
+
+  // AppendToString
+  s2.erase();
+  a.AppendToString(&s2);
+  ASSERT_EQ(s2.size(), 6U);
+  ASSERT_EQ(s2, "foobar");
+  a.AppendToString(&s2);
+  ASSERT_EQ(s2.size(), 12U);
+  ASSERT_EQ(s2, "foobarfoobar");
+
+  // starts_with
+  ASSERT_TRUE(a.starts_with(a));
+  ASSERT_TRUE(a.starts_with("foo"));
+  ASSERT_TRUE(a.starts_with(e));
+  ASSERT_TRUE(b.starts_with(s1));
+  ASSERT_TRUE(b.starts_with(b));
+  ASSERT_TRUE(b.starts_with(e));
+  ASSERT_TRUE(e.starts_with(""));
+  ASSERT_TRUE(!a.starts_with(b));
+  ASSERT_TRUE(!b.starts_with(a));
+  ASSERT_TRUE(!e.starts_with(a));
+
+  // ends with
+  ASSERT_TRUE(a.ends_with(a));
+  ASSERT_TRUE(a.ends_with("bar"));
+  ASSERT_TRUE(a.ends_with(e));
+  ASSERT_TRUE(b.ends_with(s1));
+  ASSERT_TRUE(b.ends_with(b));
+  ASSERT_TRUE(b.ends_with(e));
+  ASSERT_TRUE(e.ends_with(""));
+  ASSERT_TRUE(!a.ends_with(b));
+  ASSERT_TRUE(!b.ends_with(a));
+  ASSERT_TRUE(!e.ends_with(a));
+
+  StringPiece c;
+  c.set("foobar", 6);
+  ASSERT_EQ(c, a);
+  c.set("foobar", 0);
+  ASSERT_EQ(c, e);
+  // Length 7 includes the literal's terminating NUL, so c != a.
+  c.set("foobar", 7);
+  ASSERT_NE(c, a);
+}
+
+// Constructing or set()ting a StringPiece from NULL must yield a valid,
+// empty piece rather than crashing.
+TYPED_TEST(CommonStringPieceTest, CheckNULL) {
+  // we used to crash here, but now we don't.
+  BasicStringPiece<TypeParam> s(NULL);
+  ASSERT_EQ(s.data(), (const typename TypeParam::value_type*)NULL);
+  ASSERT_EQ(s.size(), 0U);
+
+  s.set(NULL);
+  ASSERT_EQ(s.data(), (const typename TypeParam::value_type*)NULL);
+  ASSERT_EQ(s.size(), 0U);
+
+  // Converting the NULL piece yields an empty string.
+  TypeParam str = s.as_string();
+  ASSERT_EQ(str.length(), 0U);
+  ASSERT_EQ(str, TypeParam());
+}
+
+// Comparison operators and compare() on strings longer than 4 bytes.
+TYPED_TEST(CommonStringPieceTest, CheckComparisons2) {
+  TypeParam alphabet(TestFixture::as_string("abcdefghijklmnopqrstuvwxyz"));
+  TypeParam alphabet_z(TestFixture::as_string("abcdefghijklmnopqrstuvwxyzz"));
+  TypeParam alphabet_y(TestFixture::as_string("abcdefghijklmnopqrstuvwxyy"));
+  BasicStringPiece<TypeParam> abc(alphabet);
+
+  // check comparison operations on strings longer than 4 bytes.
+  ASSERT_TRUE(abc == BasicStringPiece<TypeParam>(alphabet));
+  ASSERT_EQ(abc.compare(BasicStringPiece<TypeParam>(alphabet)), 0);
+
+  // A longer string with an equal prefix compares greater.
+  ASSERT_TRUE(abc < BasicStringPiece<TypeParam>(alphabet_z));
+  ASSERT_LT(abc.compare(BasicStringPiece<TypeParam>(alphabet_z)), 0);
+
+  ASSERT_TRUE(abc > BasicStringPiece<TypeParam>(alphabet_y));
+  ASSERT_GT(abc.compare(BasicStringPiece<TypeParam>(alphabet_y)), 0);
+}
+
+// Test operations only supported by std::string version.
+TEST(StringPieceTest, CheckComparisons2) {
+  StringPiece abc("abcdefghijklmnopqrstuvwxyz");
+
+  // starts_with
+  ASSERT_TRUE(abc.starts_with(abc));
+  ASSERT_TRUE(abc.starts_with("abcdefghijklm"));
+  ASSERT_TRUE(!abc.starts_with("abcdefguvwxyz"));
+
+  // ends_with
+  ASSERT_TRUE(abc.ends_with(abc));
+  ASSERT_TRUE(!abc.ends_with("abcdefguvwxyz"));
+  ASSERT_TRUE(abc.ends_with("nopqrstuvwxyz"));
+}
+
+// Mixing const char* with the string type must resolve to string comparison,
+// not pointer comparison.
+TYPED_TEST(CommonStringPieceTest, StringCompareNotAmbiguous) {
+  ASSERT_TRUE(TestFixture::as_string("hello").c_str() ==
+              TestFixture::as_string("hello"));
+  ASSERT_TRUE(TestFixture::as_string("hello").c_str() <
+              TestFixture::as_string("world"));
+}
+
+// A StringPiece must compare equal against both the string type and a
+// C string holding the same contents.
+TYPED_TEST(CommonStringPieceTest, HeterogenousStringPieceEquals) {
+  TypeParam hello(TestFixture::as_string("hello"));
+
+  ASSERT_TRUE(BasicStringPiece<TypeParam>(hello) == hello);
+  ASSERT_TRUE(hello.c_str() == BasicStringPiece<TypeParam>(hello));
+}
+
+// string16-specific stuff
+// string16-specific stuff
+TEST(StringPiece16Test, CheckSTL) {
+  // Check some non-ascii characters.
+  string16 fifth(ASCIIToUTF16("123"));
+  fifth.push_back(0x0000);
+  // 0xd8c5/0xdffe are UTF-16 surrogate code units; StringPiece16 stores and
+  // indexes them as individual code units, not as one character.
+  fifth.push_back(0xd8c5);
+  fifth.push_back(0xdffe);
+  StringPiece16 f(fifth);
+
+  ASSERT_EQ(f[3], '\0');
+  ASSERT_EQ(f[5], static_cast<char16>(0xdffe));
+
+  ASSERT_EQ(f.size(), 6U);
+}
+
+
+
+TEST(StringPiece16Test, CheckConversion) {
+  // Make sure that we can convert from UTF8 to UTF16 and back. We use a
+  // character outside the BMP (G clef, four UTF-8 bytes / two UTF-16 code
+  // units) to test this.
+  ASSERT_EQ(
+      UTF16ToUTF8(
+          StringPiece16(UTF8ToUTF16("\xf0\x9d\x84\x9e")).as_string()),
+      "\xf0\x9d\x84\x9e");
+}
+
+// Verifies all constructor forms: copy from string, C string, explicit
+// length (including zero), NULL, default, and iterator pairs.
+TYPED_TEST(CommonStringPieceTest, CheckConstructors) {
+  TypeParam str(TestFixture::as_string("hello world"));
+  TypeParam empty;
+
+  ASSERT_TRUE(str == BasicStringPiece<TypeParam>(str));
+  ASSERT_TRUE(str == BasicStringPiece<TypeParam>(str.c_str()));
+  ASSERT_TRUE(TestFixture::as_string("hello") ==
+              BasicStringPiece<TypeParam>(str.c_str(), 5));
+  ASSERT_TRUE(empty == BasicStringPiece<TypeParam>(str.c_str(),
+      static_cast<typename BasicStringPiece<TypeParam>::size_type>(0)));
+  ASSERT_TRUE(empty == BasicStringPiece<TypeParam>(NULL));
+  ASSERT_TRUE(empty == BasicStringPiece<TypeParam>(NULL,
+      static_cast<typename BasicStringPiece<TypeParam>::size_type>(0)));
+  ASSERT_TRUE(empty == BasicStringPiece<TypeParam>());
+  ASSERT_TRUE(str == BasicStringPiece<TypeParam>(str.begin(), str.end()));
+  ASSERT_TRUE(empty == BasicStringPiece<TypeParam>(str.begin(), str.begin()));
+  ASSERT_TRUE(empty == BasicStringPiece<TypeParam>(empty));
+  ASSERT_TRUE(empty == BasicStringPiece<TypeParam>(empty.begin(), empty.end()));
+}
+
+} // namespace base
diff --git a/libchrome/base/strings/string_split.cc b/libchrome/base/strings/string_split.cc
new file mode 100644
index 0000000..6c949b9
--- /dev/null
+++ b/libchrome/base/strings/string_split.cc
@@ -0,0 +1,264 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_split.h"
+
+#include <stddef.h>
+
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+#include "base/third_party/icu/icu_utf.h"
+
+namespace base {
+
+namespace {
+
+// PieceToOutputType converts a StringPiece as needed to a given output type,
+// which is either the same type of StringPiece (a NOP) or the corresponding
+// non-piece string type.
+//
+// The default converter is a NOP, it works when the OutputType is the
+// correct StringPiece.
+// Primary template: identity conversion, used when the requested output
+// type is the StringPiece itself.
+template<typename Str, typename OutputType>
+OutputType PieceToOutputType(BasicStringPiece<Str> piece) {
+  return piece;
+}
+template<> // Convert StringPiece to std::string
+std::string PieceToOutputType<std::string, std::string>(StringPiece piece) {
+  return piece.as_string();
+}
+template<> // Convert StringPiece16 to string16.
+string16 PieceToOutputType<string16, string16>(StringPiece16 piece) {
+  return piece.as_string();
+}
+
+// Returns either the ASCII or UTF-16 whitespace.
+// Declared but deliberately not defined for the general case: only the two
+// specializations below exist, so any other instantiation fails to link.
+template<typename Str> BasicStringPiece<Str> WhitespaceForType();
+template<> StringPiece16 WhitespaceForType<string16>() {
+  return kWhitespaceUTF16;
+}
+template<> StringPiece WhitespaceForType<std::string>() {
+  return kWhitespaceASCII;
+}
+
+// Optimize the single-character case to call find() on the string instead,
+// since this is the common case and can be made faster. This could have been
+// done with template specialization too, but would have been less clear.
+//
+// There is no corresponding FindFirstNotOf because StringPiece already
+// implements these different versions that do the optimized searching.
+// Single-character overloads: forward to find(), the optimized path noted
+// in the comment above.
+size_t FindFirstOf(StringPiece piece, char c, size_t pos) {
+  return piece.find(c, pos);
+}
+size_t FindFirstOf(StringPiece16 piece, char16 c, size_t pos) {
+  return piece.find(c, pos);
+}
+// Multi-character overloads: match any one character from |one_of|.
+size_t FindFirstOf(StringPiece piece, StringPiece one_of, size_t pos) {
+  return piece.find_first_of(one_of, pos);
+}
+size_t FindFirstOf(StringPiece16 piece, StringPiece16 one_of, size_t pos) {
+  return piece.find_first_of(one_of, pos);
+}
+
+// General string splitter template. Can take 8- or 16-bit input, can produce
+// the corresponding string or StringPiece output, and can take single- or
+// multiple-character delimiters.
+//
+// DelimiterType is either a character (Str::value_type) or a string piece of
+// multiple characters (BasicStringPiece<Str>). StringPiece has a version of
+// find for both of these cases, and the single-character version is the most
+// common and can be implemented faster, which is why this is a template.
+template<typename Str, typename OutputStringType, typename DelimiterType>
+static std::vector<OutputStringType> SplitStringT(
+    BasicStringPiece<Str> str,
+    DelimiterType delimiter,
+    WhitespaceHandling whitespace,
+    SplitResult result_type) {
+  std::vector<OutputStringType> result;
+  // Empty input produces an empty vector even with SPLIT_WANT_ALL.
+  if (str.empty())
+    return result;
+
+  // |start| is the index just past the previous delimiter; npos means the
+  // final piece has already been emitted.
+  size_t start = 0;
+  while (start != Str::npos) {
+    size_t end = FindFirstOf(str, delimiter, start);
+
+    BasicStringPiece<Str> piece;
+    if (end == Str::npos) {
+      piece = str.substr(start);
+      start = Str::npos;  // Terminate the loop after this final piece.
+    } else {
+      piece = str.substr(start, end - start);
+      start = end + 1;
+    }
+
+    // Trimming happens before the emptiness check, so whitespace-only
+    // pieces are dropped under SPLIT_WANT_NONEMPTY.
+    if (whitespace == TRIM_WHITESPACE)
+      piece = TrimString(piece, WhitespaceForType<Str>(), TRIM_ALL);
+
+    if (result_type == SPLIT_WANT_ALL || !piece.empty())
+      result.push_back(PieceToOutputType<Str, OutputStringType>(piece));
+  }
+  return result;
+}
+
+// Parses |input| as "key<delimiter>value" and appends the pair to |result|.
+// A (possibly empty) pair is always appended, even on failure.  Returns
+// false when |delimiter| is absent, or when only delimiters follow the key
+// (i.e. the value is empty).
+bool AppendStringKeyValue(StringPiece input,
+                          char delimiter,
+                          StringPairs* result) {
+  // Always append a new item regardless of success (it might be empty). The
+  // below code will copy the strings directly into the result pair.
+  result->resize(result->size() + 1);
+  auto& result_pair = result->back();
+
+  // Find the delimiter.
+  size_t end_key_pos = input.find_first_of(delimiter);
+  if (end_key_pos == std::string::npos) {
+    DVLOG(1) << "cannot find delimiter in: " << input;
+    return false; // No delimiter.
+  }
+  input.substr(0, end_key_pos).CopyToString(&result_pair.first);
+
+  // Find the value string.
+  // |remains| starts at the delimiter itself; find_first_not_of() then
+  // skips over a run of consecutive delimiters before the value.
+  StringPiece remains = input.substr(end_key_pos, input.size() - end_key_pos);
+  size_t begin_value_pos = remains.find_first_not_of(delimiter);
+  if (begin_value_pos == StringPiece::npos) {
+    DVLOG(1) << "cannot parse value from input: " << input;
+    return false; // No value.
+  }
+  remains.substr(begin_value_pos, remains.size() - begin_value_pos)
+      .CopyToString(&result_pair.second);
+
+  return true;
+}
+
+// Shared implementation of the SplitStringUsingSubstr() family: splits on a
+// full substring |delimiter| rather than a set of single characters.
+// Unlike SplitStringT, an empty |input| yields one empty token under
+// SPLIT_WANT_ALL (the loop body runs once before find() returns npos).
+template <typename Str, typename OutputStringType>
+void SplitStringUsingSubstrT(BasicStringPiece<Str> input,
+                             BasicStringPiece<Str> delimiter,
+                             WhitespaceHandling whitespace,
+                             SplitResult result_type,
+                             std::vector<OutputStringType>* result) {
+  using Piece = BasicStringPiece<Str>;
+  using size_type = typename Piece::size_type;
+
+  result->clear();
+  for (size_type begin_index = 0, end_index = 0; end_index != Piece::npos;
+       begin_index = end_index + delimiter.size()) {
+    end_index = input.find(delimiter, begin_index);
+    Piece term = end_index == Piece::npos
+                     ? input.substr(begin_index)
+                     : input.substr(begin_index, end_index - begin_index);
+
+    if (whitespace == TRIM_WHITESPACE)
+      term = TrimString(term, WhitespaceForType<Str>(), TRIM_ALL);
+
+    if (result_type == SPLIT_WANT_ALL || !term.empty())
+      result->push_back(PieceToOutputType<Str, OutputStringType>(term));
+  }
+}
+
+} // namespace
+
+std::vector<std::string> SplitString(StringPiece input,
+                                     StringPiece separators,
+                                     WhitespaceHandling whitespace,
+                                     SplitResult result_type) {
+  // A single-character separator dispatches to the faster char overload of
+  // FindFirstOf() inside SplitStringT.
+  if (separators.size() == 1) {
+    return SplitStringT<std::string, std::string, char>(
+        input, separators[0], whitespace, result_type);
+  }
+  return SplitStringT<std::string, std::string, StringPiece>(
+      input, separators, whitespace, result_type);
+}
+
+// UTF-16 counterpart of the above.
+std::vector<string16> SplitString(StringPiece16 input,
+                                  StringPiece16 separators,
+                                  WhitespaceHandling whitespace,
+                                  SplitResult result_type) {
+  if (separators.size() == 1) {
+    return SplitStringT<string16, string16, char16>(
+        input, separators[0], whitespace, result_type);
+  }
+  return SplitStringT<string16, string16, StringPiece16>(
+      input, separators, whitespace, result_type);
+}
+
+// Non-copying variant: the returned pieces reference |input|'s buffer.
+std::vector<StringPiece> SplitStringPiece(StringPiece input,
+                                          StringPiece separators,
+                                          WhitespaceHandling whitespace,
+                                          SplitResult result_type) {
+  if (separators.size() == 1) {
+    return SplitStringT<std::string, StringPiece, char>(
+        input, separators[0], whitespace, result_type);
+  }
+  return SplitStringT<std::string, StringPiece, StringPiece>(
+      input, separators, whitespace, result_type);
+}
+
+// Non-copying UTF-16 variant.
+std::vector<StringPiece16> SplitStringPiece(StringPiece16 input,
+                                            StringPiece16 separators,
+                                            WhitespaceHandling whitespace,
+                                            SplitResult result_type) {
+  if (separators.size() == 1) {
+    return SplitStringT<string16, StringPiece16, char16>(
+        input, separators[0], whitespace, result_type);
+  }
+  return SplitStringT<string16, StringPiece16, StringPiece16>(
+      input, separators, whitespace, result_type);
+}
+
+bool SplitStringIntoKeyValuePairs(StringPiece input,
+                                  char key_value_delimiter,
+                                  char key_value_pair_delimiter,
+                                  StringPairs* key_value_pairs) {
+  key_value_pairs->clear();
+
+  // First split into candidate pairs; empty and whitespace-only pairs are
+  // dropped here (TRIM_WHITESPACE + SPLIT_WANT_NONEMPTY).
+  std::vector<StringPiece> pairs = SplitStringPiece(
+      input, std::string(1, key_value_pair_delimiter),
+      TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+  key_value_pairs->reserve(pairs.size());
+
+  bool success = true;
+  for (const StringPiece& pair : pairs) {
+    if (!AppendStringKeyValue(pair, key_value_delimiter, key_value_pairs)) {
+      // Don't return here, to allow for pairs without associated
+      // value or key; just record that the split failed.
+      success = false;
+    }
+  }
+  return success;
+}
+
+// Legacy copying interface: always trims whitespace and keeps empty tokens.
+void SplitStringUsingSubstr(StringPiece16 input,
+                            StringPiece16 delimiter,
+                            std::vector<string16>* result) {
+  SplitStringUsingSubstrT(input, delimiter, TRIM_WHITESPACE, SPLIT_WANT_ALL,
+                          result);
+}
+
+// 8-bit counterpart of the above.
+void SplitStringUsingSubstr(StringPiece input,
+                            StringPiece delimiter,
+                            std::vector<std::string>* result) {
+  SplitStringUsingSubstrT(input, delimiter, TRIM_WHITESPACE, SPLIT_WANT_ALL,
+                          result);
+}
+
+// Non-copying variant: the returned pieces reference |input|'s buffer.
+std::vector<StringPiece16> SplitStringPieceUsingSubstr(
+    StringPiece16 input,
+    StringPiece16 delimiter,
+    WhitespaceHandling whitespace,
+    SplitResult result_type) {
+  std::vector<StringPiece16> result;
+  SplitStringUsingSubstrT(input, delimiter, whitespace, result_type, &result);
+  return result;
+}
+
+std::vector<StringPiece> SplitStringPieceUsingSubstr(
+    StringPiece input,
+    StringPiece delimiter,
+    WhitespaceHandling whitespace,
+    SplitResult result_type) {
+  std::vector<StringPiece> result;
+  SplitStringUsingSubstrT(input, delimiter, whitespace, result_type, &result);
+  return result;
+}
+
+} // namespace base
diff --git a/libchrome/base/strings/string_split.h b/libchrome/base/strings/string_split.h
new file mode 100644
index 0000000..ec9f246
--- /dev/null
+++ b/libchrome/base/strings/string_split.h
@@ -0,0 +1,129 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_SPLIT_H_
+#define BASE_STRINGS_STRING_SPLIT_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// Controls whether each split-off piece is trimmed of leading/trailing
+// whitespace before being returned.
+enum WhitespaceHandling {
+  KEEP_WHITESPACE,
+  TRIM_WHITESPACE,
+};
+
+enum SplitResult {
+  // Strictly return all results.
+  //
+  // If the input is ",," and the separator is ',' this will return a
+  // vector of three empty strings.
+  SPLIT_WANT_ALL,
+
+  // Only nonempty results will be added to the results. Multiple separators
+  // will be coalesced. Separators at the beginning and end of the input will
+  // be ignored. With TRIM_WHITESPACE, whitespace-only results will be dropped.
+  //
+  // If the input is ",," and the separator is ',', this will return an empty
+  // vector.
+  SPLIT_WANT_NONEMPTY,
+};
+
+// Split the given string on ANY of the given separators, returning copies of
+// the result.
+//
+// To split on either commas or semicolons, keeping all whitespace:
+//
+// std::vector<std::string> tokens = base::SplitString(
+// input, ",;", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+BASE_EXPORT std::vector<std::string> SplitString(
+ StringPiece input,
+ StringPiece separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type);
+BASE_EXPORT std::vector<string16> SplitString(
+ StringPiece16 input,
+ StringPiece16 separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type);
+
+// Like SplitString above except it returns a vector of StringPieces which
+// reference the original buffer without copying. Although you have to be
+// careful to keep the original string unmodified, this provides an efficient
+// way to iterate through tokens in a string.
+//
+// To iterate through all whitespace-separated tokens in an input string:
+//
+// for (const auto& cur :
+// base::SplitStringPiece(input, base::kWhitespaceASCII,
+// base::KEEP_WHITESPACE,
+// base::SPLIT_WANT_NONEMPTY)) {
+// ...
+BASE_EXPORT std::vector<StringPiece> SplitStringPiece(
+ StringPiece input,
+ StringPiece separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type);
+BASE_EXPORT std::vector<StringPiece16> SplitStringPiece(
+ StringPiece16 input,
+ StringPiece16 separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type);
+
+using StringPairs = std::vector<std::pair<std::string, std::string>>;
+
+// Splits |line| into key value pairs according to the given delimiters and
+// removes whitespace leading each key and trailing each value. Returns true
+// only if each pair has a non-empty key and value. |key_value_pairs| will
+// include ("","") pairs for entries without |key_value_delimiter|.
+BASE_EXPORT bool SplitStringIntoKeyValuePairs(StringPiece input,
+ char key_value_delimiter,
+ char key_value_pair_delimiter,
+ StringPairs* key_value_pairs);
+
+// Similar to SplitString, but use a substring delimiter instead of a list of
+// characters that are all possible delimiters.
+//
+// TODO(brettw) this should probably be changed and expanded to provide a
+// mirror of the SplitString[Piece] API above, just with the different
+// delimiter handling.
+BASE_EXPORT void SplitStringUsingSubstr(StringPiece16 input,
+ StringPiece16 delimiter,
+ std::vector<string16>* result);
+BASE_EXPORT void SplitStringUsingSubstr(StringPiece input,
+ StringPiece delimiter,
+ std::vector<std::string>* result);
+
+// Like SplitStringUsingSubstr above except it returns a vector of StringPieces
+// which reference the original buffer without copying. Although you have to be
+// careful to keep the original string unmodified, this provides an efficient
+// way to iterate through tokens in a string.
+//
+// To iterate through all newline-separated tokens in an input string:
+//
+// for (const auto& cur :
+// base::SplitStringUsingSubstr(input, "\r\n",
+// base::KEEP_WHITESPACE,
+// base::SPLIT_WANT_NONEMPTY)) {
+// ...
+BASE_EXPORT std::vector<StringPiece16> SplitStringPieceUsingSubstr(
+ StringPiece16 input,
+ StringPiece16 delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type);
+BASE_EXPORT std::vector<StringPiece> SplitStringPieceUsingSubstr(
+ StringPiece input,
+ StringPiece delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type);
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRING_SPLIT_H_
diff --git a/libchrome/base/strings/string_split_unittest.cc b/libchrome/base/strings/string_split_unittest.cc
new file mode 100644
index 0000000..657a2db
--- /dev/null
+++ b/libchrome/base/strings/string_split_unittest.cc
@@ -0,0 +1,391 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_split.h"
+
+#include <stddef.h>
+
+#include "base/macros.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::ElementsAre;
+
+namespace base {
+
+// Shared fixture for the SplitStringIntoKeyValuePairs() tests below; each
+// test fills |kv_pairs| and checks both the return value and the parsed
+// pairs.
+class SplitStringIntoKeyValuePairsTest : public testing::Test {
+ protected:
+  base::StringPairs kv_pairs;
+};
+
+TEST_F(SplitStringIntoKeyValuePairsTest, EmptyString) {
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs(std::string(),
+                                           ':',  // Key-value delimiter
+                                           ',',  // Key-value pair delimiter
+                                           &kv_pairs));
+  EXPECT_TRUE(kv_pairs.empty());
+}
+
+// A pair without the key-value delimiter is recorded as ("", "") and the
+// call reports failure, but later pairs are still parsed.
+TEST_F(SplitStringIntoKeyValuePairsTest, MissingKeyValueDelimiter) {
+  EXPECT_FALSE(SplitStringIntoKeyValuePairs("key1,key2:value2",
+                                            ':',  // Key-value delimiter
+                                            ',',  // Key-value pair delimiter
+                                            &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_TRUE(kv_pairs[0].first.empty());
+  EXPECT_TRUE(kv_pairs[0].second.empty());
+  EXPECT_EQ("key2", kv_pairs[1].first);
+  EXPECT_EQ("value2", kv_pairs[1].second);
+}
+
+TEST_F(SplitStringIntoKeyValuePairsTest, EmptyKeyWithKeyValueDelimiter) {
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs(":value1,key2:value2",
+                                           ':',  // Key-value delimiter
+                                           ',',  // Key-value pair delimiter
+                                           &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_TRUE(kv_pairs[0].first.empty());
+  EXPECT_EQ("value1", kv_pairs[0].second);
+  EXPECT_EQ("key2", kv_pairs[1].first);
+  EXPECT_EQ("value2", kv_pairs[1].second);
+}
+
+TEST_F(SplitStringIntoKeyValuePairsTest, TrailingAndLeadingPairDelimiter) {
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs(",key1:value1,key2:value2,",
+                                           ':',  // Key-value delimiter
+                                           ',',  // Key-value pair delimiter
+                                           &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_EQ("key1", kv_pairs[0].first);
+  EXPECT_EQ("value1", kv_pairs[0].second);
+  EXPECT_EQ("key2", kv_pairs[1].first);
+  EXPECT_EQ("value2", kv_pairs[1].second);
+}
+
+TEST_F(SplitStringIntoKeyValuePairsTest, EmptyPair) {
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs("key1:value1,,key3:value3",
+                                           ':',  // Key-value delimiter
+                                           ',',  // Key-value pair delimiter
+                                           &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_EQ("key1", kv_pairs[0].first);
+  EXPECT_EQ("value1", kv_pairs[0].second);
+  EXPECT_EQ("key3", kv_pairs[1].first);
+  EXPECT_EQ("value3", kv_pairs[1].second);
+}
+
+// An empty value makes the call return false, but the ("key1", "") pair is
+// still appended.
+TEST_F(SplitStringIntoKeyValuePairsTest, EmptyValue) {
+  EXPECT_FALSE(SplitStringIntoKeyValuePairs("key1:,key2:value2",
+                                            ':',  // Key-value delimiter
+                                            ',',  // Key-value pair delimiter
+                                            &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_EQ("key1", kv_pairs[0].first);
+  EXPECT_EQ("", kv_pairs[0].second);
+  EXPECT_EQ("key2", kv_pairs[1].first);
+  EXPECT_EQ("value2", kv_pairs[1].second);
+}
+
+// Whitespace around the key-value delimiter is preserved (only whitespace
+// around the pair delimiter is trimmed).
+TEST_F(SplitStringIntoKeyValuePairsTest, UntrimmedWhitespace) {
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs("key1 : value1",
+                                           ':',  // Key-value delimiter
+                                           ',',  // Key-value pair delimiter
+                                           &kv_pairs));
+  ASSERT_EQ(1U, kv_pairs.size());
+  EXPECT_EQ("key1 ", kv_pairs[0].first);
+  EXPECT_EQ(" value1", kv_pairs[0].second);
+}
+
+TEST_F(SplitStringIntoKeyValuePairsTest, TrimmedWhitespace) {
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs("key1:value1 , key2:value2",
+                                           ':',  // Key-value delimiter
+                                           ',',  // Key-value pair delimiter
+                                           &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_EQ("key1", kv_pairs[0].first);
+  EXPECT_EQ("value1", kv_pairs[0].second);
+  EXPECT_EQ("key2", kv_pairs[1].first);
+  EXPECT_EQ("value2", kv_pairs[1].second);
+}
+
+// A run of consecutive key-value delimiters is coalesced into one.
+TEST_F(SplitStringIntoKeyValuePairsTest, MultipleKeyValueDelimiters) {
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs("key1:::value1,key2:value2",
+                                           ':',  // Key-value delimiter
+                                           ',',  // Key-value pair delimiter
+                                           &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_EQ("key1", kv_pairs[0].first);
+  EXPECT_EQ("value1", kv_pairs[0].second);
+  EXPECT_EQ("key2", kv_pairs[1].first);
+  EXPECT_EQ("value2", kv_pairs[1].second);
+}
+
+// Characters other than the chosen delimiters (including punctuation and
+// whitespace) must pass through untouched.
+TEST_F(SplitStringIntoKeyValuePairsTest, OnlySplitAtGivenSeparator) {
+  std::string a("a ?!@#$%^&*()_+:/{}\\\t\nb");
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs(a + "X" + a + "Y" + a + "X" + a,
+                                           'X',  // Key-value delimiter
+                                           'Y',  // Key-value pair delimiter
+                                           &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_EQ(a, kv_pairs[0].first);
+  EXPECT_EQ(a, kv_pairs[0].second);
+  EXPECT_EQ(a, kv_pairs[1].first);
+  EXPECT_EQ(a, kv_pairs[1].second);
+}
+
+
+TEST_F(SplitStringIntoKeyValuePairsTest, DelimiterInValue) {
+  EXPECT_TRUE(SplitStringIntoKeyValuePairs("key1:va:ue1,key2:value2",
+                                           ':',  // Key-value delimiter
+                                           ',',  // Key-value pair delimiter
+                                           &kv_pairs));
+  ASSERT_EQ(2U, kv_pairs.size());
+  EXPECT_EQ("key1", kv_pairs[0].first);
+  EXPECT_EQ("va:ue1", kv_pairs[0].second);
+  EXPECT_EQ("key2", kv_pairs[1].first);
+  EXPECT_EQ("value2", kv_pairs[1].second);
+}
+
+// Splitting the empty string yields one empty token, per the SPLIT_WANT_ALL
+// semantics of SplitStringUsingSubstr().
+TEST(SplitStringUsingSubstrTest, EmptyString) {
+  std::vector<std::string> results;
+  SplitStringUsingSubstr(std::string(), "DELIMITER", &results);
+  ASSERT_EQ(1u, results.size());
+  EXPECT_THAT(results, ElementsAre(""));
+}
+
+// Core SplitString() behavior: empty input, empty separator list, multiple
+// separators, and trimming interaction.
+TEST(StringUtilTest, SplitString_Basics) {
+  std::vector<std::string> r;
+
+  r = SplitString(std::string(), ",:;", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+  EXPECT_TRUE(r.empty());
+
+  // Empty separator list
+  r = SplitString("hello, world", "", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+  ASSERT_EQ(1u, r.size());
+  EXPECT_EQ("hello, world", r[0]);
+
+  // Should split on any of the separators.
+  r = SplitString("::,,;;", ",:;", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+  ASSERT_EQ(7u, r.size());
+  // NOTE(review): 'auto str' copies each element; 'const auto&' would avoid
+  // the copies (harmless here since all strings are empty).
+  for (auto str : r)
+    ASSERT_TRUE(str.empty());
+
+  r = SplitString("red, green; blue:", ",:;", TRIM_WHITESPACE,
+                  SPLIT_WANT_NONEMPTY);
+  ASSERT_EQ(3u, r.size());
+  EXPECT_EQ("red", r[0]);
+  EXPECT_EQ("green", r[1]);
+  EXPECT_EQ("blue", r[2]);
+
+  // Want to split a string along whitespace sequences.
+  r = SplitString("  red green   \tblue\n", " \t\n", TRIM_WHITESPACE,
+                  SPLIT_WANT_NONEMPTY);
+  ASSERT_EQ(3u, r.size());
+  EXPECT_EQ("red", r[0]);
+  EXPECT_EQ("green", r[1]);
+  EXPECT_EQ("blue", r[2]);
+
+  // Weird case of splitting on spaces but not trimming.
+  r = SplitString(" red ", " ", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+  ASSERT_EQ(3u, r.size());
+  EXPECT_EQ("", r[0]);  // Before the first space.
+  EXPECT_EQ("red", r[1]);
+  EXPECT_EQ("", r[2]);  // After the last space.
+}
+
+// Exhaustively checks the four whitespace/result-type flag combinations.
+TEST(StringUtilTest, SplitString_WhitespaceAndResultType) {
+  std::vector<std::string> r;
+
+  // Empty input handling.
+  r = SplitString(std::string(), ",", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+  EXPECT_TRUE(r.empty());
+  r = SplitString(std::string(), ",", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
+  EXPECT_TRUE(r.empty());
+
+  // Input string is space and we're trimming.
+  r = SplitString(" ", ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+  ASSERT_EQ(1u, r.size());
+  EXPECT_EQ("", r[0]);
+  r = SplitString(" ", ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+  EXPECT_TRUE(r.empty());
+
+  // Test all 4 combinations of flags on ", ,".
+  r = SplitString(", ,", ",", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+  ASSERT_EQ(3u, r.size());
+  EXPECT_EQ("", r[0]);
+  EXPECT_EQ(" ", r[1]);
+  EXPECT_EQ("", r[2]);
+  r = SplitString(", ,", ",", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
+  ASSERT_EQ(1u, r.size());
+  ASSERT_EQ(" ", r[0]);
+  r = SplitString(", ,", ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+  ASSERT_EQ(3u, r.size());
+  EXPECT_EQ("", r[0]);
+  EXPECT_EQ("", r[1]);
+  EXPECT_EQ("", r[2]);
+  r = SplitString(", ,", ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+  ASSERT_TRUE(r.empty());
+}
+
+// Despite the historical "Skipped" names, empty tokens from leading,
+// trailing, or consecutive delimiters ARE returned (SPLIT_WANT_ALL).
+TEST(SplitStringUsingSubstrTest, StringWithNoDelimiter) {
+  std::vector<std::string> results;
+  SplitStringUsingSubstr("alongwordwithnodelimiter", "DELIMITER", &results);
+  ASSERT_EQ(1u, results.size());
+  EXPECT_THAT(results, ElementsAre("alongwordwithnodelimiter"));
+}
+
+TEST(SplitStringUsingSubstrTest, LeadingDelimitersSkipped) {
+  std::vector<std::string> results;
+  SplitStringUsingSubstr(
+      "DELIMITERDELIMITERDELIMITERoneDELIMITERtwoDELIMITERthree",
+      "DELIMITER",
+      &results);
+  ASSERT_EQ(6u, results.size());
+  EXPECT_THAT(results, ElementsAre("", "", "", "one", "two", "three"));
+}
+
+TEST(SplitStringUsingSubstrTest, ConsecutiveDelimitersSkipped) {
+  std::vector<std::string> results;
+  SplitStringUsingSubstr(
+      "unoDELIMITERDELIMITERDELIMITERdosDELIMITERtresDELIMITERDELIMITERcuatro",
+      "DELIMITER",
+      &results);
+  ASSERT_EQ(7u, results.size());
+  EXPECT_THAT(results, ElementsAre("uno", "", "", "dos", "tres", "", "cuatro"));
+}
+
+TEST(SplitStringUsingSubstrTest, TrailingDelimitersSkipped) {
+  std::vector<std::string> results;
+  SplitStringUsingSubstr(
+      "unDELIMITERdeuxDELIMITERtroisDELIMITERquatreDELIMITERDELIMITERDELIMITER",
+      "DELIMITER",
+      &results);
+  ASSERT_EQ(7u, results.size());
+  EXPECT_THAT(
+      results, ElementsAre("un", "deux", "trois", "quatre", "", "", ""));
+}
+
+// Same scenarios as SplitStringUsingSubstrTest above, but through the
+// non-copying StringPiece interface with explicit whitespace/result flags.
+TEST(SplitStringPieceUsingSubstrTest, StringWithNoDelimiter) {
+  std::vector<base::StringPiece> results =
+      SplitStringPieceUsingSubstr("alongwordwithnodelimiter", "DELIMITER",
+                                  base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  ASSERT_EQ(1u, results.size());
+  EXPECT_THAT(results, ElementsAre("alongwordwithnodelimiter"));
+}
+
+TEST(SplitStringPieceUsingSubstrTest, LeadingDelimitersSkipped) {
+  std::vector<base::StringPiece> results = SplitStringPieceUsingSubstr(
+      "DELIMITERDELIMITERDELIMITERoneDELIMITERtwoDELIMITERthree", "DELIMITER",
+      base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  ASSERT_EQ(6u, results.size());
+  EXPECT_THAT(results, ElementsAre("", "", "", "one", "two", "three"));
+}
+
+TEST(SplitStringPieceUsingSubstrTest, ConsecutiveDelimitersSkipped) {
+  std::vector<base::StringPiece> results = SplitStringPieceUsingSubstr(
+      "unoDELIMITERDELIMITERDELIMITERdosDELIMITERtresDELIMITERDELIMITERcuatro",
+      "DELIMITER", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  ASSERT_EQ(7u, results.size());
+  EXPECT_THAT(results, ElementsAre("uno", "", "", "dos", "tres", "", "cuatro"));
+}
+
+TEST(SplitStringPieceUsingSubstrTest, TrailingDelimitersSkipped) {
+  std::vector<base::StringPiece> results = SplitStringPieceUsingSubstr(
+      "unDELIMITERdeuxDELIMITERtroisDELIMITERquatreDELIMITERDELIMITERDELIMITER",
+      "DELIMITER", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  ASSERT_EQ(7u, results.size());
+  EXPECT_THAT(results,
+              ElementsAre("un", "deux", "trois", "quatre", "", "", ""));
+}
+
+TEST(SplitStringPieceUsingSubstrTest, KeepWhitespace) {
+  std::vector<base::StringPiece> results = SplitStringPieceUsingSubstr(
+      "un DELIMITERdeux\tDELIMITERtrois\nDELIMITERquatre", "DELIMITER",
+      base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+  ASSERT_EQ(4u, results.size());
+  EXPECT_THAT(results, ElementsAre("un ", "deux\t", "trois\n", "quatre"));
+}
+
+TEST(SplitStringPieceUsingSubstrTest, TrimWhitespace) {
+  std::vector<base::StringPiece> results = SplitStringPieceUsingSubstr(
+      "un DELIMITERdeux\tDELIMITERtrois\nDELIMITERquatre", "DELIMITER",
+      base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  ASSERT_EQ(4u, results.size());
+  EXPECT_THAT(results, ElementsAre("un", "deux", "trois", "quatre"));
+}
+
+TEST(SplitStringPieceUsingSubstrTest, SplitWantAll) {
+  std::vector<base::StringPiece> results = SplitStringPieceUsingSubstr(
+      "unDELIMITERdeuxDELIMITERtroisDELIMITERDELIMITER", "DELIMITER",
+      base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+  ASSERT_EQ(5u, results.size());
+  EXPECT_THAT(results, ElementsAre("un", "deux", "trois", "", ""));
+}
+
+TEST(SplitStringPieceUsingSubstrTest, SplitWantNonEmpty) {
+  std::vector<base::StringPiece> results = SplitStringPieceUsingSubstr(
+      "unDELIMITERdeuxDELIMITERtroisDELIMITERDELIMITER", "DELIMITER",
+      base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
+  ASSERT_EQ(3u, results.size());
+  EXPECT_THAT(results, ElementsAre("un", "deux", "trois"));
+}
+
+// KEEP_WHITESPACE must preserve interior and surrounding whitespace exactly.
+TEST(StringSplitTest, StringSplitKeepWhitespace) {
+  std::vector<std::string> r;
+
+  r = SplitString("   ", "*", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+  ASSERT_EQ(1U, r.size());
+  EXPECT_EQ(r[0], "   ");
+
+  r = SplitString("\t  \ta\t ", "\t", base::KEEP_WHITESPACE,
+                  base::SPLIT_WANT_ALL);
+  ASSERT_EQ(4U, r.size());
+  EXPECT_EQ(r[0], "");
+  EXPECT_EQ(r[1], "  ");
+  EXPECT_EQ(r[2], "a");
+  EXPECT_EQ(r[3], " ");
+
+  r = SplitString("\ta\t\nb\tcc", "\n", base::KEEP_WHITESPACE,
+                  base::SPLIT_WANT_ALL);
+  ASSERT_EQ(2U, r.size());
+  EXPECT_EQ(r[0], "\ta\t");
+  EXPECT_EQ(r[1], "b\tcc");
+}
+
+// Table-driven check of splitting on the whitespace character set with
+// SPLIT_WANT_NONEMPTY; at most the first two outputs are verified.
+TEST(StringSplitTest, SplitStringAlongWhitespace) {
+  struct TestData {
+    const char* input;
+    const size_t expected_result_count;
+    const char* output1;
+    const char* output2;
+  } data[] = {
+    { "a",       1, "a",  ""   },
+    { " ",       0, "",   ""   },
+    { " a",      1, "a",  ""   },
+    { " ab ",    1, "ab", ""   },
+    { " ab c",   2, "ab", "c"  },
+    { " ab c ",  2, "ab", "c"  },
+    { " ab cd",  2, "ab", "cd" },
+    { " ab cd ", 2, "ab", "cd" },
+    { " \ta\t",  1, "a",  ""   },
+    { " b\ta\t", 2, "b",  "a"  },
+    { " b\tat",  2, "b",  "at" },
+    { "b\tat",   2, "b",  "at" },
+    { "b\t at",  2, "b",  "at" },
+  };
+  for (size_t i = 0; i < arraysize(data); ++i) {
+    std::vector<std::string> results = base::SplitString(
+        data[i].input, kWhitespaceASCII, base::KEEP_WHITESPACE,
+        base::SPLIT_WANT_NONEMPTY);
+    ASSERT_EQ(data[i].expected_result_count, results.size());
+    if (data[i].expected_result_count > 0)
+      ASSERT_EQ(data[i].output1, results[0]);
+    if (data[i].expected_result_count > 1)
+      ASSERT_EQ(data[i].output2, results[1]);
+  }
+}
+
+} // namespace base
diff --git a/libchrome/base/strings/string_tokenizer.h b/libchrome/base/strings/string_tokenizer.h
new file mode 100644
index 0000000..8defbac
--- /dev/null
+++ b/libchrome/base/strings/string_tokenizer.h
@@ -0,0 +1,260 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_TOKENIZER_H_
+#define BASE_STRINGS_STRING_TOKENIZER_H_
+
+#include <algorithm>
+#include <string>
+
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// StringTokenizerT is a simple string tokenizer class. It works like an
+// iterator that with each step (see the Advance method) updates members that
+// refer to the next token in the input string. The user may optionally
+// configure the tokenizer to return delimiters.
+//
+// Warning: be careful not to pass a C string into the 2-arg constructor:
+// StringTokenizer t("this is a test", " "); // WRONG
+// This will create a temporary std::string, save the begin() and end()
+// iterators, and then the string will be freed before we actually start
+// tokenizing it.
+// Instead, use a std::string or use the 3 arg constructor of CStringTokenizer.
+//
+//
+// EXAMPLE 1:
+//
+// char input[] = "this is a test";
+// CStringTokenizer t(input, input + strlen(input), " ");
+// while (t.GetNext()) {
+// printf("%s\n", t.token().c_str());
+// }
+//
+// Output:
+//
+// this
+// is
+// a
+// test
+//
+//
+// EXAMPLE 2:
+//
+// std::string input = "no-cache=\"foo, bar\", private";
+// StringTokenizer t(input, ", ");
+// t.set_quote_chars("\"");
+// while (t.GetNext()) {
+// printf("%s\n", t.token().c_str());
+// }
+//
+// Output:
+//
+// no-cache="foo, bar"
+// private
+//
+//
+// EXAMPLE 3:
+//
+// bool next_is_option = false, next_is_value = false;
+// std::string input = "text/html; charset=UTF-8; foo=bar";
+// StringTokenizer t(input, "; =");
+// t.set_options(StringTokenizer::RETURN_DELIMS);
+// while (t.GetNext()) {
+// if (t.token_is_delim()) {
+// switch (*t.token_begin()) {
+// case ';':
+// next_is_option = true;
+// break;
+// case '=':
+// next_is_value = true;
+// break;
+// }
+// } else {
+// const char* label;
+// if (next_is_option) {
+// label = "option-name";
+// next_is_option = false;
+// } else if (next_is_value) {
+// label = "option-value";
+// next_is_value = false;
+// } else {
+// label = "mime-type";
+// }
+// printf("%s: %s\n", label, t.token().c_str());
+// }
+// }
+//
+//
+template <class str, class const_iterator>
+class StringTokenizerT {
+ public:
+  typedef typename str::value_type char_type;
+
+  // Options that may be passed to set_options()
+  enum {
+    // Specifies the delimiters should be returned as tokens
+    RETURN_DELIMS = 1 << 0,
+  };
+
+  // The string object must live longer than the tokenizer. (In particular this
+  // should not be constructed with a temporary.)
+  StringTokenizerT(const str& string,
+                   const str& delims) {
+    Init(string.begin(), string.end(), delims);
+  }
+
+  StringTokenizerT(const_iterator string_begin,
+                   const_iterator string_end,
+                   const str& delims) {
+    Init(string_begin, string_end, delims);
+  }
+
+  // Set the options for this tokenizer. By default, this is 0.
+  void set_options(int options) { options_ = options; }
+
+  // Set the characters to regard as quotes. By default, this is empty. When
+  // a quote char is encountered, the tokenizer will switch into a mode where
+  // it ignores delimiters that it finds. It switches out of this mode once it
+  // finds another instance of the quote char. If a backslash is encountered
+  // within a quoted string, then the next character is skipped.
+  void set_quote_chars(const str& quotes) { quotes_ = quotes; }
+
+  // Call this method to advance the tokenizer to the next delimiter. This
+  // returns false if the tokenizer is complete. This method must be called
+  // before calling any of the token* methods.
+  bool GetNext() {
+    // The quick path is only valid when neither quoting nor RETURN_DELIMS
+    // is in effect.
+    if (quotes_.empty() && options_ == 0)
+      return QuickGetNext();
+    else
+      return FullGetNext();
+  }
+
+  // Start iterating through tokens from the beginning of the string.
+  void Reset() {
+    token_end_ = start_pos_;
+  }
+
+  // Returns true if token is a delimiter. When the tokenizer is constructed
+  // with the RETURN_DELIMS option, this method can be used to check if the
+  // returned token is actually a delimiter.
+  bool token_is_delim() const { return token_is_delim_; }
+
+  // If GetNext() returned true, then these methods may be used to read the
+  // value of the token.
+  const_iterator token_begin() const { return token_begin_; }
+  const_iterator token_end() const { return token_end_; }
+  str token() const { return str(token_begin_, token_end_); }
+  base::StringPiece token_piece() const {
+    return base::StringPiece(&*token_begin_,
+                             std::distance(token_begin_, token_end_));
+  }
+
+ private:
+  // Shared constructor logic. Both token iterators start at the beginning
+  // of the input so the first GetNext() scans from the front.
+  void Init(const_iterator string_begin,
+            const_iterator string_end,
+            const str& delims) {
+    start_pos_ = string_begin;
+    token_begin_ = string_begin;
+    token_end_ = string_begin;
+    end_ = string_end;
+    delims_ = delims;
+    options_ = 0;
+    token_is_delim_ = false;
+  }
+
+  // Implementation of GetNext() for when we have no quote characters. We have
+  // two separate implementations because AdvanceOne() is a hot spot in large
+  // text files with large tokens.
+  bool QuickGetNext() {
+    token_is_delim_ = false;
+    for (;;) {
+      token_begin_ = token_end_;
+      if (token_end_ == end_)
+        return false;
+      ++token_end_;
+      if (delims_.find(*token_begin_) == str::npos)
+        break;
+      // else skip over delimiter.
+    }
+    // Extend the token up to the next delimiter (or end of input).
+    while (token_end_ != end_ && delims_.find(*token_end_) == str::npos)
+      ++token_end_;
+    return true;
+  }
+
+  // Implementation of GetNext() for when we have to take quotes into account.
+  bool FullGetNext() {
+    AdvanceState state;
+    token_is_delim_ = false;
+    for (;;) {
+      token_begin_ = token_end_;
+      if (token_end_ == end_)
+        return false;
+      ++token_end_;
+      if (AdvanceOne(&state, *token_begin_))
+        break;
+      if (options_ & RETURN_DELIMS) {
+        // Return the single delimiter character as its own token.
+        token_is_delim_ = true;
+        return true;
+      }
+      // else skip over delimiter.
+    }
+    while (token_end_ != end_ && AdvanceOne(&state, *token_end_))
+      ++token_end_;
+    return true;
+  }
+
+  bool IsDelim(char_type c) const {
+    return delims_.find(c) != str::npos;
+  }
+
+  bool IsQuote(char_type c) const {
+    return quotes_.find(c) != str::npos;
+  }
+
+  // Tracks quoting/escaping state across successive AdvanceOne() calls.
+  struct AdvanceState {
+    bool in_quote;
+    bool in_escape;
+    char_type quote_char;
+    AdvanceState() : in_quote(false), in_escape(false), quote_char('\0') {}
+  };
+
+  // Returns true if a delimiter was not hit.
+  bool AdvanceOne(AdvanceState* state, char_type c) {
+    if (state->in_quote) {
+      if (state->in_escape) {
+        state->in_escape = false;
+      } else if (c == '\\') {
+        state->in_escape = true;
+      } else if (c == state->quote_char) {
+        state->in_quote = false;
+      }
+    } else {
+      if (IsDelim(c))
+        return false;
+      // Note: intentionally records |c| as the (potential) opening quote
+      // char before testing whether it actually is one.
+      state->in_quote = IsQuote(state->quote_char = c);
+    }
+    return true;
+  }
+
+  const_iterator start_pos_;   // Beginning of the input; used by Reset().
+  const_iterator token_begin_;
+  const_iterator token_end_;
+  const_iterator end_;
+  str delims_;
+  str quotes_;
+  int options_;
+  bool token_is_delim_;
+};
+
+typedef StringTokenizerT<std::string, std::string::const_iterator>
+ StringTokenizer;
+typedef StringTokenizerT<std::wstring, std::wstring::const_iterator>
+ WStringTokenizer;
+typedef StringTokenizerT<std::string, const char*> CStringTokenizer;
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRING_TOKENIZER_H_
diff --git a/libchrome/base/strings/string_tokenizer_unittest.cc b/libchrome/base/strings/string_tokenizer_unittest.cc
new file mode 100644
index 0000000..d391845
--- /dev/null
+++ b/libchrome/base/strings/string_tokenizer_unittest.cc
@@ -0,0 +1,234 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_tokenizer.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::string;
+
+namespace base {
+
+namespace {
+
+// Basic splitting on a single delimiter character.
+TEST(StringTokenizerTest, Simple) {
+  string input = "this is a test";
+  StringTokenizer t(input, " ");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("this"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("is"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("a"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("test"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+// Reset() must restart iteration from the beginning of the input.
+TEST(StringTokenizerTest, Reset) {
+  string input = "this is a test";
+  StringTokenizer t(input, " ");
+
+  for (int i = 0; i < 2; ++i) {
+    EXPECT_TRUE(t.GetNext());
+    EXPECT_EQ(string("this"), t.token());
+
+    EXPECT_TRUE(t.GetNext());
+    EXPECT_EQ(string("is"), t.token());
+
+    EXPECT_TRUE(t.GetNext());
+    EXPECT_EQ(string("a"), t.token());
+
+    EXPECT_TRUE(t.GetNext());
+    EXPECT_EQ(string("test"), t.token());
+
+    EXPECT_FALSE(t.GetNext());
+    t.Reset();
+  }
+}
+
+// With RETURN_DELIMS set, each delimiter is returned as its own token.
+TEST(StringTokenizerTest, RetDelims) {
+  string input = "this is a test";
+  StringTokenizer t(input, " ");
+  t.set_options(StringTokenizer::RETURN_DELIMS);
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("this"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("is"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("a"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("test"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+// Any character in the delimiter set splits the input.
+TEST(StringTokenizerTest, ManyDelims) {
+  string input = "this: is, a-test";
+  StringTokenizer t(input, ": ,-");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("this"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("is"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("a"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("test"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+// Walks an HTTP-style header with RETURN_DELIMS, checking token_is_delim()
+// for every returned token.
+TEST(StringTokenizerTest, ParseHeader) {
+  string input = "Content-Type: text/html ; charset=UTF-8";
+  StringTokenizer t(input, ": ;=");
+  t.set_options(StringTokenizer::RETURN_DELIMS);
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+  EXPECT_EQ(string("Content-Type"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(":"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+  EXPECT_EQ(string("text/html"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(";"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+  EXPECT_EQ(string("charset"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string("="), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+  EXPECT_EQ(string("UTF-8"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+}
+
+// Delimiters inside a quoted region stay part of the token.
+TEST(StringTokenizerTest, ParseQuotedString) {
+  string input = "foo bar 'hello world' baz";
+  StringTokenizer t(input, " ");
+  t.set_quote_chars("'");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("foo"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("bar"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("'hello world'"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("baz"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+// An unterminated quote consumes the rest of the input as one token.
+TEST(StringTokenizerTest, ParseQuotedString_Malformed) {
+  string input = "bar 'hello wo";
+  StringTokenizer t(input, " ");
+  t.set_quote_chars("'");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("bar"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("'hello wo"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+// With multiple quote chars, only the char that opened a region closes it.
+TEST(StringTokenizerTest, ParseQuotedString_Multiple) {
+  string input = "bar 'hel\"lo\" wo' baz\"";
+  StringTokenizer t(input, " ");
+  t.set_quote_chars("'\"");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("bar"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("'hel\"lo\" wo'"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("baz\""), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+// A backslash-escaped quote does not terminate the quoted region.
+TEST(StringTokenizerTest, ParseQuotedString_EscapedQuotes) {
+  string input = "foo 'don\\'t do that'";
+  StringTokenizer t(input, " ");
+  t.set_quote_chars("'");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("foo"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("'don\\'t do that'"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+// Quoting also protects against multi-character delimiter sets.
+TEST(StringTokenizerTest, ParseQuotedString_EscapedQuotes2) {
+  string input = "foo='a, b', bar";
+  StringTokenizer t(input, ", ");
+  t.set_quote_chars("'");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("foo='a, b'"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("bar"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+}  // namespace
+
+} // namespace base
diff --git a/libchrome/base/strings/string_util.cc b/libchrome/base/strings/string_util.cc
new file mode 100644
index 0000000..cb668ed
--- /dev/null
+++ b/libchrome/base/strings/string_util.cc
@@ -0,0 +1,997 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_util.h"
+
+#include <ctype.h>
+#include <errno.h>
+#include <math.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <wchar.h>
+#include <wctype.h>
+
+#include <algorithm>
+#include <limits>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/singleton.h"
+#include "base/strings/utf_string_conversion_utils.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/third_party/icu/icu_utf.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+// Force the singleton used by EmptyString[16] to be a unique type. This
+// prevents other code that might accidentally use Singleton<string> from
+// getting our internal one.
+struct EmptyStrings {
+  EmptyStrings() {}
+  const std::string s;
+  const string16 s16;
+
+  static EmptyStrings* GetInstance() {
+    return Singleton<EmptyStrings>::get();
+  }
+};
+
+// Used by ReplaceStringPlaceholders to track the position in the string of
+// replaced parameters.
+struct ReplacementOffset {
+  ReplacementOffset(uintptr_t parameter, size_t offset)
+      : parameter(parameter),
+        offset(offset) {}
+
+  // Index of the parameter.
+  uintptr_t parameter;
+
+  // Starting position in the string.
+  size_t offset;
+};
+
+// Orders ReplacementOffsets by parameter index.
+static bool CompareParameter(const ReplacementOffset& elem1,
+                             const ReplacementOffset& elem2) {
+  return elem1.parameter < elem2.parameter;
+}
+
+// Assuming that a pointer is the size of a "machine word", then
+// uintptr_t is an integer type that is also a machine word.
+typedef uintptr_t MachineWord;
+const uintptr_t kMachineWordAlignmentMask = sizeof(MachineWord) - 1;
+
+inline bool IsAlignedToMachineWord(const void* pointer) {
+  return !(reinterpret_cast<MachineWord>(pointer) & kMachineWordAlignmentMask);
+}
+
+// Rounds |pointer| down to the nearest machine-word boundary.
+template<typename T> inline T* AlignToMachineWord(T* pointer) {
+  return reinterpret_cast<T*>(reinterpret_cast<MachineWord>(pointer) &
+                              ~kMachineWordAlignmentMask);
+}
+
+// Bit masks used by DoIsStringASCII below: ANDing a word of packed
+// characters with the mask yields non-zero iff at least one character is
+// outside the ASCII range; specialized per word size and character type.
+template<size_t size, typename CharacterType> struct NonASCIIMask;
+template<> struct NonASCIIMask<4, char16> {
+  static inline uint32_t value() { return 0xFF80FF80U; }
+};
+template<> struct NonASCIIMask<4, char> {
+  static inline uint32_t value() { return 0x80808080U; }
+};
+template<> struct NonASCIIMask<8, char16> {
+  static inline uint64_t value() { return 0xFF80FF80FF80FF80ULL; }
+};
+template<> struct NonASCIIMask<8, char> {
+  static inline uint64_t value() { return 0x8080808080808080ULL; }
+};
+#if defined(WCHAR_T_IS_UTF32)
+template<> struct NonASCIIMask<4, wchar_t> {
+  static inline uint32_t value() { return 0xFFFFFF80U; }
+};
+template<> struct NonASCIIMask<8, wchar_t> {
+  static inline uint64_t value() { return 0xFFFFFF80FFFFFF80ULL; }
+};
+#endif  // WCHAR_T_IS_UTF32
+
+}  // namespace
+
+// Scans a wprintf-style format string and returns true only if every
+// conversion specification it contains is portable: %s and %c are only
+// accepted when qualified with 'l', and %S, %C, %F, %D, %O and %U are
+// always rejected.
+bool IsWprintfFormatPortable(const wchar_t* format) {
+  for (const wchar_t* position = format; *position != '\0'; ++position) {
+    if (*position == '%') {
+      bool in_specification = true;
+      bool modifier_l = false;
+      while (in_specification) {
+        // Eat up characters until reaching a known specifier.
+        if (*++position == '\0') {
+          // The format string ended in the middle of a specification. Call
+          // it portable because no unportable specifications were found. The
+          // string is equally broken on all platforms.
+          return true;
+        }
+
+        if (*position == 'l') {
+          // 'l' is the only thing that can save the 's' and 'c' specifiers.
+          modifier_l = true;
+        } else if (((*position == 's' || *position == 'c') && !modifier_l) ||
+                   *position == 'S' || *position == 'C' || *position == 'F' ||
+                   *position == 'D' || *position == 'O' || *position == 'U') {
+          // Not portable.
+          return false;
+        }
+
+        if (wcschr(L"diouxXeEfgGaAcspn%", *position)) {
+          // Portable, keep scanning the rest of the format string.
+          in_specification = false;
+        }
+      }
+    }
+  }
+
+  return true;
+}
+
+namespace {
+
+// Shared implementation for the ToLowerASCII() overloads below: builds a
+// new string with each character lowered via the per-character
+// ToLowerASCII().
+template<typename StringType>
+StringType ToLowerASCIIImpl(BasicStringPiece<StringType> str) {
+  StringType ret;
+  ret.reserve(str.size());
+  for (size_t i = 0; i < str.size(); i++)
+    ret.push_back(ToLowerASCII(str[i]));
+  return ret;
+}
+
+// As above, for ToUpperASCII().
+template<typename StringType>
+StringType ToUpperASCIIImpl(BasicStringPiece<StringType> str) {
+  StringType ret;
+  ret.reserve(str.size());
+  for (size_t i = 0; i < str.size(); i++)
+    ret.push_back(ToUpperASCII(str[i]));
+  return ret;
+}
+
+}  // namespace
+
+std::string ToLowerASCII(StringPiece str) {
+  return ToLowerASCIIImpl<std::string>(str);
+}
+
+string16 ToLowerASCII(StringPiece16 str) {
+  return ToLowerASCIIImpl<string16>(str);
+}
+
+std::string ToUpperASCII(StringPiece str) {
+  return ToUpperASCIIImpl<std::string>(str);
+}
+
+string16 ToUpperASCII(StringPiece16 str) {
+  return ToUpperASCIIImpl<string16>(str);
+}
+
+// Shared implementation for the case-insensitive comparisons below. Returns
+// -1, 0 or 1 the way strcmp() does, comparing ASCII-lowered characters; on
+// a common-prefix tie the shorter string orders first.
+template<class StringType>
+int CompareCaseInsensitiveASCIIT(BasicStringPiece<StringType> a,
+                                 BasicStringPiece<StringType> b) {
+  // Find the first characters that aren't equal and compare them. If the end
+  // of one of the strings is found before a nonequal character, the lengths
+  // of the strings are compared.
+  size_t i = 0;
+  while (i < a.length() && i < b.length()) {
+    typename StringType::value_type lower_a = ToLowerASCII(a[i]);
+    typename StringType::value_type lower_b = ToLowerASCII(b[i]);
+    if (lower_a < lower_b)
+      return -1;
+    if (lower_a > lower_b)
+      return 1;
+    i++;
+  }
+
+  // End of one string hit before finding a different character. Expect the
+  // common case to be "strings equal" at this point so check that first.
+  if (a.length() == b.length())
+    return 0;
+
+  if (a.length() < b.length())
+    return -1;
+  return 1;
+}
+
+int CompareCaseInsensitiveASCII(StringPiece a, StringPiece b) {
+  return CompareCaseInsensitiveASCIIT<std::string>(a, b);
+}
+
+int CompareCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b) {
+  return CompareCaseInsensitiveASCIIT<string16>(a, b);
+}
+
+bool EqualsCaseInsensitiveASCII(StringPiece a, StringPiece b) {
+  // Length check first: cheaply rejects most non-equal pairs.
+  if (a.length() != b.length())
+    return false;
+  return CompareCaseInsensitiveASCIIT<std::string>(a, b) == 0;
+}
+
+bool EqualsCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b) {
+  if (a.length() != b.length())
+    return false;
+  return CompareCaseInsensitiveASCIIT<string16>(a, b) == 0;
+}
+
+// Returns a shared, immutable empty string backed by the EmptyStrings
+// singleton, avoiding a construction at each call site.
+const std::string& EmptyString() {
+  return EmptyStrings::GetInstance()->s;
+}
+
+const string16& EmptyString16() {
+  return EmptyStrings::GetInstance()->s16;
+}
+
+// Shared implementation of ReplaceChars()/RemoveChars(): replaces every
+// occurrence of any single character from |replace_chars| in |input| with
+// the (possibly empty) string |replace_with|, writing to |output|. Returns
+// true if at least one replacement was made.
+template<typename STR>
+bool ReplaceCharsT(const STR& input,
+                   const STR& replace_chars,
+                   const STR& replace_with,
+                   STR* output) {
+  bool removed = false;
+  size_t replace_length = replace_with.length();
+
+  *output = input;
+
+  size_t found = output->find_first_of(replace_chars);
+  while (found != STR::npos) {
+    removed = true;
+    output->replace(found, 1, replace_with);
+    // Resume searching after the inserted text so a replacement that itself
+    // contains a character from |replace_chars| is not rescanned.
+    found = output->find_first_of(replace_chars, found + replace_length);
+  }
+
+  return removed;
+}
+
+bool ReplaceChars(const string16& input,
+                  const StringPiece16& replace_chars,
+                  const string16& replace_with,
+                  string16* output) {
+  return ReplaceCharsT(input, replace_chars.as_string(), replace_with, output);
+}
+
+bool ReplaceChars(const std::string& input,
+                  const StringPiece& replace_chars,
+                  const std::string& replace_with,
+                  std::string* output) {
+  return ReplaceCharsT(input, replace_chars.as_string(), replace_with, output);
+}
+
+// RemoveChars() is ReplaceChars() with an empty replacement string.
+bool RemoveChars(const string16& input,
+                 const StringPiece16& remove_chars,
+                 string16* output) {
+  return ReplaceChars(input, remove_chars.as_string(), string16(), output);
+}
+
+bool RemoveChars(const std::string& input,
+                 const StringPiece& remove_chars,
+                 std::string* output) {
+  return ReplaceChars(input, remove_chars.as_string(), std::string(), output);
+}
+
+// Shared implementation of the copying TrimString() overloads. Strips any
+// character in |trim_chars| from the requested end(s) of |input| into
+// |output| and reports which end(s) were actually trimmed.
+template<typename Str>
+TrimPositions TrimStringT(const Str& input,
+                          BasicStringPiece<Str> trim_chars,
+                          TrimPositions positions,
+                          Str* output) {
+  // Find the edges of leading/trailing whitespace as desired. Need to use
+  // a StringPiece version of input to be able to call find* on it with the
+  // StringPiece version of trim_chars (normally the trim_chars will be a
+  // constant so avoid making a copy).
+  BasicStringPiece<Str> input_piece(input);
+  // Note: wraps around for empty input, but the input.empty() check below
+  // runs before |last_char| is ever used in that case.
+  const size_t last_char = input.length() - 1;
+  const size_t first_good_char = (positions & TRIM_LEADING) ?
+      input_piece.find_first_not_of(trim_chars) : 0;
+  const size_t last_good_char = (positions & TRIM_TRAILING) ?
+      input_piece.find_last_not_of(trim_chars) : last_char;
+
+  // When the string was all trimmed, report that we stripped off characters
+  // from whichever position the caller was interested in. For empty input, we
+  // stripped no characters, but we still need to clear |output|.
+  if (input.empty() ||
+      (first_good_char == Str::npos) || (last_good_char == Str::npos)) {
+    bool input_was_empty = input.empty();  // in case output == &input
+    output->clear();
+    return input_was_empty ? TRIM_NONE : positions;
+  }
+
+  // Trim.
+  *output =
+      input.substr(first_good_char, last_good_char - first_good_char + 1);
+
+  // Return where we trimmed from.
+  return static_cast<TrimPositions>(
+      ((first_good_char == 0) ? TRIM_NONE : TRIM_LEADING) |
+      ((last_good_char == last_char) ? TRIM_NONE : TRIM_TRAILING));
+}
+
+bool TrimString(const string16& input,
+                StringPiece16 trim_chars,
+                string16* output) {
+  return TrimStringT(input, trim_chars, TRIM_ALL, output) != TRIM_NONE;
+}
+
+bool TrimString(const std::string& input,
+                StringPiece trim_chars,
+                std::string* output) {
+  return TrimStringT(input, trim_chars, TRIM_ALL, output) != TRIM_NONE;
+}
+
+// Piece-returning counterpart of TrimStringT(): returns a view into
+// |input|, so no copy is made.
+template<typename Str>
+BasicStringPiece<Str> TrimStringPieceT(BasicStringPiece<Str> input,
+                                       BasicStringPiece<Str> trim_chars,
+                                       TrimPositions positions) {
+  size_t begin = (positions & TRIM_LEADING) ?
+      input.find_first_not_of(trim_chars) : 0;
+  size_t end = (positions & TRIM_TRAILING) ?
+      input.find_last_not_of(trim_chars) + 1 : input.size();
+  return input.substr(begin, end - begin);
+}
+
+StringPiece16 TrimString(StringPiece16 input,
+                         const StringPiece16& trim_chars,
+                         TrimPositions positions) {
+  return TrimStringPieceT(input, trim_chars, positions);
+}
+
+StringPiece TrimString(StringPiece input,
+                       const StringPiece& trim_chars,
+                       TrimPositions positions) {
+  return TrimStringPieceT(input, trim_chars, positions);
+}
+
+// Truncates |input| to at most |byte_size| bytes without splitting a UTF-8
+// sequence: walks backwards from the truncation point until a complete,
+// valid UTF-8 character is found and cuts the string there.
+void TruncateUTF8ToByteSize(const std::string& input,
+                            const size_t byte_size,
+                            std::string* output) {
+  DCHECK(output);
+  if (byte_size > input.length()) {
+    *output = input;
+    return;
+  }
+  DCHECK_LE(byte_size,
+            static_cast<uint32_t>(std::numeric_limits<int32_t>::max()));
+  // Note: This cast is necessary because CBU8_NEXT uses int32_ts.
+  int32_t truncation_length = static_cast<int32_t>(byte_size);
+  int32_t char_index = truncation_length - 1;
+  const char* data = input.data();
+
+  // Using CBU8, we will move backwards from the truncation point
+  // to the beginning of the string looking for a valid UTF8
+  // character. Once a full UTF8 character is found, we will
+  // truncate the string to the end of that character.
+  while (char_index >= 0) {
+    int32_t prev = char_index;
+    base_icu::UChar32 code_point = 0;
+    // CBU8_NEXT advances |char_index| past the character starting there.
+    CBU8_NEXT(data, char_index, truncation_length, code_point);
+    if (!IsValidCharacter(code_point) ||
+        !IsValidCodepoint(code_point)) {
+      char_index = prev - 1;
+    } else {
+      break;
+    }
+  }
+
+  if (char_index >= 0 )
+    *output = input.substr(0, char_index);
+  else
+    output->clear();
+}
+
+// Convenience wrappers over TrimStringT()/TrimStringPieceT() that trim the
+// standard whitespace character sets.
+TrimPositions TrimWhitespace(const string16& input,
+                             TrimPositions positions,
+                             string16* output) {
+  return TrimStringT(input, StringPiece16(kWhitespaceUTF16), positions, output);
+}
+
+StringPiece16 TrimWhitespace(StringPiece16 input,
+                             TrimPositions positions) {
+  return TrimStringPieceT(input, StringPiece16(kWhitespaceUTF16), positions);
+}
+
+TrimPositions TrimWhitespaceASCII(const std::string& input,
+                                  TrimPositions positions,
+                                  std::string* output) {
+  return TrimStringT(input, StringPiece(kWhitespaceASCII), positions, output);
+}
+
+StringPiece TrimWhitespaceASCII(StringPiece input, TrimPositions positions) {
+  return TrimStringPieceT(input, StringPiece(kWhitespaceASCII), positions);
+}
+
+// Shared implementation of CollapseWhitespace(): reduces each run of
+// whitespace to a single space, trims leading/trailing whitespace, and
+// optionally deletes whitespace runs containing CR or LF entirely.
+template<typename STR>
+STR CollapseWhitespaceT(const STR& text,
+                        bool trim_sequences_with_line_breaks) {
+  STR result;
+  result.resize(text.size());
+
+  // Set flags to pretend we're already in a trimmed whitespace sequence, so we
+  // will trim any leading whitespace.
+  bool in_whitespace = true;
+  bool already_trimmed = true;
+
+  int chars_written = 0;
+  for (typename STR::const_iterator i(text.begin()); i != text.end(); ++i) {
+    if (IsUnicodeWhitespace(*i)) {
+      if (!in_whitespace) {
+        // Reduce all whitespace sequences to a single space.
+        in_whitespace = true;
+        result[chars_written++] = L' ';
+      }
+      if (trim_sequences_with_line_breaks && !already_trimmed &&
+          ((*i == '\n') || (*i == '\r'))) {
+        // Whitespace sequences containing CR or LF are eliminated entirely.
+        already_trimmed = true;
+        --chars_written;
+      }
+    } else {
+      // Non-whitespace characters are copied straight across.
+      in_whitespace = false;
+      already_trimmed = false;
+      result[chars_written++] = *i;
+    }
+  }
+
+  if (in_whitespace && !already_trimmed) {
+    // Any trailing whitespace is eliminated.
+    --chars_written;
+  }
+
+  result.resize(chars_written);
+  return result;
+}
+
+string16 CollapseWhitespace(const string16& text,
+                            bool trim_sequences_with_line_breaks) {
+  return CollapseWhitespaceT(text, trim_sequences_with_line_breaks);
+}
+
+std::string CollapseWhitespaceASCII(const std::string& text,
+                                    bool trim_sequences_with_line_breaks) {
+  return CollapseWhitespaceT(text, trim_sequences_with_line_breaks);
+}
+
+// Returns true if |input| consists solely of characters from |characters|;
+// an empty |input| trivially satisfies this.
+bool ContainsOnlyChars(const StringPiece& input,
+                       const StringPiece& characters) {
+  return input.find_first_not_of(characters) == StringPiece::npos;
+}
+
+bool ContainsOnlyChars(const StringPiece16& input,
+                       const StringPiece16& characters) {
+  return input.find_first_not_of(characters) == StringPiece16::npos;
+}
+
+// Word-at-a-time ASCII check: ORs all characters together — first aligning
+// to a machine-word boundary, then striding one word at a time — and tests
+// the accumulated bits against the per-type NonASCIIMask defined above.
+template <class Char>
+inline bool DoIsStringASCII(const Char* characters, size_t length) {
+  MachineWord all_char_bits = 0;
+  const Char* end = characters + length;
+
+  // Prologue: align the input.
+  while (!IsAlignedToMachineWord(characters) && characters != end) {
+    all_char_bits |= *characters;
+    ++characters;
+  }
+
+  // Compare the values of CPU word size.
+  const Char* word_end = AlignToMachineWord(end);
+  const size_t loop_increment = sizeof(MachineWord) / sizeof(Char);
+  while (characters < word_end) {
+    all_char_bits |= *(reinterpret_cast<const MachineWord*>(characters));
+    characters += loop_increment;
+  }
+
+  // Process the remaining bytes.
+  while (characters != end) {
+    all_char_bits |= *characters;
+    ++characters;
+  }
+
+  MachineWord non_ascii_bit_mask =
+      NonASCIIMask<sizeof(MachineWord), Char>::value();
+  return !(all_char_bits & non_ascii_bit_mask);
+}
+
+bool IsStringASCII(const StringPiece& str) {
+  return DoIsStringASCII(str.data(), str.length());
+}
+
+bool IsStringASCII(const StringPiece16& str) {
+  return DoIsStringASCII(str.data(), str.length());
+}
+
+bool IsStringASCII(const string16& str) {
+  return DoIsStringASCII(str.data(), str.length());
+}
+
+#if defined(WCHAR_T_IS_UTF32)
+bool IsStringASCII(const std::wstring& str) {
+  return DoIsStringASCII(str.data(), str.length());
+}
+#endif
+
+// Returns true if |str| is well-formed UTF-8 and every decoded code point
+// passes IsValidCharacter(). CBU8_NEXT yields a negative code point on a
+// malformed sequence, which IsValidCharacter() rejects.
+bool IsStringUTF8(const StringPiece& str) {
+  const char *src = str.data();
+  int32_t src_len = static_cast<int32_t>(str.length());
+  int32_t char_index = 0;
+
+  while (char_index < src_len) {
+    int32_t code_point;
+    CBU8_NEXT(src, char_index, src_len, code_point);
+    if (!IsValidCharacter(code_point))
+      return false;
+  }
+  return true;
+}
+
+// Implementation note: Normally this function will be called with a hardcoded
+// constant for the lowercase_ascii parameter. Constructing a StringPiece from
+// a C constant requires running strlen, so the result will be two passes
+// through the buffers, one to find the length of lowercase_ascii, and one to
+// compare each letter.
+//
+// This function could have taken a const char* to avoid this and only do one
+// pass through the string. But the strlen is faster than the case-insensitive
+// compares and lets us early-exit in the case that the strings are different
+// lengths (will often be the case for non-matches). So whether one approach or
+// the other will be faster depends on the case.
+//
+// The hardcoded strings are typically very short so it doesn't matter, and the
+// string piece gives additional flexibility for the caller (doesn't have to be
+// null terminated) so we choose the StringPiece route.
+template<typename Str>
+static inline bool DoLowerCaseEqualsASCII(BasicStringPiece<Str> str,
+                                          StringPiece lowercase_ascii) {
+  if (str.size() != lowercase_ascii.size())
+    return false;
+  for (size_t i = 0; i < str.size(); i++) {
+    if (ToLowerASCII(str[i]) != lowercase_ascii[i])
+      return false;
+  }
+  return true;
+}
+
+bool LowerCaseEqualsASCII(StringPiece str, StringPiece lowercase_ascii) {
+  return DoLowerCaseEqualsASCII<std::string>(str, lowercase_ascii);
+}
+
+bool LowerCaseEqualsASCII(StringPiece16 str, StringPiece lowercase_ascii) {
+  return DoLowerCaseEqualsASCII<string16>(str, lowercase_ascii);
+}
+
+// Case-sensitive comparison of a UTF-16 string against an ASCII one; the
+// lengths must match exactly.
+bool EqualsASCII(StringPiece16 str, StringPiece ascii) {
+  if (str.length() != ascii.length())
+    return false;
+  return std::equal(ascii.begin(), ascii.end(), str.begin());
+}
+
+template<typename Str>
+bool StartsWithT(BasicStringPiece<Str> str,
+ BasicStringPiece<Str> search_for,
+ CompareCase case_sensitivity) {
+ if (search_for.size() > str.size())
+ return false;
+
+ BasicStringPiece<Str> source = str.substr(0, search_for.size());
+
+ switch (case_sensitivity) {
+ case CompareCase::SENSITIVE:
+ return source == search_for;
+
+ case CompareCase::INSENSITIVE_ASCII:
+ return std::equal(
+ search_for.begin(), search_for.end(),
+ source.begin(),
+ CaseInsensitiveCompareASCII<typename Str::value_type>());
+
+ default:
+ NOTREACHED();
+ return false;
+ }
+}
+
+bool StartsWith(StringPiece str,
+ StringPiece search_for,
+ CompareCase case_sensitivity) {
+ return StartsWithT<std::string>(str, search_for, case_sensitivity);
+}
+
+bool StartsWith(StringPiece16 str,
+ StringPiece16 search_for,
+ CompareCase case_sensitivity) {
+ return StartsWithT<string16>(str, search_for, case_sensitivity);
+}
+
+template <typename Str>
+bool EndsWithT(BasicStringPiece<Str> str,
+ BasicStringPiece<Str> search_for,
+ CompareCase case_sensitivity) {
+ if (search_for.size() > str.size())
+ return false;
+
+ BasicStringPiece<Str> source = str.substr(str.size() - search_for.size(),
+ search_for.size());
+
+ switch (case_sensitivity) {
+ case CompareCase::SENSITIVE:
+ return source == search_for;
+
+ case CompareCase::INSENSITIVE_ASCII:
+ return std::equal(
+ source.begin(), source.end(),
+ search_for.begin(),
+ CaseInsensitiveCompareASCII<typename Str::value_type>());
+
+ default:
+ NOTREACHED();
+ return false;
+ }
+}
+
// 8-bit suffix check; see CompareCase in the header for the two modes.
bool EndsWith(StringPiece str,
              StringPiece search_for,
              CompareCase case_sensitivity) {
  return EndsWithT<std::string>(str, search_for, case_sensitivity);
}

// UTF-16 overload of the above.
bool EndsWith(StringPiece16 str,
              StringPiece16 search_for,
              CompareCase case_sensitivity) {
  return EndsWithT<string16>(str, search_for, case_sensitivity);
}
+
+char HexDigitToInt(wchar_t c) {
+ DCHECK(IsHexDigit(c));
+ if (c >= '0' && c <= '9')
+ return static_cast<char>(c - '0');
+ if (c >= 'A' && c <= 'F')
+ return static_cast<char>(c - 'A' + 10);
+ if (c >= 'a' && c <= 'f')
+ return static_cast<char>(c - 'a' + 10);
+ return 0;
+}
+
+bool IsUnicodeWhitespace(wchar_t c) {
+ // kWhitespaceWide is a NULL-terminated string
+ for (const wchar_t* cur = kWhitespaceWide; *cur; ++cur) {
+ if (*cur == c)
+ return true;
+ }
+ return false;
+}
+
// Unit suffixes used by FormatBytesUnlocalized() below; index N pairs with a
// value that has been divided by 1024 N times. Deliberately not localized
// (see the header comment for FormatBytesUnlocalized).
static const char* const kByteStringsUnlocalized[] = {
  " B",
  " kB",
  " MB",
  " GB",
  " TB",
  " PB"
};
+
+string16 FormatBytesUnlocalized(int64_t bytes) {
+ double unit_amount = static_cast<double>(bytes);
+ size_t dimension = 0;
+ const int kKilo = 1024;
+ while (unit_amount >= kKilo &&
+ dimension < arraysize(kByteStringsUnlocalized) - 1) {
+ unit_amount /= kKilo;
+ dimension++;
+ }
+
+ char buf[64];
+ if (bytes != 0 && dimension > 0 && unit_amount < 100) {
+ base::snprintf(buf, arraysize(buf), "%.1lf%s", unit_amount,
+ kByteStringsUnlocalized[dimension]);
+ } else {
+ base::snprintf(buf, arraysize(buf), "%.0lf%s", unit_amount,
+ kByteStringsUnlocalized[dimension]);
+ }
+
+ return ASCIIToUTF16(buf);
+}
+
// Runs in O(n) time in the length of |str|.
//
// Starting at |offset|, replaces occurrences of |find_this| (which must be
// non-empty) in |*str| with |replace_with|. When |replace_all| is false only
// the first occurrence is replaced. Three strategies keep the work linear:
// same-length in-place replace(), shrink-with-forward-memmove(), and a grow
// pass that resizes once and then fills the string back-to-front.
template<class StringType>
void DoReplaceSubstringsAfterOffset(StringType* str,
                                    size_t offset,
                                    BasicStringPiece<StringType> find_this,
                                    BasicStringPiece<StringType> replace_with,
                                    bool replace_all) {
  DCHECK(!find_this.empty());

  // If the find string doesn't appear, there's nothing to do.
  offset = str->find(find_this.data(), offset, find_this.size());
  if (offset == StringType::npos)
    return;

  // If we're only replacing one instance, there's no need to do anything
  // complicated.
  size_t find_length = find_this.length();
  if (!replace_all) {
    str->replace(offset, find_length, replace_with.data(), replace_with.size());
    return;
  }

  // If the find and replace strings are the same length, we can simply use
  // replace() on each instance, and finish the entire operation in O(n) time.
  size_t replace_length = replace_with.length();
  if (find_length == replace_length) {
    do {
      str->replace(offset, find_length,
                   replace_with.data(), replace_with.size());
      offset = str->find(find_this.data(), offset + replace_length,
                         find_this.size());
    } while (offset != StringType::npos);
    return;
  }

  // Since the find and replace strings aren't the same length, a loop like the
  // one above would be O(n^2) in the worst case, as replace() will shift the
  // entire remaining string each time. We need to be more clever to keep
  // things O(n).
  //
  // If we're shortening the string, we can alternate replacements with shifting
  // forward the intervening characters using memmove().
  size_t str_length = str->length();
  if (find_length > replace_length) {
    size_t write_offset = offset;
    do {
      if (replace_length) {
        str->replace(write_offset, replace_length,
                     replace_with.data(), replace_with.size());
        write_offset += replace_length;
      }
      size_t read_offset = offset + find_length;
      // Clamp the next match to str_length so the final unmatched tail is
      // copied by the memmove() below and the loop terminates.
      offset = std::min(
          str->find(find_this.data(), read_offset, find_this.size()),
          str_length);
      size_t length = offset - read_offset;
      if (length) {
        memmove(&(*str)[write_offset], &(*str)[read_offset],
                length * sizeof(typename StringType::value_type));
        write_offset += length;
      }
    } while (offset < str_length);
    str->resize(write_offset);
    return;
  }

  // We're lengthening the string. We can use alternating replacements and
  // memmove() calls like above, but we need to precalculate the final string
  // length and then expand from back-to-front to avoid overwriting the string
  // as we're reading it, needing to shift, or having to copy to a second string
  // temporarily.
  size_t first_match = offset;

  // First, calculate the final length and resize the string.
  size_t final_length = str_length;
  size_t expansion = replace_length - find_length;
  size_t current_match;
  do {
    final_length += expansion;
    // Minor optimization: save this offset into |current_match|, so that on
    // exit from the loop, |current_match| will point at the last instance of
    // the find string, and we won't need to find() it again immediately.
    current_match = offset;
    offset = str->find(find_this.data(), offset + find_length,
                       find_this.size());
  } while (offset != StringType::npos);
  str->resize(final_length);

  // Now do the replacement loop, working backwards through the string.
  // |write_offset| is the start of the not-yet-written output region; each
  // iteration first copies the tail that follows a match, then writes the
  // replacement, then steps |current_match| back to the previous match via
  // rfind(). The loop exits after processing the first match.
  for (size_t prev_match = str_length, write_offset = final_length; ;
       current_match = str->rfind(find_this.data(), current_match - 1,
                                  find_this.size())) {
    size_t read_offset = current_match + find_length;
    size_t length = prev_match - read_offset;
    if (length) {
      write_offset -= length;
      memmove(&(*str)[write_offset], &(*str)[read_offset],
              length * sizeof(typename StringType::value_type));
    }
    write_offset -= replace_length;
    str->replace(write_offset, replace_length,
                 replace_with.data(), replace_with.size());
    if (current_match == first_match)
      return;
    prev_match = current_match;
  }
}
+
// Replaces only the first occurrence of |find_this| at or after
// |start_offset| (UTF-16 version).
void ReplaceFirstSubstringAfterOffset(string16* str,
                                      size_t start_offset,
                                      StringPiece16 find_this,
                                      StringPiece16 replace_with) {
  DoReplaceSubstringsAfterOffset<string16>(
      str, start_offset, find_this, replace_with, false);  // Replace first.
}

// 8-bit overload of the above.
void ReplaceFirstSubstringAfterOffset(std::string* str,
                                      size_t start_offset,
                                      StringPiece find_this,
                                      StringPiece replace_with) {
  DoReplaceSubstringsAfterOffset<std::string>(
      str, start_offset, find_this, replace_with, false);  // Replace first.
}
+
// Replaces every occurrence of |find_this| at or after |start_offset|
// (UTF-16 version).
void ReplaceSubstringsAfterOffset(string16* str,
                                  size_t start_offset,
                                  StringPiece16 find_this,
                                  StringPiece16 replace_with) {
  DoReplaceSubstringsAfterOffset<string16>(
      str, start_offset, find_this, replace_with, true);  // Replace all.
}

// 8-bit overload of the above.
void ReplaceSubstringsAfterOffset(std::string* str,
                                  size_t start_offset,
                                  StringPiece find_this,
                                  StringPiece replace_with) {
  DoReplaceSubstringsAfterOffset<std::string>(
      str, start_offset, find_this, replace_with, true);  // Replace all.
}
+
// Shared implementation of WriteInto: sizes |str| to hold
// |length_with_null| - 1 characters and returns a pointer to its buffer.
// reserve() is called with the full |length_with_null| first — presumably so
// a trailing NUL written by C-style APIs fits without reallocation (see the
// header comment on WriteInto for the full contract).
template <class string_type>
inline typename string_type::value_type* WriteIntoT(string_type* str,
                                                    size_t length_with_null) {
  DCHECK_GT(length_with_null, 1u);
  str->reserve(length_with_null);
  str->resize(length_with_null - 1);
  return &((*str)[0]);
}
+
// See WriteIntoT above; 8-bit version.
char* WriteInto(std::string* str, size_t length_with_null) {
  return WriteIntoT(str, length_with_null);
}

// See WriteIntoT above; UTF-16 version.
char16* WriteInto(string16* str, size_t length_with_null) {
  return WriteIntoT(str, length_with_null);
}
+
+template<typename STR>
+static STR JoinStringT(const std::vector<STR>& parts,
+ BasicStringPiece<STR> sep) {
+ if (parts.empty())
+ return STR();
+
+ STR result(parts[0]);
+ auto iter = parts.begin();
+ ++iter;
+
+ for (; iter != parts.end(); ++iter) {
+ sep.AppendToString(&result);
+ result += *iter;
+ }
+
+ return result;
+}
+
// Joins |parts| with |separator| between consecutive elements.
std::string JoinString(const std::vector<std::string>& parts,
                       StringPiece separator) {
  return JoinStringT(parts, separator);
}

// UTF-16 overload of the above.
string16 JoinString(const std::vector<string16>& parts,
                    StringPiece16 separator) {
  return JoinStringT(parts, separator);
}
+
// Core of ReplaceStringPlaceholders: expands $1..$9 in |format_string| from
// |subst| and collapses runs of '$' (a run of N '$'s emits N-1). A '$'
// followed by anything other than '$' or '1'..'9' is dropped with a DLOG; a
// lone trailing '$' is also dropped. A placeholder whose index is >=
// subst.size() expands to nothing. If |offsets| is non-null, the start
// positions of the expansions — ordered by placeholder index via
// CompareParameter (defined earlier in this file) — are appended to it.
// At most 9 substitutions are supported (DCHECKed).
template<class FormatStringType, class OutStringType>
OutStringType DoReplaceStringPlaceholders(
    const FormatStringType& format_string,
    const std::vector<OutStringType>& subst,
    std::vector<size_t>* offsets) {
  size_t substitutions = subst.size();
  DCHECK_LT(substitutions, 10U);

  // Reserve room for the format text plus all substitution text up front.
  size_t sub_length = 0;
  for (const auto& cur : subst)
    sub_length += cur.length();

  OutStringType formatted;
  formatted.reserve(format_string.length() + sub_length);

  std::vector<ReplacementOffset> r_offsets;
  for (auto i = format_string.begin(); i != format_string.end(); ++i) {
    if ('$' == *i) {
      if (i + 1 != format_string.end()) {
        ++i;
        if ('$' == *i) {
          // Collapse a run of '$' characters into one fewer '$'.
          while (i != format_string.end() && '$' == *i) {
            formatted.push_back('$');
            ++i;
          }
          --i;
        } else {
          if (*i < '1' || *i > '9') {
            DLOG(ERROR) << "Invalid placeholder: $" << *i;
            continue;
          }
          uintptr_t index = *i - '1';
          if (offsets) {
            // Record where this expansion lands, keeping |r_offsets| sorted
            // by placeholder index.
            ReplacementOffset r_offset(index,
                                       static_cast<int>(formatted.size()));
            r_offsets.insert(std::lower_bound(r_offsets.begin(),
                                              r_offsets.end(),
                                              r_offset,
                                              &CompareParameter),
                             r_offset);
          }
          if (index < substitutions)
            formatted.append(subst.at(index));
        }
      }
    } else {
      formatted.push_back(*i);
    }
  }
  if (offsets) {
    for (const auto& cur : r_offsets)
      offsets->push_back(cur.offset);
  }
  return formatted;
}
+
// Expands $1..$9 placeholders in |format_string| from |subst|; see
// DoReplaceStringPlaceholders above for the exact rules.
string16 ReplaceStringPlaceholders(const string16& format_string,
                                   const std::vector<string16>& subst,
                                   std::vector<size_t>* offsets) {
  return DoReplaceStringPlaceholders(format_string, subst, offsets);
}

// 8-bit overload of the above.
std::string ReplaceStringPlaceholders(const StringPiece& format_string,
                                      const std::vector<std::string>& subst,
                                      std::vector<size_t>* offsets) {
  return DoReplaceStringPlaceholders(format_string, subst, offsets);
}
+
+string16 ReplaceStringPlaceholders(const string16& format_string,
+ const string16& a,
+ size_t* offset) {
+ std::vector<size_t> offsets;
+ std::vector<string16> subst;
+ subst.push_back(a);
+ string16 result = ReplaceStringPlaceholders(format_string, subst, &offsets);
+
+ DCHECK_EQ(1U, offsets.size());
+ if (offset)
+ *offset = offsets[0];
+ return result;
+}
+
+// The following code is compatible with the OpenBSD lcpy interface. See:
+// http://www.gratisoft.us/todd/papers/strlcpy.html
+// ftp://ftp.openbsd.org/pub/OpenBSD/src/lib/libc/string/{wcs,str}lcpy.c
+
namespace {

// Shared implementation of the BSD strlcpy()/wcslcpy() contract: copies as
// much of |src| as fits into |dst| (always NUL-terminating when |dst_size| is
// non-zero) and returns the full length of |src| in characters, so callers
// can detect truncation by checking result >= |dst_size|.
template <typename CHAR>
size_t lcpyT(CHAR* dst, const CHAR* src, size_t dst_size) {
  // Copy until the destination is full or the source terminator is copied.
  size_t pos = 0;
  while (pos < dst_size) {
    dst[pos] = src[pos];
    if (src[pos] == 0)
      return pos;  // Terminator copied; |pos| equals strlen(src).
    ++pos;
  }

  // Destination filled before the terminator was seen: truncate.
  if (dst_size != 0)
    dst[dst_size - 1] = 0;

  // Keep scanning |src| so the caller learns its total length.
  size_t src_length = dst_size;
  while (src[src_length])
    ++src_length;
  return src_length;
}

}  // namespace
+
// BSD-compatible strlcpy: bounded copy that always NUL-terminates (when
// |dst_size| > 0) and returns strlen(src) so truncation is detectable.
size_t strlcpy(char* dst, const char* src, size_t dst_size) {
  return lcpyT<char>(dst, src, dst_size);
}
// Wide-character variant of the above.
size_t wcslcpy(wchar_t* dst, const wchar_t* src, size_t dst_size) {
  return lcpyT<wchar_t>(dst, src, dst_size);
}
+
+} // namespace base
diff --git a/libchrome/base/strings/string_util.h b/libchrome/base/strings/string_util.h
new file mode 100644
index 0000000..0ee077c
--- /dev/null
+++ b/libchrome/base/strings/string_util.h
@@ -0,0 +1,475 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file defines utility functions for working with strings.
+
+#ifndef BASE_STRINGS_STRING_UTIL_H_
+#define BASE_STRINGS_STRING_UTIL_H_
+
+#include <ctype.h>
+#include <stdarg.h> // va_list
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h" // For implicit conversions.
+#include "build/build_config.h"
+
+#if defined(ANDROID)
+// On Android, bionic's stdio.h defines an snprintf macro when being built with
+// clang. Undefine it here so it won't collide with base::snprintf().
+#undef snprintf
+#endif // defined(ANDROID)
+
+namespace base {
+
+// C standard-library functions that aren't cross-platform are provided as
+// "base::...", and their prototypes are listed below. These functions are
+// then implemented as inline calls to the platform-specific equivalents in the
+// platform-specific headers.
+
+// Wrapper for vsnprintf that always null-terminates and always returns the
+// number of characters that would be in an untruncated formatted
+// string, even when truncation occurs.
+int vsnprintf(char* buffer, size_t size, const char* format, va_list arguments)
+ PRINTF_FORMAT(3, 0);
+
+// Some of these implementations need to be inlined.
+
+// We separate the declaration from the implementation of this inline
+// function just so the PRINTF_FORMAT works.
inline int snprintf(char* buffer,
                    size_t size,
                    _Printf_format_string_ const char* format,
                    ...) PRINTF_FORMAT(3, 4);
inline int snprintf(char* buffer,
                    size_t size,
                    _Printf_format_string_ const char* format,
                    ...) {
  // Forwards to base::vsnprintf, which (per its declaration above) always
  // NUL-terminates and returns the untruncated length.
  va_list arguments;
  va_start(arguments, format);
  int result = vsnprintf(buffer, size, format, arguments);
  va_end(arguments);
  return result;
}
+
+// BSD-style safe and consistent string copy functions.
+// Copies |src| to |dst|, where |dst_size| is the total allocated size of |dst|.
+// Copies at most |dst_size|-1 characters, and always NULL terminates |dst|, as
+// long as |dst_size| is not 0. Returns the length of |src| in characters.
+// If the return value is >= dst_size, then the output was truncated.
+// NOTE: All sizes are in number of characters, NOT in bytes.
+BASE_EXPORT size_t strlcpy(char* dst, const char* src, size_t dst_size);
+BASE_EXPORT size_t wcslcpy(wchar_t* dst, const wchar_t* src, size_t dst_size);
+
+// Scan a wprintf format string to determine whether it's portable across a
+// variety of systems. This function only checks that the conversion
+// specifiers used by the format string are supported and have the same meaning
+// on a variety of systems. It doesn't check for other errors that might occur
+// within a format string.
+//
+// Nonportable conversion specifiers for wprintf are:
+// - 's' and 'c' without an 'l' length modifier. %s and %c operate on char
+// data on all systems except Windows, which treat them as wchar_t data.
+// Use %ls and %lc for wchar_t data instead.
+// - 'S' and 'C', which operate on wchar_t data on all systems except Windows,
+// which treat them as char data. Use %ls and %lc for wchar_t data
+// instead.
+// - 'F', which is not identified by Windows wprintf documentation.
+// - 'D', 'O', and 'U', which are deprecated and not available on all systems.
+// Use %ld, %lo, and %lu instead.
+//
+// Note that there is no portable conversion specifier for char data when
+// working with wprintf.
+//
+// This function is intended to be called from base::vswprintf.
+BASE_EXPORT bool IsWprintfFormatPortable(const wchar_t* format);
+
+// ASCII-specific tolower. The standard library's tolower is locale sensitive,
+// so we don't want to use it here.
inline char ToLowerASCII(char c) {
  // Only the 26 uppercase ASCII letters are mapped; everything else passes
  // through unchanged.
  if (c >= 'A' && c <= 'Z')
    return static_cast<char>(c + ('a' - 'A'));
  return c;
}
// char16 overload: the same ASCII-only mapping applied to a UTF-16 code unit;
// non-ASCII code units pass through unchanged.
inline char16 ToLowerASCII(char16 c) {
  return (c >= 'A' && c <= 'Z') ? (c + ('a' - 'A')) : c;
}
+
+// ASCII-specific toupper. The standard library's toupper is locale sensitive,
+// so we don't want to use it here.
inline char ToUpperASCII(char c) {
  // Only the 26 lowercase ASCII letters are mapped; everything else passes
  // through unchanged.
  if (c >= 'a' && c <= 'z')
    return static_cast<char>(c + ('A' - 'a'));
  return c;
}
// char16 overload: the same ASCII-only mapping applied to a UTF-16 code unit;
// non-ASCII code units pass through unchanged.
inline char16 ToUpperASCII(char16 c) {
  return (c >= 'a' && c <= 'z') ? (c + ('A' - 'a')) : c;
}
+
+// Converts the given string to its ASCII-lowercase equivalent.
+BASE_EXPORT std::string ToLowerASCII(StringPiece str);
+BASE_EXPORT string16 ToLowerASCII(StringPiece16 str);
+
+// Converts the given string to its ASCII-uppercase equivalent.
+BASE_EXPORT std::string ToUpperASCII(StringPiece str);
+BASE_EXPORT string16 ToUpperASCII(StringPiece16 str);
+
+// Functor for case-insensitive ASCII comparisons for STL algorithms like
+// std::search.
+//
+// Note that a full Unicode version of this functor is not possible to write
+// because case mappings might change the number of characters, depend on
+// context (combining accents), and require handling UTF-16. If you need
+// proper Unicode support, use base::i18n::ToLower/FoldCase and then just
+// use a normal operator== on the result.
template<typename Char> struct CaseInsensitiveCompareASCII {
 public:
  // Returns true if |x| and |y| compare equal after ASCII lower-casing;
  // suitable as a binary predicate for std::equal / std::search.
  bool operator()(Char x, Char y) const {
    return ToLowerASCII(x) == ToLowerASCII(y);
  }
};
+
+// Like strcasecmp for case-insensitive ASCII characters only. Returns:
+// -1 (a < b)
+// 0 (a == b)
+// 1 (a > b)
+// (unlike strcasecmp which can return values greater or less than 1/-1). For
+// full Unicode support, use base::i18n::ToLower or base::i18n::FoldCase
+// and then just call the normal string operators on the result.
+BASE_EXPORT int CompareCaseInsensitiveASCII(StringPiece a, StringPiece b);
+BASE_EXPORT int CompareCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b);
+
+// Equality for ASCII case-insensitive comparisons. For full Unicode support,
+// use base::i18n::ToLower or base::i18n::FoldCase and then compare with either
+// == or !=.
+BASE_EXPORT bool EqualsCaseInsensitiveASCII(StringPiece a, StringPiece b);
+BASE_EXPORT bool EqualsCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b);
+
+// These threadsafe functions return references to globally unique empty
+// strings.
+//
+// It is likely faster to construct a new empty string object (just a few
+// instructions to set the length to 0) than to get the empty string singleton
+// returned by these functions (which requires threadsafe singleton access).
+//
+// Therefore, DO NOT USE THESE AS A GENERAL-PURPOSE SUBSTITUTE FOR DEFAULT
+// CONSTRUCTORS. There is only one case where you should use these: functions
+// which need to return a string by reference (e.g. as a class member
+// accessor), and don't have an empty string to use (e.g. in an error case).
+// These should not be used as initializers, function arguments, or return
+// values for functions which return by value or outparam.
+BASE_EXPORT const std::string& EmptyString();
+BASE_EXPORT const string16& EmptyString16();
+
+// Contains the set of characters representing whitespace in the corresponding
+// encoding. Null-terminated. The ASCII versions are the whitespaces as defined
+// by HTML5, and don't include control characters.
+BASE_EXPORT extern const wchar_t kWhitespaceWide[]; // Includes Unicode.
+BASE_EXPORT extern const char16 kWhitespaceUTF16[]; // Includes Unicode.
+BASE_EXPORT extern const char kWhitespaceASCII[];
+BASE_EXPORT extern const char16 kWhitespaceASCIIAs16[]; // No unicode.
+
+// Null-terminated string representing the UTF-8 byte order mark.
+BASE_EXPORT extern const char kUtf8ByteOrderMark[];
+
+// Removes characters in |remove_chars| from anywhere in |input|. Returns true
+// if any characters were removed. |remove_chars| must be null-terminated.
+// NOTE: Safe to use the same variable for both |input| and |output|.
+BASE_EXPORT bool RemoveChars(const string16& input,
+ const StringPiece16& remove_chars,
+ string16* output);
+BASE_EXPORT bool RemoveChars(const std::string& input,
+ const StringPiece& remove_chars,
+ std::string* output);
+
+// Replaces characters in |replace_chars| from anywhere in |input| with
+// |replace_with|. Each character in |replace_chars| will be replaced with
+// the |replace_with| string. Returns true if any characters were replaced.
+// |replace_chars| must be null-terminated.
+// NOTE: Safe to use the same variable for both |input| and |output|.
+BASE_EXPORT bool ReplaceChars(const string16& input,
+ const StringPiece16& replace_chars,
+ const string16& replace_with,
+ string16* output);
+BASE_EXPORT bool ReplaceChars(const std::string& input,
+ const StringPiece& replace_chars,
+ const std::string& replace_with,
+ std::string* output);
+
// Bitmask selecting which end(s) of a string the Trim* functions operate on.
enum TrimPositions {
  TRIM_NONE = 0,
  TRIM_LEADING = 1 << 0,   // Trim from the start only.
  TRIM_TRAILING = 1 << 1,  // Trim from the end only.
  TRIM_ALL = TRIM_LEADING | TRIM_TRAILING,
};
+
+// Removes characters in |trim_chars| from the beginning and end of |input|.
+// The 8-bit version only works on 8-bit characters, not UTF-8.
+//
+// It is safe to use the same variable for both |input| and |output| (this is
+// the normal usage to trim in-place).
+BASE_EXPORT bool TrimString(const string16& input,
+ StringPiece16 trim_chars,
+ string16* output);
+BASE_EXPORT bool TrimString(const std::string& input,
+ StringPiece trim_chars,
+ std::string* output);
+
+// StringPiece versions of the above. The returned pieces refer to the original
+// buffer.
+BASE_EXPORT StringPiece16 TrimString(StringPiece16 input,
+ const StringPiece16& trim_chars,
+ TrimPositions positions);
+BASE_EXPORT StringPiece TrimString(StringPiece input,
+ const StringPiece& trim_chars,
+ TrimPositions positions);
+
+// Truncates a string to the nearest UTF-8 character that will leave
+// the string less than or equal to the specified byte size.
+BASE_EXPORT void TruncateUTF8ToByteSize(const std::string& input,
+ const size_t byte_size,
+ std::string* output);
+
+// Trims any whitespace from either end of the input string.
+//
+// The StringPiece versions return a substring referencing the input buffer.
+// The ASCII versions look only for ASCII whitespace.
+//
+// The std::string versions return where whitespace was found.
+// NOTE: Safe to use the same variable for both input and output.
+BASE_EXPORT TrimPositions TrimWhitespace(const string16& input,
+ TrimPositions positions,
+ string16* output);
+BASE_EXPORT StringPiece16 TrimWhitespace(StringPiece16 input,
+ TrimPositions positions);
+BASE_EXPORT TrimPositions TrimWhitespaceASCII(const std::string& input,
+ TrimPositions positions,
+ std::string* output);
+BASE_EXPORT StringPiece TrimWhitespaceASCII(StringPiece input,
+ TrimPositions positions);
+
+// Searches for CR or LF characters. Removes all contiguous whitespace
+// strings that contain them. This is useful when trying to deal with text
+// copied from terminals.
+// Returns |text|, with the following three transformations:
+// (1) Leading and trailing whitespace is trimmed.
+// (2) If |trim_sequences_with_line_breaks| is true, any other whitespace
+// sequences containing a CR or LF are trimmed.
+// (3) All other whitespace sequences are converted to single spaces.
+BASE_EXPORT string16 CollapseWhitespace(
+ const string16& text,
+ bool trim_sequences_with_line_breaks);
+BASE_EXPORT std::string CollapseWhitespaceASCII(
+ const std::string& text,
+ bool trim_sequences_with_line_breaks);
+
+// Returns true if |input| is empty or contains only characters found in
+// |characters|.
+BASE_EXPORT bool ContainsOnlyChars(const StringPiece& input,
+ const StringPiece& characters);
+BASE_EXPORT bool ContainsOnlyChars(const StringPiece16& input,
+ const StringPiece16& characters);
+
+// Returns true if the specified string matches the criteria. How can a wide
+// string be 8-bit or UTF8? It contains only characters that are < 256 (in the
+// first case) or characters that use only 8-bits and whose 8-bit
+// representation looks like a UTF-8 string (the second case).
+//
+// Note that IsStringUTF8 checks not only if the input is structurally
+// valid but also if it doesn't contain any non-character codepoint
+// (e.g. U+FFFE). It's done on purpose because all the existing callers want
+// to have the maximum 'discriminating' power from other encodings. If
+// there's a use case for just checking the structural validity, we have to
+// add a new function for that.
+//
+// IsStringASCII assumes the input is likely all ASCII, and does not leave early
+// if it is not the case.
+BASE_EXPORT bool IsStringUTF8(const StringPiece& str);
+BASE_EXPORT bool IsStringASCII(const StringPiece& str);
+BASE_EXPORT bool IsStringASCII(const StringPiece16& str);
+// A convenience adaptor for WebStrings, as they don't convert into
+// StringPieces directly.
+BASE_EXPORT bool IsStringASCII(const string16& str);
+#if defined(WCHAR_T_IS_UTF32)
+BASE_EXPORT bool IsStringASCII(const std::wstring& str);
+#endif
+
+// Compare the lower-case form of the given string against the given
+// previously-lower-cased ASCII string (typically a constant).
+BASE_EXPORT bool LowerCaseEqualsASCII(StringPiece str,
+                                      StringPiece lowercase_ascii);
+BASE_EXPORT bool LowerCaseEqualsASCII(StringPiece16 str,
+                                      StringPiece lowercase_ascii);
+
+// Performs a case-sensitive string compare of the given 16-bit string against
+// the given 8-bit ASCII string (typically a constant). The behavior is
+// undefined if the |ascii| string is not ASCII.
+BASE_EXPORT bool EqualsASCII(StringPiece16 str, StringPiece ascii);
+
+// Indicates case sensitivity of comparisons. Only ASCII case insensitivity
+// is supported. Full Unicode case-insensitive conversions would need to go in
+// base/i18n so it can use ICU.
+//
+// If you need to do Unicode-aware case-insensitive StartsWith/EndsWith, it's
+// best to call base::i18n::ToLower() or base::i18n::FoldCase() (see
+// base/i18n/case_conversion.h for usage advice) on the arguments, and then
+// pass the results to a case-sensitive comparison.
enum class CompareCase {
  SENSITIVE,          // Exact, character-for-character comparison.
  INSENSITIVE_ASCII,  // Case-insensitive for ASCII letters only.
};
+
+BASE_EXPORT bool StartsWith(StringPiece str,
+ StringPiece search_for,
+ CompareCase case_sensitivity);
+BASE_EXPORT bool StartsWith(StringPiece16 str,
+ StringPiece16 search_for,
+ CompareCase case_sensitivity);
+BASE_EXPORT bool EndsWith(StringPiece str,
+ StringPiece search_for,
+ CompareCase case_sensitivity);
+BASE_EXPORT bool EndsWith(StringPiece16 str,
+ StringPiece16 search_for,
+ CompareCase case_sensitivity);
+
+// Determines the type of ASCII character, independent of locale (the C
+// library versions will change based on locale).
template <typename Char>
inline bool IsAsciiWhitespace(Char c) {
  // Only the four classic ASCII whitespace characters; NOT the full set in
  // kWhitespaceASCII (no vertical tab / form feed).
  switch (c) {
    case ' ':
    case '\r':
    case '\n':
    case '\t':
      return true;
    default:
      return false;
  }
}
template <typename Char>
inline bool IsAsciiAlpha(Char c) {
  // Fold uppercase into lowercase, then test a single range.
  const Char folded =
      (c >= 'A' && c <= 'Z') ? static_cast<Char>(c + ('a' - 'A')) : c;
  return folded >= 'a' && folded <= 'z';
}
template <typename Char>
inline bool IsAsciiUpper(Char c) {
  // True only for the 26 uppercase ASCII letters.
  return 'A' <= c && c <= 'Z';
}
template <typename Char>
inline bool IsAsciiLower(Char c) {
  // True only for the 26 lowercase ASCII letters.
  return 'a' <= c && c <= 'z';
}
template <typename Char>
inline bool IsAsciiDigit(Char c) {
  // True only for the ASCII decimal digits.
  return '0' <= c && c <= '9';
}
+
template <typename Char>
inline bool IsHexDigit(Char c) {
  // Decimal digits plus either case of a-f.
  if (c >= '0' && c <= '9')
    return true;
  return (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F');
}
+
+// Returns the integer corresponding to the given hex character. For example:
+// '4' -> 4
+// 'a' -> 10
+// 'B' -> 11
+// Assumes the input is a valid hex character. DCHECKs in debug builds if not.
+BASE_EXPORT char HexDigitToInt(wchar_t c);
+
+// Returns true if it's a Unicode whitespace character.
+BASE_EXPORT bool IsUnicodeWhitespace(wchar_t c);
+
+// Return a byte string in human-readable format with a unit suffix. Not
+// appropriate for use in any UI; use of FormatBytes and friends in ui/base is
+// highly recommended instead. TODO(avi): Figure out how to get callers to use
+// FormatBytes instead; remove this.
+BASE_EXPORT string16 FormatBytesUnlocalized(int64_t bytes);
+
+// Starting at |start_offset| (usually 0), replace the first instance of
+// |find_this| with |replace_with|.
+BASE_EXPORT void ReplaceFirstSubstringAfterOffset(
+ base::string16* str,
+ size_t start_offset,
+ StringPiece16 find_this,
+ StringPiece16 replace_with);
+BASE_EXPORT void ReplaceFirstSubstringAfterOffset(
+ std::string* str,
+ size_t start_offset,
+ StringPiece find_this,
+ StringPiece replace_with);
+
+// Starting at |start_offset| (usually 0), look through |str| and replace all
+// instances of |find_this| with |replace_with|.
+//
+// This does entire substrings; use std::replace in <algorithm> for single
+// characters, for example:
+// std::replace(str.begin(), str.end(), 'a', 'b');
+BASE_EXPORT void ReplaceSubstringsAfterOffset(
+ string16* str,
+ size_t start_offset,
+ StringPiece16 find_this,
+ StringPiece16 replace_with);
+BASE_EXPORT void ReplaceSubstringsAfterOffset(
+ std::string* str,
+ size_t start_offset,
+ StringPiece find_this,
+ StringPiece replace_with);
+
+// Reserves enough memory in |str| to accommodate |length_with_null| characters,
+// sets the size of |str| to |length_with_null - 1| characters, and returns a
+// pointer to the underlying contiguous array of characters. This is typically
+// used when calling a function that writes results into a character array, but
+// the caller wants the data to be managed by a string-like object. It is
+// convenient in that is can be used inline in the call, and fast in that it
+// avoids copying the results of the call from a char* into a string.
+//
+// |length_with_null| must be at least 2, since otherwise the underlying string
+// would have size 0, and trying to access &((*str)[0]) in that case can result
+// in a number of problems.
+//
+// Internally, this takes linear time because the resize() call 0-fills the
+// underlying array for potentially all
+// (|length_with_null - 1| * sizeof(string_type::value_type)) bytes. Ideally we
+// could avoid this aspect of the resize() call, as we expect the caller to
+// immediately write over this memory, but there is no other way to set the size
+// of the string, and not doing that will mean people who access |str| rather
+// than str.c_str() will get back a string of whatever size |str| had on entry
+// to this function (probably 0).
+BASE_EXPORT char* WriteInto(std::string* str, size_t length_with_null);
+BASE_EXPORT char16* WriteInto(string16* str, size_t length_with_null);
+#ifndef OS_WIN
+BASE_EXPORT wchar_t* WriteInto(std::wstring* str, size_t length_with_null);
+#endif
+
+// Does the opposite of SplitString().
+BASE_EXPORT std::string JoinString(const std::vector<std::string>& parts,
+ StringPiece separator);
+BASE_EXPORT string16 JoinString(const std::vector<string16>& parts,
+ StringPiece16 separator);
+
+// Replace $1-$2-$3..$9 in the format string with values from |subst|.
+// Additionally, any number of consecutive '$' characters is replaced by that
+// number less one. Eg $$->$, $$$->$$, etc. The offsets parameter here can be
+// NULL. This only allows you to use up to nine replacements.
+BASE_EXPORT string16 ReplaceStringPlaceholders(
+ const string16& format_string,
+ const std::vector<string16>& subst,
+ std::vector<size_t>* offsets);
+
+BASE_EXPORT std::string ReplaceStringPlaceholders(
+ const StringPiece& format_string,
+ const std::vector<std::string>& subst,
+ std::vector<size_t>* offsets);
+
+// Single-string shortcut for ReplaceStringPlaceholders. |offset| may be NULL.
+BASE_EXPORT string16 ReplaceStringPlaceholders(const string16& format_string,
+ const string16& a,
+ size_t* offset);
+
+} // namespace base
+
+#if defined(OS_WIN)
+#include "base/strings/string_util_win.h"
+#elif defined(OS_POSIX)
+#include "base/strings/string_util_posix.h"
+#else
+#error Define string operations appropriately for your platform
+#endif
+
+#endif // BASE_STRINGS_STRING_UTIL_H_
diff --git a/libchrome/base/strings/string_util_constants.cc b/libchrome/base/strings/string_util_constants.cc
new file mode 100644
index 0000000..aba1b12
--- /dev/null
+++ b/libchrome/base/strings/string_util_constants.cc
@@ -0,0 +1,67 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_util.h"
+
+namespace base {
+
+#define WHITESPACE_UNICODE \
+ 0x0009, /* CHARACTER TABULATION */ \
+ 0x000A, /* LINE FEED (LF) */ \
+ 0x000B, /* LINE TABULATION */ \
+ 0x000C, /* FORM FEED (FF) */ \
+ 0x000D, /* CARRIAGE RETURN (CR) */ \
+ 0x0020, /* SPACE */ \
+ 0x0085, /* NEXT LINE (NEL) */ \
+ 0x00A0, /* NO-BREAK SPACE */ \
+ 0x1680, /* OGHAM SPACE MARK */ \
+ 0x2000, /* EN QUAD */ \
+ 0x2001, /* EM QUAD */ \
+ 0x2002, /* EN SPACE */ \
+ 0x2003, /* EM SPACE */ \
+ 0x2004, /* THREE-PER-EM SPACE */ \
+ 0x2005, /* FOUR-PER-EM SPACE */ \
+ 0x2006, /* SIX-PER-EM SPACE */ \
+ 0x2007, /* FIGURE SPACE */ \
+ 0x2008, /* PUNCTUATION SPACE */ \
+ 0x2009, /* THIN SPACE */ \
+ 0x200A, /* HAIR SPACE */ \
+ 0x2028, /* LINE SEPARATOR */ \
+ 0x2029, /* PARAGRAPH SEPARATOR */ \
+ 0x202F, /* NARROW NO-BREAK SPACE */ \
+ 0x205F, /* MEDIUM MATHEMATICAL SPACE */ \
+ 0x3000, /* IDEOGRAPHIC SPACE */ \
+ 0
+
+const wchar_t kWhitespaceWide[] = {
+ WHITESPACE_UNICODE
+};
+
+const char16 kWhitespaceUTF16[] = {
+ WHITESPACE_UNICODE
+};
+
+const char kWhitespaceASCII[] = {
+ 0x09, // CHARACTER TABULATION
+ 0x0A, // LINE FEED (LF)
+ 0x0B, // LINE TABULATION
+ 0x0C, // FORM FEED (FF)
+ 0x0D, // CARRIAGE RETURN (CR)
+ 0x20, // SPACE
+ 0
+};
+
+const char16 kWhitespaceASCIIAs16[] = {
+ 0x09, // CHARACTER TABULATION
+ 0x0A, // LINE FEED (LF)
+ 0x0B, // LINE TABULATION
+ 0x0C, // FORM FEED (FF)
+ 0x0D, // CARRIAGE RETURN (CR)
+ 0x20, // SPACE
+ 0
+};
+
+const char kUtf8ByteOrderMark[] = "\xEF\xBB\xBF";
+
+} // namespace base
diff --git a/libchrome/base/strings/string_util_posix.h b/libchrome/base/strings/string_util_posix.h
new file mode 100644
index 0000000..8299118
--- /dev/null
+++ b/libchrome/base/strings/string_util_posix.h
@@ -0,0 +1,37 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_UTIL_POSIX_H_
+#define BASE_STRINGS_STRING_UTIL_POSIX_H_
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <wchar.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+// Chromium code style is to not use malloc'd strings; this is provided only
+// for interacting with APIs that require them.
+inline char* strdup(const char* str) {
+ return ::strdup(str);
+}
+
+inline int vsnprintf(char* buffer, size_t size,
+ const char* format, va_list arguments) {
+ return ::vsnprintf(buffer, size, format, arguments);
+}
+
+inline int vswprintf(wchar_t* buffer, size_t size,
+ const wchar_t* format, va_list arguments) {
+ DCHECK(IsWprintfFormatPortable(format));
+ return ::vswprintf(buffer, size, format, arguments);
+}
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRING_UTIL_POSIX_H_
diff --git a/libchrome/base/strings/string_util_unittest.cc b/libchrome/base/strings/string_util_unittest.cc
new file mode 100644
index 0000000..df2226e
--- /dev/null
+++ b/libchrome/base/strings/string_util_unittest.cc
@@ -0,0 +1,1130 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_util.h"
+
+#include <math.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::ElementsAre;
+
+namespace base {
+
+static const struct trim_case {
+ const wchar_t* input;
+ const TrimPositions positions;
+ const wchar_t* output;
+ const TrimPositions return_value;
+} trim_cases[] = {
+ {L" Google Video ", TRIM_LEADING, L"Google Video ", TRIM_LEADING},
+ {L" Google Video ", TRIM_TRAILING, L" Google Video", TRIM_TRAILING},
+ {L" Google Video ", TRIM_ALL, L"Google Video", TRIM_ALL},
+ {L"Google Video", TRIM_ALL, L"Google Video", TRIM_NONE},
+ {L"", TRIM_ALL, L"", TRIM_NONE},
+ {L" ", TRIM_LEADING, L"", TRIM_LEADING},
+ {L" ", TRIM_TRAILING, L"", TRIM_TRAILING},
+ {L" ", TRIM_ALL, L"", TRIM_ALL},
+ {L"\t\rTest String\n", TRIM_ALL, L"Test String", TRIM_ALL},
+ {L"\x2002Test String\x00A0\x3000", TRIM_ALL, L"Test String", TRIM_ALL},
+};
+
+static const struct trim_case_ascii {
+ const char* input;
+ const TrimPositions positions;
+ const char* output;
+ const TrimPositions return_value;
+} trim_cases_ascii[] = {
+ {" Google Video ", TRIM_LEADING, "Google Video ", TRIM_LEADING},
+ {" Google Video ", TRIM_TRAILING, " Google Video", TRIM_TRAILING},
+ {" Google Video ", TRIM_ALL, "Google Video", TRIM_ALL},
+ {"Google Video", TRIM_ALL, "Google Video", TRIM_NONE},
+ {"", TRIM_ALL, "", TRIM_NONE},
+ {" ", TRIM_LEADING, "", TRIM_LEADING},
+ {" ", TRIM_TRAILING, "", TRIM_TRAILING},
+ {" ", TRIM_ALL, "", TRIM_ALL},
+ {"\t\rTest String\n", TRIM_ALL, "Test String", TRIM_ALL},
+};
+
+namespace {
+
+// Helper used to test TruncateUTF8ToByteSize.
+bool Truncated(const std::string& input,
+ const size_t byte_size,
+ std::string* output) {
+ size_t prev = input.length();
+ TruncateUTF8ToByteSize(input, byte_size, output);
+ return prev != output->length();
+}
+
+} // namespace
+
+TEST(StringUtilTest, TruncateUTF8ToByteSize) {
+ std::string output;
+
+ // Empty strings and invalid byte_size arguments
+ EXPECT_FALSE(Truncated(std::string(), 0, &output));
+ EXPECT_EQ(output, "");
+ EXPECT_TRUE(Truncated("\xe1\x80\xbf", 0, &output));
+ EXPECT_EQ(output, "");
+ EXPECT_FALSE(Truncated("\xe1\x80\xbf", static_cast<size_t>(-1), &output));
+ EXPECT_FALSE(Truncated("\xe1\x80\xbf", 4, &output));
+
+ // Testing the truncation of valid UTF8 correctly
+ EXPECT_TRUE(Truncated("abc", 2, &output));
+ EXPECT_EQ(output, "ab");
+ EXPECT_TRUE(Truncated("\xc2\x81\xc2\x81", 2, &output));
+ EXPECT_EQ(output.compare("\xc2\x81"), 0);
+ EXPECT_TRUE(Truncated("\xc2\x81\xc2\x81", 3, &output));
+ EXPECT_EQ(output.compare("\xc2\x81"), 0);
+ EXPECT_FALSE(Truncated("\xc2\x81\xc2\x81", 4, &output));
+ EXPECT_EQ(output.compare("\xc2\x81\xc2\x81"), 0);
+
+ {
+ const char array[] = "\x00\x00\xc2\x81\xc2\x81";
+ const std::string array_string(array, arraysize(array));
+ EXPECT_TRUE(Truncated(array_string, 4, &output));
+ EXPECT_EQ(output.compare(std::string("\x00\x00\xc2\x81", 4)), 0);
+ }
+
+ {
+ const char array[] = "\x00\xc2\x81\xc2\x81";
+ const std::string array_string(array, arraysize(array));
+ EXPECT_TRUE(Truncated(array_string, 4, &output));
+ EXPECT_EQ(output.compare(std::string("\x00\xc2\x81", 3)), 0);
+ }
+
+ // Testing invalid UTF8
+ EXPECT_TRUE(Truncated("\xed\xa0\x80\xed\xbf\xbf", 6, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xed\xa0\x8f", 3, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xed\xbf\xbf", 3, &output));
+ EXPECT_EQ(output.compare(""), 0);
+
+ // Testing invalid UTF8 mixed with valid UTF8
+ EXPECT_FALSE(Truncated("\xe1\x80\xbf", 3, &output));
+ EXPECT_EQ(output.compare("\xe1\x80\xbf"), 0);
+ EXPECT_FALSE(Truncated("\xf1\x80\xa0\xbf", 4, &output));
+ EXPECT_EQ(output.compare("\xf1\x80\xa0\xbf"), 0);
+ EXPECT_FALSE(Truncated("a\xc2\x81\xe1\x80\xbf\xf1\x80\xa0\xbf",
+ 10, &output));
+ EXPECT_EQ(output.compare("a\xc2\x81\xe1\x80\xbf\xf1\x80\xa0\xbf"), 0);
+ EXPECT_TRUE(Truncated("a\xc2\x81\xe1\x80\xbf\xf1""a""\x80\xa0",
+ 10, &output));
+ EXPECT_EQ(output.compare("a\xc2\x81\xe1\x80\xbf\xf1""a"), 0);
+ EXPECT_FALSE(Truncated("\xef\xbb\xbf" "abc", 6, &output));
+ EXPECT_EQ(output.compare("\xef\xbb\xbf" "abc"), 0);
+
+ // Overlong sequences
+ EXPECT_TRUE(Truncated("\xc0\x80", 2, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xc1\x80\xc1\x81", 4, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xe0\x80\x80", 3, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xe0\x82\x80", 3, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xe0\x9f\xbf", 3, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xf0\x80\x80\x8D", 4, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xf0\x80\x82\x91", 4, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xf0\x80\xa0\x80", 4, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xf0\x8f\xbb\xbf", 4, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xf8\x80\x80\x80\xbf", 5, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xfc\x80\x80\x80\xa0\xa5", 6, &output));
+ EXPECT_EQ(output.compare(""), 0);
+
+ // Beyond U+10FFFF (the upper limit of Unicode codespace)
+ EXPECT_TRUE(Truncated("\xf4\x90\x80\x80", 4, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xf8\xa0\xbf\x80\xbf", 5, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xfc\x9c\xbf\x80\xbf\x80", 6, &output));
+ EXPECT_EQ(output.compare(""), 0);
+
+ // BOMs in UTF-16(BE|LE) and UTF-32(BE|LE)
+ EXPECT_TRUE(Truncated("\xfe\xff", 2, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xff\xfe", 2, &output));
+ EXPECT_EQ(output.compare(""), 0);
+
+ {
+ const char array[] = "\x00\x00\xfe\xff";
+ const std::string array_string(array, arraysize(array));
+ EXPECT_TRUE(Truncated(array_string, 4, &output));
+ EXPECT_EQ(output.compare(std::string("\x00\x00", 2)), 0);
+ }
+
+ // Variants on the previous test
+ {
+ const char array[] = "\xff\xfe\x00\x00";
+ const std::string array_string(array, 4);
+ EXPECT_FALSE(Truncated(array_string, 4, &output));
+ EXPECT_EQ(output.compare(std::string("\xff\xfe\x00\x00", 4)), 0);
+ }
+ {
+ const char array[] = "\xff\x00\x00\xfe";
+ const std::string array_string(array, arraysize(array));
+ EXPECT_TRUE(Truncated(array_string, 4, &output));
+ EXPECT_EQ(output.compare(std::string("\xff\x00\x00", 3)), 0);
+ }
+
+ // Non-characters : U+xxFFF[EF] where xx is 0x00 through 0x10 and <FDD0,FDEF>
+ EXPECT_TRUE(Truncated("\xef\xbf\xbe", 3, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xf0\x8f\xbf\xbe", 4, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xf3\xbf\xbf\xbf", 4, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xef\xb7\x90", 3, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_TRUE(Truncated("\xef\xb7\xaf", 3, &output));
+ EXPECT_EQ(output.compare(""), 0);
+
+ // Strings in legacy encodings that are valid in UTF-8, but
+ // are invalid as UTF-8 in real data.
+ EXPECT_TRUE(Truncated("caf\xe9", 4, &output));
+ EXPECT_EQ(output.compare("caf"), 0);
+ EXPECT_TRUE(Truncated("\xb0\xa1\xb0\xa2", 4, &output));
+ EXPECT_EQ(output.compare(""), 0);
+ EXPECT_FALSE(Truncated("\xa7\x41\xa6\x6e", 4, &output));
+ EXPECT_EQ(output.compare("\xa7\x41\xa6\x6e"), 0);
+ EXPECT_TRUE(Truncated("\xa7\x41\xa6\x6e\xd9\xee\xe4\xee", 7,
+ &output));
+ EXPECT_EQ(output.compare("\xa7\x41\xa6\x6e"), 0);
+
+ // Testing using the same string as input and output.
+ EXPECT_FALSE(Truncated(output, 4, &output));
+ EXPECT_EQ(output.compare("\xa7\x41\xa6\x6e"), 0);
+ EXPECT_TRUE(Truncated(output, 3, &output));
+ EXPECT_EQ(output.compare("\xa7\x41"), 0);
+
+ // "abc" with U+201[CD] in windows-125[0-8]
+ EXPECT_TRUE(Truncated("\x93" "abc\x94", 5, &output));
+ EXPECT_EQ(output.compare("\x93" "abc"), 0);
+
+ // U+0639 U+064E U+0644 U+064E in ISO-8859-6
+ EXPECT_TRUE(Truncated("\xd9\xee\xe4\xee", 4, &output));
+ EXPECT_EQ(output.compare(""), 0);
+
+ // U+03B3 U+03B5 U+03B9 U+03AC in ISO-8859-7
+ EXPECT_TRUE(Truncated("\xe3\xe5\xe9\xdC", 4, &output));
+ EXPECT_EQ(output.compare(""), 0);
+}
+
+TEST(StringUtilTest, TrimWhitespace) {
+ string16 output; // Allow contents to carry over to next testcase
+ for (size_t i = 0; i < arraysize(trim_cases); ++i) {
+ const trim_case& value = trim_cases[i];
+ EXPECT_EQ(value.return_value,
+ TrimWhitespace(WideToUTF16(value.input), value.positions,
+ &output));
+ EXPECT_EQ(WideToUTF16(value.output), output);
+ }
+
+ // Test that TrimWhitespace() can take the same string for input and output
+ output = ASCIIToUTF16(" This is a test \r\n");
+ EXPECT_EQ(TRIM_ALL, TrimWhitespace(output, TRIM_ALL, &output));
+ EXPECT_EQ(ASCIIToUTF16("This is a test"), output);
+
+ // Once more, but with a string of whitespace
+ output = ASCIIToUTF16(" \r\n");
+ EXPECT_EQ(TRIM_ALL, TrimWhitespace(output, TRIM_ALL, &output));
+ EXPECT_EQ(string16(), output);
+
+ std::string output_ascii;
+ for (size_t i = 0; i < arraysize(trim_cases_ascii); ++i) {
+ const trim_case_ascii& value = trim_cases_ascii[i];
+ EXPECT_EQ(value.return_value,
+ TrimWhitespaceASCII(value.input, value.positions, &output_ascii));
+ EXPECT_EQ(value.output, output_ascii);
+ }
+}
+
+static const struct collapse_case {
+ const wchar_t* input;
+ const bool trim;
+ const wchar_t* output;
+} collapse_cases[] = {
+ {L" Google Video ", false, L"Google Video"},
+ {L"Google Video", false, L"Google Video"},
+ {L"", false, L""},
+ {L" ", false, L""},
+ {L"\t\rTest String\n", false, L"Test String"},
+ {L"\x2002Test String\x00A0\x3000", false, L"Test String"},
+ {L" Test \n \t String ", false, L"Test String"},
+ {L"\x2002Test\x1680 \x2028 \tString\x00A0\x3000", false, L"Test String"},
+ {L" Test String", false, L"Test String"},
+ {L"Test String ", false, L"Test String"},
+ {L"Test String", false, L"Test String"},
+ {L"", true, L""},
+ {L"\n", true, L""},
+ {L" \r ", true, L""},
+ {L"\nFoo", true, L"Foo"},
+ {L"\r Foo ", true, L"Foo"},
+ {L" Foo bar ", true, L"Foo bar"},
+ {L" \tFoo bar \n", true, L"Foo bar"},
+ {L" a \r b\n c \r\n d \t\re \t f \n ", true, L"abcde f"},
+};
+
+TEST(StringUtilTest, CollapseWhitespace) {
+ for (size_t i = 0; i < arraysize(collapse_cases); ++i) {
+ const collapse_case& value = collapse_cases[i];
+ EXPECT_EQ(WideToUTF16(value.output),
+ CollapseWhitespace(WideToUTF16(value.input), value.trim));
+ }
+}
+
+static const struct collapse_case_ascii {
+ const char* input;
+ const bool trim;
+ const char* output;
+} collapse_cases_ascii[] = {
+ {" Google Video ", false, "Google Video"},
+ {"Google Video", false, "Google Video"},
+ {"", false, ""},
+ {" ", false, ""},
+ {"\t\rTest String\n", false, "Test String"},
+ {" Test \n \t String ", false, "Test String"},
+ {" Test String", false, "Test String"},
+ {"Test String ", false, "Test String"},
+ {"Test String", false, "Test String"},
+ {"", true, ""},
+ {"\n", true, ""},
+ {" \r ", true, ""},
+ {"\nFoo", true, "Foo"},
+ {"\r Foo ", true, "Foo"},
+ {" Foo bar ", true, "Foo bar"},
+ {" \tFoo bar \n", true, "Foo bar"},
+ {" a \r b\n c \r\n d \t\re \t f \n ", true, "abcde f"},
+};
+
+TEST(StringUtilTest, CollapseWhitespaceASCII) {
+ for (size_t i = 0; i < arraysize(collapse_cases_ascii); ++i) {
+ const collapse_case_ascii& value = collapse_cases_ascii[i];
+ EXPECT_EQ(value.output, CollapseWhitespaceASCII(value.input, value.trim));
+ }
+}
+
+TEST(StringUtilTest, IsStringUTF8) {
+ EXPECT_TRUE(IsStringUTF8("abc"));
+ EXPECT_TRUE(IsStringUTF8("\xc2\x81"));
+ EXPECT_TRUE(IsStringUTF8("\xe1\x80\xbf"));
+ EXPECT_TRUE(IsStringUTF8("\xf1\x80\xa0\xbf"));
+ EXPECT_TRUE(IsStringUTF8("a\xc2\x81\xe1\x80\xbf\xf1\x80\xa0\xbf"));
+ EXPECT_TRUE(IsStringUTF8("\xef\xbb\xbf" "abc")); // UTF-8 BOM
+
+ // surrogate code points
+ EXPECT_FALSE(IsStringUTF8("\xed\xa0\x80\xed\xbf\xbf"));
+ EXPECT_FALSE(IsStringUTF8("\xed\xa0\x8f"));
+ EXPECT_FALSE(IsStringUTF8("\xed\xbf\xbf"));
+
+ // overlong sequences
+ EXPECT_FALSE(IsStringUTF8("\xc0\x80")); // U+0000
+ EXPECT_FALSE(IsStringUTF8("\xc1\x80\xc1\x81")); // "AB"
+ EXPECT_FALSE(IsStringUTF8("\xe0\x80\x80")); // U+0000
+ EXPECT_FALSE(IsStringUTF8("\xe0\x82\x80")); // U+0080
+ EXPECT_FALSE(IsStringUTF8("\xe0\x9f\xbf")); // U+07ff
+ EXPECT_FALSE(IsStringUTF8("\xf0\x80\x80\x8D")); // U+000D
+ EXPECT_FALSE(IsStringUTF8("\xf0\x80\x82\x91")); // U+0091
+ EXPECT_FALSE(IsStringUTF8("\xf0\x80\xa0\x80")); // U+0800
+ EXPECT_FALSE(IsStringUTF8("\xf0\x8f\xbb\xbf")); // U+FEFF (BOM)
+ EXPECT_FALSE(IsStringUTF8("\xf8\x80\x80\x80\xbf")); // U+003F
+ EXPECT_FALSE(IsStringUTF8("\xfc\x80\x80\x80\xa0\xa5")); // U+00A5
+
+ // Beyond U+10FFFF (the upper limit of Unicode codespace)
+ EXPECT_FALSE(IsStringUTF8("\xf4\x90\x80\x80")); // U+110000
+ EXPECT_FALSE(IsStringUTF8("\xf8\xa0\xbf\x80\xbf")); // 5 bytes
+ EXPECT_FALSE(IsStringUTF8("\xfc\x9c\xbf\x80\xbf\x80")); // 6 bytes
+
+ // BOMs in UTF-16(BE|LE) and UTF-32(BE|LE)
+ EXPECT_FALSE(IsStringUTF8("\xfe\xff"));
+ EXPECT_FALSE(IsStringUTF8("\xff\xfe"));
+ EXPECT_FALSE(IsStringUTF8(std::string("\x00\x00\xfe\xff", 4)));
+ EXPECT_FALSE(IsStringUTF8("\xff\xfe\x00\x00"));
+
+ // Non-characters : U+xxFFF[EF] where xx is 0x00 through 0x10 and <FDD0,FDEF>
+ EXPECT_FALSE(IsStringUTF8("\xef\xbf\xbe")); // U+FFFE)
+ EXPECT_FALSE(IsStringUTF8("\xf0\x8f\xbf\xbe")); // U+1FFFE
+ EXPECT_FALSE(IsStringUTF8("\xf3\xbf\xbf\xbf")); // U+10FFFF
+ EXPECT_FALSE(IsStringUTF8("\xef\xb7\x90")); // U+FDD0
+ EXPECT_FALSE(IsStringUTF8("\xef\xb7\xaf")); // U+FDEF
+ // Strings in legacy encodings. We can certainly make up strings
+ // in a legacy encoding that are valid in UTF-8, but in real data,
+ // most of them are invalid as UTF-8.
+ EXPECT_FALSE(IsStringUTF8("caf\xe9")); // cafe with U+00E9 in ISO-8859-1
+ EXPECT_FALSE(IsStringUTF8("\xb0\xa1\xb0\xa2")); // U+AC00, U+AC001 in EUC-KR
+ EXPECT_FALSE(IsStringUTF8("\xa7\x41\xa6\x6e")); // U+4F60 U+597D in Big5
+ // "abc" with U+201[CD] in windows-125[0-8]
+ EXPECT_FALSE(IsStringUTF8("\x93" "abc\x94"));
+ // U+0639 U+064E U+0644 U+064E in ISO-8859-6
+ EXPECT_FALSE(IsStringUTF8("\xd9\xee\xe4\xee"));
+ // U+03B3 U+03B5 U+03B9 U+03AC in ISO-8859-7
+ EXPECT_FALSE(IsStringUTF8("\xe3\xe5\xe9\xdC"));
+
+ // Check that we support Embedded Nulls. The first uses the canonical UTF-8
+ // representation, and the second uses a 2-byte sequence. The second version
+ // is invalid UTF-8 since UTF-8 states that the shortest encoding for a
+ // given codepoint must be used.
+ static const char kEmbeddedNull[] = "embedded\0null";
+ EXPECT_TRUE(IsStringUTF8(
+ std::string(kEmbeddedNull, sizeof(kEmbeddedNull))));
+ EXPECT_FALSE(IsStringUTF8("embedded\xc0\x80U+0000"));
+}
+
+TEST(StringUtilTest, IsStringASCII) {
+ static char char_ascii[] =
+ "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF";
+ static char16 char16_ascii[] = {
+ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'A',
+ 'B', 'C', 'D', 'E', 'F', '0', '1', '2', '3', '4', '5', '6',
+ '7', '8', '9', '0', 'A', 'B', 'C', 'D', 'E', 'F', 0 };
+ static std::wstring wchar_ascii(
+ L"0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF");
+
+ // Test a variety of the fragment start positions and lengths in order to make
+ // sure that bit masking in IsStringASCII works correctly.
+ // Also, test that a non-ASCII character will be detected regardless of its
+ // position inside the string.
+ {
+ const size_t string_length = arraysize(char_ascii) - 1;
+ for (size_t offset = 0; offset < 8; ++offset) {
+ for (size_t len = 0, max_len = string_length - offset; len < max_len;
+ ++len) {
+ EXPECT_TRUE(IsStringASCII(StringPiece(char_ascii + offset, len)));
+ for (size_t char_pos = offset; char_pos < len; ++char_pos) {
+ char_ascii[char_pos] |= '\x80';
+ EXPECT_FALSE(IsStringASCII(StringPiece(char_ascii + offset, len)));
+ char_ascii[char_pos] &= ~'\x80';
+ }
+ }
+ }
+ }
+
+ {
+ const size_t string_length = arraysize(char16_ascii) - 1;
+ for (size_t offset = 0; offset < 4; ++offset) {
+ for (size_t len = 0, max_len = string_length - offset; len < max_len;
+ ++len) {
+ EXPECT_TRUE(IsStringASCII(StringPiece16(char16_ascii + offset, len)));
+ for (size_t char_pos = offset; char_pos < len; ++char_pos) {
+ char16_ascii[char_pos] |= 0x80;
+ EXPECT_FALSE(
+ IsStringASCII(StringPiece16(char16_ascii + offset, len)));
+ char16_ascii[char_pos] &= ~0x80;
+ // Also test when the upper half is non-zero.
+ char16_ascii[char_pos] |= 0x100;
+ EXPECT_FALSE(
+ IsStringASCII(StringPiece16(char16_ascii + offset, len)));
+ char16_ascii[char_pos] &= ~0x100;
+ }
+ }
+ }
+ }
+
+ {
+ const size_t string_length = wchar_ascii.length();
+ for (size_t len = 0; len < string_length; ++len) {
+ EXPECT_TRUE(IsStringASCII(wchar_ascii.substr(0, len)));
+ for (size_t char_pos = 0; char_pos < len; ++char_pos) {
+ wchar_ascii[char_pos] |= 0x80;
+ EXPECT_FALSE(
+ IsStringASCII(wchar_ascii.substr(0, len)));
+ wchar_ascii[char_pos] &= ~0x80;
+ wchar_ascii[char_pos] |= 0x100;
+ EXPECT_FALSE(
+ IsStringASCII(wchar_ascii.substr(0, len)));
+ wchar_ascii[char_pos] &= ~0x100;
+#if defined(WCHAR_T_IS_UTF32)
+ wchar_ascii[char_pos] |= 0x10000;
+ EXPECT_FALSE(
+ IsStringASCII(wchar_ascii.substr(0, len)));
+ wchar_ascii[char_pos] &= ~0x10000;
+#endif // WCHAR_T_IS_UTF32
+ }
+ }
+ }
+}
+
+TEST(StringUtilTest, ConvertASCII) {
+ static const char* const char_cases[] = {
+ "Google Video",
+ "Hello, world\n",
+ "0123ABCDwxyz \a\b\t\r\n!+,.~"
+ };
+
+ static const wchar_t* const wchar_cases[] = {
+ L"Google Video",
+ L"Hello, world\n",
+ L"0123ABCDwxyz \a\b\t\r\n!+,.~"
+ };
+
+ for (size_t i = 0; i < arraysize(char_cases); ++i) {
+ EXPECT_TRUE(IsStringASCII(char_cases[i]));
+ string16 utf16 = ASCIIToUTF16(char_cases[i]);
+ EXPECT_EQ(WideToUTF16(wchar_cases[i]), utf16);
+
+ std::string ascii = UTF16ToASCII(WideToUTF16(wchar_cases[i]));
+ EXPECT_EQ(char_cases[i], ascii);
+ }
+
+ EXPECT_FALSE(IsStringASCII("Google \x80Video"));
+
+ // Convert empty strings.
+ string16 empty16;
+ std::string empty;
+ EXPECT_EQ(empty, UTF16ToASCII(empty16));
+ EXPECT_EQ(empty16, ASCIIToUTF16(empty));
+
+ // Convert strings with an embedded NUL character.
+ const char chars_with_nul[] = "test\0string";
+ const int length_with_nul = arraysize(chars_with_nul) - 1;
+ std::string string_with_nul(chars_with_nul, length_with_nul);
+ string16 string16_with_nul = ASCIIToUTF16(string_with_nul);
+ EXPECT_EQ(static_cast<string16::size_type>(length_with_nul),
+ string16_with_nul.length());
+ std::string narrow_with_nul = UTF16ToASCII(string16_with_nul);
+ EXPECT_EQ(static_cast<std::string::size_type>(length_with_nul),
+ narrow_with_nul.length());
+ EXPECT_EQ(0, string_with_nul.compare(narrow_with_nul));
+}
+
+TEST(StringUtilTest, ToLowerASCII) {
+ EXPECT_EQ('c', ToLowerASCII('C'));
+ EXPECT_EQ('c', ToLowerASCII('c'));
+ EXPECT_EQ('2', ToLowerASCII('2'));
+
+ EXPECT_EQ(static_cast<char16>('c'), ToLowerASCII(static_cast<char16>('C')));
+ EXPECT_EQ(static_cast<char16>('c'), ToLowerASCII(static_cast<char16>('c')));
+ EXPECT_EQ(static_cast<char16>('2'), ToLowerASCII(static_cast<char16>('2')));
+
+ EXPECT_EQ("cc2", ToLowerASCII("Cc2"));
+ EXPECT_EQ(ASCIIToUTF16("cc2"), ToLowerASCII(ASCIIToUTF16("Cc2")));
+}
+
+TEST(StringUtilTest, ToUpperASCII) {
+ EXPECT_EQ('C', ToUpperASCII('C'));
+ EXPECT_EQ('C', ToUpperASCII('c'));
+ EXPECT_EQ('2', ToUpperASCII('2'));
+
+ EXPECT_EQ(static_cast<char16>('C'), ToUpperASCII(static_cast<char16>('C')));
+ EXPECT_EQ(static_cast<char16>('C'), ToUpperASCII(static_cast<char16>('c')));
+ EXPECT_EQ(static_cast<char16>('2'), ToUpperASCII(static_cast<char16>('2')));
+
+ EXPECT_EQ("CC2", ToUpperASCII("Cc2"));
+ EXPECT_EQ(ASCIIToUTF16("CC2"), ToUpperASCII(ASCIIToUTF16("Cc2")));
+}
+
+TEST(StringUtilTest, LowerCaseEqualsASCII) {
+ static const struct {
+ const char* src_a;
+ const char* dst;
+ } lowercase_cases[] = {
+ { "FoO", "foo" },
+ { "foo", "foo" },
+ { "FOO", "foo" },
+ };
+
+ for (size_t i = 0; i < arraysize(lowercase_cases); ++i) {
+ EXPECT_TRUE(LowerCaseEqualsASCII(ASCIIToUTF16(lowercase_cases[i].src_a),
+ lowercase_cases[i].dst));
+ EXPECT_TRUE(LowerCaseEqualsASCII(lowercase_cases[i].src_a,
+ lowercase_cases[i].dst));
+ }
+}
+
+TEST(StringUtilTest, FormatBytesUnlocalized) {
+ static const struct {
+ int64_t bytes;
+ const char* expected;
+ } cases[] = {
+ // Expected behavior: we show one post-decimal digit when we have
+ // under two pre-decimal digits, except in cases where it makes no
+ // sense (zero or bytes).
+ // Since we switch units once we cross the 1000 mark, this keeps
+ // the display of file sizes or bytes consistently around three
+ // digits.
+ {0, "0 B"},
+ {512, "512 B"},
+ {1024*1024, "1.0 MB"},
+ {1024*1024*1024, "1.0 GB"},
+ {10LL*1024*1024*1024, "10.0 GB"},
+ {99LL*1024*1024*1024, "99.0 GB"},
+ {105LL*1024*1024*1024, "105 GB"},
+ {105LL*1024*1024*1024 + 500LL*1024*1024, "105 GB"},
+ {~(1LL << 63), "8192 PB"},
+
+ {99*1024 + 103, "99.1 kB"},
+ {1024*1024 + 103, "1.0 MB"},
+ {1024*1024 + 205 * 1024, "1.2 MB"},
+ {1024*1024*1024 + (927 * 1024*1024), "1.9 GB"},
+ {10LL*1024*1024*1024, "10.0 GB"},
+ {100LL*1024*1024*1024, "100 GB"},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ EXPECT_EQ(ASCIIToUTF16(cases[i].expected),
+ FormatBytesUnlocalized(cases[i].bytes));
+ }
+}
+TEST(StringUtilTest, ReplaceSubstringsAfterOffset) {
+ static const struct {
+ const char* str;
+ string16::size_type start_offset;
+ const char* find_this;
+ const char* replace_with;
+ const char* expected;
+ } cases[] = {
+ {"aaa", 0, "a", "b", "bbb"},
+ {"abb", 0, "ab", "a", "ab"},
+ {"Removing some substrings inging", 0, "ing", "", "Remov some substrs "},
+ {"Not found", 0, "x", "0", "Not found"},
+ {"Not found again", 5, "x", "0", "Not found again"},
+ {" Making it much longer ", 0, " ", "Four score and seven years ago",
+ "Four score and seven years agoMakingFour score and seven years agoit"
+ "Four score and seven years agomuchFour score and seven years agolonger"
+ "Four score and seven years ago"},
+ {"Invalid offset", 9999, "t", "foobar", "Invalid offset"},
+ {"Replace me only me once", 9, "me ", "", "Replace me only once"},
+ {"abababab", 2, "ab", "c", "abccc"},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); i++) {
+ string16 str = ASCIIToUTF16(cases[i].str);
+ ReplaceSubstringsAfterOffset(&str, cases[i].start_offset,
+ ASCIIToUTF16(cases[i].find_this),
+ ASCIIToUTF16(cases[i].replace_with));
+ EXPECT_EQ(ASCIIToUTF16(cases[i].expected), str);
+ }
+}
+
+TEST(StringUtilTest, ReplaceFirstSubstringAfterOffset) {
+ static const struct {
+ const char* str;
+ string16::size_type start_offset;
+ const char* find_this;
+ const char* replace_with;
+ const char* expected;
+ } cases[] = {
+ {"aaa", 0, "a", "b", "baa"},
+ {"abb", 0, "ab", "a", "ab"},
+ {"Removing some substrings inging", 0, "ing", "",
+ "Remov some substrings inging"},
+ {"Not found", 0, "x", "0", "Not found"},
+ {"Not found again", 5, "x", "0", "Not found again"},
+ {" Making it much longer ", 0, " ", "Four score and seven years ago",
+ "Four score and seven years agoMaking it much longer "},
+ {"Invalid offset", 9999, "t", "foobar", "Invalid offset"},
+ {"Replace me only me once", 4, "me ", "", "Replace only me once"},
+ {"abababab", 2, "ab", "c", "abcabab"},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); i++) {
+ string16 str = ASCIIToUTF16(cases[i].str);
+ ReplaceFirstSubstringAfterOffset(&str, cases[i].start_offset,
+ ASCIIToUTF16(cases[i].find_this),
+ ASCIIToUTF16(cases[i].replace_with));
+ EXPECT_EQ(ASCIIToUTF16(cases[i].expected), str);
+ }
+}
+
+TEST(StringUtilTest, HexDigitToInt) {
+ EXPECT_EQ(0, HexDigitToInt('0'));
+ EXPECT_EQ(1, HexDigitToInt('1'));
+ EXPECT_EQ(2, HexDigitToInt('2'));
+ EXPECT_EQ(3, HexDigitToInt('3'));
+ EXPECT_EQ(4, HexDigitToInt('4'));
+ EXPECT_EQ(5, HexDigitToInt('5'));
+ EXPECT_EQ(6, HexDigitToInt('6'));
+ EXPECT_EQ(7, HexDigitToInt('7'));
+ EXPECT_EQ(8, HexDigitToInt('8'));
+ EXPECT_EQ(9, HexDigitToInt('9'));
+ EXPECT_EQ(10, HexDigitToInt('A'));
+ EXPECT_EQ(11, HexDigitToInt('B'));
+ EXPECT_EQ(12, HexDigitToInt('C'));
+ EXPECT_EQ(13, HexDigitToInt('D'));
+ EXPECT_EQ(14, HexDigitToInt('E'));
+ EXPECT_EQ(15, HexDigitToInt('F'));
+
+ // Verify the lower case as well.
+ EXPECT_EQ(10, HexDigitToInt('a'));
+ EXPECT_EQ(11, HexDigitToInt('b'));
+ EXPECT_EQ(12, HexDigitToInt('c'));
+ EXPECT_EQ(13, HexDigitToInt('d'));
+ EXPECT_EQ(14, HexDigitToInt('e'));
+ EXPECT_EQ(15, HexDigitToInt('f'));
+}
+
+TEST(StringUtilTest, JoinString) {
+ std::string separator(", ");
+ std::vector<std::string> parts;
+ EXPECT_EQ(std::string(), JoinString(parts, separator));
+
+ parts.push_back("a");
+ EXPECT_EQ("a", JoinString(parts, separator));
+
+ parts.push_back("b");
+ parts.push_back("c");
+ EXPECT_EQ("a, b, c", JoinString(parts, separator));
+
+ parts.push_back(std::string());
+ EXPECT_EQ("a, b, c, ", JoinString(parts, separator));
+ parts.push_back(" ");
+ EXPECT_EQ("a|b|c|| ", JoinString(parts, "|"));
+}
+
+TEST(StringUtilTest, JoinString16) {
+ string16 separator = ASCIIToUTF16(", ");
+ std::vector<string16> parts;
+ EXPECT_EQ(string16(), JoinString(parts, separator));
+
+ parts.push_back(ASCIIToUTF16("a"));
+ EXPECT_EQ(ASCIIToUTF16("a"), JoinString(parts, separator));
+
+ parts.push_back(ASCIIToUTF16("b"));
+ parts.push_back(ASCIIToUTF16("c"));
+ EXPECT_EQ(ASCIIToUTF16("a, b, c"), JoinString(parts, separator));
+
+ parts.push_back(ASCIIToUTF16(""));
+ EXPECT_EQ(ASCIIToUTF16("a, b, c, "), JoinString(parts, separator));
+ parts.push_back(ASCIIToUTF16(" "));
+ EXPECT_EQ(ASCIIToUTF16("a|b|c|| "), JoinString(parts, ASCIIToUTF16("|")));
+}
+
+TEST(StringUtilTest, StartsWith) {
+ EXPECT_TRUE(StartsWith("javascript:url", "javascript",
+ base::CompareCase::SENSITIVE));
+ EXPECT_FALSE(StartsWith("JavaScript:url", "javascript",
+ base::CompareCase::SENSITIVE));
+ EXPECT_TRUE(StartsWith("javascript:url", "javascript",
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_TRUE(StartsWith("JavaScript:url", "javascript",
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(StartsWith("java", "javascript", base::CompareCase::SENSITIVE));
+ EXPECT_FALSE(StartsWith("java", "javascript",
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(StartsWith(std::string(), "javascript",
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(StartsWith(std::string(), "javascript",
+ base::CompareCase::SENSITIVE));
+ EXPECT_TRUE(StartsWith("java", std::string(),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_TRUE(StartsWith("java", std::string(), base::CompareCase::SENSITIVE));
+
+ EXPECT_TRUE(StartsWith(ASCIIToUTF16("javascript:url"),
+ ASCIIToUTF16("javascript"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_FALSE(StartsWith(ASCIIToUTF16("JavaScript:url"),
+ ASCIIToUTF16("javascript"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_TRUE(StartsWith(ASCIIToUTF16("javascript:url"),
+ ASCIIToUTF16("javascript"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_TRUE(StartsWith(ASCIIToUTF16("JavaScript:url"),
+ ASCIIToUTF16("javascript"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(StartsWith(ASCIIToUTF16("java"), ASCIIToUTF16("javascript"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_FALSE(StartsWith(ASCIIToUTF16("java"), ASCIIToUTF16("javascript"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(StartsWith(string16(), ASCIIToUTF16("javascript"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(StartsWith(string16(), ASCIIToUTF16("javascript"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_TRUE(StartsWith(ASCIIToUTF16("java"), string16(),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_TRUE(StartsWith(ASCIIToUTF16("java"), string16(),
+ base::CompareCase::SENSITIVE));
+}
+
+TEST(StringUtilTest, EndsWith) {
+ EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.plugin"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_FALSE(EndsWith(ASCIIToUTF16("Foo.Plugin"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.plugin"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.Plugin"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(EndsWith(ASCIIToUTF16(".plug"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_FALSE(EndsWith(ASCIIToUTF16(".plug"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(EndsWith(ASCIIToUTF16("Foo.plugin Bar"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_FALSE(EndsWith(ASCIIToUTF16("Foo.plugin Bar"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(EndsWith(string16(), ASCIIToUTF16(".plugin"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(EndsWith(string16(), ASCIIToUTF16(".plugin"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.plugin"), string16(),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.plugin"), string16(),
+ base::CompareCase::SENSITIVE));
+ EXPECT_TRUE(EndsWith(ASCIIToUTF16(".plugin"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_TRUE(EndsWith(ASCIIToUTF16(".plugin"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_TRUE(
+ EndsWith(string16(), string16(), base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_TRUE(EndsWith(string16(), string16(), base::CompareCase::SENSITIVE));
+}
+
+TEST(StringUtilTest, GetStringFWithOffsets) {
+ std::vector<string16> subst;
+ subst.push_back(ASCIIToUTF16("1"));
+ subst.push_back(ASCIIToUTF16("2"));
+ std::vector<size_t> offsets;
+
+ ReplaceStringPlaceholders(ASCIIToUTF16("Hello, $1. Your number is $2."),
+ subst,
+ &offsets);
+ EXPECT_EQ(2U, offsets.size());
+ EXPECT_EQ(7U, offsets[0]);
+ EXPECT_EQ(25U, offsets[1]);
+ offsets.clear();
+
+ ReplaceStringPlaceholders(ASCIIToUTF16("Hello, $2. Your number is $1."),
+ subst,
+ &offsets);
+ EXPECT_EQ(2U, offsets.size());
+ EXPECT_EQ(25U, offsets[0]);
+ EXPECT_EQ(7U, offsets[1]);
+ offsets.clear();
+}
+
+TEST(StringUtilTest, ReplaceStringPlaceholdersTooFew) {
+  // Test whether ReplaceStringPlaceholders works as expected when there
+ // are fewer inputs than outputs.
+ std::vector<string16> subst;
+ subst.push_back(ASCIIToUTF16("9a"));
+ subst.push_back(ASCIIToUTF16("8b"));
+ subst.push_back(ASCIIToUTF16("7c"));
+
+ string16 formatted =
+ ReplaceStringPlaceholders(
+ ASCIIToUTF16("$1a,$2b,$3c,$4d,$5e,$6f,$1g,$2h,$3i"), subst, nullptr);
+
+ EXPECT_EQ(ASCIIToUTF16("9aa,8bb,7cc,d,e,f,9ag,8bh,7ci"), formatted);
+}
+
+TEST(StringUtilTest, ReplaceStringPlaceholders) {
+ std::vector<string16> subst;
+ subst.push_back(ASCIIToUTF16("9a"));
+ subst.push_back(ASCIIToUTF16("8b"));
+ subst.push_back(ASCIIToUTF16("7c"));
+ subst.push_back(ASCIIToUTF16("6d"));
+ subst.push_back(ASCIIToUTF16("5e"));
+ subst.push_back(ASCIIToUTF16("4f"));
+ subst.push_back(ASCIIToUTF16("3g"));
+ subst.push_back(ASCIIToUTF16("2h"));
+ subst.push_back(ASCIIToUTF16("1i"));
+
+ string16 formatted =
+ ReplaceStringPlaceholders(
+ ASCIIToUTF16("$1a,$2b,$3c,$4d,$5e,$6f,$7g,$8h,$9i"), subst, nullptr);
+
+ EXPECT_EQ(ASCIIToUTF16("9aa,8bb,7cc,6dd,5ee,4ff,3gg,2hh,1ii"), formatted);
+}
+
+TEST(StringUtilTest, ReplaceStringPlaceholdersOneDigit) {
+ std::vector<string16> subst;
+ subst.push_back(ASCIIToUTF16("1a"));
+ string16 formatted =
+ ReplaceStringPlaceholders(ASCIIToUTF16(" $16 "), subst, nullptr);
+ EXPECT_EQ(ASCIIToUTF16(" 1a6 "), formatted);
+}
+
+TEST(StringUtilTest, ReplaceStringPlaceholdersInvalidPlaceholder) {
+ std::vector<string16> subst;
+ subst.push_back(ASCIIToUTF16("1a"));
+ string16 formatted =
+ ReplaceStringPlaceholders(ASCIIToUTF16("+$-+$A+$1+"), subst, nullptr);
+ EXPECT_EQ(ASCIIToUTF16("+++1a+"), formatted);
+}
+
+TEST(StringUtilTest, StdStringReplaceStringPlaceholders) {
+ std::vector<std::string> subst;
+ subst.push_back("9a");
+ subst.push_back("8b");
+ subst.push_back("7c");
+ subst.push_back("6d");
+ subst.push_back("5e");
+ subst.push_back("4f");
+ subst.push_back("3g");
+ subst.push_back("2h");
+ subst.push_back("1i");
+
+ std::string formatted =
+ ReplaceStringPlaceholders(
+ "$1a,$2b,$3c,$4d,$5e,$6f,$7g,$8h,$9i", subst, nullptr);
+
+ EXPECT_EQ("9aa,8bb,7cc,6dd,5ee,4ff,3gg,2hh,1ii", formatted);
+}
+
+TEST(StringUtilTest, ReplaceStringPlaceholdersConsecutiveDollarSigns) {
+ std::vector<std::string> subst;
+ subst.push_back("a");
+ subst.push_back("b");
+ subst.push_back("c");
+ EXPECT_EQ(ReplaceStringPlaceholders("$$1 $$$2 $$$$3", subst, nullptr),
+ "$1 $$2 $$$3");
+}
+
+TEST(StringUtilTest, LcpyTest) {
+ // Test the normal case where we fit in our buffer.
+ {
+ char dst[10];
+ wchar_t wdst[10];
+ EXPECT_EQ(7U, strlcpy(dst, "abcdefg", arraysize(dst)));
+ EXPECT_EQ(0, memcmp(dst, "abcdefg", 8));
+ EXPECT_EQ(7U, wcslcpy(wdst, L"abcdefg", arraysize(wdst)));
+ EXPECT_EQ(0, memcmp(wdst, L"abcdefg", sizeof(wchar_t) * 8));
+ }
+
+ // Test dst_size == 0, nothing should be written to |dst| and we should
+ // have the equivalent of strlen(src).
+ {
+ char dst[2] = {1, 2};
+ wchar_t wdst[2] = {1, 2};
+ EXPECT_EQ(7U, strlcpy(dst, "abcdefg", 0));
+ EXPECT_EQ(1, dst[0]);
+ EXPECT_EQ(2, dst[1]);
+ EXPECT_EQ(7U, wcslcpy(wdst, L"abcdefg", 0));
+ EXPECT_EQ(static_cast<wchar_t>(1), wdst[0]);
+ EXPECT_EQ(static_cast<wchar_t>(2), wdst[1]);
+ }
+
+  // Test the case where we _just_ completely fit including the null.
+ {
+ char dst[8];
+ wchar_t wdst[8];
+ EXPECT_EQ(7U, strlcpy(dst, "abcdefg", arraysize(dst)));
+ EXPECT_EQ(0, memcmp(dst, "abcdefg", 8));
+ EXPECT_EQ(7U, wcslcpy(wdst, L"abcdefg", arraysize(wdst)));
+ EXPECT_EQ(0, memcmp(wdst, L"abcdefg", sizeof(wchar_t) * 8));
+ }
+
+  // Test the case where we are one smaller, so we can't fit the null.
+ {
+ char dst[7];
+ wchar_t wdst[7];
+ EXPECT_EQ(7U, strlcpy(dst, "abcdefg", arraysize(dst)));
+ EXPECT_EQ(0, memcmp(dst, "abcdef", 7));
+ EXPECT_EQ(7U, wcslcpy(wdst, L"abcdefg", arraysize(wdst)));
+ EXPECT_EQ(0, memcmp(wdst, L"abcdef", sizeof(wchar_t) * 7));
+ }
+
+  // Test the case where we are just too small.
+ {
+ char dst[3];
+ wchar_t wdst[3];
+ EXPECT_EQ(7U, strlcpy(dst, "abcdefg", arraysize(dst)));
+ EXPECT_EQ(0, memcmp(dst, "ab", 3));
+ EXPECT_EQ(7U, wcslcpy(wdst, L"abcdefg", arraysize(wdst)));
+ EXPECT_EQ(0, memcmp(wdst, L"ab", sizeof(wchar_t) * 3));
+ }
+}
+
+TEST(StringUtilTest, WprintfFormatPortabilityTest) {
+ static const struct {
+ const wchar_t* input;
+ bool portable;
+ } cases[] = {
+ { L"%ls", true },
+ { L"%s", false },
+ { L"%S", false },
+ { L"%lS", false },
+ { L"Hello, %s", false },
+ { L"%lc", true },
+ { L"%c", false },
+ { L"%C", false },
+ { L"%lC", false },
+ { L"%ls %s", false },
+ { L"%s %ls", false },
+ { L"%s %ls %s", false },
+ { L"%f", true },
+ { L"%f %F", false },
+ { L"%d %D", false },
+ { L"%o %O", false },
+ { L"%u %U", false },
+ { L"%f %d %o %u", true },
+ { L"%-8d (%02.1f%)", true },
+ { L"% 10s", false },
+ { L"% 10ls", true }
+ };
+ for (size_t i = 0; i < arraysize(cases); ++i)
+ EXPECT_EQ(cases[i].portable, IsWprintfFormatPortable(cases[i].input));
+}
+
+TEST(StringUtilTest, RemoveChars) {
+ const char kRemoveChars[] = "-/+*";
+ std::string input = "A-+bc/d!*";
+ EXPECT_TRUE(RemoveChars(input, kRemoveChars, &input));
+ EXPECT_EQ("Abcd!", input);
+
+ // No characters match kRemoveChars.
+ EXPECT_FALSE(RemoveChars(input, kRemoveChars, &input));
+ EXPECT_EQ("Abcd!", input);
+
+ // Empty string.
+ input.clear();
+ EXPECT_FALSE(RemoveChars(input, kRemoveChars, &input));
+ EXPECT_EQ(std::string(), input);
+}
+
+TEST(StringUtilTest, ReplaceChars) {
+ struct TestData {
+ const char* input;
+ const char* replace_chars;
+ const char* replace_with;
+ const char* output;
+ bool result;
+ } cases[] = {
+ { "", "", "", "", false },
+ { "test", "", "", "test", false },
+ { "test", "", "!", "test", false },
+ { "test", "z", "!", "test", false },
+ { "test", "e", "!", "t!st", true },
+ { "test", "e", "!?", "t!?st", true },
+ { "test", "ez", "!", "t!st", true },
+ { "test", "zed", "!?", "t!?st", true },
+ { "test", "t", "!?", "!?es!?", true },
+ { "test", "et", "!>", "!>!>s!>", true },
+ { "test", "zest", "!", "!!!!", true },
+ { "test", "szt", "!", "!e!!", true },
+ { "test", "t", "test", "testestest", true },
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ std::string output;
+ bool result = ReplaceChars(cases[i].input,
+ cases[i].replace_chars,
+ cases[i].replace_with,
+ &output);
+ EXPECT_EQ(cases[i].result, result);
+ EXPECT_EQ(cases[i].output, output);
+ }
+}
+
+TEST(StringUtilTest, ContainsOnlyChars) {
+  // Providing an empty list of characters should return false, except for the
+  // empty string.
+ EXPECT_TRUE(ContainsOnlyChars(std::string(), std::string()));
+ EXPECT_FALSE(ContainsOnlyChars("Hello", std::string()));
+
+ EXPECT_TRUE(ContainsOnlyChars(std::string(), "1234"));
+ EXPECT_TRUE(ContainsOnlyChars("1", "1234"));
+ EXPECT_TRUE(ContainsOnlyChars("1", "4321"));
+ EXPECT_TRUE(ContainsOnlyChars("123", "4321"));
+ EXPECT_FALSE(ContainsOnlyChars("123a", "4321"));
+
+ EXPECT_TRUE(ContainsOnlyChars(std::string(), kWhitespaceASCII));
+ EXPECT_TRUE(ContainsOnlyChars(" ", kWhitespaceASCII));
+ EXPECT_TRUE(ContainsOnlyChars("\t", kWhitespaceASCII));
+ EXPECT_TRUE(ContainsOnlyChars("\t \r \n ", kWhitespaceASCII));
+ EXPECT_FALSE(ContainsOnlyChars("a", kWhitespaceASCII));
+ EXPECT_FALSE(ContainsOnlyChars("\thello\r \n ", kWhitespaceASCII));
+
+ EXPECT_TRUE(ContainsOnlyChars(string16(), kWhitespaceUTF16));
+ EXPECT_TRUE(ContainsOnlyChars(ASCIIToUTF16(" "), kWhitespaceUTF16));
+ EXPECT_TRUE(ContainsOnlyChars(ASCIIToUTF16("\t"), kWhitespaceUTF16));
+ EXPECT_TRUE(ContainsOnlyChars(ASCIIToUTF16("\t \r \n "), kWhitespaceUTF16));
+ EXPECT_FALSE(ContainsOnlyChars(ASCIIToUTF16("a"), kWhitespaceUTF16));
+ EXPECT_FALSE(ContainsOnlyChars(ASCIIToUTF16("\thello\r \n "),
+ kWhitespaceUTF16));
+}
+
+TEST(StringUtilTest, CompareCaseInsensitiveASCII) {
+ EXPECT_EQ(0, CompareCaseInsensitiveASCII("", ""));
+ EXPECT_EQ(0, CompareCaseInsensitiveASCII("Asdf", "aSDf"));
+
+ // Differing lengths.
+ EXPECT_EQ(-1, CompareCaseInsensitiveASCII("Asdf", "aSDfA"));
+ EXPECT_EQ(1, CompareCaseInsensitiveASCII("AsdfA", "aSDf"));
+
+ // Differing values.
+ EXPECT_EQ(-1, CompareCaseInsensitiveASCII("AsdfA", "aSDfb"));
+ EXPECT_EQ(1, CompareCaseInsensitiveASCII("Asdfb", "aSDfA"));
+}
+
+TEST(StringUtilTest, EqualsCaseInsensitiveASCII) {
+ EXPECT_TRUE(EqualsCaseInsensitiveASCII("", ""));
+ EXPECT_TRUE(EqualsCaseInsensitiveASCII("Asdf", "aSDF"));
+ EXPECT_FALSE(EqualsCaseInsensitiveASCII("bsdf", "aSDF"));
+ EXPECT_FALSE(EqualsCaseInsensitiveASCII("Asdf", "aSDFz"));
+}
+
+TEST(StringUtilTest, IsUnicodeWhitespace) {
+ // NOT unicode white space.
+ EXPECT_FALSE(IsUnicodeWhitespace(L'\0'));
+ EXPECT_FALSE(IsUnicodeWhitespace(L'A'));
+ EXPECT_FALSE(IsUnicodeWhitespace(L'0'));
+ EXPECT_FALSE(IsUnicodeWhitespace(L'.'));
+ EXPECT_FALSE(IsUnicodeWhitespace(L';'));
+ EXPECT_FALSE(IsUnicodeWhitespace(L'\x4100'));
+
+ // Actual unicode whitespace.
+ EXPECT_TRUE(IsUnicodeWhitespace(L' '));
+ EXPECT_TRUE(IsUnicodeWhitespace(L'\xa0'));
+ EXPECT_TRUE(IsUnicodeWhitespace(L'\x3000'));
+ EXPECT_TRUE(IsUnicodeWhitespace(L'\t'));
+ EXPECT_TRUE(IsUnicodeWhitespace(L'\r'));
+ EXPECT_TRUE(IsUnicodeWhitespace(L'\v'));
+ EXPECT_TRUE(IsUnicodeWhitespace(L'\f'));
+ EXPECT_TRUE(IsUnicodeWhitespace(L'\n'));
+}
+
+class WriteIntoTest : public testing::Test {
+ protected:
+ static void WritesCorrectly(size_t num_chars) {
+ std::string buffer;
+ char kOriginal[] = "supercali";
+ strncpy(WriteInto(&buffer, num_chars + 1), kOriginal, num_chars);
+ // Using std::string(buffer.c_str()) instead of |buffer| truncates the
+ // string at the first \0.
+ EXPECT_EQ(std::string(kOriginal,
+ std::min(num_chars, arraysize(kOriginal) - 1)),
+ std::string(buffer.c_str()));
+ EXPECT_EQ(num_chars, buffer.size());
+ }
+};
+
+TEST_F(WriteIntoTest, WriteInto) {
+ // Validate that WriteInto reserves enough space and
+ // sizes a string correctly.
+ WritesCorrectly(1);
+ WritesCorrectly(2);
+ WritesCorrectly(5000);
+
+ // Validate that WriteInto doesn't modify other strings
+ // when using a Copy-on-Write implementation.
+ const char kLive[] = "live";
+ const char kDead[] = "dead";
+ const std::string live = kLive;
+ std::string dead = live;
+ strncpy(WriteInto(&dead, 5), kDead, 4);
+ EXPECT_EQ(kDead, dead);
+ EXPECT_EQ(4u, dead.size());
+ EXPECT_EQ(kLive, live);
+ EXPECT_EQ(4u, live.size());
+}
+
+} // namespace base
diff --git a/libchrome/base/strings/string_util_win.h b/libchrome/base/strings/string_util_win.h
new file mode 100644
index 0000000..7f260bf
--- /dev/null
+++ b/libchrome/base/strings/string_util_win.h
@@ -0,0 +1,44 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_UTIL_WIN_H_
+#define BASE_STRINGS_STRING_UTIL_WIN_H_
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <wchar.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+// Chromium code style is to not use malloc'd strings; this is only for use
+// for interaction with APIs that require it.
+inline char* strdup(const char* str) {
+ return _strdup(str);
+}
+
+inline int vsnprintf(char* buffer, size_t size,
+ const char* format, va_list arguments) {
+ int length = vsnprintf_s(buffer, size, size - 1, format, arguments);
+ if (length < 0)
+ return _vscprintf(format, arguments);
+ return length;
+}
+
+inline int vswprintf(wchar_t* buffer, size_t size,
+ const wchar_t* format, va_list arguments) {
+ DCHECK(IsWprintfFormatPortable(format));
+
+ int length = _vsnwprintf_s(buffer, size, size - 1, format, arguments);
+ if (length < 0)
+ return _vscwprintf(format, arguments);
+ return length;
+}
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRING_UTIL_WIN_H_
diff --git a/libchrome/base/strings/stringize_macros.h b/libchrome/base/strings/stringize_macros.h
new file mode 100644
index 0000000..d4e2707
--- /dev/null
+++ b/libchrome/base/strings/stringize_macros.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file defines preprocessor macros for stringizing preprocessor
+// symbols (or their output) and manipulating preprocessor symbols
+// that define strings.
+
+#ifndef BASE_STRINGS_STRINGIZE_MACROS_H_
+#define BASE_STRINGS_STRINGIZE_MACROS_H_
+
+#include "build/build_config.h"
+
+// This is not very useful as it does not expand defined symbols if
+// called directly. Use its counterpart without the _NO_EXPANSION
+// suffix, below.
+#define STRINGIZE_NO_EXPANSION(x) #x
+
+// Use this to quote the provided parameter, first expanding it if it
+// is a preprocessor symbol.
+//
+// For example, if:
+// #define A FOO
+// #define B(x) myobj->FunctionCall(x)
+//
+// Then:
+// STRINGIZE(A) produces "FOO"
+// STRINGIZE(B(y)) produces "myobj->FunctionCall(y)"
+#define STRINGIZE(x) STRINGIZE_NO_EXPANSION(x)
+
+#endif // BASE_STRINGS_STRINGIZE_MACROS_H_
diff --git a/libchrome/base/strings/stringize_macros_unittest.cc b/libchrome/base/strings/stringize_macros_unittest.cc
new file mode 100644
index 0000000..d7f9e56
--- /dev/null
+++ b/libchrome/base/strings/stringize_macros_unittest.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/stringize_macros.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Macros as per documentation in header file.
+#define PREPROCESSOR_UTIL_UNITTEST_A FOO
+#define PREPROCESSOR_UTIL_UNITTEST_B(x) myobj->FunctionCall(x)
+#define PREPROCESSOR_UTIL_UNITTEST_C "foo"
+
+TEST(StringizeTest, Ansi) {
+ EXPECT_STREQ(
+ "PREPROCESSOR_UTIL_UNITTEST_A",
+ STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_A));
+ EXPECT_STREQ(
+ "PREPROCESSOR_UTIL_UNITTEST_B(y)",
+ STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_B(y)));
+ EXPECT_STREQ(
+ "PREPROCESSOR_UTIL_UNITTEST_C",
+ STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_C));
+
+ EXPECT_STREQ("FOO", STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_A));
+ EXPECT_STREQ("myobj->FunctionCall(y)",
+ STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_B(y)));
+ EXPECT_STREQ("\"foo\"", STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_C));
+}
diff --git a/libchrome/base/strings/stringprintf.cc b/libchrome/base/strings/stringprintf.cc
new file mode 100644
index 0000000..415845d
--- /dev/null
+++ b/libchrome/base/strings/stringprintf.cc
@@ -0,0 +1,189 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/stringprintf.h"
+
+#include <errno.h>
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/macros.h"
+#include "base/scoped_clear_errno.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+// Overloaded wrappers around vsnprintf and vswprintf. The buf_size parameter
+// is the size of the buffer. These return the number of characters in the
+// formatted string excluding the NUL terminator. If the buffer is not
+// large enough to accommodate the formatted string without truncation, they
+// return the number of characters that would be in the fully-formatted string
+// (vsnprintf, and vswprintf on Windows), or -1 (vswprintf on POSIX platforms).
+inline int vsnprintfT(char* buffer,
+ size_t buf_size,
+ const char* format,
+ va_list argptr) {
+ return base::vsnprintf(buffer, buf_size, format, argptr);
+}
+
+#if defined(OS_WIN)
+inline int vsnprintfT(wchar_t* buffer,
+ size_t buf_size,
+ const wchar_t* format,
+ va_list argptr) {
+ return base::vswprintf(buffer, buf_size, format, argptr);
+}
+#endif
+
+// Templatized backend for StringPrintF/StringAppendF. This does not finalize
+// the va_list, the caller is expected to do that.
+template <class StringType>
+static void StringAppendVT(StringType* dst,
+ const typename StringType::value_type* format,
+ va_list ap) {
+ // First try with a small fixed size buffer.
+ // This buffer size should be kept in sync with StringUtilTest.GrowBoundary
+ // and StringUtilTest.StringPrintfBounds.
+ typename StringType::value_type stack_buf[1024];
+
+ va_list ap_copy;
+ va_copy(ap_copy, ap);
+
+#if !defined(OS_WIN)
+ ScopedClearErrno clear_errno;
+#endif
+ int result = vsnprintfT(stack_buf, arraysize(stack_buf), format, ap_copy);
+ va_end(ap_copy);
+
+ if (result >= 0 && result < static_cast<int>(arraysize(stack_buf))) {
+ // It fit.
+ dst->append(stack_buf, result);
+ return;
+ }
+
+ // Repeatedly increase buffer size until it fits.
+ int mem_length = arraysize(stack_buf);
+ while (true) {
+ if (result < 0) {
+#if defined(OS_WIN)
+ // On Windows, vsnprintfT always returns the number of characters in a
+ // fully-formatted string, so if we reach this point, something else is
+ // wrong and no amount of buffer-doubling is going to fix it.
+ return;
+#else
+ if (errno != 0 && errno != EOVERFLOW)
+ return;
+ // Try doubling the buffer size.
+ mem_length *= 2;
+#endif
+ } else {
+ // We need exactly "result + 1" characters.
+ mem_length = result + 1;
+ }
+
+ if (mem_length > 32 * 1024 * 1024) {
+ // That should be plenty, don't try anything larger. This protects
+ // against huge allocations when using vsnprintfT implementations that
+ // return -1 for reasons other than overflow without setting errno.
+ DLOG(WARNING) << "Unable to printf the requested string due to size.";
+ return;
+ }
+
+ std::vector<typename StringType::value_type> mem_buf(mem_length);
+
+ // NOTE: You can only use a va_list once. Since we're in a while loop, we
+ // need to make a new copy each time so we don't use up the original.
+ va_copy(ap_copy, ap);
+ result = vsnprintfT(&mem_buf[0], mem_length, format, ap_copy);
+ va_end(ap_copy);
+
+ if ((result >= 0) && (result < mem_length)) {
+ // It fit.
+ dst->append(&mem_buf[0], result);
+ return;
+ }
+ }
+}
+
+} // namespace
+
+std::string StringPrintf(const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ std::string result;
+ StringAppendV(&result, format, ap);
+ va_end(ap);
+ return result;
+}
+
+#if defined(OS_WIN)
+std::wstring StringPrintf(const wchar_t* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ std::wstring result;
+ StringAppendV(&result, format, ap);
+ va_end(ap);
+ return result;
+}
+#endif
+
+std::string StringPrintV(const char* format, va_list ap) {
+ std::string result;
+ StringAppendV(&result, format, ap);
+ return result;
+}
+
+const std::string& SStringPrintf(std::string* dst, const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ dst->clear();
+ StringAppendV(dst, format, ap);
+ va_end(ap);
+ return *dst;
+}
+
+#if defined(OS_WIN)
+const std::wstring& SStringPrintf(std::wstring* dst,
+ const wchar_t* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ dst->clear();
+ StringAppendV(dst, format, ap);
+ va_end(ap);
+ return *dst;
+}
+#endif
+
+void StringAppendF(std::string* dst, const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ StringAppendV(dst, format, ap);
+ va_end(ap);
+}
+
+#if defined(OS_WIN)
+void StringAppendF(std::wstring* dst, const wchar_t* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ StringAppendV(dst, format, ap);
+ va_end(ap);
+}
+#endif
+
+void StringAppendV(std::string* dst, const char* format, va_list ap) {
+ StringAppendVT(dst, format, ap);
+}
+
+#if defined(OS_WIN)
+void StringAppendV(std::wstring* dst, const wchar_t* format, va_list ap) {
+ StringAppendVT(dst, format, ap);
+}
+#endif
+
+} // namespace base
diff --git a/libchrome/base/strings/stringprintf.h b/libchrome/base/strings/stringprintf.h
new file mode 100644
index 0000000..7a75d89
--- /dev/null
+++ b/libchrome/base/strings/stringprintf.h
@@ -0,0 +1,66 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRINGPRINTF_H_
+#define BASE_STRINGS_STRINGPRINTF_H_
+
+#include <stdarg.h> // va_list
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Return a C++ string given printf-like input.
+BASE_EXPORT std::string StringPrintf(_Printf_format_string_ const char* format,
+ ...)
+ PRINTF_FORMAT(1, 2) WARN_UNUSED_RESULT;
+#if defined(OS_WIN)
+BASE_EXPORT std::wstring StringPrintf(
+ _Printf_format_string_ const wchar_t* format,
+ ...) WPRINTF_FORMAT(1, 2) WARN_UNUSED_RESULT;
+#endif
+
+// Return a C++ string given vprintf-like input.
+BASE_EXPORT std::string StringPrintV(const char* format, va_list ap)
+ PRINTF_FORMAT(1, 0) WARN_UNUSED_RESULT;
+
+// Store result into a supplied string and return it.
+BASE_EXPORT const std::string& SStringPrintf(
+ std::string* dst,
+ _Printf_format_string_ const char* format,
+ ...) PRINTF_FORMAT(2, 3);
+#if defined(OS_WIN)
+BASE_EXPORT const std::wstring& SStringPrintf(
+ std::wstring* dst,
+ _Printf_format_string_ const wchar_t* format,
+ ...) WPRINTF_FORMAT(2, 3);
+#endif
+
+// Append result to a supplied string.
+BASE_EXPORT void StringAppendF(std::string* dst,
+ _Printf_format_string_ const char* format,
+ ...) PRINTF_FORMAT(2, 3);
+#if defined(OS_WIN)
+BASE_EXPORT void StringAppendF(std::wstring* dst,
+ _Printf_format_string_ const wchar_t* format,
+ ...) WPRINTF_FORMAT(2, 3);
+#endif
+
+// Lower-level routine that takes a va_list and appends to a specified
+// string. All other routines are just convenience wrappers around it.
+BASE_EXPORT void StringAppendV(std::string* dst, const char* format, va_list ap)
+ PRINTF_FORMAT(2, 0);
+#if defined(OS_WIN)
+BASE_EXPORT void StringAppendV(std::wstring* dst,
+ const wchar_t* format, va_list ap)
+ WPRINTF_FORMAT(2, 0);
+#endif
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRINGPRINTF_H_
diff --git a/libchrome/base/strings/stringprintf_unittest.cc b/libchrome/base/strings/stringprintf_unittest.cc
new file mode 100644
index 0000000..e2d3a90
--- /dev/null
+++ b/libchrome/base/strings/stringprintf_unittest.cc
@@ -0,0 +1,182 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/stringprintf.h"
+
+#include <errno.h>
+#include <stddef.h>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// A helper for the StringAppendV test that follows.
+//
+// Just forwards its args to StringAppendV.
+static void StringAppendVTestHelper(std::string* out, const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ StringAppendV(out, format, ap);
+ va_end(ap);
+}
+
+} // namespace
+
+TEST(StringPrintfTest, StringPrintfEmpty) {
+ EXPECT_EQ("", StringPrintf("%s", ""));
+}
+
+TEST(StringPrintfTest, StringPrintfMisc) {
+ EXPECT_EQ("123hello w", StringPrintf("%3d%2s %1c", 123, "hello", 'w'));
+#if defined(OS_WIN)
+ EXPECT_EQ(L"123hello w", StringPrintf(L"%3d%2ls %1lc", 123, L"hello", 'w'));
+#endif
+}
+
+TEST(StringPrintfTest, StringAppendfEmptyString) {
+ std::string value("Hello");
+ StringAppendF(&value, "%s", "");
+ EXPECT_EQ("Hello", value);
+
+#if defined(OS_WIN)
+ std::wstring valuew(L"Hello");
+ StringAppendF(&valuew, L"%ls", L"");
+ EXPECT_EQ(L"Hello", valuew);
+#endif
+}
+
+TEST(StringPrintfTest, StringAppendfString) {
+ std::string value("Hello");
+ StringAppendF(&value, " %s", "World");
+ EXPECT_EQ("Hello World", value);
+
+#if defined(OS_WIN)
+ std::wstring valuew(L"Hello");
+ StringAppendF(&valuew, L" %ls", L"World");
+ EXPECT_EQ(L"Hello World", valuew);
+#endif
+}
+
+TEST(StringPrintfTest, StringAppendfInt) {
+ std::string value("Hello");
+ StringAppendF(&value, " %d", 123);
+ EXPECT_EQ("Hello 123", value);
+
+#if defined(OS_WIN)
+ std::wstring valuew(L"Hello");
+ StringAppendF(&valuew, L" %d", 123);
+ EXPECT_EQ(L"Hello 123", valuew);
+#endif
+}
+
+// Make sure that lengths exactly around the initial buffer size are handled
+// correctly.
+TEST(StringPrintfTest, StringPrintfBounds) {
+ const int kSrcLen = 1026;
+ char src[kSrcLen];
+ for (size_t i = 0; i < arraysize(src); i++)
+ src[i] = 'A';
+
+ wchar_t srcw[kSrcLen];
+ for (size_t i = 0; i < arraysize(srcw); i++)
+ srcw[i] = 'A';
+
+ for (int i = 1; i < 3; i++) {
+ src[kSrcLen - i] = 0;
+ std::string out;
+ SStringPrintf(&out, "%s", src);
+ EXPECT_STREQ(src, out.c_str());
+
+#if defined(OS_WIN)
+ srcw[kSrcLen - i] = 0;
+ std::wstring outw;
+ SStringPrintf(&outw, L"%ls", srcw);
+ EXPECT_STREQ(srcw, outw.c_str());
+#endif
+ }
+}
+
+// Test very large sprintfs that will cause the buffer to grow.
+TEST(StringPrintfTest, Grow) {
+ char src[1026];
+ for (size_t i = 0; i < arraysize(src); i++)
+ src[i] = 'A';
+ src[1025] = 0;
+
+ const char fmt[] = "%sB%sB%sB%sB%sB%sB%s";
+
+ std::string out;
+ SStringPrintf(&out, fmt, src, src, src, src, src, src, src);
+
+ const int kRefSize = 320000;
+ char* ref = new char[kRefSize];
+#if defined(OS_WIN)
+ sprintf_s(ref, kRefSize, fmt, src, src, src, src, src, src, src);
+#elif defined(OS_POSIX)
+ snprintf(ref, kRefSize, fmt, src, src, src, src, src, src, src);
+#endif
+
+ EXPECT_STREQ(ref, out.c_str());
+ delete[] ref;
+}
+
+TEST(StringPrintfTest, StringAppendV) {
+ std::string out;
+ StringAppendVTestHelper(&out, "%d foo %s", 1, "bar");
+ EXPECT_EQ("1 foo bar", out);
+}
+
+// Test the boundary condition for the size of the string_util's
+// internal buffer.
+TEST(StringPrintfTest, GrowBoundary) {
+ const int kStringUtilBufLen = 1024;
+ // Our buffer should be one larger than the size of StringAppendVT's stack
+ // buffer.
+  // And we need one extra for the NUL terminator.
+ const int kBufLen = kStringUtilBufLen + 1 + 1;
+ char src[kBufLen];
+ for (int i = 0; i < kBufLen - 1; ++i)
+ src[i] = 'a';
+ src[kBufLen - 1] = 0;
+
+ std::string out;
+ SStringPrintf(&out, "%s", src);
+
+ EXPECT_STREQ(src, out.c_str());
+}
+
+#if defined(OS_WIN)
+// vswprintf in Visual Studio 2013 fails when given U+FFFF. This tests that the
+// failure case is gracefully handled. In Visual Studio 2015 the bad character
+// is passed through.
+TEST(StringPrintfTest, Invalid) {
+ wchar_t invalid[2];
+ invalid[0] = 0xffff;
+ invalid[1] = 0;
+
+ std::wstring out;
+ SStringPrintf(&out, L"%ls", invalid);
+#if _MSC_VER >= 1900
+ EXPECT_STREQ(invalid, out.c_str());
+#else
+ EXPECT_STREQ(L"", out.c_str());
+#endif
+}
+#endif
+
+// Test that StringPrintf and StringAppendV do not change errno.
+TEST(StringPrintfTest, StringPrintfErrno) {
+ errno = 1;
+ EXPECT_EQ("", StringPrintf("%s", ""));
+ EXPECT_EQ(1, errno);
+ std::string out;
+ StringAppendVTestHelper(&out, "%d foo %s", 1, "bar");
+ EXPECT_EQ(1, errno);
+}
+
+} // namespace base
diff --git a/libchrome/base/strings/sys_string_conversions.h b/libchrome/base/strings/sys_string_conversions.h
new file mode 100644
index 0000000..b41a228
--- /dev/null
+++ b/libchrome/base/strings/sys_string_conversions.h
@@ -0,0 +1,85 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_SYS_STRING_CONVERSIONS_H_
+#define BASE_STRINGS_SYS_STRING_CONVERSIONS_H_
+
+// Provides system-dependent string type conversions for cases where it's
+// necessary to not use ICU. Generally, you should not need this in Chrome,
+// but it is used in some shared code. Dependencies should be minimal.
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include <CoreFoundation/CoreFoundation.h>
+#ifdef __OBJC__
+@class NSString;
+#else
+class NSString;
+#endif
+#endif // OS_MACOSX
+
+namespace base {
+
+// Converts between wide and UTF-8 representations of a string. On error, the
+// result is system-dependent.
+BASE_EXPORT std::string SysWideToUTF8(const std::wstring& wide);
+BASE_EXPORT std::wstring SysUTF8ToWide(const StringPiece& utf8);
+
+// Converts between wide and the system multi-byte representations of a string.
+// DANGER: This will lose information and can change (on Windows, this can
+// change between reboots).
+BASE_EXPORT std::string SysWideToNativeMB(const std::wstring& wide);
+BASE_EXPORT std::wstring SysNativeMBToWide(const StringPiece& native_mb);
+
+// Windows-specific ------------------------------------------------------------
+
+#if defined(OS_WIN)
+
+// Converts between 8-bit and wide strings, using the given code page. The
+// code page identifier is one accepted by the Windows function
+// MultiByteToWideChar().
+BASE_EXPORT std::wstring SysMultiByteToWide(const StringPiece& mb,
+ uint32_t code_page);
+BASE_EXPORT std::string SysWideToMultiByte(const std::wstring& wide,
+ uint32_t code_page);
+
+#endif // defined(OS_WIN)
+
+// Mac-specific ----------------------------------------------------------------
+
+#if defined(OS_MACOSX)
+
+// Converts between STL strings and CFStringRefs/NSStrings.
+
+// Creates a string, and returns it with a refcount of 1. You are responsible
+// for releasing it. Returns NULL on failure.
+BASE_EXPORT CFStringRef SysUTF8ToCFStringRef(const std::string& utf8);
+BASE_EXPORT CFStringRef SysUTF16ToCFStringRef(const string16& utf16);
+
+// Same, but returns an autoreleased NSString.
+BASE_EXPORT NSString* SysUTF8ToNSString(const std::string& utf8);
+BASE_EXPORT NSString* SysUTF16ToNSString(const string16& utf16);
+
+// Converts a CFStringRef to an STL string. Returns an empty string on failure.
+BASE_EXPORT std::string SysCFStringRefToUTF8(CFStringRef ref);
+BASE_EXPORT string16 SysCFStringRefToUTF16(CFStringRef ref);
+
+// Same, but accepts NSString input. Converts nil NSString* to the appropriate
+// string type of length 0.
+BASE_EXPORT std::string SysNSStringToUTF8(NSString* ref);
+BASE_EXPORT string16 SysNSStringToUTF16(NSString* ref);
+
+#endif // defined(OS_MACOSX)
+
+} // namespace base
+
+#endif // BASE_STRINGS_SYS_STRING_CONVERSIONS_H_
diff --git a/libchrome/base/strings/sys_string_conversions_mac.mm b/libchrome/base/strings/sys_string_conversions_mac.mm
new file mode 100644
index 0000000..32fe89c
--- /dev/null
+++ b/libchrome/base/strings/sys_string_conversions_mac.mm
@@ -0,0 +1,187 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/sys_string_conversions.h"
+
+#import <Foundation/Foundation.h>
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/mac/foundation_util.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+namespace {
+
+// Convert the supplied CFString into the specified encoding, and return it as
+// an STL string of the template type. Returns an empty string on failure.
+//
+// Do not assert in this function since it is used by the assertion code!
+template<typename StringType>
+static StringType CFStringToSTLStringWithEncodingT(CFStringRef cfstring,
+                                                   CFStringEncoding encoding) {
+  CFIndex length = CFStringGetLength(cfstring);
+  if (length == 0)
+    return StringType();
+
+  CFRange whole_string = CFRangeMake(0, length);
+  CFIndex out_size;
+  // First pass: a NULL buffer with maxBufLen 0 makes CFStringGetBytes only
+  // measure; |out_size| receives the byte count the conversion needs.
+  CFIndex converted = CFStringGetBytes(cfstring,
+                                       whole_string,
+                                       encoding,
+                                       0,      // lossByte
+                                       false,  // isExternalRepresentation
+                                       NULL,   // buffer
+                                       0,      // maxBufLen
+                                       &out_size);
+  if (converted == 0 || out_size == 0)
+    return StringType();
+
+  // out_size is the number of UInt8-sized units needed in the destination.
+  // A buffer allocated as UInt8 units might not be properly aligned to
+  // contain elements of StringType::value_type.  Use a container for the
+  // proper value_type, and convert out_size by figuring the number of
+  // value_type elements per UInt8.  Leave room for a NUL terminator.
+  typename StringType::size_type elements =
+      out_size * sizeof(UInt8) / sizeof(typename StringType::value_type) + 1;
+
+  std::vector<typename StringType::value_type> out_buffer(elements);
+  // Second pass: perform the conversion into |out_buffer|.
+  converted = CFStringGetBytes(cfstring,
+                               whole_string,
+                               encoding,
+                               0,      // lossByte
+                               false,  // isExternalRepresentation
+                               reinterpret_cast<UInt8*>(&out_buffer[0]),
+                               out_size,
+                               NULL);  // usedBufLen
+  if (converted == 0)
+    return StringType();
+
+  out_buffer[elements - 1] = '\0';
+  return StringType(&out_buffer[0], elements - 1);
+}
+
+// Given an STL string |in| with an encoding specified by |in_encoding|,
+// convert it to |out_encoding| and return it as an STL string of the
+// |OutStringType| template type. Returns an empty string on failure.
+//
+// Do not assert in this function since it is used by the assertion code!
+template<typename InStringType, typename OutStringType>
+static OutStringType STLStringToSTLStringWithEncodingsT(
+    const InStringType& in,
+    CFStringEncoding in_encoding,
+    CFStringEncoding out_encoding) {
+  typename InStringType::size_type in_length = in.length();
+  if (in_length == 0)
+    return OutStringType();
+
+  // Wrap |in|'s bytes in a no-copy CFString.  kCFAllocatorNull prevents CF
+  // from attempting to free memory it does not own; |cfstring| is only used
+  // within this call, while |in| is alive.
+  base::ScopedCFTypeRef<CFStringRef> cfstring(CFStringCreateWithBytesNoCopy(
+      NULL,
+      reinterpret_cast<const UInt8*>(in.data()),
+      in_length * sizeof(typename InStringType::value_type),
+      in_encoding,
+      false,
+      kCFAllocatorNull));
+  if (!cfstring)
+    return OutStringType();
+
+  return CFStringToSTLStringWithEncodingT<OutStringType>(cfstring,
+                                                         out_encoding);
+}
+
+// Given an STL string |in| with an encoding specified by |in_encoding|,
+// return it as a CFStringRef. Returns NULL on failure.
+//
+// The returned reference follows the CF "create" rule: the caller owns it
+// and must release it.
+template<typename StringType>
+static CFStringRef STLStringToCFStringWithEncodingsT(
+    const StringType& in,
+    CFStringEncoding in_encoding) {
+  typename StringType::size_type in_length = in.length();
+  if (in_length == 0)
+    return CFSTR("");
+
+  return CFStringCreateWithBytes(kCFAllocatorDefault,
+                                 reinterpret_cast<const UInt8*>(in.data()),
+                                 in_length *
+                                     sizeof(typename StringType::value_type),
+                                 in_encoding,
+                                 false);
+}
+
+// Specify the byte ordering explicitly, otherwise CFString will be confused
+// when strings don't carry BOMs, as they typically won't.
+static const CFStringEncoding kNarrowStringEncoding = kCFStringEncodingUTF8;
+#ifdef __BIG_ENDIAN__
+static const CFStringEncoding kMediumStringEncoding = kCFStringEncodingUTF16BE;
+static const CFStringEncoding kWideStringEncoding = kCFStringEncodingUTF32BE;
+#elif defined(__LITTLE_ENDIAN__)
+static const CFStringEncoding kMediumStringEncoding = kCFStringEncodingUTF16LE;
+static const CFStringEncoding kWideStringEncoding = kCFStringEncodingUTF32LE;
+#endif // __LITTLE_ENDIAN__
+
+} // namespace
+
+// Do not assert in this function since it is used by the assertion code!
+std::string SysWideToUTF8(const std::wstring& wide) {
+  return STLStringToSTLStringWithEncodingsT<std::wstring, std::string>(
+      wide, kWideStringEncoding, kNarrowStringEncoding);
+}
+
+// Do not assert in this function since it is used by the assertion code!
+std::wstring SysUTF8ToWide(const StringPiece& utf8) {
+  return STLStringToSTLStringWithEncodingsT<StringPiece, std::wstring>(
+      utf8, kNarrowStringEncoding, kWideStringEncoding);
+}
+
+// On this platform the "native" multibyte encoding is handled as UTF-8, so
+// the NativeMB variants simply delegate to the UTF-8 conversions above.
+std::string SysWideToNativeMB(const std::wstring& wide) {
+  return SysWideToUTF8(wide);
+}
+
+std::wstring SysNativeMBToWide(const StringPiece& native_mb) {
+  return SysUTF8ToWide(native_mb);
+}
+
+// Returns a CFString the caller must release (create rule).
+CFStringRef SysUTF8ToCFStringRef(const std::string& utf8) {
+  return STLStringToCFStringWithEncodingsT(utf8, kNarrowStringEncoding);
+}
+
+// Returns a CFString the caller must release (create rule).
+CFStringRef SysUTF16ToCFStringRef(const string16& utf16) {
+  return STLStringToCFStringWithEncodingsT(utf16, kMediumStringEncoding);
+}
+
+// Returns an autoreleased NSString; CFTypeRefToNSObjectAutorelease takes
+// over the +1 reference produced by SysUTF8ToCFStringRef().
+NSString* SysUTF8ToNSString(const std::string& utf8) {
+  return (NSString*)base::mac::CFTypeRefToNSObjectAutorelease(
+      SysUTF8ToCFStringRef(utf8));
+}
+
+// Same as above for UTF-16 input.
+NSString* SysUTF16ToNSString(const string16& utf16) {
+  return (NSString*)base::mac::CFTypeRefToNSObjectAutorelease(
+      SysUTF16ToCFStringRef(utf16));
+}
+
+// Converts a CFStringRef to an STL string. Returns an empty string on
+// failure.
+std::string SysCFStringRefToUTF8(CFStringRef ref) {
+  return CFStringToSTLStringWithEncodingT<std::string>(ref,
+                                                       kNarrowStringEncoding);
+}
+
+string16 SysCFStringRefToUTF16(CFStringRef ref) {
+  return CFStringToSTLStringWithEncodingT<string16>(ref,
+                                                    kMediumStringEncoding);
+}
+
+// NSString is toll-free bridged to CFString, so after the nil check a
+// reinterpret_cast suffices.  nil converts to an empty string.
+std::string SysNSStringToUTF8(NSString* nsstring) {
+  if (!nsstring)
+    return std::string();
+  return SysCFStringRefToUTF8(reinterpret_cast<CFStringRef>(nsstring));
+}
+
+string16 SysNSStringToUTF16(NSString* nsstring) {
+  if (!nsstring)
+    return string16();
+  return SysCFStringRefToUTF16(reinterpret_cast<CFStringRef>(nsstring));
+}
+
+} // namespace base
diff --git a/libchrome/base/strings/sys_string_conversions_posix.cc b/libchrome/base/strings/sys_string_conversions_posix.cc
new file mode 100644
index 0000000..a8dcfd0
--- /dev/null
+++ b/libchrome/base/strings/sys_string_conversions_posix.cc
@@ -0,0 +1,162 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/sys_string_conversions.h"
+
+#include <stddef.h>
+#include <wchar.h>
+
+#include "base/strings/string_piece.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Delegates to the ICU-backed conversion in utf_string_conversions.cc.
+std::string SysWideToUTF8(const std::wstring& wide) {
+  // In theory this should be using the system-provided conversion rather
+  // than our ICU, but this will do for now.
+  return WideToUTF8(wide);
+}
+std::wstring SysUTF8ToWide(const StringPiece& utf8) {
+  // In theory this should be using the system-provided conversion rather
+  // than our ICU, but this will do for now.
+  std::wstring out;
+  UTF8ToWide(utf8.data(), utf8.size(), &out);
+  return out;
+}
+
+#if defined(SYSTEM_NATIVE_UTF8) || defined(OS_ANDROID)
+// TODO(port): Consider reverting the OS_ANDROID when we have wcrtomb()
+// support and a better understanding of what calls these routines.
+
+// With a known-UTF-8 native encoding (or on Android, where wcrtomb() is not
+// available), "native MB" is just UTF-8.
+std::string SysWideToNativeMB(const std::wstring& wide) {
+  return WideToUTF8(wide);
+}
+
+std::wstring SysNativeMBToWide(const StringPiece& native_mb) {
+  return SysUTF8ToWide(native_mb);
+}
+
+#else
+
+// Converts |wide| to the locale-dependent multibyte encoding via wcrtomb().
+// Returns an empty string on any conversion error.  Embedded NULs are
+// preserved (each contributes one zero byte to the output).
+std::string SysWideToNativeMB(const std::wstring& wide) {
+  mbstate_t ps;
+
+  // First pass: calculate the number of multibyte bytes.  We walk through
+  // the string without writing the output, counting the bytes that would be
+  // produced.
+  size_t num_out_chars = 0;
+  memset(&ps, 0, sizeof(ps));
+  for (size_t i = 0; i < wide.size(); ++i) {
+    const wchar_t src = wide[i];
+    // Use a temp buffer since calling wcrtomb with an output of NULL does not
+    // calculate the output length.
+    char buf[16];
+    // Skip NULLs to avoid wcrtomb's special handling of them.
+    size_t res = src ? wcrtomb(buf, src, &ps) : 0;
+    switch (res) {
+      // Handle any errors and return an empty string.
+      case static_cast<size_t>(-1):
+        return std::string();
+        break;
+      case 0:
+        // We hit an embedded null byte, keep going.
+        ++num_out_chars;
+        break;
+      default:
+        num_out_chars += res;
+        break;
+    }
+  }
+
+  if (num_out_chars == 0)
+    return std::string();
+
+  std::string out;
+  out.resize(num_out_chars);
+
+  // Second pass: we walk the input string again, with |i| tracking the index
+  // of the wide input, and |j| tracking the multibyte output.
+  memset(&ps, 0, sizeof(ps));  // Restart from the initial shift state.
+  for (size_t i = 0, j = 0; i < wide.size(); ++i) {
+    const wchar_t src = wide[i];
+    // We don't want wcrtomb to do its funkiness for embedded NULLs.
+    size_t res = src ? wcrtomb(&out[j], src, &ps) : 0;
+    switch (res) {
+      // Handle any errors and return an empty string.
+      case static_cast<size_t>(-1):
+        return std::string();
+        break;
+      case 0:
+        // We hit an embedded null byte, keep going.
+        ++j;  // Output is already zeroed.
+        break;
+      default:
+        j += res;
+        break;
+    }
+  }
+
+  return out;
+}
+
+// Converts the locale-dependent multibyte string |native_mb| to wide via
+// mbrtowc().  Returns an empty string on an invalid or incomplete sequence.
+// Embedded NUL bytes are preserved, each becoming one wide NUL.
+std::wstring SysNativeMBToWide(const StringPiece& native_mb) {
+  mbstate_t ps;
+
+  // First pass: calculate the number of wide characters.  We walk through
+  // the string without writing the output, counting the number of wide
+  // characters.
+  size_t num_out_chars = 0;
+  memset(&ps, 0, sizeof(ps));
+  for (size_t i = 0; i < native_mb.size(); ) {
+    const char* src = native_mb.data() + i;
+    // NULL destination: mbrtowc still consumes input and returns the length
+    // in bytes of the next multibyte character.
+    size_t res = mbrtowc(NULL, src, native_mb.size() - i, &ps);
+    switch (res) {
+      // Handle any errors and return an empty string.
+      case static_cast<size_t>(-2):  // Incomplete trailing sequence.
+      case static_cast<size_t>(-1):  // Invalid sequence.
+        return std::wstring();
+        break;
+      case 0:
+        // We hit an embedded null byte; count it as one input byte and keep
+        // going.
+        i += 1;  // Fall through (res is 0, so the next i += res is a no-op).
+      default:
+        i += res;
+        ++num_out_chars;
+        break;
+    }
+  }
+
+  if (num_out_chars == 0)
+    return std::wstring();
+
+  std::wstring out;
+  out.resize(num_out_chars);
+
+  memset(&ps, 0, sizeof(ps));  // Clear the shift state.
+  // Second pass: we walk the input string again, with |i| tracking the index
+  // of the multibyte input, and |j| tracking the wide output.
+  for (size_t i = 0, j = 0; i < native_mb.size(); ++j) {
+    const char* src = native_mb.data() + i;
+    wchar_t* dst = &out[j];
+    size_t res = mbrtowc(dst, src, native_mb.size() - i, &ps);
+    switch (res) {
+      // Handle any errors and return an empty string.
+      case static_cast<size_t>(-2):
+      case static_cast<size_t>(-1):
+        return std::wstring();
+        break;
+      case 0:
+        i += 1;  // Skip null byte.
+        break;
+      default:
+        i += res;
+        break;
+    }
+  }
+
+  return out;
+}
+
+#endif  // defined(SYSTEM_NATIVE_UTF8) || defined(OS_ANDROID)
+
+} // namespace base
diff --git a/libchrome/base/strings/sys_string_conversions_unittest.cc b/libchrome/base/strings/sys_string_conversions_unittest.cc
new file mode 100644
index 0000000..f5ffaec
--- /dev/null
+++ b/libchrome/base/strings/sys_string_conversions_unittest.cc
@@ -0,0 +1,196 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/test/scoped_locale.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#ifdef WCHAR_T_IS_UTF32
+static const std::wstring kSysWideOldItalicLetterA = L"\x10300";
+#else
+static const std::wstring kSysWideOldItalicLetterA = L"\xd800\xdf00";
+#endif
+
+namespace base {
+
+// Covers ASCII, BMP CJK, a supplementary-plane character, and embedded NULs.
+TEST(SysStrings, SysWideToUTF8) {
+  EXPECT_EQ("Hello, world", SysWideToUTF8(L"Hello, world"));
+  EXPECT_EQ("\xe4\xbd\xa0\xe5\xa5\xbd", SysWideToUTF8(L"\x4f60\x597d"));
+
+  // >16 bits
+  EXPECT_EQ("\xF0\x90\x8C\x80", SysWideToUTF8(kSysWideOldItalicLetterA));
+
+  // Error case. When Windows finds a UTF-16 character going off the end of
+  // a string, it just converts that literal value to UTF-8, even though this
+  // is invalid.
+  //
+  // This is what XP does, but Vista has different behavior, so we don't bother
+  // verifying it:
+  // EXPECT_EQ("\xE4\xBD\xA0\xED\xA0\x80zyxw",
+  //           SysWideToUTF8(L"\x4f60\xd800zyxw"));
+
+  // Test embedded NULLs.
+  std::wstring wide_null(L"a");
+  wide_null.push_back(0);
+  wide_null.push_back('b');
+
+  std::string expected_null("a");
+  expected_null.push_back(0);
+  expected_null.push_back('b');
+
+  EXPECT_EQ(expected_null, SysWideToUTF8(wide_null));
+}
+
+// Inverse of the test above: the same cases through SysUTF8ToWide().
+TEST(SysStrings, SysUTF8ToWide) {
+  EXPECT_EQ(L"Hello, world", SysUTF8ToWide("Hello, world"));
+  EXPECT_EQ(L"\x4f60\x597d", SysUTF8ToWide("\xe4\xbd\xa0\xe5\xa5\xbd"));
+  // >16 bits
+  EXPECT_EQ(kSysWideOldItalicLetterA, SysUTF8ToWide("\xF0\x90\x8C\x80"));
+
+  // Error case. When Windows finds an invalid UTF-8 character, it just skips
+  // it. This seems weird because it's inconsistent with the reverse
+  // conversion.
+  //
+  // This is what XP does, but Vista has different behavior, so we don't bother
+  // verifying it:
+  // EXPECT_EQ(L"\x4f60zyxw", SysUTF8ToWide("\xe4\xbd\xa0\xe5\xa5zyxw"));
+
+  // Test embedded NULLs.
+  std::string utf8_null("a");
+  utf8_null.push_back(0);
+  utf8_null.push_back('b');
+
+  std::wstring expected_null(L"a");
+  expected_null.push_back(0);
+  expected_null.push_back('b');
+
+  EXPECT_EQ(expected_null, SysUTF8ToWide(utf8_null));
+}
+
+#if defined(OS_LINUX) // Tests depend on setting a specific Linux locale.
+
+// Same cases as SysWideToUTF8 above, routed through the native-multibyte
+// path with the locale pinned to UTF-8.
+TEST(SysStrings, SysWideToNativeMB) {
+#if !defined(SYSTEM_NATIVE_UTF8)
+  ScopedLocale locale("en_US.UTF-8");
+#endif
+  EXPECT_EQ("Hello, world", SysWideToNativeMB(L"Hello, world"));
+  EXPECT_EQ("\xe4\xbd\xa0\xe5\xa5\xbd", SysWideToNativeMB(L"\x4f60\x597d"));
+
+  // >16 bits
+  EXPECT_EQ("\xF0\x90\x8C\x80", SysWideToNativeMB(kSysWideOldItalicLetterA));
+
+  // Error case. When Windows finds a UTF-16 character going off the end of
+  // a string, it just converts that literal value to UTF-8, even though this
+  // is invalid.
+  //
+  // This is what XP does, but Vista has different behavior, so we don't bother
+  // verifying it:
+  // EXPECT_EQ("\xE4\xBD\xA0\xED\xA0\x80zyxw",
+  //           SysWideToNativeMB(L"\x4f60\xd800zyxw"));
+
+  // Test embedded NULLs.
+  std::wstring wide_null(L"a");
+  wide_null.push_back(0);
+  wide_null.push_back('b');
+
+  std::string expected_null("a");
+  expected_null.push_back(0);
+  expected_null.push_back('b');
+
+  EXPECT_EQ(expected_null, SysWideToNativeMB(wide_null));
+}
+
+// We assume the test is running in a UTF8 locale.
+TEST(SysStrings, SysNativeMBToWide) {
+#if !defined(SYSTEM_NATIVE_UTF8)
+  ScopedLocale locale("en_US.UTF-8");
+#endif
+  EXPECT_EQ(L"Hello, world", SysNativeMBToWide("Hello, world"));
+  EXPECT_EQ(L"\x4f60\x597d", SysNativeMBToWide("\xe4\xbd\xa0\xe5\xa5\xbd"));
+  // >16 bits
+  EXPECT_EQ(kSysWideOldItalicLetterA, SysNativeMBToWide("\xF0\x90\x8C\x80"));
+
+  // Error case. When Windows finds an invalid UTF-8 character, it just skips
+  // it. This seems weird because it's inconsistent with the reverse
+  // conversion.
+  //
+  // This is what XP does, but Vista has different behavior, so we don't bother
+  // verifying it:
+  // EXPECT_EQ(L"\x4f60zyxw", SysNativeMBToWide("\xe4\xbd\xa0\xe5\xa5zyxw"));
+
+  // Test embedded NULLs.
+  std::string utf8_null("a");
+  utf8_null.push_back(0);
+  utf8_null.push_back('b');
+
+  std::wstring expected_null(L"a");
+  expected_null.push_back(0);
+  expected_null.push_back('b');
+
+  EXPECT_EQ(expected_null, SysNativeMBToWide(utf8_null));
+}
+
+// Multi-script round-trip inputs for the conversion tests below.
+static const wchar_t* const kConvertRoundtripCases[] = {
+  L"Google Video",
+  // "网页 图片 资讯更多 »"
+  L"\x7f51\x9875\x0020\x56fe\x7247\x0020\x8d44\x8baf\x66f4\x591a\x0020\x00bb",
+  // "Παγκόσμιος Ιστός"
+  L"\x03a0\x03b1\x03b3\x03ba\x03cc\x03c3\x03bc\x03b9"
+  L"\x03bf\x03c2\x0020\x0399\x03c3\x03c4\x03cc\x03c2",
+  // "Поиск страниц на русском"
+  L"\x041f\x043e\x0438\x0441\x043a\x0020\x0441\x0442"
+  L"\x0440\x0430\x043d\x0438\x0446\x0020\x043d\x0430"
+  L"\x0020\x0440\x0443\x0441\x0441\x043a\x043e\x043c",
+  // "전체서비스"
+  L"\xc804\xccb4\xc11c\xbe44\xc2a4",
+
+  // Test characters that take more than 16 bits. This will depend on whether
+  // wchar_t is 16 or 32 bits.
+#if defined(WCHAR_T_IS_UTF16)
+  L"\xd800\xdf00",
+  // Five supplementary-plane code points (U+11D40..U+11D44) encoded as
+  // surrogate pairs.
+  L"\xd807\xdd40\xd807\xdd41\xd807\xdd42\xd807\xdd43\xd807\xdd44",
+#elif defined(WCHAR_T_IS_UTF32)
+  L"\x10300",
+  // The same five supplementary-plane code points (U+11D40..U+11D44).
+  L"\x11d40\x11d41\x11d42\x11d43\x11d44",
+#endif
+};
+
+
+// Round-trips Wide <-> NativeMB over every entry in kConvertRoundtripCases,
+// and cross-checks the native path against the ICU-based conversions.
+TEST(SysStrings, SysNativeMBAndWide) {
+#if !defined(SYSTEM_NATIVE_UTF8)
+  ScopedLocale locale("en_US.UTF-8");
+#endif
+  for (size_t i = 0; i < arraysize(kConvertRoundtripCases); ++i) {
+    std::wstring wide = kConvertRoundtripCases[i];
+    std::wstring trip = SysNativeMBToWide(SysWideToNativeMB(wide));
+    EXPECT_EQ(wide.size(), trip.size());
+    EXPECT_EQ(wide, trip);
+  }
+
+  // We assume our test is running in UTF-8, so double check through ICU.
+  for (size_t i = 0; i < arraysize(kConvertRoundtripCases); ++i) {
+    std::wstring wide = kConvertRoundtripCases[i];
+    std::wstring trip = SysNativeMBToWide(WideToUTF8(wide));
+    EXPECT_EQ(wide.size(), trip.size());
+    EXPECT_EQ(wide, trip);
+  }
+
+  for (size_t i = 0; i < arraysize(kConvertRoundtripCases); ++i) {
+    std::wstring wide = kConvertRoundtripCases[i];
+    std::wstring trip = UTF8ToWide(SysWideToNativeMB(wide));
+    EXPECT_EQ(wide.size(), trip.size());
+    EXPECT_EQ(wide, trip);
+  }
+}
+#endif // OS_LINUX
+
+} // namespace base
diff --git a/libchrome/base/strings/utf_string_conversion_utils.cc b/libchrome/base/strings/utf_string_conversion_utils.cc
new file mode 100644
index 0000000..22058a5
--- /dev/null
+++ b/libchrome/base/strings/utf_string_conversion_utils.cc
@@ -0,0 +1,148 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/utf_string_conversion_utils.h"
+
+#include "base/third_party/icu/icu_utf.h"
+
+namespace base {
+
+// ReadUnicodeCharacter --------------------------------------------------------
+
+// Decodes the next UTF-8 code point starting at |*char_index|.  See the
+// header for the full contract; on return |*char_index| points at the last
+// byte consumed so a caller's loop increment advances to the next character.
+bool ReadUnicodeCharacter(const char* src,
+                          int32_t src_len,
+                          int32_t* char_index,
+                          uint32_t* code_point_out) {
+  // U8_NEXT expects to be able to use -1 to signal an error, so we must
+  // use a signed type for code_point.  But this function returns false
+  // on error anyway, so code_point_out is unsigned.
+  int32_t code_point;
+  CBU8_NEXT(src, *char_index, src_len, code_point);
+  *code_point_out = static_cast<uint32_t>(code_point);
+
+  // The ICU macro above moves to the next char, we want to point to the last
+  // char consumed.
+  (*char_index)--;
+
+  // Validate the decoded value.
+  return IsValidCodepoint(code_point);
+}
+
+// Decodes the next UTF-16 code point starting at |*char_index|, combining
+// surrogate pairs.  Usage is the same as the 8-bit version above.
+bool ReadUnicodeCharacter(const char16* src,
+                          int32_t src_len,
+                          int32_t* char_index,
+                          uint32_t* code_point) {
+  if (CBU16_IS_SURROGATE(src[*char_index])) {
+    if (!CBU16_IS_SURROGATE_LEAD(src[*char_index]) ||
+        *char_index + 1 >= src_len ||
+        !CBU16_IS_TRAIL(src[*char_index + 1])) {
+      // Invalid surrogate pair: a lone trail surrogate, a lead surrogate at
+      // the end of the string, or a lead not followed by a trail.
+      return false;
+    }
+
+    // Valid surrogate pair.
+    *code_point = CBU16_GET_SUPPLEMENTARY(src[*char_index],
+                                          src[*char_index + 1]);
+    (*char_index)++;  // Two units consumed; the caller increments once more.
+  } else {
+    // Not a surrogate, just one 16-bit word.
+    *code_point = src[*char_index];
+  }
+
+  return IsValidCodepoint(*code_point);
+}
+
+#if defined(WCHAR_T_IS_UTF32)
+// Reads a UTF-32 character. The usage is the same as the 8-bit version above.
+bool ReadUnicodeCharacter(const wchar_t* src,
+                          int32_t /*src_len*/,
+                          int32_t* char_index,
+                          uint32_t* code_point) {
+  // Conversion is easy since the source is 32-bit: every element holds a
+  // whole code point, so no length check is needed.
+  *code_point = src[*char_index];
+
+  // Validate the value.
+  return IsValidCodepoint(*code_point);
+}
+#endif  // defined(WCHAR_T_IS_UTF32)
+
+// WriteUnicodeCharacter -------------------------------------------------------
+
+// Appends |code_point| to |output| as UTF-8; returns the number of bytes
+// written.  |code_point| must be valid (CBU8_APPEND_UNSAFE does not check).
+size_t WriteUnicodeCharacter(uint32_t code_point, std::string* output) {
+  if (code_point <= 0x7f) {
+    // Fast path the common case of one byte.
+    output->push_back(static_cast<char>(code_point));
+    return 1;
+  }
+
+  // CBU8_APPEND_UNSAFE can append up to 4 bytes.
+  size_t char_offset = output->length();
+  size_t original_char_offset = char_offset;
+  output->resize(char_offset + CBU8_MAX_LENGTH);
+
+  CBU8_APPEND_UNSAFE(&(*output)[0], char_offset, code_point);
+
+  // CBU8_APPEND_UNSAFE will advance our offset past the inserted character,
+  // so it will represent the new length of the string.
+  output->resize(char_offset);
+  return char_offset - original_char_offset;
+}
+
+// Appends |code_point| to |output| as UTF-16; returns the number of 16-bit
+// units written.
+size_t WriteUnicodeCharacter(uint32_t code_point, string16* output) {
+  if (CBU16_LENGTH(code_point) == 1) {
+    // The code point is in the Basic Multilingual Plane (BMP).
+    output->push_back(static_cast<char16>(code_point));
+    return 1;
+  }
+  // Non-BMP characters use a double-character (surrogate pair) encoding.
+  size_t char_offset = output->length();
+  output->resize(char_offset + CBU16_MAX_LENGTH);
+  CBU16_APPEND_UNSAFE(&(*output)[0], char_offset, code_point);
+  return CBU16_MAX_LENGTH;
+}
+
+// Generalized Unicode converter -----------------------------------------------
+
+// Clears |output| and reserves a guessed capacity for a UTF-8 conversion of
+// |src|; the guess is keyed off whether the first unit is ASCII.
+template<typename CHAR>
+void PrepareForUTF8Output(const CHAR* src,
+                          size_t src_len,
+                          std::string* output) {
+  output->clear();
+  if (src_len == 0)
+    return;
+  if (src[0] < 0x80) {
+    // Assume that the entire input will be ASCII.
+    output->reserve(src_len);
+  } else {
+    // Assume that the entire input is non-ASCII and will have 3 bytes per
+    // char.
+    output->reserve(src_len * 3);
+  }
+}
+
+// Instantiate versions we know callers will need.
+template void PrepareForUTF8Output(const wchar_t*, size_t, std::string*);
+template void PrepareForUTF8Output(const char16*, size_t, std::string*);
+
+// Clears |output| and reserves a guessed capacity for converting the UTF-8
+// input |src| to UTF-16 or UTF-32.
+template<typename STRING>
+void PrepareForUTF16Or32Output(const char* src,
+                               size_t src_len,
+                               STRING* output) {
+  output->clear();
+  if (src_len == 0)
+    return;
+  if (static_cast<unsigned char>(src[0]) < 0x80) {
+    // Assume the input is all ASCII, which means 1:1 correspondence.
+    output->reserve(src_len);
+  } else {
+    // Otherwise assume that the UTF-8 sequences will have 2 bytes for each
+    // character.
+    output->reserve(src_len / 2);
+  }
+}
+
+// Instantiate versions we know callers will need.
+template void PrepareForUTF16Or32Output(const char*, size_t, std::wstring*);
+template void PrepareForUTF16Or32Output(const char*, size_t, string16*);
+
+} // namespace base
diff --git a/libchrome/base/strings/utf_string_conversion_utils.h b/libchrome/base/strings/utf_string_conversion_utils.h
new file mode 100644
index 0000000..c716404
--- /dev/null
+++ b/libchrome/base/strings/utf_string_conversion_utils.h
@@ -0,0 +1,99 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_UTF_STRING_CONVERSION_UTILS_H_
+#define BASE_STRINGS_UTF_STRING_CONVERSION_UTILS_H_
+
+// This should only be used by the various UTF string conversion files.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+
+namespace base {
+
+// Returns true if |code_point| is a valid Unicode scalar value.
+inline bool IsValidCodepoint(uint32_t code_point) {
+  // Excludes the surrogate code points ([0xD800, 0xDFFF]) and
+  // codepoints larger than 0x10FFFF (the highest codepoint allowed).
+  // Non-characters and unassigned codepoints are allowed.
+  return code_point < 0xD800u ||
+         (code_point >= 0xE000u && code_point <= 0x10FFFFu);
+}
+
+// Like IsValidCodepoint(), but additionally rejects Unicode non-characters.
+inline bool IsValidCharacter(uint32_t code_point) {
+  // Excludes non-characters (U+FDD0..U+FDEF, and all codepoints ending in
+  // 0xFFFE or 0xFFFF) from the set of valid code points.
+  return code_point < 0xD800u || (code_point >= 0xE000u &&
+      code_point < 0xFDD0u) || (code_point > 0xFDEFu &&
+      code_point <= 0x10FFFFu && (code_point & 0xFFFEu) != 0xFFFEu);
+}
+
+// ReadUnicodeCharacter --------------------------------------------------------
+
+// Reads a UTF-8 stream, placing the next code point into the given output
+// |*code_point|. |src| represents the entire string to read, and |*char_index|
+// is the character offset within the string to start reading at. |*char_index|
+// will be updated to index the last character read, such that incrementing it
+// (as in a for loop) will take the reader to the next character.
+//
+// Returns true on success. On false, |*code_point| will be invalid.
+BASE_EXPORT bool ReadUnicodeCharacter(const char* src,
+ int32_t src_len,
+ int32_t* char_index,
+ uint32_t* code_point_out);
+
+// Reads a UTF-16 character. The usage is the same as the 8-bit version above.
+BASE_EXPORT bool ReadUnicodeCharacter(const char16* src,
+ int32_t src_len,
+ int32_t* char_index,
+ uint32_t* code_point);
+
+#if defined(WCHAR_T_IS_UTF32)
+// Reads UTF-32 character. The usage is the same as the 8-bit version above.
+BASE_EXPORT bool ReadUnicodeCharacter(const wchar_t* src,
+ int32_t src_len,
+ int32_t* char_index,
+ uint32_t* code_point);
+#endif // defined(WCHAR_T_IS_UTF32)
+
+// WriteUnicodeCharacter -------------------------------------------------------
+
+// Appends a UTF-8 character to the given 8-bit string. Returns the number of
+// bytes written.
+BASE_EXPORT size_t WriteUnicodeCharacter(uint32_t code_point,
+ std::string* output);
+
+// Appends the given code point as a UTF-16 character to the given 16-bit
+// string. Returns the number of 16-bit values written.
+BASE_EXPORT size_t WriteUnicodeCharacter(uint32_t code_point, string16* output);
+
+#if defined(WCHAR_T_IS_UTF32)
+// Appends the given UTF-32 character to the given 32-bit string. Returns the
+// number of 32-bit values written.
+inline size_t WriteUnicodeCharacter(uint32_t code_point, std::wstring* output) {
+  // This is the easy case, just append the character (implicitly converting
+  // uint32_t to the 32-bit wchar_t).
+  output->push_back(code_point);
+  return 1;
+}
+#endif  // defined(WCHAR_T_IS_UTF32)
+
+// Generalized Unicode converter -----------------------------------------------
+
+// Guesses the length of the output in UTF-8 in bytes, clears that output
+// string, and reserves that amount of space. We assume that the input
+// character types are unsigned, which will be true for UTF-16 and -32 on our
+// systems.
+template<typename CHAR>
+void PrepareForUTF8Output(const CHAR* src, size_t src_len, std::string* output);
+
+// Prepares an output buffer (containing either UTF-16 or -32 data) given some
+// UTF-8 input that will be converted to it. See PrepareForUTF8Output().
+template<typename STRING>
+void PrepareForUTF16Or32Output(const char* src, size_t src_len, STRING* output);
+
+} // namespace base
+
+#endif // BASE_STRINGS_UTF_STRING_CONVERSION_UTILS_H_
diff --git a/libchrome/base/strings/utf_string_conversions.cc b/libchrome/base/strings/utf_string_conversions.cc
new file mode 100644
index 0000000..6b17eac
--- /dev/null
+++ b/libchrome/base/strings/utf_string_conversions.cc
@@ -0,0 +1,231 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/utf_string_conversions.h"
+
+#include <stdint.h>
+
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversion_utils.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+// Generalized Unicode converter -----------------------------------------------
+
+// Converts the given source Unicode character type to the given destination
+// Unicode character type, appending the result to |output|.  Returns true if
+// every input sequence was valid; invalid sequences are replaced with the
+// replacement character U+FFFD and false is returned.
+template<typename SRC_CHAR, typename DEST_STRING>
+bool ConvertUnicode(const SRC_CHAR* src,
+                    size_t src_len,
+                    DEST_STRING* output) {
+  // ICU requires 32-bit numbers.
+  bool success = true;
+  int32_t src_len32 = static_cast<int32_t>(src_len);
+  for (int32_t i = 0; i < src_len32; i++) {
+    uint32_t code_point;
+    if (ReadUnicodeCharacter(src, src_len32, &i, &code_point)) {
+      WriteUnicodeCharacter(code_point, output);
+    } else {
+      // Substitute U+FFFD for the invalid sequence and remember the failure.
+      WriteUnicodeCharacter(0xFFFD, output);
+      success = false;
+    }
+  }
+
+  return success;
+}
+
+} // namespace
+
+// UTF-8 <-> Wide --------------------------------------------------------------
+
+bool WideToUTF8(const wchar_t* src, size_t src_len, std::string* output) {
+  if (IsStringASCII(std::wstring(src, src_len))) {
+    // Fast path: pure-ASCII input converts element-for-element.
+    output->assign(src, src + src_len);
+    return true;
+  } else {
+    PrepareForUTF8Output(src, src_len, output);
+    return ConvertUnicode(src, src_len, output);
+  }
+}
+
+std::string WideToUTF8(const std::wstring& wide) {
+  if (IsStringASCII(wide)) {
+    return std::string(wide.data(), wide.data() + wide.length());
+  }
+
+  std::string ret;
+  PrepareForUTF8Output(wide.data(), wide.length(), &ret);
+  // Ignore the success flag; the best-effort result is returned regardless.
+  ConvertUnicode(wide.data(), wide.length(), &ret);
+  return ret;
+}
+
+bool UTF8ToWide(const char* src, size_t src_len, std::wstring* output) {
+  if (IsStringASCII(StringPiece(src, src_len))) {
+    // Fast path: ASCII widens element-for-element.
+    output->assign(src, src + src_len);
+    return true;
+  } else {
+    PrepareForUTF16Or32Output(src, src_len, output);
+    return ConvertUnicode(src, src_len, output);
+  }
+}
+
+std::wstring UTF8ToWide(StringPiece utf8) {
+  if (IsStringASCII(utf8)) {
+    return std::wstring(utf8.begin(), utf8.end());
+  }
+
+  std::wstring ret;
+  PrepareForUTF16Or32Output(utf8.data(), utf8.length(), &ret);
+  // Ignore the success flag; the best-effort result is returned regardless.
+  ConvertUnicode(utf8.data(), utf8.length(), &ret);
+  return ret;
+}
+
+// UTF-16 <-> Wide -------------------------------------------------------------
+
+#if defined(WCHAR_T_IS_UTF16)
+
+// When wide == UTF-16, then conversions are a NOP.
+bool WideToUTF16(const wchar_t* src, size_t src_len, string16* output) {
+  output->assign(src, src_len);
+  return true;
+}
+
+string16 WideToUTF16(const std::wstring& wide) {
+  return wide;
+}
+
+bool UTF16ToWide(const char16* src, size_t src_len, std::wstring* output) {
+  output->assign(src, src_len);
+  return true;
+}
+
+std::wstring UTF16ToWide(const string16& utf16) {
+  return utf16;
+}
+
+#elif defined(WCHAR_T_IS_UTF32)
+
+// When wide == UTF-32, a real conversion via ConvertUnicode() is required.
+bool WideToUTF16(const wchar_t* src, size_t src_len, string16* output) {
+  output->clear();
+  // Assume that normally we won't have any non-BMP characters so the counts
+  // will be the same.
+  output->reserve(src_len);
+  return ConvertUnicode(src, src_len, output);
+}
+
+string16 WideToUTF16(const std::wstring& wide) {
+  string16 ret;
+  // Ignore the success flag; the best-effort result is returned regardless.
+  WideToUTF16(wide.data(), wide.length(), &ret);
+  return ret;
+}
+
+bool UTF16ToWide(const char16* src, size_t src_len, std::wstring* output) {
+  output->clear();
+  // Assume that normally we won't have any non-BMP characters so the counts
+  // will be the same.
+  output->reserve(src_len);
+  return ConvertUnicode(src, src_len, output);
+}
+
+std::wstring UTF16ToWide(const string16& utf16) {
+  std::wstring ret;
+  UTF16ToWide(utf16.data(), utf16.length(), &ret);
+  return ret;
+}
+
+#endif  // defined(WCHAR_T_IS_UTF32)
+
+// UTF16 <-> UTF8 --------------------------------------------------------------
+
+#if defined(WCHAR_T_IS_UTF32)
+
+bool UTF8ToUTF16(const char* src, size_t src_len, string16* output) {
+  if (IsStringASCII(StringPiece(src, src_len))) {
+    // Fast path: ASCII widens element-for-element.
+    output->assign(src, src + src_len);
+    return true;
+  } else {
+    PrepareForUTF16Or32Output(src, src_len, output);
+    return ConvertUnicode(src, src_len, output);
+  }
+}
+
+string16 UTF8ToUTF16(StringPiece utf8) {
+  if (IsStringASCII(utf8)) {
+    return string16(utf8.begin(), utf8.end());
+  }
+
+  string16 ret;
+  PrepareForUTF16Or32Output(utf8.data(), utf8.length(), &ret);
+  // Ignore the success flag of this call, it will do the best it can for
+  // invalid input, which is what we want here.
+  ConvertUnicode(utf8.data(), utf8.length(), &ret);
+  return ret;
+}
+
+bool UTF16ToUTF8(const char16* src, size_t src_len, std::string* output) {
+  if (IsStringASCII(StringPiece16(src, src_len))) {
+    // Fast path: ASCII narrows element-for-element.
+    output->assign(src, src + src_len);
+    return true;
+  } else {
+    PrepareForUTF8Output(src, src_len, output);
+    return ConvertUnicode(src, src_len, output);
+  }
+}
+
+std::string UTF16ToUTF8(StringPiece16 utf16) {
+  if (IsStringASCII(utf16)) {
+    return std::string(utf16.begin(), utf16.end());
+  }
+
+  std::string ret;
+  // Ignore the success flag of this call, it will do the best it can for
+  // invalid input, which is what we want here.
+  UTF16ToUTF8(utf16.data(), utf16.length(), &ret);
+  return ret;
+}
+
+#elif defined(WCHAR_T_IS_UTF16)
+// Easy case since we can use the "wide" versions we already wrote above.
+
+bool UTF8ToUTF16(const char* src, size_t src_len, string16* output) {
+  return UTF8ToWide(src, src_len, output);
+}
+
+string16 UTF8ToUTF16(StringPiece utf8) {
+  return UTF8ToWide(utf8);
+}
+
+bool UTF16ToUTF8(const char16* src, size_t src_len, std::string* output) {
+  return WideToUTF8(src, src_len, output);
+}
+
+std::string UTF16ToUTF8(StringPiece16 utf16) {
+  if (IsStringASCII(utf16))
+    return std::string(utf16.data(), utf16.data() + utf16.length());
+
+  std::string ret;
+  PrepareForUTF8Output(utf16.data(), utf16.length(), &ret);
+  // Ignore the success flag; the best-effort result is returned regardless.
+  ConvertUnicode(utf16.data(), utf16.length(), &ret);
+  return ret;
+}
+
+#endif
+
+// |ascii| must already be pure ASCII (DCHECK-enforced); each byte widens to
+// one char16.
+string16 ASCIIToUTF16(StringPiece ascii) {
+  DCHECK(IsStringASCII(ascii)) << ascii;
+  return string16(ascii.begin(), ascii.end());
+}
+
+// |utf16| must contain only ASCII code units (DCHECK-enforced); each narrows
+// to one char.
+std::string UTF16ToASCII(StringPiece16 utf16) {
+  DCHECK(IsStringASCII(utf16)) << UTF16ToUTF8(utf16);
+  return std::string(utf16.begin(), utf16.end());
+}
+
+} // namespace base
diff --git a/libchrome/base/strings/utf_string_conversions.h b/libchrome/base/strings/utf_string_conversions.h
new file mode 100644
index 0000000..2995f4c
--- /dev/null
+++ b/libchrome/base/strings/utf_string_conversions.h
@@ -0,0 +1,54 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_UTF_STRING_CONVERSIONS_H_
+#define BASE_STRINGS_UTF_STRING_CONVERSIONS_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// These convert between UTF-8, -16, and -32 strings. They are potentially slow,
+// so avoid unnecessary conversions. The low-level versions return a boolean
+// indicating whether the conversion was 100% valid. In this case, it will still
+// do the best it can and put the result in the output buffer. The versions that
+// return strings ignore this error and just return the best conversion
+// possible.
+BASE_EXPORT bool WideToUTF8(const wchar_t* src, size_t src_len,
+ std::string* output);
+BASE_EXPORT std::string WideToUTF8(const std::wstring& wide);
+BASE_EXPORT bool UTF8ToWide(const char* src, size_t src_len,
+ std::wstring* output);
+BASE_EXPORT std::wstring UTF8ToWide(StringPiece utf8);
+
+BASE_EXPORT bool WideToUTF16(const wchar_t* src, size_t src_len,
+ string16* output);
+BASE_EXPORT string16 WideToUTF16(const std::wstring& wide);
+BASE_EXPORT bool UTF16ToWide(const char16* src, size_t src_len,
+ std::wstring* output);
+BASE_EXPORT std::wstring UTF16ToWide(const string16& utf16);
+
+BASE_EXPORT bool UTF8ToUTF16(const char* src, size_t src_len, string16* output);
+BASE_EXPORT string16 UTF8ToUTF16(StringPiece utf8);
+BASE_EXPORT bool UTF16ToUTF8(const char16* src, size_t src_len,
+ std::string* output);
+BASE_EXPORT std::string UTF16ToUTF8(StringPiece16 utf16);
+
+// This converts an ASCII string, typically a hardcoded constant, to a UTF16
+// string.
+BASE_EXPORT string16 ASCIIToUTF16(StringPiece ascii);
+
+// Converts to 7-bit ASCII by truncating. The result must be known to be ASCII
+// beforehand.
+BASE_EXPORT std::string UTF16ToASCII(StringPiece16 utf16);
+
+} // namespace base
+
+#endif // BASE_STRINGS_UTF_STRING_CONVERSIONS_H_
diff --git a/libchrome/base/strings/utf_string_conversions_unittest.cc b/libchrome/base/strings/utf_string_conversions_unittest.cc
new file mode 100644
index 0000000..8107713
--- /dev/null
+++ b/libchrome/base/strings/utf_string_conversions_unittest.cc
@@ -0,0 +1,211 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+const wchar_t* const kConvertRoundtripCases[] = {
+ L"Google Video",
+ // "网页 图片 资讯更多 »"
+ L"\x7f51\x9875\x0020\x56fe\x7247\x0020\x8d44\x8baf\x66f4\x591a\x0020\x00bb",
+ // "Παγκόσμιος Ιστός"
+ L"\x03a0\x03b1\x03b3\x03ba\x03cc\x03c3\x03bc\x03b9"
+ L"\x03bf\x03c2\x0020\x0399\x03c3\x03c4\x03cc\x03c2",
+ // "Поиск страниц на русском"
+ L"\x041f\x043e\x0438\x0441\x043a\x0020\x0441\x0442"
+ L"\x0440\x0430\x043d\x0438\x0446\x0020\x043d\x0430"
+ L"\x0020\x0440\x0443\x0441\x0441\x043a\x043e\x043c",
+ // "전체서비스"
+ L"\xc804\xccb4\xc11c\xbe44\xc2a4",
+
+ // Test characters that take more than 16 bits. This will depend on whether
+ // wchar_t is 16 or 32 bits.
+#if defined(WCHAR_T_IS_UTF16)
+ L"\xd800\xdf00",
+ // ????? (Mathematical Alphanumeric Symbols (U+011d40 - U+011d44 : A,B,C,D,E)
+ L"\xd807\xdd40\xd807\xdd41\xd807\xdd42\xd807\xdd43\xd807\xdd44",
+#elif defined(WCHAR_T_IS_UTF32)
+ L"\x10300",
+ // ????? (Mathematical Alphanumeric Symbols (U+011d40 - U+011d44 : A,B,C,D,E)
+ L"\x11d40\x11d41\x11d42\x11d43\x11d44",
+#endif
+};
+
+} // namespace
+
+TEST(UTFStringConversionsTest, ConvertUTF8AndWide) {
+ // we round-trip all the wide strings through UTF-8 to make sure everything
+ // agrees on the conversion. This uses the stream operators to test them
+ // simultaneously.
+ for (size_t i = 0; i < arraysize(kConvertRoundtripCases); ++i) {
+ std::ostringstream utf8;
+ utf8 << WideToUTF8(kConvertRoundtripCases[i]);
+ std::wostringstream wide;
+ wide << UTF8ToWide(utf8.str());
+
+ EXPECT_EQ(kConvertRoundtripCases[i], wide.str());
+ }
+}
+
+TEST(UTFStringConversionsTest, ConvertUTF8AndWideEmptyString) {
+ // An empty std::wstring should be converted to an empty std::string,
+ // and vice versa.
+ std::wstring wempty;
+ std::string empty;
+ EXPECT_EQ(empty, WideToUTF8(wempty));
+ EXPECT_EQ(wempty, UTF8ToWide(empty));
+}
+
+TEST(UTFStringConversionsTest, ConvertUTF8ToWide) {
+ struct UTF8ToWideCase {
+ const char* utf8;
+ const wchar_t* wide;
+ bool success;
+ } convert_cases[] = {
+ // Regular UTF-8 input.
+ {"\xe4\xbd\xa0\xe5\xa5\xbd", L"\x4f60\x597d", true},
+ // Non-character is passed through.
+ {"\xef\xbf\xbfHello", L"\xffffHello", true},
+ // Truncated UTF-8 sequence.
+ {"\xe4\xa0\xe5\xa5\xbd", L"\xfffd\x597d", false},
+ // Truncated off the end.
+ {"\xe5\xa5\xbd\xe4\xa0", L"\x597d\xfffd", false},
+ // Non-shortest-form UTF-8.
+ {"\xf0\x84\xbd\xa0\xe5\xa5\xbd", L"\xfffd\x597d", false},
+ // This UTF-8 character decodes to a UTF-16 surrogate, which is illegal.
+ {"\xed\xb0\x80", L"\xfffd", false},
+ // Non-BMP characters. The second is a non-character regarded as valid.
+ // The result will either be in UTF-16 or UTF-32.
+#if defined(WCHAR_T_IS_UTF16)
+ {"A\xF0\x90\x8C\x80z", L"A\xd800\xdf00z", true},
+ {"A\xF4\x8F\xBF\xBEz", L"A\xdbff\xdffez", true},
+#elif defined(WCHAR_T_IS_UTF32)
+ {"A\xF0\x90\x8C\x80z", L"A\x10300z", true},
+ {"A\xF4\x8F\xBF\xBEz", L"A\x10fffez", true},
+#endif
+ };
+
+ for (size_t i = 0; i < arraysize(convert_cases); i++) {
+ std::wstring converted;
+ EXPECT_EQ(convert_cases[i].success,
+ UTF8ToWide(convert_cases[i].utf8,
+ strlen(convert_cases[i].utf8),
+ &converted));
+ std::wstring expected(convert_cases[i].wide);
+ EXPECT_EQ(expected, converted);
+ }
+
+ // Manually test an embedded NULL.
+ std::wstring converted;
+ EXPECT_TRUE(UTF8ToWide("\00Z\t", 3, &converted));
+ ASSERT_EQ(3U, converted.length());
+ EXPECT_EQ(static_cast<wchar_t>(0), converted[0]);
+ EXPECT_EQ('Z', converted[1]);
+ EXPECT_EQ('\t', converted[2]);
+
+ // Make sure that conversion replaces, not appends.
+ EXPECT_TRUE(UTF8ToWide("B", 1, &converted));
+ ASSERT_EQ(1U, converted.length());
+ EXPECT_EQ('B', converted[0]);
+}
+
+#if defined(WCHAR_T_IS_UTF16)
+// This test is only valid when wchar_t == UTF-16.
+TEST(UTFStringConversionsTest, ConvertUTF16ToUTF8) {
+ struct WideToUTF8Case {
+ const wchar_t* utf16;
+ const char* utf8;
+ bool success;
+ } convert_cases[] = {
+ // Regular UTF-16 input.
+ {L"\x4f60\x597d", "\xe4\xbd\xa0\xe5\xa5\xbd", true},
+ // Test a non-BMP character.
+ {L"\xd800\xdf00", "\xF0\x90\x8C\x80", true},
+ // Non-characters are passed through.
+ {L"\xffffHello", "\xEF\xBF\xBFHello", true},
+ {L"\xdbff\xdffeHello", "\xF4\x8F\xBF\xBEHello", true},
+ // The first character is a truncated UTF-16 character.
+ {L"\xd800\x597d", "\xef\xbf\xbd\xe5\xa5\xbd", false},
+ // Truncated at the end.
+ {L"\x597d\xd800", "\xe5\xa5\xbd\xef\xbf\xbd", false},
+ };
+
+ for (const auto& test : convert_cases) {
+ std::string converted;
+ EXPECT_EQ(test.success,
+ WideToUTF8(test.utf16, wcslen(test.utf16), &converted));
+ std::string expected(test.utf8);
+ EXPECT_EQ(expected, converted);
+ }
+}
+
+#elif defined(WCHAR_T_IS_UTF32)
+// This test is only valid when wchar_t == UTF-32.
+TEST(UTFStringConversionsTest, ConvertUTF32ToUTF8) {
+ struct WideToUTF8Case {
+ const wchar_t* utf32;
+ const char* utf8;
+ bool success;
+ } convert_cases[] = {
+ // Regular 16-bit input.
+ {L"\x4f60\x597d", "\xe4\xbd\xa0\xe5\xa5\xbd", true},
+ // Test a non-BMP character.
+ {L"A\x10300z", "A\xF0\x90\x8C\x80z", true},
+ // Non-characters are passed through.
+ {L"\xffffHello", "\xEF\xBF\xBFHello", true},
+ {L"\x10fffeHello", "\xF4\x8F\xBF\xBEHello", true},
+ // Invalid Unicode code points.
+ {L"\xfffffffHello", "\xEF\xBF\xBDHello", false},
+ // Unpaired surrogate code points, which are invalid in UTF-32.
+ {L"\xd800\x597d", "\xef\xbf\xbd\xe5\xa5\xbd", false},
+ {L"\xdc01Hello", "\xef\xbf\xbdHello", false},
+ };
+
+ for (const auto& test : convert_cases) {
+ std::string converted;
+ EXPECT_EQ(test.success,
+ WideToUTF8(test.utf32, wcslen(test.utf32), &converted));
+ std::string expected(test.utf8);
+ EXPECT_EQ(expected, converted);
+ }
+}
+#endif // defined(WCHAR_T_IS_UTF32)
+
+TEST(UTFStringConversionsTest, ConvertMultiString) {
+ static char16 multi16[] = {
+ 'f', 'o', 'o', '\0',
+ 'b', 'a', 'r', '\0',
+ 'b', 'a', 'z', '\0',
+ '\0'
+ };
+ static char multi[] = {
+ 'f', 'o', 'o', '\0',
+ 'b', 'a', 'r', '\0',
+ 'b', 'a', 'z', '\0',
+ '\0'
+ };
+ string16 multistring16;
+ memcpy(WriteInto(&multistring16, arraysize(multi16)), multi16,
+ sizeof(multi16));
+ EXPECT_EQ(arraysize(multi16) - 1, multistring16.length());
+ std::string expected;
+ memcpy(WriteInto(&expected, arraysize(multi)), multi, sizeof(multi));
+ EXPECT_EQ(arraysize(multi) - 1, expected.length());
+ const std::string& converted = UTF16ToUTF8(multistring16);
+ EXPECT_EQ(arraysize(multi) - 1, converted.length());
+ EXPECT_EQ(expected, converted);
+}
+
+} // namespace base
diff --git a/libchrome/base/sync_socket.h b/libchrome/base/sync_socket.h
new file mode 100644
index 0000000..fcf4155
--- /dev/null
+++ b/libchrome/base/sync_socket.h
@@ -0,0 +1,158 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNC_SOCKET_H_
+#define BASE_SYNC_SOCKET_H_
+
+// A socket abstraction used for sending and receiving plain
+// data. Because the receiving is blocking, they can be used to perform
+// rudimentary cross-process synchronization with low latency.
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+#include <sys/types.h>
+
+#if defined(OS_POSIX)
+#include "base/file_descriptor_posix.h"
+#endif
+
+namespace base {
+
+class BASE_EXPORT SyncSocket {
+ public:
+#if defined(OS_WIN)
+ typedef HANDLE Handle;
+ typedef Handle TransitDescriptor;
+#else
+ typedef int Handle;
+ typedef FileDescriptor TransitDescriptor;
+#endif
+ static const Handle kInvalidHandle;
+
+ SyncSocket();
+
+ // Creates a SyncSocket from a Handle. Used in transport.
+ explicit SyncSocket(Handle handle) : handle_(handle) {}
+ virtual ~SyncSocket();
+
+ // Initializes and connects a pair of sockets.
+ // |socket_a| and |socket_b| must not hold a valid handle. Upon successful
+ // return, the sockets will both be valid and connected.
+ static bool CreatePair(SyncSocket* socket_a, SyncSocket* socket_b);
+
+ // Extracts and returns the |Handle| contained in a |TransitDescriptor|.
+ static Handle UnwrapHandle(const TransitDescriptor& descriptor);
+
+ // Prepares a |TransitDescriptor| which wraps |Handle| used for transit.
+ // This is used to prepare the underlying shared resource before passing back
+ // the handle to be used by the peer process.
+ bool PrepareTransitDescriptor(ProcessHandle peer_process_handle,
+ TransitDescriptor* descriptor);
+
+ // Closes the SyncSocket. Returns true on success, false on failure.
+ virtual bool Close();
+
+ // Sends the message to the remote peer of the SyncSocket.
+ // Note it is not safe to send messages from the same socket handle by
+ // multiple threads simultaneously.
+ // buffer is a pointer to the data to send.
+ // length is the length of the data to send (must be non-zero).
+ // Returns the number of bytes sent, or 0 upon failure.
+ virtual size_t Send(const void* buffer, size_t length);
+
+ // Receives a message from a SyncSocket.
+ // buffer is a pointer to the buffer to receive data.
+ // length is the number of bytes of data to receive (must be non-zero).
+ // Returns the number of bytes received, or 0 upon failure.
+ virtual size_t Receive(void* buffer, size_t length);
+
+ // Same as Receive() but only blocks for data until |timeout| has elapsed or
+ // |buffer| |length| is exhausted. Currently only timeouts less than one
+ // second are allowed. Return the amount of data read.
+ virtual size_t ReceiveWithTimeout(void* buffer,
+ size_t length,
+ TimeDelta timeout);
+
+ // Returns the number of bytes available. If non-zero, Receive() will not
+ // block when called.
+ virtual size_t Peek();
+
+ // Extracts the contained handle. Used for transferring between
+ // processes.
+ Handle handle() const { return handle_; }
+
+ protected:
+ Handle handle_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SyncSocket);
+};
+
+// Derives from SyncSocket and adds support for shutting down the socket from
+// another thread while a blocking Receive or Send is being done from the
+// thread that owns the socket.
+class BASE_EXPORT CancelableSyncSocket : public SyncSocket {
+ public:
+ CancelableSyncSocket();
+ explicit CancelableSyncSocket(Handle handle);
+ ~CancelableSyncSocket() override {}
+
+ // Initializes a pair of cancelable sockets. See documentation for
+ // SyncSocket::CreatePair for more details.
+ static bool CreatePair(CancelableSyncSocket* socket_a,
+ CancelableSyncSocket* socket_b);
+
+ // A way to shut down a socket even if another thread is currently performing
+ // a blocking Receive or Send.
+ bool Shutdown();
+
+#if defined(OS_WIN)
+ // Since the Linux and Mac implementations actually use a socket, shutting
+ // them down from another thread is pretty simple - we can just call
+ // shutdown(). However, the Windows implementation relies on named pipes
+ // and there isn't a way to cancel a blocking synchronous Read that is
+ // supported on <Vista. So, for Windows only, we override these
+ // SyncSocket methods in order to support shutting down the 'socket'.
+ bool Close() override;
+ size_t Receive(void* buffer, size_t length) override;
+ size_t ReceiveWithTimeout(void* buffer,
+ size_t length,
+ TimeDelta timeout) override;
+#endif
+
+ // Send() is overridden to catch cases where the remote end is not responding
+ // and we fill the local socket buffer. When the buffer is full, this
+ // implementation of Send() will not block indefinitely as
+ // SyncSocket::Send will, but instead return 0, as no bytes could be sent.
+ // Note that the socket will not be closed in this case.
+ size_t Send(const void* buffer, size_t length) override;
+
+ private:
+#if defined(OS_WIN)
+ WaitableEvent shutdown_event_;
+ WaitableEvent file_operation_;
+#endif
+ DISALLOW_COPY_AND_ASSIGN(CancelableSyncSocket);
+};
+
+#if defined(OS_WIN) && !defined(COMPONENT_BUILD)
+// TODO(cpu): remove this once chrome is split in two dlls.
+__declspec(selectany)
+ const SyncSocket::Handle SyncSocket::kInvalidHandle = INVALID_HANDLE_VALUE;
+#endif
+
+} // namespace base
+
+#endif // BASE_SYNC_SOCKET_H_
diff --git a/libchrome/base/sync_socket_posix.cc b/libchrome/base/sync_socket_posix.cc
new file mode 100644
index 0000000..995c8e9
--- /dev/null
+++ b/libchrome/base/sync_socket_posix.cc
@@ -0,0 +1,248 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sync_socket.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#if defined(OS_SOLARIS)
+#include <sys/filio.h>
+#endif
+
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+// To avoid users sending negative message lengths to Send/Receive
+// we clamp message lengths, which are size_t, to no more than INT_MAX.
+const size_t kMaxMessageLength = static_cast<size_t>(INT_MAX);
+
+// Writes |length| of |buffer| into |handle|. Returns the number of bytes
+// written or zero on error. |length| must be greater than 0.
+size_t SendHelper(SyncSocket::Handle handle,
+ const void* buffer,
+ size_t length) {
+ DCHECK_GT(length, 0u);
+ DCHECK_LE(length, kMaxMessageLength);
+ DCHECK_NE(handle, SyncSocket::kInvalidHandle);
+ const char* charbuffer = static_cast<const char*>(buffer);
+ return WriteFileDescriptor(handle, charbuffer, length)
+ ? static_cast<size_t>(length)
+ : 0;
+}
+
+bool CloseHandle(SyncSocket::Handle handle) {
+ if (handle != SyncSocket::kInvalidHandle && close(handle) < 0) {
+ DPLOG(ERROR) << "close";
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace
+
+const SyncSocket::Handle SyncSocket::kInvalidHandle = -1;
+
+SyncSocket::SyncSocket() : handle_(kInvalidHandle) {}
+
+SyncSocket::~SyncSocket() {
+ Close();
+}
+
+// static
+bool SyncSocket::CreatePair(SyncSocket* socket_a, SyncSocket* socket_b) {
+ DCHECK_NE(socket_a, socket_b);
+ DCHECK_EQ(socket_a->handle_, kInvalidHandle);
+ DCHECK_EQ(socket_b->handle_, kInvalidHandle);
+
+#if defined(OS_MACOSX)
+ int nosigpipe = 1;
+#endif // defined(OS_MACOSX)
+
+ Handle handles[2] = { kInvalidHandle, kInvalidHandle };
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, handles) != 0) {
+ CloseHandle(handles[0]);
+ CloseHandle(handles[1]);
+ return false;
+ }
+
+#if defined(OS_MACOSX)
+ // On OSX an attempt to read or write to a closed socket may generate a
+ // SIGPIPE rather than returning -1. setsockopt will shut this off.
+ if (0 != setsockopt(handles[0], SOL_SOCKET, SO_NOSIGPIPE,
+ &nosigpipe, sizeof nosigpipe) ||
+ 0 != setsockopt(handles[1], SOL_SOCKET, SO_NOSIGPIPE,
+ &nosigpipe, sizeof nosigpipe)) {
+ CloseHandle(handles[0]);
+ CloseHandle(handles[1]);
+ return false;
+ }
+#endif
+
+ // Copy the handles out for successful return.
+ socket_a->handle_ = handles[0];
+ socket_b->handle_ = handles[1];
+
+ return true;
+}
+
+// static
+SyncSocket::Handle SyncSocket::UnwrapHandle(
+ const TransitDescriptor& descriptor) {
+ return descriptor.fd;
+}
+
+bool SyncSocket::PrepareTransitDescriptor(ProcessHandle /*peer_process_handle*/,
+ TransitDescriptor* descriptor) {
+ descriptor->fd = handle();
+ descriptor->auto_close = false;
+ return descriptor->fd != kInvalidHandle;
+}
+
+bool SyncSocket::Close() {
+ const bool retval = CloseHandle(handle_);
+ handle_ = kInvalidHandle;
+ return retval;
+}
+
+size_t SyncSocket::Send(const void* buffer, size_t length) {
+ ThreadRestrictions::AssertIOAllowed();
+ return SendHelper(handle_, buffer, length);
+}
+
+size_t SyncSocket::Receive(void* buffer, size_t length) {
+ ThreadRestrictions::AssertIOAllowed();
+ DCHECK_GT(length, 0u);
+ DCHECK_LE(length, kMaxMessageLength);
+ DCHECK_NE(handle_, kInvalidHandle);
+ char* charbuffer = static_cast<char*>(buffer);
+ if (ReadFromFD(handle_, charbuffer, length))
+ return length;
+ return 0;
+}
+
+size_t SyncSocket::ReceiveWithTimeout(void* buffer,
+ size_t length,
+ TimeDelta timeout) {
+ ThreadRestrictions::AssertIOAllowed();
+ DCHECK_GT(length, 0u);
+ DCHECK_LE(length, kMaxMessageLength);
+ DCHECK_NE(handle_, kInvalidHandle);
+
+ // TODO(dalecurtis): There's an undiagnosed issue on OSX where we're seeing
+ // large numbers of open files which prevents select() from being used. In
+ // this case, the best we can do is Peek() to see if we can Receive() now or
+ // return a timeout error (0) if not. See http://crbug.com/314364.
+ if (handle_ >= FD_SETSIZE)
+ return Peek() < length ? 0 : Receive(buffer, length);
+
+ // Only timeouts greater than zero and less than one second are allowed.
+ DCHECK_GT(timeout.InMicroseconds(), 0);
+ DCHECK_LT(timeout.InMicroseconds(),
+ base::TimeDelta::FromSeconds(1).InMicroseconds());
+
+ // Track the start time so we can reduce the timeout as data is read.
+ TimeTicks start_time = TimeTicks::Now();
+ const TimeTicks finish_time = start_time + timeout;
+
+ fd_set read_fds;
+ size_t bytes_read_total;
+ for (bytes_read_total = 0;
+ bytes_read_total < length && timeout.InMicroseconds() > 0;
+ timeout = finish_time - base::TimeTicks::Now()) {
+ FD_ZERO(&read_fds);
+ FD_SET(handle_, &read_fds);
+
+ // Wait for data to become available.
+ struct timeval timeout_struct =
+ { 0, static_cast<suseconds_t>(timeout.InMicroseconds()) };
+ const int select_result =
+ select(handle_ + 1, &read_fds, NULL, NULL, &timeout_struct);
+ // Handle EINTR manually since we need to update the timeout value.
+ if (select_result == -1 && errno == EINTR)
+ continue;
+ if (select_result <= 0)
+ return bytes_read_total;
+
+ // select() only tells us that data is ready for reading, not how much. We
+ // must Peek() for the amount ready for reading to avoid blocking.
+ DCHECK(FD_ISSET(handle_, &read_fds));
+ const size_t bytes_to_read = std::min(Peek(), length - bytes_read_total);
+
+ // There may be zero bytes to read if the socket at the other end closed.
+ if (!bytes_to_read)
+ return bytes_read_total;
+
+ const size_t bytes_received =
+ Receive(static_cast<char*>(buffer) + bytes_read_total, bytes_to_read);
+ bytes_read_total += bytes_received;
+ if (bytes_received != bytes_to_read)
+ return bytes_read_total;
+ }
+
+ return bytes_read_total;
+}
+
+size_t SyncSocket::Peek() {
+ DCHECK_NE(handle_, kInvalidHandle);
+ int number_chars = 0;
+ if (ioctl(handle_, FIONREAD, &number_chars) == -1) {
+ // If there is an error in ioctl, signal that the channel would block.
+ return 0;
+ }
+ DCHECK_GE(number_chars, 0);
+ return number_chars;
+}
+
+CancelableSyncSocket::CancelableSyncSocket() {}
+CancelableSyncSocket::CancelableSyncSocket(Handle handle)
+ : SyncSocket(handle) {
+}
+
+bool CancelableSyncSocket::Shutdown() {
+ DCHECK_NE(handle_, kInvalidHandle);
+ return HANDLE_EINTR(shutdown(handle_, SHUT_RDWR)) >= 0;
+}
+
+size_t CancelableSyncSocket::Send(const void* buffer, size_t length) {
+ DCHECK_GT(length, 0u);
+ DCHECK_LE(length, kMaxMessageLength);
+ DCHECK_NE(handle_, kInvalidHandle);
+
+ const int flags = fcntl(handle_, F_GETFL);
+ if (flags != -1 && (flags & O_NONBLOCK) == 0) {
+ // Set the socket to non-blocking mode for sending if its original mode
+ // is blocking.
+ fcntl(handle_, F_SETFL, flags | O_NONBLOCK);
+ }
+
+ const size_t len = SendHelper(handle_, buffer, length);
+
+ if (flags != -1 && (flags & O_NONBLOCK) == 0) {
+ // Restore the original flags.
+ fcntl(handle_, F_SETFL, flags);
+ }
+
+ return len;
+}
+
+// static
+bool CancelableSyncSocket::CreatePair(CancelableSyncSocket* socket_a,
+ CancelableSyncSocket* socket_b) {
+ return SyncSocket::CreatePair(socket_a, socket_b);
+}
+
+} // namespace base
diff --git a/libchrome/base/sync_socket_unittest.cc b/libchrome/base/sync_socket_unittest.cc
new file mode 100644
index 0000000..97a1aec
--- /dev/null
+++ b/libchrome/base/sync_socket_unittest.cc
@@ -0,0 +1,131 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/macros.h"
+#include "base/sync_socket.h"
+#include "base/threading/simple_thread.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+const int kReceiveTimeoutInMilliseconds = 750;
+
+class HangingReceiveThread : public base::DelegateSimpleThread::Delegate {
+ public:
+ explicit HangingReceiveThread(base::SyncSocket* socket)
+ : socket_(socket),
+ thread_(this, "HangingReceiveThread") {
+ thread_.Start();
+ }
+
+ ~HangingReceiveThread() override {}
+
+ void Run() override {
+ int data = 0;
+ ASSERT_EQ(socket_->Peek(), 0u);
+
+ // Use receive with timeout so we don't hang the test harness indefinitely.
+ ASSERT_EQ(0u, socket_->ReceiveWithTimeout(
+ &data, sizeof(data), base::TimeDelta::FromMilliseconds(
+ kReceiveTimeoutInMilliseconds)));
+ }
+
+ void Stop() {
+ thread_.Join();
+ }
+
+ private:
+ base::SyncSocket* socket_;
+ base::DelegateSimpleThread thread_;
+
+ DISALLOW_COPY_AND_ASSIGN(HangingReceiveThread);
+};
+
+// Tests sending data between two SyncSockets. Uses ASSERT() and thus will exit
+// early upon failure. Callers should use ASSERT_NO_FATAL_FAILURE() if testing
+// continues after return.
+void SendReceivePeek(base::SyncSocket* socket_a, base::SyncSocket* socket_b) {
+ int received = 0;
+ const int kSending = 123;
+ static_assert(sizeof(kSending) == sizeof(received), "invalid data size");
+
+ ASSERT_EQ(0u, socket_a->Peek());
+ ASSERT_EQ(0u, socket_b->Peek());
+
+ // Verify |socket_a| can send to |socket_b| and |socket_b| can Receive from
+ // |socket_a|.
+ ASSERT_EQ(sizeof(kSending), socket_a->Send(&kSending, sizeof(kSending)));
+ ASSERT_EQ(sizeof(kSending), socket_b->Peek());
+ ASSERT_EQ(sizeof(kSending), socket_b->Receive(&received, sizeof(kSending)));
+ ASSERT_EQ(kSending, received);
+
+ ASSERT_EQ(0u, socket_a->Peek());
+ ASSERT_EQ(0u, socket_b->Peek());
+
+ // Now verify the reverse.
+ received = 0;
+ ASSERT_EQ(sizeof(kSending), socket_b->Send(&kSending, sizeof(kSending)));
+ ASSERT_EQ(sizeof(kSending), socket_a->Peek());
+ ASSERT_EQ(sizeof(kSending), socket_a->Receive(&received, sizeof(kSending)));
+ ASSERT_EQ(kSending, received);
+
+ ASSERT_EQ(0u, socket_a->Peek());
+ ASSERT_EQ(0u, socket_b->Peek());
+
+ ASSERT_TRUE(socket_a->Close());
+ ASSERT_TRUE(socket_b->Close());
+}
+
+template <class SocketType>
+void NormalSendReceivePeek() {
+ SocketType socket_a, socket_b;
+ ASSERT_TRUE(SocketType::CreatePair(&socket_a, &socket_b));
+ SendReceivePeek(&socket_a, &socket_b);
+}
+
+template <class SocketType>
+void ClonedSendReceivePeek() {
+ SocketType socket_a, socket_b;
+ ASSERT_TRUE(SocketType::CreatePair(&socket_a, &socket_b));
+
+ // Create new SyncSockets from the paired handles.
+ SocketType socket_c(socket_a.handle()), socket_d(socket_b.handle());
+ SendReceivePeek(&socket_c, &socket_d);
+}
+
+} // namespace
+
+TEST(SyncSocket, NormalSendReceivePeek) {
+ NormalSendReceivePeek<base::SyncSocket>();
+}
+
+TEST(SyncSocket, ClonedSendReceivePeek) {
+ ClonedSendReceivePeek<base::SyncSocket>();
+}
+
+TEST(CancelableSyncSocket, NormalSendReceivePeek) {
+ NormalSendReceivePeek<base::CancelableSyncSocket>();
+}
+
+TEST(CancelableSyncSocket, ClonedSendReceivePeek) {
+ ClonedSendReceivePeek<base::CancelableSyncSocket>();
+}
+
+TEST(CancelableSyncSocket, CancelReceiveShutdown) {
+ base::CancelableSyncSocket socket_a, socket_b;
+ ASSERT_TRUE(base::CancelableSyncSocket::CreatePair(&socket_a, &socket_b));
+
+ base::TimeTicks start = base::TimeTicks::Now();
+ HangingReceiveThread thread(&socket_b);
+ ASSERT_TRUE(socket_b.Shutdown());
+ thread.Stop();
+
+ // Ensure the receive didn't just timeout.
+ ASSERT_LT((base::TimeTicks::Now() - start).InMilliseconds(),
+ kReceiveTimeoutInMilliseconds);
+
+ ASSERT_TRUE(socket_a.Close());
+ ASSERT_TRUE(socket_b.Close());
+}
diff --git a/libchrome/base/synchronization/cancellation_flag.cc b/libchrome/base/synchronization/cancellation_flag.cc
new file mode 100644
index 0000000..ca5c0a8
--- /dev/null
+++ b/libchrome/base/synchronization/cancellation_flag.cc
@@ -0,0 +1,26 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/cancellation_flag.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+void CancellationFlag::Set() {
+#if !defined(NDEBUG)
+ DCHECK_EQ(set_on_, PlatformThread::CurrentId());
+#endif
+ base::subtle::Release_Store(&flag_, 1);
+}
+
+bool CancellationFlag::IsSet() const {
+ return base::subtle::Acquire_Load(&flag_) != 0;
+}
+
+void CancellationFlag::UnsafeResetForTesting() {
+ base::subtle::Release_Store(&flag_, 0);
+}
+
+} // namespace base
diff --git a/libchrome/base/synchronization/cancellation_flag.h b/libchrome/base/synchronization/cancellation_flag.h
new file mode 100644
index 0000000..f2f83f4
--- /dev/null
+++ b/libchrome/base/synchronization/cancellation_flag.h
@@ -0,0 +1,49 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_CANCELLATION_FLAG_H_
+#define BASE_SYNCHRONIZATION_CANCELLATION_FLAG_H_
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+// CancellationFlag allows one thread to cancel jobs executed on some worker
+// thread. Calling Set() from one thread and IsSet() from a number of threads
+// is thread-safe.
+//
+// This class IS NOT intended for synchronization between threads.
+class BASE_EXPORT CancellationFlag {
+ public:
+ CancellationFlag() : flag_(false) {
+#if !defined(NDEBUG)
+ set_on_ = PlatformThread::CurrentId();
+#endif
+ }
+ ~CancellationFlag() {}
+
+ // Set the flag. May only be called on the thread which owns the object.
+ void Set();
+ bool IsSet() const; // Returns true iff the flag was set.
+
+ // For subtle reasons that may be different on different architectures,
+ // a different thread testing IsSet() may erroneously read 'true' after
+ // this method has been called.
+ void UnsafeResetForTesting();
+
+ private:
+ base::subtle::Atomic32 flag_;
+#if !defined(NDEBUG)
+ PlatformThreadId set_on_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(CancellationFlag);
+};
+
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_CANCELLATION_FLAG_H_
diff --git a/libchrome/base/synchronization/cancellation_flag_unittest.cc b/libchrome/base/synchronization/cancellation_flag_unittest.cc
new file mode 100644
index 0000000..13c74bc
--- /dev/null
+++ b/libchrome/base/synchronization/cancellation_flag_unittest.cc
@@ -0,0 +1,65 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests of CancellationFlag class.
+
+#include "base/synchronization/cancellation_flag.h"
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/spin_wait.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+namespace base {
+
+namespace {
+
+//------------------------------------------------------------------------------
+// Define our test class.
+//------------------------------------------------------------------------------
+
+void CancelHelper(CancellationFlag* flag) {
+#if GTEST_HAS_DEATH_TEST
+ ASSERT_DEBUG_DEATH(flag->Set(), "");
+#endif
+}
+
+TEST(CancellationFlagTest, SimpleSingleThreadedTest) {
+ CancellationFlag flag;
+ ASSERT_FALSE(flag.IsSet());
+ flag.Set();
+ ASSERT_TRUE(flag.IsSet());
+}
+
+TEST(CancellationFlagTest, DoubleSetTest) {
+ CancellationFlag flag;
+ ASSERT_FALSE(flag.IsSet());
+ flag.Set();
+ ASSERT_TRUE(flag.IsSet());
+ flag.Set();
+ ASSERT_TRUE(flag.IsSet());
+}
+
+TEST(CancellationFlagTest, SetOnDifferentThreadDeathTest) {
+ // Checks that Set() can't be called from any other thread.
+ // CancellationFlag should die on a DCHECK if Set() is called from
+ // other thread.
+ ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+ Thread t("CancellationFlagTest.SetOnDifferentThreadDeathTest");
+ ASSERT_TRUE(t.Start());
+ ASSERT_TRUE(t.message_loop());
+ ASSERT_TRUE(t.IsRunning());
+
+ CancellationFlag flag;
+ t.task_runner()->PostTask(FROM_HERE, base::Bind(&CancelHelper, &flag));
+}
+
+} // namespace
+
+} // namespace base
diff --git a/libchrome/base/synchronization/condition_variable.h b/libchrome/base/synchronization/condition_variable.h
new file mode 100644
index 0000000..ebf90d2
--- /dev/null
+++ b/libchrome/base/synchronization/condition_variable.h
@@ -0,0 +1,122 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ConditionVariable wraps pthreads condition variable synchronization or, on
+// Windows, simulates it. This functionality is very helpful for having
+// several threads wait for an event, as is common with a thread pool managed
+// by a master. The meaning of such an event in the (worker) thread pool
+// scenario is that additional tasks are now available for processing. It is
+// used in Chrome in the DNS prefetching system to notify worker threads that
+// a queue now has items (tasks) which need to be tended to. A related use
+// would have a pool manager waiting on a ConditionVariable, waiting for a
+// thread in the pool to announce (signal) that there is now more room in a
+// (bounded size) communications queue for the manager to deposit tasks, or,
+// as a second example, that the queue of tasks is completely empty and all
+// workers are waiting.
+//
+// USAGE NOTE 1: spurious signal events are possible with this and
+// most implementations of condition variables. As a result, be
+// *sure* to retest your condition before proceeding. The following
+// is a good example of doing this correctly:
+//
+// while (!work_to_be_done()) Wait(...);
+//
+// In contrast do NOT do the following:
+//
+// if (!work_to_be_done()) Wait(...); // Don't do this.
+//
+// Especially avoid the above if you are relying on some other thread only
+// issuing a signal *if* there is work-to-do.  There can/will
+// be spurious signals. Recheck state on waiting thread before
+// assuming the signal was intentional. Caveat caller ;-).
+//
+// USAGE NOTE 2: Broadcast() frees up all waiting threads at once,
+// which leads to contention for the locks they all held when they
+// called Wait(). This results in POOR performance. A much better
+// approach to getting a lot of threads out of Wait() is to have each
+// thread (upon exiting Wait()) call Signal() to free up another
+// Wait'ing thread. Look at condition_variable_unittest.cc for
+// both examples.
+//
+// Broadcast() can be used nicely during teardown, as it gets the job
+// done, and leaves no sleeping threads... and performance is less
+// critical at that point.
+//
+// The semantics of Broadcast() are carefully crafted so that *all*
+// threads that were waiting when the request was made will indeed
+// get signaled. Some implementations mess up, and don't signal them
+// all, while others allow the wait to be effectively turned off (for
+// a while while waiting threads come around). This implementation
+// appears correct, as it will not "lose" any signals, and will guarantee
+// that all threads get signaled by Broadcast().
+//
+// This implementation offers support for "performance" in its selection of
+// which thread to revive. Performance, in direct contrast with "fairness,"
+// assures that the thread that most recently began to Wait() is selected by
+// Signal to revive. Fairness would (if publicly supported) assure that the
+// thread that has Wait()ed the longest is selected. The default policy
+// may improve performance, as the selected thread may have a greater chance of
+// having some of its stack data in various CPU caches.
+//
+// For a discussion of the many very subtle implementation details, see the FAQ
+// at the end of condition_variable_win.cc.
+
+#ifndef BASE_SYNCHRONIZATION_CONDITION_VARIABLE_H_
+#define BASE_SYNCHRONIZATION_CONDITION_VARIABLE_H_
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+#include <pthread.h>
+#endif
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+
+class TimeDelta;
+
+class BASE_EXPORT ConditionVariable {
+ public:
+  // Construct a cv for use with ONLY one user lock.
+  explicit ConditionVariable(Lock* user_lock);
+
+  ~ConditionVariable();
+
+  // Wait() releases the caller's critical section atomically as it starts to
+  // sleep, and then reacquires it when it is signaled.  The caller must hold
+  // the user lock on entry, and must re-test its predicate on return
+  // (spurious wake-ups are possible; see USAGE NOTE 1 above).
+  void Wait();
+  // Like Wait(), but gives up after |max_time|.  The void return gives the
+  // caller no indication of whether the wait ended via signal or timeout, so
+  // the predicate must be re-checked either way.
+  void TimedWait(const TimeDelta& max_time);
+
+  // Broadcast() revives all waiting threads.
+  void Broadcast();
+  // Signal() revives one waiting thread.
+  void Signal();
+
+ private:
+
+#if defined(OS_WIN)
+  CONDITION_VARIABLE cv_;
+  SRWLOCK* const srwlock_;
+#elif defined(OS_POSIX)
+  pthread_cond_t condition_;
+  pthread_mutex_t* user_mutex_;  // Native handle of the single user lock.
+#endif
+
+#if DCHECK_IS_ON() && (defined(OS_WIN) || defined(OS_POSIX))
+  base::Lock* const user_lock_;  // Needed to adjust shadow lock state on wait.
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
+};
+
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_CONDITION_VARIABLE_H_
diff --git a/libchrome/base/synchronization/condition_variable_posix.cc b/libchrome/base/synchronization/condition_variable_posix.cc
new file mode 100644
index 0000000..d86fd18
--- /dev/null
+++ b/libchrome/base/synchronization/condition_variable_posix.cc
@@ -0,0 +1,137 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/condition_variable.h"
+
+#include <errno.h>
+#include <stdint.h>
+#include <sys/time.h>
+
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Binds this condition variable to |user_lock|'s native mutex and, where the
+// platform supports it, configures the condvar to use the monotonic clock so
+// TimedWait() is immune to wall-clock adjustments.
+ConditionVariable::ConditionVariable(Lock* user_lock)
+    : user_mutex_(user_lock->lock_.native_handle())
+#if DCHECK_IS_ON()
+    , user_lock_(user_lock)
+#endif
+{
+  int rv = 0;
+  // http://crbug.com/293736
+  // NaCl doesn't support monotonic clock based absolute deadlines.
+  // On older Android platform versions, it's supported through the
+  // non-standard pthread_cond_timedwait_monotonic_np. Newer platform
+  // versions have pthread_condattr_setclock.
+  // Mac can use relative time deadlines.
+#if !defined(OS_MACOSX) && !defined(OS_NACL) && \
+    !(defined(OS_ANDROID) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC))
+  pthread_condattr_t attrs;
+  rv = pthread_condattr_init(&attrs);
+  DCHECK_EQ(0, rv);
+  // Previously the return value of pthread_condattr_setclock() was silently
+  // ignored; check it so a rejected clock is caught in debug builds.
+  rv = pthread_condattr_setclock(&attrs, CLOCK_MONOTONIC);
+  DCHECK_EQ(0, rv);
+  rv = pthread_cond_init(&condition_, &attrs);
+  pthread_condattr_destroy(&attrs);
+#else
+  rv = pthread_cond_init(&condition_, NULL);
+#endif
+  DCHECK_EQ(0, rv);
+}
+
+ConditionVariable::~ConditionVariable() {
+#if defined(OS_MACOSX)
+  // This hack is necessary to avoid a fatal pthreads subsystem bug in the
+  // Darwin kernel. http://crbug.com/517681.
+  // A 1ns timed wait against a throw-away lock exercises the condvar once
+  // before destruction; per the referenced bug this sidesteps the kernel
+  // crash — TODO(review): keep in sync with upstream if the workaround
+  // changes.
+  {
+    base::Lock lock;
+    base::AutoLock l(lock);
+    struct timespec ts;
+    ts.tv_sec = 0;
+    ts.tv_nsec = 1;
+    pthread_cond_timedwait_relative_np(&condition_, lock.lock_.native_handle(),
+                                       &ts);
+  }
+#endif
+
+  int rv = pthread_cond_destroy(&condition_);
+  DCHECK_EQ(0, rv);
+}
+
+// Blocks until signaled.  Caller must hold the user lock; pthread_cond_wait
+// atomically releases it while sleeping and reacquires it before returning.
+void ConditionVariable::Wait() {
+  base::ThreadRestrictions::AssertWaitAllowed();
+#if DCHECK_IS_ON()
+  // The wait releases |user_mutex_| under the hood; clear the Lock's shadow
+  // owner so debug ownership bookkeeping stays accurate.
+  user_lock_->CheckHeldAndUnmark();
+#endif
+  int rv = pthread_cond_wait(&condition_, user_mutex_);
+  DCHECK_EQ(0, rv);
+#if DCHECK_IS_ON()
+  // The mutex is held again on return; restore the shadow owner.
+  user_lock_->CheckUnheldAndMark();
+#endif
+}
+
+// Blocks until signaled or until |max_time| elapses.  Caller must hold the
+// user lock.  Gives no indication of signal vs. timeout; callers re-check
+// their predicate.
+void ConditionVariable::TimedWait(const TimeDelta& max_time) {
+  base::ThreadRestrictions::AssertWaitAllowed();
+  // Express |max_time| as a relative timespec.
+  int64_t usecs = max_time.InMicroseconds();
+  struct timespec relative_time;
+  relative_time.tv_sec = usecs / Time::kMicrosecondsPerSecond;
+  relative_time.tv_nsec =
+      (usecs % Time::kMicrosecondsPerSecond) * Time::kNanosecondsPerMicrosecond;
+
+#if DCHECK_IS_ON()
+  user_lock_->CheckHeldAndUnmark();
+#endif
+
+#if defined(OS_MACOSX)
+  // Mac accepts a relative deadline directly; no conversion required.
+  int rv = pthread_cond_timedwait_relative_np(
+      &condition_, user_mutex_, &relative_time);
+#else
+  // The timeout argument to pthread_cond_timedwait is in absolute time.
+  struct timespec absolute_time;
+#if defined(OS_NACL)
+  // See comment in constructor for why this is different in NaCl.
+  struct timeval now;
+  gettimeofday(&now, NULL);
+  absolute_time.tv_sec = now.tv_sec;
+  absolute_time.tv_nsec = now.tv_usec * Time::kNanosecondsPerMicrosecond;
+#else
+  // Use the monotonic clock to match the clock configured in the constructor.
+  struct timespec now;
+  clock_gettime(CLOCK_MONOTONIC, &now);
+  absolute_time.tv_sec = now.tv_sec;
+  absolute_time.tv_nsec = now.tv_nsec;
+#endif
+
+  absolute_time.tv_sec += relative_time.tv_sec;
+  absolute_time.tv_nsec += relative_time.tv_nsec;
+  // Normalize: the nsec field may now hold more than one second's worth.
+  absolute_time.tv_sec += absolute_time.tv_nsec / Time::kNanosecondsPerSecond;
+  absolute_time.tv_nsec %= Time::kNanosecondsPerSecond;
+  DCHECK_GE(absolute_time.tv_sec, now.tv_sec);  // Overflow paranoia
+
+#if defined(OS_ANDROID) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)
+  int rv = pthread_cond_timedwait_monotonic_np(
+      &condition_, user_mutex_, &absolute_time);
+#else
+  int rv = pthread_cond_timedwait(&condition_, user_mutex_, &absolute_time);
+#endif  // OS_ANDROID && HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC
+#endif  // OS_MACOSX
+
+  // ETIMEDOUT is the only acceptable "failure"; anything else is a bug.
+  DCHECK(rv == 0 || rv == ETIMEDOUT);
+#if DCHECK_IS_ON()
+  user_lock_->CheckUnheldAndMark();
+#endif
+}
+
+// Wakes every thread currently blocked in Wait()/TimedWait() on this cv.
+void ConditionVariable::Broadcast() {
+  const int result = pthread_cond_broadcast(&condition_);
+  DCHECK_EQ(0, result);
+}
+
+// Wakes a single thread currently blocked in Wait()/TimedWait() on this cv.
+void ConditionVariable::Signal() {
+  const int result = pthread_cond_signal(&condition_);
+  DCHECK_EQ(0, result);
+}
+
+} // namespace base
diff --git a/libchrome/base/synchronization/condition_variable_unittest.cc b/libchrome/base/synchronization/condition_variable_unittest.cc
new file mode 100644
index 0000000..d60b2b8
--- /dev/null
+++ b/libchrome/base/synchronization/condition_variable_unittest.cc
@@ -0,0 +1,768 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Multi-threaded tests of ConditionVariable class.
+
+#include "base/synchronization/condition_variable.h"
+
+#include <time.h>
+
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/spin_wait.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_collision_warner.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+namespace base {
+
+namespace {
+//------------------------------------------------------------------------------
+// Define our test class, with several common variables.
+//------------------------------------------------------------------------------
+
+// Fixture that predefines a small set of commonly used TimeDelta constants so
+// the individual tests don't repeat the FromMilliseconds() boilerplate.
+class ConditionVariableTest : public PlatformTest {
+ public:
+  const TimeDelta kZeroMs;
+  const TimeDelta kTenMs;
+  const TimeDelta kThirtyMs;
+  const TimeDelta kFortyFiveMs;
+  const TimeDelta kSixtyMs;
+  const TimeDelta kOneHundredMs;
+
+  ConditionVariableTest()
+      : kZeroMs(TimeDelta::FromMilliseconds(0)),
+        kTenMs(TimeDelta::FromMilliseconds(10)),
+        kThirtyMs(TimeDelta::FromMilliseconds(30)),
+        kFortyFiveMs(TimeDelta::FromMilliseconds(45)),
+        kSixtyMs(TimeDelta::FromMilliseconds(60)),
+        kOneHundredMs(TimeDelta::FromMilliseconds(100)) {
+  }
+};
+
+//------------------------------------------------------------------------------
+// Define a class that will control activities in several multi-threaded tests.
+// The general structure of multi-threaded tests is that a test case will
+// construct an instance of a WorkQueue. The WorkQueue will spin up some
+// threads and control them throughout their lifetime, as well as maintaining
+// a central repository of the work thread's activity. Finally, the WorkQueue
+// will command the worker threads to terminate. At that point, the test
+// cases will validate that the WorkQueue has records showing that the desired
+// activities were performed.
+//------------------------------------------------------------------------------
+
+// Callers are responsible for synchronizing access to the following class.
+// The WorkQueue::lock_, as accessed via WorkQueue::lock(), should be used for
+// all synchronized access.
+class WorkQueue : public PlatformThread::Delegate {
+ public:
+  // Spins up |thread_count| worker threads that immediately start running
+  // ThreadMain().
+  explicit WorkQueue(int thread_count);
+  ~WorkQueue() override;
+
+  // PlatformThread::Delegate interface.
+  void ThreadMain() override;
+
+  //----------------------------------------------------------------------------
+  // Worker threads only call the following methods.
+  // They should use the lock to get exclusive access.
+  int GetThreadId();  // Get an ID assigned to a thread.
+  bool EveryIdWasAllocated() const;  // Indicates that all IDs were handed out.
+  TimeDelta GetAnAssignment(int thread_id);  // Get a work task duration.
+  void WorkIsCompleted(int thread_id);
+
+  int task_count() const;
+  bool allow_help_requests() const;  // Workers can signal more workers.
+  bool shutdown() const;  // Check if shutdown has been requested.
+
+  // Records that one worker has acknowledged the shutdown request.
+  void thread_shutting_down();
+
+
+  //----------------------------------------------------------------------------
+  // These accessors only hand out pointers to the synchronization objects, so
+  // worker threads may call them without holding the lock.
+  Lock* lock();
+
+  ConditionVariable* work_is_available();
+  ConditionVariable* all_threads_have_ids();
+  ConditionVariable* no_more_tasks();
+
+  //----------------------------------------------------------------------------
+  // The rest of the methods are for use by the controlling master thread (the
+  // test case code).
+  void ResetHistory();
+  int GetMinCompletionsByWorkerThread() const;
+  int GetMaxCompletionsByWorkerThread() const;
+  int GetNumThreadsTakingAssignments() const;
+  int GetNumThreadsCompletingTasks() const;
+  int GetNumberOfCompletedTasks() const;
+
+  void SetWorkTime(TimeDelta delay);
+  void SetTaskCount(int count);
+  void SetAllowHelp(bool allow);
+
+  // The following must be called without locking, and will spin wait until the
+  // threads are all in a wait state.
+  void SpinUntilAllThreadsAreWaiting();
+  void SpinUntilTaskCountLessThan(int task_count);
+
+  // Caller must acquire lock before calling.
+  void SetShutdown();
+
+  // Compares the |shutdown_task_count_| to the |thread_count| and returns true
+  // if they are equal.  This check will acquire the |lock_| so the caller
+  // should not hold the lock when calling this method.
+  bool ThreadSafeCheckShutdown(int thread_count);
+
+ private:
+  // Both worker threads and controller use the following to synchronize.
+  Lock lock_;
+  ConditionVariable work_is_available_;  // To tell threads there is work.
+
+  // Conditions to notify the controlling process (if it is interested).
+  ConditionVariable all_threads_have_ids_;  // All threads are running.
+  ConditionVariable no_more_tasks_;  // Task count is zero.
+
+  const int thread_count_;
+  int waiting_thread_count_;
+  std::unique_ptr<PlatformThreadHandle[]> thread_handles_;
+  std::vector<int> assignment_history_;  // Number of assignment per worker.
+  std::vector<int> completion_history_;  // Number of completions per worker.
+  int thread_started_counter_;  // Used to issue unique id to workers.
+  int shutdown_task_count_;  // Number of tasks told to shutdown
+  int task_count_;  // Number of assignment tasks waiting to be processed.
+  TimeDelta worker_delay_;  // Time each task takes to complete.
+  bool allow_help_requests_;  // Workers can signal more workers.
+  bool shutdown_;  // Set when threads need to terminate.
+
+  DFAKE_MUTEX(locked_methods_);
+};
+
+//------------------------------------------------------------------------------
+// The next section contains the actual tests.
+//------------------------------------------------------------------------------
+
+// Exercises construction/destruction of a cv, plus a handful of TimedWait()
+// calls with no signaler (each wait should simply time out).
+TEST_F(ConditionVariableTest, StartupShutdownTest) {
+  Lock lock;
+
+  // First try trivial startup/shutdown.
+  {
+    ConditionVariable cv1(&lock);
+  }  // Call for cv1 destruction.
+
+  // Exercise with at least a few waits.
+  ConditionVariable cv(&lock);
+
+  lock.Acquire();
+  cv.TimedWait(kTenMs);  // Wait for 10 ms.
+  cv.TimedWait(kTenMs);  // Wait for 10 ms.
+  lock.Release();
+
+  lock.Acquire();
+  cv.TimedWait(kTenMs);  // Wait for 10 ms.
+  cv.TimedWait(kTenMs);  // Wait for 10 ms.
+  cv.TimedWait(kTenMs);  // Wait for 10 ms.
+  lock.Release();
+}  // Call for cv destruction.
+
+// Verifies that an unsignaled TimedWait() blocks for at least the requested
+// duration.
+TEST_F(ConditionVariableTest, TimeoutTest) {
+  Lock lock;
+  ConditionVariable cv(&lock);
+  lock.Acquire();
+
+  TimeTicks start = TimeTicks::Now();
+  // Renamed from WAIT_TIME/FUDGE_TIME to follow the kConstantName convention
+  // used elsewhere in this file (see TimeoutAcrossSetTimeOfDay below).
+  const TimeDelta kWaitTime = TimeDelta::FromMilliseconds(300);
+  // Allow for clocking rate granularity.
+  const TimeDelta kFudgeTime = TimeDelta::FromMilliseconds(50);
+
+  cv.TimedWait(kWaitTime + kFudgeTime);
+  TimeDelta duration = TimeTicks::Now() - start;
+  // We can't use EXPECT_GE here as the TimeDelta class does not support the
+  // required stream conversion.
+  EXPECT_TRUE(duration >= kWaitTime);
+
+  lock.Release();
+}
+
+#if defined(OS_POSIX)
+const int kDiscontinuitySeconds = 2;
+
+// Posted to a helper thread: rewinds the wall clock by kDiscontinuitySeconds
+// while the main thread is blocked in TimedWait().  Acquiring |lock| first
+// ensures the rewind only happens after the waiter has released the lock
+// inside TimedWait().
+void BackInTime(Lock* lock) {
+  AutoLock auto_lock(*lock);
+
+  timeval tv;
+  gettimeofday(&tv, NULL);
+  tv.tv_sec -= kDiscontinuitySeconds;
+  settimeofday(&tv, NULL);
+}
+
+// Tests that TimedWait ignores changes to the system clock.
+// Test is disabled by default, because it needs to run as root to muck with the
+// system clock.
+// http://crbug.com/293736
+TEST_F(ConditionVariableTest, DISABLED_TimeoutAcrossSetTimeOfDay) {
+  // Jump the wall clock forward; BackInTime() will later jump it back while
+  // the TimedWait below is in progress.
+  timeval tv;
+  gettimeofday(&tv, NULL);
+  tv.tv_sec += kDiscontinuitySeconds;
+  if (settimeofday(&tv, NULL) < 0) {
+    PLOG(ERROR) << "Could not set time of day. Run as root?";
+    return;
+  }
+
+  Lock lock;
+  ConditionVariable cv(&lock);
+  lock.Acquire();
+
+  Thread thread("Helper");
+  thread.Start();
+  thread.task_runner()->PostTask(FROM_HERE, base::Bind(&BackInTime, &lock));
+
+  TimeTicks start = TimeTicks::Now();
+  const TimeDelta kWaitTime = TimeDelta::FromMilliseconds(300);
+  // Allow for clocking rate granularity.
+  const TimeDelta kFudgeTime = TimeDelta::FromMilliseconds(50);
+
+  cv.TimedWait(kWaitTime + kFudgeTime);
+  TimeDelta duration = TimeTicks::Now() - start;
+
+  thread.Stop();
+  // We can't use EXPECT_GE here as the TimeDelta class does not support the
+  // required stream conversion.
+  EXPECT_TRUE(duration >= kWaitTime);
+  // If the wait had (incorrectly) tracked the wall clock, the rewind would
+  // have stretched it by ~kDiscontinuitySeconds; bound it from above too.
+  EXPECT_TRUE(duration <= TimeDelta::FromSeconds(kDiscontinuitySeconds));
+
+  lock.Release();
+}
+#endif
+
+
+// Suddenly got flaky on Win, see http://crbug.com/10607 (starting at
+// comment #15).
+#if defined(OS_WIN)
+#define MAYBE_MultiThreadConsumerTest DISABLED_MultiThreadConsumerTest
+#else
+#define MAYBE_MultiThreadConsumerTest MultiThreadConsumerTest
+#endif
+// Test serial task servicing, as well as two parallel task servicing methods.
+// Runs four mini-scenarios against the same WorkQueue: Signal() with help
+// allowed, Broadcast() with and without help, and a larger 20-task round,
+// then shuts the queue down and verifies all workers acknowledged.
+TEST_F(ConditionVariableTest, MAYBE_MultiThreadConsumerTest) {
+  const int kThreadCount = 10;
+  WorkQueue queue(kThreadCount);  // Start the threads.
+
+  const int kTaskCount = 10;  // Number of tasks in each mini-test here.
+
+  // NOTE(review): |start_time| is recorded below but never examined.
+  Time start_time;  // Used to time task processing.
+
+  {
+    base::AutoLock auto_lock(*queue.lock());
+    while (!queue.EveryIdWasAllocated())
+      queue.all_threads_have_ids()->Wait();
+  }
+
+  // If threads aren't in a wait state, they may start to gobble up tasks in
+  // parallel, short-circuiting (breaking) this test.
+  queue.SpinUntilAllThreadsAreWaiting();
+
+  {
+    // Since we have no tasks yet, all threads should be waiting by now.
+    base::AutoLock auto_lock(*queue.lock());
+    EXPECT_EQ(0, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(0, queue.GetNumThreadsCompletingTasks());
+    EXPECT_EQ(0, queue.task_count());
+    EXPECT_EQ(0, queue.GetMaxCompletionsByWorkerThread());
+    EXPECT_EQ(0, queue.GetMinCompletionsByWorkerThread());
+    EXPECT_EQ(0, queue.GetNumberOfCompletedTasks());
+
+    // Set up to make each task include getting help from another worker, so
+    // that the work gets done in parallel.
+    queue.ResetHistory();
+    queue.SetTaskCount(kTaskCount);
+    queue.SetWorkTime(kThirtyMs);
+    queue.SetAllowHelp(true);
+
+    start_time = Time::Now();
+  }
+
+  queue.work_is_available()->Signal();  // But each worker can signal another.
+  // Wait till we at least start to handle tasks (and we're not all waiting).
+  queue.SpinUntilTaskCountLessThan(kTaskCount);
+  // Wait to allow all the workers to get done.
+  queue.SpinUntilAllThreadsAreWaiting();
+
+  {
+    // Wait until all work tasks have at least been assigned.
+    base::AutoLock auto_lock(*queue.lock());
+    while (queue.task_count())
+      queue.no_more_tasks()->Wait();
+
+    // To avoid racy assumptions, we'll just assert that at least 2 threads
+    // did work.  We know that the first worker should have gone to sleep, and
+    // hence a second worker should have gotten an assignment.
+    EXPECT_LE(2, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(kTaskCount, queue.GetNumberOfCompletedTasks());
+
+    // Try to ask all workers to help, and only a few will do the work.
+    queue.ResetHistory();
+    queue.SetTaskCount(3);
+    queue.SetWorkTime(kThirtyMs);
+    queue.SetAllowHelp(false);
+  }
+  queue.work_is_available()->Broadcast();  // Make them all try.
+  // Wait till we at least start to handle tasks (and we're not all waiting).
+  queue.SpinUntilTaskCountLessThan(3);
+  // Wait to allow the 3 workers to get done.
+  queue.SpinUntilAllThreadsAreWaiting();
+
+  {
+    base::AutoLock auto_lock(*queue.lock());
+    EXPECT_EQ(3, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(3, queue.GetNumThreadsCompletingTasks());
+    EXPECT_EQ(0, queue.task_count());
+    EXPECT_EQ(1, queue.GetMaxCompletionsByWorkerThread());
+    EXPECT_EQ(0, queue.GetMinCompletionsByWorkerThread());
+    EXPECT_EQ(3, queue.GetNumberOfCompletedTasks());
+
+    // Set up to make each task get help from another worker.
+    queue.ResetHistory();
+    queue.SetTaskCount(3);
+    queue.SetWorkTime(kThirtyMs);
+    queue.SetAllowHelp(true);  // Allow (unnecessary) help requests.
+  }
+  queue.work_is_available()->Broadcast();  // Signal all threads.
+  // Wait till we at least start to handle tasks (and we're not all waiting).
+  queue.SpinUntilTaskCountLessThan(3);
+  // Wait to allow the 3 workers to get done.
+  queue.SpinUntilAllThreadsAreWaiting();
+
+  {
+    base::AutoLock auto_lock(*queue.lock());
+    EXPECT_EQ(3, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(3, queue.GetNumThreadsCompletingTasks());
+    EXPECT_EQ(0, queue.task_count());
+    EXPECT_EQ(1, queue.GetMaxCompletionsByWorkerThread());
+    EXPECT_EQ(0, queue.GetMinCompletionsByWorkerThread());
+    EXPECT_EQ(3, queue.GetNumberOfCompletedTasks());
+
+    // Set up to make each task get help from another worker.
+    queue.ResetHistory();
+    queue.SetTaskCount(20);  // 2 tasks per thread.
+    queue.SetWorkTime(kThirtyMs);
+    queue.SetAllowHelp(true);
+  }
+  queue.work_is_available()->Signal();  // But each worker can signal another.
+  // Wait till we at least start to handle tasks (and we're not all waiting).
+  queue.SpinUntilTaskCountLessThan(20);
+  // Wait to allow the 10 workers to get done.
+  queue.SpinUntilAllThreadsAreWaiting();  // Should take about 60 ms.
+
+  {
+    base::AutoLock auto_lock(*queue.lock());
+    EXPECT_EQ(10, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(10, queue.GetNumThreadsCompletingTasks());
+    EXPECT_EQ(0, queue.task_count());
+    EXPECT_EQ(20, queue.GetNumberOfCompletedTasks());
+
+    // Same as last test, but with Broadcast().
+    queue.ResetHistory();
+    queue.SetTaskCount(20);  // 2 tasks per thread.
+    queue.SetWorkTime(kThirtyMs);
+    queue.SetAllowHelp(true);
+  }
+  queue.work_is_available()->Broadcast();
+  // Wait till we at least start to handle tasks (and we're not all waiting).
+  queue.SpinUntilTaskCountLessThan(20);
+  // Wait to allow the 10 workers to get done.
+  queue.SpinUntilAllThreadsAreWaiting();  // Should take about 60 ms.
+
+  {
+    base::AutoLock auto_lock(*queue.lock());
+    EXPECT_EQ(10, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(10, queue.GetNumThreadsCompletingTasks());
+    EXPECT_EQ(0, queue.task_count());
+    EXPECT_EQ(20, queue.GetNumberOfCompletedTasks());
+
+    queue.SetShutdown();
+  }
+  queue.work_is_available()->Broadcast();  // Force check for shutdown.
+
+  SPIN_FOR_TIMEDELTA_OR_UNTIL_TRUE(TimeDelta::FromMinutes(1),
+                                   queue.ThreadSafeCheckShutdown(kThreadCount));
+}
+
+// Stress run with 200 workers: a Broadcast()-driven round of 20 tasks per
+// thread, then a Signal()-with-help round of 4 tasks per thread.
+TEST_F(ConditionVariableTest, LargeFastTaskTest) {
+  const int kThreadCount = 200;
+  WorkQueue queue(kThreadCount);  // Start the threads.
+
+  // NOTE(review): |private_cv| is never waited on or signaled below, and
+  // |private_held_lock|/|private_lock| exist only to support it — all three
+  // appear to be dead weight in this test.
+  Lock private_lock;  // Used locally for master to wait.
+  base::AutoLock private_held_lock(private_lock);
+  ConditionVariable private_cv(&private_lock);
+
+  {
+    base::AutoLock auto_lock(*queue.lock());
+    while (!queue.EveryIdWasAllocated())
+      queue.all_threads_have_ids()->Wait();
+  }
+
+  // Wait a bit more to allow threads to reach their wait state.
+  queue.SpinUntilAllThreadsAreWaiting();
+
+  {
+    // Since we have no tasks, all threads should be waiting by now.
+    base::AutoLock auto_lock(*queue.lock());
+    EXPECT_EQ(0, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(0, queue.GetNumThreadsCompletingTasks());
+    EXPECT_EQ(0, queue.task_count());
+    EXPECT_EQ(0, queue.GetMaxCompletionsByWorkerThread());
+    EXPECT_EQ(0, queue.GetMinCompletionsByWorkerThread());
+    EXPECT_EQ(0, queue.GetNumberOfCompletedTasks());
+
+    // Set up to make all workers do (an average of) 20 tasks.
+    queue.ResetHistory();
+    queue.SetTaskCount(20 * kThreadCount);
+    queue.SetWorkTime(kFortyFiveMs);
+    queue.SetAllowHelp(false);
+  }
+  queue.work_is_available()->Broadcast();  // Start up all threads.
+  // Wait until we've handed out all tasks.
+  {
+    base::AutoLock auto_lock(*queue.lock());
+    while (queue.task_count() != 0)
+      queue.no_more_tasks()->Wait();
+  }
+
+  // Wait till the last of the tasks complete.
+  queue.SpinUntilAllThreadsAreWaiting();
+
+  {
+    // With Broadcast(), every thread should have participated.
+    // but with racing.. they may not all have done equal numbers of tasks.
+    base::AutoLock auto_lock(*queue.lock());
+    EXPECT_EQ(kThreadCount, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(kThreadCount, queue.GetNumThreadsCompletingTasks());
+    EXPECT_EQ(0, queue.task_count());
+    EXPECT_LE(20, queue.GetMaxCompletionsByWorkerThread());
+    EXPECT_EQ(20 * kThreadCount, queue.GetNumberOfCompletedTasks());
+
+    // Set up to make all workers do (an average of) 4 tasks.
+    queue.ResetHistory();
+    queue.SetTaskCount(kThreadCount * 4);
+    queue.SetWorkTime(kFortyFiveMs);
+    queue.SetAllowHelp(true);  // Might outperform Broadcast().
+  }
+  queue.work_is_available()->Signal();  // Start up one thread.
+
+  // Wait until we've handed out all tasks
+  {
+    base::AutoLock auto_lock(*queue.lock());
+    while (queue.task_count() != 0)
+      queue.no_more_tasks()->Wait();
+  }
+
+  // Wait till the last of the tasks complete.
+  queue.SpinUntilAllThreadsAreWaiting();
+
+  {
+    // With Signal(), every thread should have participated.
+    // but with racing.. they may not all have done four tasks.
+    base::AutoLock auto_lock(*queue.lock());
+    EXPECT_EQ(kThreadCount, queue.GetNumThreadsTakingAssignments());
+    EXPECT_EQ(kThreadCount, queue.GetNumThreadsCompletingTasks());
+    EXPECT_EQ(0, queue.task_count());
+    EXPECT_LE(4, queue.GetMaxCompletionsByWorkerThread());
+    EXPECT_EQ(4 * kThreadCount, queue.GetNumberOfCompletedTasks());
+
+    queue.SetShutdown();
+  }
+  queue.work_is_available()->Broadcast();  // Force check for shutdown.
+
+  // Wait for shutdowns to complete.
+  SPIN_FOR_TIMEDELTA_OR_UNTIL_TRUE(TimeDelta::FromMinutes(1),
+                                   queue.ThreadSafeCheckShutdown(kThreadCount));
+}
+
+//------------------------------------------------------------------------------
+// Finally we provide the implementation for the methods in the WorkQueue class.
+//------------------------------------------------------------------------------
+
+// Initializes all bookkeeping, then spawns |thread_count| workers.  Runs on
+// the test's main thread before any worker exists, so the unlocked calls to
+// ResetHistory()/SetTaskCount()/SetWorkTime() are safe here.
+WorkQueue::WorkQueue(int thread_count)
+    : lock_(),
+      work_is_available_(&lock_),
+      all_threads_have_ids_(&lock_),
+      no_more_tasks_(&lock_),
+      thread_count_(thread_count),
+      waiting_thread_count_(0),
+      thread_handles_(new PlatformThreadHandle[thread_count]),
+      assignment_history_(thread_count),
+      completion_history_(thread_count),
+      thread_started_counter_(0),
+      shutdown_task_count_(0),
+      task_count_(0),
+      allow_help_requests_(false),
+      shutdown_(false) {
+  EXPECT_GE(thread_count_, 1);
+  ResetHistory();
+  SetTaskCount(0);
+  SetWorkTime(TimeDelta::FromMilliseconds(30));
+
+  // Each worker immediately enters ThreadMain() and picks up a unique id.
+  for (int i = 0; i < thread_count_; ++i) {
+    PlatformThreadHandle pth;
+    EXPECT_TRUE(PlatformThread::Create(0, this, &pth));
+    thread_handles_[i] = pth;
+  }
+}
+
+// Requests shutdown, wakes every worker, and joins them all before the
+// members they reference are destroyed.
+WorkQueue::~WorkQueue() {
+  {
+    base::AutoLock auto_lock(lock_);
+    SetShutdown();
+  }
+  work_is_available_.Broadcast();  // Tell them all to terminate.
+
+  for (int i = 0; i < thread_count_; ++i) {
+    PlatformThread::Join(thread_handles_[i]);
+  }
+  // Every worker must have left its wait loop before being joined.
+  EXPECT_EQ(0, waiting_thread_count_);
+}
+
+// Hands the calling worker the next unused id.  Caller holds |lock_|.
+int WorkQueue::GetThreadId() {
+  DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+  DCHECK(!EveryIdWasAllocated());
+  return thread_started_counter_++;  // Give out Unique IDs.
+}
+
+// True once every worker has called GetThreadId().  Caller holds |lock_|.
+bool WorkQueue::EveryIdWasAllocated() const {
+  DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+  return thread_count_ == thread_started_counter_;
+}
+
+// Consumes one task, records the assignment, and signals the controller when
+// the task count reaches zero.  Returns the simulated work duration.
+// Caller holds |lock_|.
+TimeDelta WorkQueue::GetAnAssignment(int thread_id) {
+  DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+  DCHECK_LT(0, task_count_);
+  assignment_history_[thread_id]++;
+  if (0 == --task_count_) {
+    no_more_tasks_.Signal();
+  }
+  return worker_delay_;
+}
+
+// Records that |thread_id| finished one task.  Caller holds |lock_|.
+void WorkQueue::WorkIsCompleted(int thread_id) {
+  DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+  completion_history_[thread_id]++;
+}
+
+// Number of tasks still waiting to be handed out.  Caller holds |lock_|.
+int WorkQueue::task_count() const {
+  DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+  return task_count_;
+}
+
+// Whether a worker may Signal() another worker for help.  Caller holds
+// |lock_|.
+bool WorkQueue::allow_help_requests() const {
+  DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+  return allow_help_requests_;
+}
+
+// Whether shutdown has been requested.  DCHECKs that |lock_| is held.
+bool WorkQueue::shutdown() const {
+  lock_.AssertAcquired();
+  DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+  return shutdown_;
+}
+
+// Because this method is called from the test's main thread we need to actually
+// take the lock. Threads will call the thread_shutting_down() method with the
+// lock already acquired.
+bool WorkQueue::ThreadSafeCheckShutdown(int thread_count) {
+  bool all_shutdown;
+  base::AutoLock auto_lock(lock_);
+  {
+    // Declare in scope so DFAKE is guaranteed to be destroyed before AutoLock.
+    DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+    all_shutdown = (shutdown_task_count_ == thread_count);
+  }
+  return all_shutdown;
+}
+
+// Called by a worker (holding |lock_|) as it exits ThreadMain, so the
+// controller can count acknowledged shutdowns.
+void WorkQueue::thread_shutting_down() {
+  lock_.AssertAcquired();
+  DFAKE_SCOPED_RECURSIVE_LOCK(locked_methods_);
+  shutdown_task_count_++;
+}
+
+// The shared lock guarding all WorkQueue state.
+Lock* WorkQueue::lock() {
+  return &lock_;
+}
+
+// Signaled/broadcast when tasks are added (or shutdown is requested).
+ConditionVariable* WorkQueue::work_is_available() {
+  return &work_is_available_;
+}
+
+// Signaled when the last worker has received its id.
+ConditionVariable* WorkQueue::all_threads_have_ids() {
+  return &all_threads_have_ids_;
+}
+
+// Signaled when the task count drops to zero.
+ConditionVariable* WorkQueue::no_more_tasks() {
+  return &no_more_tasks_;
+}
+
+// Zeroes the per-thread assignment and completion tallies.
+void WorkQueue::ResetHistory() {
+  std::fill(assignment_history_.begin(), assignment_history_.end(), 0);
+  std::fill(completion_history_.begin(), completion_history_.end(), 0);
+}
+
+// Smallest per-thread completion count.  |completion_history_| always holds
+// thread_count_ (>= 1) entries, so the range is never empty.
+int WorkQueue::GetMinCompletionsByWorkerThread() const {
+  return *std::min_element(completion_history_.begin(),
+                           completion_history_.end());
+}
+
+// Largest per-thread completion count.  |completion_history_| always holds
+// thread_count_ (>= 1) entries, so the range is never empty.
+int WorkQueue::GetMaxCompletionsByWorkerThread() const {
+  return *std::max_element(completion_history_.begin(),
+                           completion_history_.end());
+}
+
+// Number of workers that took at least one assignment.
+int WorkQueue::GetNumThreadsTakingAssignments() const {
+  return static_cast<int>(
+      std::count_if(assignment_history_.begin(), assignment_history_.end(),
+                    [](int assignments) { return assignments != 0; }));
+}
+
+// Number of workers that completed at least one task.
+int WorkQueue::GetNumThreadsCompletingTasks() const {
+  return static_cast<int>(
+      std::count_if(completion_history_.begin(), completion_history_.end(),
+                    [](int completions) { return completions != 0; }));
+}
+
+// Total tasks completed across all workers.
+int WorkQueue::GetNumberOfCompletedTasks() const {
+  int total = 0;
+  for (int completions : completion_history_)
+    total += completions;
+  return total;
+}
+
+// Sets how long each simulated work item takes.  Per the class comment,
+// callers synchronize via |lock_| (the constructor is the single-threaded
+// exception).
+void WorkQueue::SetWorkTime(TimeDelta delay) {
+  worker_delay_ = delay;
+}
+
+// Sets the number of outstanding tasks.  Same locking convention as above.
+void WorkQueue::SetTaskCount(int count) {
+  task_count_ = count;
+}
+
+// Controls whether a worker may Signal() another worker to help.
+void WorkQueue::SetAllowHelp(bool allow) {
+  allow_help_requests_ = allow;
+}
+
+// Requests that all workers terminate.  Unlike the setters above, this one
+// DCHECKs that the caller holds |lock_|.
+void WorkQueue::SetShutdown() {
+  lock_.AssertAcquired();
+  shutdown_ = true;
+}
+
+// Polls (with a 30 ms sleep between samples) until every worker is blocked
+// inside Wait().  Must be called without |lock_| held.
+void WorkQueue::SpinUntilAllThreadsAreWaiting() {
+  for (;;) {
+    bool all_waiting;
+    {
+      base::AutoLock auto_lock(lock_);
+      all_waiting = (waiting_thread_count_ == thread_count_);
+    }
+    if (all_waiting)
+      return;
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(30));
+  }
+}
+
+// Polls (with a 30 ms sleep between samples) until the outstanding task
+// count drops below |task_count|.  Must be called without |lock_| held.
+void WorkQueue::SpinUntilTaskCountLessThan(int task_count) {
+  for (;;) {
+    bool below_threshold;
+    {
+      base::AutoLock auto_lock(lock_);
+      below_threshold = (task_count_ < task_count);
+    }
+    if (below_threshold)
+      return;
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(30));
+  }
+}
+}
+
+
+//------------------------------------------------------------------------------
+// Define the standard worker task. Several tests will spin out many of these
+// threads.
+//------------------------------------------------------------------------------
+
+// The multithread tests involve several threads with a task to perform as
+// directed by an instance of the class WorkQueue.
+// The task is to:
+// a) Check to see if there are more tasks (there is a task counter).
+// a1) Wait on condition variable if there are no tasks currently.
+// b) Call a function to see what should be done.
+// c) Do some computation based on the number of milliseconds returned in (b).
+// d) go back to (a).
+
+// WorkQueue::ThreadMain() implements the above task for all threads.
+// It calls the controlling object to tell the creator about progress, and to
+// ask about tasks.
+
+// Worker entry point: claim an id, then loop taking assignments, simulating
+// work via an unsignaled TimedWait, and recording completions, until
+// shutdown is requested (see the task description in the comment above).
+void WorkQueue::ThreadMain() {
+  int thread_id;
+  {
+    base::AutoLock auto_lock(lock_);
+    thread_id = GetThreadId();
+    if (EveryIdWasAllocated())
+      all_threads_have_ids()->Signal();  // Tell creator we're ready.
+  }
+
+  Lock private_lock;  // Used to waste time on "our work".
+  while (1) {  // This is the main consumer loop.
+    TimeDelta work_time;
+    bool could_use_help;
+    {
+      base::AutoLock auto_lock(lock_);
+      // |waiting_thread_count_| brackets the Wait() so the controller can
+      // tell when every worker is parked.
+      while (0 == task_count() && !shutdown()) {
+        ++waiting_thread_count_;
+        work_is_available()->Wait();
+        --waiting_thread_count_;
+      }
+      if (shutdown()) {
+        // Ack the notification of a shutdown message back to the controller.
+        thread_shutting_down();
+        return;  // Terminate.
+      }
+      // Get our task duration from the queue.
+      work_time = GetAnAssignment(thread_id);
+      could_use_help = (task_count() > 0) && allow_help_requests();
+    }  // Release lock
+
+    // Do work (outside of the locked region).
+    if (could_use_help)
+      work_is_available()->Signal();  // Get help from other threads.
+
+    if (work_time > TimeDelta::FromMilliseconds(0)) {
+      // We could just sleep(), but we'll instead further exercise the
+      // condition variable class, and do a timed wait.
+      base::AutoLock auto_lock(private_lock);
+      ConditionVariable private_cv(&private_lock);
+      private_cv.TimedWait(work_time);  // Unsynchronized waiting.
+    }
+
+    {
+      base::AutoLock auto_lock(lock_);
+      // Send notification that we completed our "work."
+      WorkIsCompleted(thread_id);
+    }
+  }
+}
+
+} // namespace
+
+} // namespace base
diff --git a/libchrome/base/synchronization/lock.cc b/libchrome/base/synchronization/lock.cc
new file mode 100644
index 0000000..03297ad
--- /dev/null
+++ b/libchrome/base/synchronization/lock.cc
@@ -0,0 +1,38 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is used for debugging assertion support. The Lock class
+// is functionally a wrapper around the LockImpl class, so the only
+// real intelligence in the class is in the debugging logic.
+
+#include "base/synchronization/lock.h"
+
+#if DCHECK_IS_ON()
+
+namespace base {
+
+// Debug-build constructor: initializes the underlying mutex;
+// |owning_thread_ref_| default-constructs to null (lock unheld).
+Lock::Lock() : lock_() {
+}
+
+// The lock must not be held by any thread when it is destroyed.
+Lock::~Lock() {
+  DCHECK(owning_thread_ref_.is_null());
+}
+
+// DCHECKs that the calling thread is the one currently holding the lock.
+void Lock::AssertAcquired() const {
+  DCHECK(owning_thread_ref_ == PlatformThread::CurrentRef());
+}
+
+// Called just before unlocking: verifies the current thread holds the lock,
+// then clears the recorded owner.
+void Lock::CheckHeldAndUnmark() {
+  DCHECK(owning_thread_ref_ == PlatformThread::CurrentRef());
+  owning_thread_ref_ = PlatformThreadRef();
+}
+
+// Called just after locking: verifies the lock was not already marked as
+// held (catches recursive acquisition), then records the current thread
+// as the owner.
+void Lock::CheckUnheldAndMark() {
+  DCHECK(owning_thread_ref_.is_null());
+  owning_thread_ref_ = PlatformThread::CurrentRef();
+}
+
+} // namespace base
+
+#endif // DCHECK_IS_ON()
diff --git a/libchrome/base/synchronization/lock.h b/libchrome/base/synchronization/lock.h
new file mode 100644
index 0000000..fbf6cef
--- /dev/null
+++ b/libchrome/base/synchronization/lock.h
@@ -0,0 +1,136 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_LOCK_H_
+#define BASE_SYNCHRONIZATION_LOCK_H_
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/synchronization/lock_impl.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// A convenient wrapper for an OS specific critical section. The only real
+// intelligence in this class is in debug mode for the support for the
+// AssertAcquired() method.
+class BASE_EXPORT Lock {
+ public:
+#if !DCHECK_IS_ON()
+  // Optimized wrapper implementation: forwards straight to LockImpl with no
+  // ownership bookkeeping.
+  Lock() : lock_() {}
+  ~Lock() {}
+  void Acquire() { lock_.Lock(); }
+  void Release() { lock_.Unlock(); }
+
+  // If the lock is not held, take it and return true. If the lock is already
+  // held by another thread, immediately return false. This must not be called
+  // by a thread already holding the lock (what happens is undefined and an
+  // assertion may fail).
+  bool Try() { return lock_.Try(); }
+
+  // Null implementation if not debug.
+  void AssertAcquired() const {}
+#else
+  Lock();
+  ~Lock();
+
+  // NOTE: We do not permit recursive locks and will commonly fire a DCHECK() if
+  // a thread attempts to acquire the lock a second time (while already holding
+  // it).
+  void Acquire() {
+    lock_.Lock();
+    CheckUnheldAndMark();
+  }
+  void Release() {
+    CheckHeldAndUnmark();
+    lock_.Unlock();
+  }
+
+  bool Try() {
+    bool rv = lock_.Try();
+    if (rv) {
+      CheckUnheldAndMark();
+    }
+    return rv;
+  }
+
+  // DCHECKs that the lock is currently held by the calling thread.
+  void AssertAcquired() const;
+#endif  // DCHECK_IS_ON()
+
+#if defined(OS_POSIX) || defined(OS_WIN)
+  // Both Windows and POSIX implementations of ConditionVariable need to be
+  // able to see our lock and tweak our debugging counters, as they release and
+  // acquire locks inside of their condition variable APIs.
+  friend class ConditionVariable;
+#endif
+
+ private:
+#if DCHECK_IS_ON()
+  // Members and routines taking care of locks assertions.
+  // Note that this checks for recursive locks and allows them
+  // if the variable is set. This is allowed by the underlying implementation
+  // on windows but not on Posix, so we're doing unneeded checks on Posix.
+  // It's worth it to share the code.
+  void CheckHeldAndUnmark();
+  void CheckUnheldAndMark();
+
+  // All private data is implicitly protected by lock_.
+  // Be VERY careful to only access members under that lock.
+  base::PlatformThreadRef owning_thread_ref_;
+#endif  // DCHECK_IS_ON()
+
+  // Platform specific underlying lock implementation.
+  internal::LockImpl lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(Lock);
+};
+
+// A helper class that acquires the given Lock while the AutoLock is in scope.
+// (RAII: the lock is taken in the constructor and released in the destructor.)
+class AutoLock {
+ public:
+  // Tag type selecting the "already acquired" constructor below.
+  struct AlreadyAcquired {};
+
+  explicit AutoLock(Lock& lock) : lock_(lock) {
+    lock_.Acquire();
+  }
+
+  // Adopts a |lock| the caller has already acquired; only the release at
+  // scope exit is managed by this object.
+  AutoLock(Lock& lock, const AlreadyAcquired&) : lock_(lock) {
+    lock_.AssertAcquired();
+  }
+
+  ~AutoLock() {
+    lock_.AssertAcquired();
+    lock_.Release();
+  }
+
+ private:
+  Lock& lock_;
+  DISALLOW_COPY_AND_ASSIGN(AutoLock);
+};
+
+// AutoUnlock is a helper that will Release() the |lock| argument in the
+// constructor, and re-Acquire() it in the destructor — the inverse of
+// AutoLock, useful for temporarily dropping a lock inside a locked region.
+class AutoUnlock {
+ public:
+  explicit AutoUnlock(Lock& lock) : lock_(lock) {
+    // We require our caller to have the lock.
+    lock_.AssertAcquired();
+    lock_.Release();
+  }
+
+  ~AutoUnlock() {
+    lock_.Acquire();
+  }
+
+ private:
+  Lock& lock_;
+  DISALLOW_COPY_AND_ASSIGN(AutoUnlock);
+};
+
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_LOCK_H_
diff --git a/libchrome/base/synchronization/lock_impl.h b/libchrome/base/synchronization/lock_impl.h
new file mode 100644
index 0000000..cbaabc7
--- /dev/null
+++ b/libchrome/base/synchronization/lock_impl.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_LOCK_IMPL_H_
+#define BASE_SYNCHRONIZATION_LOCK_IMPL_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_POSIX)
+#include <pthread.h>
+#endif
+
+namespace base {
+namespace internal {
+
+// This class implements the underlying platform-specific lock mechanism
+// (an SRWLOCK on Windows, a pthread_mutex_t on POSIX) used for the Lock
+// class.  Most users should not use LockImpl directly, but should instead
+// use Lock.
+class BASE_EXPORT LockImpl {
+ public:
+#if defined(OS_WIN)
+  using NativeHandle = SRWLOCK;
+#elif defined(OS_POSIX)
+  using NativeHandle = pthread_mutex_t;
+#endif
+
+  LockImpl();
+  ~LockImpl();
+
+  // If the lock is not held, take it and return true. If the lock is already
+  // held by something else, immediately return false.
+  bool Try();
+
+  // Take the lock, blocking until it is available if necessary.
+  void Lock();
+
+  // Release the lock. This must only be called by the lock's holder: after
+  // a successful call to Try, or a call to Lock.
+  void Unlock();
+
+  // Return the native underlying lock.
+  // TODO(awalker): refactor lock and condition variables so that this is
+  // unnecessary.
+  NativeHandle* native_handle() { return &native_handle_; }
+
+ private:
+  NativeHandle native_handle_;
+
+  DISALLOW_COPY_AND_ASSIGN(LockImpl);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_LOCK_IMPL_H_
diff --git a/libchrome/base/synchronization/lock_impl_posix.cc b/libchrome/base/synchronization/lock_impl_posix.cc
new file mode 100644
index 0000000..5619ada
--- /dev/null
+++ b/libchrome/base/synchronization/lock_impl_posix.cc
@@ -0,0 +1,55 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/lock_impl.h"
+
+#include <errno.h>
+#include <string.h>
+
+#include "base/logging.h"
+
+namespace base {
+namespace internal {
+
+LockImpl::LockImpl() {
+#ifndef NDEBUG
+  // In debug, setup attributes for lock error checking.
+  // (PTHREAD_MUTEX_ERRORCHECK makes relocking, or unlocking by a non-owner,
+  // return an error instead of deadlocking or being undefined.)
+  pthread_mutexattr_t mta;
+  int rv = pthread_mutexattr_init(&mta);
+  DCHECK_EQ(rv, 0) << ". " << strerror(rv);
+  rv = pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_ERRORCHECK);
+  DCHECK_EQ(rv, 0) << ". " << strerror(rv);
+  rv = pthread_mutex_init(&native_handle_, &mta);
+  DCHECK_EQ(rv, 0) << ". " << strerror(rv);
+  // The attribute object may be destroyed as soon as the mutex is initialized.
+  rv = pthread_mutexattr_destroy(&mta);
+  DCHECK_EQ(rv, 0) << ". " << strerror(rv);
+#else
+  // In release, go with the default lock attributes.
+  pthread_mutex_init(&native_handle_, NULL);
+#endif
+}
+
+LockImpl::~LockImpl() {
+  // Destroying a locked mutex is undefined; a non-zero result (e.g. EBUSY)
+  // here indicates the lock was leaked while held.
+  int rv = pthread_mutex_destroy(&native_handle_);
+  DCHECK_EQ(rv, 0) << ". " << strerror(rv);
+}
+
+// Attempt to take the lock without blocking; returns true on success.
+bool LockImpl::Try() {
+  const int result = pthread_mutex_trylock(&native_handle_);
+  // EBUSY just means another thread currently holds the lock; any other
+  // non-zero return value is a real error.
+  DCHECK(result == 0 || result == EBUSY) << ". " << strerror(result);
+  return result == 0;
+}
+
+// Block until the lock is acquired.
+void LockImpl::Lock() {
+  const int result = pthread_mutex_lock(&native_handle_);
+  DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+// Release the lock; the caller must be the current holder.
+void LockImpl::Unlock() {
+  const int result = pthread_mutex_unlock(&native_handle_);
+  DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+} // namespace internal
+} // namespace base
diff --git a/libchrome/base/synchronization/lock_unittest.cc b/libchrome/base/synchronization/lock_unittest.cc
new file mode 100644
index 0000000..27f335e
--- /dev/null
+++ b/libchrome/base/synchronization/lock_unittest.cc
@@ -0,0 +1,215 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/lock.h"
+
+#include <stdlib.h>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/threading/platform_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Basic test to make sure that Acquire()/Release()/Try() don't crash ----------
+
+// Worker that hammers Acquire/Release and Try from a second thread while the
+// main test thread does the same, counting successful acquisitions.
+class BasicLockTestThread : public PlatformThread::Delegate {
+ public:
+  explicit BasicLockTestThread(Lock* lock) : lock_(lock), acquired_(0) {}
+
+  void ThreadMain() override {
+    // Fast acquire/release cycles.
+    for (int i = 0; i < 10; i++) {
+      lock_->Acquire();
+      acquired_++;
+      lock_->Release();
+    }
+    // Hold the lock for a random few milliseconds each time.
+    for (int i = 0; i < 10; i++) {
+      lock_->Acquire();
+      acquired_++;
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(rand() % 20));
+      lock_->Release();
+    }
+    // Non-blocking attempts; these may legitimately fail.
+    for (int i = 0; i < 10; i++) {
+      if (lock_->Try()) {
+        acquired_++;
+        PlatformThread::Sleep(TimeDelta::FromMilliseconds(rand() % 20));
+        lock_->Release();
+      }
+    }
+  }
+
+  // Number of times this thread successfully took the lock.
+  int acquired() const { return acquired_; }
+
+ private:
+  Lock* lock_;     // Not owned.
+  int acquired_;
+
+  DISALLOW_COPY_AND_ASSIGN(BasicLockTestThread);
+};
+
+TEST(LockTest, Basic) {
+  Lock lock;
+  BasicLockTestThread thread(&lock);
+  PlatformThreadHandle handle;
+
+  ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+
+  // Mirror the worker's pattern on the main thread: fast cycles, held
+  // cycles, try-lock cycles, then more held cycles.
+  int acquired = 0;
+  for (int i = 0; i < 5; i++) {
+    lock.Acquire();
+    acquired++;
+    lock.Release();
+  }
+  for (int i = 0; i < 10; i++) {
+    lock.Acquire();
+    acquired++;
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(rand() % 20));
+    lock.Release();
+  }
+  for (int i = 0; i < 10; i++) {
+    if (lock.Try()) {
+      acquired++;
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(rand() % 20));
+      lock.Release();
+    }
+  }
+  for (int i = 0; i < 5; i++) {
+    lock.Acquire();
+    acquired++;
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(rand() % 20));
+    lock.Release();
+  }
+
+  PlatformThread::Join(handle);
+
+  // Only the Try() loops can fail to acquire, so each side must record at
+  // least 20 successful acquisitions.
+  EXPECT_GE(acquired, 20);
+  EXPECT_GE(thread.acquired(), 20);
+}
+
+// Test that Try() works as expected -------------------------------------------
+
+// Performs a single non-blocking Try() from another thread and records
+// whether it succeeded.
+class TryLockTestThread : public PlatformThread::Delegate {
+ public:
+  explicit TryLockTestThread(Lock* lock) : lock_(lock), got_lock_(false) {}
+
+  void ThreadMain() override {
+    got_lock_ = lock_->Try();
+    if (got_lock_)
+      lock_->Release();
+  }
+
+  bool got_lock() const { return got_lock_; }
+
+ private:
+  Lock* lock_;      // Not owned.
+  bool got_lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(TryLockTestThread);
+};
+
+TEST(LockTest, TryLock) {
+  Lock lock;
+
+  ASSERT_TRUE(lock.Try());
+  // We now have the lock....
+
+  // This thread will not be able to get the lock.
+  {
+    TryLockTestThread thread(&lock);
+    PlatformThreadHandle handle;
+
+    ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+
+    PlatformThread::Join(handle);
+
+    ASSERT_FALSE(thread.got_lock());
+  }
+
+  lock.Release();
+
+  // This thread will....
+  {
+    TryLockTestThread thread(&lock);
+    PlatformThreadHandle handle;
+
+    ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+
+    PlatformThread::Join(handle);
+
+    ASSERT_TRUE(thread.got_lock());
+    // But it released it....
+    ASSERT_TRUE(lock.Try());
+  }
+
+  // Release the lock re-taken by the Try() just above.
+  lock.Release();
+}
+
+// Tests that locks actually exclude -------------------------------------------
+
+// Increments |*value| 40 times under the lock, sleeping while holding it to
+// maximize the chance of exposing a broken mutual-exclusion implementation.
+class MutexLockTestThread : public PlatformThread::Delegate {
+ public:
+  MutexLockTestThread(Lock* lock, int* value) : lock_(lock), value_(value) {}
+
+  // Static helper which can also be called from the main thread.
+  static void DoStuff(Lock* lock, int* value) {
+    for (int i = 0; i < 40; i++) {
+      lock->Acquire();
+      // Read, sleep, then write: without exclusion, concurrent callers would
+      // overwrite each other's increments and the final total would be low.
+      int v = *value;
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(rand() % 10));
+      *value = v + 1;
+      lock->Release();
+    }
+  }
+
+  void ThreadMain() override { DoStuff(lock_, value_); }
+
+ private:
+  Lock* lock_;    // Not owned.
+  int* value_;    // Not owned.
+
+  DISALLOW_COPY_AND_ASSIGN(MutexLockTestThread);
+};
+
+TEST(LockTest, MutexTwoThreads) {
+  Lock lock;
+  int value = 0;
+
+  MutexLockTestThread thread(&lock, &value);
+  PlatformThreadHandle handle;
+
+  ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+
+  // The main thread participates too, so two threads contend on |lock|.
+  MutexLockTestThread::DoStuff(&lock, &value);
+
+  PlatformThread::Join(handle);
+
+  // Two participants x 40 increments each.
+  EXPECT_EQ(2 * 40, value);
+}
+
+TEST(LockTest, MutexFourThreads) {
+  Lock lock;
+  int value = 0;
+
+  // Three background threads plus the current thread each perform 40 locked
+  // increments; the lock must serialize all of them.
+  MutexLockTestThread thread1(&lock, &value);
+  MutexLockTestThread thread2(&lock, &value);
+  MutexLockTestThread thread3(&lock, &value);
+  MutexLockTestThread* threads[3] = {&thread1, &thread2, &thread3};
+  PlatformThreadHandle handles[3];
+
+  for (int i = 0; i < 3; i++)
+    ASSERT_TRUE(PlatformThread::Create(0, threads[i], &handles[i]));
+
+  // Participate from the main thread as well (the fourth worker).
+  MutexLockTestThread::DoStuff(&lock, &value);
+
+  for (int i = 0; i < 3; i++)
+    PlatformThread::Join(handles[i]);
+
+  // Four participants x 40 increments each.
+  EXPECT_EQ(4 * 40, value);
+}
+
+} // namespace base
diff --git a/libchrome/base/synchronization/read_write_lock.h b/libchrome/base/synchronization/read_write_lock.h
new file mode 100644
index 0000000..4c59b7b
--- /dev/null
+++ b/libchrome/base/synchronization/read_write_lock.h
@@ -0,0 +1,105 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_READ_WRITE_LOCK_H_
+#define BASE_SYNCHRONIZATION_READ_WRITE_LOCK_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_NACL)
+#include "base/synchronization/lock.h"
+#endif
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_POSIX)
+#include <pthread.h>
+#else
+# error No reader-writer lock defined for this platform.
+#endif
+
+namespace base {
+namespace subtle {
+
+// An OS-independent wrapper around reader-writer locks. There's no magic here.
+//
+// You are strongly encouraged to use base::Lock instead of this, unless you
+// can demonstrate contention and show that this would lead to an improvement.
+// This lock does not make any guarantees of fairness, which can lead to writer
+// starvation under certain access patterns. You should carefully consider your
+// writer access patterns before using this lock.
+class BASE_EXPORT ReadWriteLock {
+ public:
+  ReadWriteLock();
+  ~ReadWriteLock();
+
+  // Reader lock functions.  Multiple readers may hold the lock concurrently.
+  void ReadAcquire();
+  void ReadRelease();
+
+  // Writer lock functions.  A writer excludes all readers and other writers.
+  void WriteAcquire();
+  void WriteRelease();
+
+ private:
+#if defined(OS_WIN)
+  using NativeHandle = SRWLOCK;
+#elif defined(OS_NACL)
+  using NativeHandle = Lock;
+#elif defined(OS_POSIX)
+  using NativeHandle = pthread_rwlock_t;
+#endif
+
+  NativeHandle native_handle_;
+
+#if defined(OS_NACL)
+  // Even though NaCl has a pthread_rwlock implementation, the build rules don't
+  // make it universally available. So instead, implement a slower and trivial
+  // reader-writer lock using a regular mutex.
+  // TODO(amistry): Remove this and use the posix implementation when it's
+  // available in all build configurations.
+  uint32_t readers_ = 0;
+  // base::Lock does checking to ensure the lock is acquired and released on the
+  // same thread. This is not the case for this lock, so use pthread mutexes
+  // directly here.
+  pthread_mutex_t writer_lock_ = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(ReadWriteLock);
+};
+
+// RAII helper: holds a reader (shared) hold on |lock| for its lifetime.
+class AutoReadLock {
+ public:
+  explicit AutoReadLock(ReadWriteLock& lock) : lock_(lock) {
+    lock_.ReadAcquire();
+  }
+  ~AutoReadLock() {
+    lock_.ReadRelease();
+  }
+
+ private:
+  ReadWriteLock& lock_;
+  DISALLOW_COPY_AND_ASSIGN(AutoReadLock);
+};
+
+// RAII helper: holds an exclusive (writer) hold on |lock| for its lifetime.
+class AutoWriteLock {
+ public:
+  explicit AutoWriteLock(ReadWriteLock& lock) : lock_(lock) {
+    lock_.WriteAcquire();
+  }
+  ~AutoWriteLock() {
+    lock_.WriteRelease();
+  }
+
+ private:
+  ReadWriteLock& lock_;
+  DISALLOW_COPY_AND_ASSIGN(AutoWriteLock);
+};
+
+} // namespace subtle
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_READ_WRITE_LOCK_H_
diff --git a/libchrome/base/synchronization/read_write_lock_posix.cc b/libchrome/base/synchronization/read_write_lock_posix.cc
new file mode 100644
index 0000000..e5de091
--- /dev/null
+++ b/libchrome/base/synchronization/read_write_lock_posix.cc
@@ -0,0 +1,40 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/read_write_lock.h"
+
+#include <string.h>
+
+#include "base/logging.h"
+
+namespace base {
+namespace subtle {
+
+// The rwlock can be statically initialized, so there is no result to check.
+ReadWriteLock::ReadWriteLock() : native_handle_(PTHREAD_RWLOCK_INITIALIZER) {}
+
+ReadWriteLock::~ReadWriteLock() {
+  // Fails (e.g. EBUSY) if the lock is still held by any reader or writer.
+  int result = pthread_rwlock_destroy(&native_handle_);
+  DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+// Take a shared (reader) hold on the rwlock, blocking as needed.
+void ReadWriteLock::ReadAcquire() {
+  const int rv = pthread_rwlock_rdlock(&native_handle_);
+  DCHECK_EQ(rv, 0) << ". " << strerror(rv);
+}
+
+// Drop a shared (reader) hold previously taken with ReadAcquire().
+void ReadWriteLock::ReadRelease() {
+  const int rv = pthread_rwlock_unlock(&native_handle_);
+  DCHECK_EQ(rv, 0) << ". " << strerror(rv);
+}
+
+// Take an exclusive (writer) hold on the rwlock, blocking as needed.
+void ReadWriteLock::WriteAcquire() {
+  const int rv = pthread_rwlock_wrlock(&native_handle_);
+  DCHECK_EQ(rv, 0) << ". " << strerror(rv);
+}
+
+// Drop an exclusive (writer) hold previously taken with WriteAcquire().
+void ReadWriteLock::WriteRelease() {
+  const int rv = pthread_rwlock_unlock(&native_handle_);
+  DCHECK_EQ(rv, 0) << ". " << strerror(rv);
+}
+
+} // namespace subtle
+} // namespace base
diff --git a/libchrome/base/synchronization/spin_wait.h b/libchrome/base/synchronization/spin_wait.h
new file mode 100644
index 0000000..9b147cd
--- /dev/null
+++ b/libchrome/base/synchronization/spin_wait.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file provides a macro ONLY for use in testing.
+// DO NOT USE IN PRODUCTION CODE. There are much better ways to wait.
+
+// This code is very helpful in testing multi-threaded code, without depending
+// on almost any primitives. This is especially helpful if you are testing
+// those primitive multi-threaded constructs.
+
+// We provide a simple one argument spin wait (for 1 second), and a generic
+// spin wait (for longer periods of time).
+
+#ifndef BASE_SYNCHRONIZATION_SPIN_WAIT_H_
+#define BASE_SYNCHRONIZATION_SPIN_WAIT_H_
+
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+
+// Provide a macro that will wait no longer than 1 second for an asynchronous
+// change is the value of an expression.
+// A typical use would be:
+//
+// SPIN_FOR_1_SECOND_OR_UNTIL_TRUE(0 == f(x));
+//
+// The expression will be evaluated repeatedly until it is true, or until
+// the time (1 second) expires.
+// Since tests generally have a 5 second watch dog timer, this spin loop is
+// typically used to get the padding needed on a given test platform to assure
+// that the test passes, even if load varies, and external events vary.
+
+#define SPIN_FOR_1_SECOND_OR_UNTIL_TRUE(expression) \
+    SPIN_FOR_TIMEDELTA_OR_UNTIL_TRUE(base::TimeDelta::FromSeconds(1), \
+                                     (expression))
+
+// Generic form: re-evaluate |expression| every 50ms until it becomes true or
+// |delta| elapses; on timeout an EXPECT_LE failure ("Timed out") is reported
+// and the loop exits.
+#define SPIN_FOR_TIMEDELTA_OR_UNTIL_TRUE(delta, expression) do { \
+    base::TimeTicks start = base::TimeTicks::Now(); \
+    const base::TimeDelta kTimeout = delta; \
+    while (!(expression)) { \
+      if (kTimeout < base::TimeTicks::Now() - start) { \
+        EXPECT_LE((base::TimeTicks::Now() - start).InMilliseconds(), \
+                  kTimeout.InMilliseconds()) << "Timed out"; \
+        break; \
+      } \
+      base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(50)); \
+    } \
+  } while (0)
+
+#endif // BASE_SYNCHRONIZATION_SPIN_WAIT_H_
diff --git a/libchrome/base/synchronization/waitable_event.h b/libchrome/base/synchronization/waitable_event.h
new file mode 100644
index 0000000..3863e98
--- /dev/null
+++ b/libchrome/base/synchronization/waitable_event.h
@@ -0,0 +1,196 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_WAITABLE_EVENT_H_
+#define BASE_SYNCHRONIZATION_WAITABLE_EVENT_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#endif
+
+#if defined(OS_POSIX)
+#include <list>
+#include <utility>
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#endif
+
+namespace base {
+
+class TimeDelta;
+
+// A WaitableEvent can be a useful thread synchronization tool when you want to
+// allow one thread to wait for another thread to finish some work. For
+// non-Windows systems, this can only be used from within a single address
+// space.
+//
+// Use a WaitableEvent when you would otherwise use a Lock+ConditionVariable to
+// protect a simple boolean value. However, if you find yourself using a
+// WaitableEvent in conjunction with a Lock to wait for a more complex state
+// change (e.g., for an item to be added to a queue), then you should probably
+// be using a ConditionVariable instead of a WaitableEvent.
+//
+// NOTE: On Windows, this class provides a subset of the functionality afforded
+// by a Windows event object. This is intentional. If you are writing Windows
+// specific code and you need other features of a Windows event, then you might
+// be better off just using an Windows event directly.
+class BASE_EXPORT WaitableEvent {
+ public:
+  // Indicates whether a WaitableEvent should automatically reset the event
+  // state after a single waiting thread has been released or remain signaled
+  // until Reset() is manually invoked.
+  enum class ResetPolicy { MANUAL, AUTOMATIC };
+
+  // Indicates whether a new WaitableEvent should start in a signaled state or
+  // not.
+  enum class InitialState { SIGNALED, NOT_SIGNALED };
+
+  // Constructs a WaitableEvent with policy and initial state as detailed in
+  // the above enums.
+  WaitableEvent(ResetPolicy reset_policy, InitialState initial_state);
+
+#if defined(OS_WIN)
+  // Create a WaitableEvent from an Event HANDLE which has already been
+  // created. This objects takes ownership of the HANDLE and will close it when
+  // deleted.
+  explicit WaitableEvent(win::ScopedHandle event_handle);
+#endif
+
+  ~WaitableEvent();
+
+  // Put the event in the un-signaled state.
+  void Reset();
+
+  // Put the event in the signaled state. Causing any thread blocked on Wait
+  // to be woken up.
+  void Signal();
+
+  // Returns true if the event is in the signaled state, else false. If this
+  // is not a manual reset event, then this test will cause a reset.
+  bool IsSignaled();
+
+  // Wait indefinitely for the event to be signaled. Wait's return "happens
+  // after" |Signal| has completed. This means that it's safe for a
+  // WaitableEvent to synchronise its own destruction, like this:
+  //
+  //   WaitableEvent *e = new WaitableEvent;
+  //   SendToOtherThread(e);
+  //   e->Wait();
+  //   delete e;
+  void Wait();
+
+  // Wait up until max_time has passed for the event to be signaled. Returns
+  // true if the event was signaled. If this method returns false, then it
+  // does not necessarily mean that max_time was exceeded.
+  //
+  // TimedWait can synchronise its own destruction like |Wait|.
+  bool TimedWait(const TimeDelta& max_time);
+
+#if defined(OS_WIN)
+  HANDLE handle() const { return handle_.Get(); }
+#endif
+
+  // Wait, synchronously, on multiple events.
+  //   waitables: an array of WaitableEvent pointers
+  //   count: the number of elements in @waitables
+  //
+  // returns: the index of a WaitableEvent which has been signaled.
+  //
+  // You MUST NOT delete any of the WaitableEvent objects while this wait is
+  // happening, however WaitMany's return "happens after" the |Signal| call
+  // that caused it has completed, like |Wait|.
+  static size_t WaitMany(WaitableEvent** waitables, size_t count);
+
+  // For asynchronous waiting, see WaitableEventWatcher
+
+  // This is a private helper class. It's here because it's used by friends of
+  // this class (such as WaitableEventWatcher) to be able to enqueue elements
+  // of the wait-list
+  class Waiter {
+   public:
+    // Signal the waiter to wake up.
+    //
+    // Consider the case of a Waiter which is in multiple WaitableEvent's
+    // wait-lists. Each WaitableEvent is automatic-reset and two of them are
+    // signaled at the same time. Now, each will wake only the first waiter in
+    // the wake-list before resetting. However, if those two waiters happen to
+    // be the same object (as can happen if another thread didn't have a chance
+    // to dequeue the waiter from the other wait-list in time), two auto-resets
+    // will have happened, but only one waiter has been signaled!
+    //
+    // Because of this, a Waiter may "reject" a wake by returning false. In
+    // this case, the auto-reset WaitableEvent shouldn't act as if anything has
+    // been notified.
+    virtual bool Fire(WaitableEvent* signaling_event) = 0;
+
+    // Waiters may implement this in order to provide an extra condition for
+    // two Waiters to be considered equal. In WaitableEvent::Dequeue, if the
+    // pointers match then this function is called as a final check. See the
+    // comments in ~Handle for why.
+    virtual bool Compare(void* tag) = 0;
+
+   protected:
+    virtual ~Waiter() {}
+  };
+
+ private:
+  friend class WaitableEventWatcher;
+
+#if defined(OS_WIN)
+  win::ScopedHandle handle_;
+#else
+  // On Windows, one can close a HANDLE which is currently being waited on. The
+  // MSDN documentation says that the resulting behaviour is 'undefined', but
+  // it doesn't crash. However, if we were to include the following members
+  // directly then, on POSIX, one couldn't use WaitableEventWatcher to watch an
+  // event which gets deleted. This mismatch has bitten us several times now,
+  // so we have a kernel of the WaitableEvent, which is reference counted.
+  // WaitableEventWatchers may then take a reference and thus match the Windows
+  // behaviour.
+  struct WaitableEventKernel :
+      public RefCountedThreadSafe<WaitableEventKernel> {
+   public:
+    WaitableEventKernel(ResetPolicy reset_policy, InitialState initial_state);
+
+    // Removes |waiter| from |waiters_|; |tag| is forwarded to Waiter::Compare
+    // to disambiguate waiter objects that may have been reused.
+    bool Dequeue(Waiter* waiter, void* tag);
+
+    base::Lock lock_;
+    const bool manual_reset_;
+    bool signaled_;
+    std::list<Waiter*> waiters_;
+
+   private:
+    friend class RefCountedThreadSafe<WaitableEventKernel>;
+    ~WaitableEventKernel();
+  };
+
+  typedef std::pair<WaitableEvent*, size_t> WaiterAndIndex;
+
+  // When dealing with arrays of WaitableEvent*, we want to sort by the address
+  // of the WaitableEvent in order to have a globally consistent locking order.
+  // In that case we keep them, in sorted order, in an array of pairs where the
+  // second element is the index of the WaitableEvent in the original,
+  // unsorted, array.
+  static size_t EnqueueMany(WaiterAndIndex* waitables,
+                            size_t count, Waiter* waiter);
+
+  bool SignalAll();
+  bool SignalOne();
+  void Enqueue(Waiter* waiter);
+
+  // Reference-counted state, shared with any watchers (POSIX only).
+  scoped_refptr<WaitableEventKernel> kernel_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(WaitableEvent);
+};
+
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_WAITABLE_EVENT_H_
diff --git a/libchrome/base/synchronization/waitable_event_posix.cc b/libchrome/base/synchronization/waitable_event_posix.cc
new file mode 100644
index 0000000..b32c882
--- /dev/null
+++ b/libchrome/base/synchronization/waitable_event_posix.cc
@@ -0,0 +1,415 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread_restrictions.h"
+
+// -----------------------------------------------------------------------------
+// A WaitableEvent on POSIX is implemented as a wait-list. Currently we don't
+// support cross-process events (where one process can signal an event which
+// others are waiting on). Because of this, we can avoid having one thread per
+// listener in several cases.
+//
+// The WaitableEvent maintains a list of waiters, protected by a lock. Each
+// waiter is either an async wait, in which case we have a Task and the
+// MessageLoop to run it on, or a blocking wait, in which case we have the
+// condition variable to signal.
+//
+// Waiting involves grabbing the lock and adding oneself to the wait list. Async
+// waits can be canceled, which means grabbing the lock and removing oneself
+// from the list.
+//
+// Waiting on multiple events is handled by adding a single, synchronous wait to
+// the wait-list of many events. An event passes a pointer to itself when
+// firing a waiter and so we can store that pointer to find out which event
+// triggered.
+// -----------------------------------------------------------------------------
+
+namespace base {
+
+// -----------------------------------------------------------------------------
+// This is just an abstract base class for waking the two types of waiters
+// -----------------------------------------------------------------------------
+// Creates the ref-counted kernel that holds all of the event's state.
+WaitableEvent::WaitableEvent(ResetPolicy reset_policy,
+                             InitialState initial_state)
+    : kernel_(new WaitableEventKernel(reset_policy, initial_state)) {}
+
+// The kernel is ref-counted, so a WaitableEventWatcher holding a reference
+// keeps the shared state alive past the event's destruction (see header).
+WaitableEvent::~WaitableEvent() = default;
+
+void WaitableEvent::Reset() {
+ base::AutoLock locked(kernel_->lock_);
+ kernel_->signaled_ = false;
+}
+
+// Puts the event into the signaled state. A manual-reset event wakes every
+// waiter and stays signaled; an auto-reset event wakes at most one waiter
+// and only remains signaled if nobody could be woken.
+void WaitableEvent::Signal() {
+  base::AutoLock locked(kernel_->lock_);
+
+  // Already signaled: nothing further to do.
+  if (kernel_->signaled_)
+    return;
+
+  if (kernel_->manual_reset_) {
+    SignalAll();
+    kernel_->signaled_ = true;
+  } else {
+    // In the case of auto reset, if no waiters were woken, we remain
+    // signaled.
+    if (!SignalOne())
+      kernel_->signaled_ = true;
+  }
+}
+
+bool WaitableEvent::IsSignaled() {
+ base::AutoLock locked(kernel_->lock_);
+
+ const bool result = kernel_->signaled_;
+ if (result && !kernel_->manual_reset_)
+ kernel_->signaled_ = false;
+ return result;
+}
+
+// -----------------------------------------------------------------------------
+// Synchronous waits
+
+// -----------------------------------------------------------------------------
+// This is a synchronous waiter. The thread is waiting on the given condition
+// variable and the fired flag in this object.
+// -----------------------------------------------------------------------------
+class SyncWaiter : public WaitableEvent::Waiter {
+ public:
+  SyncWaiter()
+      : fired_(false),
+        signaling_event_(NULL),
+        lock_(),
+        cv_(&lock_) {
+  }
+
+  // WaitableEvent::Waiter implementation: wakes the blocked thread via the
+  // condition variable. Returns false (declining the signal) if this waiter
+  // has already fired or was disabled. Invoked by SignalOne()/SignalAll()
+  // with the event's kernel lock held.
+  bool Fire(WaitableEvent* signaling_event) override {
+    base::AutoLock locked(lock_);
+
+    if (fired_)
+      return false;
+
+    fired_ = true;
+    signaling_event_ = signaling_event;
+
+    cv_.Broadcast();
+
+    // Unlike AsyncWaiter objects, SyncWaiter objects are stack-allocated on
+    // the blocking thread's stack. There is no |delete this;| in Fire. The
+    // SyncWaiter object is destroyed when it goes out of scope.
+
+    return true;
+  }
+
+  // The event that fired this waiter; WaitMany() uses it to report which of
+  // the watched events was signaled.
+  WaitableEvent* signaling_event() const {
+    return signaling_event_;
+  }
+
+  // ---------------------------------------------------------------------------
+  // These waiters are always stack allocated and don't delete themselves. Thus
+  // there's no problem and the ABA tag is the same as the object pointer.
+  // ---------------------------------------------------------------------------
+  bool Compare(void* tag) override { return this == tag; }
+
+  // ---------------------------------------------------------------------------
+  // Called with lock held.
+  // ---------------------------------------------------------------------------
+  bool fired() const {
+    return fired_;
+  }
+
+  // ---------------------------------------------------------------------------
+  // During a TimedWait, we need a way to make sure that an auto-reset
+  // WaitableEvent doesn't think that this event has been signaled between
+  // unlocking it and removing it from the wait-list. Called with lock held.
+  // ---------------------------------------------------------------------------
+  void Disable() {
+    fired_ = true;
+  }
+
+  base::Lock* lock() {
+    return &lock_;
+  }
+
+  base::ConditionVariable* cv() {
+    return &cv_;
+  }
+
+ private:
+  bool fired_;                     // Set once woken (or disabled).
+  WaitableEvent* signaling_event_;  // The WaitableEvent which woke us
+  base::Lock lock_;                // Guards |fired_| / |signaling_event_|.
+  base::ConditionVariable cv_;     // Signaled by Fire().
+};
+
+// Blocks indefinitely until the event is signaled; implemented as a
+// TimedWait with a negative (i.e. infinite) timeout.
+void WaitableEvent::Wait() {
+  bool result = TimedWait(TimeDelta::FromSeconds(-1));
+  DCHECK(result) << "TimedWait() should never fail with infinite timeout";
+}
+
+bool WaitableEvent::TimedWait(const TimeDelta& max_time) {
+ base::ThreadRestrictions::AssertWaitAllowed();
+ const TimeTicks end_time(TimeTicks::Now() + max_time);
+ const bool finite_time = max_time.ToInternalValue() >= 0;
+
+ kernel_->lock_.Acquire();
+ if (kernel_->signaled_) {
+ if (!kernel_->manual_reset_) {
+ // In this case we were signaled when we had no waiters. Now that
+ // someone has waited upon us, we can automatically reset.
+ kernel_->signaled_ = false;
+ }
+
+ kernel_->lock_.Release();
+ return true;
+ }
+
+ SyncWaiter sw;
+ sw.lock()->Acquire();
+
+ Enqueue(&sw);
+ kernel_->lock_.Release();
+ // We are violating locking order here by holding the SyncWaiter lock but not
+ // the WaitableEvent lock. However, this is safe because we don't lock @lock_
+ // again before unlocking it.
+
+ for (;;) {
+ const TimeTicks current_time(TimeTicks::Now());
+
+ if (sw.fired() || (finite_time && current_time >= end_time)) {
+ const bool return_value = sw.fired();
+
+ // We can't acquire @lock_ before releasing the SyncWaiter lock (because
+ // of locking order), however, in between the two a signal could be fired
+ // and @sw would accept it, however we will still return false, so the
+ // signal would be lost on an auto-reset WaitableEvent. Thus we call
+ // Disable which makes sw::Fire return false.
+ sw.Disable();
+ sw.lock()->Release();
+
+ // This is a bug that has been enshrined in the interface of
+ // WaitableEvent now: |Dequeue| is called even when |sw.fired()| is true,
+ // even though it'll always return false in that case. However, taking
+ // the lock ensures that |Signal| has completed before we return and
+ // means that a WaitableEvent can synchronise its own destruction.
+ kernel_->lock_.Acquire();
+ kernel_->Dequeue(&sw, &sw);
+ kernel_->lock_.Release();
+
+ return return_value;
+ }
+
+ if (finite_time) {
+ const TimeDelta max_wait(end_time - current_time);
+ sw.cv()->TimedWait(max_wait);
+ } else {
+ sw.cv()->Wait();
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Synchronous waiting on multiple objects.
+
+static bool // StrictWeakOrdering
+cmp_fst_addr(const std::pair<WaitableEvent*, unsigned> &a,
+ const std::pair<WaitableEvent*, unsigned> &b) {
+ return a.first < b.first;
+}
+
+// static
+// Blocks until one of |raw_waitables| becomes signaled and returns that
+// event's index in the caller's (unsorted) array. An auto-reset signal is
+// consumed by the return.
+size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables,
+                               size_t count) {
+  base::ThreadRestrictions::AssertWaitAllowed();
+  DCHECK(count) << "Cannot wait on no events";
+
+  // We need to acquire the locks in a globally consistent order. Thus we sort
+  // the array of waitables by address. We actually sort pairs so that we can
+  // map back to the original index values later.
+  std::vector<std::pair<WaitableEvent*, size_t> > waitables;
+  waitables.reserve(count);
+  for (size_t i = 0; i < count; ++i)
+    waitables.push_back(std::make_pair(raw_waitables[i], i));
+
+  DCHECK_EQ(count, waitables.size());
+
+  sort(waitables.begin(), waitables.end(), cmp_fst_addr);
+
+  // The set of waitables must be distinct. Since we have just sorted by
+  // address, we can check this cheaply by comparing pairs of consecutive
+  // elements.
+  for (size_t i = 0; i < waitables.size() - 1; ++i) {
+    DCHECK(waitables[i].first != waitables[i+1].first);
+  }
+
+  SyncWaiter sw;
+
+  const size_t r = EnqueueMany(&waitables[0], count, &sw);
+  if (r) {
+    // One of the events is already signaled. The SyncWaiter has not been
+    // enqueued anywhere. EnqueueMany returns the count of remaining waitables
+    // when the signaled one was seen, so the index of the signaled event is
+    // @count - @r.
+    return waitables[count - r].second;
+  }
+
+  // At this point, we hold the locks on all the WaitableEvents and we have
+  // enqueued our waiter in them all.
+  sw.lock()->Acquire();
+  // Release the WaitableEvent locks in the reverse order
+  for (size_t i = 0; i < count; ++i) {
+    waitables[count - (1 + i)].first->kernel_->lock_.Release();
+  }
+
+  // Block on our own SyncWaiter until some event fires it.
+  for (;;) {
+    if (sw.fired())
+      break;
+
+    sw.cv()->Wait();
+  }
+  sw.lock()->Release();
+
+  // The address of the WaitableEvent which fired is stored in the SyncWaiter.
+  WaitableEvent *const signaled_event = sw.signaling_event();
+  // This will store the index of the raw_waitables which fired.
+  size_t signaled_index = 0;
+
+  // Take the locks of each WaitableEvent in turn (except the signaled one) and
+  // remove our SyncWaiter from the wait-list
+  for (size_t i = 0; i < count; ++i) {
+    if (raw_waitables[i] != signaled_event) {
+      raw_waitables[i]->kernel_->lock_.Acquire();
+      // There's no possible ABA issue with the address of the SyncWaiter here
+      // because it lives on the stack. Thus the tag value is just the pointer
+      // value again.
+      raw_waitables[i]->kernel_->Dequeue(&sw, &sw);
+      raw_waitables[i]->kernel_->lock_.Release();
+    } else {
+      // By taking this lock here we ensure that |Signal| has completed by the
+      // time we return, because |Signal| holds this lock. This matches the
+      // behaviour of |Wait| and |TimedWait|.
+      raw_waitables[i]->kernel_->lock_.Acquire();
+      raw_waitables[i]->kernel_->lock_.Release();
+      signaled_index = i;
+    }
+  }
+
+  return signaled_index;
+}
+
+// -----------------------------------------------------------------------------
+// If return value == 0:
+// The locks of the WaitableEvents have been taken in order and the Waiter has
+// been enqueued in the wait-list of each. None of the WaitableEvents are
+// currently signaled
+// else:
+// None of the WaitableEvent locks are held. The Waiter has not been enqueued
+// in any of them and the return value is the index of the first WaitableEvent
+// which was signaled, from the end of the array.
+// -----------------------------------------------------------------------------
+// static
+size_t WaitableEvent::EnqueueMany
+    (std::pair<WaitableEvent*, size_t>* waitables,
+     size_t count, Waiter* waiter) {
+  // Base case: every event was locked, none signaled; all locks stay held.
+  if (!count)
+    return 0;
+
+  // Take the locks in array order; the caller (WaitMany) has already sorted
+  // the array by address so this is the globally consistent locking order.
+  waitables[0].first->kernel_->lock_.Acquire();
+  if (waitables[0].first->kernel_->signaled_) {
+    // This event is already signaled: consume the signal if auto-reset and
+    // unwind. The returned distance-from-the-end identifies the event.
+    if (!waitables[0].first->kernel_->manual_reset_)
+      waitables[0].first->kernel_->signaled_ = false;
+    waitables[0].first->kernel_->lock_.Release();
+    return count;
+  }
+
+  const size_t r = EnqueueMany(waitables + 1, count - 1, waiter);
+  if (r) {
+    // A later event was signaled: release our lock while unwinding.
+    waitables[0].first->kernel_->lock_.Release();
+  } else {
+    // Success path: enqueue the waiter and keep the lock held for WaitMany.
+    waitables[0].first->Enqueue(waiter);
+  }
+
+  return r;
+}
+
+// -----------------------------------------------------------------------------
+
+
+// -----------------------------------------------------------------------------
+// Private functions...
+
+// Translates the public policy/state enums into the kernel's booleans.
+WaitableEvent::WaitableEventKernel::WaitableEventKernel(
+    ResetPolicy reset_policy,
+    InitialState initial_state)
+    : manual_reset_(reset_policy == ResetPolicy::MANUAL),
+      signaled_(initial_state == InitialState::SIGNALED) {}
+
+WaitableEvent::WaitableEventKernel::~WaitableEventKernel() = default;
+
+// -----------------------------------------------------------------------------
+// Wake all waiting waiters. Called with lock held.
+// -----------------------------------------------------------------------------
+bool WaitableEvent::SignalAll() {
+ bool signaled_at_least_one = false;
+
+ for (std::list<Waiter*>::iterator
+ i = kernel_->waiters_.begin(); i != kernel_->waiters_.end(); ++i) {
+ if ((*i)->Fire(this))
+ signaled_at_least_one = true;
+ }
+
+ kernel_->waiters_.clear();
+ return signaled_at_least_one;
+}
+
+// ---------------------------------------------------------------------------
+// Try to wake a single waiter. Return true if one was woken. Called with lock
+// held.
+// ---------------------------------------------------------------------------
+bool WaitableEvent::SignalOne() {
+ for (;;) {
+ if (kernel_->waiters_.empty())
+ return false;
+
+ const bool r = (*kernel_->waiters_.begin())->Fire(this);
+ kernel_->waiters_.pop_front();
+ if (r)
+ return true;
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Add a waiter to the list of those waiting. Called with lock held.
+// -----------------------------------------------------------------------------
+void WaitableEvent::Enqueue(Waiter* waiter) {
+  // The list holds a bare pointer: entries are removed either when fired
+  // (SignalOne/SignalAll) or explicitly via WaitableEventKernel::Dequeue().
+  kernel_->waiters_.push_back(waiter);
+}
+
+// -----------------------------------------------------------------------------
+// Remove a waiter from the list of those waiting. Return true if the waiter was
+// actually removed. Called with lock held.
+// -----------------------------------------------------------------------------
+bool WaitableEvent::WaitableEventKernel::Dequeue(Waiter* waiter, void* tag) {
+ for (std::list<Waiter*>::iterator
+ i = waiters_.begin(); i != waiters_.end(); ++i) {
+ if (*i == waiter && (*i)->Compare(tag)) {
+ waiters_.erase(i);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// -----------------------------------------------------------------------------
+
+} // namespace base
diff --git a/libchrome/base/synchronization/waitable_event_unittest.cc b/libchrome/base/synchronization/waitable_event_unittest.cc
new file mode 100644
index 0000000..ac5c9f1
--- /dev/null
+++ b/libchrome/base/synchronization/waitable_event_unittest.cc
@@ -0,0 +1,163 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/waitable_event.h"
+
+#include <stddef.h>
+
+#include "base/compiler_specific.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// A manual-reset event stays signaled across queries and waits until Reset()
+// is called explicitly.
+TEST(WaitableEventTest, ManualBasics) {
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  EXPECT_FALSE(event.IsSignaled());
+
+  event.Signal();
+  // Repeated queries do not consume the signal.
+  EXPECT_TRUE(event.IsSignaled());
+  EXPECT_TRUE(event.IsSignaled());
+
+  event.Reset();
+  EXPECT_FALSE(event.IsSignaled());
+  EXPECT_FALSE(event.TimedWait(TimeDelta::FromMilliseconds(10)));
+
+  event.Signal();
+  event.Wait();
+  // Still signaled: Wait() does not reset a manual-reset event.
+  EXPECT_TRUE(event.TimedWait(TimeDelta::FromMilliseconds(10)));
+}
+
+// An auto-reset event is consumed by the first successful query or wait.
+TEST(WaitableEventTest, AutoBasics) {
+  WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
+
+  EXPECT_FALSE(event.IsSignaled());
+
+  event.Signal();
+  // The first IsSignaled() consumes the signal.
+  EXPECT_TRUE(event.IsSignaled());
+  EXPECT_FALSE(event.IsSignaled());
+
+  event.Reset();
+  EXPECT_FALSE(event.IsSignaled());
+  EXPECT_FALSE(event.TimedWait(TimeDelta::FromMilliseconds(10)));
+
+  event.Signal();
+  event.Wait();
+  // The Wait() above consumed the signal.
+  EXPECT_FALSE(event.TimedWait(TimeDelta::FromMilliseconds(10)));
+
+  event.Signal();
+  EXPECT_TRUE(event.TimedWait(TimeDelta::FromMilliseconds(10)));
+}
+
+// WaitMany() should return the index of an already-signaled event without
+// blocking.
+TEST(WaitableEventTest, WaitManyShortcut) {
+  WaitableEvent* ev[5];
+  for (unsigned i = 0; i < 5; ++i) {
+    ev[i] = new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                              WaitableEvent::InitialState::NOT_SIGNALED);
+  }
+
+  ev[3]->Signal();
+  EXPECT_EQ(WaitableEvent::WaitMany(ev, 5), 3u);
+
+  ev[3]->Signal();
+  EXPECT_EQ(WaitableEvent::WaitMany(ev, 5), 3u);
+
+  ev[4]->Signal();
+  EXPECT_EQ(WaitableEvent::WaitMany(ev, 5), 4u);
+
+  ev[0]->Signal();
+  EXPECT_EQ(WaitableEvent::WaitMany(ev, 5), 0u);
+
+  for (unsigned i = 0; i < 5; ++i)
+    delete ev[i];
+}
+
+// Helper thread delegate that signals |event| after sleeping for |delay|.
+class WaitableEventSignaler : public PlatformThread::Delegate {
+ public:
+  WaitableEventSignaler(TimeDelta delay, WaitableEvent* event)
+      : delay_(delay),
+        event_(event) {
+  }
+
+  // PlatformThread::Delegate:
+  void ThreadMain() override {
+    PlatformThread::Sleep(delay_);
+    event_->Signal();
+  }
+
+ private:
+  const TimeDelta delay_;
+  WaitableEvent* event_;  // Not owned.
+};
+
+// Tests that a WaitableEvent can be safely deleted when |Wait| is done without
+// additional synchronization.
+TEST(WaitableEventTest, WaitAndDelete) {
+  WaitableEvent* ev =
+      new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                        WaitableEvent::InitialState::NOT_SIGNALED);
+
+  WaitableEventSignaler signaler(TimeDelta::FromMilliseconds(10), ev);
+  PlatformThreadHandle thread;
+  PlatformThread::Create(0, &signaler, &thread);
+
+  // Deleting right after Wait() returns must be safe even though the
+  // signaling thread may still be executing inside Signal().
+  ev->Wait();
+  delete ev;
+
+  PlatformThread::Join(thread);
+}
+
+// Tests that a WaitableEvent can be safely deleted when |WaitMany| is done
+// without additional synchronization.
+TEST(WaitableEventTest, WaitMany) {
+  WaitableEvent* ev[5];
+  for (unsigned i = 0; i < 5; ++i) {
+    ev[i] = new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                              WaitableEvent::InitialState::NOT_SIGNALED);
+  }
+
+  WaitableEventSignaler signaler(TimeDelta::FromMilliseconds(10), ev[2]);
+  PlatformThreadHandle thread;
+  PlatformThread::Create(0, &signaler, &thread);
+
+  size_t index = WaitableEvent::WaitMany(ev, 5);
+
+  // Deleting the events immediately after WaitMany() returns must be safe;
+  // the signaling thread may still be running inside Signal().
+  for (unsigned i = 0; i < 5; ++i)
+    delete ev[i];
+
+  PlatformThread::Join(thread);
+  EXPECT_EQ(2u, index);
+}
+
+// Tests that using TimeDelta::Max() on TimedWait() is not the same as passing
+// a timeout of 0. (crbug.com/465948)
+#if defined(OS_POSIX)
+// crbug.com/465948 not fixed yet.
+#define MAYBE_TimedWait DISABLED_TimedWait
+#else
+#define MAYBE_TimedWait TimedWait
+#endif
+// TimedWait(TimeDelta::Max()) should block until signaled, i.e. at least as
+// long as the signaling thread's delay — not return immediately.
+TEST(WaitableEventTest, MAYBE_TimedWait) {
+  WaitableEvent* ev =
+      new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                        WaitableEvent::InitialState::NOT_SIGNALED);
+
+  TimeDelta thread_delay = TimeDelta::FromMilliseconds(10);
+  WaitableEventSignaler signaler(thread_delay, ev);
+  PlatformThreadHandle thread;
+  TimeTicks start = TimeTicks::Now();
+  PlatformThread::Create(0, &signaler, &thread);
+
+  ev->TimedWait(TimeDelta::Max());
+  EXPECT_GE(TimeTicks::Now() - start, thread_delay);
+  delete ev;
+
+  PlatformThread::Join(thread);
+}
+
+} // namespace base
diff --git a/libchrome/base/synchronization/waitable_event_watcher.h b/libchrome/base/synchronization/waitable_event_watcher.h
new file mode 100644
index 0000000..eb51eff
--- /dev/null
+++ b/libchrome/base/synchronization/waitable_event_watcher.h
@@ -0,0 +1,114 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_WAITABLE_EVENT_WATCHER_H_
+#define BASE_SYNCHRONIZATION_WAITABLE_EVENT_WATCHER_H_
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/object_watcher.h"
+#else
+#include "base/callback.h"
+#include "base/message_loop/message_loop.h"
+#include "base/synchronization/waitable_event.h"
+#endif
+
+namespace base {
+
+class Flag;
+class AsyncWaiter;
+class AsyncCallbackTask;
+class WaitableEvent;
+
+// This class provides a way to wait on a WaitableEvent asynchronously.
+//
+// Each instance of this object can be waiting on a single WaitableEvent. When
+// the waitable event is signaled, a callback is made in the thread of a given
+// MessageLoop. This callback can be deleted by deleting the waiter.
+//
+// Typical usage:
+//
+// class MyClass {
+// public:
+// void DoStuffWhenSignaled(WaitableEvent *waitable_event) {
+// watcher_.StartWatching(waitable_event,
+//        base::Bind(&MyClass::OnWaitableEventSignaled, this));
+// }
+// private:
+// void OnWaitableEventSignaled(WaitableEvent* waitable_event) {
+// // OK, time to do stuff!
+// }
+// base::WaitableEventWatcher watcher_;
+// };
+//
+// In the above example, MyClass wants to "do stuff" when waitable_event
+// becomes signaled. WaitableEventWatcher makes this task easy. When MyClass
+// goes out of scope, the watcher_ will be destroyed, and there is no need to
+// worry about OnWaitableEventSignaled being called on a deleted MyClass
+// pointer.
+//
+// BEWARE: With automatically reset WaitableEvents, a signal may be lost if it
+// occurs just before a WaitableEventWatcher is deleted. There is currently no
+// safe way to stop watching an automatic reset WaitableEvent without possibly
+// missing a signal.
+//
+// NOTE: you /are/ allowed to delete the WaitableEvent while still waiting on
+// it with a Watcher. It will act as if the event was never signaled.
+
+class BASE_EXPORT WaitableEventWatcher
+#if defined(OS_WIN)
+    : public win::ObjectWatcher::Delegate {
+#else
+    : public MessageLoop::DestructionObserver {
+#endif
+ public:
+  // Signature of the callback run (on the watching thread's MessageLoop)
+  // when the watched event is signaled.
+  typedef Callback<void(WaitableEvent*)> EventCallback;
+  WaitableEventWatcher();
+  ~WaitableEventWatcher() override;
+
+  // When @event is signaled, the given callback is called on the thread of the
+  // current message loop when StartWatching is called.
+  bool StartWatching(WaitableEvent* event, const EventCallback& callback);
+
+  // Cancel the current watch. Must be called from the same thread which
+  // started the watch.
+  //
+  // Does nothing if no event is being watched, nor if the watch has completed.
+  // The callback will *not* be called for the current watch after this
+  // function returns. Since the callback runs on the same thread as this
+  // function, it cannot be called during this function either.
+  void StopWatching();
+
+  // Return the currently watched event, or NULL if no object is currently being
+  // watched.
+  WaitableEvent* GetWatchedEvent();
+
+  // Return the callback that will be invoked when the event is
+  // signaled.
+  const EventCallback& callback() const { return callback_; }
+
+ private:
+#if defined(OS_WIN)
+  void OnObjectSignaled(HANDLE h) override;
+  win::ObjectWatcher watcher_;
+#else
+  // Implementation of MessageLoop::DestructionObserver
+  void WillDestroyCurrentMessageLoop() override;
+
+  MessageLoop* message_loop_;        // Loop observed for destruction.
+  scoped_refptr<Flag> cancel_flag_;  // Set once the watch fires or is stopped.
+  AsyncWaiter* waiter_;              // Enqueued on the event; self-deleting.
+  base::Closure internal_callback_;
+  // Keeps the event's shared state alive so it can be deleted mid-watch.
+  scoped_refptr<WaitableEvent::WaitableEventKernel> kernel_;
+#endif
+
+  WaitableEvent* event_;
+  EventCallback callback_;
+};
+
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_WAITABLE_EVENT_WATCHER_H_
diff --git a/libchrome/base/synchronization/waitable_event_watcher_posix.cc b/libchrome/base/synchronization/waitable_event_watcher_posix.cc
new file mode 100644
index 0000000..7cf8688
--- /dev/null
+++ b/libchrome/base/synchronization/waitable_event_watcher_posix.cc
@@ -0,0 +1,270 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/waitable_event_watcher.h"
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+
+namespace base {
+
+// -----------------------------------------------------------------------------
+// WaitableEventWatcher (async waits).
+//
+// The basic design is that we add an AsyncWaiter to the wait-list of the event.
+// That AsyncWaiter has a pointer to MessageLoop, and a Task to be posted to it.
+// The MessageLoop ends up running the task, which calls the delegate.
+//
+// Since the wait can be canceled, we have a thread-safe Flag object which is
+// set when the wait has been canceled. At each stage in the above, we check the
+// flag before going onto the next stage. Since the wait may only be canceled in
+// the MessageLoop which runs the Task, we are assured that the delegate cannot
+// be called after canceling...
+
+// -----------------------------------------------------------------------------
+// A thread-safe, reference-counted, write-once flag.
+// -----------------------------------------------------------------------------
+class Flag : public RefCountedThreadSafe<Flag> {
+ public:
+ Flag() { flag_ = false; }
+
+ void Set() {
+ AutoLock locked(lock_);
+ flag_ = true;
+ }
+
+ bool value() const {
+ AutoLock locked(lock_);
+ return flag_;
+ }
+
+ private:
+ friend class RefCountedThreadSafe<Flag>;
+ ~Flag() {}
+
+ mutable Lock lock_;
+ bool flag_;
+
+ DISALLOW_COPY_AND_ASSIGN(Flag);
+};
+
+// -----------------------------------------------------------------------------
+// This is an asynchronous waiter which posts a task to a MessageLoop when
+// fired. An AsyncWaiter may only be in a single wait-list.
+// -----------------------------------------------------------------------------
+class AsyncWaiter : public WaitableEvent::Waiter {
+ public:
+  AsyncWaiter(MessageLoop* message_loop,
+              const base::Closure& callback,
+              Flag* flag)
+      : message_loop_(message_loop),
+        callback_(callback),
+        flag_(flag) { }
+
+  // WaitableEvent::Waiter implementation. Invoked by the event with its
+  // kernel lock held; posts |callback_| to |message_loop_| and then deletes
+  // this object.
+  bool Fire(WaitableEvent* event) override {
+    // Post the callback if we haven't been cancelled.
+    if (!flag_->value()) {
+      message_loop_->task_runner()->PostTask(FROM_HERE, callback_);
+    }
+
+    // We are removed from the wait-list by the WaitableEvent itself. It only
+    // remains to delete ourselves.
+    delete this;
+
+    // We can always return true because an AsyncWaiter is never in two
+    // different wait-lists at the same time.
+    return true;
+  }
+
+  // See StopWatching for discussion
+  bool Compare(void* tag) override { return tag == flag_.get(); }
+
+ private:
+  MessageLoop *const message_loop_;  // Where |callback_| is posted.
+  base::Closure callback_;
+  scoped_refptr<Flag> flag_;         // Cancellation flag; also the ABA tag.
+};
+
+// -----------------------------------------------------------------------------
+// For async waits we need to make a callback in a MessageLoop thread. We do
+// this by posting a callback, which calls the delegate and keeps track of when
+// the event is canceled.
+// -----------------------------------------------------------------------------
+// Trampoline posted to the MessageLoop when the watched event fires: sets
+// |flag| (so the watcher knows the watch completed) and runs |callback|,
+// unless the watch was cancelled in the meantime.
+void AsyncCallbackHelper(Flag* flag,
+                         const WaitableEventWatcher::EventCallback& callback,
+                         WaitableEvent* event) {
+  // Runs in MessageLoop thread.
+  if (!flag->value()) {
+    // This is to let the WaitableEventWatcher know that the event has
+    // occurred, because it needs to be able to return NULL from
+    // GetWatchedEvent().
+    flag->Set();
+    callback.Run(event);
+  }
+}
+
+// Starts with no watch in progress; all state is set up by StartWatching().
+WaitableEventWatcher::WaitableEventWatcher()
+    : message_loop_(NULL),
+      cancel_flag_(NULL),
+      waiter_(NULL),
+      event_(NULL) {
+}
+
+// Cancels any outstanding watch so the callback can never run after the
+// watcher has been destroyed.
+WaitableEventWatcher::~WaitableEventWatcher() {
+  StopWatching();
+}
+
+// -----------------------------------------------------------------------------
+// The Handle is how the user cancels a wait. After deleting the Handle we
+// ensure that the delegate cannot be called.
+// -----------------------------------------------------------------------------
+// Begins watching |event|; |callback| will run on the current MessageLoop's
+// thread when the event is signaled. Returns true (the watch always starts;
+// if the event is already signaled the callback is simply posted now).
+bool WaitableEventWatcher::StartWatching(
+    WaitableEvent* event,
+    const EventCallback& callback) {
+  MessageLoop *const current_ml = MessageLoop::current();
+  DCHECK(current_ml) << "Cannot create WaitableEventWatcher without a "
+                        "current MessageLoop";
+
+  // A user may call StartWatching from within the callback function. In this
+  // case, we won't know that we have finished watching, except that the Flag
+  // will have been set in AsyncCallbackHelper().
+  if (cancel_flag_.get() && cancel_flag_->value()) {
+    if (message_loop_) {
+      message_loop_->RemoveDestructionObserver(this);
+      message_loop_ = NULL;
+    }
+
+    cancel_flag_ = NULL;
+  }
+
+  DCHECK(!cancel_flag_.get()) << "StartWatching called while still watching";
+
+  cancel_flag_ = new Flag;
+  callback_ = callback;
+  internal_callback_ = base::Bind(
+      &AsyncCallbackHelper, base::RetainedRef(cancel_flag_), callback_, event);
+  WaitableEvent::WaitableEventKernel* kernel = event->kernel_.get();
+
+  AutoLock locked(kernel->lock_);
+
+  event_ = event;
+
+  if (kernel->signaled_) {
+    // Consume the signal for auto-reset events, as a normal waiter would.
+    if (!kernel->manual_reset_)
+      kernel->signaled_ = false;
+
+    // No hairpinning - we can't call the delegate directly here. We have to
+    // enqueue a task on the MessageLoop as normal.
+    current_ml->task_runner()->PostTask(FROM_HERE, internal_callback_);
+    return true;
+  }
+
+  message_loop_ = current_ml;
+  current_ml->AddDestructionObserver(this);
+
+  // Hold a reference to the kernel so the shared state survives even if the
+  // WaitableEvent itself is deleted while we are watching.
+  kernel_ = kernel;
+  waiter_ = new AsyncWaiter(current_ml, internal_callback_, cancel_flag_.get());
+  event->Enqueue(waiter_);
+
+  return true;
+}
+
+// Cancels the current watch (if any) such that the callback is guaranteed
+// not to run afterwards. Must be called on the watching thread.
+void WaitableEventWatcher::StopWatching() {
+  callback_.Reset();
+
+  if (message_loop_) {
+    message_loop_->RemoveDestructionObserver(this);
+    message_loop_ = NULL;
+  }
+
+  if (!cancel_flag_.get())  // if not currently watching...
+    return;
+
+  if (cancel_flag_->value()) {
+    // In this case, the event has fired, but we haven't figured that out yet.
+    // The WaitableEvent may have been deleted too.
+    cancel_flag_ = NULL;
+    return;
+  }
+
+  if (!kernel_.get()) {
+    // We have no kernel. This means that we never enqueued a Waiter on an
+    // event because the event was already signaled when StartWatching was
+    // called.
+    //
+    // In this case, a task was enqueued on the MessageLoop and will run.
+    // We set the flag in case the task hasn't yet run. The flag will stop the
+    // delegate getting called. If the task has run then we have the last
+    // reference to the flag and it will be deleted immedately after.
+    cancel_flag_->Set();
+    cancel_flag_ = NULL;
+    return;
+  }
+
+  AutoLock locked(kernel_->lock_);
+  // We have a lock on the kernel. No one else can signal the event while we
+  // have it.
+
+  // We have a possible ABA issue here. If Dequeue was to compare only the
+  // pointer values then it's possible that the AsyncWaiter could have been
+  // fired, freed and the memory reused for a different Waiter which was
+  // enqueued in the same wait-list. We would think that that waiter was our
+  // AsyncWaiter and remove it.
+  //
+  // To stop this, Dequeue also takes a tag argument which is passed to the
+  // virtual Compare function before the two are considered a match. So we need
+  // a tag which is good for the lifetime of this handle: the Flag. Since we
+  // have a reference to the Flag, its memory cannot be reused while this object
+  // still exists. So if we find a waiter with the correct pointer value, and
+  // which shares a Flag pointer, we have a real match.
+  if (kernel_->Dequeue(waiter_, cancel_flag_.get())) {
+    // Case 2: the waiter hasn't been signaled yet; it was still on the wait
+    // list. We've removed it, thus we can delete it and the task (which cannot
+    // have been enqueued with the MessageLoop because the waiter was never
+    // signaled)
+    delete waiter_;
+    internal_callback_.Reset();
+    cancel_flag_ = NULL;
+    return;
+  }
+
+  // Case 3: the waiter isn't on the wait-list, thus it was signaled. It may
+  // not have run yet, so we set the flag to tell it not to bother enqueuing the
+  // task on the MessageLoop, but to delete it instead. The Waiter deletes
+  // itself once run.
+  cancel_flag_->Set();
+  cancel_flag_ = NULL;
+
+  // If the waiter has already run then the task has been enqueued. If the Task
+  // hasn't yet run, the flag will stop the delegate from getting called. (This
+  // is thread safe because one may only delete a Handle from the MessageLoop
+  // thread.)
+  //
+  // If the delegate has already been called then we have nothing to do. The
+  // task has been deleted by the MessageLoop.
+}
+
+WaitableEvent* WaitableEventWatcher::GetWatchedEvent() {
+ if (!cancel_flag_.get())
+ return NULL;
+
+ if (cancel_flag_->value())
+ return NULL;
+
+ return event_;
+}
+
+// -----------------------------------------------------------------------------
+// This is called when the MessageLoop on which the callback will be run is
+// deleted. We need to cancel the callback as if we had been deleted, but we
+// will still be deleted at some point in the future.
+// -----------------------------------------------------------------------------
+// MessageLoop::DestructionObserver implementation: the loop that would have
+// run the callback is going away, so cancel the watch.
+void WaitableEventWatcher::WillDestroyCurrentMessageLoop() {
+  StopWatching();
+}
+
+} // namespace base
diff --git a/libchrome/base/sys_byteorder.h b/libchrome/base/sys_byteorder.h
new file mode 100644
index 0000000..8d9066c
--- /dev/null
+++ b/libchrome/base/sys_byteorder.h
@@ -0,0 +1,123 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header defines cross-platform ByteSwap() implementations for 16, 32 and
+// 64-bit values, and NetToHostXX() / HostToNetXX() functions equivalent to
+// the traditional ntohX() and htonX() functions.
+// Use the functions defined here rather than using the platform-specific
+// functions directly.
+
+#ifndef BASE_SYS_BYTEORDER_H_
+#define BASE_SYS_BYTEORDER_H_
+
+#include <stdint.h>
+
+#include "build/build_config.h"
+
+#if defined(COMPILER_MSVC)
+#include <stdlib.h>
+#endif
+
+namespace base {
+
+// Returns a value with all bytes in |x| swapped, i.e. reverses the endianness.
+// MSVC has no __builtin_bswap* intrinsics, so its CRT _byteswap_* functions
+// are used there instead.
+inline uint16_t ByteSwap(uint16_t x) {
+#if defined(COMPILER_MSVC)
+  return _byteswap_ushort(x);
+#else
+  return __builtin_bswap16(x);
+#endif
+}
+
+// 32-bit overload of ByteSwap().
+inline uint32_t ByteSwap(uint32_t x) {
+#if defined(COMPILER_MSVC)
+  return _byteswap_ulong(x);
+#else
+  return __builtin_bswap32(x);
+#endif
+}
+
+// 64-bit overload of ByteSwap().
+inline uint64_t ByteSwap(uint64_t x) {
+#if defined(COMPILER_MSVC)
+  return _byteswap_uint64(x);
+#else
+  return __builtin_bswap64(x);
+#endif
+}
+
+// Converts the bytes in |x| from host order (endianness) to little endian, and
+// returns the result. These compile to a no-op on little-endian hosts.
+inline uint16_t ByteSwapToLE16(uint16_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return x;
+#else
+  return ByteSwap(x);
+#endif
+}
+// 32-bit variant of ByteSwapToLE16().
+inline uint32_t ByteSwapToLE32(uint32_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return x;
+#else
+  return ByteSwap(x);
+#endif
+}
+// 64-bit variant of ByteSwapToLE16().
+inline uint64_t ByteSwapToLE64(uint64_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return x;
+#else
+  return ByteSwap(x);
+#endif
+}
+
+// Converts the bytes in |x| from network to host order (endianness), and
+// returns the result. Equivalent to ntohs()/ntohl(), plus a 64-bit variant;
+// these compile to a no-op on big-endian hosts.
+inline uint16_t NetToHost16(uint16_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+// 32-bit variant of NetToHost16().
+inline uint32_t NetToHost32(uint32_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+// 64-bit variant of NetToHost16().
+inline uint64_t NetToHost64(uint64_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+
+// Converts the bytes in |x| from host to network order (endianness), and
+// returns the result. Equivalent to htons()/htonl(), plus a 64-bit variant;
+// these compile to a no-op on big-endian hosts.
+inline uint16_t HostToNet16(uint16_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+// 32-bit variant of HostToNet16().
+inline uint32_t HostToNet32(uint32_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+// 64-bit variant of HostToNet16().
+inline uint64_t HostToNet64(uint64_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+
+} // namespace base
+
+#endif // BASE_SYS_BYTEORDER_H_
diff --git a/libchrome/base/sys_info.cc b/libchrome/base/sys_info.cc
new file mode 100644
index 0000000..5aac9b7
--- /dev/null
+++ b/libchrome/base/sys_info.cc
@@ -0,0 +1,67 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include "base/base_switches.h"
+#include "base/command_line.h"
+#include "base/lazy_instance.h"
+#include "base/metrics/field_trial.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/sys_info_internal.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+#if !defined(OS_ANDROID)
+
+static const int kLowMemoryDeviceThresholdMB = 512;
+
+// Decides whether this machine should be treated as a low-end device: the
+// enable switch forces it on, the disable switch forces it off, and otherwise
+// any positive physical-RAM reading at or below the threshold qualifies.
+bool DetectLowEndDevice() {
+  CommandLine* command_line = CommandLine::ForCurrentProcess();
+  if (command_line->HasSwitch(switches::kEnableLowEndDeviceMode))
+    return true;
+  if (command_line->HasSwitch(switches::kDisableLowEndDeviceMode))
+    return false;
+
+  const int physical_memory_mb = SysInfo::AmountOfPhysicalMemoryMB();
+  return physical_memory_mb > 0 &&
+         physical_memory_mb <= kLowMemoryDeviceThresholdMB;
+}
+
+// Caches the DetectLowEndDevice() result so the command line and memory probe
+// run at most once per process.
+static LazyInstance<
+    internal::LazySysInfoValue<bool, DetectLowEndDevice> >::Leaky
+  g_lazy_low_end_device = LAZY_INSTANCE_INITIALIZER;
+
+// static
+bool SysInfo::IsLowEndDevice() {
+  const std::string group_name =
+      base::FieldTrialList::FindFullName("MemoryReduction");
+
+  // Low End Device Mode will be enabled if this client is assigned to
+  // one of those EnabledXXX groups.
+  if (StartsWith(group_name, "Enabled", CompareCase::SENSITIVE))
+    return true;
+
+  return g_lazy_low_end_device.Get().value();
+}
+#endif
+
+#if (!defined(OS_MACOSX) || defined(OS_IOS)) && !defined(OS_ANDROID)
+// Stub for platforms without a dedicated implementation; per the header, an
+// empty string means the model is unknown.
+std::string SysInfo::HardwareModelName() {
+  return std::string();
+}
+#endif
+
+// static
+base::TimeDelta SysInfo::Uptime() {
+  // This relies on an implementation detail of TimeTicks::Now(): its internal
+  // value happens to coincide with the system uptime in microseconds on
+  // Win/Mac/iOS/Linux/ChromeOS and Android.
+  return base::TimeDelta::FromMicroseconds(TimeTicks::Now().ToInternalValue());
+}
+
+} // namespace base
diff --git a/libchrome/base/sys_info.h b/libchrome/base/sys_info.h
new file mode 100644
index 0000000..b107477
--- /dev/null
+++ b/libchrome/base/sys_info.h
@@ -0,0 +1,144 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYS_INFO_H_
+#define BASE_SYS_INFO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Static-only utility class exposing hardware, OS, disk, and (per-platform)
+// release information about the current machine.
+class BASE_EXPORT SysInfo {
+ public:
+  // Return the number of logical processors/cores on the current machine.
+  static int NumberOfProcessors();
+
+  // Return the number of bytes of physical memory on the current machine.
+  static int64_t AmountOfPhysicalMemory();
+
+  // Return the number of bytes of current available physical memory on the
+  // machine.
+  static int64_t AmountOfAvailablePhysicalMemory();
+
+  // Return the number of bytes of virtual memory of this process. A return
+  // value of zero means that there is no limit on the available virtual
+  // memory.
+  static int64_t AmountOfVirtualMemory();
+
+  // Return the number of megabytes of physical memory on the current machine.
+  static int AmountOfPhysicalMemoryMB() {
+    return static_cast<int>(AmountOfPhysicalMemory() / 1024 / 1024);
+  }
+
+  // Return the number of megabytes of available virtual memory, or zero if it
+  // is unlimited.
+  static int AmountOfVirtualMemoryMB() {
+    return static_cast<int>(AmountOfVirtualMemory() / 1024 / 1024);
+  }
+
+  // Return the available disk space in bytes on the volume containing |path|,
+  // or -1 on failure.
+  static int64_t AmountOfFreeDiskSpace(const FilePath& path);
+
+  // Return the total disk space in bytes on the volume containing |path|, or -1
+  // on failure.
+  static int64_t AmountOfTotalDiskSpace(const FilePath& path);
+
+  // Returns system uptime.
+  static TimeDelta Uptime();
+
+  // Returns a descriptive string for the current machine model or an empty
+  // string if the machine model is unknown or an error occurred.
+  // e.g. "MacPro1,1" on Mac, or "Nexus 5" on Android. Only implemented on OS X,
+  // Android, and Chrome OS. This returns an empty string on other platforms.
+  static std::string HardwareModelName();
+
+  // Returns the name of the host operating system.
+  static std::string OperatingSystemName();
+
+  // Returns the version of the host operating system.
+  static std::string OperatingSystemVersion();
+
+  // Retrieves detailed numeric values for the OS version.
+  // TODO(port): Implement a Linux version of this method and enable the
+  // corresponding unit test.
+  // DON'T USE THIS ON THE MAC OR WINDOWS to determine the current OS release
+  // for OS version-specific feature checks and workarounds. If you must use
+  // an OS version check instead of a feature check, use the base::mac::IsOS*
+  // family from base/mac/mac_util.h, or base::win::GetVersion from
+  // base/win/windows_version.h.
+  static void OperatingSystemVersionNumbers(int32_t* major_version,
+                                            int32_t* minor_version,
+                                            int32_t* bugfix_version);
+
+  // Returns the architecture of the running operating system.
+  // Exact return value may differ across platforms.
+  // e.g. a 32-bit x86 kernel on a 64-bit capable CPU will return "x86",
+  // whereas a x86-64 kernel on the same CPU will return "x86_64"
+  static std::string OperatingSystemArchitecture();
+
+  // Avoid using this. Use base/cpu.h to get information about the CPU instead.
+  // http://crbug.com/148884
+  // Returns the CPU model name of the system. If it can not be figured out,
+  // an empty string is returned.
+  static std::string CPUModelName();
+
+  // Return the smallest amount of memory (in bytes) which the VM system will
+  // allocate.
+  static size_t VMAllocationGranularity();
+
+#if defined(OS_CHROMEOS)
+  typedef std::map<std::string, std::string> LsbReleaseMap;
+
+  // Returns the contents of /etc/lsb-release as a map.
+  static const LsbReleaseMap& GetLsbReleaseMap();
+
+  // If |key| is present in the LsbReleaseMap, sets |value| and returns true.
+  static bool GetLsbReleaseValue(const std::string& key, std::string* value);
+
+  // Convenience function for GetLsbReleaseValue("CHROMEOS_RELEASE_BOARD",...).
+  // Returns "unknown" if CHROMEOS_RELEASE_BOARD is not set.
+  static std::string GetLsbReleaseBoard();
+
+  // Returns the creation time of /etc/lsb-release. (Used to get the date and
+  // time of the Chrome OS build).
+  static Time GetLsbReleaseTime();
+
+  // Returns true when actually running in a Chrome OS environment.
+  static bool IsRunningOnChromeOS();
+
+  // Test method to force re-parsing of lsb-release.
+  static void SetChromeOSVersionInfoForTest(const std::string& lsb_release,
+                                            const Time& lsb_release_time);
+#endif  // defined(OS_CHROMEOS)
+
+#if defined(OS_ANDROID)
+  // Returns the Android build's codename.
+  static std::string GetAndroidBuildCodename();
+
+  // Returns the Android build ID.
+  static std::string GetAndroidBuildID();
+
+  static int DalvikHeapSizeMB();
+  static int DalvikHeapGrowthLimitMB();
+#endif  // defined(OS_ANDROID)
+
+  // Returns true if this is a low-end device.
+  // Low-end device refers to devices having less than 512M memory in the
+  // current implementation.
+  static bool IsLowEndDevice();
+};
+
+} // namespace base
+
+#endif // BASE_SYS_INFO_H_
diff --git a/libchrome/base/sys_info_chromeos.cc b/libchrome/base/sys_info_chromeos.cc
new file mode 100644
index 0000000..3794ed9
--- /dev/null
+++ b/libchrome/base/sys_info_chromeos.cc
@@ -0,0 +1,222 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/environment.h"
+#include "base/files/file.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/lazy_instance.h"
+#include "base/macros.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+namespace {
+
+const char* const kLinuxStandardBaseVersionKeys[] = {
+ "CHROMEOS_RELEASE_VERSION",
+ "GOOGLE_RELEASE",
+ "DISTRIB_RELEASE",
+};
+
+const char kChromeOsReleaseNameKey[] = "CHROMEOS_RELEASE_NAME";
+
+const char* const kChromeOsReleaseNames[] = {
+ "Chrome OS",
+ "Chromium OS",
+};
+
+const char kLinuxStandardBaseReleaseFile[] = "/etc/lsb-release";
+
+const char kLsbReleaseKey[] = "LSB_RELEASE";
+const char kLsbReleaseTimeKey[] = "LSB_RELEASE_TIME"; // Seconds since epoch
+
+const char kLsbReleaseSourceKey[] = "lsb-release";
+const char kLsbReleaseSourceEnv[] = "env";
+const char kLsbReleaseSourceFile[] = "file";
+
+// Parses and caches Chrome OS version information from lsb-release data
+// (environment variables or /etc/lsb-release). Intended to be held in a
+// LazyInstance so the parse happens once.
+class ChromeOSVersionInfo {
+ public:
+  ChromeOSVersionInfo() {
+    Parse();
+  }
+
+  // (Re)loads lsb-release data; also invoked by
+  // SysInfo::SetChromeOSVersionInfoForTest() after it rewrites the
+  // environment variables.
+  void Parse() {
+    lsb_release_map_.clear();
+    major_version_ = 0;
+    minor_version_ = 0;
+    bugfix_version_ = 0;
+    is_running_on_chromeos_ = false;
+
+    std::string lsb_release, lsb_release_time_str;
+    std::unique_ptr<Environment> env(Environment::Create());
+    bool parsed_from_env =
+        env->GetVar(kLsbReleaseKey, &lsb_release) &&
+        env->GetVar(kLsbReleaseTimeKey, &lsb_release_time_str);
+    if (parsed_from_env) {
+      double us = 0;
+      if (StringToDouble(lsb_release_time_str, &us))
+        lsb_release_time_ = Time::FromDoubleT(us);
+    } else {
+      // If the LSB_RELEASE and LSB_RELEASE_TIME environment variables are not
+      // set, fall back to a blocking read of the lsb_release file. This should
+      // only happen in non Chrome OS environments.
+      ThreadRestrictions::ScopedAllowIO allow_io;
+      FilePath path(kLinuxStandardBaseReleaseFile);
+      ReadFileToString(path, &lsb_release);
+      File::Info fileinfo;
+      if (GetFileInfo(path, &fileinfo))
+        lsb_release_time_ = fileinfo.creation_time;
+    }
+    ParseLsbRelease(lsb_release);
+    // For debugging:
+    lsb_release_map_[kLsbReleaseSourceKey] =
+        parsed_from_env ? kLsbReleaseSourceEnv : kLsbReleaseSourceFile;
+  }
+
+  // If |key| is present in the parsed map, copies its value into |value| and
+  // returns true; otherwise leaves |value| untouched and returns false.
+  bool GetLsbReleaseValue(const std::string& key, std::string* value) {
+    SysInfo::LsbReleaseMap::const_iterator iter = lsb_release_map_.find(key);
+    if (iter == lsb_release_map_.end())
+      return false;
+    *value = iter->second;
+    return true;
+  }
+
+  // Copies the parsed version triple into the out-parameters; each defaults
+  // to 0 when the corresponding component was absent.
+  void GetVersionNumbers(int32_t* major_version,
+                         int32_t* minor_version,
+                         int32_t* bugfix_version) {
+    *major_version = major_version_;
+    *minor_version = minor_version_;
+    *bugfix_version = bugfix_version_;
+  }
+
+  const Time& lsb_release_time() const { return lsb_release_time_; }
+  const SysInfo::LsbReleaseMap& lsb_release_map() const {
+    return lsb_release_map_;
+  }
+  bool is_running_on_chromeos() const { return is_running_on_chromeos_; }
+
+ private:
+  void ParseLsbRelease(const std::string& lsb_release) {
+    // Parse and cache lsb_release key pairs. There should only be a handful
+    // of entries so the overhead for this will be small, and it can be
+    // useful for debugging.
+    base::StringPairs pairs;
+    SplitStringIntoKeyValuePairs(lsb_release, '=', '\n', &pairs);
+    for (size_t i = 0; i < pairs.size(); ++i) {
+      std::string key, value;
+      TrimWhitespaceASCII(pairs[i].first, TRIM_ALL, &key);
+      TrimWhitespaceASCII(pairs[i].second, TRIM_ALL, &value);
+      if (key.empty())
+        continue;
+      lsb_release_map_[key] = value;
+    }
+    // Parse the version from the first matching recognized version key.
+    std::string version;
+    for (size_t i = 0; i < arraysize(kLinuxStandardBaseVersionKeys); ++i) {
+      std::string key = kLinuxStandardBaseVersionKeys[i];
+      if (GetLsbReleaseValue(key, &version) && !version.empty())
+        break;
+    }
+    StringTokenizer tokenizer(version, ".");
+    if (tokenizer.GetNext()) {
+      StringToInt(StringPiece(tokenizer.token_begin(), tokenizer.token_end()),
+                  &major_version_);
+    }
+    if (tokenizer.GetNext()) {
+      StringToInt(StringPiece(tokenizer.token_begin(), tokenizer.token_end()),
+                  &minor_version_);
+    }
+    if (tokenizer.GetNext()) {
+      StringToInt(StringPiece(tokenizer.token_begin(), tokenizer.token_end()),
+                  &bugfix_version_);
+    }
+
+    // Check release name for Chrome OS.
+    std::string release_name;
+    if (GetLsbReleaseValue(kChromeOsReleaseNameKey, &release_name)) {
+      for (size_t i = 0; i < arraysize(kChromeOsReleaseNames); ++i) {
+        if (release_name == kChromeOsReleaseNames[i]) {
+          is_running_on_chromeos_ = true;
+          break;
+        }
+      }
+    }
+  }
+
+  // Timestamp of the lsb-release data: LSB_RELEASE_TIME from the environment,
+  // or the creation time of /etc/lsb-release.
+  Time lsb_release_time_;
+  // All key/value pairs parsed from the lsb-release data.
+  SysInfo::LsbReleaseMap lsb_release_map_;
+  int32_t major_version_;
+  int32_t minor_version_;
+  int32_t bugfix_version_;
+  bool is_running_on_chromeos_;
+};
+
+// Process-wide singleton holding the parsed lsb-release data.
+static LazyInstance<ChromeOSVersionInfo>
+    g_chrome_os_version_info = LAZY_INSTANCE_INITIALIZER;
+
+// Convenience accessor for the singleton; triggers the initial parse.
+ChromeOSVersionInfo& GetChromeOSVersionInfo() {
+  return g_chrome_os_version_info.Get();
+}
+
+} // namespace
+
+// static
+// Delegates to the cached lsb-release data parsed by ChromeOSVersionInfo.
+void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
+                                            int32_t* minor_version,
+                                            int32_t* bugfix_version) {
+  GetChromeOSVersionInfo().GetVersionNumbers(major_version, minor_version,
+                                             bugfix_version);
+}
+
+// static
+// Returns the cached key/value map parsed from the lsb-release data.
+const SysInfo::LsbReleaseMap& SysInfo::GetLsbReleaseMap() {
+  return GetChromeOSVersionInfo().lsb_release_map();
+}
+
+// static
+// Looks up |key| in the cached lsb-release map; see the header for contract.
+bool SysInfo::GetLsbReleaseValue(const std::string& key, std::string* value) {
+  return GetChromeOSVersionInfo().GetLsbReleaseValue(key, value);
+}
+
+// static
+// Returns CHROMEOS_RELEASE_BOARD from the lsb-release data, or "unknown"
+// when that key is absent.
+std::string SysInfo::GetLsbReleaseBoard() {
+  const char kMachineInfoBoard[] = "CHROMEOS_RELEASE_BOARD";
+  std::string board;
+  if (GetLsbReleaseValue(kMachineInfoBoard, &board))
+    return board;
+  return "unknown";
+}
+
+// static
+// Returns the timestamp captured during parsing: LSB_RELEASE_TIME from the
+// environment, or the creation time of /etc/lsb-release.
+Time SysInfo::GetLsbReleaseTime() {
+  return GetChromeOSVersionInfo().lsb_release_time();
+}
+
+// static
+// True only when CHROMEOS_RELEASE_NAME in the lsb-release data named a
+// recognized release ("Chrome OS" or "Chromium OS").
+bool SysInfo::IsRunningOnChromeOS() {
+  return GetChromeOSVersionInfo().is_running_on_chromeos();
+}
+
+// static
+// Injects fake lsb-release data via the environment and forces a re-parse so
+// tests can simulate arbitrary Chrome OS versions.
+void SysInfo::SetChromeOSVersionInfoForTest(const std::string& lsb_release,
+                                            const Time& lsb_release_time) {
+  std::unique_ptr<Environment> env(Environment::Create());
+  env->SetVar(kLsbReleaseKey, lsb_release);
+  env->SetVar(kLsbReleaseTimeKey,
+              DoubleToString(lsb_release_time.ToDoubleT()));
+  g_chrome_os_version_info.Get().Parse();
+}
+
+} // namespace base
diff --git a/libchrome/base/sys_info_internal.h b/libchrome/base/sys_info_internal.h
new file mode 100644
index 0000000..a179219
--- /dev/null
+++ b/libchrome/base/sys_info_internal.h
@@ -0,0 +1,34 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYS_INFO_INTERNAL_H_
+#define BASE_SYS_INFO_INTERNAL_H_
+
+#include "base/macros.h"
+
+namespace base {
+
+namespace internal {
+
+// Holds the result of calling F() exactly once, at construction time. Pair
+// with base::LazyInstance to compute-and-cache an expensive system probe the
+// first time it is needed.
+template<typename T, T (*F)(void)>
+class LazySysInfoValue {
+ public:
+  LazySysInfoValue()
+      : value_(F()) { }
+
+  ~LazySysInfoValue() { }
+
+  // Returns the cached value.
+  T value() { return value_; }
+
+ private:
+  // Result of F(), fixed for the lifetime of this object.
+  const T value_;
+
+  DISALLOW_COPY_AND_ASSIGN(LazySysInfoValue);
+};
+
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_SYS_INFO_INTERNAL_H_
diff --git a/libchrome/base/sys_info_linux.cc b/libchrome/base/sys_info_linux.cc
new file mode 100644
index 0000000..298d245
--- /dev/null
+++ b/libchrome/base/sys_info_linux.cc
@@ -0,0 +1,77 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <limits>
+#include <sstream>
+
+#include "base/files/file_util.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/sys_info_internal.h"
+#include "build/build_config.h"
+
+namespace {
+
+// Returns page-count * page-size in bytes for the given sysconf() counter
+// (e.g. _SC_PHYS_PAGES), or 0 if either sysconf() call fails.
+int64_t AmountOfMemory(int pages_name) {
+  long page_count = sysconf(pages_name);
+  long bytes_per_page = sysconf(_SC_PAGESIZE);
+  if (page_count == -1 || bytes_per_page == -1) {
+    NOTREACHED();
+    return 0;
+  }
+  return static_cast<int64_t>(page_count) * bytes_per_page;
+}
+
+// Physical-memory probe used to seed the lazy cache below.
+int64_t AmountOfPhysicalMemory() {
+  return AmountOfMemory(_SC_PHYS_PAGES);
+}
+
+// Caches the physical-memory amount for the lifetime of the process.
+base::LazyInstance<
+    base::internal::LazySysInfoValue<int64_t, AmountOfPhysicalMemory>>::Leaky
+    g_lazy_physical_memory = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+namespace base {
+
+// static
+// Queried fresh on every call: availability changes constantly.
+int64_t SysInfo::AmountOfAvailablePhysicalMemory() {
+  return AmountOfMemory(_SC_AVPHYS_PAGES);
+}
+
+// static
+// Served from the lazy cache; total RAM cannot change while running.
+int64_t SysInfo::AmountOfPhysicalMemory() {
+  return g_lazy_physical_memory.Get().value();
+}
+
+// static
+// Scans /proc/cpuinfo for the first model-name line and returns the text
+// after its ": " separator, or an empty string if no such line is found.
+std::string SysInfo::CPUModelName() {
+#if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL)
+  const char kCpuModelPrefix[] = "Hardware";
+#else
+  const char kCpuModelPrefix[] = "model name";
+#endif
+  std::string contents;
+  ReadFileToString(FilePath("/proc/cpuinfo"), &contents);
+  DCHECK(!contents.empty());
+  if (!contents.empty()) {
+    // Hoisted out of the loop; the prefix length is constant.
+    const size_t prefix_len = strlen(kCpuModelPrefix);
+    std::istringstream iss(contents);
+    std::string line;
+    while (std::getline(iss, line)) {
+      if (line.compare(0, prefix_len, kCpuModelPrefix) == 0) {
+        size_t pos = line.find(": ");
+        // Guard against a malformed line with no ": " separator; the old
+        // code would compute npos + 2 and return garbage.
+        if (pos != std::string::npos)
+          return line.substr(pos + 2);
+      }
+    }
+  }
+  return std::string();
+}
+
+} // namespace base
diff --git a/libchrome/base/sys_info_mac.mm b/libchrome/base/sys_info_mac.mm
new file mode 100644
index 0000000..102d99f
--- /dev/null
+++ b/libchrome/base/sys_info_mac.mm
@@ -0,0 +1,126 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include <ApplicationServices/ApplicationServices.h>
+#include <CoreServices/CoreServices.h>
+#import <Foundation/Foundation.h>
+#include <mach/mach_host.h>
+#include <mach/mach_init.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/sysctl.h>
+#include <sys/types.h>
+
+#include "base/logging.h"
+#include "base/mac/mac_util.h"
+#include "base/mac/scoped_mach_port.h"
+#import "base/mac/sdk_forward_declarations.h"
+#include "base/macros.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+
+// static
+// Hard-coded; the OS name string does not vary across Mac OS X versions.
+std::string SysInfo::OperatingSystemName() {
+  return "Mac OS X";
+}
+
+// static
+// Formats the numeric version triple, e.g. "10.11.2".
+std::string SysInfo::OperatingSystemVersion() {
+  int32_t major, minor, bugfix;
+  OperatingSystemVersionNumbers(&major, &minor, &bugfix);
+  return base::StringPrintf("%d.%d.%d", major, minor, bugfix);
+}
+
+// static
+// Prefers -[NSProcessInfo operatingSystemVersion] when the 10.10 SDK is
+// available, falling back to the deprecated Gestalt() calls otherwise. Note
+// the unusual shape below: the `else`/`if (true)` braces pair up across the
+// preprocessor branches, so exactly one of the two paths runs.
+void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
+                                            int32_t* minor_version,
+                                            int32_t* bugfix_version) {
+#if defined(MAC_OS_X_VERSION_10_10)
+  NSProcessInfo* processInfo = [NSProcessInfo processInfo];
+  if ([processInfo respondsToSelector:@selector(operatingSystemVersion)]) {
+    NSOperatingSystemVersion version = [processInfo operatingSystemVersion];
+    *major_version = version.majorVersion;
+    *minor_version = version.minorVersion;
+    *bugfix_version = version.patchVersion;
+  } else {
+#else
+  // Android buildbots are too old and have trouble using the forward
+  // declarations for some reason. Conditionally-compile the above block
+  // only when building on a more modern version of OS X.
+  if (true) {
+#endif
+    // -[NSProcessInfo operatingSystemVersion] is documented available in 10.10.
+    // It's also available via a private API since 10.9.2. For the remaining
+    // cases in 10.9, rely on ::Gestalt(..). Since this code is only needed for
+    // 10.9.0 and 10.9.1 and uses the recommended replacement thereafter,
+    // suppress the warning for this fallback case.
+    DCHECK(base::mac::IsOSMavericks());
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+    Gestalt(gestaltSystemVersionMajor,
+            reinterpret_cast<SInt32*>(major_version));
+    Gestalt(gestaltSystemVersionMinor,
+            reinterpret_cast<SInt32*>(minor_version));
+    Gestalt(gestaltSystemVersionBugFix,
+            reinterpret_cast<SInt32*>(bugfix_version));
+#pragma clang diagnostic pop
+  }
+}
+
+// static
+// Reads max_mem from the Mach host port's HOST_BASIC_INFO; returns 0 if the
+// host_info() call fails.
+int64_t SysInfo::AmountOfPhysicalMemory() {
+  struct host_basic_info hostinfo;
+  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+  base::mac::ScopedMachSendRight host(mach_host_self());
+  int result = host_info(host.get(),
+                         HOST_BASIC_INFO,
+                         reinterpret_cast<host_info_t>(&hostinfo),
+                         &count);
+  if (result != KERN_SUCCESS) {
+    NOTREACHED();
+    return 0;
+  }
+  DCHECK_EQ(HOST_BASIC_INFO_COUNT, count);
+  return static_cast<int64_t>(hostinfo.max_mem);
+}
+
+// static
+// Computes (free_count - speculative_count) * PAGE_SIZE from the host VM
+// statistics; returns 0 if the host_statistics() call fails.
+int64_t SysInfo::AmountOfAvailablePhysicalMemory() {
+  base::mac::ScopedMachSendRight host(mach_host_self());
+  vm_statistics_data_t vm_info;
+  mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
+
+  if (host_statistics(host.get(),
+                      HOST_VM_INFO,
+                      reinterpret_cast<host_info_t>(&vm_info),
+                      &count) != KERN_SUCCESS) {
+    NOTREACHED();
+    return 0;
+  }
+
+  return static_cast<int64_t>(vm_info.free_count - vm_info.speculative_count) *
+      PAGE_SIZE;
+}
+
+// static
+// Queries the NUL-terminated, human-readable CPU brand string via sysctl,
+// e.g. "Intel(R) Core(TM) i7"; returns an empty string on failure.
+std::string SysInfo::CPUModelName() {
+  char brand[256];
+  size_t brand_len = sizeof(brand);
+  if (sysctlbyname("machdep.cpu.brand_string", brand, &brand_len, NULL, 0) != 0)
+    return std::string();
+  return brand;
+}
+
+// Returns the Mac hardware model identifier (e.g. "MacBookPro11,1"), or an
+// empty string on failure.
+std::string SysInfo::HardwareModelName() {
+  char model[256];
+  size_t len = sizeof(model);
+  if (sysctlbyname("hw.model", model, &len, NULL, 0) != 0)
+    return std::string();
+  // On success |len| counts the terminating NUL that sysctlbyname() writes,
+  // so constructing from (model, 0, len) would embed a trailing '\0' in the
+  // returned string. Rely on the NUL terminator instead.
+  return std::string(model);
+}
+
+} // namespace base
diff --git a/libchrome/base/sys_info_posix.cc b/libchrome/base/sys_info_posix.cc
new file mode 100644
index 0000000..5d1c450
--- /dev/null
+++ b/libchrome/base/sys_info_posix.cc
@@ -0,0 +1,170 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/param.h>
+#include <sys/resource.h>
+#include <sys/utsname.h>
+#include <unistd.h>
+
+#include "base/files/file_util.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/sys_info_internal.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include <sys/vfs.h>
+#define statvfs statfs // Android uses a statvfs-like statfs struct and call.
+#else
+#include <sys/statvfs.h>
+#endif
+
+namespace {
+
+#if !defined(OS_OPENBSD)
+// Counts the logical processors configured on this machine.
+int NumberOfProcessors() {
+  // sysconf() reports "logical" (not "physical") processors on both Mac and
+  // Linux, and _SC_NPROCESSORS_CONF gives the maximum available count rather
+  // than the currently-online one. The kernel may take processors offline
+  // intermittently to save power when load is low, but code sizing thread
+  // pools should plan for the maximum it can get: the kernel brings
+  // processors back online when enough threads are waiting to run.
+  long num_cpus = sysconf(_SC_NPROCESSORS_CONF);
+  if (num_cpus == -1) {
+    NOTREACHED();
+    // Pretend we have a single processor rather than propagate the failure.
+    return 1;
+  }
+
+  return static_cast<int>(num_cpus);
+}
+
+// Caches the processor count for the lifetime of the process.
+base::LazyInstance<
+    base::internal::LazySysInfoValue<int, NumberOfProcessors> >::Leaky
+    g_lazy_number_of_processors = LAZY_INSTANCE_INITIALIZER;
+#endif
+
+// Reports the RLIMIT_DATA soft limit in bytes; 0 means "unlimited", and 0 is
+// also returned if getrlimit() fails.
+int64_t AmountOfVirtualMemory() {
+  struct rlimit limit;
+  if (getrlimit(RLIMIT_DATA, &limit) != 0) {
+    NOTREACHED();
+    return 0;
+  }
+  if (limit.rlim_cur == RLIM_INFINITY)
+    return 0;
+  return limit.rlim_cur;
+}
+
+// Caches the virtual-memory limit; it is read once per process.
+base::LazyInstance<
+    base::internal::LazySysInfoValue<int64_t, AmountOfVirtualMemory>>::Leaky
+    g_lazy_virtual_memory = LAZY_INSTANCE_INITIALIZER;
+
+// Fills |available_bytes| and/or |total_bytes| (either may be null) for the
+// filesystem containing |path|. Returns false when statvfs() fails.
+bool GetDiskSpaceInfo(const base::FilePath& path,
+                      int64_t* available_bytes,
+                      int64_t* total_bytes) {
+  struct statvfs fs_stats;
+  if (HANDLE_EINTR(statvfs(path.value().c_str(), &fs_stats)) != 0)
+    return false;
+
+  if (available_bytes) {
+    *available_bytes =
+        static_cast<int64_t>(fs_stats.f_bavail) * fs_stats.f_frsize;
+  }
+  if (total_bytes)
+    *total_bytes = static_cast<int64_t>(fs_stats.f_blocks) * fs_stats.f_frsize;
+  return true;
+}
+
+} // namespace
+
+namespace base {
+
+#if !defined(OS_OPENBSD)
+// Served from the lazy cache computed in the anonymous namespace above.
+int SysInfo::NumberOfProcessors() {
+  return g_lazy_number_of_processors.Get().value();
+}
+#endif
+
+// static
+// Served from the lazy cache; the RLIMIT_DATA query runs at most once.
+int64_t SysInfo::AmountOfVirtualMemory() {
+  return g_lazy_virtual_memory.Get().value();
+}
+
+// static
+// Bytes available to non-root users on |path|'s volume, or -1 on failure.
+int64_t SysInfo::AmountOfFreeDiskSpace(const FilePath& path) {
+  base::ThreadRestrictions::AssertIOAllowed();
+
+  int64_t available = 0;
+  return GetDiskSpaceInfo(path, &available, nullptr) ? available : -1;
+}
+
+// static
+// Total size in bytes of |path|'s volume, or -1 on failure.
+int64_t SysInfo::AmountOfTotalDiskSpace(const FilePath& path) {
+  base::ThreadRestrictions::AssertIOAllowed();
+
+  int64_t total = 0;
+  return GetDiskSpaceInfo(path, nullptr, &total) ? total : -1;
+}
+
+#if !defined(OS_MACOSX) && !defined(OS_ANDROID)
+// static
+// Returns uname()'s sysname field (the kernel name, e.g. "Linux"), or an
+// empty string if uname() fails.
+std::string SysInfo::OperatingSystemName() {
+  struct utsname info;
+  if (uname(&info) < 0) {
+    NOTREACHED();
+    return std::string();
+  }
+  return std::string(info.sysname);
+}
+#endif
+
+#if !defined(OS_MACOSX) && !defined(OS_ANDROID)
+// static
+// Returns uname()'s release field (the kernel release string), or an empty
+// string if uname() fails.
+std::string SysInfo::OperatingSystemVersion() {
+  struct utsname info;
+  if (uname(&info) < 0) {
+    NOTREACHED();
+    return std::string();
+  }
+  return std::string(info.release);
+}
+#endif
+
+// static
+// Returns uname()'s machine field, canonicalizing the common aliases:
+// i386/i486/i586/i686 become "x86" and amd64 becomes "x86_64". Returns an
+// empty string if uname() fails.
+std::string SysInfo::OperatingSystemArchitecture() {
+  struct utsname info;
+  if (uname(&info) < 0) {
+    NOTREACHED();
+    return std::string();
+  }
+  const std::string machine(info.machine);
+  if (machine == "amd64")
+    return "x86_64";
+  if (machine == "i386" || machine == "i486" || machine == "i586" ||
+      machine == "i686") {
+    return "x86";
+  }
+  return machine;
+}
+
+// static
+// On POSIX the allocation granularity is simply the VM page size.
+size_t SysInfo::VMAllocationGranularity() {
+  return getpagesize();
+}
+
+} // namespace base
diff --git a/libchrome/base/sys_info_unittest.cc b/libchrome/base/sys_info_unittest.cc
new file mode 100644
index 0000000..0231df6
--- /dev/null
+++ b/libchrome/base/sys_info_unittest.cc
@@ -0,0 +1,159 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+
+#include "base/environment.h"
+#include "base/files/file_util.h"
+#include "base/sys_info.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+typedef PlatformTest SysInfoTest;
+using base::FilePath;
+
+TEST_F(SysInfoTest, NumProcs) {
+ // We aren't actually testing that it's correct, just that it's sane.
+ EXPECT_GE(base::SysInfo::NumberOfProcessors(), 1);
+}
+
+TEST_F(SysInfoTest, AmountOfMem) {
+ // We aren't actually testing that it's correct, just that it's sane.
+ EXPECT_GT(base::SysInfo::AmountOfPhysicalMemory(), 0);
+ EXPECT_GT(base::SysInfo::AmountOfPhysicalMemoryMB(), 0);
+ // The maximal amount of virtual memory can be zero which means unlimited.
+ EXPECT_GE(base::SysInfo::AmountOfVirtualMemory(), 0);
+}
+
+TEST_F(SysInfoTest, AmountOfFreeDiskSpace) {
+ // We aren't actually testing that it's correct, just that it's sane.
+ FilePath tmp_path;
+ ASSERT_TRUE(base::GetTempDir(&tmp_path));
+ EXPECT_GE(base::SysInfo::AmountOfFreeDiskSpace(tmp_path), 0)
+ << tmp_path.value();
+}
+
+TEST_F(SysInfoTest, AmountOfTotalDiskSpace) {
+ // We aren't actually testing that it's correct, just that it's sane.
+ FilePath tmp_path;
+ ASSERT_TRUE(base::GetTempDir(&tmp_path));
+ EXPECT_GT(base::SysInfo::AmountOfTotalDiskSpace(tmp_path), 0)
+ << tmp_path.value();
+}
+
+#if defined(OS_WIN) || defined(OS_MACOSX)
+TEST_F(SysInfoTest, OperatingSystemVersionNumbers) {
+ int32_t os_major_version = -1;
+ int32_t os_minor_version = -1;
+ int32_t os_bugfix_version = -1;
+ base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+ &os_minor_version,
+ &os_bugfix_version);
+ EXPECT_GT(os_major_version, -1);
+ EXPECT_GT(os_minor_version, -1);
+ EXPECT_GT(os_bugfix_version, -1);
+}
+#endif
+
+TEST_F(SysInfoTest, Uptime) {
+ base::TimeDelta up_time_1 = base::SysInfo::Uptime();
+ // Uptime() is implemented internally using TimeTicks::Now(), which documents
+ // system resolution as being 1-15ms. Sleep a little longer than that.
+ base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(20));
+ base::TimeDelta up_time_2 = base::SysInfo::Uptime();
+ EXPECT_GT(up_time_1.InMicroseconds(), 0);
+ EXPECT_GT(up_time_2.InMicroseconds(), up_time_1.InMicroseconds());
+}
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+TEST_F(SysInfoTest, HardwareModelName) {
+ std::string hardware_model = base::SysInfo::HardwareModelName();
+ EXPECT_FALSE(hardware_model.empty());
+}
+#endif
+
+#if defined(OS_CHROMEOS)
+
+TEST_F(SysInfoTest, GoogleChromeOSVersionNumbers) {
+ int32_t os_major_version = -1;
+ int32_t os_minor_version = -1;
+ int32_t os_bugfix_version = -1;
+ const char kLsbRelease[] =
+ "FOO=1234123.34.5\n"
+ "CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
+ base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
+ base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+ &os_minor_version,
+ &os_bugfix_version);
+ EXPECT_EQ(1, os_major_version);
+ EXPECT_EQ(2, os_minor_version);
+ EXPECT_EQ(3, os_bugfix_version);
+}
+
+TEST_F(SysInfoTest, GoogleChromeOSVersionNumbersFirst) {
+ int32_t os_major_version = -1;
+ int32_t os_minor_version = -1;
+ int32_t os_bugfix_version = -1;
+ const char kLsbRelease[] =
+ "CHROMEOS_RELEASE_VERSION=1.2.3.4\n"
+ "FOO=1234123.34.5\n";
+ base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
+ base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+ &os_minor_version,
+ &os_bugfix_version);
+ EXPECT_EQ(1, os_major_version);
+ EXPECT_EQ(2, os_minor_version);
+ EXPECT_EQ(3, os_bugfix_version);
+}
+
+TEST_F(SysInfoTest, GoogleChromeOSNoVersionNumbers) {
+ int32_t os_major_version = -1;
+ int32_t os_minor_version = -1;
+ int32_t os_bugfix_version = -1;
+ const char kLsbRelease[] = "FOO=1234123.34.5\n";
+ base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
+ base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+ &os_minor_version,
+ &os_bugfix_version);
+ EXPECT_EQ(0, os_major_version);
+ EXPECT_EQ(0, os_minor_version);
+ EXPECT_EQ(0, os_bugfix_version);
+}
+
+TEST_F(SysInfoTest, GoogleChromeOSLsbReleaseTime) {
+ const char kLsbRelease[] = "CHROMEOS_RELEASE_VERSION=1.2.3.4";
+ // Use a fake time that can be safely displayed as a string.
+ const base::Time lsb_release_time(base::Time::FromDoubleT(12345.6));
+ base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, lsb_release_time);
+ base::Time parsed_lsb_release_time = base::SysInfo::GetLsbReleaseTime();
+ EXPECT_DOUBLE_EQ(lsb_release_time.ToDoubleT(),
+ parsed_lsb_release_time.ToDoubleT());
+}
+
+TEST_F(SysInfoTest, IsRunningOnChromeOS) {
+ base::SysInfo::SetChromeOSVersionInfoForTest("", base::Time());
+ EXPECT_FALSE(base::SysInfo::IsRunningOnChromeOS());
+
+ const char kLsbRelease1[] =
+ "CHROMEOS_RELEASE_NAME=Non Chrome OS\n"
+ "CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
+ base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, base::Time());
+ EXPECT_FALSE(base::SysInfo::IsRunningOnChromeOS());
+
+ const char kLsbRelease2[] =
+ "CHROMEOS_RELEASE_NAME=Chrome OS\n"
+ "CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
+ base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, base::Time());
+ EXPECT_TRUE(base::SysInfo::IsRunningOnChromeOS());
+
+ const char kLsbRelease3[] =
+ "CHROMEOS_RELEASE_NAME=Chromium OS\n";
+ base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease3, base::Time());
+ EXPECT_TRUE(base::SysInfo::IsRunningOnChromeOS());
+}
+
+#endif // OS_CHROMEOS
diff --git a/libchrome/base/task/cancelable_task_tracker.cc b/libchrome/base/task/cancelable_task_tracker.cc
new file mode 100644
index 0000000..6f39410
--- /dev/null
+++ b/libchrome/base/task/cancelable_task_tracker.cc
@@ -0,0 +1,191 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/cancelable_task_tracker.h"
+
+#include <stddef.h>
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/compiler_specific.h"
+#include "base/location.h"
+#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/cancellation_flag.h"
+#include "base/task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+using base::Bind;
+using base::CancellationFlag;
+using base::Closure;
+using base::hash_map;
+using base::TaskRunner;
+
+namespace {
+
+void RunIfNotCanceled(const CancellationFlag* flag, const Closure& task) {
+ if (!flag->IsSet())
+ task.Run();
+}
+
+void RunIfNotCanceledThenUntrack(const CancellationFlag* flag,
+ const Closure& task,
+ const Closure& untrack) {
+ RunIfNotCanceled(flag, task);
+ untrack.Run();
+}
+
+bool IsCanceled(const CancellationFlag* flag,
+ base::ScopedClosureRunner* /*cleanup_runner*/) {
+ return flag->IsSet();
+}
+
+void RunAndDeleteFlag(const Closure& closure, const CancellationFlag* flag) {
+ closure.Run();
+ delete flag;
+}
+
+void RunOrPostToTaskRunner(TaskRunner* task_runner, const Closure& closure) {
+ if (task_runner->RunsTasksOnCurrentThread())
+ closure.Run();
+ else
+ task_runner->PostTask(FROM_HERE, closure);
+}
+
+} // namespace
+
+namespace base {
+
+// static
+const CancelableTaskTracker::TaskId CancelableTaskTracker::kBadTaskId = 0;
+
+CancelableTaskTracker::CancelableTaskTracker()
+ : next_id_(1),weak_factory_(this) {}
+
+CancelableTaskTracker::~CancelableTaskTracker() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ TryCancelAll();
+}
+
+CancelableTaskTracker::TaskId CancelableTaskTracker::PostTask(
+ TaskRunner* task_runner,
+ const tracked_objects::Location& from_here,
+ const Closure& task) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ return PostTaskAndReply(task_runner, from_here, task, Bind(&base::DoNothing));
+}
+
+CancelableTaskTracker::TaskId CancelableTaskTracker::PostTaskAndReply(
+ TaskRunner* task_runner,
+ const tracked_objects::Location& from_here,
+ const Closure& task,
+ const Closure& reply) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // We need a MessageLoop to run reply.
+ DCHECK(base::ThreadTaskRunnerHandle::IsSet());
+
+ // Owned by reply callback below.
+ CancellationFlag* flag = new CancellationFlag();
+
+ TaskId id = next_id_;
+ next_id_++; // int64_t is big enough that we ignore the potential overflow.
+
+ const Closure& untrack_closure =
+ Bind(&CancelableTaskTracker::Untrack, weak_factory_.GetWeakPtr(), id);
+ bool success =
+ task_runner->PostTaskAndReply(from_here,
+ Bind(&RunIfNotCanceled, flag, task),
+ Bind(&RunIfNotCanceledThenUntrack,
+ base::Owned(flag),
+ reply,
+ untrack_closure));
+
+ if (!success)
+ return kBadTaskId;
+
+ Track(id, flag);
+ return id;
+}
+
+CancelableTaskTracker::TaskId CancelableTaskTracker::NewTrackedTaskId(
+ IsCanceledCallback* is_canceled_cb) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(base::ThreadTaskRunnerHandle::IsSet());
+
+ TaskId id = next_id_;
+ next_id_++; // int64_t is big enough that we ignore the potential overflow.
+
+ // Will be deleted by |untrack_and_delete_flag| after Untrack().
+ CancellationFlag* flag = new CancellationFlag();
+
+ Closure untrack_and_delete_flag = Bind(
+ &RunAndDeleteFlag,
+ Bind(&CancelableTaskTracker::Untrack, weak_factory_.GetWeakPtr(), id),
+ flag);
+
+ // Will always run |untrack_and_delete_flag| on current MessageLoop.
+ base::ScopedClosureRunner* untrack_and_delete_flag_runner =
+ new base::ScopedClosureRunner(
+ Bind(&RunOrPostToTaskRunner,
+ RetainedRef(base::ThreadTaskRunnerHandle::Get()),
+ untrack_and_delete_flag));
+
+ *is_canceled_cb =
+ Bind(&IsCanceled, flag, base::Owned(untrack_and_delete_flag_runner));
+
+ Track(id, flag);
+ return id;
+}
+
+void CancelableTaskTracker::TryCancel(TaskId id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ hash_map<TaskId, CancellationFlag*>::const_iterator it = task_flags_.find(id);
+ if (it == task_flags_.end()) {
+ // Two possibilities:
+ //
+ // 1. The task has already been untracked.
+ // 2. The TaskId is bad or unknown.
+ //
+ // Since this function is best-effort, it's OK to ignore these.
+ return;
+ }
+ it->second->Set();
+}
+
+void CancelableTaskTracker::TryCancelAll() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ for (hash_map<TaskId, CancellationFlag*>::const_iterator it =
+ task_flags_.begin();
+ it != task_flags_.end();
+ ++it) {
+ it->second->Set();
+ }
+}
+
+bool CancelableTaskTracker::HasTrackedTasks() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return !task_flags_.empty();
+}
+
+void CancelableTaskTracker::Track(TaskId id, CancellationFlag* flag) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ bool success = task_flags_.insert(std::make_pair(id, flag)).second;
+ DCHECK(success);
+}
+
+void CancelableTaskTracker::Untrack(TaskId id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ size_t num = task_flags_.erase(id);
+ DCHECK_EQ(1u, num);
+}
+
+} // namespace base
diff --git a/libchrome/base/task/cancelable_task_tracker.h b/libchrome/base/task/cancelable_task_tracker.h
new file mode 100644
index 0000000..86b5a45
--- /dev/null
+++ b/libchrome/base/task/cancelable_task_tracker.h
@@ -0,0 +1,142 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// CancelableTaskTracker posts tasks (in the form of a Closure) to a
+// TaskRunner, and is able to cancel the task later if it's not needed
+// anymore. On destruction, CancelableTaskTracker will cancel all
+// tracked tasks.
+//
+// Each cancelable task can be associated with a reply (also a Closure). After
+// the task is run on the TaskRunner, |reply| will be posted back to
+// originating TaskRunner.
+//
+// NOTE:
+//
+// CancelableCallback (base/cancelable_callback.h) and WeakPtr binding are
+// preferred solutions for canceling a task. However, they don't support
+// cancelation from another thread. This is sometimes a performance critical
+// requirement. E.g. We need to cancel database lookup task on DB thread when
+// user changes inputted text. If it is performance critical to do a best effort
+// cancelation of a task, then CancelableTaskTracker is appropriate,
+// otherwise use one of the other mechanisms.
+//
+// THREAD-SAFETY:
+//
+// 1. CancelableTaskTracker objects are not thread safe. They must
+// be created, used, and destroyed on the originating thread that posts the
+// task. It's safe to destroy a CancelableTaskTracker while there
+// are outstanding tasks. This is commonly used to cancel all outstanding
+// tasks.
+//
+// 2. Both task and reply are deleted on the originating thread.
+//
+// 3. IsCanceledCallback is thread safe and can be run or deleted on any
+// thread.
+#ifndef BASE_TASK_CANCELABLE_TASK_TRACKER_H_
+#define BASE_TASK_CANCELABLE_TASK_TRACKER_H_
+
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/task_runner_util.h"
+#include "base/threading/thread_checker.h"
+
+namespace tracked_objects {
+class Location;
+} // namespace tracked_objects
+
+namespace base {
+
+class CancellationFlag;
+class TaskRunner;
+
+class BASE_EXPORT CancelableTaskTracker {
+ public:
+ // All values except kBadTaskId are valid.
+ typedef int64_t TaskId;
+ static const TaskId kBadTaskId;
+
+ typedef base::Callback<bool()> IsCanceledCallback;
+
+ CancelableTaskTracker();
+
+ // Cancels all tracked tasks.
+ ~CancelableTaskTracker();
+
+ TaskId PostTask(base::TaskRunner* task_runner,
+ const tracked_objects::Location& from_here,
+ const base::Closure& task);
+
+ TaskId PostTaskAndReply(base::TaskRunner* task_runner,
+ const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ const base::Closure& reply);
+
+ template <typename TaskReturnType, typename ReplyArgType>
+ TaskId PostTaskAndReplyWithResult(
+ base::TaskRunner* task_runner,
+ const tracked_objects::Location& from_here,
+ const base::Callback<TaskReturnType(void)>& task,
+ const base::Callback<void(ReplyArgType)>& reply) {
+ TaskReturnType* result = new TaskReturnType();
+ return PostTaskAndReply(
+ task_runner,
+ from_here,
+ base::Bind(&base::internal::ReturnAsParamAdapter<TaskReturnType>,
+ task,
+ base::Unretained(result)),
+ base::Bind(&base::internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
+ reply,
+ base::Owned(result)));
+ }
+
+ // Creates a tracked TaskId and an associated IsCanceledCallback. Client can
+ // later call TryCancel() with the returned TaskId, and run |is_canceled_cb|
+ // from any thread to check whether the TaskId is canceled.
+ //
+ // The returned task ID is tracked until the last copy of
+ // |is_canceled_cb| is destroyed.
+ //
+ // Note. This function is used to address some special cancelation requirement
+ // in existing code. You SHOULD NOT need this function in new code.
+ TaskId NewTrackedTaskId(IsCanceledCallback* is_canceled_cb);
+
+ // After calling this function, |task| and |reply| will not run. If the
+ // cancelation happens when |task| is running or has finished running, |reply|
+ // will not run. If |reply| is running or has finished running, cancellation
+ // is a noop.
+ //
+ // Note. It's OK to cancel a |task| more than once. The later calls are
+ // noops.
+ void TryCancel(TaskId id);
+
+ // It's OK to call this function more than once. The later calls are
+ // noops.
+ void TryCancelAll();
+
+ // Returns true iff there are in-flight tasks that are still being
+ // tracked.
+ bool HasTrackedTasks() const;
+
+ private:
+ void Track(TaskId id, base::CancellationFlag* flag);
+ void Untrack(TaskId id);
+
+ base::hash_map<TaskId, base::CancellationFlag*> task_flags_;
+
+ TaskId next_id_;
+ base::ThreadChecker thread_checker_;
+
+ base::WeakPtrFactory<CancelableTaskTracker> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(CancelableTaskTracker);
+};
+
+} // namespace base
+
+#endif // BASE_TASK_CANCELABLE_TASK_TRACKER_H_
diff --git a/libchrome/base/task/cancelable_task_tracker_unittest.cc b/libchrome/base/task/cancelable_task_tracker_unittest.cc
new file mode 100644
index 0000000..ff9e40b
--- /dev/null
+++ b/libchrome/base/task/cancelable_task_tracker_unittest.cc
@@ -0,0 +1,424 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/cancelable_task_tracker.h"
+
+#include <cstddef>
+#include <deque>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/test/test_simple_task_runner.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class CancelableTaskTrackerTest : public testing::Test {
+ protected:
+ ~CancelableTaskTrackerTest() override { RunCurrentLoopUntilIdle(); }
+
+ void RunCurrentLoopUntilIdle() {
+ RunLoop run_loop;
+ run_loop.RunUntilIdle();
+ }
+
+ CancelableTaskTracker task_tracker_;
+
+ private:
+ // Needed by CancelableTaskTracker methods.
+ MessageLoop message_loop_;
+};
+
+void AddFailureAt(const tracked_objects::Location& location) {
+ ADD_FAILURE_AT(location.file_name(), location.line_number());
+}
+
+// Returns a closure that fails if run.
+Closure MakeExpectedNotRunClosure(const tracked_objects::Location& location) {
+ return Bind(&AddFailureAt, location);
+}
+
+// A helper class for MakeExpectedRunClosure() that fails if it is
+// destroyed without Run() having been called. This class may be used
+// from multiple threads as long as Run() is called at most once
+// before destruction.
+class RunChecker {
+ public:
+ explicit RunChecker(const tracked_objects::Location& location)
+ : location_(location), called_(false) {}
+
+ ~RunChecker() {
+ if (!called_) {
+ ADD_FAILURE_AT(location_.file_name(), location_.line_number());
+ }
+ }
+
+ void Run() { called_ = true; }
+
+ private:
+ tracked_objects::Location location_;
+ bool called_;
+};
+
+// Returns a closure that fails on destruction if it hasn't been run.
+Closure MakeExpectedRunClosure(const tracked_objects::Location& location) {
+ return Bind(&RunChecker::Run, Owned(new RunChecker(location)));
+}
+
+} // namespace
+
+// With the task tracker, post a task, a task with a reply, and get a
+// new task id without canceling any of them. The tasks and the reply
+// should run and the "is canceled" callback should return false.
+TEST_F(CancelableTaskTrackerTest, NoCancel) {
+ Thread worker_thread("worker thread");
+ ASSERT_TRUE(worker_thread.Start());
+
+ ignore_result(task_tracker_.PostTask(worker_thread.task_runner().get(),
+ FROM_HERE,
+ MakeExpectedRunClosure(FROM_HERE)));
+
+ ignore_result(task_tracker_.PostTaskAndReply(
+ worker_thread.task_runner().get(), FROM_HERE,
+ MakeExpectedRunClosure(FROM_HERE), MakeExpectedRunClosure(FROM_HERE)));
+
+ CancelableTaskTracker::IsCanceledCallback is_canceled;
+ ignore_result(task_tracker_.NewTrackedTaskId(&is_canceled));
+
+ worker_thread.Stop();
+
+ RunCurrentLoopUntilIdle();
+
+ EXPECT_FALSE(is_canceled.Run());
+}
+
+// Post a task with the task tracker but cancel it before running the
+// task runner. The task should not run.
+TEST_F(CancelableTaskTrackerTest, CancelPostedTask) {
+ scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+ new TestSimpleTaskRunner());
+
+ CancelableTaskTracker::TaskId task_id = task_tracker_.PostTask(
+ test_task_runner.get(), FROM_HERE, MakeExpectedNotRunClosure(FROM_HERE));
+ EXPECT_NE(CancelableTaskTracker::kBadTaskId, task_id);
+
+ EXPECT_EQ(1U, test_task_runner->GetPendingTasks().size());
+
+ task_tracker_.TryCancel(task_id);
+
+ test_task_runner->RunUntilIdle();
+}
+
+// Post a task with reply with the task tracker and cancel it before
+// running the task runner. Neither the task nor the reply should
+// run.
+TEST_F(CancelableTaskTrackerTest, CancelPostedTaskAndReply) {
+ scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+ new TestSimpleTaskRunner());
+
+ CancelableTaskTracker::TaskId task_id =
+ task_tracker_.PostTaskAndReply(test_task_runner.get(),
+ FROM_HERE,
+ MakeExpectedNotRunClosure(FROM_HERE),
+ MakeExpectedNotRunClosure(FROM_HERE));
+ EXPECT_NE(CancelableTaskTracker::kBadTaskId, task_id);
+
+ task_tracker_.TryCancel(task_id);
+
+ test_task_runner->RunUntilIdle();
+}
+
+// Post a task with reply with the task tracker and cancel it after
+// running the task runner but before running the current message
+// loop. The task should run but the reply should not.
+TEST_F(CancelableTaskTrackerTest, CancelReply) {
+ scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+ new TestSimpleTaskRunner());
+
+ CancelableTaskTracker::TaskId task_id =
+ task_tracker_.PostTaskAndReply(test_task_runner.get(),
+ FROM_HERE,
+ MakeExpectedRunClosure(FROM_HERE),
+ MakeExpectedNotRunClosure(FROM_HERE));
+ EXPECT_NE(CancelableTaskTracker::kBadTaskId, task_id);
+
+ test_task_runner->RunUntilIdle();
+
+ task_tracker_.TryCancel(task_id);
+}
+
+// Post a task with reply with the task tracker on a worker thread and
+// cancel it before running the current message loop. The task should
+// run but the reply should not.
+TEST_F(CancelableTaskTrackerTest, CancelReplyDifferentThread) {
+ Thread worker_thread("worker thread");
+ ASSERT_TRUE(worker_thread.Start());
+
+ CancelableTaskTracker::TaskId task_id = task_tracker_.PostTaskAndReply(
+ worker_thread.task_runner().get(), FROM_HERE, Bind(&DoNothing),
+ MakeExpectedNotRunClosure(FROM_HERE));
+ EXPECT_NE(CancelableTaskTracker::kBadTaskId, task_id);
+
+ task_tracker_.TryCancel(task_id);
+
+ worker_thread.Stop();
+}
+
+void ExpectIsCanceled(
+ const CancelableTaskTracker::IsCanceledCallback& is_canceled,
+ bool expected_is_canceled) {
+ EXPECT_EQ(expected_is_canceled, is_canceled.Run());
+}
+
+// Create a new task ID and check its status on a separate thread
+// before and after canceling. The is-canceled callback should be
+// thread-safe (i.e., nothing should blow up).
+TEST_F(CancelableTaskTrackerTest, NewTrackedTaskIdDifferentThread) {
+ CancelableTaskTracker::IsCanceledCallback is_canceled;
+ CancelableTaskTracker::TaskId task_id =
+ task_tracker_.NewTrackedTaskId(&is_canceled);
+
+ EXPECT_FALSE(is_canceled.Run());
+
+ Thread other_thread("other thread");
+ ASSERT_TRUE(other_thread.Start());
+ other_thread.task_runner()->PostTask(
+ FROM_HERE, Bind(&ExpectIsCanceled, is_canceled, false));
+ other_thread.Stop();
+
+ task_tracker_.TryCancel(task_id);
+
+ ASSERT_TRUE(other_thread.Start());
+ other_thread.task_runner()->PostTask(
+ FROM_HERE, Bind(&ExpectIsCanceled, is_canceled, true));
+ other_thread.Stop();
+}
+
+// With the task tracker, post a task, a task with a reply, get a new
+// task id, and then cancel all of them. None of the tasks nor the
+// reply should run and the "is canceled" callback should return
+// true.
+TEST_F(CancelableTaskTrackerTest, CancelAll) {
+ scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+ new TestSimpleTaskRunner());
+
+ ignore_result(task_tracker_.PostTask(
+ test_task_runner.get(), FROM_HERE, MakeExpectedNotRunClosure(FROM_HERE)));
+
+ ignore_result(
+ task_tracker_.PostTaskAndReply(test_task_runner.get(),
+ FROM_HERE,
+ MakeExpectedNotRunClosure(FROM_HERE),
+ MakeExpectedNotRunClosure(FROM_HERE)));
+
+ CancelableTaskTracker::IsCanceledCallback is_canceled;
+ ignore_result(task_tracker_.NewTrackedTaskId(&is_canceled));
+
+ task_tracker_.TryCancelAll();
+
+ test_task_runner->RunUntilIdle();
+
+ RunCurrentLoopUntilIdle();
+
+ EXPECT_TRUE(is_canceled.Run());
+}
+
+// Create an inner-scoped task tracker, post a task and a task with a
+// reply through it, and let it go out of scope. Destruction cancels
+// them, so neither the tasks nor the reply should run. The outer
+// tracker's "is canceled" callback is unaffected and returns false.
+TEST_F(CancelableTaskTrackerTest, DestructionCancelsAll) {
+ scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+ new TestSimpleTaskRunner());
+
+ CancelableTaskTracker::IsCanceledCallback is_canceled;
+
+ {
+ // Create another task tracker with a smaller scope.
+ CancelableTaskTracker task_tracker;
+
+ ignore_result(task_tracker.PostTask(test_task_runner.get(),
+ FROM_HERE,
+ MakeExpectedNotRunClosure(FROM_HERE)));
+
+ ignore_result(
+ task_tracker.PostTaskAndReply(test_task_runner.get(),
+ FROM_HERE,
+ MakeExpectedNotRunClosure(FROM_HERE),
+ MakeExpectedNotRunClosure(FROM_HERE)));
+
+ ignore_result(task_tracker_.NewTrackedTaskId(&is_canceled));
+ }
+
+ test_task_runner->RunUntilIdle();
+
+ RunCurrentLoopUntilIdle();
+
+ EXPECT_FALSE(is_canceled.Run());
+}
+
+// Post a task and cancel it. HasTrackedTasks() should return true
+// from when the task is posted until the (do-nothing) reply task is
+// flushed.
+TEST_F(CancelableTaskTrackerTest, HasTrackedTasksPost) {
+ scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+ new TestSimpleTaskRunner());
+
+ EXPECT_FALSE(task_tracker_.HasTrackedTasks());
+
+ ignore_result(task_tracker_.PostTask(
+ test_task_runner.get(), FROM_HERE, MakeExpectedNotRunClosure(FROM_HERE)));
+
+ task_tracker_.TryCancelAll();
+
+ test_task_runner->RunUntilIdle();
+
+ EXPECT_TRUE(task_tracker_.HasTrackedTasks());
+
+ RunCurrentLoopUntilIdle();
+
+ EXPECT_FALSE(task_tracker_.HasTrackedTasks());
+}
+
+// Post a task with a reply and cancel it. HasTrackedTasks() should
+// return true from when the task is posted until it is canceled.
+TEST_F(CancelableTaskTrackerTest, HasTrackedTasksPostWithReply) {
+ scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+ new TestSimpleTaskRunner());
+
+ EXPECT_FALSE(task_tracker_.HasTrackedTasks());
+
+ ignore_result(
+ task_tracker_.PostTaskAndReply(test_task_runner.get(),
+ FROM_HERE,
+ MakeExpectedNotRunClosure(FROM_HERE),
+ MakeExpectedNotRunClosure(FROM_HERE)));
+
+ task_tracker_.TryCancelAll();
+
+ test_task_runner->RunUntilIdle();
+
+ EXPECT_TRUE(task_tracker_.HasTrackedTasks());
+
+ RunCurrentLoopUntilIdle();
+
+ EXPECT_FALSE(task_tracker_.HasTrackedTasks());
+}
+
+// Create a new tracked task ID. HasTrackedTasks() should return true
+// until the IsCanceledCallback is destroyed.
+TEST_F(CancelableTaskTrackerTest, HasTrackedTasksIsCancelled) {
+ EXPECT_FALSE(task_tracker_.HasTrackedTasks());
+
+ CancelableTaskTracker::IsCanceledCallback is_canceled;
+ ignore_result(task_tracker_.NewTrackedTaskId(&is_canceled));
+
+ task_tracker_.TryCancelAll();
+
+ EXPECT_TRUE(task_tracker_.HasTrackedTasks());
+
+ is_canceled.Reset();
+
+ EXPECT_FALSE(task_tracker_.HasTrackedTasks());
+}
+
+// The death tests below make sure that calling task tracker member
+// functions from a thread different from its owner thread DCHECKs in
+// debug mode.
+
+class CancelableTaskTrackerDeathTest : public CancelableTaskTrackerTest {
+ protected:
+ CancelableTaskTrackerDeathTest() {
+ // The default style "fast" does not support multi-threaded tests.
+ ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+ }
+};
+
+// Duplicated from base/threading/thread_checker.h so that we can be
+// good citizens there and undef the macro.
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#define ENABLE_THREAD_CHECKER 1
+#else
+#define ENABLE_THREAD_CHECKER 0
+#endif
+
+// Runs |fn| with |task_tracker|, expecting it to crash in debug mode.
+void MaybeRunDeadlyTaskTrackerMemberFunction(
+ CancelableTaskTracker* task_tracker,
+ const Callback<void(CancelableTaskTracker*)>& fn) {
+// CancelableTask uses DCHECKs with its ThreadChecker (itself only
+// enabled in debug mode).
+#if ENABLE_THREAD_CHECKER
+ EXPECT_DEATH_IF_SUPPORTED(fn.Run(task_tracker), "");
+#endif
+}
+
+void PostDoNothingTask(CancelableTaskTracker* task_tracker) {
+ ignore_result(task_tracker->PostTask(
+ scoped_refptr<TestSimpleTaskRunner>(new TestSimpleTaskRunner()).get(),
+ FROM_HERE,
+ Bind(&DoNothing)));
+}
+
+TEST_F(CancelableTaskTrackerDeathTest, PostFromDifferentThread) {
+ Thread bad_thread("bad thread");
+ ASSERT_TRUE(bad_thread.Start());
+
+ bad_thread.task_runner()->PostTask(
+ FROM_HERE, Bind(&MaybeRunDeadlyTaskTrackerMemberFunction,
+ Unretained(&task_tracker_), Bind(&PostDoNothingTask)));
+}
+
+void TryCancel(CancelableTaskTracker::TaskId task_id,
+ CancelableTaskTracker* task_tracker) {
+ task_tracker->TryCancel(task_id);
+}
+
+TEST_F(CancelableTaskTrackerDeathTest, CancelOnDifferentThread) {
+ scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+ new TestSimpleTaskRunner());
+
+ Thread bad_thread("bad thread");
+ ASSERT_TRUE(bad_thread.Start());
+
+ CancelableTaskTracker::TaskId task_id = task_tracker_.PostTask(
+ test_task_runner.get(), FROM_HERE, Bind(&DoNothing));
+ EXPECT_NE(CancelableTaskTracker::kBadTaskId, task_id);
+
+ bad_thread.task_runner()->PostTask(
+ FROM_HERE, Bind(&MaybeRunDeadlyTaskTrackerMemberFunction,
+ Unretained(&task_tracker_), Bind(&TryCancel, task_id)));
+
+ test_task_runner->RunUntilIdle();
+}
+
+TEST_F(CancelableTaskTrackerDeathTest, CancelAllOnDifferentThread) {
+ scoped_refptr<TestSimpleTaskRunner> test_task_runner(
+ new TestSimpleTaskRunner());
+
+ Thread bad_thread("bad thread");
+ ASSERT_TRUE(bad_thread.Start());
+
+ CancelableTaskTracker::TaskId task_id = task_tracker_.PostTask(
+ test_task_runner.get(), FROM_HERE, Bind(&DoNothing));
+ EXPECT_NE(CancelableTaskTracker::kBadTaskId, task_id);
+
+ bad_thread.task_runner()->PostTask(
+ FROM_HERE,
+ Bind(&MaybeRunDeadlyTaskTrackerMemberFunction, Unretained(&task_tracker_),
+ Bind(&CancelableTaskTracker::TryCancelAll)));
+
+ test_task_runner->RunUntilIdle();
+}
+
+} // namespace base
diff --git a/libchrome/base/task_runner.cc b/libchrome/base/task_runner.cc
new file mode 100644
index 0000000..262e1f8
--- /dev/null
+++ b/libchrome/base/task_runner.cc
@@ -0,0 +1,68 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_runner.h"
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/threading/post_task_and_reply_impl.h"
+
+namespace base {
+
+namespace {
+
+// TODO(akalin): There's only one other implementation of
+// PostTaskAndReplyImpl in WorkerPool. Investigate whether it'll be
+// possible to merge the two.
+class PostTaskAndReplyTaskRunner : public internal::PostTaskAndReplyImpl {
+ public:
+ explicit PostTaskAndReplyTaskRunner(TaskRunner* destination);
+
+ private:
+ bool PostTask(const tracked_objects::Location& from_here,
+ const Closure& task) override;
+
+ // Non-owning.
+ TaskRunner* destination_;
+};
+
+PostTaskAndReplyTaskRunner::PostTaskAndReplyTaskRunner(
+ TaskRunner* destination) : destination_(destination) {
+ DCHECK(destination_);
+}
+
+bool PostTaskAndReplyTaskRunner::PostTask(
+ const tracked_objects::Location& from_here,
+ const Closure& task) {
+ return destination_->PostTask(from_here, task);
+}
+
+} // namespace
+
+bool TaskRunner::PostTask(const tracked_objects::Location& from_here,
+ const Closure& task) {
+ return PostDelayedTask(from_here, task, base::TimeDelta());
+}
+
+bool TaskRunner::PostTaskAndReply(
+ const tracked_objects::Location& from_here,
+ const Closure& task,
+ const Closure& reply) {
+ return PostTaskAndReplyTaskRunner(this).PostTaskAndReply(
+ from_here, task, reply);
+}
+
+TaskRunner::TaskRunner() {}
+
+TaskRunner::~TaskRunner() {}
+
+void TaskRunner::OnDestruct() const {
+ delete this;
+}
+
+void TaskRunnerTraits::Destruct(const TaskRunner* task_runner) {
+ task_runner->OnDestruct();
+}
+
+} // namespace base
diff --git a/libchrome/base/task_runner.h b/libchrome/base/task_runner.h
new file mode 100644
index 0000000..9593835
--- /dev/null
+++ b/libchrome/base/task_runner.h
@@ -0,0 +1,151 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_RUNNER_H_
+#define BASE_TASK_RUNNER_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/callback_forward.h"
+#include "base/location.h"
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+
+namespace base {
+
+struct TaskRunnerTraits;
+
+// A TaskRunner is an object that runs posted tasks (in the form of
+// Closure objects). The TaskRunner interface provides a way of
+// decoupling task posting from the mechanics of how each task will be
+// run. TaskRunner provides very weak guarantees as to how posted
+// tasks are run (or if they're run at all). In particular, it only
+// guarantees:
+//
+// - Posting a task will not run it synchronously. That is, no
+// Post*Task method will call task.Run() directly.
+//
+// - Increasing the delay can only delay when the task gets run.
+// That is, increasing the delay may not affect when the task gets
+// run, or it could make it run later than it normally would, but
+// it won't make it run earlier than it normally would.
+//
+// TaskRunner does not guarantee the order in which posted tasks are
+// run, whether tasks overlap, or whether they're run on a particular
+// thread. Also it does not guarantee a memory model for shared data
+// between tasks. (In other words, you should use your own
+// synchronization/locking primitives if you need to share data
+// between tasks.)
+//
+// Implementations of TaskRunner should be thread-safe in that all
+// methods must be safe to call on any thread. Ownership semantics
+// for TaskRunners are in general not clear, which is why the
+// interface itself is RefCountedThreadSafe.
+//
+// Some theoretical implementations of TaskRunner:
+//
+// - A TaskRunner that uses a thread pool to run posted tasks.
+//
+// - A TaskRunner that, for each task, spawns a non-joinable thread
+// to run that task and immediately quit.
+//
+// - A TaskRunner that stores the list of posted tasks and has a
+// method Run() that runs each runnable task in random order.
+class BASE_EXPORT TaskRunner
+    : public RefCountedThreadSafe<TaskRunner, TaskRunnerTraits> {
+ public:
+  // Posts the given task to be run. Returns true if the task may be
+  // run at some point in the future, and false if the task definitely
+  // will not be run.
+  //
+  // Equivalent to PostDelayedTask(from_here, task, 0).
+  bool PostTask(const tracked_objects::Location& from_here,
+                const Closure& task);
+
+  // Like PostTask, but tries to run the posted task only after
+  // |delay| has passed.
+  //
+  // It is valid for an implementation to ignore |delay|; that is,
+  // to have PostDelayedTask behave the same as PostTask.
+  virtual bool PostDelayedTask(const tracked_objects::Location& from_here,
+                               const Closure& task,
+                               base::TimeDelta delay) = 0;
+
+  // Returns true if the current thread is a thread on which a task
+  // may be run, and false if no task will be run on the current
+  // thread.
+  //
+  // It is valid for an implementation to always return true, or in
+  // general to use 'true' as a default value.
+  virtual bool RunsTasksOnCurrentThread() const = 0;
+
+  // Posts |task| on the current TaskRunner. On completion, |reply|
+  // is posted to the thread that called PostTaskAndReply(). Both
+  // |task| and |reply| are guaranteed to be deleted on the thread
+  // from which PostTaskAndReply() is invoked. This allows objects
+  // that must be deleted on the originating thread to be bound into
+  // the |task| and |reply| Closures. In particular, it can be useful
+  // to use WeakPtr<> in the |reply| Closure so that the reply
+  // operation can be canceled. See the following pseudo-code:
+  //
+  // class DataBuffer : public RefCountedThreadSafe<DataBuffer> {
+  //  public:
+  //   // Called to add data into a buffer.
+  //   void AddData(void* buf, size_t length);
+  //   ...
+  // };
+  //
+  //
+  // class DataLoader : public SupportsWeakPtr<DataLoader> {
+  //  public:
+  //    void GetData() {
+  //     scoped_refptr<DataBuffer> buffer = new DataBuffer();
+  //     target_thread_.task_runner()->PostTaskAndReply(
+  //         FROM_HERE,
+  //         base::Bind(&DataBuffer::AddData, buffer),
+  //         base::Bind(&DataLoader::OnDataReceived, AsWeakPtr(), buffer));
+  //   }
+  //
+  //  private:
+  //   void OnDataReceived(scoped_refptr<DataBuffer> buffer) {
+  //     // Do something with buffer.
+  //   }
+  // };
+  //
+  //
+  // Things to notice:
+  //   * Results of |task| are shared with |reply| by binding a shared argument
+  //     (a DataBuffer instance).
+  //   * The DataLoader object has no special thread safety.
+  //   * The DataLoader object can be deleted while |task| is still running,
+  //     and the reply will cancel itself safely because it is bound to a
+  //     WeakPtr<>.
+  bool PostTaskAndReply(const tracked_objects::Location& from_here,
+                        const Closure& task,
+                        const Closure& reply);
+
+ protected:
+  friend struct TaskRunnerTraits;
+
+  // Only the Windows debug build seems to need this: see
+  // http://crbug.com/112250.
+  friend class RefCountedThreadSafe<TaskRunner, TaskRunnerTraits>;
+
+  TaskRunner();
+  virtual ~TaskRunner();
+
+  // Called when this object should be destroyed. By default simply
+  // deletes |this|, but can be overridden to do something else, like
+  // delete on a certain thread.
+  virtual void OnDestruct() const;
+};
+
+// Destruction policy for TaskRunner's RefCountedThreadSafe base; forwards to
+// the virtual TaskRunner::OnDestruct() declared above.
+struct BASE_EXPORT TaskRunnerTraits {
+  static void Destruct(const TaskRunner* task_runner);
+};
+
+} // namespace base
+
+#endif // BASE_TASK_RUNNER_H_
diff --git a/libchrome/base/task_runner_util.h b/libchrome/base/task_runner_util.h
new file mode 100644
index 0000000..ba8e120
--- /dev/null
+++ b/libchrome/base/task_runner_util.h
@@ -0,0 +1,70 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_RUNNER_UTIL_H_
+#define BASE_TASK_RUNNER_UTIL_H_
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/logging.h"
+#include "base/task_runner.h"
+
+namespace base {
+
+namespace internal {
+
+// Adapts a function that produces a result via a return value to
+// one that returns via an output parameter.
+// |result| must outlive the posted task; PostTaskAndReplyWithResult() below
+// arranges this by tying its lifetime to the reply via base::Owned().
+template <typename ReturnType>
+void ReturnAsParamAdapter(const Callback<ReturnType(void)>& func,
+                          ReturnType* result) {
+  *result = func.Run();
+}
+
+// Adapts a T* result to a callback that expects a T (moved out of *result).
+template <typename TaskReturnType, typename ReplyArgType>
+void ReplyAdapter(const Callback<void(ReplyArgType)>& callback,
+                  TaskReturnType* result) {
+  // TODO(ajwong): Remove this conditional and add a DCHECK to enforce that
+  // |reply| must be non-null in PostTaskAndReplyWithResult() below after
+  // current code that relies on this API softness has been removed.
+  // http://crbug.com/162712
+  if (!callback.is_null())
+    callback.Run(std::move(*result));
+}
+
+} // namespace internal
+
+// When you have these methods
+//
+//   R DoWorkAndReturn();
+//   void Callback(const R& result);
+//
+// and want to call them in a PostTaskAndReply kind of fashion where the
+// result of DoWorkAndReturn is passed to the Callback, you can use
+// PostTaskAndReplyWithResult as in this example:
+//
+// PostTaskAndReplyWithResult(
+//     target_thread_.task_runner(),
+//     FROM_HERE,
+//     Bind(&DoWorkAndReturn),
+//     Bind(&Callback));
+template <typename TaskReturnType, typename ReplyArgType>
+bool PostTaskAndReplyWithResult(
+    TaskRunner* task_runner,
+    const tracked_objects::Location& from_here,
+    const Callback<TaskReturnType(void)>& task,
+    const Callback<void(ReplyArgType)>& reply) {
+  // |result| is deleted by base::Owned() when the reply closure is destroyed,
+  // whether or not the reply actually runs. TaskReturnType must therefore be
+  // default-constructible.
+  TaskReturnType* result = new TaskReturnType();
+  return task_runner->PostTaskAndReply(
+      from_here,
+      base::Bind(&internal::ReturnAsParamAdapter<TaskReturnType>, task,
+                 result),
+      base::Bind(&internal::ReplyAdapter<TaskReturnType, ReplyArgType>, reply,
+                 base::Owned(result)));
+}
+
+} // namespace base
+
+#endif // BASE_TASK_RUNNER_UTIL_H_
diff --git a/libchrome/base/task_runner_util_unittest.cc b/libchrome/base/task_runner_util_unittest.cc
new file mode 100644
index 0000000..1df5436
--- /dev/null
+++ b/libchrome/base/task_runner_util_unittest.cc
@@ -0,0 +1,124 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_runner_util.h"
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/run_loop.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// Task helper: returns a fixed value for the reply to consume.
+int ReturnFourtyTwo() {
+  return 42;
+}
+
+// Reply helpers: record the received value so the test can assert on it.
+void StoreValue(int* destination, int value) {
+  *destination = value;
+}
+
+void StoreDoubleValue(double* destination, double value) {
+  *destination = value;
+}
+
+// Counters the tests observe to verify when and how Foo instances are torn
+// down. Reset at the start of every test that uses them.
+int g_foo_destruct_count = 0;
+int g_foo_free_count = 0;
+
+// Counts its own destructions via g_foo_destruct_count.
+struct Foo {
+  ~Foo() {
+    ++g_foo_destruct_count;
+  }
+};
+
+std::unique_ptr<Foo> CreateFoo() {
+  return std::unique_ptr<Foo>(new Foo);
+}
+
+// Reply target that takes ownership of the Foo produced by CreateFoo(),
+// verifying that ownership genuinely transfers through the reply.
+void ExpectFoo(std::unique_ptr<Foo> foo) {
+  EXPECT_TRUE(foo.get());
+  std::unique_ptr<Foo> local_foo(std::move(foo));
+  EXPECT_TRUE(local_foo.get());
+  EXPECT_FALSE(foo.get());
+}
+
+// Custom deleter that counts invocations before destroying the Foo.
+struct FooDeleter {
+  void operator()(Foo* foo) const {
+    ++g_foo_free_count;
+    delete foo;
+  }
+};
+
+std::unique_ptr<Foo, FooDeleter> CreateScopedFoo() {
+  return std::unique_ptr<Foo, FooDeleter>(new Foo);
+}
+
+// Reply target for the FooDeleter variant; mirrors ExpectFoo() above.
+void ExpectScopedFoo(std::unique_ptr<Foo, FooDeleter> foo) {
+  EXPECT_TRUE(foo.get());
+  std::unique_ptr<Foo, FooDeleter> local_foo(std::move(foo));
+  EXPECT_TRUE(local_foo.get());
+  EXPECT_FALSE(foo.get());
+}
+
+} // namespace
+
+TEST(TaskRunnerHelpersTest, PostTaskAndReplyWithResult) {
+  int result = 0;
+
+  MessageLoop message_loop;
+  PostTaskAndReplyWithResult(message_loop.task_runner().get(), FROM_HERE,
+                             Bind(&ReturnFourtyTwo),
+                             Bind(&StoreValue, &result));
+
+  // Runs the posted task and then the reply on this thread.
+  RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(42, result);
+}
+
+// Same as above, but the reply takes a double: verifies that the task's
+// return type may implicitly convert to the reply's argument type.
+TEST(TaskRunnerHelpersTest, PostTaskAndReplyWithResultImplicitConvert) {
+  double result = 0;
+
+  MessageLoop message_loop;
+  PostTaskAndReplyWithResult(message_loop.task_runner().get(), FROM_HERE,
+                             Bind(&ReturnFourtyTwo),
+                             Bind(&StoreDoubleValue, &result));
+
+  RunLoop().RunUntilIdle();
+
+  EXPECT_DOUBLE_EQ(42.0, result);
+}
+
+// Passes a move-only std::unique_ptr<Foo> through task and reply: the Foo
+// must be destroyed exactly once, and the custom free counter stays at zero.
+TEST(TaskRunnerHelpersTest, PostTaskAndReplyWithResultPassed) {
+  g_foo_destruct_count = 0;
+  g_foo_free_count = 0;
+
+  MessageLoop message_loop;
+  PostTaskAndReplyWithResult(message_loop.task_runner().get(), FROM_HERE,
+                             Bind(&CreateFoo), Bind(&ExpectFoo));
+
+  RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(1, g_foo_destruct_count);
+  EXPECT_EQ(0, g_foo_free_count);
+}
+
+// Same as above with a custom deleter: both the destructor and the deleter
+// must run exactly once.
+TEST(TaskRunnerHelpersTest, PostTaskAndReplyWithResultPassedFreeProc) {
+  g_foo_destruct_count = 0;
+  g_foo_free_count = 0;
+
+  MessageLoop message_loop;
+  PostTaskAndReplyWithResult(message_loop.task_runner().get(), FROM_HERE,
+                             Bind(&CreateScopedFoo), Bind(&ExpectScopedFoo));
+
+  RunLoop().RunUntilIdle();
+
+  EXPECT_EQ(1, g_foo_destruct_count);
+  EXPECT_EQ(1, g_foo_free_count);
+}
+
+} // namespace base
diff --git a/libchrome/base/task_scheduler/OWNERS b/libchrome/base/task_scheduler/OWNERS
new file mode 100644
index 0000000..e4b383c
--- /dev/null
+++ b/libchrome/base/task_scheduler/OWNERS
@@ -0,0 +1,3 @@
+fdoray@chromium.org
+gab@chromium.org
+robliao@chromium.org
diff --git a/libchrome/base/task_scheduler/scheduler_lock.h b/libchrome/base/task_scheduler/scheduler_lock.h
new file mode 100644
index 0000000..c969eb1
--- /dev/null
+++ b/libchrome/base/task_scheduler/scheduler_lock.h
@@ -0,0 +1,88 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_LOCK_H
+#define BASE_TASK_SCHEDULER_SCHEDULER_LOCK_H
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/task_scheduler/scheduler_lock_impl.h"
+
+namespace base {
+namespace internal {
+
+// SchedulerLock should be used anywhere a lock would be used in the scheduler.
+// When DCHECK_IS_ON(), lock checking occurs. Otherwise, SchedulerLock is
+// equivalent to base::Lock.
+//
+// The shape of SchedulerLock is as follows:
+// SchedulerLock()
+// Default constructor, no predecessor lock.
+// DCHECKs
+// On Acquisition if any scheduler lock is acquired on this thread.
+//
+// SchedulerLock(const SchedulerLock* predecessor)
+// Constructor that specifies an allowed predecessor for that lock.
+// DCHECKs
+// On Construction if |predecessor| forms a predecessor lock cycle.
+// On Acquisition if the previous lock acquired on the thread is not
+// |predecessor|. Okay if there was no previous lock acquired.
+//
+// void Acquire()
+// Acquires the lock.
+//
+// void Release()
+// Releases the lock.
+//
+// void AssertAcquired().
+// DCHECKs if the lock is not acquired.
+//
+// std::unique_ptr<ConditionVariable> CreateConditionVariable()
+// Creates a condition variable using this as a lock.
+
+#if DCHECK_IS_ON()
+// DCHECK build: full predecessor/cycle checking via SchedulerLockImpl.
+class SchedulerLock : public SchedulerLockImpl {
+ public:
+  SchedulerLock() = default;
+  explicit SchedulerLock(const SchedulerLock* predecessor)
+      : SchedulerLockImpl(predecessor) {}
+};
+#else  // DCHECK_IS_ON()
+// Non-DCHECK build: the predecessor is ignored and SchedulerLock is a plain
+// base::Lock with the same interface.
+class SchedulerLock : public Lock {
+ public:
+  SchedulerLock() = default;
+  explicit SchedulerLock(const SchedulerLock*) {}
+
+  std::unique_ptr<ConditionVariable> CreateConditionVariable() {
+    return std::unique_ptr<ConditionVariable>(new ConditionVariable(this));
+  }
+};
+#endif  // DCHECK_IS_ON()
+
+// Provides the same functionality as base::AutoLock for SchedulerLock:
+// acquires |lock| on construction and releases it on destruction.
+class AutoSchedulerLock {
+ public:
+  explicit AutoSchedulerLock(SchedulerLock& lock) : lock_(lock) {
+    lock_.Acquire();
+  }
+
+  ~AutoSchedulerLock() {
+    // DCHECKs if the lock is no longer held (see class comment above).
+    lock_.AssertAcquired();
+    lock_.Release();
+  }
+
+ private:
+  // The guarded lock; not owned and must outlive this object.
+  SchedulerLock& lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(AutoSchedulerLock);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SCHEDULER_LOCK_H
diff --git a/libchrome/base/task_scheduler/scheduler_lock_impl.cc b/libchrome/base/task_scheduler/scheduler_lock_impl.cc
new file mode 100644
index 0000000..7480e18
--- /dev/null
+++ b/libchrome/base/task_scheduler/scheduler_lock_impl.cc
@@ -0,0 +1,145 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_lock_impl.h"
+
+#include <algorithm>
+#include <unordered_map>
+#include <vector>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_local_storage.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+// Records, per thread, the stack of SchedulerLockImpls currently held and
+// validates every acquisition against the registered predecessor graph.
+class SafeAcquisitionTracker {
+ public:
+  SafeAcquisitionTracker() : tls_acquired_locks_(&OnTLSDestroy) {}
+
+  // Registers |lock| with |predecessor| (may be null) as the only lock
+  // allowed to be held when |lock| is acquired. DCHECKs on cycles.
+  void RegisterLock(
+      const SchedulerLockImpl* const lock,
+      const SchedulerLockImpl* const predecessor) {
+    DCHECK_NE(lock, predecessor) << "Reentrant locks are unsupported.";
+    AutoLock auto_lock(allowed_predecessor_map_lock_);
+    allowed_predecessor_map_[lock] = predecessor;
+    AssertSafePredecessor(lock);
+  }
+
+  void UnregisterLock(const SchedulerLockImpl* const lock) {
+    AutoLock auto_lock(allowed_predecessor_map_lock_);
+    allowed_predecessor_map_.erase(lock);
+  }
+
+  void RecordAcquisition(const SchedulerLockImpl* const lock) {
+    AssertSafeAcquire(lock);
+    GetAcquiredLocksOnCurrentThread()->push_back(lock);
+  }
+
+  void RecordRelease(const SchedulerLockImpl* const lock) {
+    LockVector* acquired_locks = GetAcquiredLocksOnCurrentThread();
+    const auto iter_at_lock =
+        std::find(acquired_locks->begin(), acquired_locks->end(), lock);
+    DCHECK(iter_at_lock != acquired_locks->end());
+    acquired_locks->erase(iter_at_lock);
+  }
+
+ private:
+  using LockVector = std::vector<const SchedulerLockImpl*>;
+  using PredecessorMap = std::unordered_map<
+      const SchedulerLockImpl*, const SchedulerLockImpl*>;
+
+  // This asserts that the lock is safe to acquire. This means that this should
+  // be run before actually recording the acquisition.
+  void AssertSafeAcquire(const SchedulerLockImpl* const lock) {
+    const LockVector* acquired_locks = GetAcquiredLocksOnCurrentThread();
+
+    // If the thread currently holds no locks, this is inherently safe.
+    if (acquired_locks->empty())
+      return;
+
+    // Otherwise, make sure that the previous lock acquired is an allowed
+    // predecessor.
+    AutoLock auto_lock(allowed_predecessor_map_lock_);
+    const SchedulerLockImpl* allowed_predecessor =
+        allowed_predecessor_map_.at(lock);
+    DCHECK_EQ(acquired_locks->back(), allowed_predecessor);
+  }
+
+  // Walks the predecessor chain of |lock| and DCHECKs if it ever reaches
+  // |lock| again, i.e. if registration introduced a cycle.
+  void AssertSafePredecessor(const SchedulerLockImpl* lock) const {
+    allowed_predecessor_map_lock_.AssertAcquired();
+    for (const SchedulerLockImpl* predecessor =
+             allowed_predecessor_map_.at(lock);
+         predecessor != nullptr;
+         predecessor = allowed_predecessor_map_.at(predecessor)) {
+      DCHECK_NE(predecessor, lock) <<
+          "Scheduler lock predecessor cycle detected.";
+    }
+  }
+
+  // Lazily creates the calling thread's LockVector on first use; freed by
+  // OnTLSDestroy() at thread exit.
+  LockVector* GetAcquiredLocksOnCurrentThread() {
+    if (!tls_acquired_locks_.Get())
+      tls_acquired_locks_.Set(new LockVector);
+
+    return reinterpret_cast<LockVector*>(tls_acquired_locks_.Get());
+  }
+
+  static void OnTLSDestroy(void* value) {
+    delete reinterpret_cast<LockVector*>(value);
+  }
+
+  // Synchronizes access to |allowed_predecessor_map_|.
+  Lock allowed_predecessor_map_lock_;
+
+  // A map of allowed predecessors.
+  PredecessorMap allowed_predecessor_map_;
+
+  // A thread-local slot holding a vector of locks currently acquired on the
+  // current thread.
+  ThreadLocalStorage::Slot tls_acquired_locks_;
+
+  DISALLOW_COPY_AND_ASSIGN(SafeAcquisitionTracker);
+};
+
+// Leaky so the tracker is never destroyed at process shutdown.
+LazyInstance<SafeAcquisitionTracker>::Leaky g_safe_acquisition_tracker =
+    LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+// Delegates to the predecessor constructor: no predecessor allowed.
+SchedulerLockImpl::SchedulerLockImpl() : SchedulerLockImpl(nullptr) {}
+
+SchedulerLockImpl::SchedulerLockImpl(const SchedulerLockImpl* predecessor) {
+  g_safe_acquisition_tracker.Get().RegisterLock(this, predecessor);
+}
+
+SchedulerLockImpl::~SchedulerLockImpl() {
+  g_safe_acquisition_tracker.Get().UnregisterLock(this);
+}
+
+void SchedulerLockImpl::Acquire() {
+  lock_.Acquire();
+  g_safe_acquisition_tracker.Get().RecordAcquisition(this);
+}
+
+void SchedulerLockImpl::Release() {
+  lock_.Release();
+  g_safe_acquisition_tracker.Get().RecordRelease(this);
+}
+
+void SchedulerLockImpl::AssertAcquired() const {
+  lock_.AssertAcquired();
+}
+
+std::unique_ptr<ConditionVariable>
+SchedulerLockImpl::CreateConditionVariable() {
+  return std::unique_ptr<ConditionVariable>(new ConditionVariable(&lock_));
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/libchrome/base/task_scheduler/scheduler_lock_impl.h b/libchrome/base/task_scheduler/scheduler_lock_impl.h
new file mode 100644
index 0000000..65699bb
--- /dev/null
+++ b/libchrome/base/task_scheduler/scheduler_lock_impl.h
@@ -0,0 +1,46 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_LOCK_IMPL_H
+#define BASE_TASK_SCHEDULER_SCHEDULER_LOCK_IMPL_H
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+class ConditionVariable;
+
+namespace internal {
+
+// A regular lock with simple deadlock correctness checking.
+// This lock tracks all of the available locks to make sure that any locks are
+// acquired in an expected order.
+// See scheduler_lock.h for details.
+class BASE_EXPORT SchedulerLockImpl {
+ public:
+  SchedulerLockImpl();
+  // |predecessor| is the only lock allowed to already be held on the current
+  // thread when this lock is acquired (see scheduler_lock.h).
+  explicit SchedulerLockImpl(const SchedulerLockImpl* predecessor);
+  ~SchedulerLockImpl();
+
+  void Acquire();
+  void Release();
+
+  // DCHECKs if the lock is not held.
+  void AssertAcquired() const;
+
+  // Creates a condition variable bound to this lock.
+  std::unique_ptr<ConditionVariable> CreateConditionVariable();
+
+ private:
+  Lock lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(SchedulerLockImpl);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SCHEDULER_LOCK_IMPL_H
diff --git a/libchrome/base/task_scheduler/scheduler_lock_unittest.cc b/libchrome/base/task_scheduler/scheduler_lock_unittest.cc
new file mode 100644
index 0000000..daa5025
--- /dev/null
+++ b/libchrome/base/task_scheduler/scheduler_lock_unittest.cc
@@ -0,0 +1,296 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_lock.h"
+
+#include <stdlib.h>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/rand_util.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/test_utils.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+namespace {
+
+// Adapted from base::Lock's BasicLockTestThread to make sure
+// Acquire()/Release() don't crash.
+class BasicLockTestThread : public SimpleThread {
+ public:
+ explicit BasicLockTestThread(SchedulerLock* lock)
+ : SimpleThread("BasicLockTestThread"),
+ lock_(lock),
+ acquired_(0) {}
+
+ int acquired() const { return acquired_; }
+
+ private:
+ void Run() override {
+ for (int i = 0; i < 10; i++) {
+ lock_->Acquire();
+ acquired_++;
+ lock_->Release();
+ }
+ for (int i = 0; i < 10; i++) {
+ lock_->Acquire();
+ acquired_++;
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(base::RandInt(0, 19)));
+ lock_->Release();
+ }
+ }
+
+ SchedulerLock* const lock_;
+ int acquired_;
+
+ DISALLOW_COPY_AND_ASSIGN(BasicLockTestThread);
+};
+
+// Acquires |lock| on a background thread, signals the main thread, then
+// waits for permission before releasing. Lets tests control cross-thread
+// lock timing precisely.
+class BasicLockAcquireAndWaitThread : public SimpleThread {
+ public:
+  explicit BasicLockAcquireAndWaitThread(SchedulerLock* lock)
+      : SimpleThread("BasicLockAcquireAndWaitThread"),
+        lock_(lock),
+        lock_acquire_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+                            WaitableEvent::InitialState::NOT_SIGNALED),
+        main_thread_continue_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                    WaitableEvent::InitialState::NOT_SIGNALED) {
+  }
+
+  // Blocks until the background thread holds |lock_|.
+  void WaitForLockAcquisition() {
+    lock_acquire_event_.Wait();
+  }
+
+  // Allows the background thread to release |lock_| and finish.
+  void ContinueMain() {
+    main_thread_continue_event_.Signal();
+  }
+
+ private:
+  void Run() override {
+    lock_->Acquire();
+    lock_acquire_event_.Signal();
+    main_thread_continue_event_.Wait();
+    lock_->Release();
+  }
+
+  SchedulerLock* const lock_;  // Not owned.
+  WaitableEvent lock_acquire_event_;
+  WaitableEvent main_thread_continue_event_;
+
+  DISALLOW_COPY_AND_ASSIGN(BasicLockAcquireAndWaitThread);
+};
+
+TEST(TaskSchedulerLock, Basic) {
+  SchedulerLock lock;
+  BasicLockTestThread thread(&lock);
+
+  thread.Start();
+
+  // Interleave fast and deliberately-slow critical sections with the
+  // background thread; both sides count their successful acquisitions.
+  int acquired = 0;
+  for (int i = 0; i < 5; i++) {
+    lock.Acquire();
+    acquired++;
+    lock.Release();
+  }
+  for (int i = 0; i < 10; i++) {
+    lock.Acquire();
+    acquired++;
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(base::RandInt(0, 19)));
+    lock.Release();
+  }
+  for (int i = 0; i < 5; i++) {
+    lock.Acquire();
+    acquired++;
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(base::RandInt(0, 19)));
+    lock.Release();
+  }
+
+  thread.Join();
+
+  EXPECT_EQ(acquired, 20);
+  EXPECT_EQ(thread.acquired(), 20);
+}
+
+// A lock may be acquired while its registered predecessor is held.
+TEST(TaskSchedulerLock, AcquirePredecessor) {
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  predecessor.Acquire();
+  lock.Acquire();
+  lock.Release();
+  predecessor.Release();
+}
+
+// Acquiring the predecessor while already holding its successor must DCHECK.
+TEST(TaskSchedulerLock, AcquirePredecessorWrongOrder) {
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  EXPECT_DCHECK_DEATH({
+    lock.Acquire();
+    predecessor.Acquire();
+  }, "");
+}
+
+// Holding two locks with no declared predecessor relationship must DCHECK.
+TEST(TaskSchedulerLock, AcquireNonPredecessor) {
+  SchedulerLock lock1;
+  SchedulerLock lock2;
+  EXPECT_DCHECK_DEATH({
+    lock1.Acquire();
+    lock2.Acquire();
+  }, "");
+}
+
+// A full predecessor chain may be acquired in declaration order.
+TEST(TaskSchedulerLock, AcquireMultipleLocksInOrder) {
+  SchedulerLock lock1;
+  SchedulerLock lock2(&lock1);
+  SchedulerLock lock3(&lock2);
+  lock1.Acquire();
+  lock2.Acquire();
+  lock3.Acquire();
+  lock3.Release();
+  lock2.Release();
+  lock1.Release();
+}
+
+// Acquisition may start anywhere in a chain, not only at its head.
+TEST(TaskSchedulerLock, AcquireMultipleLocksInTheMiddleOfAChain) {
+  SchedulerLock lock1;
+  SchedulerLock lock2(&lock1);
+  SchedulerLock lock3(&lock2);
+  lock2.Acquire();
+  lock3.Acquire();
+  lock3.Release();
+  lock2.Release();
+}
+
+// Predecessor relationships are not transitive: lock1 is not a legal
+// predecessor of lock3, so skipping lock2 must DCHECK.
+TEST(TaskSchedulerLock, AcquireMultipleLocksNoTransitivity) {
+  SchedulerLock lock1;
+  SchedulerLock lock2(&lock1);
+  SchedulerLock lock3(&lock2);
+  EXPECT_DCHECK_DEATH({
+    lock1.Acquire();
+    lock3.Acquire();
+  }, "");
+}
+
+// Lock-order checking is per-thread: two unrelated locks may be held
+// simultaneously when each is acquired on a different thread.
+TEST(TaskSchedulerLock, AcquireLocksDifferentThreadsSafely) {
+  SchedulerLock lock1;
+  SchedulerLock lock2;
+  BasicLockAcquireAndWaitThread thread(&lock1);
+  thread.Start();
+
+  lock2.Acquire();
+  thread.WaitForLockAcquisition();
+  thread.ContinueMain();
+  thread.Join();
+  lock2.Release();
+}
+
+TEST(TaskSchedulerLock,
+     AcquireLocksWithPredecessorDifferentThreadsSafelyPredecessorFirst) {
+  // A lock and its predecessor may be safely acquired on different threads.
+  // Lock-order checking is per-thread, so this sequence must not DCHECK.
+  // This Thread Other Thread
+  // predecessor.Acquire()
+  // lock.Acquire()
+  // predecessor.Release()
+  // lock.Release()
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  predecessor.Acquire();
+  BasicLockAcquireAndWaitThread thread(&lock);
+  thread.Start();
+  thread.WaitForLockAcquisition();
+  predecessor.Release();
+  thread.ContinueMain();
+  thread.Join();
+}
+
+TEST(TaskSchedulerLock,
+     AcquireLocksWithPredecessorDifferentThreadsSafelyPredecessorLast) {
+  // A lock and its predecessor may be safely acquired on different threads.
+  // Lock-order checking is per-thread, so this sequence must not DCHECK.
+  // This Thread Other Thread
+  // lock.Acquire()
+  // predecessor.Acquire()
+  // lock.Release()
+  // predecessor.Release()
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  lock.Acquire();
+  BasicLockAcquireAndWaitThread thread(&predecessor);
+  thread.Start();
+  thread.WaitForLockAcquisition();
+  lock.Release();
+  thread.ContinueMain();
+  thread.Join();
+}
+
+TEST(TaskSchedulerLock,
+     AcquireLocksWithPredecessorDifferentThreadsSafelyNoInterference) {
+  // Acquisition of an unrelated lock on another thread should not affect a
+  // legal lock acquisition with a predecessor on this thread (lock-order
+  // state is tracked per thread).
+  // This Thread Other Thread
+  // predecessor.Acquire()
+  // unrelated.Acquire()
+  // lock.Acquire()
+  // unrelated.Release()
+  // lock.Release()
+  // predecessor.Release();
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  predecessor.Acquire();
+  SchedulerLock unrelated;
+  BasicLockAcquireAndWaitThread thread(&unrelated);
+  thread.Start();
+  thread.WaitForLockAcquisition();
+  lock.Acquire();
+  thread.ContinueMain();
+  thread.Join();
+  lock.Release();
+  predecessor.Release();
+}
+
+// A lock registered as its own predecessor must be caught at construction.
+TEST(TaskSchedulerLock, SelfReferentialLock) {
+  struct SelfReferentialLock {
+    SelfReferentialLock() : lock(&lock) {}
+
+    SchedulerLock lock;
+  };
+
+  EXPECT_DCHECK_DEATH({ SelfReferentialLock lock; }, "");
+}
+
+// A two-lock predecessor cycle must be caught at construction.
+TEST(TaskSchedulerLock, PredecessorCycle) {
+  struct LockCycle {
+    LockCycle() : lock1(&lock2), lock2(&lock1) {}
+
+    SchedulerLock lock1;
+    SchedulerLock lock2;
+  };
+
+  EXPECT_DCHECK_DEATH({ LockCycle cycle; }, "");
+}
+
+// Cycle detection follows the whole predecessor chain, not just the
+// immediate pair.
+TEST(TaskSchedulerLock, PredecessorLongerCycle) {
+  struct LockCycle {
+    LockCycle()
+        : lock1(&lock5),
+          lock2(&lock1),
+          lock3(&lock2),
+          lock4(&lock3),
+          lock5(&lock4) {}
+
+    SchedulerLock lock1;
+    SchedulerLock lock2;
+    SchedulerLock lock3;
+    SchedulerLock lock4;
+    SchedulerLock lock5;
+  };
+
+  EXPECT_DCHECK_DEATH({ LockCycle cycle; }, "");
+}
+
+} // namespace
+} // namespace internal
+} // namespace base
diff --git a/libchrome/base/task_scheduler/sequence.cc b/libchrome/base/task_scheduler/sequence.cc
new file mode 100644
index 0000000..4ecb605
--- /dev/null
+++ b/libchrome/base/task_scheduler/sequence.cc
@@ -0,0 +1,79 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/sequence.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+Sequence::Sequence() = default;
+
+bool Sequence::PushTask(std::unique_ptr<Task> task) {
+  // A task must be stamped exactly once, when it first enters a sequence.
+  DCHECK(task->sequenced_time.is_null());
+  task->sequenced_time = base::TimeTicks::Now();
+
+  AutoSchedulerLock auto_lock(lock_);
+  ++num_tasks_per_priority_[static_cast<int>(task->traits.priority())];
+  queue_.push(std::move(task));
+
+  // Return true if the sequence was empty before the push.
+  return queue_.size() == 1;
+}
+
+const Task* Sequence::PeekTask() const {
+  AutoSchedulerLock auto_lock(lock_);
+
+  if (queue_.empty())
+    return nullptr;
+
+  // The pointee is owned by |queue_| and is destroyed by PopTask().
+  return queue_.front().get();
+}
+
+bool Sequence::PopTask() {
+  AutoSchedulerLock auto_lock(lock_);
+  DCHECK(!queue_.empty());
+
+  // Keep the per-priority counters in sync with the queue's contents.
+  const int priority_index =
+      static_cast<int>(queue_.front()->traits.priority());
+  DCHECK_GT(num_tasks_per_priority_[priority_index], 0U);
+  --num_tasks_per_priority_[priority_index];
+
+  queue_.pop();
+  return queue_.empty();
+}
+
+SequenceSortKey Sequence::GetSortKey() const {
+  // Defaults to LOWEST: the loop below only scans priorities strictly above
+  // LOWEST, so a sequence holding only LOWEST-priority tasks keeps this
+  // initial value.
+  TaskPriority priority = TaskPriority::LOWEST;
+  base::TimeTicks next_task_sequenced_time;
+
+  {
+    AutoSchedulerLock auto_lock(lock_);
+    DCHECK(!queue_.empty());
+
+    // Find the highest task priority in the sequence.
+    const int highest_priority_index = static_cast<int>(TaskPriority::HIGHEST);
+    const int lowest_priority_index = static_cast<int>(TaskPriority::LOWEST);
+    for (int i = highest_priority_index; i > lowest_priority_index; --i) {
+      if (num_tasks_per_priority_[i] > 0) {
+        priority = static_cast<TaskPriority>(i);
+        break;
+      }
+    }
+
+    // Save the sequenced time of the next task in the sequence.
+    next_task_sequenced_time = queue_.front()->sequenced_time;
+  }
+
+  // Build the key outside the lock; both inputs were captured above.
+  return SequenceSortKey(priority, next_task_sequenced_time);
+}
+
+Sequence::~Sequence() = default;
+
+} // namespace internal
+} // namespace base
diff --git a/libchrome/base/task_scheduler/sequence.h b/libchrome/base/task_scheduler/sequence.h
new file mode 100644
index 0000000..3fa037f
--- /dev/null
+++ b/libchrome/base/task_scheduler/sequence.h
@@ -0,0 +1,79 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SEQUENCE_H_
+#define BASE_TASK_SCHEDULER_SEQUENCE_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <queue>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/sequence_sort_key.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_traits.h"
+
+namespace base {
+namespace internal {
+
+// A sequence holds tasks that must be executed in posting order.
+//
+// Note: there is a known refcounted-ownership cycle in the Scheduler
+// architecture: Sequence -> Task -> TaskRunner -> Sequence -> ...
+// This is okay so long as the other owners of Sequence (PriorityQueue and
+// SchedulerWorker in alternation and
+// SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork()
+// temporarily) keep running it (and taking Tasks from it as a result). A
+// dangling reference cycle would only occur should they release their reference
+// to it while it's not empty. In other words, it is only correct for them to
+// release it after PopTask() returns false to indicate it was made empty by
+// that call (in which case the next PushTask() will return true to indicate to
+// the caller that the Sequence should be re-enqueued for execution).
+//
+// This class is thread-safe.
+class BASE_EXPORT Sequence : public RefCountedThreadSafe<Sequence> {
+ public:
+ Sequence();
+
+ // Adds |task| at the end of the sequence's queue. Returns true if the
+ // sequence was empty before this operation.
+ bool PushTask(std::unique_ptr<Task> task);
+
+ // Returns the task in front of the sequence's queue, if any.
+ // The returned pointer is owned by the Sequence and is only valid until the
+ // task is removed by PopTask().
+ const Task* PeekTask() const;
+
+ // Removes the task in front of the sequence's queue. Returns true if the
+ // sequence is empty after this operation. Cannot be called on an empty
+ // sequence.
+ bool PopTask();
+
+ // Returns a SequenceSortKey representing the priority of the sequence. Cannot
+ // be called on an empty sequence.
+ SequenceSortKey GetSortKey() const;
+
+ private:
+ friend class RefCountedThreadSafe<Sequence>;
+ // Private: instances are destroyed through RefCountedThreadSafe only.
+ ~Sequence();
+
+ // Synchronizes access to all members.
+ mutable SchedulerLock lock_;
+
+ // Queue of tasks to execute.
+ std::queue<std::unique_ptr<Task>> queue_;
+
+ // Number of tasks contained in the sequence for each priority. Lets
+ // GetSortKey() find the highest pending priority without walking |queue_|.
+ size_t num_tasks_per_priority_[static_cast<int>(TaskPriority::HIGHEST) + 1] =
+ {};
+
+ DISALLOW_COPY_AND_ASSIGN(Sequence);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SEQUENCE_H_
diff --git a/libchrome/base/task_scheduler/sequence_sort_key.cc b/libchrome/base/task_scheduler/sequence_sort_key.cc
new file mode 100644
index 0000000..e356c8b
--- /dev/null
+++ b/libchrome/base/task_scheduler/sequence_sort_key.cc
@@ -0,0 +1,29 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/sequence_sort_key.h"
+
+namespace base {
+namespace internal {
+
+// |priority| is the highest priority among the sequence's pending tasks;
+// |next_task_sequenced_time| is the sequenced time of the task at the front
+// of the sequence (see accessors' documentation in the header).
+SequenceSortKey::SequenceSortKey(TaskPriority priority,
+ TimeTicks next_task_sequenced_time)
+ : priority_(priority),
+ next_task_sequenced_time_(next_task_sequenced_time) {}
+
+// This SequenceSortKey is considered less important than |other| if it has a
+// lower priority, or the same priority but a next task that was posted later
+// than |other|'s.
+bool SequenceSortKey::operator<(const SequenceSortKey& other) const {
+ if (priority_ != other.priority_)
+ return priority_ < other.priority_;
+ return next_task_sequenced_time_ > other.next_task_sequenced_time_;
+}
+
+} // namespace internal
+} // namespace base
diff --git a/libchrome/base/task_scheduler/sequence_sort_key.h b/libchrome/base/task_scheduler/sequence_sort_key.h
new file mode 100644
index 0000000..eb81708
--- /dev/null
+++ b/libchrome/base/task_scheduler/sequence_sort_key.h
@@ -0,0 +1,49 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SEQUENCE_SORT_KEY_H_
+#define BASE_TASK_SCHEDULER_SEQUENCE_SORT_KEY_H_
+
+#include "base/base_export.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+// An immutable but assignable representation of the priority of a Sequence.
+class BASE_EXPORT SequenceSortKey final {
+ public:
+ // |priority| is the highest priority among the sequence's pending tasks and
+ // |next_task_sequenced_time| is the sequenced time of the task at the front
+ // of the sequence when this key is created.
+ SequenceSortKey(TaskPriority priority, TimeTicks next_task_sequenced_time);
+
+ TaskPriority priority() const { return priority_; }
+
+ // Returns true if |*this| is less important than |other|: lower priority, or
+ // equal priority with a later next task (see the .cc for the full rule).
+ bool operator<(const SequenceSortKey& other) const;
+ bool operator>(const SequenceSortKey& other) const { return other < *this; }
+
+ bool operator==(const SequenceSortKey& other) const {
+ return priority_ == other.priority_ &&
+ next_task_sequenced_time_ == other.next_task_sequenced_time_;
+ }
+ bool operator!=(const SequenceSortKey& other) const {
+ return !(other == *this);
+ }
+
+ private:
+ // The private section allows this class to keep its immutable property while
+ // being copy-assignable (i.e. instead of making its members const).
+
+ // Highest task priority in the sequence at the time this sort key was
+ // created.
+ TaskPriority priority_;
+
+ // Sequenced time of the next task to run in the sequence at the time this
+ // sort key was created.
+ TimeTicks next_task_sequenced_time_;
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SEQUENCE_SORT_KEY_H_
diff --git a/libchrome/base/task_scheduler/sequence_sort_key_unittest.cc b/libchrome/base/task_scheduler/sequence_sort_key_unittest.cc
new file mode 100644
index 0000000..2c1d80d
--- /dev/null
+++ b/libchrome/base/task_scheduler/sequence_sort_key_unittest.cc
@@ -0,0 +1,243 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/sequence_sort_key.h"
+
+#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+TEST(TaskSchedulerSequenceSortKeyTest, OperatorLessThan) {
+ // Keys ordered from most to least important: a higher priority wins; for
+ // equal priorities, the earlier sequenced time wins.
+ const SequenceSortKey keys[] = {
+ {TaskPriority::USER_BLOCKING, TimeTicks::FromInternalValue(1000)},
+ {TaskPriority::USER_BLOCKING, TimeTicks::FromInternalValue(2000)},
+ {TaskPriority::USER_VISIBLE, TimeTicks::FromInternalValue(1000)},
+ {TaskPriority::USER_VISIBLE, TimeTicks::FromInternalValue(2000)},
+ {TaskPriority::BACKGROUND, TimeTicks::FromInternalValue(1000)},
+ {TaskPriority::BACKGROUND, TimeTicks::FromInternalValue(2000)},
+ };
+ const int num_keys = static_cast<int>(sizeof(keys) / sizeof(keys[0]));
+
+ // keys[i] < keys[j] exactly when i comes after j in the ordering above.
+ // This checks the same 36 comparisons the previous hand-written assertions
+ // covered, without copy-paste duplication.
+ for (int i = 0; i < num_keys; ++i) {
+ for (int j = 0; j < num_keys; ++j) {
+ EXPECT_EQ(i > j, keys[i] < keys[j]) << "i=" << i << " j=" << j;
+ }
+ }
+}
+
+TEST(TaskSchedulerSequenceSortKeyTest, OperatorGreaterThan) {
+ // Keys ordered from most to least important: a higher priority wins; for
+ // equal priorities, the earlier sequenced time wins.
+ const SequenceSortKey keys[] = {
+ {TaskPriority::USER_BLOCKING, TimeTicks::FromInternalValue(1000)},
+ {TaskPriority::USER_BLOCKING, TimeTicks::FromInternalValue(2000)},
+ {TaskPriority::USER_VISIBLE, TimeTicks::FromInternalValue(1000)},
+ {TaskPriority::USER_VISIBLE, TimeTicks::FromInternalValue(2000)},
+ {TaskPriority::BACKGROUND, TimeTicks::FromInternalValue(1000)},
+ {TaskPriority::BACKGROUND, TimeTicks::FromInternalValue(2000)},
+ };
+ const int num_keys = static_cast<int>(sizeof(keys) / sizeof(keys[0]));
+
+ // keys[i] > keys[j] exactly when i comes before j in the ordering above.
+ // Same 36 comparisons as the previous hand-written assertions.
+ for (int i = 0; i < num_keys; ++i) {
+ for (int j = 0; j < num_keys; ++j) {
+ EXPECT_EQ(i < j, keys[i] > keys[j]) << "i=" << i << " j=" << j;
+ }
+ }
+}
+
+TEST(TaskSchedulerSequenceSortKeyTest, OperatorEqual) {
+ // Six pairwise-distinct keys covering every (priority, time) combination
+ // used by the comparison tests.
+ const SequenceSortKey keys[] = {
+ {TaskPriority::USER_BLOCKING, TimeTicks::FromInternalValue(1000)},
+ {TaskPriority::USER_BLOCKING, TimeTicks::FromInternalValue(2000)},
+ {TaskPriority::USER_VISIBLE, TimeTicks::FromInternalValue(1000)},
+ {TaskPriority::USER_VISIBLE, TimeTicks::FromInternalValue(2000)},
+ {TaskPriority::BACKGROUND, TimeTicks::FromInternalValue(1000)},
+ {TaskPriority::BACKGROUND, TimeTicks::FromInternalValue(2000)},
+ };
+ const int num_keys = static_cast<int>(sizeof(keys) / sizeof(keys[0]));
+
+ // A key is equal only to itself. Same 36 comparisons as the previous
+ // hand-written assertions.
+ for (int i = 0; i < num_keys; ++i) {
+ for (int j = 0; j < num_keys; ++j) {
+ EXPECT_EQ(i == j, keys[i] == keys[j]) << "i=" << i << " j=" << j;
+ }
+ }
+}
+
+TEST(TaskSchedulerSequenceSortKeyTest, OperatorNotEqual) {
+ // Six pairwise-distinct keys covering every (priority, time) combination
+ // used by the comparison tests.
+ const SequenceSortKey keys[] = {
+ {TaskPriority::USER_BLOCKING, TimeTicks::FromInternalValue(1000)},
+ {TaskPriority::USER_BLOCKING, TimeTicks::FromInternalValue(2000)},
+ {TaskPriority::USER_VISIBLE, TimeTicks::FromInternalValue(1000)},
+ {TaskPriority::USER_VISIBLE, TimeTicks::FromInternalValue(2000)},
+ {TaskPriority::BACKGROUND, TimeTicks::FromInternalValue(1000)},
+ {TaskPriority::BACKGROUND, TimeTicks::FromInternalValue(2000)},
+ };
+ const int num_keys = static_cast<int>(sizeof(keys) / sizeof(keys[0]));
+
+ // A key differs from every key but itself. Same 36 comparisons as the
+ // previous hand-written assertions.
+ for (int i = 0; i < num_keys; ++i) {
+ for (int j = 0; j < num_keys; ++j) {
+ EXPECT_EQ(i != j, keys[i] != keys[j]) << "i=" << i << " j=" << j;
+ }
+ }
+}
+
+} // namespace internal
+} // namespace base
diff --git a/libchrome/base/task_scheduler/sequence_unittest.cc b/libchrome/base/task_scheduler/sequence_unittest.cc
new file mode 100644
index 0000000..6a15299
--- /dev/null
+++ b/libchrome/base/task_scheduler/sequence_unittest.cc
@@ -0,0 +1,189 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/sequence.h"
+
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+// Fixture that owns one task per interesting priority combination, handing
+// them off to a Sequence while keeping raw pointers for verification.
+class TaskSchedulerSequenceTest : public testing::Test {
+ public:
+ TaskSchedulerSequenceTest()
+ : task_a_owned_(
+ new Task(FROM_HERE,
+ Closure(),
+ TaskTraits().WithPriority(TaskPriority::BACKGROUND),
+ TimeDelta())),
+ task_b_owned_(
+ new Task(FROM_HERE,
+ Closure(),
+ TaskTraits().WithPriority(TaskPriority::USER_VISIBLE),
+ TimeDelta())),
+ task_c_owned_(
+ new Task(FROM_HERE,
+ Closure(),
+ TaskTraits().WithPriority(TaskPriority::USER_BLOCKING),
+ TimeDelta())),
+ task_d_owned_(
+ new Task(FROM_HERE,
+ Closure(),
+ TaskTraits().WithPriority(TaskPriority::USER_BLOCKING),
+ TimeDelta())),
+ task_e_owned_(
+ new Task(FROM_HERE,
+ Closure(),
+ TaskTraits().WithPriority(TaskPriority::BACKGROUND),
+ TimeDelta())),
+ task_a_(task_a_owned_.get()),
+ task_b_(task_b_owned_.get()),
+ task_c_(task_c_owned_.get()),
+ task_d_(task_d_owned_.get()),
+ task_e_(task_e_owned_.get()) {}
+
+ protected:
+ // Tasks to be handed off to a Sequence for testing.
+ std::unique_ptr<Task> task_a_owned_;
+ std::unique_ptr<Task> task_b_owned_;
+ std::unique_ptr<Task> task_c_owned_;
+ std::unique_ptr<Task> task_d_owned_;
+ std::unique_ptr<Task> task_e_owned_;
+
+ // Raw pointers to those same tasks for verification. This is needed because
+ // the unique_ptrs above no longer point to the tasks once they have been
+ // moved into a Sequence.
+ const Task* task_a_;
+ const Task* task_b_;
+ const Task* task_c_;
+ const Task* task_d_;
+ const Task* task_e_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TaskSchedulerSequenceTest);
+};
+
+} // namespace
+
+// Verifies FIFO ordering and that PushTask()/PopTask() correctly report the
+// empty -> non-empty and non-empty -> empty transitions.
+TEST_F(TaskSchedulerSequenceTest, PushPopPeek) {
+ scoped_refptr<Sequence> sequence(new Sequence);
+
+ // Push task A in the sequence. Its sequenced time should be updated and it
+ // should be in front of the sequence.
+ EXPECT_TRUE(sequence->PushTask(std::move(task_a_owned_)));
+ EXPECT_FALSE(task_a_->sequenced_time.is_null());
+ EXPECT_EQ(task_a_, sequence->PeekTask());
+
+ // Push task B, C and D in the sequence. Their sequenced time should be
+ // updated and task A should always remain in front of the sequence.
+ EXPECT_FALSE(sequence->PushTask(std::move(task_b_owned_)));
+ EXPECT_FALSE(task_b_->sequenced_time.is_null());
+ EXPECT_EQ(task_a_, sequence->PeekTask());
+
+ EXPECT_FALSE(sequence->PushTask(std::move(task_c_owned_)));
+ EXPECT_FALSE(task_c_->sequenced_time.is_null());
+ EXPECT_EQ(task_a_, sequence->PeekTask());
+
+ EXPECT_FALSE(sequence->PushTask(std::move(task_d_owned_)));
+ EXPECT_FALSE(task_d_->sequenced_time.is_null());
+ EXPECT_EQ(task_a_, sequence->PeekTask());
+
+ // Pop task A. Task B should now be in front.
+ EXPECT_FALSE(sequence->PopTask());
+ EXPECT_EQ(task_b_, sequence->PeekTask());
+
+ // Pop task B. Task C should now be in front.
+ EXPECT_FALSE(sequence->PopTask());
+ EXPECT_EQ(task_c_, sequence->PeekTask());
+
+ // Pop task C. Task D should now be in front.
+ EXPECT_FALSE(sequence->PopTask());
+ EXPECT_EQ(task_d_, sequence->PeekTask());
+
+ // Push task E in the sequence. Its sequenced time should be updated and
+ // task D should remain in front.
+ EXPECT_FALSE(sequence->PushTask(std::move(task_e_owned_)));
+ EXPECT_FALSE(task_e_->sequenced_time.is_null());
+ EXPECT_EQ(task_d_, sequence->PeekTask());
+
+ // Pop task D. Task E should now be in front.
+ EXPECT_FALSE(sequence->PopTask());
+ EXPECT_EQ(task_e_, sequence->PeekTask());
+
+ // Pop task E. The sequence should now be empty.
+ EXPECT_TRUE(sequence->PopTask());
+ EXPECT_EQ(nullptr, sequence->PeekTask());
+}
+
+// Verifies that GetSortKey() always reflects the highest pending priority
+// paired with the sequenced time of the task currently in front.
+TEST_F(TaskSchedulerSequenceTest, GetSortKey) {
+ scoped_refptr<Sequence> sequence(new Sequence);
+
+ // Push task A in the sequence. The highest priority is from task A
+ // (BACKGROUND). Task A is in front of the sequence.
+ sequence->PushTask(std::move(task_a_owned_));
+ EXPECT_EQ(SequenceSortKey(TaskPriority::BACKGROUND, task_a_->sequenced_time),
+ sequence->GetSortKey());
+
+ // Push task B in the sequence. The highest priority is from task B
+ // (USER_VISIBLE). Task A is still in front of the sequence.
+ sequence->PushTask(std::move(task_b_owned_));
+ EXPECT_EQ(
+ SequenceSortKey(TaskPriority::USER_VISIBLE, task_a_->sequenced_time),
+ sequence->GetSortKey());
+
+ // Push task C in the sequence. The highest priority is from task C
+ // (USER_BLOCKING). Task A is still in front of the sequence.
+ sequence->PushTask(std::move(task_c_owned_));
+ EXPECT_EQ(
+ SequenceSortKey(TaskPriority::USER_BLOCKING, task_a_->sequenced_time),
+ sequence->GetSortKey());
+
+ // Push task D in the sequence. The highest priority is from tasks C/D
+ // (USER_BLOCKING). Task A is still in front of the sequence.
+ sequence->PushTask(std::move(task_d_owned_));
+ EXPECT_EQ(
+ SequenceSortKey(TaskPriority::USER_BLOCKING, task_a_->sequenced_time),
+ sequence->GetSortKey());
+
+ // Pop task A. The highest priority is still USER_BLOCKING. The task in front
+ // of the sequence is now task B.
+ sequence->PopTask();
+ EXPECT_EQ(
+ SequenceSortKey(TaskPriority::USER_BLOCKING, task_b_->sequenced_time),
+ sequence->GetSortKey());
+
+ // Pop task B. The highest priority is still USER_BLOCKING. The task in front
+ // of the sequence is now task C.
+ sequence->PopTask();
+ EXPECT_EQ(
+ SequenceSortKey(TaskPriority::USER_BLOCKING, task_c_->sequenced_time),
+ sequence->GetSortKey());
+
+ // Pop task C. The highest priority is still USER_BLOCKING. The task in front
+ // of the sequence is now task D.
+ sequence->PopTask();
+ EXPECT_EQ(
+ SequenceSortKey(TaskPriority::USER_BLOCKING, task_d_->sequenced_time),
+ sequence->GetSortKey());
+
+ // Push task E in the sequence. The highest priority is still USER_BLOCKING.
+ // The task in front of the sequence is still task D.
+ sequence->PushTask(std::move(task_e_owned_));
+ EXPECT_EQ(
+ SequenceSortKey(TaskPriority::USER_BLOCKING, task_d_->sequenced_time),
+ sequence->GetSortKey());
+
+ // Pop task D. The highest priority is now from task E (BACKGROUND). The
+ // task in front of the sequence is now task E.
+ sequence->PopTask();
+ EXPECT_EQ(SequenceSortKey(TaskPriority::BACKGROUND, task_e_->sequenced_time),
+ sequence->GetSortKey());
+}
+
+} // namespace internal
+} // namespace base
diff --git a/libchrome/base/task_scheduler/task.cc b/libchrome/base/task_scheduler/task.cc
new file mode 100644
index 0000000..8a589a2
--- /dev/null
+++ b/libchrome/base/task_scheduler/task.cc
@@ -0,0 +1,23 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task.h"
+
+namespace base {
+namespace internal {
+
+// Maps |delay| to an absolute time for the PendingTask base: a zero delay
+// yields a null TimeTicks (undelayed task), otherwise Now() + delay.
+Task::Task(const tracked_objects::Location& posted_from,
+ const Closure& task,
+ const TaskTraits& traits,
+ const TimeDelta& delay)
+ : PendingTask(posted_from,
+ task,
+ delay.is_zero() ? TimeTicks() : TimeTicks::Now() + delay,
+ false), // Not nestable.
+ traits(traits) {}
+
+Task::~Task() = default;
+
+} // namespace internal
+} // namespace base
diff --git a/libchrome/base/task_scheduler/task.h b/libchrome/base/task_scheduler/task.h
new file mode 100644
index 0000000..2b53c69
--- /dev/null
+++ b/libchrome/base/task_scheduler/task.h
@@ -0,0 +1,64 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TASK_H_
+#define BASE_TASK_SCHEDULER_TASK_H_
+
+#include "base/base_export.h"
+#include "base/callback_forward.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/pending_task.h"
+#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+// A task is a unit of work inside the task scheduler. Support for tracing and
+// profiling inherited from PendingTask.
+// A task is a unit of work inside the task scheduler. Support for tracing and
+// profiling inherited from PendingTask.
+struct BASE_EXPORT Task : public PendingTask {
+ // |posted_from| is the site the task was posted from. |task| is the closure
+ // to run. |traits| is metadata about the task. |delay| is a delay that must
+ // expire before the Task runs.
+ Task(const tracked_objects::Location& posted_from,
+ const Closure& task,
+ const TaskTraits& traits,
+ const TimeDelta& delay);
+ // NOTE(review): ~Task() is non-virtual; confirm Tasks are never deleted
+ // through a PendingTask pointer.
+ ~Task();
+
+ // The TaskTraits of this task.
+ const TaskTraits traits;
+
+ // The time at which the task was inserted in its sequence. For an undelayed
+ // task, this happens at post time. For a delayed task, this happens some
+ // time after the task's delay has expired. If the task hasn't been inserted
+ // in a sequence yet, this defaults to a null TimeTicks.
+ TimeTicks sequenced_time;
+
+ // A reference to the SequencedTaskRunner or SingleThreadTaskRunner that
+ // posted this task, if any. Used to set ThreadTaskRunnerHandle and/or
+ // SequencedTaskRunnerHandle while the task is running.
+ // Note: this creates an ownership cycle
+ // Sequence -> Task -> TaskRunner -> Sequence -> ...
+ // but that's okay as it's broken when the Task is popped from its Sequence
+ // after being executed which means this cycle forces the TaskRunner to stick
+ // around until all its tasks have been executed which is a requirement to
+ // support TaskRunnerHandles.
+ scoped_refptr<SequencedTaskRunner> sequenced_task_runner_ref;
+ scoped_refptr<SingleThreadTaskRunner> single_thread_task_runner_ref;
+
+ private:
+ // Disallow copies to make sure no unnecessary ref-bumps are incurred. Making
+ // it move-only would be an option, but isn't necessary for now.
+ DISALLOW_COPY_AND_ASSIGN(Task);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_TASK_H_
diff --git a/libchrome/base/task_scheduler/task_traits.cc b/libchrome/base/task_scheduler/task_traits.cc
new file mode 100644
index 0000000..dd55535
--- /dev/null
+++ b/libchrome/base/task_scheduler/task_traits.cc
@@ -0,0 +1,70 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_traits.h"
+
+#include <stddef.h>
+
+#include <ostream>
+
+namespace base {
+
+// Do not rely on defaults hard-coded below beyond the guarantees described in
+// the header; anything else is subject to change. Tasks should explicitly
+// request defaults if the behavior is critical to the task.
+// Defaults: no file I/O, BACKGROUND priority, SKIP_ON_SHUTDOWN behavior.
+TaskTraits::TaskTraits()
+ : with_file_io_(false),
+ priority_(TaskPriority::BACKGROUND),
+ shutdown_behavior_(TaskShutdownBehavior::SKIP_ON_SHUTDOWN) {}
+
+TaskTraits::~TaskTraits() = default;
+
+// Each setter below mutates *this and returns it so that calls can be
+// chained, e.g. TaskTraits().WithFileIO().WithPriority(...).
+TaskTraits& TaskTraits::WithFileIO() {
+ with_file_io_ = true;
+ return *this;
+}
+
+TaskTraits& TaskTraits::WithPriority(TaskPriority priority) {
+ priority_ = priority;
+ return *this;
+}
+
+TaskTraits& TaskTraits::WithShutdownBehavior(
+ TaskShutdownBehavior shutdown_behavior) {
+ shutdown_behavior_ = shutdown_behavior;
+ return *this;
+}
+
+// Streams the enumerator's name. An unknown value streams nothing, exactly
+// like the previous switch with no default (which also lets the compiler warn
+// when a new enumerator is added without a case here).
+std::ostream& operator<<(std::ostream& os, const TaskPriority& task_priority) {
+ const char* name = "";
+ switch (task_priority) {
+ case TaskPriority::BACKGROUND:
+ name = "BACKGROUND";
+ break;
+ case TaskPriority::USER_VISIBLE:
+ name = "USER_VISIBLE";
+ break;
+ case TaskPriority::USER_BLOCKING:
+ name = "USER_BLOCKING";
+ break;
+ }
+ return os << name;
+}
+
+// Streams the enumerator's name. An unknown value streams nothing, exactly
+// like the previous switch with no default (which also lets the compiler warn
+// when a new enumerator is added without a case here).
+std::ostream& operator<<(std::ostream& os,
+ const TaskShutdownBehavior& shutdown_behavior) {
+ const char* name = "";
+ switch (shutdown_behavior) {
+ case TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN:
+ name = "CONTINUE_ON_SHUTDOWN";
+ break;
+ case TaskShutdownBehavior::SKIP_ON_SHUTDOWN:
+ name = "SKIP_ON_SHUTDOWN";
+ break;
+ case TaskShutdownBehavior::BLOCK_SHUTDOWN:
+ name = "BLOCK_SHUTDOWN";
+ break;
+ }
+ return os << name;
+}
+
+} // namespace base
diff --git a/libchrome/base/task_scheduler/task_traits.h b/libchrome/base/task_scheduler/task_traits.h
new file mode 100644
index 0000000..0c0d304
--- /dev/null
+++ b/libchrome/base/task_scheduler/task_traits.h
@@ -0,0 +1,140 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TASK_TRAITS_H_
+#define BASE_TASK_SCHEDULER_TASK_TRAITS_H_
+
+#include <stdint.h>
+
+#include <iosfwd>
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Valid priorities supported by the task scheduler. Note: internal algorithms
+// depend on priorities being expressed as a continuous zero-based list from
+// lowest to highest priority. Users of this API shouldn't otherwise care about
+// nor use the underlying values.
+enum class TaskPriority {
+ // This will always be equal to the lowest priority available.
+ LOWEST = 0,
+ // User won't notice if this task takes an arbitrarily long time to complete.
+ BACKGROUND = LOWEST,
+ // This task affects UI or responsiveness of future user interactions. It is
+ // not an immediate response to a user interaction.
+ // Examples:
+ // - Updating the UI to reflect progress on a long task.
+ // - Loading data that might be shown in the UI after a future user
+ // interaction.
+ USER_VISIBLE,
+ // This task affects UI immediately after a user interaction.
+ // Example: Generating data shown in the UI immediately after a click.
+ USER_BLOCKING,
+ // This will always be equal to the highest priority available.
+ HIGHEST = USER_BLOCKING,
+ // Note: when adding a priority, update the HIGHEST (and, if needed, LOWEST)
+ // alias so it keeps tracking the extreme value.
+};
+
+// Valid shutdown behaviors supported by the task scheduler.
+enum class TaskShutdownBehavior {
+ // Note: the default for TaskTraits is SKIP_ON_SHUTDOWN (see TaskTraits()).
+ //
+ // Tasks posted with this mode which have not started executing before
+ // shutdown is initiated will never run. Tasks with this mode running at
+ // shutdown will be ignored (the worker will not be joined).
+ //
+ // This option provides a nice way to post stuff you don't want blocking
+ // shutdown. For example, you might be doing a slow DNS lookup and if it's
+ // blocked on the OS, you may not want to stop shutdown, since the result
+ // doesn't really matter at that point.
+ //
+ // However, you need to be very careful what you do in your callback when you
+ // use this option. Since the thread will continue to run until the OS
+ // terminates the process, the app can be in the process of tearing down when
+ // you're running. This means any singletons or global objects you use may
+ // suddenly become invalid out from under you. For this reason, it's best to
+ // use this only for slow but simple operations like the DNS example.
+ CONTINUE_ON_SHUTDOWN,
+
+ // Tasks posted with this mode that have not started executing at
+ // shutdown will never run. However, any task that has already begun
+ // executing when shutdown is invoked will be allowed to continue and
+ // will block shutdown until completion.
+ //
+ // Note: Because TaskScheduler::Shutdown() may block while these tasks are
+ // executing, care must be taken to ensure that they do not block on the
+ // thread that called TaskScheduler::Shutdown(), as this may lead to deadlock.
+ SKIP_ON_SHUTDOWN,
+
+ // Tasks posted with this mode before shutdown is complete will block shutdown
+ // until they're executed. Generally, this should be used only to save
+ // critical user data.
+ //
+ // Note: Tasks with BACKGROUND priority that block shutdown will be promoted
+ // to USER_VISIBLE priority during shutdown.
+ BLOCK_SHUTDOWN,
+};
+
+// Describes metadata for a single task or a group of tasks.
+class BASE_EXPORT TaskTraits {
+ public:
+ // Constructs a default TaskTraits for tasks with
+ // (1) no I/O,
+ // (2) low priority, and
+ // (3) may block shutdown or be skipped on shutdown.
+ // Tasks that require stricter guarantees should highlight those by requesting
+ // explicit traits below.
+ TaskTraits();
+ TaskTraits(const TaskTraits& other) = default;
+ TaskTraits& operator=(const TaskTraits& other) = default;
+ ~TaskTraits();
+
+ // Allows tasks with these traits to do file I/O.
+ // Each setter returns *this so calls can be chained.
+ TaskTraits& WithFileIO();
+
+ // Applies |priority| to tasks with these traits.
+ TaskTraits& WithPriority(TaskPriority priority);
+
+ // Applies |shutdown_behavior| to tasks with these traits.
+ TaskTraits& WithShutdownBehavior(TaskShutdownBehavior shutdown_behavior);
+
+ // Returns true if file I/O is allowed by these traits.
+ bool with_file_io() const { return with_file_io_; }
+
+ // Returns the priority of tasks with these traits.
+ TaskPriority priority() const { return priority_; }
+
+ // Returns the shutdown behavior of tasks with these traits.
+ TaskShutdownBehavior shutdown_behavior() const { return shutdown_behavior_; }
+
+ private:
+ // Whether tasks with these traits may perform file I/O.
+ bool with_file_io_;
+ // Priority applied to tasks with these traits.
+ TaskPriority priority_;
+ // Shutdown behavior applied to tasks with these traits.
+ TaskShutdownBehavior shutdown_behavior_;
+};
+
+// Describes how tasks are executed by a task runner.
+enum class ExecutionMode {
+ // Can execute multiple tasks at a time in any order.
+ PARALLEL,
+
+ // Executes one task at a time in posting order. The sequence's priority is
+ // equivalent to the highest priority pending task in the sequence.
+ SEQUENCED,
+
+ // Executes one task at a time on a single thread in posting order.
+ SINGLE_THREADED,
+};
+
+// Stream operators so TaskPriority and TaskShutdownBehavior can be used in
+// DCHECK statements.
+// Parameter renamed from the copy-pasted |shutdown_behavior| to match the
+// definition in task_traits.cc, which names it |task_priority|.
+BASE_EXPORT std::ostream& operator<<(std::ostream& os,
+ const TaskPriority& task_priority);
+
+BASE_EXPORT std::ostream& operator<<(
+ std::ostream& os,
+ const TaskShutdownBehavior& shutdown_behavior);
+
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_TASK_TRAITS_H_
diff --git a/libchrome/base/task_scheduler/test_utils.h b/libchrome/base/task_scheduler/test_utils.h
new file mode 100644
index 0000000..bafd09a
--- /dev/null
+++ b/libchrome/base/task_scheduler/test_utils.h
@@ -0,0 +1,19 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TEST_UTILS_H_
+#define BASE_TASK_SCHEDULER_TEST_UTILS_H_
+
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Death tests misbehave on Android.
+// EXPECT_DCHECK_DEATH(statement, regex) expects |statement| to die with a
+// message matching |regex| when DCHECKs and death tests are available.
+// Otherwise it expands to nothing — |statement| is NOT executed in that case.
+#if DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+#define EXPECT_DCHECK_DEATH(statement, regex) EXPECT_DEATH(statement, regex)
+#else
+#define EXPECT_DCHECK_DEATH(statement, regex)
+#endif
+
+#endif // BASE_TASK_SCHEDULER_TEST_UTILS_H_
diff --git a/libchrome/base/template_util.h b/libchrome/base/template_util.h
new file mode 100644
index 0000000..1bfc1ac
--- /dev/null
+++ b/libchrome/base/template_util.h
@@ -0,0 +1,133 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEMPLATE_UTIL_H_
+#define BASE_TEMPLATE_UTIL_H_
+
+#include <stddef.h>
+#include <iosfwd>
+#include <type_traits>
+#include <utility>
+
+#include "build/build_config.h"
+
+// This hacks around libstdc++ 4.6 missing stuff in type_traits, while we need
+// to support it.
+#define CR_GLIBCXX_4_7_0 20120322
+#define CR_GLIBCXX_4_5_4 20120702
+#define CR_GLIBCXX_4_6_4 20121127
+#if defined(__GLIBCXX__) && \
+ (__GLIBCXX__ < CR_GLIBCXX_4_7_0 || __GLIBCXX__ == CR_GLIBCXX_4_5_4 || \
+ __GLIBCXX__ == CR_GLIBCXX_4_6_4)
+#define CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
+#endif
+
+namespace base {
+
+// is_non_const_reference<T>::value is true only for non-const lvalue
+// references (T&). Plain types, |const T&|, and rvalue references (which
+// match the unspecialized template) all yield false.
+template <class T> struct is_non_const_reference : std::false_type {};
+template <class T> struct is_non_const_reference<T&> : std::true_type {};
+template <class T> struct is_non_const_reference<const T&> : std::false_type {};
+
+// is_assignable
+
+namespace internal {
+
+// Maps any (First, Second) pair to Second. Used below to attach a SFINAE
+// check (First) to a chosen result type (Second).
+template <typename First, typename Second>
+struct SelectSecond {
+  using type = Second;
+};
+
+// Implicitly constructible from anything via the ellipsis, which also makes
+// any overload taking Any the worst possible match in overload resolution.
+struct Any {
+  Any(...);
+};
+
+// True case: If |Lvalue| can be assigned to from |Rvalue|, then the return
+// value is a true_type.
+template <class Lvalue, class Rvalue>
+typename internal::SelectSecond<
+    decltype((std::declval<Lvalue>() = std::declval<Rvalue>())),
+    std::true_type>::type
+IsAssignableTest(Lvalue&&, Rvalue&&);
+
+// False case: Otherwise the return value is a false_type.
+template <class Rvalue>
+std::false_type IsAssignableTest(internal::Any, Rvalue&&);
+
+// Default case: Neither Lvalue nor Rvalue is void. Uses IsAssignableTest to
+// determine the type of IsAssignableImpl.
+template <class Lvalue,
+          class Rvalue,
+          bool = std::is_void<Lvalue>::value || std::is_void<Rvalue>::value>
+struct IsAssignableImpl
+    : public std::common_type<decltype(
+          internal::IsAssignableTest(std::declval<Lvalue>(),
+                                     std::declval<Rvalue>()))>::type {};
+
+// Void case: Either Lvalue or Rvalue is void. Then the type of IsAssignableTest
+// is false_type.
+template <class Lvalue, class Rvalue>
+struct IsAssignableImpl<Lvalue, Rvalue, true> : public std::false_type {};
+
+// Uses expression SFINAE to detect whether using operator<< would work.
+template <typename T, typename = void>
+struct SupportsOstreamOperator : std::false_type {};
+template <typename T>
+struct SupportsOstreamOperator<T,
+                               decltype(void(std::declval<std::ostream&>()
+                                             << std::declval<T>()))>
+    : std::true_type {};
+
+}  // namespace internal
+
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+template <class Lvalue, class Rvalue>
+struct is_assignable : public internal::IsAssignableImpl<Lvalue, Rvalue> {};
+
+// is_copy_assignable is true if a T const& is assignable to a T&.
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+template <class T>
+struct is_copy_assignable
+    : public is_assignable<typename std::add_lvalue_reference<T>::type,
+                           typename std::add_lvalue_reference<
+                               typename std::add_const<T>::type>::type> {};
+
+// is_move_assignable is true if a T&& is assignable to a T&.
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+// NOTE: the |const| below is applied to a reference type obtained through a
+// template alias, so the language ignores it ([dcl.ref]); the second argument
+// is effectively a plain T&&.
+template <class T>
+struct is_move_assignable
+    : public is_assignable<typename std::add_lvalue_reference<T>::type,
+                           const typename std::add_rvalue_reference<T>::type> {
+};
+
+// underlying_type produces the integer type backing an enum type.
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX)
+// Old libstdc++ lacks std::underlying_type; fall back to the compiler
+// intrinsic that the library trait is implemented with anyway.
+template <typename T>
+struct underlying_type {
+  using type = __underlying_type(T);
+};
+#else
+template <typename T>
+using underlying_type = std::underlying_type<T>;
+#endif
+
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX)
+// Old libstdc++ only ships the pre-standard spelling of this trait.
+template <class T>
+using is_trivially_destructible = std::has_trivial_destructor<T>;
+#else
+template <class T>
+using is_trivially_destructible = std::is_trivially_destructible<T>;
+#endif
+
+} // namespace base
+
+#undef CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
+
+#endif // BASE_TEMPLATE_UTIL_H_
diff --git a/libchrome/base/template_util_unittest.cc b/libchrome/base/template_util_unittest.cc
new file mode 100644
index 0000000..9215964
--- /dev/null
+++ b/libchrome/base/template_util_unittest.cc
@@ -0,0 +1,129 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/template_util.h"
+
+#include <string>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+// Fixture types covering each operator<< detection case exercised below: a
+// plain enum, an enum with an explicit underlying type, scoped enums without
+// and with a user-provided operator<<, and structs without and with one.
+// NOTE(review): uint64_t is used here without including <stdint.h>; this
+// relies on a transitive include -- confirm before reordering includes.
+enum SimpleEnum { SIMPLE_ENUM };
+enum EnumWithExplicitType : uint64_t { ENUM_WITH_EXPLICIT_TYPE };
+enum class ScopedEnum { SCOPED_ENUM };
+enum class ScopedEnumWithOperator { SCOPED_ENUM_WITH_OPERATOR };
+std::ostream& operator<<(std::ostream& os, ScopedEnumWithOperator v) {
+  return os;
+}
+struct SimpleStruct {};
+struct StructWithOperator {};
+std::ostream& operator<<(std::ostream& os, const StructWithOperator& v) {
+  return os;
+}
+
+// is_non_const_reference<Type>
+static_assert(!is_non_const_reference<int>::value, "IsNonConstReference");
+static_assert(!is_non_const_reference<const int&>::value,
+ "IsNonConstReference");
+static_assert(is_non_const_reference<int&>::value, "IsNonConstReference");
+
+class AssignParent {};
+class AssignChild : AssignParent {};
+
+// is_assignable<Type1, Type2>
+static_assert(!is_assignable<int, int>::value, "IsAssignable"); // 1 = 1;
+static_assert(!is_assignable<int, double>::value, "IsAssignable");
+static_assert(is_assignable<int&, int>::value, "IsAssignable");
+static_assert(is_assignable<int&, double>::value, "IsAssignable");
+static_assert(is_assignable<int&, int&>::value, "IsAssignable");
+static_assert(is_assignable<int&, int const&>::value, "IsAssignable");
+static_assert(!is_assignable<int const&, int>::value, "IsAssignable");
+static_assert(!is_assignable<AssignParent&, AssignChild>::value,
+ "IsAssignable");
+static_assert(!is_assignable<AssignChild&, AssignParent>::value,
+ "IsAssignable");
+
+struct AssignCopy {};
+struct AssignNoCopy {
+ AssignNoCopy& operator=(AssignNoCopy&&) { return *this; }
+ AssignNoCopy& operator=(const AssignNoCopy&) = delete;
+};
+struct AssignNoMove {
+ AssignNoMove& operator=(AssignNoMove&&) = delete;
+ AssignNoMove& operator=(const AssignNoMove&) = delete;
+};
+
+static_assert(is_copy_assignable<AssignCopy>::value, "IsCopyAssignable");
+static_assert(!is_copy_assignable<AssignNoCopy>::value, "IsCopyAssignable");
+
+static_assert(is_move_assignable<AssignCopy>::value, "IsMoveAssignable");
+static_assert(is_move_assignable<AssignNoCopy>::value, "IsMoveAssignable");
+static_assert(!is_move_assignable<AssignNoMove>::value, "IsMoveAssignable");
+
+// A few standard types that definitely support printing.
+static_assert(internal::SupportsOstreamOperator<int>::value,
+ "ints should be printable");
+static_assert(internal::SupportsOstreamOperator<const char*>::value,
+ "C strings should be printable");
+static_assert(internal::SupportsOstreamOperator<std::string>::value,
+ "std::string should be printable");
+
+// Various kinds of enums operator<< support.
+static_assert(internal::SupportsOstreamOperator<SimpleEnum>::value,
+ "simple enum should be printable by value");
+static_assert(internal::SupportsOstreamOperator<const SimpleEnum&>::value,
+ "simple enum should be printable by const ref");
+static_assert(internal::SupportsOstreamOperator<EnumWithExplicitType>::value,
+ "enum with explicit type should be printable by value");
+static_assert(
+ internal::SupportsOstreamOperator<const EnumWithExplicitType&>::value,
+ "enum with explicit type should be printable by const ref");
+static_assert(!internal::SupportsOstreamOperator<ScopedEnum>::value,
+ "scoped enum should not be printable by value");
+// Message fixed: this assertion is about ScopedEnum, not the simple enum
+// (the old text was copy-pasted from the SimpleEnum checks above).
+static_assert(!internal::SupportsOstreamOperator<const ScopedEnum&>::value,
+              "scoped enum should not be printable by const ref");
+static_assert(internal::SupportsOstreamOperator<ScopedEnumWithOperator>::value,
+ "scoped enum with operator<< should be printable by value");
+static_assert(
+ internal::SupportsOstreamOperator<const ScopedEnumWithOperator&>::value,
+ "scoped enum with operator<< should be printable by const ref");
+
+// operator<< support on structs.
+static_assert(!internal::SupportsOstreamOperator<SimpleStruct>::value,
+ "simple struct should not be printable by value");
+static_assert(!internal::SupportsOstreamOperator<const SimpleStruct&>::value,
+ "simple struct should not be printable by const ref");
+static_assert(internal::SupportsOstreamOperator<StructWithOperator>::value,
+ "struct with operator<< should be printable by value");
+static_assert(
+ internal::SupportsOstreamOperator<const StructWithOperator&>::value,
+ "struct with operator<< should be printable by const ref");
+
+// underlying type of enums
+static_assert(std::is_integral<underlying_type<SimpleEnum>::type>::value,
+ "simple enum must have some integral type");
+static_assert(
+ std::is_same<underlying_type<EnumWithExplicitType>::type, uint64_t>::value,
+ "explicit type must be detected");
+static_assert(std::is_same<underlying_type<ScopedEnum>::type, int>::value,
+ "scoped enum defaults to int");
+
+struct TriviallyDestructible {
+ int field;
+};
+
+class NonTriviallyDestructible {
+ ~NonTriviallyDestructible() {}
+};
+
+static_assert(is_trivially_destructible<int>::value, "IsTriviallyDestructible");
+static_assert(is_trivially_destructible<TriviallyDestructible>::value,
+ "IsTriviallyDestructible");
+static_assert(!is_trivially_destructible<NonTriviallyDestructible>::value,
+ "IsTriviallyDestructible");
+
+} // namespace
+} // namespace base
diff --git a/libchrome/base/test/BUILD.gn b/libchrome/base/test/BUILD.gn
new file mode 100644
index 0000000..51863a2
--- /dev/null
+++ b/libchrome/base/test/BUILD.gn
@@ -0,0 +1,279 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/ui.gni")
+import("//build/config/nacl/config.gni")
+
+if (is_android) {
+ import("//build/config/android/rules.gni")
+}
+
+static_library("test_config") {
+ testonly = true
+ sources = [
+ "test_switches.cc",
+ "test_switches.h",
+ "test_timeouts.cc",
+ "test_timeouts.h",
+ ]
+ deps = [
+ "//base",
+ ]
+}
+
+# GYP: //base/base.gyp:test_support_base
+static_library("test_support") {
+ testonly = true
+ sources = [
+ "../trace_event/trace_config_memory_test_util.h",
+ "gtest_util.cc",
+ "gtest_util.h",
+ "gtest_xml_unittest_result_printer.cc",
+ "gtest_xml_unittest_result_printer.h",
+ "gtest_xml_util.cc",
+ "gtest_xml_util.h",
+ "histogram_tester.cc",
+ "histogram_tester.h",
+ "ios/wait_util.h",
+ "ios/wait_util.mm",
+ "launcher/test_result.cc",
+ "launcher/test_result.h",
+ "launcher/test_results_tracker.h",
+ "launcher/unit_test_launcher.h",
+ "mock_chrome_application_mac.h",
+ "mock_chrome_application_mac.mm",
+ "mock_devices_changed_observer.cc",
+ "mock_devices_changed_observer.h",
+ "mock_entropy_provider.cc",
+ "mock_entropy_provider.h",
+ "mock_log.cc",
+ "mock_log.h",
+ "multiprocess_test.h",
+ "null_task_runner.cc",
+ "null_task_runner.h",
+ "opaque_ref_counted.cc",
+ "opaque_ref_counted.h",
+ "perf_log.cc",
+ "perf_log.h",
+ "perf_test_suite.cc",
+ "perf_test_suite.h",
+ "perf_time_logger.cc",
+ "perf_time_logger.h",
+ "power_monitor_test_base.cc",
+ "power_monitor_test_base.h",
+ "scoped_command_line.cc",
+ "scoped_command_line.h",
+ "scoped_locale.cc",
+ "scoped_locale.h",
+ "scoped_path_override.cc",
+ "scoped_path_override.h",
+ "sequenced_task_runner_test_template.cc",
+ "sequenced_task_runner_test_template.h",
+ "sequenced_worker_pool_owner.cc",
+ "sequenced_worker_pool_owner.h",
+ "simple_test_clock.cc",
+ "simple_test_clock.h",
+ "simple_test_tick_clock.cc",
+ "simple_test_tick_clock.h",
+ "task_runner_test_template.cc",
+ "task_runner_test_template.h",
+ "test_discardable_memory_allocator.cc",
+ "test_discardable_memory_allocator.h",
+ "test_file_util.cc",
+ "test_file_util.h",
+ "test_file_util_android.cc",
+ "test_file_util_linux.cc",
+ "test_file_util_mac.cc",
+ "test_file_util_posix.cc",
+ "test_file_util_win.cc",
+ "test_io_thread.cc",
+ "test_io_thread.h",
+ "test_listener_ios.h",
+ "test_listener_ios.mm",
+ "test_message_loop.cc",
+ "test_message_loop.h",
+ "test_mock_time_task_runner.cc",
+ "test_mock_time_task_runner.h",
+ "test_pending_task.cc",
+ "test_pending_task.h",
+ "test_reg_util_win.cc",
+ "test_reg_util_win.h",
+ "test_shortcut_win.cc",
+ "test_shortcut_win.h",
+ "test_simple_task_runner.cc",
+ "test_simple_task_runner.h",
+ "test_suite.cc",
+ "test_suite.h",
+ "test_support_android.cc",
+ "test_support_android.h",
+ "test_support_ios.h",
+ "test_support_ios.mm",
+ "test_ui_thread_android.cc",
+ "test_ui_thread_android.h",
+ "thread_test_helper.cc",
+ "thread_test_helper.h",
+ "trace_event_analyzer.cc",
+ "trace_event_analyzer.h",
+ "trace_to_file.cc",
+ "trace_to_file.h",
+ "user_action_tester.cc",
+ "user_action_tester.h",
+ "values_test_util.cc",
+ "values_test_util.h",
+ ]
+
+ if (is_ios) {
+ sources += [ "launcher/unit_test_launcher_ios.cc" ]
+ } else if (!is_nacl_nonsfi) {
+ sources += [
+ "launcher/test_launcher.cc",
+ "launcher/test_launcher.h",
+ "launcher/test_launcher_tracer.cc",
+ "launcher/test_launcher_tracer.h",
+ "launcher/test_results_tracker.cc",
+ "launcher/unit_test_launcher.cc",
+ "multiprocess_test.cc",
+ "multiprocess_test_android.cc",
+ ]
+ }
+
+ configs += [ "//build/config:precompiled_headers" ]
+
+ data = [
+ # The isolate needs this script for setting up the test. It's not actually
+ # needed to run this target locally.
+ "//testing/test_env.py",
+ ]
+
+ public_deps = [
+ ":test_config",
+ "//base",
+ "//base:base_static",
+ "//base:i18n",
+ ]
+ deps = [
+ "//base/third_party/dynamic_annotations",
+ "//testing/gmock",
+ "//testing/gtest",
+ "//third_party/icu:icuuc",
+ "//third_party/libxml",
+ ]
+
+ if (!is_posix) {
+ sources -= [
+ "scoped_locale.cc",
+ "scoped_locale.h",
+ ]
+ }
+
+ if (is_ios) {
+ set_sources_assignment_filter([])
+ sources += [ "test_file_util_mac.cc" ]
+ set_sources_assignment_filter(sources_assignment_filter)
+ }
+
+ if (is_mac) {
+ libs = [ "AppKit.framework" ]
+ }
+
+ if (is_android) {
+ deps += [ ":base_unittests_jni_headers" ]
+ }
+
+ if (is_nacl_nonsfi) {
+ sources += [
+ "launcher/test_launcher.h",
+ "launcher/test_result.h",
+ "launcher/unit_test_launcher.h",
+ "launcher/unit_test_launcher_nacl_nonsfi.cc",
+ ]
+ sources -= [
+ "gtest_xml_util.cc",
+ "gtest_xml_util.h",
+ "perf_test_suite.cc",
+ "perf_test_suite.h",
+ "scoped_path_override.cc",
+ "scoped_path_override.h",
+ "test_discardable_memory_allocator.cc",
+ "test_discardable_memory_allocator.h",
+ "test_file_util.cc",
+ "test_file_util.h",
+ "test_file_util_posix.cc",
+ "test_suite.cc",
+ "test_suite.h",
+ "trace_to_file.cc",
+ "trace_to_file.h",
+ ]
+ public_deps -= [ "//base:i18n" ]
+ deps -= [
+ "//third_party/icu:icuuc",
+ "//third_party/libxml",
+ ]
+ }
+}
+
+config("perf_test_config") {
+ defines = [ "PERF_TEST" ]
+}
+
+# This is a source set instead of a static library because it seems like some
+# linkers get confused when "main" is in a static library, and if you link to
+# this, you always want the object file anyway.
+source_set("test_support_perf") {
+ testonly = true
+ sources = [
+ "run_all_perftests.cc",
+ ]
+ deps = [
+ ":test_support",
+ "//base",
+ "//testing/gtest",
+ ]
+
+ public_configs = [ ":perf_test_config" ]
+}
+
+static_library("test_launcher_nacl_nonsfi") {
+ testonly = true
+ sources = [
+ "launcher/test_launcher_nacl_nonsfi.cc",
+ "launcher/test_launcher_nacl_nonsfi.h",
+ ]
+ deps = [
+ ":test_support",
+ ]
+}
+
+static_library("run_all_unittests") {
+ testonly = true
+ sources = [
+ "run_all_unittests.cc",
+ ]
+ deps = [
+ ":test_support",
+ ]
+}
+
+if (is_linux) {
+ shared_library("malloc_wrapper") {
+ testonly = true
+ sources = [
+ "malloc_wrapper.cc",
+ ]
+ deps = [
+ "//base",
+ "//build/config/sanitizers:deps",
+ ]
+ }
+}
+
+if (is_android) {
+ generate_jni("base_unittests_jni_headers") {
+ sources = [
+ "android/java/src/org/chromium/base/ContentUriTestUtils.java",
+ "android/java/src/org/chromium/base/TestUiThread.java",
+ ]
+ jni_package = "base"
+ }
+}
diff --git a/libchrome/base/test/DEPS b/libchrome/base/test/DEPS
new file mode 100644
index 0000000..5827c26
--- /dev/null
+++ b/libchrome/base/test/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+third_party/libxml",
+]
diff --git a/libchrome/base/test/OWNERS b/libchrome/base/test/OWNERS
new file mode 100644
index 0000000..92ecc88
--- /dev/null
+++ b/libchrome/base/test/OWNERS
@@ -0,0 +1 @@
+phajdan.jr@chromium.org
diff --git a/libchrome/base/test/data/file_util/binary_file.bin b/libchrome/base/test/data/file_util/binary_file.bin
new file mode 100644
index 0000000..f53cc82
--- /dev/null
+++ b/libchrome/base/test/data/file_util/binary_file.bin
Binary files differ
diff --git a/libchrome/base/test/data/file_util/binary_file_diff.bin b/libchrome/base/test/data/file_util/binary_file_diff.bin
new file mode 100644
index 0000000..103b26d
--- /dev/null
+++ b/libchrome/base/test/data/file_util/binary_file_diff.bin
Binary files differ
diff --git a/libchrome/base/test/data/file_util/binary_file_same.bin b/libchrome/base/test/data/file_util/binary_file_same.bin
new file mode 100644
index 0000000..f53cc82
--- /dev/null
+++ b/libchrome/base/test/data/file_util/binary_file_same.bin
Binary files differ
diff --git a/libchrome/base/test/data/file_util/blank_line.txt b/libchrome/base/test/data/file_util/blank_line.txt
new file mode 100644
index 0000000..8892069
--- /dev/null
+++ b/libchrome/base/test/data/file_util/blank_line.txt
@@ -0,0 +1,3 @@
+The next line is blank.
+
+But this one isn't.
diff --git a/libchrome/base/test/data/file_util/blank_line_crlf.txt b/libchrome/base/test/data/file_util/blank_line_crlf.txt
new file mode 100644
index 0000000..3aefe52
--- /dev/null
+++ b/libchrome/base/test/data/file_util/blank_line_crlf.txt
@@ -0,0 +1,3 @@
+The next line is blank.
+
+But this one isn't.
diff --git a/libchrome/base/test/data/file_util/crlf.txt b/libchrome/base/test/data/file_util/crlf.txt
new file mode 100644
index 0000000..0e62728
--- /dev/null
+++ b/libchrome/base/test/data/file_util/crlf.txt
@@ -0,0 +1 @@
+This file is the same.
diff --git a/libchrome/base/test/data/file_util/different.txt b/libchrome/base/test/data/file_util/different.txt
new file mode 100644
index 0000000..5b9f9c4
--- /dev/null
+++ b/libchrome/base/test/data/file_util/different.txt
@@ -0,0 +1 @@
+This file is different.
diff --git a/libchrome/base/test/data/file_util/different_first.txt b/libchrome/base/test/data/file_util/different_first.txt
new file mode 100644
index 0000000..8661d66
--- /dev/null
+++ b/libchrome/base/test/data/file_util/different_first.txt
@@ -0,0 +1 @@
+this file is the same.
diff --git a/libchrome/base/test/data/file_util/different_last.txt b/libchrome/base/test/data/file_util/different_last.txt
new file mode 100644
index 0000000..e8b3e5a
--- /dev/null
+++ b/libchrome/base/test/data/file_util/different_last.txt
@@ -0,0 +1 @@
+This file is the same.
\ No newline at end of file
diff --git a/libchrome/base/test/data/file_util/empty1.txt b/libchrome/base/test/data/file_util/empty1.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/libchrome/base/test/data/file_util/empty1.txt
diff --git a/libchrome/base/test/data/file_util/empty2.txt b/libchrome/base/test/data/file_util/empty2.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/libchrome/base/test/data/file_util/empty2.txt
diff --git a/libchrome/base/test/data/file_util/first1.txt b/libchrome/base/test/data/file_util/first1.txt
new file mode 100644
index 0000000..2c6e300
--- /dev/null
+++ b/libchrome/base/test/data/file_util/first1.txt
@@ -0,0 +1,2 @@
+The first line is the same.
+The second line is different.
diff --git a/libchrome/base/test/data/file_util/first2.txt b/libchrome/base/test/data/file_util/first2.txt
new file mode 100644
index 0000000..e39b5ec
--- /dev/null
+++ b/libchrome/base/test/data/file_util/first2.txt
@@ -0,0 +1,2 @@
+The first line is the same.
+The second line is not.
diff --git a/libchrome/base/test/data/file_util/original.txt b/libchrome/base/test/data/file_util/original.txt
new file mode 100644
index 0000000..4422f57
--- /dev/null
+++ b/libchrome/base/test/data/file_util/original.txt
@@ -0,0 +1 @@
+This file is the same.
diff --git a/libchrome/base/test/data/file_util/red.png b/libchrome/base/test/data/file_util/red.png
new file mode 100644
index 0000000..0806141
--- /dev/null
+++ b/libchrome/base/test/data/file_util/red.png
Binary files differ
diff --git a/libchrome/base/test/data/file_util/same.txt b/libchrome/base/test/data/file_util/same.txt
new file mode 100644
index 0000000..4422f57
--- /dev/null
+++ b/libchrome/base/test/data/file_util/same.txt
@@ -0,0 +1 @@
+This file is the same.
diff --git a/libchrome/base/test/data/file_util/same_length.txt b/libchrome/base/test/data/file_util/same_length.txt
new file mode 100644
index 0000000..157405c
--- /dev/null
+++ b/libchrome/base/test/data/file_util/same_length.txt
@@ -0,0 +1 @@
+This file is not same.
diff --git a/libchrome/base/test/data/file_util/shortened.txt b/libchrome/base/test/data/file_util/shortened.txt
new file mode 100644
index 0000000..2bee82c
--- /dev/null
+++ b/libchrome/base/test/data/file_util/shortened.txt
@@ -0,0 +1 @@
+This file is the
\ No newline at end of file
diff --git a/libchrome/base/test/data/file_version_info_unittest/FileVersionInfoTest1.dll b/libchrome/base/test/data/file_version_info_unittest/FileVersionInfoTest1.dll
new file mode 100755
index 0000000..bdf8dc0
--- /dev/null
+++ b/libchrome/base/test/data/file_version_info_unittest/FileVersionInfoTest1.dll
Binary files differ
diff --git a/libchrome/base/test/data/file_version_info_unittest/FileVersionInfoTest2.dll b/libchrome/base/test/data/file_version_info_unittest/FileVersionInfoTest2.dll
new file mode 100755
index 0000000..51e7966
--- /dev/null
+++ b/libchrome/base/test/data/file_version_info_unittest/FileVersionInfoTest2.dll
Binary files differ
diff --git a/libchrome/base/test/data/json/bom_feff.json b/libchrome/base/test/data/json/bom_feff.json
new file mode 100644
index 0000000..b05ae50
--- /dev/null
+++ b/libchrome/base/test/data/json/bom_feff.json
@@ -0,0 +1,10 @@
+{
+ "appName": {
+ "message": "Gmail",
+ "description": "App name."
+ },
+ "appDesc": {
+ "message": "بريد إلكتروني يوفر إمكانية البحث مع مقدار أقل من الرسائل غير المرغوب فيها.",
+ "description":"App description."
+ }
+}
\ No newline at end of file
diff --git a/libchrome/base/test/data/pe_image/pe_image_test_32.dll b/libchrome/base/test/data/pe_image/pe_image_test_32.dll
new file mode 100755
index 0000000..539d631
--- /dev/null
+++ b/libchrome/base/test/data/pe_image/pe_image_test_32.dll
Binary files differ
diff --git a/libchrome/base/test/data/pe_image/pe_image_test_64.dll b/libchrome/base/test/data/pe_image/pe_image_test_64.dll
new file mode 100755
index 0000000..8801e23
--- /dev/null
+++ b/libchrome/base/test/data/pe_image/pe_image_test_64.dll
Binary files differ
diff --git a/libchrome/base/test/data/serializer_nested_test.json b/libchrome/base/test/data/serializer_nested_test.json
new file mode 100644
index 0000000..cfea8e8
--- /dev/null
+++ b/libchrome/base/test/data/serializer_nested_test.json
@@ -0,0 +1,17 @@
+{
+ "bool": true,
+ "dict": {
+ "bool": true,
+ "dict": {
+ "bees": "knees",
+ "cats": "meow"
+ },
+ "foos": "bar",
+ "list": [ 3.4, "second", null ]
+ },
+ "int": 42,
+ "list": [ 1, 2 ],
+ "null": null,
+ "real": 3.14,
+ "string": "hello"
+}
diff --git a/libchrome/base/test/data/serializer_test.json b/libchrome/base/test/data/serializer_test.json
new file mode 100644
index 0000000..446925e
--- /dev/null
+++ b/libchrome/base/test/data/serializer_test.json
@@ -0,0 +1,8 @@
+{
+ "bool": true,
+ "int": 42,
+ "list": [ 1, 2 ],
+ "null": null,
+ "real": 3.14,
+ "string": "hello"
+}
diff --git a/libchrome/base/test/data/serializer_test_nowhitespace.json b/libchrome/base/test/data/serializer_test_nowhitespace.json
new file mode 100644
index 0000000..a1afdc5
--- /dev/null
+++ b/libchrome/base/test/data/serializer_test_nowhitespace.json
@@ -0,0 +1 @@
+{"bool":true,"int":42,"list":[1,2],"null":null,"real":3.14,"string":"hello"}
\ No newline at end of file
diff --git a/libchrome/base/test/ios/OWNERS b/libchrome/base/test/ios/OWNERS
new file mode 100644
index 0000000..40a68c7
--- /dev/null
+++ b/libchrome/base/test/ios/OWNERS
@@ -0,0 +1 @@
+rohitrao@chromium.org
diff --git a/libchrome/base/test/multiprocess_test.cc b/libchrome/base/test/multiprocess_test.cc
new file mode 100644
index 0000000..de56e7f
--- /dev/null
+++ b/libchrome/base/test/multiprocess_test.cc
@@ -0,0 +1,62 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/multiprocess_test.h"
+
+#include "base/base_switches.h"
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "build/build_config.h"
+
+namespace base {
+
+#if !defined(OS_ANDROID) && !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
+// Spawns a child that runs |procname| by relaunching the test binary with the
+// switches::kTestChildProcess switch appended -- unless the caller's command
+// line already carries one, in which case it is left untouched.
+Process SpawnMultiProcessTestChild(
+    const std::string& procname,
+    const CommandLine& base_command_line,
+    const LaunchOptions& options) {
+  CommandLine command_line(base_command_line);
+  // TODO(viettrungluu): See comment above |MakeCmdLine()| in the header file.
+  // This is a temporary hack, since |MakeCmdLine()| has to provide a full
+  // command line.
+  if (!command_line.HasSwitch(switches::kTestChildProcess))
+    command_line.AppendSwitchASCII(switches::kTestChildProcess, procname);
+
+  return LaunchProcess(command_line, options);
+}
+#endif  // !OS_ANDROID && !__ANDROID__ && !__ANDROID_HOST__
+
+// Returns a copy of the current process' command line with the program path
+// made absolute -- presumably so the relaunch works regardless of the working
+// directory the test (or its child) runs under.
+CommandLine GetMultiProcessTestChildBaseCommandLine() {
+  CommandLine cmd_line = *CommandLine::ForCurrentProcess();
+  cmd_line.SetProgram(MakeAbsoluteFilePath(cmd_line.GetProgram()));
+  return cmd_line;
+}
+
+// MultiProcessTest ------------------------------------------------------------
+
+MultiProcessTest::MultiProcessTest() {
+}
+
+// Spawns |procname| with default launch options (hidden window on Windows).
+Process MultiProcessTest::SpawnChild(const std::string& procname) {
+  LaunchOptions options;
+#if defined(OS_WIN)
+  options.start_hidden = true;
+#endif
+  return SpawnChildWithOptions(procname, options);
+}
+
+// Spawns |procname| using the command line produced by the overridable
+// MakeCmdLine() hook.
+Process MultiProcessTest::SpawnChildWithOptions(
+    const std::string& procname,
+    const LaunchOptions& options) {
+  return SpawnMultiProcessTestChild(procname, MakeCmdLine(procname), options);
+}
+
+// Default command line for a child: the base command line plus the
+// test-child switch naming |procname|.
+CommandLine MultiProcessTest::MakeCmdLine(const std::string& procname) {
+  CommandLine command_line = GetMultiProcessTestChildBaseCommandLine();
+  command_line.AppendSwitchASCII(switches::kTestChildProcess, procname);
+  return command_line;
+}
+
+} // namespace base
diff --git a/libchrome/base/test/multiprocess_test.h b/libchrome/base/test/multiprocess_test.h
new file mode 100644
index 0000000..ae4c3eb
--- /dev/null
+++ b/libchrome/base/test/multiprocess_test.h
@@ -0,0 +1,150 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_MULTIPROCESS_TEST_H_
+#define BASE_TEST_MULTIPROCESS_TEST_H_
+
+#include <string>
+
+#include "base/macros.h"
+#include "base/process/launch.h"
+#include "base/process/process.h"
+#include "build/build_config.h"
+#include "testing/platform_test.h"
+
+namespace base {
+
+class CommandLine;
+
+// Helpers to spawn a child for a multiprocess test and execute a designated
+// function. Use these when you already have another base class for your test
+// fixture, but you want (some) of your tests to be multiprocess (otherwise you
+// may just want to derive your fixture from |MultiProcessTest|, below).
+//
+// Use these helpers as follows:
+//
+// TEST_F(MyTest, ATest) {
+// CommandLine command_line(
+// base::GetMultiProcessTestChildBaseCommandLine());
+// // Maybe add our own switches to |command_line|....
+//
+// LaunchOptions options;
+// // Maybe set some options (e.g., |start_hidden| on Windows)....
+//
+// // Start a child process and run |a_test_func|.
+// base::Process test_child_process =
+// base::SpawnMultiProcessTestChild("a_test_func", command_line,
+// options);
+//
+// // Do stuff involving |test_child_process| and the child process....
+//
+// int rv = -1;
+// ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+// TestTimeouts::action_timeout(), &rv));
+// EXPECT_EQ(0, rv);
+// }
+//
+// // Note: |MULTIPROCESS_TEST_MAIN()| is defined in
+// // testing/multi_process_function_list.h.
+// MULTIPROCESS_TEST_MAIN(a_test_func) {
+// // Code here runs in a child process....
+// return 0;
+// }
+
+// Spawns a child process and executes the function |procname| declared using
+// |MULTIPROCESS_TEST_MAIN()| or |MULTIPROCESS_TEST_MAIN_WITH_SETUP()|.
+// |command_line| should be as provided by
+// |GetMultiProcessTestChildBaseCommandLine()| (below), possibly with arguments
+// added. Note: On Windows, you probably want to set |options.start_hidden|.
+Process SpawnMultiProcessTestChild(
+ const std::string& procname,
+ const CommandLine& command_line,
+ const LaunchOptions& options);
+
+// Gets the base command line for |SpawnMultiProcessTestChild()|. To this, you
+// may add any flags needed for your child process.
+CommandLine GetMultiProcessTestChildBaseCommandLine();
+
+#if defined(OS_ANDROID)
+
+// Enable the alternate test child implementation which support spawning a child
+// after threads have been created. If used, this MUST be the first line of
+// main(). The main function is passed in to avoid a link-time dependency in
+// component builds.
+void InitAndroidMultiProcessTestHelper(int (*main)(int, char**));
+
+// Returns true if the current process is a test child.
+bool AndroidIsChildProcess();
+
+// Wait for a test child to exit if the alternate test child implementation is
+// being used.
+bool AndroidWaitForChildExitWithTimeout(
+ const Process& process, TimeDelta timeout, int* exit_code)
+ WARN_UNUSED_RESULT;
+
+#endif // defined(OS_ANDROID)
+
+// MultiProcessTest ------------------------------------------------------------
+
+// A MultiProcessTest is a test class which makes it easier to
+// write a test which requires code running out of process.
+//
+// To create a multiprocess test simply follow these steps:
+//
+// 1) Derive your test from MultiProcessTest. Example:
+//
+// class MyTest : public MultiProcessTest {
+// };
+//
+// TEST_F(MyTest, TestCaseName) {
+// ...
+// }
+//
+// 2) Create a mainline function for the child processes and include
+// testing/multiprocess_func_list.h.
+// See the declaration of the MULTIPROCESS_TEST_MAIN macro
+// in that file for an example.
+// 3) Call SpawnChild("foo"), where "foo" is the name of
+// the function you wish to run in the child processes.
+// That's it!
+class MultiProcessTest : public PlatformTest {
+ public:
+  // Trivial; the definition in multiprocess_test.cc has an empty body.
+  MultiProcessTest();
+
+ protected:
+  // Run a child process.
+  // 'procname' is the name of a function which the child will
+  // execute.  It must be exported from this library in order to
+  // run.
+  //
+  // Example signature:
+  //    extern "C" int __declspec(dllexport) FooBar() {
+  //      // do client work here
+  //    }
+  //
+  // Returns the child process.
+  Process SpawnChild(const std::string& procname);
+
+  // Run a child process using the given launch options.
+  //
+  // Note: On Windows, you probably want to set |options.start_hidden|.
+  Process SpawnChildWithOptions(const std::string& procname,
+                                const LaunchOptions& options);
+
+  // Set up the command line used to spawn the child process.
+  // Override this to add things to the command line (calling this first in the
+  // override).
+  // Note that currently some tests rely on this providing a full command line,
+  // which they then use directly with |LaunchProcess()|.
+  // TODO(viettrungluu): Remove this and add a virtual
+  // |ModifyChildCommandLine()|; make the two divergent uses more sane.
+  virtual CommandLine MakeCmdLine(const std::string& procname);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MultiProcessTest);
+};
+
+} // namespace base
+
+#endif // BASE_TEST_MULTIPROCESS_TEST_H_
diff --git a/libchrome/base/test/multiprocess_test_android.cc b/libchrome/base/test/multiprocess_test_android.cc
new file mode 100644
index 0000000..f58b452
--- /dev/null
+++ b/libchrome/base/test/multiprocess_test_android.cc
@@ -0,0 +1,454 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/multiprocess_test.h"
+
+#include <errno.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/base_switches.h"
+#include "base/command_line.h"
+#include "base/containers/hash_tables.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/pickle.h"
+#include "base/posix/global_descriptors.h"
+#include "base/posix/unix_domain_socket_linux.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace base {
+
+namespace {
+
+// Upper bound on a fully reassembled message; enforced in Recv().
+const int kMaxMessageSize = 1024 * 1024;
+// Messages travel over the socket in chunks of at most this many bytes.
+const int kFragmentSize = 4096;
+
+// Message sent between parent process and helper child process.
+enum class MessageType : uint32_t {
+  START_REQUEST,
+  START_RESPONSE,
+  WAIT_REQUEST,
+  WAIT_RESPONSE,
+};
+
+// Common prefix of every message on the wire.
+struct MessageHeader {
+  uint32_t size;  // Total message size in bytes, including this header.
+  MessageType type;
+};
+
+// Asks the helper to fork a test child. The struct is followed on the wire
+// by a Pickle holding |num_args| strings (the argv) and |num_fds| ints (the
+// fd numbers the descriptors attached to the message should be dup2()'d to).
+struct StartProcessRequest {
+  MessageHeader header =
+      {sizeof(StartProcessRequest), MessageType::START_REQUEST};
+
+  uint32_t num_args = 0;
+  uint32_t num_fds = 0;
+};
+
+struct StartProcessResponse {
+  MessageHeader header =
+      {sizeof(StartProcessResponse), MessageType::START_RESPONSE};
+
+  pid_t child_pid;  // Pid of the freshly forked test child.
+};
+
+struct WaitProcessRequest {
+  MessageHeader header =
+      {sizeof(WaitProcessRequest), MessageType::WAIT_REQUEST};
+
+  pid_t pid;            // Child to wait for.
+  uint64_t timeout_ms;  // Wait timeout in milliseconds.
+};
+
+struct WaitProcessResponse {
+  MessageHeader header =
+      {sizeof(WaitProcessResponse), MessageType::WAIT_RESPONSE};
+
+  bool success = false;   // True if the wait completed within the timeout.
+  int32_t exit_code = 0;  // Only meaningful when |success| is true.
+};
+
+// Helper class that implements an alternate test child launcher for
+// multi-process tests. The default implementation doesn't work if the child is
+// launched after starting threads. However, for some tests (i.e. Mojo), this
+// is necessary. This implementation works around that issue by forking a helper
+// process very early in main(), before any real work is done. Then, when a
+// child needs to be spawned, a message is sent to that helper process, which
+// then forks and returns the result to the parent. The forked child then calls
+// main() and things look as though a brand new process has been fork/exec'd.
+class LaunchHelper {
+ public:
+  using MainFunction = int (*)(int, char**);
+
+  LaunchHelper() {}
+
+  // Initialise the alternate test child implementation.
+  void Init(MainFunction main);
+
+  // Starts a child test helper process.
+  Process StartChildTestHelper(const std::string& procname,
+                               const CommandLine& base_command_line,
+                               const LaunchOptions& options);
+
+  // Waits for a child test helper process.
+  bool WaitForChildExitWithTimeout(const Process& process, TimeDelta timeout,
+                                   int* exit_code);
+
+  // True once Init() has run in the parent process.
+  bool IsReady() const { return child_fd_ != -1; }
+  // True inside the forked helper process itself.
+  bool IsChild() const { return is_child_; }
+
+ private:
+  // Wrappers around sendmsg/recvmsg that supports message fragmentation.
+  void Send(int fd, const MessageHeader* msg, const std::vector<int>& fds);
+  ssize_t Recv(int fd, void* buf, std::vector<ScopedFD>* fds);
+
+  // Parent process implementation.
+  void DoParent(int fd);
+  // Helper process implementation.
+  void DoHelper(int fd);
+
+  void StartProcessInHelper(const StartProcessRequest* request,
+                            std::vector<ScopedFD> fds);
+  void WaitForChildInHelper(const WaitProcessRequest* request);
+
+  // Set in DoHelper(); distinguishes the helper from the real parent.
+  bool is_child_ = false;
+
+  // Parent vars.
+  int child_fd_ = -1;  // Parent's end of the socketpair to the helper.
+
+  // Helper vars.
+  int parent_fd_ = -1;  // Helper's end of the socketpair to the parent.
+  MainFunction main_ = nullptr;  // Real main() to run in forked test children.
+
+  DISALLOW_COPY_AND_ASSIGN(LaunchHelper);
+};
+
+void LaunchHelper::Init(MainFunction main) {
+  main_ = main;
+
+  // Create a communication channel between the parent and child launch helper.
+  // fd[0] belongs to the parent, fd[1] belongs to the child.
+  int fds[2] = {-1, -1};
+  // SOCK_SEQPACKET preserves message boundaries, which the Send()/Recv()
+  // framing relies on.
+  int rv = socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds);
+  PCHECK(rv == 0);
+  CHECK_NE(-1, fds[0]);
+  CHECK_NE(-1, fds[1]);
+
+  pid_t pid = fork();
+  PCHECK(pid >= 0) << "Fork failed";
+  if (pid) {
+    // Parent.
+    rv = close(fds[1]);
+    PCHECK(rv == 0);
+    DoParent(fds[0]);
+  } else {
+    // Helper. DoHelper() loops servicing requests and only leaves via
+    // _exit(), so this branch never falls through.
+    rv = close(fds[0]);
+    PCHECK(rv == 0);
+    DoHelper(fds[1]);
+    NOTREACHED();
+    _exit(0);
+  }
+}
+
+// Sends |msg| over |fd| in chunks of at most kFragmentSize bytes. Any file
+// descriptors in |fds| are attached to the first fragment only.
+void LaunchHelper::Send(
+    int fd, const MessageHeader* msg, const std::vector<int>& fds) {
+  uint32_t bytes_remaining = msg->size;
+  const char* buf = reinterpret_cast<const char*>(msg);
+  while (bytes_remaining) {
+    size_t send_size =
+        (bytes_remaining > kFragmentSize) ? kFragmentSize : bytes_remaining;
+    bool success = UnixDomainSocket::SendMsg(
+        fd, buf, send_size,
+        // First fragment iff nothing has been sent yet.
+        (bytes_remaining == msg->size) ? fds : std::vector<int>());
+    CHECK(success);
+    bytes_remaining -= send_size;
+    buf += send_size;
+  }
+}
+
+// Receives one (possibly fragmented) message into |buf|. Returns the total
+// message size, or the first non-positive recv result on error/EOF.
+ssize_t LaunchHelper::Recv(int fd, void* buf, std::vector<ScopedFD>* fds) {
+  // The first fragment begins with a MessageHeader giving the total size.
+  ssize_t size = UnixDomainSocket::RecvMsg(fd, buf, kFragmentSize, fds);
+  if (size <= 0)
+    return size;
+
+  const MessageHeader* header = reinterpret_cast<const MessageHeader*>(buf);
+  CHECK(header->size < kMaxMessageSize);
+  // NOTE(review): assumes header->size >= the first fragment's size, which
+  // holds for messages produced by Send(); a misbehaving peer could make this
+  // subtraction wrap. Fine as long as the peer is the trusted helper/parent.
+  uint32_t bytes_remaining = header->size - size;
+  char* buffer = reinterpret_cast<char*>(buf);
+  buffer += size;
+  while (bytes_remaining) {
+    std::vector<ScopedFD> dummy_fds;
+    size = UnixDomainSocket::RecvMsg(fd, buffer, kFragmentSize, &dummy_fds);
+    if (size <= 0)
+      return size;
+
+    // Only the first fragment may carry descriptors.
+    CHECK(dummy_fds.empty());
+    CHECK(size == kFragmentSize ||
+          static_cast<size_t>(size) == bytes_remaining);
+    bytes_remaining -= size;
+    buffer += size;
+  }
+  return header->size;
+}
+
+// Parent side of Init(): just remember our end of the socketpair.
+void LaunchHelper::DoParent(int fd) {
+  child_fd_ = fd;
+}
+
+// Helper-process main loop: services START/WAIT requests from the parent
+// until the parent closes its end of the socket, then exits.
+void LaunchHelper::DoHelper(int fd) {
+  parent_fd_ = fd;
+  is_child_ = true;
+  std::unique_ptr<char[]> buf(new char[kMaxMessageSize]);
+  while (true) {
+    // Wait for a message from the parent.
+    std::vector<ScopedFD> fds;
+    ssize_t size = Recv(parent_fd_, buf.get(), &fds);
+    // EOF / connection reset means the parent is gone; shut down quietly.
+    if (size == 0 || (size < 0 && errno == ECONNRESET)) {
+      _exit(0);
+    }
+    PCHECK(size > 0);
+
+    const MessageHeader* header =
+        reinterpret_cast<const MessageHeader*>(buf.get());
+    CHECK_EQ(static_cast<ssize_t>(header->size), size);
+    switch (header->type) {
+      case MessageType::START_REQUEST:
+        StartProcessInHelper(
+            reinterpret_cast<const StartProcessRequest*>(buf.get()),
+            std::move(fds));
+        break;
+      case MessageType::WAIT_REQUEST:
+        WaitForChildInHelper(
+            reinterpret_cast<const WaitProcessRequest*>(buf.get()));
+        break;
+      default:
+        LOG(FATAL) << "Unsupported message type: "
+                   << static_cast<uint32_t>(header->type);
+    }
+  }
+}
+
+// Forks the actual test child. The helper branch replies to the parent with
+// the new pid; the child branch rebuilds argv/fds from the request and runs
+// the real main().
+void LaunchHelper::StartProcessInHelper(const StartProcessRequest* request,
+                                        std::vector<ScopedFD> fds) {
+  pid_t pid = fork();
+  PCHECK(pid >= 0) << "Fork failed";
+  if (pid) {
+    // Helper.
+    StartProcessResponse resp;
+    resp.child_pid = pid;
+    Send(parent_fd_, reinterpret_cast<const MessageHeader*>(&resp),
+         std::vector<int>());
+  } else {
+    // Child.
+    PCHECK(close(parent_fd_) == 0);
+    parent_fd_ = -1;
+    CommandLine::Reset();
+
+    // The Pickle payload follows the fixed-size request struct in memory.
+    Pickle serialised_extra(reinterpret_cast<const char*>(request + 1),
+                            request->header.size - sizeof(StartProcessRequest));
+    PickleIterator iter(serialised_extra);
+    std::vector<std::string> args;
+    for (size_t i = 0; i < request->num_args; i++) {
+      std::string arg;
+      CHECK(iter.ReadString(&arg));
+      args.push_back(std::move(arg));
+    }
+
+    CHECK_EQ(request->num_fds, fds.size());
+    // Move each received descriptor onto the fd number the test expects.
+    // NOTE(review): this does not handle collisions/cycles between remap
+    // entries (a target fd that is also a later source fd would be clobbered).
+    for (size_t i = 0; i < request->num_fds; i++) {
+      int new_fd;
+      CHECK(iter.ReadInt(&new_fd));
+      int old_fd = fds[i].release();
+      if (new_fd != old_fd) {
+        if (dup2(old_fd, new_fd) < 0) {
+          PLOG(FATAL) << "dup2";
+        }
+        PCHECK(close(old_fd) == 0);
+      }
+    }
+
+    // argv has argc+1 elements, where the last element is NULL.
+    std::unique_ptr<char*[]> argv(new char*[args.size() + 1]);
+    for (size_t i = 0; i < args.size(); i++) {
+      argv[i] = const_cast<char*>(args[i].c_str());
+    }
+    argv[args.size()] = nullptr;
+    // Run the test entry point and exit with its return value.
+    _exit(main_(args.size(), argv.get()));
+    NOTREACHED();
+  }
+}
+
+// Waits for the test child here in the helper -- the helper, not the real
+// parent, is the child's POSIX parent -- then reports the result back.
+void LaunchHelper::WaitForChildInHelper(const WaitProcessRequest* request) {
+  Process process(request->pid);
+  TimeDelta timeout = TimeDelta::FromMilliseconds(request->timeout_ms);
+  int exit_code = -1;
+  bool success = process.WaitForExitWithTimeout(timeout, &exit_code);
+
+  WaitProcessResponse resp;
+  resp.exit_code = exit_code;
+  resp.success = success;
+  Send(parent_fd_, reinterpret_cast<const MessageHeader*>(&resp),
+       std::vector<int>());
+}
+
+// Parent-side request to spawn a test child via the helper: serialises the
+// command line and fd mapping, sends START_REQUEST, then blocks for the
+// helper's START_RESPONSE carrying the new pid.
+Process LaunchHelper::StartChildTestHelper(const std::string& procname,
+                                           const CommandLine& base_command_line,
+                                           const LaunchOptions& options) {
+
+  CommandLine command_line(base_command_line);
+  if (!command_line.HasSwitch(switches::kTestChildProcess))
+    command_line.AppendSwitchASCII(switches::kTestChildProcess, procname);
+
+  StartProcessRequest request;
+  Pickle serialised_extra;
+  const CommandLine::StringVector& argv = command_line.argv();
+  for (const auto& arg : argv)
+    CHECK(serialised_extra.WriteString(arg));
+  request.num_args = argv.size();
+
+  std::vector<int> fds_to_send;
+  if (options.fds_to_remap) {
+    // Descriptors ride along in the message; their target fd numbers go into
+    // the pickle, paired by position.
+    for (auto p : *options.fds_to_remap) {
+      CHECK(serialised_extra.WriteInt(p.second));
+      fds_to_send.push_back(p.first);
+    }
+    request.num_fds = options.fds_to_remap->size();
+  }
+
+  size_t buf_size = sizeof(StartProcessRequest) + serialised_extra.size();
+  request.header.size = buf_size;
+  std::unique_ptr<char[]> buffer(new char[buf_size]);
+  memcpy(buffer.get(), &request, sizeof(StartProcessRequest));
+  memcpy(buffer.get() + sizeof(StartProcessRequest), serialised_extra.data(),
+         serialised_extra.size());
+
+  // Send start message.
+  Send(child_fd_, reinterpret_cast<const MessageHeader*>(buffer.get()),
+       fds_to_send);
+
+  // Synchronously get response.
+  // NOTE(review): Recv() passes kFragmentSize as the buffer length, so this
+  // relies on the helper never replying with more than a StartProcessResponse.
+  StartProcessResponse response;
+  std::vector<ScopedFD> recv_fds;
+  ssize_t resp_size = Recv(child_fd_, &response, &recv_fds);
+  PCHECK(resp_size == sizeof(StartProcessResponse));
+
+  return Process(response.child_pid);
+}
+
+// Parent-side wrapper: asks the helper (the child's actual POSIX parent) to
+// perform the wait, then unpacks the result into |exit_code|.
+bool LaunchHelper::WaitForChildExitWithTimeout(
+    const Process& process, TimeDelta timeout, int* exit_code) {
+
+  WaitProcessRequest request;
+  request.pid = process.Handle();
+  request.timeout_ms = timeout.InMilliseconds();
+
+  Send(child_fd_, reinterpret_cast<const MessageHeader*>(&request),
+       std::vector<int>());
+
+  // NOTE(review): as with StartChildTestHelper(), this relies on the helper
+  // never replying with more than a WaitProcessResponse.
+  WaitProcessResponse response;
+  std::vector<ScopedFD> recv_fds;
+  ssize_t resp_size = Recv(child_fd_, &response, &recv_fds);
+  PCHECK(resp_size == sizeof(WaitProcessResponse));
+
+  if (!response.success)
+    return false;
+
+  *exit_code = response.exit_code;
+  return true;
+}
+
+// Leaky singleton: the helper must survive for the whole test binary.
+LazyInstance<LaunchHelper>::Leaky g_launch_helper;
+
+} // namespace
+
+// Forks the launch helper; per the LaunchHelper contract this should run very
+// early in main(), before any threads start, so the fork is safe.
+void InitAndroidMultiProcessTestHelper(int (*main)(int, char**)) {
+  DCHECK(main);
+  // Don't allow child processes to themselves create new child processes.
+  if (g_launch_helper.Get().IsChild())
+    return;
+  g_launch_helper.Get().Init(main);
+}
+
+// Returns true when running inside the forked launch-helper process.
+bool AndroidIsChildProcess() {
+  return g_launch_helper.Get().IsChild();
+}
+
+// Waits for |process| via the launch helper. Requires
+// InitAndroidMultiProcessTestHelper() to have run (CHECKed).
+bool AndroidWaitForChildExitWithTimeout(
+    const Process& process, TimeDelta timeout, int* exit_code) {
+  CHECK(g_launch_helper.Get().IsReady());
+  return g_launch_helper.Get().WaitForChildExitWithTimeout(
+      process, timeout, exit_code);
+}
+
+// A very basic implementation for Android. On Android tests can run in an APK
+// and we don't have an executable to exec*. This implementation does the bare
+// minimum to execute the method specified by procname (in the child process).
+// - All options except |fds_to_remap| are ignored.
+Process SpawnMultiProcessTestChild(const std::string& procname,
+                                   const CommandLine& base_command_line,
+                                   const LaunchOptions& options) {
+  // Prefer the fork-early helper when it was initialised; it stays safe even
+  // after threads have started.
+  if (g_launch_helper.Get().IsReady()) {
+    return g_launch_helper.Get().StartChildTestHelper(
+        procname, base_command_line, options);
+  }
+
+  // TODO(viettrungluu): The FD-remapping done below is wrong in the presence of
+  // cycles (e.g., fd1 -> fd2, fd2 -> fd1). crbug.com/326576
+  FileHandleMappingVector empty;
+  // Fixed: this line previously read "... : \u2205" -- an HTML-entity
+  // mangling of "&empty;" -- which is not valid C++. Restore the address-of.
+  const FileHandleMappingVector* fds_to_remap =
+      options.fds_to_remap ? options.fds_to_remap : &empty;
+
+  pid_t pid = fork();
+
+  if (pid < 0) {
+    PLOG(ERROR) << "fork";
+    return Process();
+  }
+  if (pid > 0) {
+    // Parent process.
+    return Process(pid);
+  }
+  // Child process: close every non-standard fd we are not told to keep, move
+  // the remapped fds into place, rebuild the command line, run the test.
+  base::hash_set<int> fds_to_keep_open;
+  for (FileHandleMappingVector::const_iterator it = fds_to_remap->begin();
+       it != fds_to_remap->end(); ++it) {
+    fds_to_keep_open.insert(it->first);
+  }
+  // Keep standard FDs (stdin, stdout, stderr, etc.) open since this
+  // is not meant to spawn a daemon.
+  int base = GlobalDescriptors::kBaseDescriptor;
+  for (int fd = base; fd < sysconf(_SC_OPEN_MAX); ++fd) {
+    if (fds_to_keep_open.find(fd) == fds_to_keep_open.end()) {
+      close(fd);
+    }
+  }
+  for (FileHandleMappingVector::const_iterator it = fds_to_remap->begin();
+       it != fds_to_remap->end(); ++it) {
+    int old_fd = it->first;
+    int new_fd = it->second;
+    if (dup2(old_fd, new_fd) < 0) {
+      PLOG(FATAL) << "dup2";
+    }
+    close(old_fd);
+  }
+  CommandLine::Reset();
+  CommandLine::Init(0, nullptr);
+  CommandLine* command_line = CommandLine::ForCurrentProcess();
+  command_line->InitFromArgv(base_command_line.argv());
+  if (!command_line->HasSwitch(switches::kTestChildProcess))
+    command_line->AppendSwitchASCII(switches::kTestChildProcess, procname);
+
+  // _exit() never returns; the trailing return only placates the compiler.
+  _exit(multi_process_function_list::InvokeChildProcessTest(procname));
+  return Process();
+}
+
+} // namespace base
diff --git a/libchrome/base/test/opaque_ref_counted.cc b/libchrome/base/test/opaque_ref_counted.cc
new file mode 100644
index 0000000..ed6c36f
--- /dev/null
+++ b/libchrome/base/test/opaque_ref_counted.cc
@@ -0,0 +1,35 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/opaque_ref_counted.h"
+
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Out-of-line definition of the type that is only forward-declared in the
+// header; scoped_refptr must keep working while the type is incomplete there.
+class OpaqueRefCounted : public RefCounted<OpaqueRefCounted> {
+ public:
+  OpaqueRefCounted() {}
+
+  int Return42() { return 42; }
+
+ private:
+  // Private destructor: only RefCounted may delete this object.
+  virtual ~OpaqueRefCounted() {}
+
+  friend RefCounted<OpaqueRefCounted>;
+  DISALLOW_COPY_AND_ASSIGN(OpaqueRefCounted);
+};
+
+scoped_refptr<OpaqueRefCounted> MakeOpaqueRefCounted() {
+  return new OpaqueRefCounted();
+}
+
+void TestOpaqueRefCounted(scoped_refptr<OpaqueRefCounted> p) {
+  EXPECT_EQ(42, p->Return42());
+}
+
+} // namespace base
+
+// Explicitly instantiate scoped_refptr now that OpaqueRefCounted is complete,
+// matching the extern template declaration in the header.
+template class scoped_refptr<base::OpaqueRefCounted>;
diff --git a/libchrome/base/test/opaque_ref_counted.h b/libchrome/base/test/opaque_ref_counted.h
new file mode 100644
index 0000000..faf6a65
--- /dev/null
+++ b/libchrome/base/test/opaque_ref_counted.h
@@ -0,0 +1,24 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_OPAQUE_REF_COUNTED_H_
+#define BASE_TEST_OPAQUE_REF_COUNTED_H_
+
+#include "base/memory/ref_counted.h"
+
+namespace base {
+
+// OpaqueRefCounted is a test class for scoped_refptr to ensure it still works
+// when the pointed-to type is opaque (i.e., incomplete).
+class OpaqueRefCounted;
+
+// Test functions that return and accept scoped_refptr<OpaqueRefCounted> values.
+scoped_refptr<OpaqueRefCounted> MakeOpaqueRefCounted();
+void TestOpaqueRefCounted(scoped_refptr<OpaqueRefCounted> p);
+
+} // namespace base
+
+extern template class scoped_refptr<base::OpaqueRefCounted>;
+
+#endif // BASE_TEST_OPAQUE_REF_COUNTED_H_
diff --git a/libchrome/base/test/scoped_locale.cc b/libchrome/base/test/scoped_locale.cc
new file mode 100644
index 0000000..35b3fbe
--- /dev/null
+++ b/libchrome/base/test/scoped_locale.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/scoped_locale.h"
+
+#include <locale.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Remember the current locale, then switch every category (LC_ALL) to
+// |locale|; the switch is expected to succeed.
+ScopedLocale::ScopedLocale(const std::string& locale) {
+  prev_locale_ = setlocale(LC_ALL, NULL);
+  EXPECT_TRUE(setlocale(LC_ALL, locale.c_str()) != NULL) <<
+      "Failed to set locale: " << locale;
+}
+
+// Restore (and verify) the locale captured in the constructor.
+ScopedLocale::~ScopedLocale() {
+  EXPECT_STREQ(prev_locale_.c_str(), setlocale(LC_ALL, prev_locale_.c_str()));
+}
+
+} // namespace base
diff --git a/libchrome/base/test/scoped_locale.h b/libchrome/base/test/scoped_locale.h
new file mode 100644
index 0000000..ef64e98
--- /dev/null
+++ b/libchrome/base/test/scoped_locale.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_SCOPED_LOCALE_H_
+#define BASE_TEST_SCOPED_LOCALE_H_
+
+#include <string>
+
+#include "base/macros.h"
+
+namespace base {
+
+// Sets the given |locale| on construction, and restores the previous locale
+// on destruction.
+class ScopedLocale {
+ public:
+  explicit ScopedLocale(const std::string& locale);
+  ~ScopedLocale();
+
+ private:
+  // Locale string (LC_ALL) that was active before construction.
+  std::string prev_locale_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedLocale);
+};
+
+} // namespace base
+
+#endif // BASE_TEST_SCOPED_LOCALE_H_
diff --git a/libchrome/base/test/sequenced_worker_pool_owner.cc b/libchrome/base/test/sequenced_worker_pool_owner.cc
new file mode 100644
index 0000000..8781495
--- /dev/null
+++ b/libchrome/base/test/sequenced_worker_pool_owner.cc
@@ -0,0 +1,61 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/sequenced_worker_pool_owner.h"
+
+#include "base/location.h"
+#include "base/message_loop/message_loop.h"
+#include "base/single_thread_task_runner.h"
+
+namespace base {
+
+SequencedWorkerPoolOwner::SequencedWorkerPoolOwner(
+    size_t max_threads,
+    const std::string& thread_name_prefix)
+    : constructor_message_loop_(MessageLoop::current()),
+      pool_(new SequencedWorkerPool(max_threads, thread_name_prefix, this)),
+      has_work_call_count_(0) {}
+
+SequencedWorkerPoolOwner::~SequencedWorkerPoolOwner() {
+  // Shut the pool down and drop our reference so the pool object can die.
+  pool_->Shutdown();
+  pool_ = NULL;
+
+  // Spin the current message loop until SWP destruction verified in OnDestruct.
+  exit_loop_.Run();
+}
+
+const scoped_refptr<SequencedWorkerPool>& SequencedWorkerPoolOwner::pool() {
+  return pool_;
+}
+
+void SequencedWorkerPoolOwner::SetWillWaitForShutdownCallback(
+    const Closure& callback) {
+  will_wait_for_shutdown_callback_ = callback;
+}
+
+int SequencedWorkerPoolOwner::has_work_call_count() const {
+  AutoLock lock(has_work_lock_);
+  return has_work_call_count_;
+}
+
+// TestingObserver hook: counts pool "has work" notifications under the lock.
+void SequencedWorkerPoolOwner::OnHasWork() {
+  AutoLock lock(has_work_lock_);
+  ++has_work_call_count_;
+}
+
+void SequencedWorkerPoolOwner::WillWaitForShutdown() {
+  if (!will_wait_for_shutdown_callback_.is_null()) {
+    will_wait_for_shutdown_callback_.Run();
+
+    // Release the reference to the callback to prevent retain cycles.
+    will_wait_for_shutdown_callback_ = Closure();
+  }
+}
+
+// Called when the pool object is destroyed; unblocks the destructor, which is
+// spinning |exit_loop_| on the constructor's thread.
+void SequencedWorkerPoolOwner::OnDestruct() {
+  constructor_message_loop_->task_runner()->PostTask(FROM_HERE,
+                                                     exit_loop_.QuitClosure());
+}
+
+} // namespace base
diff --git a/libchrome/base/test/sequenced_worker_pool_owner.h b/libchrome/base/test/sequenced_worker_pool_owner.h
new file mode 100644
index 0000000..05fc750
--- /dev/null
+++ b/libchrome/base/test/sequenced_worker_pool_owner.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_SEQUENCED_WORKER_POOL_OWNER_H_
+#define BASE_TEST_SEQUENCED_WORKER_POOL_OWNER_H_
+
+#include <stddef.h>
+
+#include <cstddef>
+#include <string>
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/run_loop.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/sequenced_worker_pool.h"
+
+namespace base {
+
+class MessageLoop;
+
+// Wrapper around SequencedWorkerPool for testing that blocks destruction
+// until the pool is actually destroyed. This is so that a
+// SequencedWorkerPool from one test doesn't outlive its test and cause
+// strange races with other tests that touch global stuff (like histograms and
+// logging). However, this requires that nothing else on this thread holds a
+// ref to the pool when the SequencedWorkerPoolOwner is destroyed.
+//
+// This class calls Shutdown on the owned SequencedWorkerPool in the destructor.
+// Tests may themselves call Shutdown earlier to test shutdown behavior.
+class SequencedWorkerPoolOwner : public SequencedWorkerPool::TestingObserver {
+ public:
+  SequencedWorkerPoolOwner(size_t max_threads,
+                           const std::string& thread_name_prefix);
+
+  ~SequencedWorkerPoolOwner() override;
+
+  // Don't change the returned pool's testing observer.
+  const scoped_refptr<SequencedWorkerPool>& pool();
+
+  // The given callback will be called on WillWaitForShutdown().
+  void SetWillWaitForShutdownCallback(const Closure& callback);
+
+  // Number of OnHasWork() notifications received so far.
+  int has_work_call_count() const;
+
+ private:
+  // SequencedWorkerPool::TestingObserver implementation.
+  void OnHasWork() override;
+  void WillWaitForShutdown() override;
+  void OnDestruct() override;
+
+  // Used to run the current thread's message loop until the
+  // SequencedWorkerPool's destruction has been verified.
+  base::RunLoop exit_loop_;
+  // Thread that constructed this owner; OnDestruct() posts the quit task here.
+  MessageLoop* const constructor_message_loop_;
+
+  scoped_refptr<SequencedWorkerPool> pool_;
+  Closure will_wait_for_shutdown_callback_;
+
+  mutable Lock has_work_lock_;
+  int has_work_call_count_;  // Guarded by |has_work_lock_|.
+
+  DISALLOW_COPY_AND_ASSIGN(SequencedWorkerPoolOwner);
+};
+
+} // namespace base
+
+#endif // BASE_TEST_SEQUENCED_WORKER_POOL_OWNER_H_
diff --git a/libchrome/base/test/simple_test_clock.cc b/libchrome/base/test/simple_test_clock.cc
new file mode 100644
index 0000000..a2bdc2a
--- /dev/null
+++ b/libchrome/base/test/simple_test_clock.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/simple_test_clock.h"
+
+namespace base {
+
+SimpleTestClock::SimpleTestClock() {}
+
+SimpleTestClock::~SimpleTestClock() {}
+
+// Clock implementation: returns the fake "now" under the lock.
+Time SimpleTestClock::Now() {
+  AutoLock lock(lock_);
+  return now_;
+}
+
+// Shifts the fake time by |delta| (no sign restriction is enforced here).
+void SimpleTestClock::Advance(TimeDelta delta) {
+  AutoLock lock(lock_);
+  now_ += delta;
+}
+
+// Replaces the fake time outright.
+void SimpleTestClock::SetNow(Time now) {
+  AutoLock lock(lock_);
+  now_ = now;
+}
+
+} // namespace base
diff --git a/libchrome/base/test/simple_test_clock.h b/libchrome/base/test/simple_test_clock.h
new file mode 100644
index 0000000..a70f99c
--- /dev/null
+++ b/libchrome/base/test/simple_test_clock.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_SIMPLE_TEST_CLOCK_H_
+#define BASE_TEST_SIMPLE_TEST_CLOCK_H_
+
+#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "base/time/clock.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// SimpleTestClock is a Clock implementation that gives control over
+// the returned Time objects. All methods may be called from any
+// thread.
+class SimpleTestClock : public Clock {
+ public:
+  // Starts off with a clock set to Time().
+  SimpleTestClock();
+  ~SimpleTestClock() override;
+
+  Time Now() override;
+
+  // Advances the clock by |delta|.
+  void Advance(TimeDelta delta);
+
+  // Sets the clock to the given time.
+  void SetNow(Time now);
+
+ private:
+  // Protects |now_|.
+  Lock lock_;
+
+  Time now_;  // Current fake time; guarded by |lock_|.
+};
+
+} // namespace base
+
+#endif // BASE_TEST_SIMPLE_TEST_CLOCK_H_
diff --git a/libchrome/base/test/simple_test_tick_clock.cc b/libchrome/base/test/simple_test_tick_clock.cc
new file mode 100644
index 0000000..c6375bd
--- /dev/null
+++ b/libchrome/base/test/simple_test_tick_clock.cc
@@ -0,0 +1,31 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/simple_test_tick_clock.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+SimpleTestTickClock::SimpleTestTickClock() {}
+
+SimpleTestTickClock::~SimpleTestTickClock() {}
+
+// TickClock implementation: returns the fake tick count under the lock.
+TimeTicks SimpleTestTickClock::NowTicks() {
+  AutoLock lock(lock_);
+  return now_ticks_;
+}
+
+// Advances the fake ticks; unlike SimpleTestClock, negative deltas are
+// rejected (ticks are monotonic).
+void SimpleTestTickClock::Advance(TimeDelta delta) {
+  AutoLock lock(lock_);
+  DCHECK(delta >= TimeDelta());
+  now_ticks_ += delta;
+}
+
+// Replaces the fake tick count outright.
+void SimpleTestTickClock::SetNowTicks(TimeTicks ticks) {
+  AutoLock lock(lock_);
+  now_ticks_ = ticks;
+}
+
+} // namespace base
diff --git a/libchrome/base/test/simple_test_tick_clock.h b/libchrome/base/test/simple_test_tick_clock.h
new file mode 100644
index 0000000..f2f7581
--- /dev/null
+++ b/libchrome/base/test/simple_test_tick_clock.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_SIMPLE_TEST_TICK_CLOCK_H_
+#define BASE_TEST_SIMPLE_TEST_TICK_CLOCK_H_
+
+#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// SimpleTestTickClock is a TickClock implementation that gives
+// control over the returned TimeTicks objects. All methods may be
+// called from any thread.
+class SimpleTestTickClock : public TickClock {
+ public:
+  // Starts off with a clock set to TimeTicks().
+  SimpleTestTickClock();
+  ~SimpleTestTickClock() override;
+
+  TimeTicks NowTicks() override;
+
+  // Advances the clock by |delta|, which must not be negative.
+  void Advance(TimeDelta delta);
+
+  // Sets the clock to the given time.
+  void SetNowTicks(TimeTicks ticks);
+
+ private:
+  // Protects |now_ticks_|.
+  Lock lock_;
+
+  TimeTicks now_ticks_;  // Current fake ticks; guarded by |lock_|.
+};
+
+} // namespace base
+
+#endif // BASE_TEST_SIMPLE_TEST_TICK_CLOCK_H_
diff --git a/libchrome/base/test/test_file_util.cc b/libchrome/base/test/test_file_util.cc
new file mode 100644
index 0000000..40b25f0
--- /dev/null
+++ b/libchrome/base/test/test_file_util.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_file_util.h"
+
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+// Retries eviction up to kCycles times, sleeping between attempts so the
+// whole retry window fits inside TestTimeouts::action_timeout().
+bool EvictFileFromSystemCacheWithRetry(const FilePath& path) {
+  const int kCycles = 10;
+  const TimeDelta kDelay = TestTimeouts::action_timeout() / kCycles;
+  for (int i = 0; i < kCycles; i++) {
+    if (EvictFileFromSystemCache(path))
+      return true;
+    PlatformThread::Sleep(kDelay);
+  }
+  return false;
+}
+
+// Declared in base/files/file_path.h. (gtest's PrintTo extension point, so
+// FilePath values print readably in assertion failures.)
+void PrintTo(const FilePath& path, std::ostream* out) {
+  *out << path.value();
+}
+
+} // namespace base
diff --git a/libchrome/base/test/test_file_util.h b/libchrome/base/test/test_file_util.h
new file mode 100644
index 0000000..7042e48
--- /dev/null
+++ b/libchrome/base/test/test_file_util.h
@@ -0,0 +1,83 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_FILE_UTIL_H_
+#define BASE_TEST_TEST_FILE_UTIL_H_
+
+// File utility functions used only by tests.
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include <jni.h>
+#endif
+
+namespace base {
+
+class FilePath;
+
+// Clear a specific file from the system cache like EvictFileFromSystemCache,
+// but on failure it will sleep and retry. On the Windows buildbots, eviction
+// can fail if the file is marked in use, and this will throw off timings that
+// rely on uncached files.
+bool EvictFileFromSystemCacheWithRetry(const FilePath& file);
+
+// Wrapper over base::Delete. On Windows repeatedly invokes Delete in case
+// of failure to workaround Windows file locking semantics. Returns true on
+// success.
+bool DieFileDie(const FilePath& file, bool recurse);
+
+// Clear a specific file from the system cache. After this call, trying
+// to access this file will result in a cold load from the hard drive.
+bool EvictFileFromSystemCache(const FilePath& file);
+
+#if defined(OS_WIN)
+// Returns true if the volume supports Alternate Data Streams.
+bool VolumeSupportsADS(const FilePath& path);
+
+// Returns true if the ZoneIdentifier is correctly set to "Internet" (3).
+// Note that this function must be called from the same process as
+// the one that set the zone identifier. I.e. don't use it in UI/automation
+// based tests.
+bool HasInternetZoneIdentifier(const FilePath& full_path);
+#endif // defined(OS_WIN)
+
+// For testing, make the file unreadable or unwritable.
+// In POSIX, this does not apply to the root user.
+bool MakeFileUnreadable(const FilePath& path) WARN_UNUSED_RESULT;
+bool MakeFileUnwritable(const FilePath& path) WARN_UNUSED_RESULT;
+
+// Saves the current permissions for a path, and restores it on destruction.
+class FilePermissionRestorer {
+ public:
+  explicit FilePermissionRestorer(const FilePath& path);
+  ~FilePermissionRestorer();
+
+ private:
+  const FilePath path_;
+  // NOTE(review): presumably captured via GetPermissionInfo() and released in
+  // the destructor -- confirm against the .cc implementation.
+  void* info_;     // The opaque stored permission information.
+  size_t length_;  // The length of the stored permission information.
+
+  DISALLOW_COPY_AND_ASSIGN(FilePermissionRestorer);
+};
+
+#if defined(OS_ANDROID)
+// Register the ContentUriTestUrils JNI bindings.
+bool RegisterContentUriTestUtils(JNIEnv* env);
+
+// Insert an image file into the MediaStore, and retrieve the content URI for
+// testing purpose.
+FilePath InsertImageIntoMediaStore(const FilePath& path);
+#endif // defined(OS_ANDROID)
+
+} // namespace base
+
+#endif // BASE_TEST_TEST_FILE_UTIL_H_
diff --git a/libchrome/base/test/test_file_util_linux.cc b/libchrome/base/test/test_file_util_linux.cc
new file mode 100644
index 0000000..0ef5c0a
--- /dev/null
+++ b/libchrome/base/test/test_file_util_linux.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_file_util.h"
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "base/files/file_path.h"
+#include "base/files/scoped_file.h"
+
+namespace base {
+
+// Drops |file|'s pages from the page cache: flush pending data first
+// (fdatasync), then ask the kernel to discard the cached pages
+// (POSIX_FADV_DONTNEED).
+bool EvictFileFromSystemCache(const FilePath& file) {
+  ScopedFD fd(open(file.value().c_str(), O_RDONLY));
+  if (!fd.is_valid())
+    return false;
+  if (fdatasync(fd.get()) != 0)
+    return false;
+  if (posix_fadvise(fd.get(), 0, 0, POSIX_FADV_DONTNEED) != 0)
+    return false;
+  return true;
+}
+
+} // namespace base
diff --git a/libchrome/base/test/test_file_util_posix.cc b/libchrome/base/test/test_file_util_posix.cc
new file mode 100644
index 0000000..b817283
--- /dev/null
+++ b/libchrome/base/test/test_file_util_posix.cc
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_file_util.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <string>
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+// Deny |permission| on the file |path|.
+// Deny |permission| on the file |path|.
+bool DenyFilePermission(const FilePath& path, mode_t permission) {
+ // Read the current mode, clear the requested bits, then write it back.
+ struct stat stat_buf;
+ if (stat(path.value().c_str(), &stat_buf) != 0)
+ return false;
+ const mode_t new_mode = stat_buf.st_mode & ~permission;
+ return HANDLE_EINTR(chmod(path.value().c_str(), new_mode)) == 0;
+}
+
+// Gets a blob indicating the permission information for |path|.
+// |length| is the length of the blob. Zero on failure.
+// Returns the blob pointer, or NULL on failure.
+void* GetPermissionInfo(const FilePath& path, size_t* length) {
+ DCHECK(length);
+ *length = 0;
+
+ struct stat stat_buf;
+ if (stat(path.value().c_str(), &stat_buf) != 0)
+ return NULL;
+
+ // The "blob" is a single heap-allocated mode_t. Ownership passes to the
+ // caller, who releases it by handing it back to RestorePermissionInfo().
+ *length = sizeof(mode_t);
+ mode_t* mode = new mode_t;
+ *mode = stat_buf.st_mode & ~S_IFMT; // Filter out file/path kind.
+
+ return mode;
+}
+
+// Restores the permission information for |path|, given the blob retrieved
+// using |GetPermissionInfo()|.
+// |info| is the pointer to the blob.
+// |length| is the length of the blob.
+// Either |info| or |length| may be NULL/0, in which case nothing happens.
+bool RestorePermissionInfo(const FilePath& path, void* info, size_t length) {
+ if (!info || (length == 0))
+ return false;
+
+ // The blob was allocated as a single mode_t by GetPermissionInfo().
+ DCHECK_EQ(sizeof(mode_t), length);
+ mode_t* mode = reinterpret_cast<mode_t*>(info);
+
+ int rv = HANDLE_EINTR(chmod(path.value().c_str(), *mode));
+
+ // Free the blob regardless of whether chmod succeeded.
+ delete mode;
+
+ return rv == 0;
+}
+
+} // namespace
+
+bool DieFileDie(const FilePath& file, bool recurse) {
+ // There is no need to workaround Windows problems on POSIX.
+ // Just pass-through.
+ return DeleteFile(file, recurse);
+}
+
+#if !defined(OS_LINUX) && !defined(OS_MACOSX)
+// Generic POSIX fallback; Linux and Mac have real implementations in their
+// platform-specific files.
+bool EvictFileFromSystemCache(const FilePath& file) {
+ // There doesn't seem to be a POSIX way to cool the disk cache.
+ NOTIMPLEMENTED();
+ return false;
+}
+#endif
+
+// Clears the read bit for user, group and other.
+bool MakeFileUnreadable(const FilePath& path) {
+ return DenyFilePermission(path, S_IRUSR | S_IRGRP | S_IROTH);
+}
+
+// Clears the write bit for user, group and other.
+bool MakeFileUnwritable(const FilePath& path) {
+ return DenyFilePermission(path, S_IWUSR | S_IWGRP | S_IWOTH);
+}
+
+FilePermissionRestorer::FilePermissionRestorer(const FilePath& path)
+ : path_(path), info_(NULL), length_(0) {
+ // Snapshot the current permission bits; restored (and freed) in the dtor.
+ info_ = GetPermissionInfo(path_, &length_);
+ DCHECK(info_ != NULL);
+ DCHECK_NE(0u, length_);
+}
+
+FilePermissionRestorer::~FilePermissionRestorer() {
+ // RestorePermissionInfo() also releases |info_|.
+ if (!RestorePermissionInfo(path_, info_, length_))
+ NOTREACHED();
+}
+
+} // namespace base
diff --git a/libchrome/base/test/test_io_thread.cc b/libchrome/base/test/test_io_thread.cc
new file mode 100644
index 0000000..1fa0412
--- /dev/null
+++ b/libchrome/base/test/test_io_thread.cc
@@ -0,0 +1,66 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_io_thread.h"
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/synchronization/waitable_event.h"
+
+namespace {
+
+// Runs |task| synchronously, then signals |event| so the posting thread can
+// stop waiting (see TestIOThread::PostTaskAndWait).
+void PostTaskAndWaitHelper(base::WaitableEvent* event,
+ const base::Closure& task) {
+ task.Run();
+ event->Signal();
+}
+
+} // namespace
+
+namespace base {
+
+TestIOThread::TestIOThread(Mode mode)
+ : io_thread_("test_io_thread"), io_thread_started_(false) {
+ switch (mode) {
+ case kAutoStart:
+ Start();
+ return;
+ case kManualStart:
+ return;
+ }
+ // Every valid enum value returns above, so this is unreachable.
+ CHECK(false) << "Invalid mode";
+}
+
+TestIOThread::~TestIOThread() {
+ Stop();
+}
+
+void TestIOThread::Start() {
+ // Starting twice without an intervening Stop() is a caller error.
+ CHECK(!io_thread_started_);
+ io_thread_started_ = true;
+ CHECK(io_thread_.StartWithOptions(
+ base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+}
+
+void TestIOThread::Stop() {
+ // Note: It's okay to call |Stop()| even if the thread isn't running.
+ io_thread_.Stop();
+ io_thread_started_ = false;
+}
+
+void TestIOThread::PostTask(const tracked_objects::Location& from_here,
+ const base::Closure& task) {
+ task_runner()->PostTask(from_here, task);
+}
+
+void TestIOThread::PostTaskAndWait(const tracked_objects::Location& from_here,
+ const base::Closure& task) {
+ // The helper signals |event| after running |task|, making this call
+ // synchronous from the caller's perspective.
+ base::WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ task_runner()->PostTask(from_here,
+ base::Bind(&PostTaskAndWaitHelper, &event, task));
+ event.Wait();
+}
+
+} // namespace base
diff --git a/libchrome/base/test/test_io_thread.h b/libchrome/base/test/test_io_thread.h
new file mode 100644
index 0000000..c2ed187
--- /dev/null
+++ b/libchrome/base/test/test_io_thread.h
@@ -0,0 +1,59 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_IO_THREAD_H_
+#define BASE_TEST_TEST_IO_THREAD_H_
+
+#include "base/callback_forward.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/task_runner.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// Creates and runs an IO thread with a MessageLoop, and
+// makes the MessageLoop accessible from its client.
+// It also provides some idiomatic APIs such as PostTaskAndWait().
+class TestIOThread {
+ public:
+ enum Mode { kAutoStart, kManualStart };
+ // Starts the thread immediately when |mode| is kAutoStart.
+ explicit TestIOThread(Mode mode);
+ // Stops the I/O thread if necessary.
+ ~TestIOThread();
+
+ // |Start()|/|Stop()| should only be called from the main (creation) thread.
+ // After |Stop()|, |Start()| may be called again to start a new I/O thread.
+ // |Stop()| may be called even when the I/O thread is not started.
+ void Start();
+ void Stop();
+
+ // Posts |task| to the IO thread.
+ void PostTask(const tracked_objects::Location& from_here,
+ const base::Closure& task);
+ // Posts |task| to the IO thread and blocks on a WaitableEvent until the
+ // posted |task| has executed, then returns.
+ void PostTaskAndWait(const tracked_objects::Location& from_here,
+ const base::Closure& task);
+
+ base::MessageLoopForIO* message_loop() {
+ return static_cast<base::MessageLoopForIO*>(io_thread_.message_loop());
+ }
+
+ scoped_refptr<SingleThreadTaskRunner> task_runner() {
+ return message_loop()->task_runner();
+ }
+
+ private:
+ base::Thread io_thread_;
+ bool io_thread_started_; // True between Start() and Stop().
+
+ DISALLOW_COPY_AND_ASSIGN(TestIOThread);
+};
+
+} // namespace base
+
+#endif // BASE_TEST_TEST_IO_THREAD_H_
diff --git a/libchrome/base/test/test_pending_task.cc b/libchrome/base/test/test_pending_task.cc
new file mode 100644
index 0000000..87b107e
--- /dev/null
+++ b/libchrome/base/test/test_pending_task.cc
@@ -0,0 +1,79 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/test/test_pending_task.h"
+
+namespace base {
+
+// Default instance: empty location/task, zero times, NESTABLE.
+TestPendingTask::TestPendingTask() : nestability(NESTABLE) {}
+
+TestPendingTask::TestPendingTask(
+ const tracked_objects::Location& location,
+ const Closure& task,
+ TimeTicks post_time,
+ TimeDelta delay,
+ TestNestability nestability)
+ : location(location),
+ task(task),
+ post_time(post_time),
+ delay(delay),
+ nestability(nestability) {}
+
+TestPendingTask::TestPendingTask(const TestPendingTask& other) = default;
+
+TimeTicks TestPendingTask::GetTimeToRun() const {
+ return post_time + delay;
+}
+
+bool TestPendingTask::ShouldRunBefore(const TestPendingTask& other) const {
+ // Nestable tasks order before non-nestable ones; otherwise order by the
+ // earlier time-to-run. Ties must be broken by the caller (see header).
+ if (nestability != other.nestability)
+ return (nestability == NESTABLE);
+ return GetTimeToRun() < other.GetTimeToRun();
+}
+
+TestPendingTask::~TestPendingTask() {}
+
+void TestPendingTask::AsValueInto(base::trace_event::TracedValue* state) const {
+ // Serializes this task's fields into |state| for tracing output.
+ // Fix: the original emitted the "delay" entry twice (once here and once
+ // after the switch), producing a duplicate key in the traced dictionary.
+ state->SetInteger("run_at", GetTimeToRun().ToInternalValue());
+ state->SetString("posting_function", location.ToString());
+ state->SetInteger("post_time", post_time.ToInternalValue());
+ state->SetInteger("delay", delay.ToInternalValue());
+ switch (nestability) {
+ case NESTABLE:
+ state->SetString("nestability", "NESTABLE");
+ break;
+ case NON_NESTABLE:
+ state->SetString("nestability", "NON_NESTABLE");
+ break;
+ }
+}
+
+std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+TestPendingTask::AsValue() const {
+ std::unique_ptr<base::trace_event::TracedValue> state(
+ new base::trace_event::TracedValue());
+ AsValueInto(state.get());
+ // std::move is needed to convert unique_ptr<TracedValue> to the
+ // unique_ptr<ConvertableToTraceFormat> return type.
+ return std::move(state);
+}
+
+std::string TestPendingTask::ToString() const {
+ // Renders as "TestPendingTask(<traced representation>)".
+ std::string formatted = "TestPendingTask(";
+ AsValue()->AppendAsTraceFormat(&formatted);
+ formatted.append(")");
+ return formatted;
+}
+
+// gtest pretty-printing support: the stream operator delegates to PrintTo().
+std::ostream& operator<<(std::ostream& os, const TestPendingTask& task) {
+ PrintTo(task, &os);
+ return os;
+}
+
+void PrintTo(const TestPendingTask& task, std::ostream* os) {
+ *os << task.ToString();
+}
+
+} // namespace base
diff --git a/libchrome/base/test/test_pending_task.h b/libchrome/base/test/test_pending_task.h
new file mode 100644
index 0000000..2dbdb7e
--- /dev/null
+++ b/libchrome/base/test/test_pending_task.h
@@ -0,0 +1,73 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_PENDING_TASK_H_
+#define BASE_TEST_TEST_PENDING_TASK_H_
+
+#include <string>
+
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event_argument.h"
+
+namespace base {
+
+// TestPendingTask is a helper class for test TaskRunner
+// implementations. See test_simple_task_runner.h for example usage.
+
+struct TestPendingTask {
+ enum TestNestability { NESTABLE, NON_NESTABLE };
+
+ TestPendingTask();
+ TestPendingTask(const TestPendingTask& other);
+ TestPendingTask(const tracked_objects::Location& location,
+ const Closure& task,
+ TimeTicks post_time,
+ TimeDelta delay,
+ TestNestability nestability);
+ ~TestPendingTask();
+
+ // Returns post_time + delay.
+ TimeTicks GetTimeToRun() const;
+
+ // Returns true if this task is nestable and |other| isn't, or if
+ // this task's time to run is strictly earlier than |other|'s time
+ // to run.
+ //
+ // Note that two tasks may both have the same nestability and delay.
+ // In that case, the caller must use some other criterion (probably
+ // the position in some queue) to break the tie. Conveniently, the
+ // following STL functions already do so:
+ //
+ // - std::min_element
+ // - std::stable_sort
+ //
+ // but the following STL functions don't:
+ //
+ // - std::max_element
+ // - std::sort.
+ bool ShouldRunBefore(const TestPendingTask& other) const;
+
+ tracked_objects::Location location;
+ Closure task;
+ TimeTicks post_time;
+ TimeDelta delay;
+ TestNestability nestability;
+
+ // Functions for using test pending task with tracing, useful in unit
+ // testing.
+ void AsValueInto(base::trace_event::TracedValue* state) const;
+ // NOTE(review): std::unique_ptr requires <memory>, which this header only
+ // gets transitively — consider including it directly.
+ std::unique_ptr<base::trace_event::ConvertableToTraceFormat> AsValue() const;
+ std::string ToString() const;
+};
+
+// gtest helpers which allow pretty printing of the tasks, very useful in unit
+// testing.
+std::ostream& operator<<(std::ostream& os, const TestPendingTask& task);
+void PrintTo(const TestPendingTask& task, std::ostream* os);
+
+} // namespace base
+
+#endif // BASE_TEST_TEST_PENDING_TASK_H_
diff --git a/libchrome/base/test/test_simple_task_runner.cc b/libchrome/base/test/test_simple_task_runner.cc
new file mode 100644
index 0000000..cc39fab
--- /dev/null
+++ b/libchrome/base/test/test_simple_task_runner.cc
@@ -0,0 +1,82 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_simple_task_runner.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+TestSimpleTaskRunner::TestSimpleTaskRunner() {}
+
+TestSimpleTaskRunner::~TestSimpleTaskRunner() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+}
+
+bool TestSimpleTaskRunner::PostDelayedTask(
+ const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // Tasks are queued FIFO; |delay| is recorded but does not affect ordering.
+ pending_tasks_.push_back(
+ TestPendingTask(from_here, task, TimeTicks(), delay,
+ TestPendingTask::NESTABLE));
+ return true;
+}
+
+bool TestSimpleTaskRunner::PostNonNestableDelayedTask(
+ const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // Same as PostDelayedTask() but tags the task NON_NESTABLE.
+ pending_tasks_.push_back(
+ TestPendingTask(from_here, task, TimeTicks(), delay,
+ TestPendingTask::NON_NESTABLE));
+ return true;
+}
+
+bool TestSimpleTaskRunner::RunsTasksOnCurrentThread() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // Single-threaded by design: always true on the owning thread.
+ return true;
+}
+
+const std::deque<TestPendingTask>&
+TestSimpleTaskRunner::GetPendingTasks() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return pending_tasks_;
+}
+
+bool TestSimpleTaskRunner::HasPendingTask() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return !pending_tasks_.empty();
+}
+
+base::TimeDelta TestSimpleTaskRunner::NextPendingTaskDelay() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // front() on an empty deque is undefined behavior; make the precondition
+ // explicit (callers should check HasPendingTask() first).
+ DCHECK(!pending_tasks_.empty());
+ return pending_tasks_.front().GetTimeToRun() - base::TimeTicks();
+}
+
+void TestSimpleTaskRunner::ClearPendingTasks() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // Drops all queued tasks without running them.
+ pending_tasks_.clear();
+}
+
+void TestSimpleTaskRunner::RunPendingTasks() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // Detach the current queue first so tasks that post new tasks append to a
+ // fresh |pending_tasks_| rather than the deque being iterated.
+ std::deque<TestPendingTask> tasks_to_run;
+ tasks_to_run.swap(pending_tasks_);
+ for (TestPendingTask& pending_task : tasks_to_run)
+ pending_task.task.Run();
+}
+
+void TestSimpleTaskRunner::RunUntilIdle() {
+ // Keep draining until tasks stop posting new tasks.
+ while (!pending_tasks_.empty()) {
+ RunPendingTasks();
+ }
+}
+
+} // namespace base
diff --git a/libchrome/base/test/test_simple_task_runner.h b/libchrome/base/test/test_simple_task_runner.h
new file mode 100644
index 0000000..338c634
--- /dev/null
+++ b/libchrome/base/test/test_simple_task_runner.h
@@ -0,0 +1,85 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_SIMPLE_TASK_RUNNER_H_
+#define BASE_TEST_TEST_SIMPLE_TASK_RUNNER_H_
+
+#include <deque>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/single_thread_task_runner.h"
+#include "base/test/test_pending_task.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+
+class TimeDelta;
+
+// TestSimpleTaskRunner is a simple TaskRunner implementation that can
+// be used for testing. It implements SingleThreadTaskRunner as that
+// interface implements SequencedTaskRunner, which in turn implements
+// TaskRunner, so TestSimpleTaskRunner can be passed in to a function
+// that accepts any *TaskRunner object.
+//
+// TestSimpleTaskRunner has the following properties which make it simple:
+//
+// - It is non-thread safe; all member functions must be called on
+// the same thread.
+// - Tasks are simply stored in a queue in FIFO order, ignoring delay
+// and nestability.
+// - Tasks aren't guaranteed to be destroyed immediately after
+// they're run.
+//
+// However, TestSimpleTaskRunner allows for reentrancy, in that it
+// handles the running of tasks that in turn call back into itself
+// (e.g., to post more tasks).
+//
+// If you need more complicated properties, consider using this class
+// as a template for writing a test TaskRunner implementation using
+// TestPendingTask.
+//
+// Note that, like any TaskRunner, TestSimpleTaskRunner is
+// ref-counted.
+class TestSimpleTaskRunner : public SingleThreadTaskRunner {
+ public:
+ TestSimpleTaskRunner();
+
+ // SingleThreadTaskRunner implementation.
+ bool PostDelayedTask(const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay) override;
+ bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay) override;
+
+ bool RunsTasksOnCurrentThread() const override;
+
+ // Accessors for inspecting the queue from tests.
+ const std::deque<TestPendingTask>& GetPendingTasks() const;
+ bool HasPendingTask() const;
+ base::TimeDelta NextPendingTaskDelay() const;
+
+ // Clears the queue of pending tasks without running them.
+ void ClearPendingTasks();
+
+ // Runs each current pending task in order and clears the queue.
+ // Any tasks posted by the tasks are not run.
+ virtual void RunPendingTasks();
+
+ // Runs pending tasks until the queue is empty.
+ void RunUntilIdle();
+
+ protected:
+ // Ref-counted (via TaskRunner), so the destructor is non-public.
+ ~TestSimpleTaskRunner() override;
+
+ std::deque<TestPendingTask> pending_tasks_; // FIFO queue of posted tasks.
+ ThreadChecker thread_checker_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestSimpleTaskRunner);
+};
+
+} // namespace base
+
+#endif // BASE_TEST_TEST_SIMPLE_TASK_RUNNER_H_
diff --git a/libchrome/base/test/test_switches.cc b/libchrome/base/test/test_switches.cc
new file mode 100644
index 0000000..817a38e
--- /dev/null
+++ b/libchrome/base/test/test_switches.cc
@@ -0,0 +1,72 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_switches.h"
+
+// Maximum number of tests to run in a single batch.
+const char switches::kTestLauncherBatchLimit[] = "test-launcher-batch-limit";
+
+// Sets defaults desirable for the continuous integration bots, e.g. parallel
+// test execution and test retries.
+const char switches::kTestLauncherBotMode[] =
+ "test-launcher-bot-mode";
+
+// Makes it possible to debug the launcher itself. By default the launcher
+// automatically switches to single process mode when it detects presence
+// of debugger.
+const char switches::kTestLauncherDebugLauncher[] =
+ "test-launcher-debug-launcher";
+
+// Force running all requested tests and retries even if too many test errors
+// occur.
+const char switches::kTestLauncherForceRunBrokenTests[] =
+ "test-launcher-force-run-broken-tests";
+
+// Path to file containing test filter (one pattern per line).
+const char switches::kTestLauncherFilterFile[] = "test-launcher-filter-file";
+
+// Number of parallel test launcher jobs.
+const char switches::kTestLauncherJobs[] = "test-launcher-jobs";
+
+// Path to list of compiled in tests.
+const char switches::kTestLauncherListTests[] = "test-launcher-list-tests";
+
+// Path to test results file in our custom test launcher format.
+const char switches::kTestLauncherOutput[] = "test-launcher-output";
+
+// Maximum number of times to retry a test after failure.
+const char switches::kTestLauncherRetryLimit[] = "test-launcher-retry-limit";
+
+// Path to test results file with all the info from the test launcher.
+const char switches::kTestLauncherSummaryOutput[] =
+ "test-launcher-summary-output";
+
+// Flag controlling when test stdio is displayed as part of the launcher's
+// standard output.
+const char switches::kTestLauncherPrintTestStdio[] =
+ "test-launcher-print-test-stdio";
+
+// Print a writable path and exit (for internal use).
+const char switches::kTestLauncherPrintWritablePath[] =
+ "test-launcher-print-writable-path";
+
+// Index of the test shard to run, starting from 0 (first shard) to total shards
+// minus one (last shard).
+const char switches::kTestLauncherShardIndex[] =
+ "test-launcher-shard-index";
+
+// Total number of shards. Must be the same for all shards.
+const char switches::kTestLauncherTotalShards[] =
+ "test-launcher-total-shards";
+
+// Time (in milliseconds) that the tests should wait before timing out.
+const char switches::kTestLauncherTimeout[] = "test-launcher-timeout";
+
+// Path where to save a trace of test launcher's execution.
+const char switches::kTestLauncherTrace[] = "test-launcher-trace";
+
+// TODO(phajdan.jr): Clean up the switch names.
+// Timeout (in milliseconds) for "tiny" test operations (see test_timeouts.cc).
+const char switches::kTestTinyTimeout[] = "test-tiny-timeout";
+// Timeout (in milliseconds) for a UI test action.
+const char switches::kUiTestActionTimeout[] = "ui-test-action-timeout";
+// Maximum timeout (in milliseconds) for a UI test action.
+const char switches::kUiTestActionMaxTimeout[] = "ui-test-action-max-timeout";
diff --git a/libchrome/base/test/test_switches.h b/libchrome/base/test/test_switches.h
new file mode 100644
index 0000000..88ef0ce
--- /dev/null
+++ b/libchrome/base/test/test_switches.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_SWITCHES_H_
+#define BASE_TEST_TEST_SWITCHES_H_
+
+namespace switches {
+
+// All switches in alphabetical order. The switches should be documented
+// alongside the definition of their values in the .cc file.
+// (Reordered so the list actually matches the stated alphabetical order.)
+extern const char kTestLauncherBatchLimit[];
+extern const char kTestLauncherBotMode[];
+extern const char kTestLauncherDebugLauncher[];
+extern const char kTestLauncherFilterFile[];
+extern const char kTestLauncherForceRunBrokenTests[];
+extern const char kTestLauncherJobs[];
+extern const char kTestLauncherListTests[];
+extern const char kTestLauncherOutput[];
+extern const char kTestLauncherPrintTestStdio[];
+extern const char kTestLauncherPrintWritablePath[];
+extern const char kTestLauncherRetryLimit[];
+extern const char kTestLauncherShardIndex[];
+extern const char kTestLauncherSummaryOutput[];
+extern const char kTestLauncherTimeout[];
+extern const char kTestLauncherTotalShards[];
+extern const char kTestLauncherTrace[];
+extern const char kTestTinyTimeout[];
+extern const char kUiTestActionMaxTimeout[];
+extern const char kUiTestActionTimeout[];
+
+} // namespace switches
+
+#endif // BASE_TEST_TEST_SWITCHES_H_
diff --git a/libchrome/base/test/test_timeouts.cc b/libchrome/base/test/test_timeouts.cc
new file mode 100644
index 0000000..55e9a79
--- /dev/null
+++ b/libchrome/base/test/test_timeouts.cc
@@ -0,0 +1,114 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_timeouts.h"
+
+#include <algorithm>
+
+#include "base/command_line.h"
+#include "base/debug/debugger.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/test/test_switches.h"
+#include "build/build_config.h"
+
+namespace {
+
+// ASan/TSan/MSan instrument each memory access. This may slow the execution
+// down significantly.
+#if defined(MEMORY_SANITIZER)
+// For MSan the slowdown depends heavily on the value of msan_track_origins GYP
+// flag. The multiplier below corresponds to msan_track_origins=1.
+static const int kTimeoutMultiplier = 6;
+#elif defined(ADDRESS_SANITIZER) && defined(OS_WIN)
+// Asan/Win has not been optimized yet, give it a higher
+// timeout multiplier. See http://crbug.com/412471
+static const int kTimeoutMultiplier = 3;
+#elif defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || \
+ defined(SYZYASAN)
+static const int kTimeoutMultiplier = 2;
+#else
+// No sanitizer: use the timeouts as-is.
+static const int kTimeoutMultiplier = 1;
+#endif
+
+// Used instead of a truly infinite timeout when a debugger is attached.
+const int kAlmostInfiniteTimeoutMs = 100000000;
+
+// Sets value to the greatest of:
+// 1) value's current value multiplied by kTimeoutMultiplier (assuming
+// InitializeTimeout is called only once per value).
+// 2) min_value.
+// 3) the numerical value given by switch_name on the command line multiplied
+// by kTimeoutMultiplier.
+void InitializeTimeout(const char* switch_name, int min_value, int* value) {
+ DCHECK(value);
+ if (base::CommandLine::ForCurrentProcess()->HasSwitch(switch_name)) {
+ std::string string_value(base::CommandLine::ForCurrentProcess()->
+ GetSwitchValueASCII(switch_name));
+ int timeout;
+ // NOTE(review): StringToInt's return value is ignored; on a non-numeric
+ // switch value |timeout| holds a best-effort result — confirm intended.
+ base::StringToInt(string_value, &timeout);
+ *value = std::max(*value, timeout);
+ }
+ *value *= kTimeoutMultiplier;
+ *value = std::max(*value, min_value);
+}
+
+// Sets value to the greatest of:
+// 1) value's current value multiplied by kTimeoutMultiplier.
+// 2) 0
+// 3) the numerical value given by switch_name on the command line multiplied
+// by kTimeoutMultiplier.
+void InitializeTimeout(const char* switch_name, int* value) {
+ InitializeTimeout(switch_name, 0, value);
+}
+
+} // namespace
+
+// static
+bool TestTimeouts::initialized_ = false;
+
+// The timeout values should increase in the order they appear in this block.
+// static
+int TestTimeouts::tiny_timeout_ms_ = 100;
+int TestTimeouts::action_timeout_ms_ = 10000;
+#ifndef NDEBUG
+// Debug builds run slower, so allow a larger max action timeout.
+int TestTimeouts::action_max_timeout_ms_ = 45000;
+#else
+int TestTimeouts::action_max_timeout_ms_ = 30000;
+#endif // NDEBUG
+
+int TestTimeouts::test_launcher_timeout_ms_ = 45000;
+
+// static
+void TestTimeouts::Initialize() {
+ // Must be called exactly once, before any timeout accessor is used.
+ if (initialized_) {
+ NOTREACHED();
+ return;
+ }
+ initialized_ = true;
+
+ if (base::debug::BeingDebugged()) {
+ fprintf(stdout,
+ "Detected presence of a debugger, running without test timeouts.\n");
+ }
+
+ // Note that these timeouts MUST be initialized in the correct order as
+ // per the CHECKS below.
+ InitializeTimeout(switches::kTestTinyTimeout, &tiny_timeout_ms_);
+ // With a debugger attached the action timeout becomes effectively
+ // infinite so breakpoints do not trip test timeouts.
+ InitializeTimeout(switches::kUiTestActionTimeout,
+ base::debug::BeingDebugged() ? kAlmostInfiniteTimeoutMs
+ : tiny_timeout_ms_,
+ &action_timeout_ms_);
+ InitializeTimeout(switches::kUiTestActionMaxTimeout, action_timeout_ms_,
+ &action_max_timeout_ms_);
+
+ // Test launcher timeout is independent from anything above action timeout.
+ InitializeTimeout(switches::kTestLauncherTimeout, action_timeout_ms_,
+ &test_launcher_timeout_ms_);
+
+ // The timeout values should be increasing in the right order.
+ // CHECK_LE logs both operand values on failure, unlike CHECK(a <= b).
+ CHECK_LE(tiny_timeout_ms_, action_timeout_ms_);
+ CHECK_LE(action_timeout_ms_, action_max_timeout_ms_);
+
+ CHECK_LE(action_timeout_ms_, test_launcher_timeout_ms_);
+}
diff --git a/libchrome/base/test/test_timeouts.h b/libchrome/base/test/test_timeouts.h
new file mode 100644
index 0000000..ddaf05b
--- /dev/null
+++ b/libchrome/base/test/test_timeouts.h
@@ -0,0 +1,59 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_TIMEOUTS_H_
+#define BASE_TEST_TEST_TIMEOUTS_H_
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+
+// Returns common timeouts to use in tests. Makes it possible to adjust
+// the timeouts for different environments (like Valgrind).
+class TestTimeouts {
+ public:
+ // Initializes the timeouts. Non thread-safe. Should be called exactly once
+ // by the test suite.
+ static void Initialize();
+
+ // Timeout for actions that are expected to finish "almost instantly".
+ static base::TimeDelta tiny_timeout() {
+ DCHECK(initialized_);
+ return base::TimeDelta::FromMilliseconds(tiny_timeout_ms_);
+ }
+
+ // Timeout to wait for something to happen. If you are not sure
+ // which timeout to use, this is the one you want.
+ static base::TimeDelta action_timeout() {
+ DCHECK(initialized_);
+ return base::TimeDelta::FromMilliseconds(action_timeout_ms_);
+ }
+
+ // Timeout longer than the above, but still suitable to use
+ // multiple times in a single test. Use if the timeout above
+ // is not sufficient.
+ static base::TimeDelta action_max_timeout() {
+ DCHECK(initialized_);
+ return base::TimeDelta::FromMilliseconds(action_max_timeout_ms_);
+ }
+
+ // Timeout for a single test launched by the built-in test launcher.
+ // Do not use outside of the test launcher.
+ static base::TimeDelta test_launcher_timeout() {
+ DCHECK(initialized_);
+ return base::TimeDelta::FromMilliseconds(test_launcher_timeout_ms_);
+ }
+
+ private:
+ static bool initialized_; // Set by Initialize(); guards all accessors.
+
+ static int tiny_timeout_ms_;
+ static int action_timeout_ms_;
+ static int action_max_timeout_ms_;
+ static int test_launcher_timeout_ms_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TestTimeouts);
+};
+
+#endif // BASE_TEST_TEST_TIMEOUTS_H_
diff --git a/libchrome/base/test/trace_event_analyzer.cc b/libchrome/base/test/trace_event_analyzer.cc
new file mode 100644
index 0000000..64436dc
--- /dev/null
+++ b/libchrome/base/test/trace_event_analyzer.cc
@@ -0,0 +1,999 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/trace_event_analyzer.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <memory>
+#include <set>
+
+#include "base/json/json_reader.h"
+#include "base/strings/pattern.h"
+#include "base/values.h"
+
+namespace trace_analyzer {
+
+// TraceEvent
+
+// Default-constructs an event with sentinel values; other_event stays null
+// until TraceAnalyzer::AssociateEvents() links a matching event.
+TraceEvent::TraceEvent()
+ : thread(0, 0),
+ timestamp(0),
+ duration(0),
+ phase(TRACE_EVENT_PHASE_BEGIN),
+ other_event(NULL) {
+}
+
+// Move-only: arg_values holds uniquely-owned deep copies of JSON Values.
+TraceEvent::TraceEvent(TraceEvent&& other) = default;
+
+TraceEvent::~TraceEvent() {
+}
+
+TraceEvent& TraceEvent::operator=(TraceEvent&& rhs) = default;
+
+// Populates this event from one dictionary entry of the trace JSON array.
+// Returns false (and logs an error) when a field that is required for the
+// event's phase is missing or the value is not a dictionary.
+bool TraceEvent::SetFromJSON(const base::Value* event_value) {
+ if (event_value->GetType() != base::Value::TYPE_DICTIONARY) {
+ LOG(ERROR) << "Value must be TYPE_DICTIONARY";
+ return false;
+ }
+ const base::DictionaryValue* dictionary =
+ static_cast<const base::DictionaryValue*>(event_value);
+
+ std::string phase_str;
+ const base::DictionaryValue* args = NULL;
+
+ if (!dictionary->GetString("ph", &phase_str)) {
+ LOG(ERROR) << "ph is missing from TraceEvent JSON";
+ return false;
+ }
+
+ // The phase is encoded in JSON as a one-character string.
+ phase = *phase_str.data();
+
+ // Which other fields are mandatory depends on the phase.
+ bool may_have_duration = (phase == TRACE_EVENT_PHASE_COMPLETE);
+ bool require_origin = (phase != TRACE_EVENT_PHASE_METADATA);
+ bool require_id = (phase == TRACE_EVENT_PHASE_ASYNC_BEGIN ||
+ phase == TRACE_EVENT_PHASE_ASYNC_STEP_INTO ||
+ phase == TRACE_EVENT_PHASE_ASYNC_STEP_PAST ||
+ phase == TRACE_EVENT_PHASE_MEMORY_DUMP ||
+ phase == TRACE_EVENT_PHASE_ENTER_CONTEXT ||
+ phase == TRACE_EVENT_PHASE_LEAVE_CONTEXT ||
+ phase == TRACE_EVENT_PHASE_CREATE_OBJECT ||
+ phase == TRACE_EVENT_PHASE_DELETE_OBJECT ||
+ phase == TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ||
+ phase == TRACE_EVENT_PHASE_ASYNC_END);
+
+ if (require_origin && !dictionary->GetInteger("pid", &thread.process_id)) {
+ LOG(ERROR) << "pid is missing from TraceEvent JSON";
+ return false;
+ }
+ if (require_origin && !dictionary->GetInteger("tid", &thread.thread_id)) {
+ LOG(ERROR) << "tid is missing from TraceEvent JSON";
+ return false;
+ }
+ if (require_origin && !dictionary->GetDouble("ts", &timestamp)) {
+ LOG(ERROR) << "ts is missing from TraceEvent JSON";
+ return false;
+ }
+ // Duration is optional even for COMPLETE events; absence leaves it 0.
+ if (may_have_duration) {
+ dictionary->GetDouble("dur", &duration);
+ }
+ if (!dictionary->GetString("cat", &category)) {
+ LOG(ERROR) << "cat is missing from TraceEvent JSON";
+ return false;
+ }
+ if (!dictionary->GetString("name", &name)) {
+ LOG(ERROR) << "name is missing from TraceEvent JSON";
+ return false;
+ }
+ if (!dictionary->GetDictionary("args", &args)) {
+ LOG(ERROR) << "args is missing from TraceEvent JSON";
+ return false;
+ }
+ if (require_id && !dictionary->GetString("id", &id)) {
+ LOG(ERROR) << "id is missing from ASYNC_BEGIN/ASYNC_END TraceEvent JSON";
+ return false;
+ }
+
+ // For each argument, copy the type and create a trace_analyzer::TraceValue.
+ // Numbers, booleans and ints are normalized into arg_numbers (as doubles);
+ // strings go to arg_strings; every value is also deep-copied to arg_values.
+ for (base::DictionaryValue::Iterator it(*args); !it.IsAtEnd();
+ it.Advance()) {
+ std::string str;
+ bool boolean = false;
+ int int_num = 0;
+ double double_num = 0.0;
+ if (it.value().GetAsString(&str)) {
+ arg_strings[it.key()] = str;
+ } else if (it.value().GetAsInteger(&int_num)) {
+ arg_numbers[it.key()] = static_cast<double>(int_num);
+ } else if (it.value().GetAsBoolean(&boolean)) {
+ arg_numbers[it.key()] = static_cast<double>(boolean ? 1 : 0);
+ } else if (it.value().GetAsDouble(&double_num)) {
+ arg_numbers[it.key()] = double_num;
+ }
+ // Record all arguments as values.
+ arg_values[it.key()] = it.value().CreateDeepCopy();
+ }
+
+ return true;
+}
+
+// Returns the absolute timestamp distance to the associated event.
+// Callers must ensure other_event is non-null (see has_other_event()).
+double TraceEvent::GetAbsTimeToOtherEvent() const {
+ return fabs(other_event->timestamp - timestamp);
+}
+
+// Looks up a string argument by name; returns false if absent.
+bool TraceEvent::GetArgAsString(const std::string& name,
+ std::string* arg) const {
+ const auto it = arg_strings.find(name);
+ if (it != arg_strings.end()) {
+ *arg = it->second;
+ return true;
+ }
+ return false;
+}
+
+// Looks up a numeric argument by name (SetFromJSON stores ints and bools
+// as doubles here too); returns false if absent.
+bool TraceEvent::GetArgAsNumber(const std::string& name,
+ double* arg) const {
+ const auto it = arg_numbers.find(name);
+ if (it != arg_numbers.end()) {
+ *arg = it->second;
+ return true;
+ }
+ return false;
+}
+
+// Looks up any argument by name and returns a deep copy of its Value;
+// returns false if absent.
+bool TraceEvent::GetArgAsValue(const std::string& name,
+ std::unique_ptr<base::Value>* arg) const {
+ const auto it = arg_values.find(name);
+ if (it != arg_values.end()) {
+ *arg = it->second->CreateDeepCopy();
+ return true;
+ }
+ return false;
+}
+
+bool TraceEvent::HasStringArg(const std::string& name) const {
+ return (arg_strings.find(name) != arg_strings.end());
+}
+
+bool TraceEvent::HasNumberArg(const std::string& name) const {
+ return (arg_numbers.find(name) != arg_numbers.end());
+}
+
+bool TraceEvent::HasArg(const std::string& name) const {
+ return (arg_values.find(name) != arg_values.end());
+}
+
+// The GetKnownArg* variants DCHECK that the argument exists; in release
+// builds a missing argument yields the type's default value.
+std::string TraceEvent::GetKnownArgAsString(const std::string& name) const {
+ std::string arg_string;
+ bool result = GetArgAsString(name, &arg_string);
+ DCHECK(result);
+ return arg_string;
+}
+
+double TraceEvent::GetKnownArgAsDouble(const std::string& name) const {
+ double arg_double = 0;
+ bool result = GetArgAsNumber(name, &arg_double);
+ DCHECK(result);
+ return arg_double;
+}
+
+int TraceEvent::GetKnownArgAsInt(const std::string& name) const {
+ double arg_double = 0;
+ bool result = GetArgAsNumber(name, &arg_double);
+ DCHECK(result);
+ return static_cast<int>(arg_double);
+}
+
+bool TraceEvent::GetKnownArgAsBool(const std::string& name) const {
+ double arg_double = 0;
+ bool result = GetArgAsNumber(name, &arg_double);
+ DCHECK(result);
+ return (arg_double != 0.0);
+}
+
+std::unique_ptr<base::Value> TraceEvent::GetKnownArgAsValue(
+ const std::string& name) const {
+ std::unique_ptr<base::Value> arg_value;
+ bool result = GetArgAsValue(name, &arg_value);
+ DCHECK(result);
+ return arg_value;
+}
+
+// QueryNode
+
+// Wrapper node that lets Query trees share operand subexpressions.
+QueryNode::QueryNode(const Query& query) : query_(query) {
+}
+
+QueryNode::~QueryNode() {
+}
+
+// Query
+
+// Leaf query that reads event member |member| (e.g. EVENT_NAME).
+Query::Query(TraceEventMember member)
+ : type_(QUERY_EVENT_MEMBER),
+ operator_(OP_INVALID),
+ member_(member),
+ number_(0),
+ is_pattern_(false) {
+}
+
+// Leaf query that reads the named argument |arg_name| of member |member|
+// (e.g. EVENT_ARG).
+Query::Query(TraceEventMember member, const std::string& arg_name)
+ : type_(QUERY_EVENT_MEMBER),
+ operator_(OP_INVALID),
+ member_(member),
+ number_(0),
+ string_(arg_name),
+ is_pattern_(false) {
+}
+
+// Copies all leaf data and shares operand subtrees via QueryNode pointers.
+Query::Query(const Query& query)
+ : type_(query.type_),
+ operator_(query.operator_),
+ left_(query.left_),
+ right_(query.right_),
+ member_(query.member_),
+ number_(query.number_),
+ string_(query.string_),
+ is_pattern_(query.is_pattern_) {
+}
+
+Query::~Query() {
+}
+
+// Static factory helpers wrapping literal values as leaf queries.
+Query Query::String(const std::string& str) {
+ return Query(str);
+}
+
+Query Query::Double(double num) {
+ return Query(num);
+}
+
+Query Query::Int(int32_t num) {
+ return Query(static_cast<double>(num));
+}
+
+Query Query::Uint(uint32_t num) {
+ return Query(static_cast<double>(num));
+}
+
+// Booleans are represented numerically: true == 1.0, false == 0.0.
+Query Query::Bool(bool boolean) {
+ return Query(boolean ? 1.0 : 0.0);
+}
+
+// Phases are single characters, stored by their numeric value.
+Query Query::Phase(char phase) {
+ return Query(static_cast<double>(phase));
+}
+
+// Like String(), but comparisons with OP_EQ/OP_NE use base::MatchPattern
+// ('*' and '?' wildcards) instead of exact equality.
+Query Query::Pattern(const std::string& pattern) {
+ Query query(pattern);
+ query.is_pattern_ = true;
+ return query;
+}
+
+// Evaluates this query tree against |event|, coercing leaf values to bool:
+// numbers are true when non-zero, strings when non-empty. Comparison nodes
+// try a numeric comparison first, then a string comparison; logical nodes
+// recurse with short-circuiting.
+bool Query::Evaluate(const TraceEvent& event) const {
+ // First check for values that can convert to bool.
+
+ // double is true if != 0:
+ double bool_value = 0.0;
+ bool is_bool = GetAsDouble(event, &bool_value);
+ if (is_bool)
+ return (bool_value != 0.0);
+
+ // string is true if it is non-empty:
+ std::string str_value;
+ bool is_str = GetAsString(event, &str_value);
+ if (is_str)
+ return !str_value.empty();
+
+ DCHECK_EQ(QUERY_BOOLEAN_OPERATOR, type_)
+ << "Invalid query: missing boolean expression";
+ DCHECK(left_.get());
+ DCHECK(right_.get() || is_unary_operator());
+
+ if (is_comparison_operator()) {
+ DCHECK(left().is_value() && right().is_value())
+ << "Invalid query: comparison operator used between event member and "
+ "value.";
+ bool compare_result = false;
+ if (CompareAsDouble(event, &compare_result))
+ return compare_result;
+ if (CompareAsString(event, &compare_result))
+ return compare_result;
+ // Neither side could be evaluated; treat the comparison as false.
+ return false;
+ }
+ // It's a logical operator.
+ switch (operator_) {
+ case OP_AND:
+ return left().Evaluate(event) && right().Evaluate(event);
+ case OP_OR:
+ return left().Evaluate(event) || right().Evaluate(event);
+ case OP_NOT:
+ return !left().Evaluate(event);
+ default:
+ NOTREACHED();
+ return false;
+ }
+}
+
+// Performs a numeric comparison of the two operand subtrees. Returns false
+// when either operand does not evaluate to a number; otherwise stores the
+// comparison outcome in |result| and returns true.
+bool Query::CompareAsDouble(const TraceEvent& event, bool* result) const {
+ double lhs_value = 0.0;
+ double rhs_value = 0.0;
+ if (!left().GetAsDouble(event, &lhs_value))
+ return false;
+ if (!right().GetAsDouble(event, &rhs_value))
+ return false;
+ if (operator_ == OP_EQ)
+ *result = (lhs_value == rhs_value);
+ else if (operator_ == OP_NE)
+ *result = (lhs_value != rhs_value);
+ else if (operator_ == OP_LT)
+ *result = (lhs_value < rhs_value);
+ else if (operator_ == OP_LE)
+ *result = (lhs_value <= rhs_value);
+ else if (operator_ == OP_GT)
+ *result = (lhs_value > rhs_value);
+ else if (operator_ == OP_GE)
+ *result = (lhs_value >= rhs_value);
+ else {
+ NOTREACHED();
+ return false;
+ }
+ return true;
+}
+
+// Performs a string comparison of the two operand subtrees. Returns false
+// when either operand does not evaluate to a string. For OP_EQ/OP_NE, if
+// either side was built with Query::Pattern(), wildcard matching via
+// base::MatchPattern is used instead of exact equality; the ordering
+// operators always compare lexicographically.
+bool Query::CompareAsString(const TraceEvent& event, bool* result) const {
+ std::string lhs, rhs;
+ if (!left().GetAsString(event, &lhs) || !right().GetAsString(event, &rhs))
+ return false;
+ switch (operator_) {
+ case OP_EQ:
+ if (right().is_pattern_)
+ *result = base::MatchPattern(lhs, rhs);
+ else if (left().is_pattern_)
+ *result = base::MatchPattern(rhs, lhs);
+ else
+ *result = (lhs == rhs);
+ return true;
+ case OP_NE:
+ if (right().is_pattern_)
+ *result = !base::MatchPattern(lhs, rhs);
+ else if (left().is_pattern_)
+ *result = !base::MatchPattern(rhs, lhs);
+ else
+ *result = (lhs != rhs);
+ return true;
+ case OP_LT:
+ *result = (lhs < rhs);
+ return true;
+ case OP_LE:
+ *result = (lhs <= rhs);
+ return true;
+ case OP_GT:
+ *result = (lhs > rhs);
+ return true;
+ case OP_GE:
+ *result = (lhs >= rhs);
+ return true;
+ default:
+ NOTREACHED();
+ return false;
+ }
+}
+
+// Evaluates an arithmetic subtree into |num|. Returns false when an operand
+// does not evaluate to a number, or when OP_MOD would divide by zero (the
+// original code performed int64_t % 0, which is undefined behavior).
+bool Query::EvaluateArithmeticOperator(const TraceEvent& event,
+ double* num) const {
+ DCHECK_EQ(QUERY_ARITHMETIC_OPERATOR, type_);
+ DCHECK(left_.get());
+ DCHECK(right_.get() || is_unary_operator());
+
+ double lhs = 0, rhs = 0;
+ if (!left().GetAsDouble(event, &lhs))
+ return false;
+ if (!is_unary_operator() && !right().GetAsDouble(event, &rhs))
+ return false;
+
+ switch (operator_) {
+ case OP_ADD:
+ *num = lhs + rhs;
+ return true;
+ case OP_SUB:
+ *num = lhs - rhs;
+ return true;
+ case OP_MUL:
+ *num = lhs * rhs;
+ return true;
+ case OP_DIV:
+ // Floating-point division by zero is well-defined (yields +/-inf).
+ *num = lhs / rhs;
+ return true;
+ case OP_MOD:
+ // Integer remainder with a zero divisor is undefined behavior;
+ // report "no value" instead.
+ if (static_cast<int64_t>(rhs) == 0)
+ return false;
+ *num = static_cast<double>(static_cast<int64_t>(lhs) %
+ static_cast<int64_t>(rhs));
+ return true;
+ case OP_NEGATE:
+ *num = -lhs;
+ return true;
+ default:
+ NOTREACHED();
+ return false;
+ }
+}
+
+// Evaluates this node as a number: arithmetic subtrees recurse, event
+// members are read from |event|, numeric literals are returned directly.
+// Returns false for node types with no numeric value.
+bool Query::GetAsDouble(const TraceEvent& event, double* num) const {
+ switch (type_) {
+ case QUERY_ARITHMETIC_OPERATOR:
+ return EvaluateArithmeticOperator(event, num);
+ case QUERY_EVENT_MEMBER:
+ return GetMemberValueAsDouble(event, num);
+ case QUERY_NUMBER:
+ *num = number_;
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Evaluates this node as a string: event members are read from |event|,
+// string literals are returned directly. Returns false otherwise.
+bool Query::GetAsString(const TraceEvent& event, std::string* str) const {
+ switch (type_) {
+ case QUERY_EVENT_MEMBER:
+ return GetMemberValueAsString(event, str);
+ case QUERY_STRING:
+ *str = string_;
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Reads a numeric event member. EVENT_* members are read from |event|
+// itself; OTHER_* members from event.other_event (failing when there is no
+// associated event). Returns false when the member has no numeric value.
+bool Query::GetMemberValueAsDouble(const TraceEvent& event,
+ double* num) const {
+ DCHECK_EQ(QUERY_EVENT_MEMBER, type_);
+
+ // This could be a request for a member of |event| or a member of |event|'s
+ // associated event. Store the target event in the_event:
+ const TraceEvent* the_event = (member_ < OTHER_PID) ?
+ &event : event.other_event;
+
+ // Request for member of associated event, but there is no associated event.
+ if (!the_event)
+ return false;
+
+ switch (member_) {
+ case EVENT_PID:
+ case OTHER_PID:
+ *num = static_cast<double>(the_event->thread.process_id);
+ return true;
+ case EVENT_TID:
+ case OTHER_TID:
+ *num = static_cast<double>(the_event->thread.thread_id);
+ return true;
+ case EVENT_TIME:
+ case OTHER_TIME:
+ *num = the_event->timestamp;
+ return true;
+ case EVENT_DURATION:
+ // Duration here is the timestamp distance to the associated event,
+ // so it requires an association to exist.
+ if (!the_event->has_other_event())
+ return false;
+ *num = the_event->GetAbsTimeToOtherEvent();
+ return true;
+ case EVENT_COMPLETE_DURATION:
+ // Only COMPLETE events carry an explicit "dur" field.
+ if (the_event->phase != TRACE_EVENT_PHASE_COMPLETE)
+ return false;
+ *num = the_event->duration;
+ return true;
+ case EVENT_PHASE:
+ case OTHER_PHASE:
+ *num = static_cast<double>(the_event->phase);
+ return true;
+ case EVENT_HAS_STRING_ARG:
+ case OTHER_HAS_STRING_ARG:
+ *num = (the_event->HasStringArg(string_) ? 1.0 : 0.0);
+ return true;
+ case EVENT_HAS_NUMBER_ARG:
+ case OTHER_HAS_NUMBER_ARG:
+ *num = (the_event->HasNumberArg(string_) ? 1.0 : 0.0);
+ return true;
+ case EVENT_ARG:
+ case OTHER_ARG: {
+ // Search for the argument name and return its value if found.
+ std::map<std::string, double>::const_iterator num_i =
+ the_event->arg_numbers.find(string_);
+ if (num_i == the_event->arg_numbers.end())
+ return false;
+ *num = num_i->second;
+ return true;
+ }
+ case EVENT_HAS_OTHER:
+ // return 1.0 (true) if the other event exists
+ *num = event.other_event ? 1.0 : 0.0;
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Reads a string event member (category, name, id, or a string argument).
+// EVENT_* members come from |event|; OTHER_* members from event.other_event
+// (failing when there is no associated event).
+bool Query::GetMemberValueAsString(const TraceEvent& event,
+ std::string* str) const {
+ DCHECK_EQ(QUERY_EVENT_MEMBER, type_);
+
+ // This could be a request for a member of |event| or a member of |event|'s
+ // associated event. Store the target event in the_event:
+ const TraceEvent* the_event = (member_ < OTHER_PID) ?
+ &event : event.other_event;
+
+ // Request for member of associated event, but there is no associated event.
+ if (!the_event)
+ return false;
+
+ switch (member_) {
+ case EVENT_CATEGORY:
+ case OTHER_CATEGORY:
+ *str = the_event->category;
+ return true;
+ case EVENT_NAME:
+ case OTHER_NAME:
+ *str = the_event->name;
+ return true;
+ case EVENT_ID:
+ case OTHER_ID:
+ *str = the_event->id;
+ return true;
+ case EVENT_ARG:
+ case OTHER_ARG: {
+ // Search for the argument name and return its value if found.
+ std::map<std::string, std::string>::const_iterator str_i =
+ the_event->arg_strings.find(string_);
+ if (str_i == the_event->arg_strings.end())
+ return false;
+ *str = str_i->second;
+ return true;
+ }
+ default:
+ return false;
+ }
+}
+
+// Leaf query holding a string literal (or a pattern; see Query::Pattern).
+Query::Query(const std::string& str)
+ : type_(QUERY_STRING),
+ operator_(OP_INVALID),
+ member_(EVENT_INVALID),
+ number_(0),
+ string_(str),
+ is_pattern_(false) {
+}
+
+// Leaf query holding a numeric literal.
+Query::Query(double num)
+ : type_(QUERY_NUMBER),
+ operator_(OP_INVALID),
+ member_(EVENT_INVALID),
+ number_(num),
+ is_pattern_(false) {
+}
+// Accessors for the operand subtrees of an operator node.
+const Query& Query::left() const {
+ return left_->query();
+}
+
+const Query& Query::right() const {
+ return right_->query();
+}
+
+// Operator overloads combine queries into new trees; each constructs an
+// operator node that shares the operand subtrees via QueryNodes.
+Query Query::operator==(const Query& rhs) const {
+ return Query(*this, rhs, OP_EQ);
+}
+
+Query Query::operator!=(const Query& rhs) const {
+ return Query(*this, rhs, OP_NE);
+}
+
+Query Query::operator<(const Query& rhs) const {
+ return Query(*this, rhs, OP_LT);
+}
+
+Query Query::operator<=(const Query& rhs) const {
+ return Query(*this, rhs, OP_LE);
+}
+
+Query Query::operator>(const Query& rhs) const {
+ return Query(*this, rhs, OP_GT);
+}
+
+Query Query::operator>=(const Query& rhs) const {
+ return Query(*this, rhs, OP_GE);
+}
+
+Query Query::operator&&(const Query& rhs) const {
+ return Query(*this, rhs, OP_AND);
+}
+
+Query Query::operator||(const Query& rhs) const {
+ return Query(*this, rhs, OP_OR);
+}
+
+Query Query::operator!() const {
+ return Query(*this, OP_NOT);
+}
+
+Query Query::operator+(const Query& rhs) const {
+ return Query(*this, rhs, OP_ADD);
+}
+
+Query Query::operator-(const Query& rhs) const {
+ return Query(*this, rhs, OP_SUB);
+}
+
+Query Query::operator*(const Query& rhs) const {
+ return Query(*this, rhs, OP_MUL);
+}
+
+Query Query::operator/(const Query& rhs) const {
+ return Query(*this, rhs, OP_DIV);
+}
+
+Query Query::operator%(const Query& rhs) const {
+ return Query(*this, rhs, OP_MOD);
+}
+
+Query Query::operator-() const {
+ return Query(*this, OP_NEGATE);
+}
+
+
+// Constructs a binary operator node: |left| BINARY_OP |right|. Operators
+// ordered before OP_ADD are boolean/comparison operators; OP_ADD onward are
+// arithmetic. is_pattern_ is explicitly cleared: every other constructor
+// initializes it, and leaving it indeterminate here was read later by the
+// copy constructor and CompareAsString (undefined behavior).
+Query::Query(const Query& left, const Query& right, Operator binary_op)
+ : operator_(binary_op),
+ left_(new QueryNode(left)),
+ right_(new QueryNode(right)),
+ member_(EVENT_INVALID),
+ number_(0),
+ is_pattern_(false) {
+ type_ = (binary_op < OP_ADD ?
+ QUERY_BOOLEAN_OPERATOR : QUERY_ARITHMETIC_OPERATOR);
+}
+
+// Constructs a unary operator node (OP_NOT or OP_NEGATE) applied to |left|.
+// is_pattern_ is explicitly cleared for the same reason as in the binary
+// operator constructor: it was previously left uninitialized and later read.
+Query::Query(const Query& left, Operator unary_op)
+ : operator_(unary_op),
+ left_(new QueryNode(left)),
+ member_(EVENT_INVALID),
+ number_(0),
+ is_pattern_(false) {
+ type_ = (unary_op < OP_ADD ?
+ QUERY_BOOLEAN_OPERATOR : QUERY_ARITHMETIC_OPERATOR);
+}
+
+namespace {
+
+// Search |events| for |query| and add matches to |output|.
+// Returns the total size of |output| (not just the number appended).
+size_t FindMatchingEvents(const std::vector<TraceEvent>& events,
+ const Query& query,
+ TraceEventVector* output,
+ bool ignore_metadata_events) {
+ for (size_t i = 0; i < events.size(); ++i) {
+ if (ignore_metadata_events && events[i].phase == TRACE_EVENT_PHASE_METADATA)
+ continue;
+ if (query.Evaluate(events[i]))
+ output->push_back(&events[i]);
+ }
+ return output->size();
+}
+
+// Parses the trace JSON (a list of event dictionaries) into |output|.
+// Returns false on malformed JSON or on the first event that fails
+// TraceEvent::SetFromJSON.
+bool ParseEventsFromJson(const std::string& json,
+ std::vector<TraceEvent>* output) {
+ std::unique_ptr<base::Value> root = base::JSONReader::Read(json);
+
+ base::ListValue* root_list = NULL;
+ if (!root.get() || !root->GetAsList(&root_list))
+ return false;
+
+ for (size_t i = 0; i < root_list->GetSize(); ++i) {
+ base::Value* item = NULL;
+ if (root_list->Get(i, &item)) {
+ TraceEvent event;
+ if (event.SetFromJSON(item))
+ output->push_back(std::move(event));
+ else
+ return false;
+ }
+ }
+
+ return true;
+}
+
+} // namespace
+
+// TraceAnalyzer
+
+// Association changes are allowed until the first FindEvents() call; see
+// AssociateEvents().
+TraceAnalyzer::TraceAnalyzer()
+ : ignore_metadata_events_(false),
+ allow_assocation_changes_(true) {}
+
+TraceAnalyzer::~TraceAnalyzer() {
+}
+
+// static
+// Creates an analyzer from a JSON trace string; returns NULL (ownership to
+// the caller otherwise) when the JSON cannot be parsed.
+TraceAnalyzer* TraceAnalyzer::Create(const std::string& json_events) {
+ std::unique_ptr<TraceAnalyzer> analyzer(new TraceAnalyzer());
+ if (analyzer->SetEvents(json_events))
+ return analyzer.release();
+ return NULL;
+}
+
+// Replaces the current event list with events parsed from |json_events|.
+// Events are stably sorted (by the TraceEvent comparison, preserving input
+// order for ties) and thread-name metadata is extracted.
+bool TraceAnalyzer::SetEvents(const std::string& json_events) {
+ raw_events_.clear();
+ if (!ParseEventsFromJson(json_events, &raw_events_))
+ return false;
+ std::stable_sort(raw_events_.begin(), raw_events_.end());
+ ParseMetadata();
+ return true;
+}
+
+// Convenience wrapper: pairs BEGIN events with END events that share name,
+// category, thread and process.
+void TraceAnalyzer::AssociateBeginEndEvents() {
+ using trace_analyzer::Query;
+
+ Query begin(Query::EventPhaseIs(TRACE_EVENT_PHASE_BEGIN));
+ Query end(Query::EventPhaseIs(TRACE_EVENT_PHASE_END));
+ Query match(Query::EventName() == Query::OtherName() &&
+ Query::EventCategory() == Query::OtherCategory() &&
+ Query::EventTid() == Query::OtherTid() &&
+ Query::EventPid() == Query::OtherPid());
+
+ AssociateEvents(begin, end, match);
+}
+
+// Convenience wrapper: pairs async begin/step events with the matching
+// step/end events that share name, category and id. Step events appear in
+// both queries because a step both ends the previous phase and begins the
+// next one.
+void TraceAnalyzer::AssociateAsyncBeginEndEvents() {
+ using trace_analyzer::Query;
+
+ Query begin(
+ Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_BEGIN) ||
+ Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_STEP_INTO) ||
+ Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_STEP_PAST));
+ Query end(Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_END) ||
+ Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_STEP_INTO) ||
+ Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_STEP_PAST));
+ Query match(Query::EventName() == Query::OtherName() &&
+ Query::EventCategory() == Query::OtherCategory() &&
+ Query::EventId() == Query::OtherId());
+
+ AssociateEvents(begin, end, match);
+}
+
+// Links events matching |first| to later events matching |second|. Events
+// matching |first| are pushed on a stack; when a |second| event is seen, the
+// stack is searched from the most recent entry, evaluating |match| with the
+// begin event's other_event temporarily pointing at the candidate. A matched
+// begin keeps that other_event pointer as the association.
+void TraceAnalyzer::AssociateEvents(const Query& first,
+ const Query& second,
+ const Query& match) {
+ DCHECK(allow_assocation_changes_)
+ << "AssociateEvents not allowed after FindEvents";
+
+ // Search for matching begin/end event pairs. When a matching end is found,
+ // it is associated with the begin event.
+ std::vector<TraceEvent*> begin_stack;
+ for (size_t event_index = 0; event_index < raw_events_.size();
+ ++event_index) {
+
+ TraceEvent& this_event = raw_events_[event_index];
+
+ if (second.Evaluate(this_event)) {
+ // Search stack for matching begin, starting from end.
+ for (int stack_index = static_cast<int>(begin_stack.size()) - 1;
+ stack_index >= 0; --stack_index) {
+ TraceEvent& begin_event = *begin_stack[stack_index];
+
+ // Temporarily set other to test against the match query.
+ const TraceEvent* other_backup = begin_event.other_event;
+ begin_event.other_event = &this_event;
+ if (match.Evaluate(begin_event)) {
+ // Found a matching begin/end pair.
+ // Erase the matching begin event index from the stack.
+ begin_stack.erase(begin_stack.begin() + stack_index);
+ break;
+ }
+
+ // Not a match, restore original other and continue.
+ begin_event.other_event = other_backup;
+ }
+ }
+ // Even if this_event is a |second| event that has matched an earlier
+ // |first| event, it can still also be a |first| event and be associated
+ // with a later |second| event.
+ if (first.Evaluate(this_event)) {
+ begin_stack.push_back(&this_event);
+ }
+ }
+}
+
+// Copies the arguments of each event's association chain into the event
+// itself. std::map::insert keeps existing keys, so an event's own arguments
+// (and earlier events in the chain) take precedence over later ones.
+void TraceAnalyzer::MergeAssociatedEventArgs() {
+ for (size_t i = 0; i < raw_events_.size(); ++i) {
+ // Merge all associated events with the first event.
+ const TraceEvent* other = raw_events_[i].other_event;
+ // Avoid looping by keeping set of encountered TraceEvents.
+ std::set<const TraceEvent*> encounters;
+ encounters.insert(&raw_events_[i]);
+ while (other && encounters.find(other) == encounters.end()) {
+ encounters.insert(other);
+ raw_events_[i].arg_numbers.insert(
+ other->arg_numbers.begin(),
+ other->arg_numbers.end());
+ raw_events_[i].arg_strings.insert(
+ other->arg_strings.begin(),
+ other->arg_strings.end());
+ other = other->other_event;
+ }
+ }
+}
+
+// Fills |output| with pointers to all events matching |query| and returns
+// the match count. Locks out further AssociateEvents() calls, since found
+// events point into raw_events_.
+size_t TraceAnalyzer::FindEvents(const Query& query, TraceEventVector* output) {
+ allow_assocation_changes_ = false;
+ output->clear();
+ return FindMatchingEvents(
+ raw_events_, query, output, ignore_metadata_events_);
+}
+
+// Returns the first event matching |query|, or NULL when there is none.
+const TraceEvent* TraceAnalyzer::FindFirstOf(const Query& query) {
+ TraceEventVector output;
+ if (FindEvents(query, &output) > 0)
+ return output.front();
+ return NULL;
+}
+
+// Returns the last event matching |query|, or NULL when there is none.
+const TraceEvent* TraceAnalyzer::FindLastOf(const Query& query) {
+ TraceEventVector output;
+ if (FindEvents(query, &output) > 0)
+ return output.back();
+ return NULL;
+}
+
+// Returns the recorded name for |thread| (from "thread_name" metadata).
+const std::string& TraceAnalyzer::GetThreadName(
+ const TraceEvent::ProcessThreadID& thread) {
+ // If thread is not found, just add and return empty string.
+ return thread_names_[thread];
+}
+
+// Scans raw events for METADATA "thread_name" entries and records each
+// thread's name in thread_names_.
+void TraceAnalyzer::ParseMetadata() {
+ for (size_t i = 0; i < raw_events_.size(); ++i) {
+ TraceEvent& this_event = raw_events_[i];
+ // Check for thread name metadata.
+ if (this_event.phase != TRACE_EVENT_PHASE_METADATA ||
+ this_event.name != "thread_name")
+ continue;
+ std::map<std::string, std::string>::const_iterator string_it =
+ this_event.arg_strings.find("name");
+ if (string_it != this_event.arg_strings.end())
+ thread_names_[this_event.thread] = string_it->second;
+ }
+}
+
+// TraceEventVector utility functions.
+
+// Computes min/max/mean/sample-stddev of the deltas between consecutive
+// event timestamps in |events| (which must be in timestamp order). Requires
+// at least 3 events. If |options| is given, the |trim_min| smallest and
+// |trim_max| largest deltas are discarded first; the deltas are sorted so
+// trimming removes the extremes.
+bool GetRateStats(const TraceEventVector& events,
+ RateStats* stats,
+ const RateStatsOptions* options) {
+ DCHECK(stats);
+ // Need at least 3 events to calculate rate stats.
+ const size_t kMinEvents = 3;
+ if (events.size() < kMinEvents) {
+ LOG(ERROR) << "Not enough events: " << events.size();
+ return false;
+ }
+
+ std::vector<double> deltas;
+ size_t num_deltas = events.size() - 1;
+ for (size_t i = 0; i < num_deltas; ++i) {
+ double delta = events.at(i + 1)->timestamp - events.at(i)->timestamp;
+ if (delta < 0.0) {
+ LOG(ERROR) << "Events are out of order";
+ return false;
+ }
+ deltas.push_back(delta);
+ }
+
+ std::sort(deltas.begin(), deltas.end());
+
+ if (options) {
+ // Ensure at least kMinEvents - 1 deltas remain after trimming.
+ if (options->trim_min + options->trim_max > events.size() - kMinEvents) {
+ LOG(ERROR) << "Attempt to trim too many events";
+ return false;
+ }
+ deltas.erase(deltas.begin(), deltas.begin() + options->trim_min);
+ deltas.erase(deltas.end() - options->trim_max, deltas.end());
+ }
+
+ num_deltas = deltas.size();
+ double delta_sum = 0.0;
+ for (size_t i = 0; i < num_deltas; ++i)
+ delta_sum += deltas[i];
+
+ stats->min_us = *std::min_element(deltas.begin(), deltas.end());
+ stats->max_us = *std::max_element(deltas.begin(), deltas.end());
+ stats->mean_us = delta_sum / static_cast<double>(num_deltas);
+
+ // Sample standard deviation (divides by N - 1).
+ double sum_mean_offsets_squared = 0.0;
+ for (size_t i = 0; i < num_deltas; ++i) {
+ double offset = fabs(deltas[i] - stats->mean_us);
+ sum_mean_offsets_squared += offset * offset;
+ }
+ stats->standard_deviation_us =
+ sqrt(sum_mean_offsets_squared / static_cast<double>(num_deltas - 1));
+
+ return true;
+}
+
+// Finds the first event at or after |position| that matches |query|.
+// On success stores the index in |return_index| and returns true.
+bool FindFirstOf(const TraceEventVector& events,
+ const Query& query,
+ size_t position,
+ size_t* return_index) {
+ DCHECK(return_index);
+ size_t index = position;
+ while (index < events.size()) {
+ if (query.Evaluate(*events[index])) {
+ *return_index = index;
+ return true;
+ }
+ ++index;
+ }
+ return false;
+}
+
+// Finds the last event at or before |position| that matches |query|,
+// searching backwards. |i| runs one past the candidate index so the
+// unsigned loop counter never underflows. On success stores the index in
+// |return_index| and returns true.
+bool FindLastOf(const TraceEventVector& events,
+ const Query& query,
+ size_t position,
+ size_t* return_index) {
+ DCHECK(return_index);
+ for (size_t i = std::min(position + 1, events.size()); i != 0; --i) {
+ if (query.Evaluate(*events[i - 1])) {
+ *return_index = i - 1;
+ return true;
+ }
+ }
+ return false;
+}
+
+// Finds the matching event whose timestamp is closest to the event at
+// |position| (and, if |return_second_closest| is non-null, the second
+// closest). events.size() is used as the "not found" sentinel for both
+// candidates. Returns false when |position| is out of range or too few
+// matches exist.
+bool FindClosest(const TraceEventVector& events,
+ const Query& query,
+ size_t position,
+ size_t* return_closest,
+ size_t* return_second_closest) {
+ DCHECK(return_closest);
+ if (events.empty() || position >= events.size())
+ return false;
+ size_t closest = events.size();
+ size_t second_closest = events.size();
+ for (size_t i = 0; i < events.size(); ++i) {
+ if (!query.Evaluate(*events.at(i)))
+ continue;
+ if (closest == events.size()) {
+ closest = i;
+ continue;
+ }
+ if (fabs(events.at(i)->timestamp - events.at(position)->timestamp) <
+ fabs(events.at(closest)->timestamp - events.at(position)->timestamp)) {
+ second_closest = closest;
+ closest = i;
+ } else if (second_closest == events.size()) {
+ second_closest = i;
+ }
+ }
+
+ if (closest < events.size() &&
+ (!return_second_closest || second_closest < events.size())) {
+ *return_closest = closest;
+ if (return_second_closest)
+ *return_second_closest = second_closest;
+ return true;
+ }
+
+ return false;
+}
+
+// Counts events matching |query| in the half-open index range
+// [begin_position, end_position), clamped to the vector size.
+size_t CountMatches(const TraceEventVector& events,
+ const Query& query,
+ size_t begin_position,
+ size_t end_position) {
+ if (begin_position >= events.size())
+ return 0u;
+ const size_t last = std::min(end_position, events.size());
+ size_t match_count = 0u;
+ for (size_t index = begin_position; index < last; ++index) {
+ if (query.Evaluate(*events.at(index)))
+ ++match_count;
+ }
+ return match_count;
+}
+
+} // namespace trace_analyzer
diff --git a/libchrome/base/test/trace_event_analyzer.h b/libchrome/base/test/trace_event_analyzer.h
new file mode 100644
index 0000000..0e2366b
--- /dev/null
+++ b/libchrome/base/test/trace_event_analyzer.h
@@ -0,0 +1,715 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Use trace_analyzer::Query and trace_analyzer::TraceAnalyzer to search for
+// specific trace events that were generated by the trace_event.h API.
+//
+// Basic procedure:
+// - Get trace events JSON string from base::trace_event::TraceLog.
+// - Create TraceAnalyzer with JSON string.
+// - Call TraceAnalyzer::AssociateBeginEndEvents (optional).
+// - Call TraceAnalyzer::AssociateEvents (zero or more times).
+// - Call TraceAnalyzer::FindEvents with queries to find specific events.
+//
+// A Query is a boolean expression tree that evaluates to true or false for a
+// given trace event. Queries can be combined into a tree using boolean,
+// arithmetic and comparison operators that refer to data of an individual trace
+// event.
+//
+// The events are returned as trace_analyzer::TraceEvent objects.
+// TraceEvent contains a single trace event's data, as well as a pointer to
+// a related trace event. The related trace event is typically the matching end
+// of a begin event or the matching begin of an end event.
+//
+// The following examples use this basic setup code to construct TraceAnalyzer
+// with the json trace string retrieved from TraceLog and construct an event
+// vector for retrieving events:
+//
+// TraceAnalyzer analyzer(json_events);
+// TraceEventVector events;
+//
+// EXAMPLE 1: Find events named "my_event".
+//
+// analyzer.FindEvents(Query(EVENT_NAME) == Query::String("my_event"),
+//                     &events);
+//
+// EXAMPLE 2: Find begin events named "my_event" with duration > 1 second.
+//
+// Query q = (Query(EVENT_NAME) == Query::String("my_event") &&
+// Query(EVENT_PHASE) == Query::Phase(TRACE_EVENT_PHASE_BEGIN) &&
+// Query(EVENT_DURATION) > Query::Double(1000000.0));
+// analyzer.FindEvents(q, &events);
+//
+// EXAMPLE 3: Associating event pairs across threads.
+//
+// If the test needs to analyze something that starts and ends on different
+// threads, the test needs to use INSTANT events. The typical procedure is to
+// specify the same unique ID as a TRACE_EVENT argument on both the start and
+// finish INSTANT events. Then use the following procedure to associate those
+// events.
+//
+// Step 1: instrument code with custom begin/end trace events.
+// [Thread 1 tracing code]
+// TRACE_EVENT_INSTANT1("test_latency", "timing1_begin", "id", 3);
+// [Thread 2 tracing code]
+// TRACE_EVENT_INSTANT1("test_latency", "timing1_end", "id", 3);
+//
+// Step 2: associate these custom begin/end pairs.
+// Query begin(Query(EVENT_NAME) == Query::String("timing1_begin"));
+// Query end(Query(EVENT_NAME) == Query::String("timing1_end"));
+// Query match(Query(EVENT_ARG, "id") == Query(OTHER_ARG, "id"));
+// analyzer.AssociateEvents(begin, end, match);
+//
+// Step 3: search for "timing1_begin" events with existing other event.
+// Query q = (Query(EVENT_NAME) == Query::String("timing1_begin") &&
+// Query(EVENT_HAS_OTHER));
+// analyzer.FindEvents(q, &events);
+//
+// Step 4: analyze events, such as checking durations.
+// for (size_t i = 0; i < events.size(); ++i) {
+//   ASSERT_TRUE(events[i]->has_other_event());
+//   double duration = events[i]->GetAbsTimeToOtherEvent();
+//   EXPECT_LT(duration, 1000000.0/60.0); // expect less than 1/60 second.
+// }
+
+
+#ifndef BASE_TEST_TRACE_EVENT_ANALYZER_H_
+#define BASE_TEST_TRACE_EVENT_ANALYZER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+class Value;
+}
+
+namespace trace_analyzer {
+class QueryNode;
+
+// trace_analyzer::TraceEvent is a more convenient form of the
+// base::trace_event::TraceEvent class to make tracing-based tests easier to
+// write.
+struct TraceEvent {
+  // ProcessThreadID contains a Process ID and Thread ID.
+  struct ProcessThreadID {
+    ProcessThreadID() : process_id(0), thread_id(0) {}
+    ProcessThreadID(int process_id, int thread_id)
+        : process_id(process_id), thread_id(thread_id) {}
+    // Orders by process ID first, then thread ID (usable as a std::map key).
+    bool operator< (const ProcessThreadID& rhs) const {
+      if (process_id != rhs.process_id)
+        return process_id < rhs.process_id;
+      return thread_id < rhs.thread_id;
+    }
+    int process_id;
+    int thread_id;
+  };
+
+  // Move-only (arg_values holds std::unique_ptr<base::Value> members).
+  TraceEvent();
+  TraceEvent(TraceEvent&& other);
+  ~TraceEvent();
+
+  // Initializes this event from one JSON event value produced by TraceLog.
+  // Returns false when the value cannot be interpreted as a trace event.
+  bool SetFromJSON(const base::Value* event_value) WARN_UNUSED_RESULT;
+
+  // Events order chronologically by timestamp.
+  bool operator< (const TraceEvent& rhs) const {
+    return timestamp < rhs.timestamp;
+  }
+
+  TraceEvent& operator=(TraceEvent&& rhs);
+
+  bool has_other_event() const { return other_event; }
+
+  // Returns absolute duration in microseconds between this event and other
+  // event. Must have already verified that other_event exists by
+  // Query(EVENT_HAS_OTHER) or by calling has_other_event().
+  double GetAbsTimeToOtherEvent() const;
+
+  // Return the argument value if it exists and it is a string.
+  bool GetArgAsString(const std::string& name, std::string* arg) const;
+  // Return the argument value if it exists and it is a number.
+  bool GetArgAsNumber(const std::string& name, double* arg) const;
+  // Return the argument value if it exists.
+  bool GetArgAsValue(const std::string& name,
+                     std::unique_ptr<base::Value>* arg) const;
+
+  // Check if argument exists and is string.
+  bool HasStringArg(const std::string& name) const;
+  // Check if argument exists and is number (double, int or bool).
+  bool HasNumberArg(const std::string& name) const;
+  // Check if argument exists.
+  bool HasArg(const std::string& name) const;
+
+  // Get known existing arguments as specific types.
+  // Useful when you have already queried the argument with
+  // Query(HAS_NUMBER_ARG) or Query(HAS_STRING_ARG).
+  std::string GetKnownArgAsString(const std::string& name) const;
+  double GetKnownArgAsDouble(const std::string& name) const;
+  int GetKnownArgAsInt(const std::string& name) const;
+  bool GetKnownArgAsBool(const std::string& name) const;
+  std::unique_ptr<base::Value> GetKnownArgAsValue(
+      const std::string& name) const;
+
+  // Process ID and Thread ID.
+  ProcessThreadID thread;
+
+  // Time since epoch in microseconds.
+  // Stored as double to match its JSON representation.
+  double timestamp;
+  // Event duration in microseconds; presumably only meaningful for COMPLETE
+  // events (see Query::EventCompleteDuration()) -- TODO(review): confirm in
+  // the .cc implementation.
+  double duration;
+  // One of the TRACE_EVENT_PHASE_* constants from trace_event.h
+  // (e.g. TRACE_EVENT_PHASE_BEGIN).
+  char phase;
+  std::string category;
+  std::string name;
+  std::string id;
+
+  // All numbers and bool values from TraceEvent args are cast to double.
+  // bool becomes 1.0 (true) or 0.0 (false).
+  std::map<std::string, double> arg_numbers;
+  std::map<std::string, std::string> arg_strings;
+  std::map<std::string, std::unique_ptr<base::Value>> arg_values;
+
+  // The other event associated with this event (or NULL).
+  const TraceEvent* other_event;
+};
+
+typedef std::vector<const TraceEvent*> TraceEventVector;
+
+class Query {
+ public:
+  Query(const Query& query);
+
+  ~Query();
+
+  ////////////////////////////////////////////////////////////////
+  // Query literal values
+
+  // Compare with the given string.
+  static Query String(const std::string& str);
+
+  // Compare with the given number.
+  static Query Double(double num);
+  static Query Int(int32_t num);
+  static Query Uint(uint32_t num);
+
+  // Compare with the given bool.
+  static Query Bool(bool boolean);
+
+  // Compare with the given phase.
+  static Query Phase(char phase);
+
+  // Compare with the given string pattern. Only works with == and != operators.
+  // Example: Query(EVENT_NAME) == Query::Pattern("MyEvent*")
+  static Query Pattern(const std::string& pattern);
+
+  ////////////////////////////////////////////////////////////////
+  // Query event members
+
+  static Query EventPid() { return Query(EVENT_PID); }
+
+  static Query EventTid() { return Query(EVENT_TID); }
+
+  // Return the timestamp of the event in microseconds since epoch.
+  static Query EventTime() { return Query(EVENT_TIME); }
+
+  // Return the absolute time between event and other event in microseconds.
+  // Only works if Query::EventHasOther() == true.
+  static Query EventDuration() { return Query(EVENT_DURATION); }
+
+  // Return the duration of a COMPLETE event.
+  static Query EventCompleteDuration() {
+    return Query(EVENT_COMPLETE_DURATION);
+  }
+
+  static Query EventPhase() { return Query(EVENT_PHASE); }
+
+  static Query EventCategory() { return Query(EVENT_CATEGORY); }
+
+  static Query EventName() { return Query(EVENT_NAME); }
+
+  static Query EventId() { return Query(EVENT_ID); }
+
+  static Query EventPidIs(int process_id) {
+    return Query(EVENT_PID) == Query::Int(process_id);
+  }
+
+  static Query EventTidIs(int thread_id) {
+    return Query(EVENT_TID) == Query::Int(thread_id);
+  }
+
+  static Query EventThreadIs(const TraceEvent::ProcessThreadID& thread) {
+    return EventPidIs(thread.process_id) && EventTidIs(thread.thread_id);
+  }
+
+  static Query EventTimeIs(double timestamp) {
+    return Query(EVENT_TIME) == Query::Double(timestamp);
+  }
+
+  static Query EventDurationIs(double duration) {
+    return Query(EVENT_DURATION) == Query::Double(duration);
+  }
+
+  static Query EventPhaseIs(char phase) {
+    return Query(EVENT_PHASE) == Query::Phase(phase);
+  }
+
+  static Query EventCategoryIs(const std::string& category) {
+    return Query(EVENT_CATEGORY) == Query::String(category);
+  }
+
+  static Query EventNameIs(const std::string& name) {
+    return Query(EVENT_NAME) == Query::String(name);
+  }
+
+  static Query EventIdIs(const std::string& id) {
+    return Query(EVENT_ID) == Query::String(id);
+  }
+
+  // Evaluates to true if arg exists and is a string.
+  static Query EventHasStringArg(const std::string& arg_name) {
+    return Query(EVENT_HAS_STRING_ARG, arg_name);
+  }
+
+  // Evaluates to true if arg exists and is a number.
+  // Number arguments include types double, int and bool.
+  static Query EventHasNumberArg(const std::string& arg_name) {
+    return Query(EVENT_HAS_NUMBER_ARG, arg_name);
+  }
+
+  // Evaluates to arg value (string or number).
+  static Query EventArg(const std::string& arg_name) {
+    return Query(EVENT_ARG, arg_name);
+  }
+
+  // Return true if associated event exists.
+  static Query EventHasOther() { return Query(EVENT_HAS_OTHER); }
+
+  // Access the associated other_event's members:
+
+  static Query OtherPid() { return Query(OTHER_PID); }
+
+  static Query OtherTid() { return Query(OTHER_TID); }
+
+  static Query OtherTime() { return Query(OTHER_TIME); }
+
+  static Query OtherPhase() { return Query(OTHER_PHASE); }
+
+  static Query OtherCategory() { return Query(OTHER_CATEGORY); }
+
+  static Query OtherName() { return Query(OTHER_NAME); }
+
+  static Query OtherId() { return Query(OTHER_ID); }
+
+  static Query OtherPidIs(int process_id) {
+    return Query(OTHER_PID) == Query::Int(process_id);
+  }
+
+  static Query OtherTidIs(int thread_id) {
+    return Query(OTHER_TID) == Query::Int(thread_id);
+  }
+
+  static Query OtherThreadIs(const TraceEvent::ProcessThreadID& thread) {
+    return OtherPidIs(thread.process_id) && OtherTidIs(thread.thread_id);
+  }
+
+  static Query OtherTimeIs(double timestamp) {
+    return Query(OTHER_TIME) == Query::Double(timestamp);
+  }
+
+  static Query OtherPhaseIs(char phase) {
+    return Query(OTHER_PHASE) == Query::Phase(phase);
+  }
+
+  static Query OtherCategoryIs(const std::string& category) {
+    return Query(OTHER_CATEGORY) == Query::String(category);
+  }
+
+  static Query OtherNameIs(const std::string& name) {
+    return Query(OTHER_NAME) == Query::String(name);
+  }
+
+  static Query OtherIdIs(const std::string& id) {
+    return Query(OTHER_ID) == Query::String(id);
+  }
+
+  // Evaluates to true if arg exists and is a string.
+  static Query OtherHasStringArg(const std::string& arg_name) {
+    return Query(OTHER_HAS_STRING_ARG, arg_name);
+  }
+
+  // Evaluates to true if arg exists and is a number.
+  // Number arguments include types double, int and bool.
+  static Query OtherHasNumberArg(const std::string& arg_name) {
+    return Query(OTHER_HAS_NUMBER_ARG, arg_name);
+  }
+
+  // Evaluates to arg value (string or number).
+  static Query OtherArg(const std::string& arg_name) {
+    return Query(OTHER_ARG, arg_name);
+  }
+
+  ////////////////////////////////////////////////////////////////
+  // Common queries:
+
+  // Find BEGIN events that have a corresponding END event.
+  static Query MatchBeginWithEnd() {
+    return (Query(EVENT_PHASE) == Query::Phase(TRACE_EVENT_PHASE_BEGIN)) &&
+           Query(EVENT_HAS_OTHER);
+  }
+
+  // Find COMPLETE events.
+  static Query MatchComplete() {
+    return (Query(EVENT_PHASE) == Query::Phase(TRACE_EVENT_PHASE_COMPLETE));
+  }
+
+  // Find ASYNC_BEGIN events that have a corresponding ASYNC_END event.
+  static Query MatchAsyncBeginWithNext() {
+    return (Query(EVENT_PHASE) ==
+            Query::Phase(TRACE_EVENT_PHASE_ASYNC_BEGIN)) &&
+           Query(EVENT_HAS_OTHER);
+  }
+
+  // Find BEGIN events of given |name| which also have associated END events.
+  static Query MatchBeginName(const std::string& name) {
+    return (Query(EVENT_NAME) == Query(name)) && MatchBeginWithEnd();
+  }
+
+  // Find COMPLETE events of given |name|.
+  static Query MatchCompleteName(const std::string& name) {
+    return (Query(EVENT_NAME) == Query(name)) && MatchComplete();
+  }
+
+  // Match given Process ID and Thread ID.
+  static Query MatchThread(const TraceEvent::ProcessThreadID& thread) {
+    return (Query(EVENT_PID) == Query::Int(thread.process_id)) &&
+           (Query(EVENT_TID) == Query::Int(thread.thread_id));
+  }
+
+  // Match event pair that spans multiple threads.
+  static Query MatchCrossThread() {
+    return (Query(EVENT_PID) != Query(OTHER_PID)) ||
+           (Query(EVENT_TID) != Query(OTHER_TID));
+  }
+
+  ////////////////////////////////////////////////////////////////
+  // Operators:
+
+  // Boolean operators:
+  Query operator==(const Query& rhs) const;
+  Query operator!=(const Query& rhs) const;
+  Query operator< (const Query& rhs) const;
+  Query operator<=(const Query& rhs) const;
+  Query operator> (const Query& rhs) const;
+  Query operator>=(const Query& rhs) const;
+  Query operator&&(const Query& rhs) const;
+  Query operator||(const Query& rhs) const;
+  Query operator!() const;
+
+  // Arithmetic operators:
+  // Following operators are applied to double arguments:
+  Query operator+(const Query& rhs) const;
+  Query operator-(const Query& rhs) const;
+  Query operator*(const Query& rhs) const;
+  Query operator/(const Query& rhs) const;
+  Query operator-() const;
+  // Mod operates on int64_t args (doubles are casted to int64_t beforehand):
+  Query operator%(const Query& rhs) const;
+
+  // Return true if the given event matches this query tree.
+  // This is a recursive method that walks the query tree.
+  bool Evaluate(const TraceEvent& event) const;
+
+ private:
+  enum TraceEventMember {
+    EVENT_INVALID,
+    EVENT_PID,
+    EVENT_TID,
+    EVENT_TIME,
+    EVENT_DURATION,
+    EVENT_COMPLETE_DURATION,
+    EVENT_PHASE,
+    EVENT_CATEGORY,
+    EVENT_NAME,
+    EVENT_ID,
+    EVENT_HAS_STRING_ARG,
+    EVENT_HAS_NUMBER_ARG,
+    EVENT_ARG,
+    EVENT_HAS_OTHER,
+    OTHER_PID,
+    OTHER_TID,
+    OTHER_TIME,
+    OTHER_PHASE,
+    OTHER_CATEGORY,
+    OTHER_NAME,
+    OTHER_ID,
+    OTHER_HAS_STRING_ARG,
+    OTHER_HAS_NUMBER_ARG,
+    OTHER_ARG,
+  };
+
+  enum Operator {
+    OP_INVALID,
+    // Boolean operators:
+    OP_EQ,
+    OP_NE,
+    OP_LT,
+    OP_LE,
+    OP_GT,
+    OP_GE,
+    OP_AND,
+    OP_OR,
+    OP_NOT,
+    // Arithmetic operators:
+    OP_ADD,
+    OP_SUB,
+    OP_MUL,
+    OP_DIV,
+    OP_MOD,
+    OP_NEGATE
+  };
+
+  enum QueryType {
+    QUERY_BOOLEAN_OPERATOR,
+    QUERY_ARITHMETIC_OPERATOR,
+    QUERY_EVENT_MEMBER,
+    QUERY_NUMBER,
+    QUERY_STRING
+  };
+
+  // Compare with the given member.
+  explicit Query(TraceEventMember member);
+
+  // Compare with the given member argument value.
+  Query(TraceEventMember member, const std::string& arg_name);
+
+  // Compare with the given string.
+  explicit Query(const std::string& str);
+
+  // Compare with the given number.
+  explicit Query(double num);
+
+  // Construct a boolean Query that returns (left <binary_op> right).
+  Query(const Query& left, const Query& right, Operator binary_op);
+
+  // Construct a boolean Query that returns (<binary_op> left).
+  Query(const Query& left, Operator unary_op);
+
+  // Try to compare left_ against right_ based on operator_.
+  // If either left or right does not convert to double, false is returned.
+  // Otherwise, true is returned and |result| is set to the comparison result.
+  bool CompareAsDouble(const TraceEvent& event, bool* result) const;
+
+  // Try to compare left_ against right_ based on operator_.
+  // If either left or right does not convert to string, false is returned.
+  // Otherwise, true is returned and |result| is set to the comparison result.
+  bool CompareAsString(const TraceEvent& event, bool* result) const;
+
+  // Attempt to convert this Query to a double. On success, true is returned
+  // and the double value is stored in |num|.
+  bool GetAsDouble(const TraceEvent& event, double* num) const;
+
+  // Attempt to convert this Query to a string. On success, true is returned
+  // and the string value is stored in |str|.
+  bool GetAsString(const TraceEvent& event, std::string* str) const;
+
+  // Evaluate this Query as an arithmetic operator on left_ and right_.
+  bool EvaluateArithmeticOperator(const TraceEvent& event,
+                                  double* num) const;
+
+  // For QUERY_EVENT_MEMBER Query: attempt to get the double value of the Query.
+  bool GetMemberValueAsDouble(const TraceEvent& event, double* num) const;
+
+  // For QUERY_EVENT_MEMBER Query: attempt to get the string value of the Query.
+  // (Out-param renamed from the copy-pasted |num| to |str| for clarity; a
+  // declaration-only parameter name, so the .cc definition is unaffected.)
+  bool GetMemberValueAsString(const TraceEvent& event, std::string* str) const;
+
+  // Does this Query represent a value?
+  bool is_value() const { return type_ != QUERY_BOOLEAN_OPERATOR; }
+
+  bool is_unary_operator() const {
+    return operator_ == OP_NOT || operator_ == OP_NEGATE;
+  }
+
+  bool is_comparison_operator() const {
+    return operator_ != OP_INVALID && operator_ < OP_AND;
+  }
+
+  const Query& left() const;
+  const Query& right() const;
+
+  QueryType type_;           // What kind of node this is in the query tree.
+  Operator operator_;        // Valid for operator-type nodes.
+  scoped_refptr<QueryNode> left_;   // Operand(s) for operator nodes;
+  scoped_refptr<QueryNode> right_;  // right_ is unused for unary operators.
+  TraceEventMember member_;  // Valid when type_ == QUERY_EVENT_MEMBER.
+  double number_;            // Valid when type_ == QUERY_NUMBER.
+  std::string string_;       // String literal or argument name.
+  bool is_pattern_;          // True when string_ is a Pattern() wildcard.
+};
+
+// Implementation detail:
+// QueryNode allows Query to store a ref-counted query tree.
+class QueryNode : public base::RefCounted<QueryNode> {
+ public:
+  explicit QueryNode(const Query& query);
+  const Query& query() const { return query_; }
+
+ private:
+  // RefCounted destroys the node via Release(); the destructor is private and
+  // RefCounted is befriended so nothing else can delete a node directly.
+  friend class base::RefCounted<QueryNode>;
+  ~QueryNode();
+
+  Query query_;
+};
+
+// TraceAnalyzer helps tests search for trace events.
+class TraceAnalyzer {
+ public:
+  ~TraceAnalyzer();
+
+  // Use trace events from JSON string generated by tracing API.
+  // Returns non-NULL if the JSON is successfully parsed. The caller takes
+  // ownership of the returned analyzer (callers in the unittest wrap it in a
+  // std::unique_ptr).
+  static TraceAnalyzer* Create(const std::string& json_events)
+      WARN_UNUSED_RESULT;
+
+  // Controls whether metadata events are ignored.
+  // Fix: store the caller's value. Previously the parameter was discarded and
+  // |ignore_metadata_events_| was unconditionally set to true, so
+  // SetIgnoreMetadataEvents(false) could never re-enable metadata events.
+  void SetIgnoreMetadataEvents(bool ignore) {
+    ignore_metadata_events_ = ignore;
+  }
+
+  // Associate BEGIN and END events with each other. This allows Query(OTHER_*)
+  // to access the associated event and enables Query(EVENT_DURATION).
+  // An end event will match the most recent begin event with the same name,
+  // category, process ID and thread ID. This matches what is shown in
+  // about:tracing. After association, the BEGIN event will point to the
+  // matching END event, but the END event will not point to the BEGIN event.
+  void AssociateBeginEndEvents();
+
+  // Associate ASYNC_BEGIN, ASYNC_STEP and ASYNC_END events with each other.
+  // An ASYNC_END event will match the most recent ASYNC_BEGIN or ASYNC_STEP
+  // event with the same name, category, and ID. This creates a singly linked
+  // list of ASYNC_BEGIN->ASYNC_STEP...->ASYNC_END.
+  void AssociateAsyncBeginEndEvents();
+
+  // AssociateEvents can be used to customize event associations by setting the
+  // other_event member of TraceEvent. This should be used to associate two
+  // INSTANT events.
+  //
+  // The assumptions are:
+  // - |first| events occur before |second| events.
+  // - the closest matching |second| event is the correct match.
+  //
+  // |first| - Eligible |first| events match this query.
+  // |second| - Eligible |second| events match this query.
+  // |match| - This query is run on the |first| event. The OTHER_* EventMember
+  //           queries will point to an eligible |second| event. The query
+  //           should evaluate to true if the |first|/|second| pair is a match.
+  //
+  // When a match is found, the pair will be associated by having the first
+  // event's other_event member point to the other. AssociateEvents does not
+  // clear previous associations, so it is possible to associate multiple pairs
+  // of events by calling AssociateEvents more than once with different queries.
+  //
+  // NOTE: AssociateEvents will overwrite existing other_event associations if
+  // the queries pass for events that already had a previous association.
+  //
+  // After calling any Find* method, it is not allowed to call AssociateEvents
+  // again.
+  void AssociateEvents(const Query& first,
+                       const Query& second,
+                       const Query& match);
+
+  // For each event, copy its arguments to the other_event argument map. If
+  // argument name already exists, it will not be overwritten.
+  void MergeAssociatedEventArgs();
+
+  // Find all events that match query and replace output vector.
+  size_t FindEvents(const Query& query, TraceEventVector* output);
+
+  // Find first event that matches query or NULL if not found.
+  const TraceEvent* FindFirstOf(const Query& query);
+
+  // Find last event that matches query or NULL if not found.
+  const TraceEvent* FindLastOf(const Query& query);
+
+  const std::string& GetThreadName(const TraceEvent::ProcessThreadID& thread);
+
+ private:
+  TraceAnalyzer();
+
+  bool SetEvents(const std::string& json_events) WARN_UNUSED_RESULT;
+
+  // Read metadata (thread names, etc) from events.
+  void ParseMetadata();
+
+  std::map<TraceEvent::ProcessThreadID, std::string> thread_names_;
+  std::vector<TraceEvent> raw_events_;
+  bool ignore_metadata_events_;
+  // NOTE(review): "assocation" is a pre-existing misspelling of "association";
+  // the name is kept because the .cc definition uses it.
+  bool allow_assocation_changes_;
+
+  DISALLOW_COPY_AND_ASSIGN(TraceAnalyzer);
+};
+
+// Utility functions for TraceEventVector.
+
+// Summary statistics (all in microseconds, hence the _us suffix) of the time
+// deltas between adjacent events, as filled in by GetRateStats().
+struct RateStats {
+  double min_us;
+  double max_us;
+  double mean_us;
+  double standard_deviation_us;
+};
+
+struct RateStatsOptions {
+  // By default nothing is trimmed.
+  RateStatsOptions() : trim_min(0u), trim_max(0u) {}
+  // After the times between events are sorted, the number of specified elements
+  // will be trimmed before calculating the RateStats. This is useful in cases
+  // where extreme outliers are tolerable and should not skew the overall
+  // average.
+  size_t trim_min; // Trim this many minimum times.
+  size_t trim_max; // Trim this many maximum times.
+};
+
+// Calculate min/max/mean and standard deviation from the times between
+// adjacent events.
+// |options| describes optional trimming of extreme samples; see
+// RateStatsOptions. NOTE(review): whether NULL |options| is accepted is not
+// visible from this header -- confirm against the .cc before relying on it.
+bool GetRateStats(const TraceEventVector& events,
+                  RateStats* stats,
+                  const RateStatsOptions* options);
+
+// Starting from |position|, find the first event that matches |query|.
+// Returns true if found, false otherwise.
+bool FindFirstOf(const TraceEventVector& events,
+                 const Query& query,
+                 size_t position,
+                 size_t* return_index);
+
+// Starting from |position|, find the last event that matches |query|.
+// The search walks backwards from |position| toward the first event.
+// Returns true if found, false otherwise.
+bool FindLastOf(const TraceEventVector& events,
+                const Query& query,
+                size_t position,
+                size_t* return_index);
+
+// Find the closest events to |position| in time that match |query|.
+// return_second_closest may be NULL. Closeness is determined by comparing
+// with the event timestamp.
+// Returns true if found, false otherwise. If both return parameters are
+// requested, both must be found for a successful result.
+bool FindClosest(const TraceEventVector& events,
+                 const Query& query,
+                 size_t position,
+                 size_t* return_closest,
+                 size_t* return_second_closest);
+
+// Count matches, inclusive of |begin_position|, exclusive of |end_position|.
+size_t CountMatches(const TraceEventVector& events,
+                    const Query& query,
+                    size_t begin_position,
+                    size_t end_position);
+
+// Count all matches.
+// Convenience overload that delegates to the ranged CountMatches() over the
+// entire vector.
+static inline size_t CountMatches(const TraceEventVector& events,
+                                  const Query& query) {
+  return CountMatches(events, query, 0u, events.size());
+}
+
+} // namespace trace_analyzer
+
+#endif // BASE_TEST_TRACE_EVENT_ANALYZER_H_
diff --git a/libchrome/base/test/trace_event_analyzer_unittest.cc b/libchrome/base/test/trace_event_analyzer_unittest.cc
new file mode 100644
index 0000000..086cfc9
--- /dev/null
+++ b/libchrome/base/test/trace_event_analyzer_unittest.cc
@@ -0,0 +1,955 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/trace_event_analyzer.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/bind.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/trace_event/trace_buffer.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace trace_analyzer {
+
+namespace {
+
+class TraceEventAnalyzerTest : public testing::Test {
+ public:
+  // Per-test setup, invoked explicitly from each test body instead of
+  // overriding gtest's SetUp(). NOTE(review): presumably so the ASSERT_*
+  // inside it fails the calling test directly -- confirm before changing.
+  void ManualSetUp();
+  // TraceLog flush callback: appends each JSON chunk to |buffer_| and signals
+  // |flush_complete_event| once the final chunk has arrived.
+  void OnTraceDataCollected(
+      base::WaitableEvent* flush_complete_event,
+      const scoped_refptr<base::RefCountedString>& json_events_str,
+      bool has_more_events);
+  // Clears any previously collected JSON and enables tracing.
+  void BeginTracing();
+  // Disables tracing and synchronously flushes events into |output_|.
+  void EndTracing();
+
+  // |output_| holds the accumulated JSON (valid after EndTracing()).
+  base::trace_event::TraceResultBuffer::SimpleOutput output_;
+  base::trace_event::TraceResultBuffer buffer_;
+};
+
+void TraceEventAnalyzerTest::ManualSetUp() {
+  // The TraceLog singleton must exist before any test can record events.
+  ASSERT_TRUE(base::trace_event::TraceLog::GetInstance());
+  // Start from an empty JSON string and route flushed trace fragments into
+  // |output_|.
+  output_.json_output.clear();
+  buffer_.SetOutputCallback(output_.GetCallback());
+}
+
+void TraceEventAnalyzerTest::OnTraceDataCollected(
+    base::WaitableEvent* flush_complete_event,
+    const scoped_refptr<base::RefCountedString>& json_events_str,
+    bool has_more_events) {
+  // Accumulate each flushed chunk of JSON into the result buffer.
+  buffer_.AddFragment(json_events_str->data());
+  if (has_more_events)
+    return;
+  // Final chunk received: unblock EndTracing(), which waits on this event.
+  flush_complete_event->Signal();
+}
+
+void TraceEventAnalyzerTest::BeginTracing() {
+  // Reset any JSON accumulated by a previous tracing session before the
+  // buffer starts writing its preamble into |output_|.
+  output_.json_output.clear();
+  buffer_.Start();
+  base::trace_event::TraceLog* trace_log =
+      base::trace_event::TraceLog::GetInstance();
+  trace_log->SetEnabled(base::trace_event::TraceConfig("*", ""),
+                        base::trace_event::TraceLog::RECORDING_MODE);
+}
+
+void TraceEventAnalyzerTest::EndTracing() {
+  base::trace_event::TraceLog::GetInstance()->SetDisabled();
+  // Flush delivers events asynchronously in chunks; block here until
+  // OnTraceDataCollected() signals that the last chunk has arrived.
+  base::WaitableEvent flush_complete_event(
+      base::WaitableEvent::ResetPolicy::AUTOMATIC,
+      base::WaitableEvent::InitialState::NOT_SIGNALED);
+  // Unretained is safe: we wait on the event below, so |this| and the event
+  // outlive every callback invocation.
+  base::trace_event::TraceLog::GetInstance()->Flush(
+      base::Bind(&TraceEventAnalyzerTest::OnTraceDataCollected,
+                 base::Unretained(this),
+                 base::Unretained(&flush_complete_event)));
+  flush_complete_event.Wait();
+  buffer_.Finish();
+}
+
+} // namespace
+
+TEST_F(TraceEventAnalyzerTest, NoEvents) {
+  ManualSetUp();
+
+  // Produce a JSON string that contains no trace events at all.
+  buffer_.Start();
+  buffer_.Finish();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+
+  // A match-everything query must come back empty.
+  TraceEventVector events;
+  analyzer->FindEvents(Query::Bool(true), &events);
+  EXPECT_EQ(0u, events.size());
+}
+
+TEST_F(TraceEventAnalyzerTest, TraceEvent) {
+  ManualSetUp();
+
+  int int_num = 2;
+  double double_num = 3.5;
+  const char str[] = "the string";
+
+  // Hand-build an event with one argument of each supported kind. Bool and
+  // int args are stored as doubles in arg_numbers (see TraceEvent).
+  TraceEvent event;
+  event.arg_numbers["false"] = 0.0;
+  event.arg_numbers["true"] = 1.0;
+  event.arg_numbers["int"] = static_cast<double>(int_num);
+  event.arg_numbers["double"] = double_num;
+  event.arg_strings["string"] = str;
+  event.arg_values["dict"] = WrapUnique(new base::DictionaryValue());
+
+  // Has*Arg should report exactly the arguments added above, per type.
+  ASSERT_TRUE(event.HasNumberArg("false"));
+  ASSERT_TRUE(event.HasNumberArg("true"));
+  ASSERT_TRUE(event.HasNumberArg("int"));
+  ASSERT_TRUE(event.HasNumberArg("double"));
+  ASSERT_TRUE(event.HasStringArg("string"));
+  ASSERT_FALSE(event.HasNumberArg("notfound"));
+  ASSERT_FALSE(event.HasStringArg("notfound"));
+  ASSERT_TRUE(event.HasArg("dict"));
+  ASSERT_FALSE(event.HasArg("notfound"));
+
+  // GetKnownArgAs* should round-trip each stored value with its type.
+  EXPECT_FALSE(event.GetKnownArgAsBool("false"));
+  EXPECT_TRUE(event.GetKnownArgAsBool("true"));
+  EXPECT_EQ(int_num, event.GetKnownArgAsInt("int"));
+  EXPECT_EQ(double_num, event.GetKnownArgAsDouble("double"));
+  EXPECT_STREQ(str, event.GetKnownArgAsString("string").c_str());
+
+  std::unique_ptr<base::Value> arg;
+  EXPECT_TRUE(event.GetArgAsValue("dict", &arg));
+  EXPECT_EQ(base::Value::TYPE_DICTIONARY, arg->GetType());
+}
+
+TEST_F(TraceEventAnalyzerTest, QueryEventMember) {
+  ManualSetUp();
+
+  // Hand-build an event with every queryable member populated.
+  TraceEvent event;
+  event.thread.process_id = 3;
+  event.thread.thread_id = 4;
+  event.timestamp = 1.5;
+  event.phase = TRACE_EVENT_PHASE_BEGIN;
+  event.category = "category";
+  event.name = "name";
+  event.id = "1";
+  event.arg_numbers["num"] = 7.0;
+  event.arg_strings["str"] = "the string";
+
+  // Other event with all different members:
+  TraceEvent other;
+  other.thread.process_id = 5;
+  other.thread.thread_id = 6;
+  other.timestamp = 2.5;
+  other.phase = TRACE_EVENT_PHASE_END;
+  other.category = "category2";
+  other.name = "name2";
+  other.id = "2";
+  other.arg_numbers["num2"] = 8.0;
+  other.arg_strings["str2"] = "the string 2";
+
+  event.other_event = &other;
+  ASSERT_TRUE(event.has_other_event());
+  double duration = event.GetAbsTimeToOtherEvent();
+
+  // One query per queryable member, each built to match |event| (and its
+  // associated |other|) exactly.
+  Query event_pid = Query::EventPidIs(event.thread.process_id);
+  Query event_tid = Query::EventTidIs(event.thread.thread_id);
+  Query event_time = Query::EventTimeIs(event.timestamp);
+  Query event_duration = Query::EventDurationIs(duration);
+  Query event_phase = Query::EventPhaseIs(event.phase);
+  Query event_category = Query::EventCategoryIs(event.category);
+  Query event_name = Query::EventNameIs(event.name);
+  Query event_id = Query::EventIdIs(event.id);
+  Query event_has_arg1 = Query::EventHasNumberArg("num");
+  Query event_has_arg2 = Query::EventHasStringArg("str");
+  Query event_arg1 =
+      (Query::EventArg("num") == Query::Double(event.arg_numbers["num"]));
+  Query event_arg2 =
+      (Query::EventArg("str") == Query::String(event.arg_strings["str"]));
+  Query event_has_other = Query::EventHasOther();
+  Query other_pid = Query::OtherPidIs(other.thread.process_id);
+  Query other_tid = Query::OtherTidIs(other.thread.thread_id);
+  Query other_time = Query::OtherTimeIs(other.timestamp);
+  Query other_phase = Query::OtherPhaseIs(other.phase);
+  Query other_category = Query::OtherCategoryIs(other.category);
+  Query other_name = Query::OtherNameIs(other.name);
+  Query other_id = Query::OtherIdIs(other.id);
+  Query other_has_arg1 = Query::OtherHasNumberArg("num2");
+  Query other_has_arg2 = Query::OtherHasStringArg("str2");
+  Query other_arg1 =
+      (Query::OtherArg("num2") == Query::Double(other.arg_numbers["num2"]));
+  Query other_arg2 =
+      (Query::OtherArg("str2") == Query::String(other.arg_strings["str2"]));
+
+  // Every query above should match the event it was built from.
+  EXPECT_TRUE(event_pid.Evaluate(event));
+  EXPECT_TRUE(event_tid.Evaluate(event));
+  EXPECT_TRUE(event_time.Evaluate(event));
+  EXPECT_TRUE(event_duration.Evaluate(event));
+  EXPECT_TRUE(event_phase.Evaluate(event));
+  EXPECT_TRUE(event_category.Evaluate(event));
+  EXPECT_TRUE(event_name.Evaluate(event));
+  EXPECT_TRUE(event_id.Evaluate(event));
+  EXPECT_TRUE(event_has_arg1.Evaluate(event));
+  EXPECT_TRUE(event_has_arg2.Evaluate(event));
+  EXPECT_TRUE(event_arg1.Evaluate(event));
+  EXPECT_TRUE(event_arg2.Evaluate(event));
+  EXPECT_TRUE(event_has_other.Evaluate(event));
+  EXPECT_TRUE(other_pid.Evaluate(event));
+  EXPECT_TRUE(other_tid.Evaluate(event));
+  EXPECT_TRUE(other_time.Evaluate(event));
+  EXPECT_TRUE(other_phase.Evaluate(event));
+  EXPECT_TRUE(other_category.Evaluate(event));
+  EXPECT_TRUE(other_name.Evaluate(event));
+  EXPECT_TRUE(other_id.Evaluate(event));
+  EXPECT_TRUE(other_has_arg1.Evaluate(event));
+  EXPECT_TRUE(other_has_arg2.Evaluate(event));
+  EXPECT_TRUE(other_arg1.Evaluate(event));
+  EXPECT_TRUE(other_arg2.Evaluate(event));
+
+  // Evaluate event queries against other to verify the queries fail when the
+  // event members are wrong.
+  EXPECT_FALSE(event_pid.Evaluate(other));
+  EXPECT_FALSE(event_tid.Evaluate(other));
+  EXPECT_FALSE(event_time.Evaluate(other));
+  EXPECT_FALSE(event_duration.Evaluate(other));
+  EXPECT_FALSE(event_phase.Evaluate(other));
+  EXPECT_FALSE(event_category.Evaluate(other));
+  EXPECT_FALSE(event_name.Evaluate(other));
+  EXPECT_FALSE(event_id.Evaluate(other));
+  EXPECT_FALSE(event_has_arg1.Evaluate(other));
+  EXPECT_FALSE(event_has_arg2.Evaluate(other));
+  EXPECT_FALSE(event_arg1.Evaluate(other));
+  EXPECT_FALSE(event_arg2.Evaluate(other));
+  EXPECT_FALSE(event_has_other.Evaluate(other));
+}
+
+// Exercise every boolean/comparison Query operator against a small set of
+// instant events whose "num" argument uniquely identifies each event.
+TEST_F(TraceEventAnalyzerTest, BooleanOperators) {
+  ManualSetUp();
+
+  BeginTracing();
+  {
+    TRACE_EVENT_INSTANT1("cat1", "name1", TRACE_EVENT_SCOPE_THREAD, "num", 1);
+    TRACE_EVENT_INSTANT1("cat1", "name2", TRACE_EVENT_SCOPE_THREAD, "num", 2);
+    TRACE_EVENT_INSTANT1("cat2", "name3", TRACE_EVENT_SCOPE_THREAD, "num", 3);
+    TRACE_EVENT_INSTANT1("cat2", "name4", TRACE_EVENT_SCOPE_THREAD, "num", 4);
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer);
+  analyzer->SetIgnoreMetadataEvents(true);
+
+  TraceEventVector matches;
+
+  // Equality on category and on an argument value.
+  analyzer->FindEvents(Query::EventCategory() == Query::String("cat1"),
+                       &matches);
+  ASSERT_EQ(2u, matches.size());
+  EXPECT_STREQ("name1", matches[0]->name.c_str());
+  EXPECT_STREQ("name2", matches[1]->name.c_str());
+
+  analyzer->FindEvents(Query::EventArg("num") == Query::Int(2), &matches);
+  ASSERT_EQ(1u, matches.size());
+  EXPECT_STREQ("name2", matches[0]->name.c_str());
+
+  // Inequality selects the complements of the queries above.
+  analyzer->FindEvents(Query::EventCategory() != Query::String("cat1"),
+                       &matches);
+  ASSERT_EQ(2u, matches.size());
+  EXPECT_STREQ("name3", matches[0]->name.c_str());
+  EXPECT_STREQ("name4", matches[1]->name.c_str());
+
+  analyzer->FindEvents(Query::EventArg("num") != Query::Int(2), &matches);
+  ASSERT_EQ(3u, matches.size());
+  EXPECT_STREQ("name1", matches[0]->name.c_str());
+  EXPECT_STREQ("name3", matches[1]->name.c_str());
+  EXPECT_STREQ("name4", matches[2]->name.c_str());
+
+  // Less-than.
+  analyzer->FindEvents(Query::EventArg("num") < Query::Int(2), &matches);
+  ASSERT_EQ(1u, matches.size());
+  EXPECT_STREQ("name1", matches[0]->name.c_str());
+
+  // Less-than-or-equal.
+  analyzer->FindEvents(Query::EventArg("num") <= Query::Int(2), &matches);
+  ASSERT_EQ(2u, matches.size());
+  EXPECT_STREQ("name1", matches[0]->name.c_str());
+  EXPECT_STREQ("name2", matches[1]->name.c_str());
+
+  // Greater-than.
+  analyzer->FindEvents(Query::EventArg("num") > Query::Int(3), &matches);
+  ASSERT_EQ(1u, matches.size());
+  EXPECT_STREQ("name4", matches[0]->name.c_str());
+
+  // Greater-than-or-equal.
+  analyzer->FindEvents(Query::EventArg("num") >= Query::Int(4), &matches);
+  ASSERT_EQ(1u, matches.size());
+  EXPECT_STREQ("name4", matches[0]->name.c_str());
+
+  // Logical AND of two sub-queries.
+  analyzer->FindEvents(Query::EventName() != Query::String("name1") &&
+                           Query::EventArg("num") < Query::Int(3),
+                       &matches);
+  ASSERT_EQ(1u, matches.size());
+  EXPECT_STREQ("name2", matches[0]->name.c_str());
+
+  // Logical OR of two sub-queries.
+  analyzer->FindEvents(Query::EventName() == Query::String("name1") ||
+                           Query::EventArg("num") == Query::Int(3),
+                       &matches);
+  ASSERT_EQ(2u, matches.size());
+  EXPECT_STREQ("name1", matches[0]->name.c_str());
+  EXPECT_STREQ("name3", matches[1]->name.c_str());
+
+  // Logical NOT of the previous OR query selects the complement set.
+  analyzer->FindEvents(!(Query::EventName() == Query::String("name1") ||
+                         Query::EventArg("num") == Query::Int(3)),
+                       &matches);
+  ASSERT_EQ(2u, matches.size());
+  EXPECT_STREQ("name2", matches[0]->name.c_str());
+  EXPECT_STREQ("name4", matches[1]->name.c_str());
+}
+
+// Verify the arithmetic Query operators (+, -, *, /, % and unary negation)
+// evaluate event arguments correctly when filtering events.
+TEST_F(TraceEventAnalyzerTest, ArithmeticOperators) {
+  ManualSetUp();
+
+  BeginTracing();
+  {
+    // These events are searched for:
+    TRACE_EVENT_INSTANT2("cat1", "math1", TRACE_EVENT_SCOPE_THREAD,
+                         "a", 10, "b", 5);
+    TRACE_EVENT_INSTANT2("cat1", "math2", TRACE_EVENT_SCOPE_THREAD,
+                         "a", 10, "b", 10);
+    // Extra events that never match, for noise:
+    TRACE_EVENT_INSTANT2("noise", "math3", TRACE_EVENT_SCOPE_THREAD,
+                         "a", 1, "b", 3);
+    TRACE_EVENT_INSTANT2("noise", "math4", TRACE_EVENT_SCOPE_THREAD,
+                         "c", 10, "d", 5);
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+
+  TraceEventVector found;
+
+  // Verify that arithmetic operators function:
+
+  // +: only math2 satisfies a + b == 20 (10 + 10).
+  analyzer->FindEvents(Query::EventArg("a") + Query::EventArg("b") ==
+                       Query::Int(20), &found);
+  EXPECT_EQ(1u, found.size());
+  EXPECT_STREQ("math2", found.front()->name.c_str());
+
+  // -: only math1 satisfies a - b == 5 (10 - 5).
+  analyzer->FindEvents(Query::EventArg("a") - Query::EventArg("b") ==
+                       Query::Int(5), &found);
+  EXPECT_EQ(1u, found.size());
+  EXPECT_STREQ("math1", found.front()->name.c_str());
+
+  // *: only math1 satisfies a * b == 50 (10 * 5).
+  analyzer->FindEvents(Query::EventArg("a") * Query::EventArg("b") ==
+                       Query::Int(50), &found);
+  EXPECT_EQ(1u, found.size());
+  EXPECT_STREQ("math1", found.front()->name.c_str());
+
+  // /: only math1 satisfies a / b == 2 (10 / 5).
+  analyzer->FindEvents(Query::EventArg("a") / Query::EventArg("b") ==
+                       Query::Int(2), &found);
+  EXPECT_EQ(1u, found.size());
+  EXPECT_STREQ("math1", found.front()->name.c_str());
+
+  // %: both math1 and math2 satisfy a % b == 0.
+  analyzer->FindEvents(Query::EventArg("a") % Query::EventArg("b") ==
+                       Query::Int(0), &found);
+  EXPECT_EQ(2u, found.size());
+
+  // - (negate): only math2 has b == 10, so only it satisfies -b == -10.
+  analyzer->FindEvents(-Query::EventArg("b") == Query::Int(-10), &found);
+  EXPECT_EQ(1u, found.size());
+  EXPECT_STREQ("math2", found.front()->name.c_str());
+}
+
+// Exercise glob-style Pattern matching ('?' = one char, '*' = any suffix)
+// against event names.
+TEST_F(TraceEventAnalyzerTest, StringPattern) {
+  ManualSetUp();
+
+  BeginTracing();
+  {
+    TRACE_EVENT_INSTANT0("cat1", "name1", TRACE_EVENT_SCOPE_THREAD);
+    TRACE_EVENT_INSTANT0("cat1", "name2", TRACE_EVENT_SCOPE_THREAD);
+    TRACE_EVENT_INSTANT0("cat1", "no match", TRACE_EVENT_SCOPE_THREAD);
+    TRACE_EVENT_INSTANT0("cat1", "name3x", TRACE_EVENT_SCOPE_THREAD);
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+  analyzer->SetIgnoreMetadataEvents(true);
+
+  TraceEventVector matched;
+
+  // '?' consumes exactly one character, so "name3x" does not match.
+  analyzer->FindEvents(Query::EventName() == Query::Pattern("name?"),
+                       &matched);
+  ASSERT_EQ(2u, matched.size());
+  EXPECT_STREQ("name1", matched[0]->name.c_str());
+  EXPECT_STREQ("name2", matched[1]->name.c_str());
+
+  // '*' consumes any suffix, so "name3x" matches as well.
+  analyzer->FindEvents(Query::EventName() == Query::Pattern("name*"),
+                       &matched);
+  ASSERT_EQ(3u, matched.size());
+  EXPECT_STREQ("name1", matched[0]->name.c_str());
+  EXPECT_STREQ("name2", matched[1]->name.c_str());
+  EXPECT_STREQ("name3x", matched[2]->name.c_str());
+
+  // Negating the pattern leaves only the one non-conforming name.
+  analyzer->FindEvents(Query::EventName() != Query::Pattern("name*"),
+                       &matched);
+  ASSERT_EQ(1u, matched.size());
+  EXPECT_STREQ("no match", matched[0]->name.c_str());
+}
+
+// Test that duration queries work for explicit BEGIN/END event pairs.
+TEST_F(TraceEventAnalyzerTest, BeginEndDuration) {
+  ManualSetUp();
+
+  const base::TimeDelta kSleepTime = base::TimeDelta::FromMilliseconds(200);
+  // We will search for events that have a duration of greater than 90% of the
+  // sleep time, so that there is no flakiness.
+  int64_t duration_cutoff_us = (kSleepTime.InMicroseconds() * 9) / 10;
+
+  BeginTracing();
+  {
+    TRACE_EVENT_BEGIN0("cat1", "name1"); // found by duration query
+    TRACE_EVENT_BEGIN0("noise", "name2"); // not searched for, just noise
+    {
+      TRACE_EVENT_BEGIN0("cat2", "name3"); // found by duration query
+      // next event not searched for, just noise
+      TRACE_EVENT_INSTANT0("noise", "name4", TRACE_EVENT_SCOPE_THREAD);
+      // The sleep falls inside name1 and name3 but before name5 opens, so
+      // only name1 and name3 accumulate a duration above the cutoff.
+      base::PlatformThread::Sleep(kSleepTime);
+      TRACE_EVENT_BEGIN0("cat2", "name5"); // not found (duration too short)
+      TRACE_EVENT_END0("cat2", "name5"); // not found (duration too short)
+      TRACE_EVENT_END0("cat2", "name3"); // found by duration query
+    }
+    TRACE_EVENT_END0("noise", "name2"); // not searched for, just noise
+    TRACE_EVENT_END0("cat1", "name1"); // found by duration query
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+  // Pair up BEGIN events with their END events so EventDuration() is defined.
+  analyzer->AssociateBeginEndEvents();
+
+  TraceEventVector found;
+  // "cat3" matches nothing; presumably included to show extra OR terms are
+  // harmless -- NOTE(review): intent inferred, confirm with authors.
+  analyzer->FindEvents(
+      Query::MatchBeginWithEnd() &&
+      Query::EventDuration() >
+          Query::Int(static_cast<int>(duration_cutoff_us)) &&
+      (Query::EventCategory() == Query::String("cat1") ||
+       Query::EventCategory() == Query::String("cat2") ||
+       Query::EventCategory() == Query::String("cat3")),
+      &found);
+  ASSERT_EQ(2u, found.size());
+  EXPECT_STREQ("name1", found[0]->name.c_str());
+  EXPECT_STREQ("name3", found[1]->name.c_str());
+}
+
+// Test that duration queries work for scoped "complete" events (TRACE_EVENT0),
+// whose duration runs from macro invocation to end of enclosing scope.
+TEST_F(TraceEventAnalyzerTest, CompleteDuration) {
+  ManualSetUp();
+
+  const base::TimeDelta kSleepTime = base::TimeDelta::FromMilliseconds(200);
+  // We will search for events that have a duration of greater than 90% of the
+  // sleep time, so that there is no flakiness.
+  int64_t duration_cutoff_us = (kSleepTime.InMicroseconds() * 9) / 10;
+
+  BeginTracing();
+  {
+    TRACE_EVENT0("cat1", "name1"); // found by duration query
+    TRACE_EVENT0("noise", "name2"); // not searched for, just noise
+    {
+      TRACE_EVENT0("cat2", "name3"); // found by duration query
+      // next event not searched for, just noise
+      TRACE_EVENT_INSTANT0("noise", "name4", TRACE_EVENT_SCOPE_THREAD);
+      // The sleep is inside name1/name3's scopes but before name5 begins, so
+      // name5's duration stays below the cutoff.
+      base::PlatformThread::Sleep(kSleepTime);
+      TRACE_EVENT0("cat2", "name5"); // not found (duration too short)
+    }
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+  analyzer->AssociateBeginEndEvents();
+
+  TraceEventVector found;
+  // "cat3" matches nothing; extra OR terms must be harmless.
+  analyzer->FindEvents(
+      Query::EventCompleteDuration() >
+          Query::Int(static_cast<int>(duration_cutoff_us)) &&
+      (Query::EventCategory() == Query::String("cat1") ||
+       Query::EventCategory() == Query::String("cat2") ||
+       Query::EventCategory() == Query::String("cat3")),
+      &found);
+  ASSERT_EQ(2u, found.size());
+  EXPECT_STREQ("name1", found[0]->name.c_str());
+  EXPECT_STREQ("name3", found[1]->name.c_str());
+}
+
+// Test AssociateBeginEndEvents
+TEST_F(TraceEventAnalyzerTest, BeginEndAssocations) {
+  ManualSetUp();
+
+  BeginTracing();
+  {
+    TRACE_EVENT_END0("cat1", "name1"); // does not match out of order begin
+    TRACE_EVENT_BEGIN0("cat1", "name2");
+    TRACE_EVENT_INSTANT0("cat1", "name3", TRACE_EVENT_SCOPE_THREAD);
+    TRACE_EVENT_BEGIN0("cat1", "name1");
+    TRACE_EVENT_END0("cat1", "name2");
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+  analyzer->AssociateBeginEndEvents();
+
+  // Only "name2" has a BEGIN that precedes its END; "name1"'s END arrives
+  // before its BEGIN, and "name3" is an instant event with no pair.
+  TraceEventVector paired;
+  analyzer->FindEvents(Query::MatchBeginWithEnd(), &paired);
+  ASSERT_EQ(1u, paired.size());
+  EXPECT_STREQ("name2", paired[0]->name.c_str());
+}
+
+// Test MergeAssociatedEventArgs: after association, merging copies an END
+// event's arguments onto its matched BEGIN event.
+TEST_F(TraceEventAnalyzerTest, MergeAssociatedEventArgs) {
+  ManualSetUp();
+
+  const char arg_string[] = "arg_string";
+  BeginTracing();
+  {
+    // The argument is recorded only on the END event.
+    TRACE_EVENT_BEGIN0("cat1", "name1");
+    TRACE_EVENT_END1("cat1", "name1", "arg", arg_string);
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+  analyzer->AssociateBeginEndEvents();
+
+  TraceEventVector found;
+  analyzer->FindEvents(Query::MatchBeginName("name1"), &found);
+  ASSERT_EQ(1u, found.size());
+  std::string arg_actual;
+  // Before merging, the BEGIN event has no "arg" of its own.
+  EXPECT_FALSE(found[0]->GetArgAsString("arg", &arg_actual));
+
+  // After merging, the END event's "arg" is visible on the BEGIN event.
+  analyzer->MergeAssociatedEventArgs();
+  EXPECT_TRUE(found[0]->GetArgAsString("arg", &arg_actual));
+  EXPECT_STREQ(arg_string, arg_actual.c_str());
+}
+
+// Test AssociateAsyncBeginEndEvents
+TEST_F(TraceEventAnalyzerTest, AsyncBeginEndAssocations) {
+  ManualSetUp();
+
+  BeginTracing();
+  {
+    TRACE_EVENT_ASYNC_END0("cat1", "name1", 0xA); // no match / out of order
+    TRACE_EVENT_ASYNC_BEGIN0("cat1", "name1", 0xB);
+    TRACE_EVENT_ASYNC_BEGIN0("cat1", "name1", 0xC);
+    TRACE_EVENT_INSTANT0("cat1", "name1", TRACE_EVENT_SCOPE_THREAD); // noise
+    TRACE_EVENT0("cat1", "name1"); // noise
+    TRACE_EVENT_ASYNC_END0("cat1", "name1", 0xB);
+    TRACE_EVENT_ASYNC_END0("cat1", "name1", 0xC);
+    TRACE_EVENT_ASYNC_BEGIN0("cat1", "name1", 0xA); // no match / out of order
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+  analyzer->AssociateAsyncBeginEndEvents();
+
+  // Only ids 0xB and 0xC have an ASYNC_BEGIN that precedes its ASYNC_END;
+  // 0xA's events arrived in the wrong order, and the instant/complete events
+  // are ignored by the async matcher.
+  TraceEventVector begins;
+  analyzer->FindEvents(Query::MatchAsyncBeginWithNext(), &begins);
+  ASSERT_EQ(2u, begins.size());
+  EXPECT_STRCASEEQ("0xb", begins[0]->id.c_str());
+  EXPECT_STRCASEEQ("0xc", begins[1]->id.c_str());
+}
+
+// Test AssociateAsyncBeginEndEvents when the async operations include
+// STEP_INTO/STEP_PAST events; verifies that other_event links form the
+// expected chains BEGIN -> STEP... -> END.
+TEST_F(TraceEventAnalyzerTest, AsyncBeginEndAssocationsWithSteps) {
+  ManualSetUp();
+
+  BeginTracing();
+  {
+    // 0xA: step/end arrive before its BEGIN, so its chain stops at the step.
+    TRACE_EVENT_ASYNC_STEP_INTO0("c", "n", 0xA, "s1");
+    TRACE_EVENT_ASYNC_END0("c", "n", 0xA);
+    TRACE_EVENT_ASYNC_BEGIN0("c", "n", 0xB);
+    TRACE_EVENT_ASYNC_BEGIN0("c", "n", 0xC);
+    // 0xB: BEGIN -> STEP_PAST -> END.
+    TRACE_EVENT_ASYNC_STEP_PAST0("c", "n", 0xB, "s1");
+    // 0xC: BEGIN -> STEP_INTO(s1) -> STEP_INTO(s2, a=1) -> END.
+    TRACE_EVENT_ASYNC_STEP_INTO0("c", "n", 0xC, "s1");
+    TRACE_EVENT_ASYNC_STEP_INTO1("c", "n", 0xC, "s2", "a", 1);
+    TRACE_EVENT_ASYNC_END0("c", "n", 0xB);
+    TRACE_EVENT_ASYNC_END0("c", "n", 0xC);
+    TRACE_EVENT_ASYNC_BEGIN0("c", "n", 0xA);
+    TRACE_EVENT_ASYNC_STEP_INTO0("c", "n", 0xA, "s2");
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+  analyzer->AssociateAsyncBeginEndEvents();
+
+  TraceEventVector found;
+  analyzer->FindEvents(Query::MatchAsyncBeginWithNext(), &found);
+  ASSERT_EQ(3u, found.size());
+
+  // 0xB's chain: STEP_PAST followed by END.
+  EXPECT_STRCASEEQ("0xb", found[0]->id.c_str());
+  EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, found[0]->other_event->phase);
+  EXPECT_TRUE(found[0]->other_event->other_event);
+  EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_END,
+            found[0]->other_event->other_event->phase);
+
+  // 0xC's chain: two STEP_INTOs (the second carrying arg a=1), then END.
+  EXPECT_STRCASEEQ("0xc", found[1]->id.c_str());
+  EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, found[1]->other_event->phase);
+  EXPECT_TRUE(found[1]->other_event->other_event);
+  EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_STEP_INTO,
+            found[1]->other_event->other_event->phase);
+  double arg_actual = 0;
+  EXPECT_TRUE(found[1]->other_event->other_event->GetArgAsNumber(
+      "a", &arg_actual));
+  EXPECT_EQ(1.0, arg_actual);
+  EXPECT_TRUE(found[1]->other_event->other_event->other_event);
+  EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_END,
+            found[1]->other_event->other_event->other_event->phase);
+
+  // 0xA's late BEGIN links only to the trailing STEP_INTO (its END came
+  // earlier and was never matched).
+  EXPECT_STRCASEEQ("0xa", found[2]->id.c_str());
+  EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, found[2]->other_event->phase);
+}
+
+// Test that the TraceAnalyzer custom associations work.  Association is
+// directional: the "begin" event gets an other_event link to its matched
+// "end" event, but not vice versa (verified by the cat4/cat5 checks below).
+TEST_F(TraceEventAnalyzerTest, CustomAssociations) {
+  ManualSetUp();
+
+  // Add events that begin/end in pipelined ordering with unique ID parameter
+  // to match up the begin/end pairs.
+  BeginTracing();
+  {
+    // no begin match
+    TRACE_EVENT_INSTANT1("cat1", "end", TRACE_EVENT_SCOPE_THREAD, "id", 1);
+    // end is cat4
+    TRACE_EVENT_INSTANT1("cat2", "begin", TRACE_EVENT_SCOPE_THREAD, "id", 2);
+    // end is cat5
+    TRACE_EVENT_INSTANT1("cat3", "begin", TRACE_EVENT_SCOPE_THREAD, "id", 3);
+    TRACE_EVENT_INSTANT1("cat4", "end", TRACE_EVENT_SCOPE_THREAD, "id", 2);
+    TRACE_EVENT_INSTANT1("cat5", "end", TRACE_EVENT_SCOPE_THREAD, "id", 3);
+    // no end match
+    TRACE_EVENT_INSTANT1("cat6", "begin", TRACE_EVENT_SCOPE_THREAD, "id", 1);
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+
+  // begin, end, and match queries to find proper begin/end pairs.
+  Query begin(Query::EventName() == Query::String("begin"));
+  Query end(Query::EventName() == Query::String("end"));
+  Query match(Query::EventArg("id") == Query::OtherArg("id"));
+  analyzer->AssociateEvents(begin, end, match);
+
+  TraceEventVector found;
+
+  // cat1 has no other_event (its "end" precedes any matching "begin").
+  analyzer->FindEvents(Query::EventCategory() == Query::String("cat1") &&
+                       Query::EventHasOther(), &found);
+  EXPECT_EQ(0u, found.size());
+
+  // cat1 has no other_event.
+  analyzer->FindEvents(Query::EventCategory() == Query::String("cat1") &&
+                       !Query::EventHasOther(), &found);
+  EXPECT_EQ(1u, found.size());
+
+  // cat6 has no other_event (its "begin" is never followed by an "end").
+  analyzer->FindEvents(Query::EventCategory() == Query::String("cat6") &&
+                       !Query::EventHasOther(), &found);
+  EXPECT_EQ(1u, found.size());
+
+  // cat2 and cat4 are associated (matched on id == 2).
+  analyzer->FindEvents(Query::EventCategory() == Query::String("cat2") &&
+                       Query::OtherCategory() == Query::String("cat4"), &found);
+  EXPECT_EQ(1u, found.size());
+
+  // cat4 and cat2 are not associated: the link is begin -> end only.
+  analyzer->FindEvents(Query::EventCategory() == Query::String("cat4") &&
+                       Query::OtherCategory() == Query::String("cat2"), &found);
+  EXPECT_EQ(0u, found.size());
+
+  // cat3 and cat5 are associated (matched on id == 3).
+  analyzer->FindEvents(Query::EventCategory() == Query::String("cat3") &&
+                       Query::OtherCategory() == Query::String("cat5"), &found);
+  EXPECT_EQ(1u, found.size());
+
+  // cat5 and cat3 are not associated.
+  analyzer->FindEvents(Query::EventCategory() == Query::String("cat5") &&
+                       Query::OtherCategory() == Query::String("cat3"), &found);
+  EXPECT_EQ(0u, found.size());
+}
+
+// Verify that Query literals and types are properly casted.
+TEST_F(TraceEventAnalyzerTest, Literals) {
+  ManualSetUp();
+
+  // None of these queries inspect event data, so the placeholder event below
+  // is never actually read.
+  TraceEvent placeholder;
+  char char_value = 5;
+  short short_value = -5;
+  EXPECT_TRUE(
+      (Query::Double(5.0) == Query::Int(char_value)).Evaluate(placeholder));
+  EXPECT_TRUE(
+      (Query::Double(-5.0) == Query::Int(short_value)).Evaluate(placeholder));
+  EXPECT_TRUE((Query::Double(1.0) == Query::Uint(1u)).Evaluate(placeholder));
+  EXPECT_TRUE((Query::Double(1.0) == Query::Int(1)).Evaluate(placeholder));
+  EXPECT_TRUE((Query::Double(-1.0) == Query::Int(-1)).Evaluate(placeholder));
+  EXPECT_TRUE(
+      (Query::Double(1.0) == Query::Double(1.0f)).Evaluate(placeholder));
+  EXPECT_TRUE((Query::Bool(true) == Query::Int(1)).Evaluate(placeholder));
+  EXPECT_TRUE((Query::Bool(false) == Query::Int(0)).Evaluate(placeholder));
+  EXPECT_TRUE(
+      (Query::Bool(true) == Query::Double(1.0f)).Evaluate(placeholder));
+  EXPECT_TRUE(
+      (Query::Bool(false) == Query::Double(0.0f)).Evaluate(placeholder));
+}
+
+// Test GetRateStats: statistics over the time deltas between consecutive
+// events, with optional trimming of outlier deltas via RateStatsOptions.
+TEST_F(TraceEventAnalyzerTest, RateStats) {
+  std::vector<TraceEvent> events;
+  // Reserve up-front so push_back never reallocates: event_ptrs stores raw
+  // pointers into |events|, which must therefore stay stable.
+  events.reserve(100);
+  TraceEventVector event_ptrs;
+  double timestamp = 0.0;
+  double little_delta = 1.0;
+  double big_delta = 10.0;
+  double tiny_delta = 0.1;
+  RateStats stats;
+  RateStatsOptions options;
+
+  // Insert 10 events, each apart by little_delta.
+  for (int i = 0; i < 10; ++i) {
+    timestamp += little_delta;
+    TraceEvent event;
+    event.timestamp = timestamp;
+    events.push_back(std::move(event));
+    event_ptrs.push_back(&events.back());
+  }
+
+  // Uniform spacing: mean == min == max and zero standard deviation.
+  ASSERT_TRUE(GetRateStats(event_ptrs, &stats, NULL));
+  EXPECT_EQ(little_delta, stats.mean_us);
+  EXPECT_EQ(little_delta, stats.min_us);
+  EXPECT_EQ(little_delta, stats.max_us);
+  EXPECT_EQ(0.0, stats.standard_deviation_us);
+
+  // Add an event apart by big_delta.
+  {
+    timestamp += big_delta;
+    TraceEvent event;
+    event.timestamp = timestamp;
+    events.push_back(std::move(event));
+    event_ptrs.push_back(&events.back());
+  }
+
+  // The single large delta raises the mean and max and makes deviation > 0.
+  ASSERT_TRUE(GetRateStats(event_ptrs, &stats, NULL));
+  EXPECT_LT(little_delta, stats.mean_us);
+  EXPECT_EQ(little_delta, stats.min_us);
+  EXPECT_EQ(big_delta, stats.max_us);
+  EXPECT_LT(0.0, stats.standard_deviation_us);
+
+  // Trim off the biggest delta and verify stats return to uniform.
+  options.trim_min = 0;
+  options.trim_max = 1;
+  ASSERT_TRUE(GetRateStats(event_ptrs, &stats, &options));
+  EXPECT_EQ(little_delta, stats.mean_us);
+  EXPECT_EQ(little_delta, stats.min_us);
+  EXPECT_EQ(little_delta, stats.max_us);
+  EXPECT_EQ(0.0, stats.standard_deviation_us);
+
+  // Add an event apart by tiny_delta.
+  {
+    timestamp += tiny_delta;
+    TraceEvent event;
+    event.timestamp = timestamp;
+    events.push_back(std::move(event));
+    event_ptrs.push_back(&events.back());
+  }
+
+  // Trim off both the biggest and tiniest delta and verify stats.
+  options.trim_min = 1;
+  options.trim_max = 1;
+  ASSERT_TRUE(GetRateStats(event_ptrs, &stats, &options));
+  EXPECT_EQ(little_delta, stats.mean_us);
+  EXPECT_EQ(little_delta, stats.min_us);
+  EXPECT_EQ(little_delta, stats.max_us);
+  EXPECT_EQ(0.0, stats.standard_deviation_us);
+
+  // Verify smallest allowed number of events: rate stats need at least
+  // three events (two deltas).
+  {
+    TraceEvent event;
+    TraceEventVector few_event_ptrs;
+    few_event_ptrs.push_back(&event);
+    few_event_ptrs.push_back(&event);
+    ASSERT_FALSE(GetRateStats(few_event_ptrs, &stats, NULL));
+    few_event_ptrs.push_back(&event);
+    ASSERT_TRUE(GetRateStats(few_event_ptrs, &stats, NULL));
+
+    // Trim off more than allowed and verify failure.
+    options.trim_min = 0;
+    options.trim_max = 1;
+    ASSERT_FALSE(GetRateStats(few_event_ptrs, &stats, &options));
+  }
+}
+
+// Test FindFirstOf and FindLastOf.  FindFirstOf scans forward from the given
+// start position; FindLastOf scans backward from it (shown by the bam_index
+// +/- 1 cases below).
+TEST_F(TraceEventAnalyzerTest, FindOf) {
+  size_t num_events = 100;
+  size_t index = 0;
+  TraceEventVector event_ptrs;
+  // Empty vector: nothing can be found from any start position.
+  EXPECT_FALSE(FindFirstOf(event_ptrs, Query::Bool(true), 0, &index));
+  EXPECT_FALSE(FindFirstOf(event_ptrs, Query::Bool(true), 10, &index));
+  EXPECT_FALSE(FindLastOf(event_ptrs, Query::Bool(true), 0, &index));
+  EXPECT_FALSE(FindLastOf(event_ptrs, Query::Bool(true), 10, &index));
+
+  std::vector<TraceEvent> events;
+  events.resize(num_events);
+  for (size_t i = 0; i < events.size(); ++i)
+    event_ptrs.push_back(&events[i]);
+  // Name exactly one event, in the middle, so position-sensitivity is
+  // observable from both directions.
+  size_t bam_index = num_events/2;
+  events[bam_index].name = "bam";
+  Query query_bam = Query::EventName() == Query::String(events[bam_index].name);
+
+  // FindFirstOf
+  EXPECT_FALSE(FindFirstOf(event_ptrs, Query::Bool(false), 0, &index));
+  EXPECT_TRUE(FindFirstOf(event_ptrs, Query::Bool(true), 0, &index));
+  EXPECT_EQ(0u, index);
+  EXPECT_TRUE(FindFirstOf(event_ptrs, Query::Bool(true), 5, &index));
+  EXPECT_EQ(5u, index);
+
+  // Starting past bam_index misses it; the start position is inclusive.
+  EXPECT_FALSE(FindFirstOf(event_ptrs, query_bam, bam_index + 1, &index));
+  EXPECT_TRUE(FindFirstOf(event_ptrs, query_bam, 0, &index));
+  EXPECT_EQ(bam_index, index);
+  EXPECT_TRUE(FindFirstOf(event_ptrs, query_bam, bam_index, &index));
+  EXPECT_EQ(bam_index, index);
+
+  // FindLastOf (start positions beyond the end are clamped).
+  EXPECT_FALSE(FindLastOf(event_ptrs, Query::Bool(false), 1000, &index));
+  EXPECT_TRUE(FindLastOf(event_ptrs, Query::Bool(true), 1000, &index));
+  EXPECT_EQ(num_events - 1, index);
+  EXPECT_TRUE(FindLastOf(event_ptrs, Query::Bool(true), num_events - 5,
+                         &index));
+  EXPECT_EQ(num_events - 5, index);
+
+  // Starting before bam_index misses it when searching backward.
+  EXPECT_FALSE(FindLastOf(event_ptrs, query_bam, bam_index - 1, &index));
+  EXPECT_TRUE(FindLastOf(event_ptrs, query_bam, num_events, &index));
+  EXPECT_EQ(bam_index, index);
+  EXPECT_TRUE(FindLastOf(event_ptrs, query_bam, bam_index, &index));
+  EXPECT_EQ(bam_index, index);
+}
+
+// Test FindClosest: finds the one or two matching events nearest in time to
+// a given reference position.
+TEST_F(TraceEventAnalyzerTest, FindClosest) {
+  size_t index_1 = 0;
+  size_t index_2 = 0;
+  TraceEventVector event_ptrs;
+  // Empty vector: nothing to find.
+  EXPECT_FALSE(FindClosest(event_ptrs, Query::Bool(true), 0,
+                           &index_1, &index_2));
+
+  size_t num_events = 5;
+  std::vector<TraceEvent> events;
+  events.resize(num_events);
+  for (size_t i = 0; i < events.size(); ++i) {
+    // timestamps go up exponentially so the lower index is always closer in
+    // time than the higher index.
+    events[i].timestamp = static_cast<double>(i) * static_cast<double>(i);
+    event_ptrs.push_back(&events[i]);
+  }
+  events[0].name = "one";
+  events[2].name = "two";
+  events[4].name = "three";
+  Query query_named = Query::EventName() != Query::String(std::string());
+  Query query_one = Query::EventName() == Query::String("one");
+
+  // Only one event matches query_one, so two closest can't be found.
+  EXPECT_FALSE(FindClosest(event_ptrs, query_one, 0, &index_1, &index_2));
+
+  // A NULL second index means only the single closest match is requested.
+  EXPECT_TRUE(FindClosest(event_ptrs, query_one, 3, &index_1, NULL));
+  EXPECT_EQ(0u, index_1);
+
+  EXPECT_TRUE(FindClosest(event_ptrs, query_named, 1, &index_1, &index_2));
+  EXPECT_EQ(0u, index_1);
+  EXPECT_EQ(2u, index_2);
+
+  EXPECT_TRUE(FindClosest(event_ptrs, query_named, 4, &index_1, &index_2));
+  EXPECT_EQ(4u, index_1);
+  EXPECT_EQ(2u, index_2);
+
+  // From position 3 (t=9): event 2 (t=4, distance 5) beats event 4 (t=16,
+  // distance 7), and event 0 (t=0) is the next closest named event.
+  EXPECT_TRUE(FindClosest(event_ptrs, query_named, 3, &index_1, &index_2));
+  EXPECT_EQ(2u, index_1);
+  EXPECT_EQ(0u, index_2);
+}
+
+// Test CountMatches.
+TEST_F(TraceEventAnalyzerTest, CountMatches) {
+  TraceEventVector ptrs;
+  // An empty vector yields zero matches regardless of the range given.
+  EXPECT_EQ(0u, CountMatches(ptrs, Query::Bool(true), 0, 10));
+
+  size_t total = 5;
+  size_t named_total = 3;
+  std::vector<TraceEvent> storage;
+  storage.resize(total);
+  for (TraceEvent& event : storage)
+    ptrs.push_back(&event);
+  storage[0].name = "one";
+  storage[2].name = "two";
+  storage[4].name = "three";
+  Query named_query = Query::EventName() != Query::String(std::string());
+  Query one_query = Query::EventName() == Query::String("one");
+
+  EXPECT_EQ(0u, CountMatches(ptrs, Query::Bool(false)));
+  EXPECT_EQ(total, CountMatches(ptrs, Query::Bool(true)));
+  // Restricting the range to [1, total) drops exactly one event.
+  EXPECT_EQ(total - 1, CountMatches(ptrs, Query::Bool(true), 1, total));
+  EXPECT_EQ(1u, CountMatches(ptrs, one_query));
+  EXPECT_EQ(total - 1, CountMatches(ptrs, !one_query));
+  EXPECT_EQ(named_total, CountMatches(ptrs, named_query));
+}
+
+// Verify that a structured TracedValue argument survives the round trip
+// through the JSON trace output and can be read back as a dictionary.
+TEST_F(TraceEventAnalyzerTest, ComplexArgument) {
+  ManualSetUp();
+
+  BeginTracing();
+  {
+    std::unique_ptr<base::trace_event::TracedValue> value(
+        new base::trace_event::TracedValue);
+    value->SetString("property", "value");
+    TRACE_EVENT1("cat", "name", "arg", std::move(value));
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+
+  TraceEventVector events;
+  analyzer->FindEvents(Query::EventName() == Query::String("name"), &events);
+
+  EXPECT_EQ(1u, events.size());
+  EXPECT_EQ("cat", events[0]->category);
+  EXPECT_EQ("name", events[0]->name);
+  EXPECT_TRUE(events[0]->HasArg("arg"));
+
+  // The serialized TracedValue is parsed back into a base::Value dictionary
+  // whose keys match what was set on the TracedValue above.
+  std::unique_ptr<base::Value> arg;
+  events[0]->GetArgAsValue("arg", &arg);
+  base::DictionaryValue* arg_dict;
+  EXPECT_TRUE(arg->GetAsDictionary(&arg_dict));
+  std::string property;
+  EXPECT_TRUE(arg_dict->GetString("property", &property));
+  EXPECT_EQ("value", property);
+}
+
+} // namespace trace_analyzer
diff --git a/libchrome/base/third_party/dynamic_annotations/dynamic_annotations.h b/libchrome/base/third_party/dynamic_annotations/dynamic_annotations.h
new file mode 100644
index 0000000..8d7f052
--- /dev/null
+++ b/libchrome/base/third_party/dynamic_annotations/dynamic_annotations.h
@@ -0,0 +1,595 @@
+/* Copyright (c) 2011, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* This file defines dynamic annotations for use with dynamic analysis
+   tools such as valgrind, PIN, etc.
+
+ Dynamic annotation is a source code annotation that affects
+ the generated code (that is, the annotation is not a comment).
+ Each such annotation is attached to a particular
+ instruction and/or to a particular object (address) in the program.
+
+ The annotations that should be used by users are macros in all upper-case
+ (e.g., ANNOTATE_NEW_MEMORY).
+
+ Actual implementation of these macros may differ depending on the
+ dynamic analysis tool being used.
+
+ See http://code.google.com/p/data-race-test/ for more information.
+
+ This file supports the following dynamic analysis tools:
+ - None (DYNAMIC_ANNOTATIONS_ENABLED is not defined or zero).
+ Macros are defined empty.
+ - ThreadSanitizer, Helgrind, DRD (DYNAMIC_ANNOTATIONS_ENABLED is 1).
+ Macros are defined as calls to non-inlinable empty functions
+ that are intercepted by Valgrind. */
+
+#ifndef __DYNAMIC_ANNOTATIONS_H__
+#define __DYNAMIC_ANNOTATIONS_H__
+
+#ifndef DYNAMIC_ANNOTATIONS_PREFIX
+# define DYNAMIC_ANNOTATIONS_PREFIX
+#endif
+
+#ifndef DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND
+# define DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND 1
+#endif
+
+#ifdef DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK
+# ifdef __GNUC__
+# define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
+# else
+/* TODO(glider): for Windows support we may want to change this macro in order
+ to prepend __declspec(selectany) to the annotations' declarations. */
+# error weak annotations are not supported for your compiler
+# endif
+#else
+# define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
+#endif
+
+/* The following preprocessor magic prepends the value of
+ DYNAMIC_ANNOTATIONS_PREFIX to annotation function names. */
+#define DYNAMIC_ANNOTATIONS_GLUE0(A, B) A##B
+#define DYNAMIC_ANNOTATIONS_GLUE(A, B) DYNAMIC_ANNOTATIONS_GLUE0(A, B)
+#define DYNAMIC_ANNOTATIONS_NAME(name) \
+ DYNAMIC_ANNOTATIONS_GLUE(DYNAMIC_ANNOTATIONS_PREFIX, name)
+
+#ifndef DYNAMIC_ANNOTATIONS_ENABLED
+# define DYNAMIC_ANNOTATIONS_ENABLED 0
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0
+
+ /* -------------------------------------------------------------
+ Annotations useful when implementing condition variables such as CondVar,
+ using conditional critical sections (Await/LockWhen) and when constructing
+ user-defined synchronization mechanisms.
+
+ The annotations ANNOTATE_HAPPENS_BEFORE() and ANNOTATE_HAPPENS_AFTER() can
+ be used to define happens-before arcs in user-defined synchronization
+ mechanisms: the race detector will infer an arc from the former to the
+ latter when they share the same argument pointer.
+
+ Example 1 (reference counting):
+
+ void Unref() {
+ ANNOTATE_HAPPENS_BEFORE(&refcount_);
+ if (AtomicDecrementByOne(&refcount_) == 0) {
+ ANNOTATE_HAPPENS_AFTER(&refcount_);
+ delete this;
+ }
+ }
+
+ Example 2 (message queue):
+
+ void MyQueue::Put(Type *e) {
+ MutexLock lock(&mu_);
+ ANNOTATE_HAPPENS_BEFORE(e);
+ PutElementIntoMyQueue(e);
+ }
+
+ Type *MyQueue::Get() {
+ MutexLock lock(&mu_);
+ Type *e = GetElementFromMyQueue();
+ ANNOTATE_HAPPENS_AFTER(e);
+ return e;
+ }
+
+ Note: when possible, please use the existing reference counting and message
+ queue implementations instead of inventing new ones. */
+
+ /* Report that wait on the condition variable at address "cv" has succeeded
+ and the lock at address "lock" is held. */
+ #define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(__FILE__, __LINE__, cv, lock)
+
+ /* Report that wait on the condition variable at "cv" has succeeded. Variant
+ w/o lock. */
+ #define ANNOTATE_CONDVAR_WAIT(cv) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(__FILE__, __LINE__, cv, NULL)
+
+ /* Report that we are about to signal on the condition variable at address
+ "cv". */
+ #define ANNOTATE_CONDVAR_SIGNAL(cv) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignal)(__FILE__, __LINE__, cv)
+
+ /* Report that we are about to signal_all on the condition variable at address
+ "cv". */
+ #define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignalAll)(__FILE__, __LINE__, cv)
+
+ /* Annotations for user-defined synchronization mechanisms. */
+ #define ANNOTATE_HAPPENS_BEFORE(obj) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensBefore)(__FILE__, __LINE__, obj)
+ #define ANNOTATE_HAPPENS_AFTER(obj) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensAfter)(__FILE__, __LINE__, obj)
+
+ /* DEPRECATED. Don't use it. */
+ #define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotatePublishMemoryRange)(__FILE__, __LINE__, \
+ pointer, size)
+
+ /* DEPRECATED. Don't use it. */
+ #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateUnpublishMemoryRange)(__FILE__, __LINE__, \
+ pointer, size)
+
+ /* DEPRECATED. Don't use it. */
+ #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) \
+ do { \
+ ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size); \
+ ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size); \
+ } while (0)
+
+ /* Instruct the tool to create a happens-before arc between mu->Unlock() and
+ mu->Lock(). This annotation may slow down the race detector and hide real
+ races. Normally it is used only when it would be difficult to annotate each
+ of the mutex's critical sections individually using the annotations above.
+ This annotation makes sense only for hybrid race detectors. For pure
+ happens-before detectors this is a no-op. For more details see
+ http://code.google.com/p/data-race-test/wiki/PureHappensBeforeVsHybrid . */
+ #define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(__FILE__, __LINE__, \
+ mu)
+
+ /* Opposite to ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX.
+ Instruct the tool to NOT create h-b arcs between Unlock and Lock, even in
+ pure happens-before mode. For a hybrid mode this is a no-op. */
+ #define ANNOTATE_NOT_HAPPENS_BEFORE_MUTEX(mu) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsNotPHB)(__FILE__, __LINE__, mu)
+
+ /* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
+ #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(__FILE__, __LINE__, \
+ mu)
+
+ /* -------------------------------------------------------------
+ Annotations useful when defining memory allocators, or when memory that
+ was protected in one way starts to be protected in another. */
+
+ /* Report that a new memory at "address" of size "size" has been allocated.
+ This might be used when the memory has been retrieved from a free list and
+   is about to be reused, or when the locking discipline for a variable
+ changes. */
+ #define ANNOTATE_NEW_MEMORY(address, size) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateNewMemory)(__FILE__, __LINE__, address, \
+ size)
+
+ /* -------------------------------------------------------------
+ Annotations useful when defining FIFO queues that transfer data between
+ threads. */
+
+ /* Report that the producer-consumer queue (such as ProducerConsumerQueue) at
+ address "pcq" has been created. The ANNOTATE_PCQ_* annotations
+ should be used only for FIFO queues. For non-FIFO queues use
+ ANNOTATE_HAPPENS_BEFORE (for put) and ANNOTATE_HAPPENS_AFTER (for get). */
+ #define ANNOTATE_PCQ_CREATE(pcq) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQCreate)(__FILE__, __LINE__, pcq)
+
+ /* Report that the queue at address "pcq" is about to be destroyed. */
+ #define ANNOTATE_PCQ_DESTROY(pcq) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQDestroy)(__FILE__, __LINE__, pcq)
+
+ /* Report that we are about to put an element into a FIFO queue at address
+ "pcq". */
+ #define ANNOTATE_PCQ_PUT(pcq) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQPut)(__FILE__, __LINE__, pcq)
+
+ /* Report that we've just got an element from a FIFO queue at address
+ "pcq". */
+ #define ANNOTATE_PCQ_GET(pcq) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQGet)(__FILE__, __LINE__, pcq)
+
+ /* -------------------------------------------------------------
+ Annotations that suppress errors. It is usually better to express the
+ program's synchronization using the other annotations, but these can
+ be used when all else fails. */
+
+ /* Report that we may have a benign race at "pointer", with size
+ "sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the
+ point where "pointer" has been allocated, preferably close to the point
+ where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. */
+ #define ANNOTATE_BENIGN_RACE(pointer, description) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(__FILE__, __LINE__, \
+ pointer, sizeof(*(pointer)), description)
+
+ /* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
+ the memory range [address, address+size). */
+ #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(__FILE__, __LINE__, \
+ address, size, description)
+
+ /* Request the analysis tool to ignore all reads in the current thread
+ until ANNOTATE_IGNORE_READS_END is called.
+ Useful to ignore intentional racey reads, while still checking
+ other reads and all writes.
+ See also ANNOTATE_UNPROTECTED_READ. */
+ #define ANNOTATE_IGNORE_READS_BEGIN() \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__)
+
+ /* Stop ignoring reads. */
+ #define ANNOTATE_IGNORE_READS_END() \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__)
+
+ /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
+ #define ANNOTATE_IGNORE_WRITES_BEGIN() \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__)
+
+ /* Stop ignoring writes. */
+ #define ANNOTATE_IGNORE_WRITES_END() \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__)
+
+ /* Start ignoring all memory accesses (reads and writes). */
+ #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
+ do {\
+ ANNOTATE_IGNORE_READS_BEGIN();\
+ ANNOTATE_IGNORE_WRITES_BEGIN();\
+ }while(0)\
+
+ /* Stop ignoring all memory accesses. */
+ #define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
+ do {\
+ ANNOTATE_IGNORE_WRITES_END();\
+ ANNOTATE_IGNORE_READS_END();\
+ }while(0)\
+
+ /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore synchronization events:
+ RWLOCK* and CONDVAR*. */
+ #define ANNOTATE_IGNORE_SYNC_BEGIN() \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncBegin)(__FILE__, __LINE__)
+
+ /* Stop ignoring sync events. */
+ #define ANNOTATE_IGNORE_SYNC_END() \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncEnd)(__FILE__, __LINE__)
+
+
+ /* Enable (enable!=0) or disable (enable==0) race detection for all threads.
+ This annotation could be useful if you want to skip expensive race analysis
+ during some period of program execution, e.g. during initialization. */
+ #define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateEnableRaceDetection)(__FILE__, __LINE__, \
+ enable)
+
+ /* -------------------------------------------------------------
+ Annotations useful for debugging. */
+
+ /* Request to trace every access to "address". */
+ #define ANNOTATE_TRACE_MEMORY(address) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateTraceMemory)(__FILE__, __LINE__, address)
+
+ /* Report the current thread name to a race detector. */
+ #define ANNOTATE_THREAD_NAME(name) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateThreadName)(__FILE__, __LINE__, name)
+
+ /* -------------------------------------------------------------
+ Annotations useful when implementing locks. They are not
+ normally needed by modules that merely use locks.
+ The "lock" argument is a pointer to the lock object. */
+
+ /* Report that a lock has been created at address "lock". */
+ #define ANNOTATE_RWLOCK_CREATE(lock) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
+
+ /* Report that the lock at address "lock" is about to be destroyed. */
+ #define ANNOTATE_RWLOCK_DESTROY(lock) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock)
+
+ /* Report that the lock at address "lock" has been acquired.
+ is_w=1 for writer lock, is_w=0 for reader lock. */
+ #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockAcquired)(__FILE__, __LINE__, lock, \
+ is_w)
+
+ /* Report that the lock at address "lock" is about to be released. */
+ #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockReleased)(__FILE__, __LINE__, lock, \
+ is_w)
+
+ /* -------------------------------------------------------------
+ Annotations useful when implementing barriers. They are not
+ normally needed by modules that merely use barriers.
+ The "barrier" argument is a pointer to the barrier object. */
+
+ /* Report that the "barrier" has been initialized with initial "count".
+ If 'reinitialization_allowed' is true, initialization is allowed to happen
+ multiple times w/o calling barrier_destroy() */
+ #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierInit)(__FILE__, __LINE__, barrier, \
+ count, reinitialization_allowed)
+
+ /* Report that we are about to enter barrier_wait("barrier"). */
+ #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitBefore)(__FILE__, __LINE__, \
+ barrier)
+
+ /* Report that we just exited barrier_wait("barrier"). */
+ #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitAfter)(__FILE__, __LINE__, \
+ barrier)
+
+ /* Report that the "barrier" has been destroyed. */
+ #define ANNOTATE_BARRIER_DESTROY(barrier) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierDestroy)(__FILE__, __LINE__, \
+ barrier)
+
+ /* -------------------------------------------------------------
+ Annotations useful for testing race detectors. */
+
+ /* Report that we expect a race on the variable at "address".
+ Use only in unit tests for a race detector. */
+ #define ANNOTATE_EXPECT_RACE(address, description) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateExpectRace)(__FILE__, __LINE__, address, \
+ description)
+
+ #define ANNOTATE_FLUSH_EXPECTED_RACES() \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushExpectedRaces)(__FILE__, __LINE__)
+
+ /* A no-op. Insert where you like to test the interceptors. */
+ #define ANNOTATE_NO_OP(arg) \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateNoOp)(__FILE__, __LINE__, arg)
+
+ /* Force the race detector to flush its state. The actual effect depends on
+ * the implementation of the detector. */
+ #define ANNOTATE_FLUSH_STATE() \
+ DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushState)(__FILE__, __LINE__)
+
+
+#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
+
+ #define ANNOTATE_RWLOCK_CREATE(lock) /* empty */
+ #define ANNOTATE_RWLOCK_DESTROY(lock) /* empty */
+ #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */
+ #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */
+ #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) /* */
+ #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) /* empty */
+ #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) /* empty */
+ #define ANNOTATE_BARRIER_DESTROY(barrier) /* empty */
+ #define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) /* empty */
+ #define ANNOTATE_CONDVAR_WAIT(cv) /* empty */
+ #define ANNOTATE_CONDVAR_SIGNAL(cv) /* empty */
+ #define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) /* empty */
+ #define ANNOTATE_HAPPENS_BEFORE(obj) /* empty */
+ #define ANNOTATE_HAPPENS_AFTER(obj) /* empty */
+ #define ANNOTATE_PUBLISH_MEMORY_RANGE(address, size) /* empty */
+ #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(address, size) /* empty */
+ #define ANNOTATE_SWAP_MEMORY_RANGE(address, size) /* empty */
+ #define ANNOTATE_PCQ_CREATE(pcq) /* empty */
+ #define ANNOTATE_PCQ_DESTROY(pcq) /* empty */
+ #define ANNOTATE_PCQ_PUT(pcq) /* empty */
+ #define ANNOTATE_PCQ_GET(pcq) /* empty */
+ #define ANNOTATE_NEW_MEMORY(address, size) /* empty */
+ #define ANNOTATE_EXPECT_RACE(address, description) /* empty */
+ #define ANNOTATE_FLUSH_EXPECTED_RACES(address, description) /* empty */
+ #define ANNOTATE_BENIGN_RACE(address, description) /* empty */
+ #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */
+ #define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) /* empty */
+ #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) /* empty */
+ #define ANNOTATE_TRACE_MEMORY(arg) /* empty */
+ #define ANNOTATE_THREAD_NAME(name) /* empty */
+ #define ANNOTATE_IGNORE_READS_BEGIN() /* empty */
+ #define ANNOTATE_IGNORE_READS_END() /* empty */
+ #define ANNOTATE_IGNORE_WRITES_BEGIN() /* empty */
+ #define ANNOTATE_IGNORE_WRITES_END() /* empty */
+ #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() /* empty */
+ #define ANNOTATE_IGNORE_READS_AND_WRITES_END() /* empty */
+ #define ANNOTATE_IGNORE_SYNC_BEGIN() /* empty */
+ #define ANNOTATE_IGNORE_SYNC_END() /* empty */
+ #define ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */
+ #define ANNOTATE_NO_OP(arg) /* empty */
+ #define ANNOTATE_FLUSH_STATE() /* empty */
+
+#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
+
+/* Use the macros above rather than using these functions directly. */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockCreate)(
+ const char *file, int line,
+ const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockDestroy)(
+ const char *file, int line,
+ const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockAcquired)(
+ const char *file, int line,
+ const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockReleased)(
+ const char *file, int line,
+ const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierInit)(
+ const char *file, int line, const volatile void *barrier, long count,
+ long reinitialization_allowed) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitBefore)(
+ const char *file, int line,
+ const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitAfter)(
+ const char *file, int line,
+ const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierDestroy)(
+ const char *file, int line,
+ const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(
+ const char *file, int line, const volatile void *cv,
+ const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignal)(
+ const char *file, int line,
+ const volatile void *cv) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignalAll)(
+ const char *file, int line,
+ const volatile void *cv) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensBefore)(
+ const char *file, int line,
+ const volatile void *obj) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensAfter)(
+ const char *file, int line,
+ const volatile void *obj) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePublishMemoryRange)(
+ const char *file, int line,
+ const volatile void *address, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateUnpublishMemoryRange)(
+ const char *file, int line,
+ const volatile void *address, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQCreate)(
+ const char *file, int line,
+ const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQDestroy)(
+ const char *file, int line,
+ const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQPut)(
+ const char *file, int line,
+ const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQGet)(
+ const char *file, int line,
+ const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateNewMemory)(
+ const char *file, int line,
+ const volatile void *mem, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateExpectRace)(
+ const char *file, int line, const volatile void *mem,
+ const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushExpectedRaces)(
+ const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRace)(
+ const char *file, int line, const volatile void *mem,
+ const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(
+ const char *file, int line, const volatile void *mem, long size,
+ const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(
+ const char *file, int line,
+ const volatile void *mu) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsNotPHB)(
+ const char *file, int line,
+ const volatile void *mu) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateTraceMemory)(
+ const char *file, int line,
+ const volatile void *arg) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateThreadName)(
+ const char *file, int line,
+ const char *name) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsBegin)(
+ const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsEnd)(
+ const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesBegin)(
+ const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesEnd)(
+ const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncBegin)(
+ const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncEnd)(
+ const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateEnableRaceDetection)(
+ const char *file, int line, int enable) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateNoOp)(
+ const char *file, int line,
+ const volatile void *arg) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushState)(
+ const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+
+#if DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1
+/* Return non-zero value if running under valgrind.
+
+ If "valgrind.h" is included into dynamic_annotations.c,
+ the regular valgrind mechanism will be used.
+ See http://valgrind.org/docs/manual/manual-core-adv.html about
+ RUNNING_ON_VALGRIND and other valgrind "client requests".
+ The file "valgrind.h" may be obtained by doing
+ svn co svn://svn.valgrind.org/valgrind/trunk/include
+
+ If for some reason you can't use "valgrind.h" or want to fake valgrind,
+ there are two ways to make this function return non-zero:
+ - Use environment variable: export RUNNING_ON_VALGRIND=1
+ - Make your tool intercept the function RunningOnValgrind() and
+ change its return value.
+ */
+int RunningOnValgrind(void) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+#endif /* DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1 */
+
+#ifdef __cplusplus
+}
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus)
+
+ /* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
+
+ Instead of doing
+ ANNOTATE_IGNORE_READS_BEGIN();
+ ... = x;
+ ANNOTATE_IGNORE_READS_END();
+ one can use
+ ... = ANNOTATE_UNPROTECTED_READ(x); */
+ template <class T>
+ inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x) {
+ ANNOTATE_IGNORE_READS_BEGIN();
+ T res = x;
+ ANNOTATE_IGNORE_READS_END();
+ return res;
+ }
+ /* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. */
+ #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
+ namespace { \
+ class static_var ## _annotator { \
+ public: \
+ static_var ## _annotator() { \
+ ANNOTATE_BENIGN_RACE_SIZED(&static_var, \
+ sizeof(static_var), \
+ # static_var ": " description); \
+ } \
+ }; \
+ static static_var ## _annotator the ## static_var ## _annotator;\
+ }
+#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
+
+ #define ANNOTATE_UNPROTECTED_READ(x) (x)
+ #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) /* empty */
+
+#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
+
+#endif /* __DYNAMIC_ANNOTATIONS_H__ */
diff --git a/libchrome/base/third_party/icu/LICENSE b/libchrome/base/third_party/icu/LICENSE
new file mode 100644
index 0000000..40282f4
--- /dev/null
+++ b/libchrome/base/third_party/icu/LICENSE
@@ -0,0 +1,32 @@
+ICU License - ICU 1.8.1 and later
+
+COPYRIGHT AND PERMISSION NOTICE
+
+Copyright (c) 1995-2009 International Business Machines Corporation and others
+
+All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, and/or sell copies of the Software, and to permit persons
+to whom the Software is furnished to do so, provided that the above
+copyright notice(s) and this permission notice appear in all copies of
+the Software and that both the above copyright notice(s) and this
+permission notice appear in supporting documentation.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY
+SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER
+RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
+CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+Except as contained in this notice, the name of a copyright holder
+shall not be used in advertising or otherwise to promote the sale, use
+or other dealings in this Software without prior written authorization
+of the copyright holder.
diff --git a/libchrome/base/third_party/icu/README.chromium b/libchrome/base/third_party/icu/README.chromium
new file mode 100644
index 0000000..6a9a15a
--- /dev/null
+++ b/libchrome/base/third_party/icu/README.chromium
@@ -0,0 +1,16 @@
+Name: ICU
+URL: http://site.icu-project.org/
+License: MIT
+License File: NOT_SHIPPED
+
+This file has the relevant components from ICU copied to handle basic
+UTF8/16/32 conversions. Components are copied from utf.h, utf8.h, utf16.h,
+and utf_impl.c.
+
+The same module appears in third_party/icu, so we don't repeat the license
+file here.
+
+The main change is that U_/U8_/U16_ prefixes have been replaced with
+CBU_/CBU8_/CBU16_ (for "Chrome Base") to avoid confusion with the "real" ICU
+macros should ICU be in use on the system. For the same reason, the functions
+and types have been put in the "base_icu" namespace.
diff --git a/libchrome/base/third_party/icu/icu_utf.cc b/libchrome/base/third_party/icu/icu_utf.cc
new file mode 100644
index 0000000..2b67c5d
--- /dev/null
+++ b/libchrome/base/third_party/icu/icu_utf.cc
@@ -0,0 +1,227 @@
+/*
+******************************************************************************
+*
+* Copyright (C) 1999-2006, International Business Machines
+* Corporation and others. All Rights Reserved.
+*
+******************************************************************************
+* file name: utf_impl.c
+* encoding: US-ASCII
+* tab size: 8 (not used)
+* indentation:4
+*
+* created on: 1999sep13
+* created by: Markus W. Scherer
+*
+* This file provides implementation functions for macros in the utfXX.h
+* that would otherwise be too long as macros.
+*/
+
+#include "base/third_party/icu/icu_utf.h"
+
+namespace base_icu {
+
+/**
+ * UTF8_ERROR_VALUE_1 and UTF8_ERROR_VALUE_2 are special error values for UTF-8,
+ * which need 1 or 2 bytes in UTF-8:
+ * \code
+ * U+0015 = NAK = Negative Acknowledge, C0 control character
+ * U+009f = highest C1 control character
+ * \endcode
+ *
+ * These are used by UTF8_..._SAFE macros so that they can return an error value
+ * that needs the same number of code units (bytes) as were seen by
+ * a macro. They should be tested with UTF_IS_ERROR() or UTF_IS_VALID().
+ *
+ * @deprecated ICU 2.4. Obsolete, see utf_old.h.
+ */
+#define CBUTF8_ERROR_VALUE_1 0x15
+
+/**
+ * See documentation on UTF8_ERROR_VALUE_1 for details.
+ *
+ * @deprecated ICU 2.4. Obsolete, see utf_old.h.
+ */
+#define CBUTF8_ERROR_VALUE_2 0x9f
+
+
+/**
+ * Error value for all UTFs. This code point value will be set by macros with error
+ * checking if an error is detected.
+ *
+ * @deprecated ICU 2.4. Obsolete, see utf_old.h.
+ */
+#define CBUTF_ERROR_VALUE 0xffff
+
+/*
+ * This table could be replaced on many machines by
+ * a few lines of assembler code using an
+ * "index of first 0-bit from msb" instruction and
+ * one or two more integer instructions.
+ *
+ * For example, on an i386, do something like
+ * - MOV AL, leadByte
+ * - NOT AL (8-bit, leave b15..b8==0..0, reverse only b7..b0)
+ * - MOV AH, 0
+ * - BSR BX, AX (16-bit)
+ * - MOV AX, 6 (result)
+ * - JZ finish (ZF==1 if leadByte==0xff)
+ * - SUB AX, BX (result)
+ * -finish:
+ * (BSR: Bit Scan Reverse, scans for a 1-bit, starting from the MSB)
+ *
+ * In Unicode, all UTF-8 byte sequences with more than 4 bytes are illegal;
+ * lead bytes above 0xf4 are illegal.
+ * We keep them in this table for skipping long ISO 10646-UTF-8 sequences.
+ */
+const uint8_t utf8_countTrailBytes[256] =
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
+ 3, 3, /* illegal in Unicode */
+ 4, 4, 4, 4, /* illegal in Unicode */
+ 5, 5, /* illegal in Unicode */
+ 0, 0 /* illegal bytes 0xfe and 0xff */
+};
+
+static const UChar32
+utf8_minLegal[4]={ 0, 0x80, 0x800, 0x10000 };
+
+static const UChar32
+utf8_errorValue[6]={
+ CBUTF8_ERROR_VALUE_1, CBUTF8_ERROR_VALUE_2, CBUTF_ERROR_VALUE, 0x10ffff,
+ 0x3ffffff, 0x7fffffff
+};
+
+/*
+ * Handle the non-inline part of the U8_NEXT() macro and its obsolete sibling
+ * UTF8_NEXT_CHAR_SAFE().
+ *
+ * The "strict" parameter controls the error behavior:
+ * <0 "Safe" behavior of U8_NEXT(): All illegal byte sequences yield a negative
+ * code point result.
+ * 0 Obsolete "safe" behavior of UTF8_NEXT_CHAR_SAFE(..., FALSE):
+ * All illegal byte sequences yield a positive code point such that this
+ * result code point would be encoded with the same number of bytes as
+ * the illegal sequence.
+ * >0 Obsolete "strict" behavior of UTF8_NEXT_CHAR_SAFE(..., TRUE):
+ * Same as the obsolete "safe" behavior, but non-characters are also treated
+ * like illegal sequences.
+ *
+ * The special negative (<0) value -2 is used for lenient treatment of surrogate
+ * code points as legal. Some implementations use this for roundtripping of
+ * Unicode 16-bit strings that are not well-formed UTF-16, that is, they
+ * contain unpaired surrogates.
+ *
+ * Note that a UBool is the same as an int8_t.
+ */
+UChar32 utf8_nextCharSafeBody(const uint8_t* s,
+ int32_t* pi,
+ int32_t length,
+ UChar32 c,
+ UBool strict) {
+ int32_t i = *pi;
+ uint8_t count = CBU8_COUNT_TRAIL_BYTES(c);
+ if((i)+count<=(length)) {
+ uint8_t trail, illegal = 0;
+
+ CBU8_MASK_LEAD_BYTE((c), count);
+ /* count==0 for illegally leading trail bytes and the illegal bytes 0xfe and 0xff */
+ switch(count) {
+ /* each branch falls through to the next one */
+ case 5:
+ case 4:
+ /* count>=4 is always illegal: no more than 3 trail bytes in Unicode's UTF-8 */
+ illegal=1;
+ break;
+ case 3:
+ trail=s[(i)++];
+ (c)=((c)<<6)|(trail&0x3f);
+ if(c<0x110) {
+ illegal|=(trail&0xc0)^0x80;
+ } else {
+ /* code point>0x10ffff, outside Unicode */
+ illegal=1;
+ break;
+ }
+ case 2:
+ trail=s[(i)++];
+ (c)=((c)<<6)|(trail&0x3f);
+ illegal|=(trail&0xc0)^0x80;
+ case 1:
+ trail=s[(i)++];
+ (c)=((c)<<6)|(trail&0x3f);
+ illegal|=(trail&0xc0)^0x80;
+ break;
+ case 0:
+ if(strict>=0) {
+ return CBUTF8_ERROR_VALUE_1;
+ } else {
+ return CBU_SENTINEL;
+ }
+ /* no default branch to optimize switch() - all values are covered */
+ }
+
+ /*
+ * All the error handling should return a value
+ * that needs count bytes so that UTF8_GET_CHAR_SAFE() works right.
+ *
+ * Starting with Unicode 3.0.1, non-shortest forms are illegal.
+ * Starting with Unicode 3.2, surrogate code points must not be
+ * encoded in UTF-8, and there are no irregular sequences any more.
+ *
+ * U8_ macros (new in ICU 2.4) return negative values for error conditions.
+ */
+
+ /* correct sequence - all trail bytes have (b7..b6)==(10)? */
+ /* illegal is also set if count>=4 */
+ if(illegal || (c)<utf8_minLegal[count] || (CBU_IS_SURROGATE(c) && strict!=-2)) {
+ /* error handling */
+ uint8_t errorCount = count;
+ /* don't go beyond this sequence */
+ i=*pi;
+ while(count>0 && CBU8_IS_TRAIL(s[i])) {
+ ++(i);
+ --count;
+ }
+ if(strict>=0) {
+ c=utf8_errorValue[errorCount-count];
+ } else {
+ c=CBU_SENTINEL;
+ }
+ } else if((strict)>0 && CBU_IS_UNICODE_NONCHAR(c)) {
+ /* strict: forbid non-characters like U+fffe */
+ c=utf8_errorValue[count];
+ }
+ } else /* too few bytes left */ {
+ /* error handling */
+ int32_t i0 = i;
+ /* don't just set (i)=(length) in case there is an illegal sequence */
+ while((i)<(length) && CBU8_IS_TRAIL(s[i])) {
+ ++(i);
+ }
+ if(strict>=0) {
+ c=utf8_errorValue[i-i0];
+ } else {
+ c=CBU_SENTINEL;
+ }
+ }
+ *pi=i;
+ return c;
+}
+
+} // namespace base_icu
diff --git a/libchrome/base/third_party/icu/icu_utf.h b/libchrome/base/third_party/icu/icu_utf.h
new file mode 100644
index 0000000..4370fde
--- /dev/null
+++ b/libchrome/base/third_party/icu/icu_utf.h
@@ -0,0 +1,400 @@
+/*
+*******************************************************************************
+*
+* Copyright (C) 1999-2004, International Business Machines
+* Corporation and others. All Rights Reserved.
+*
+*******************************************************************************
+* file name: utf.h
+* encoding: US-ASCII
+* tab size: 8 (not used)
+* indentation:4
+*
+* created on: 1999sep09
+* created by: Markus W. Scherer
+*/
+
+#ifndef BASE_THIRD_PARTY_ICU_ICU_UTF_H_
+#define BASE_THIRD_PARTY_ICU_ICU_UTF_H_
+
+#include <stdint.h>
+
+namespace base_icu {
+
+typedef int32_t UChar32;
+typedef uint16_t UChar;
+typedef int8_t UBool;
+
+// General ---------------------------------------------------------------------
+// from utf.h
+
+/**
+ * This value is intended for sentinel values for APIs that
+ * (take or) return single code points (UChar32).
+ * It is outside of the Unicode code point range 0..0x10ffff.
+ *
+ * For example, a "done" or "error" value in a new API
+ * could be indicated with CBU_SENTINEL.
+ *
+ * ICU APIs designed before ICU 2.4 usually define service-specific "done"
+ * values, mostly 0xffff.
+ * Those may need to be distinguished from
+ * actual U+ffff text contents by calling functions like
+ * CharacterIterator::hasNext() or UnicodeString::length().
+ *
+ * @return -1
+ * @see UChar32
+ * @stable ICU 2.4
+ */
+#define CBU_SENTINEL (-1)
+
+/**
+ * Is this code point a Unicode noncharacter?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU_IS_UNICODE_NONCHAR(c) \
+ ((c) >= 0xfdd0 && ((uint32_t)(c) <= 0xfdef || ((c)&0xfffe) == 0xfffe) && \
+ (uint32_t)(c) <= 0x10ffff)
+
+/**
+ * Is c a Unicode code point value (0..U+10ffff)
+ * that can be assigned a character?
+ *
+ * Code points that are not characters include:
+ * - single surrogate code points (U+d800..U+dfff, 2048 code points)
+ * - the last two code points on each plane (U+__fffe and U+__ffff, 34 code points)
+ * - U+fdd0..U+fdef (new with Unicode 3.1, 32 code points)
+ * - the highest Unicode code point value is U+10ffff
+ *
+ * This means that all code points below U+d800 are character code points,
+ * and that boundary is tested first for performance.
+ *
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU_IS_UNICODE_CHAR(c) \
+ ((uint32_t)(c) < 0xd800 || \
+ ((uint32_t)(c) > 0xdfff && (uint32_t)(c) <= 0x10ffff && \
+ !CBU_IS_UNICODE_NONCHAR(c)))
+
+/**
+ * Is this code point a surrogate (U+d800..U+dfff)?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU_IS_SURROGATE(c) (((c)&0xfffff800)==0xd800)
+
+/**
+ * Assuming c is a surrogate code point (U_IS_SURROGATE(c)),
+ * is it a lead surrogate?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU_IS_SURROGATE_LEAD(c) (((c)&0x400)==0)
+
+
+// UTF-8 macros ----------------------------------------------------------------
+// from utf8.h
+
+extern const uint8_t utf8_countTrailBytes[256];
+
+/**
+ * Count the trail bytes for a UTF-8 lead byte.
+ * @internal
+ */
+#define CBU8_COUNT_TRAIL_BYTES(leadByte) \
+ (base_icu::utf8_countTrailBytes[(uint8_t)leadByte])
+
+/**
+ * Mask a UTF-8 lead byte, leave only the lower bits that form part of the code point value.
+ * @internal
+ */
+#define CBU8_MASK_LEAD_BYTE(leadByte, countTrailBytes) ((leadByte)&=(1<<(6-(countTrailBytes)))-1)
+
+/**
+ * Does this code unit (byte) encode a code point by itself (US-ASCII 0..0x7f)?
+ * @param c 8-bit code unit (byte)
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU8_IS_SINGLE(c) (((c)&0x80)==0)
+
+/**
+ * Is this code unit (byte) a UTF-8 lead byte?
+ * @param c 8-bit code unit (byte)
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU8_IS_LEAD(c) ((uint8_t)((c)-0xc0) < 0x3e)
+
+/**
+ * Is this code unit (byte) a UTF-8 trail byte?
+ * @param c 8-bit code unit (byte)
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU8_IS_TRAIL(c) (((c)&0xc0)==0x80)
+
+/**
+ * How many code units (bytes) are used for the UTF-8 encoding
+ * of this Unicode code point?
+ * @param c 32-bit code point
+ * @return 1..4, or 0 if c is a surrogate or not a Unicode code point
+ * @stable ICU 2.4
+ */
+#define CBU8_LENGTH(c) \
+ ((uint32_t)(c) <= 0x7f \
+ ? 1 \
+ : ((uint32_t)(c) <= 0x7ff \
+ ? 2 \
+ : ((uint32_t)(c) <= 0xd7ff \
+ ? 3 \
+ : ((uint32_t)(c) <= 0xdfff || (uint32_t)(c) > 0x10ffff \
+ ? 0 \
+ : ((uint32_t)(c) <= 0xffff ? 3 : 4)))))
+
+/**
+ * The maximum number of UTF-8 code units (bytes) per Unicode code point (U+0000..U+10ffff).
+ * @return 4
+ * @stable ICU 2.4
+ */
+#define CBU8_MAX_LENGTH 4
+
+/**
+ * Function for handling "next code point" with error-checking.
+ * @internal
+ */
+UChar32 utf8_nextCharSafeBody(const uint8_t* s,
+ int32_t* pi,
+ int32_t length,
+ UChar32 c,
+ UBool strict);
+
+/**
+ * Get a code point from a string at a code point boundary offset,
+ * and advance the offset to the next code point boundary.
+ * (Post-incrementing forward iteration.)
+ * "Safe" macro, checks for illegal sequences and for string boundaries.
+ *
+ * The offset may point to the lead byte of a multi-byte sequence,
+ * in which case the macro will read the whole sequence.
+ * If the offset points to a trail byte or an illegal UTF-8 sequence, then
+ * c is set to a negative value.
+ *
+ * @param s const uint8_t * string
+ * @param i string offset, i<length
+ * @param length string length
+ * @param c output UChar32 variable, set to <0 in case of an error
+ * @see CBU8_NEXT_UNSAFE
+ * @stable ICU 2.4
+ */
+#define CBU8_NEXT(s, i, length, c) \
+ { \
+ (c) = (s)[(i)++]; \
+ if (((uint8_t)(c)) >= 0x80) { \
+ if (CBU8_IS_LEAD(c)) { \
+ (c) = base_icu::utf8_nextCharSafeBody((const uint8_t*)s, &(i), \
+ (int32_t)(length), c, -1); \
+ } else { \
+ (c) = CBU_SENTINEL; \
+ } \
+ } \
+ }
+
+/**
+ * Append a code point to a string, overwriting 1 to 4 bytes.
+ * The offset points to the current end of the string contents
+ * and is advanced (post-increment).
+ * "Unsafe" macro, assumes a valid code point and sufficient space in the
+ * string.
+ * Otherwise, the result is undefined.
+ *
+ * @param s const uint8_t * string buffer
+ * @param i string offset
+ * @param c code point to append
+ * @see CBU8_APPEND
+ * @stable ICU 2.4
+ */
+#define CBU8_APPEND_UNSAFE(s, i, c) \
+ { \
+ if ((uint32_t)(c) <= 0x7f) { \
+ (s)[(i)++] = (uint8_t)(c); \
+ } else { \
+ if ((uint32_t)(c) <= 0x7ff) { \
+ (s)[(i)++] = (uint8_t)(((c) >> 6) | 0xc0); \
+ } else { \
+ if ((uint32_t)(c) <= 0xffff) { \
+ (s)[(i)++] = (uint8_t)(((c) >> 12) | 0xe0); \
+ } else { \
+ (s)[(i)++] = (uint8_t)(((c) >> 18) | 0xf0); \
+ (s)[(i)++] = (uint8_t)((((c) >> 12) & 0x3f) | 0x80); \
+ } \
+ (s)[(i)++] = (uint8_t)((((c) >> 6) & 0x3f) | 0x80); \
+ } \
+ (s)[(i)++] = (uint8_t)(((c)&0x3f) | 0x80); \
+ } \
+ }
+
+// UTF-16 macros ---------------------------------------------------------------
+// from utf16.h
+
+/**
+ * Does this code unit alone encode a code point (BMP, not a surrogate)?
+ * @param c 16-bit code unit
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU16_IS_SINGLE(c) !CBU_IS_SURROGATE(c)
+
+/**
+ * Is this code unit a lead surrogate (U+d800..U+dbff)?
+ * @param c 16-bit code unit
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU16_IS_LEAD(c) (((c)&0xfffffc00)==0xd800)
+
+/**
+ * Is this code unit a trail surrogate (U+dc00..U+dfff)?
+ * @param c 16-bit code unit
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU16_IS_TRAIL(c) (((c)&0xfffffc00)==0xdc00)
+
+/**
+ * Is this code unit a surrogate (U+d800..U+dfff)?
+ * @param c 16-bit code unit
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU16_IS_SURROGATE(c) CBU_IS_SURROGATE(c)
+
+/**
+ * Assuming c is a surrogate code point (U16_IS_SURROGATE(c)),
+ * is it a lead surrogate?
+ * @param c 16-bit code unit
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define CBU16_IS_SURROGATE_LEAD(c) (((c)&0x400)==0)
+
+/**
+ * Helper constant for CBU16_GET_SUPPLEMENTARY.
+ * @internal
+ */
+#define CBU16_SURROGATE_OFFSET ((0xd800<<10UL)+0xdc00-0x10000)
+
+/**
+ * Get a supplementary code point value (U+10000..U+10ffff)
+ * from its lead and trail surrogates.
+ * The result is undefined if the input values are not
+ * lead and trail surrogates.
+ *
+ * @param lead lead surrogate (U+d800..U+dbff)
+ * @param trail trail surrogate (U+dc00..U+dfff)
+ * @return supplementary code point (U+10000..U+10ffff)
+ * @stable ICU 2.4
+ */
+#define CBU16_GET_SUPPLEMENTARY(lead, trail) \
+ (((base_icu::UChar32)(lead)<<10UL)+(base_icu::UChar32)(trail)-CBU16_SURROGATE_OFFSET)
+
+
+/**
+ * Get the lead surrogate (0xd800..0xdbff) for a
+ * supplementary code point (0x10000..0x10ffff).
+ * @param supplementary 32-bit code point (U+10000..U+10ffff)
+ * @return lead surrogate (U+d800..U+dbff) for supplementary
+ * @stable ICU 2.4
+ */
+#define CBU16_LEAD(supplementary) \
+ (base_icu::UChar)(((supplementary)>>10)+0xd7c0)
+
+/**
+ * Get the trail surrogate (0xdc00..0xdfff) for a
+ * supplementary code point (0x10000..0x10ffff).
+ * @param supplementary 32-bit code point (U+10000..U+10ffff)
+ * @return trail surrogate (U+dc00..U+dfff) for supplementary
+ * @stable ICU 2.4
+ */
+#define CBU16_TRAIL(supplementary) \
+ (base_icu::UChar)(((supplementary)&0x3ff)|0xdc00)
+
+/**
+ * How many 16-bit code units are used to encode this Unicode code point? (1 or 2)
+ * The result is not defined if c is not a Unicode code point (U+0000..U+10ffff).
+ * @param c 32-bit code point
+ * @return 1 or 2
+ * @stable ICU 2.4
+ */
+#define CBU16_LENGTH(c) ((uint32_t)(c) <= 0xffff ? 1 : 2)
+
+/**
+ * The maximum number of 16-bit code units per Unicode code point (U+0000..U+10ffff).
+ * @return 2
+ * @stable ICU 2.4
+ */
+#define CBU16_MAX_LENGTH 2
+
+/**
+ * Get a code point from a string at a code point boundary offset,
+ * and advance the offset to the next code point boundary.
+ * (Post-incrementing forward iteration.)
+ * "Safe" macro, handles unpaired surrogates and checks for string boundaries.
+ *
+ * The offset may point to the lead surrogate unit
+ * for a supplementary code point, in which case the macro will read
+ * the following trail surrogate as well.
+ * If the offset points to a trail surrogate or
+ * to a single, unpaired lead surrogate, then that itself
+ * will be returned as the code point.
+ *
+ * @param s const UChar * string
+ * @param i string offset, i<length
+ * @param length string length
+ * @param c output UChar32 variable
+ * @stable ICU 2.4
+ */
+#define CBU16_NEXT(s, i, length, c) \
+ { \
+ (c) = (s)[(i)++]; \
+ if (CBU16_IS_LEAD(c)) { \
+ uint16_t __c2; \
+ if ((i) < (length) && CBU16_IS_TRAIL(__c2 = (s)[(i)])) { \
+ ++(i); \
+ (c) = CBU16_GET_SUPPLEMENTARY((c), __c2); \
+ } \
+ } \
+ }
+
+/**
+ * Append a code point to a string, overwriting 1 or 2 code units.
+ * The offset points to the current end of the string contents
+ * and is advanced (post-increment).
+ * "Unsafe" macro, assumes a valid code point and sufficient space in the string.
+ * Otherwise, the result is undefined.
+ *
+ * @param s const UChar * string buffer
+ * @param i string offset
+ * @param c code point to append
+ * @see CBU16_APPEND
+ * @stable ICU 2.4
+ */
+#define CBU16_APPEND_UNSAFE(s, i, c) \
+ { \
+ if ((uint32_t)(c) <= 0xffff) { \
+ (s)[(i)++] = (uint16_t)(c); \
+ } else { \
+ (s)[(i)++] = (uint16_t)(((c) >> 10) + 0xd7c0); \
+ (s)[(i)++] = (uint16_t)(((c)&0x3ff) | 0xdc00); \
+ } \
+ }
+
+} // namespace base_icu
+
+#endif // BASE_THIRD_PARTY_ICU_ICU_UTF_H_
diff --git a/libchrome/base/third_party/libevent/event.h b/libchrome/base/third_party/libevent/event.h
new file mode 100644
index 0000000..d47d797
--- /dev/null
+++ b/libchrome/base/third_party/libevent/event.h
@@ -0,0 +1,10 @@
+// The Chromium build contains its own checkout of libevent. This stub is used
+// when building the Chrome OS or Android libchrome package to instead use the
+// system headers.
+#if defined(__ANDROID__) || defined(__ANDROID_HOST__)
+#include <event2/event.h>
+#include <event2/event_compat.h>
+#include <event2/event_struct.h>
+#else
+#include <event.h>
+#endif
diff --git a/libchrome/base/third_party/nspr/LICENSE b/libchrome/base/third_party/nspr/LICENSE
new file mode 100644
index 0000000..eba7b77
--- /dev/null
+++ b/libchrome/base/third_party/nspr/LICENSE
@@ -0,0 +1,35 @@
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape Portable Runtime (NSPR).
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998-2000
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
diff --git a/libchrome/base/third_party/nspr/OWNERS b/libchrome/base/third_party/nspr/OWNERS
new file mode 100644
index 0000000..20ba660
--- /dev/null
+++ b/libchrome/base/third_party/nspr/OWNERS
@@ -0,0 +1,2 @@
+rsleevi@chromium.org
+wtc@chromium.org
diff --git a/libchrome/base/third_party/nspr/README.chromium b/libchrome/base/third_party/nspr/README.chromium
new file mode 100644
index 0000000..3659a2c
--- /dev/null
+++ b/libchrome/base/third_party/nspr/README.chromium
@@ -0,0 +1,3 @@
+Name: Netscape Portable Runtime (NSPR)
+URL: http://www.mozilla.org/projects/nspr/
+License: MPL 1.1/GPL 2.0/LGPL 2.1
diff --git a/libchrome/base/third_party/nspr/prtime.cc b/libchrome/base/third_party/nspr/prtime.cc
new file mode 100644
index 0000000..97d2c27
--- /dev/null
+++ b/libchrome/base/third_party/nspr/prtime.cc
@@ -0,0 +1,1186 @@
+/* Portions are Copyright (C) 2011 Google Inc */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape Portable Runtime (NSPR).
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998-2000
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * prtime.cc --
+ * NOTE: The original nspr file name is prtime.c
+ *
+ * NSPR date and time functions
+ *
+ * CVS revision 3.37
+ */
+
+/*
+ * The following functions were copied from the NSPR prtime.c file.
+ * PR_ParseTimeString
+ * We inlined the new PR_ParseTimeStringToExplodedTime function to avoid
+ * copying PR_ExplodeTime and PR_LocalTimeParameters. (The PR_ExplodeTime
+ * and PR_ImplodeTime calls cancel each other out.)
+ * PR_NormalizeTime
+ * PR_GMTParameters
+ * PR_ImplodeTime
+ * Upstream implementation from
+ * http://lxr.mozilla.org/nspr/source/pr/src/misc/prtime.c#221
+ * All types and macros are defined in the base/third_party/prtime.h file.
+ * These have been copied from the following nspr files. We have only copied
+ * over the types we need.
+ * 1. prtime.h
+ * 2. prtypes.h
+ * 3. prlong.h
+ *
+ * Unit tests are in base/time/pr_time_unittest.cc.
+ */
+
+#include <limits.h>
+
+#include "base/logging.h"
+#include "base/third_party/nspr/prtime.h"
+#include "build/build_config.h"
+
+#include <errno.h> /* for EINVAL */
+#include <time.h>
+
+/*
+ * The COUNT_LEAPS macro counts the number of leap years passed by
+ * till the start of the given year Y. At the start of the year 4
+ * A.D. the number of leap years passed by is 0, while at the start of
+ * the year 5 A.D. this count is 1. The number of years divisible by
+ * 100 but not divisible by 400 (the non-leap years) is deducted from
+ * the count to get the correct number of leap years.
+ *
+ * The COUNT_DAYS macro counts the number of days since 01/01/01 till the
+ * start of the given year Y. The number of days at the start of the year
+ * 1 is 0 while the number of days at the start of the year 2 is 365
+ * (which is ((2)-1) * 365) and so on. The reference point is 01/01/01
+ * midnight 00:00:00.
+ */
+
+#define COUNT_LEAPS(Y) (((Y)-1) / 4 - ((Y)-1) / 100 + ((Y)-1) / 400)
+#define COUNT_DAYS(Y) (((Y)-1) * 365 + COUNT_LEAPS(Y))
+#define DAYS_BETWEEN_YEARS(A, B) (COUNT_DAYS(B) - COUNT_DAYS(A))
+
+/* Implements the Unix localtime_r() function for windows */
+#if defined(OS_WIN)
+static void localtime_r(const time_t* secs, struct tm* time) {
+ (void) localtime_s(time, secs);
+}
+#endif
+
+/*
+ * Static variables used by functions in this file
+ */
+
+/*
+ * The following array contains the day of year for the last day of
+ * each month, where index 1 is January, and day 0 is January 1.
+ */
+
+static const int lastDayOfMonth[2][13] = {
+ {-1, 30, 58, 89, 119, 150, 180, 211, 242, 272, 303, 333, 364},
+ {-1, 30, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365}
+};
+
+/*
+ * The number of days in a month
+ */
+
+static const PRInt8 nDays[2][12] = {
+ {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
+ {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}
+};
+
+/*
+ *------------------------------------------------------------------------
+ *
+ * PR_ImplodeTime --
+ *
+ * Cf. time_t mktime(struct tm *tp)
+ * Note that 1 year has < 2^25 seconds. So an PRInt32 is large enough.
+ *
+ *------------------------------------------------------------------------
+ */
+PRTime
+PR_ImplodeTime(const PRExplodedTime *exploded)
+{
+ PRExplodedTime copy;
+ PRTime retVal;
+ PRInt64 secPerDay, usecPerSec;
+ PRInt64 temp;
+ PRInt64 numSecs64;
+ PRInt32 numDays;
+ PRInt32 numSecs;
+
+ /* Normalize first. Do this on our copy */
+ copy = *exploded;
+ PR_NormalizeTime(&copy, PR_GMTParameters);
+
+ numDays = DAYS_BETWEEN_YEARS(1970, copy.tm_year);
+
+ numSecs = copy.tm_yday * 86400 + copy.tm_hour * 3600 + copy.tm_min * 60 +
+ copy.tm_sec;
+
+ LL_I2L(temp, numDays);
+ LL_I2L(secPerDay, 86400);
+ LL_MUL(temp, temp, secPerDay);
+ LL_I2L(numSecs64, numSecs);
+ LL_ADD(numSecs64, numSecs64, temp);
+
+ /* apply the GMT and DST offsets */
+ LL_I2L(temp, copy.tm_params.tp_gmt_offset);
+ LL_SUB(numSecs64, numSecs64, temp);
+ LL_I2L(temp, copy.tm_params.tp_dst_offset);
+ LL_SUB(numSecs64, numSecs64, temp);
+
+ LL_I2L(usecPerSec, 1000000L);
+ LL_MUL(temp, numSecs64, usecPerSec);
+ LL_I2L(retVal, copy.tm_usec);
+ LL_ADD(retVal, retVal, temp);
+
+ return retVal;
+}
+
+/*
+ *-------------------------------------------------------------------------
+ *
+ * IsLeapYear --
+ *
+ * Returns 1 if the year is a leap year, 0 otherwise.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static int IsLeapYear(PRInt16 year)
+{
+ if ((year % 4 == 0 && year % 100 != 0) || year % 400 == 0)
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * 'secOffset' should be less than 86400 (i.e., a day).
+ * 'time' should point to a normalized PRExplodedTime.
+ */
+
+static void
+ApplySecOffset(PRExplodedTime *time, PRInt32 secOffset)
+{
+ time->tm_sec += secOffset;
+
+ /* Note that in this implementation we do not count leap seconds */
+ if (time->tm_sec < 0 || time->tm_sec >= 60) {
+ time->tm_min += time->tm_sec / 60;
+ time->tm_sec %= 60;
+ if (time->tm_sec < 0) {
+ time->tm_sec += 60;
+ time->tm_min--;
+ }
+ }
+
+ if (time->tm_min < 0 || time->tm_min >= 60) {
+ time->tm_hour += time->tm_min / 60;
+ time->tm_min %= 60;
+ if (time->tm_min < 0) {
+ time->tm_min += 60;
+ time->tm_hour--;
+ }
+ }
+
+ if (time->tm_hour < 0) {
+ /* Decrement mday, yday, and wday */
+ time->tm_hour += 24;
+ time->tm_mday--;
+ time->tm_yday--;
+ if (time->tm_mday < 1) {
+ time->tm_month--;
+ if (time->tm_month < 0) {
+ time->tm_month = 11;
+ time->tm_year--;
+ if (IsLeapYear(time->tm_year))
+ time->tm_yday = 365;
+ else
+ time->tm_yday = 364;
+ }
+ time->tm_mday = nDays[IsLeapYear(time->tm_year)][time->tm_month];
+ }
+ time->tm_wday--;
+ if (time->tm_wday < 0)
+ time->tm_wday = 6;
+ } else if (time->tm_hour > 23) {
+ /* Increment mday, yday, and wday */
+ time->tm_hour -= 24;
+ time->tm_mday++;
+ time->tm_yday++;
+ if (time->tm_mday >
+ nDays[IsLeapYear(time->tm_year)][time->tm_month]) {
+ time->tm_mday = 1;
+ time->tm_month++;
+ if (time->tm_month > 11) {
+ time->tm_month = 0;
+ time->tm_year++;
+ time->tm_yday = 0;
+ }
+ }
+ time->tm_wday++;
+ if (time->tm_wday > 6)
+ time->tm_wday = 0;
+ }
+}
+
+/*
+ * Normalize the fields of |time| after arithmetic manipulation, then
+ * recompute its zone parameters via |params| and re-apply them.
+ * Overflow/underflow cascades from the smallest unit (microseconds) up
+ * through seconds, minutes, hours, and month/year; mday is normalized
+ * last, once month and year are in range, because month lengths vary.
+ */
+void
+PR_NormalizeTime(PRExplodedTime *time, PRTimeParamFn params)
+{
+ int daysInMonth;
+ PRInt32 numDays;
+
+ /* Get back to GMT */
+ time->tm_sec -= time->tm_params.tp_gmt_offset
+ + time->tm_params.tp_dst_offset;
+ time->tm_params.tp_gmt_offset = 0;
+ time->tm_params.tp_dst_offset = 0;
+
+ /* Now normalize GMT */
+
+ if (time->tm_usec < 0 || time->tm_usec >= 1000000) {
+ time->tm_sec += time->tm_usec / 1000000;
+ time->tm_usec %= 1000000;
+ if (time->tm_usec < 0) {
+ time->tm_usec += 1000000;
+ time->tm_sec--;
+ }
+ }
+
+ /* Note that we do not count leap seconds in this implementation */
+ if (time->tm_sec < 0 || time->tm_sec >= 60) {
+ time->tm_min += time->tm_sec / 60;
+ time->tm_sec %= 60;
+ if (time->tm_sec < 0) {
+ time->tm_sec += 60;
+ time->tm_min--;
+ }
+ }
+
+ if (time->tm_min < 0 || time->tm_min >= 60) {
+ time->tm_hour += time->tm_min / 60;
+ time->tm_min %= 60;
+ if (time->tm_min < 0) {
+ time->tm_min += 60;
+ time->tm_hour--;
+ }
+ }
+
+ if (time->tm_hour < 0 || time->tm_hour >= 24) {
+ time->tm_mday += time->tm_hour / 24;
+ time->tm_hour %= 24;
+ if (time->tm_hour < 0) {
+ time->tm_hour += 24;
+ time->tm_mday--;
+ }
+ }
+
+ /* Normalize month and year before mday */
+ if (time->tm_month < 0 || time->tm_month >= 12) {
+ time->tm_year += static_cast<PRInt16>(time->tm_month / 12);
+ time->tm_month %= 12;
+ if (time->tm_month < 0) {
+ time->tm_month += 12;
+ time->tm_year--;
+ }
+ }
+
+ /* Now that month and year are in proper range, normalize mday */
+
+ if (time->tm_mday < 1) {
+ /* mday too small */
+ do {
+ /* the previous month */
+ time->tm_month--;
+ if (time->tm_month < 0) {
+ time->tm_month = 11;
+ time->tm_year--;
+ }
+ time->tm_mday += nDays[IsLeapYear(time->tm_year)][time->tm_month];
+ } while (time->tm_mday < 1);
+ } else {
+ daysInMonth = nDays[IsLeapYear(time->tm_year)][time->tm_month];
+ while (time->tm_mday > daysInMonth) {
+ /* mday too large */
+ time->tm_mday -= daysInMonth;
+ time->tm_month++;
+ if (time->tm_month > 11) {
+ time->tm_month = 0;
+ time->tm_year++;
+ }
+ daysInMonth = nDays[IsLeapYear(time->tm_year)][time->tm_month];
+ }
+ }
+
+ /* Recompute yday and wday */
+ time->tm_yday = static_cast<PRInt16>(time->tm_mday +
+ lastDayOfMonth[IsLeapYear(time->tm_year)][time->tm_month]);
+
+ /* 1 Jan 1970 was a Thursday (wday 4), hence the +4 bias below. */
+ numDays = DAYS_BETWEEN_YEARS(1970, time->tm_year) + time->tm_yday;
+ time->tm_wday = (numDays + 4) % 7;
+ if (time->tm_wday < 0) {
+ time->tm_wday += 7;
+ }
+
+ /* Recompute time parameters */
+
+ time->tm_params = params(time);
+
+ ApplySecOffset(time, time->tm_params.tp_gmt_offset
+ + time->tm_params.tp_dst_offset);
+}
+
+/*
+ *------------------------------------------------------------------------
+ *
+ * PR_GMTParameters --
+ *
+ * Returns the PRTimeParameters for Greenwich Mean Time.
+ * Trivially, both the tp_gmt_offset and tp_dst_offset fields are 0.
+ *
+ *------------------------------------------------------------------------
+ */
+
+PRTimeParameters
+PR_GMTParameters(const PRExplodedTime* /*gmt*/)
+{
+ /* GMT has no zone or DST correction; the input time is ignored. */
+ PRTimeParameters retVal = { 0, 0 };
+ return retVal;
+}
+
+/*
+ * The following code implements PR_ParseTimeString(). It is based on
+ * ns/lib/xp/xp_time.c, revision 1.25, by Jamie Zawinski <jwz@netscape.com>.
+ */
+
+/*
+ * We only recognize the abbreviations of a small subset of time zones
+ * in North America, Europe, and Japan.
+ *
+ * PST/PDT: Pacific Standard/Daylight Time
+ * MST/MDT: Mountain Standard/Daylight Time
+ * CST/CDT: Central Standard/Daylight Time
+ * EST/EDT: Eastern Standard/Daylight Time
+ * AST: Atlantic Standard Time
+ * NST: Newfoundland Standard Time
+ * GMT: Greenwich Mean Time
+ * BST: British Summer Time
+ * MET: Middle Europe Time
+ * EET: Eastern Europe Time
+ * JST: Japan Standard Time
+ */
+
+typedef enum
+{
+ TT_UNKNOWN,
+
+ /* Weekday names. Order matters: the parser converts via (tok - TT_SUN). */
+ TT_SUN, TT_MON, TT_TUE, TT_WED, TT_THU, TT_FRI, TT_SAT,
+
+ /* Month names. Order matters: the parser converts via (tok - TT_JAN). */
+ TT_JAN, TT_FEB, TT_MAR, TT_APR, TT_MAY, TT_JUN,
+ TT_JUL, TT_AUG, TT_SEP, TT_OCT, TT_NOV, TT_DEC,
+
+ /* Recognized time-zone abbreviations (see table above). */
+ TT_PST, TT_PDT, TT_MST, TT_MDT, TT_CST, TT_CDT, TT_EST, TT_EDT,
+ TT_AST, TT_NST, TT_GMT, TT_BST, TT_MET, TT_EET, TT_JST
+} TIME_TOKEN;
+
+/*
+ * This parses a time/date string into a PRTime
+ * (microseconds after "1-Jan-1970 00:00:00 GMT").
+ * It returns PR_SUCCESS on success, and PR_FAILURE
+ * if the time/date string can't be parsed.
+ *
+ * Many formats are handled, including:
+ *
+ * 14 Apr 89 03:20:12
+ * 14 Apr 89 03:20 GMT
+ * Fri, 17 Mar 89 4:01:33
+ * Fri, 17 Mar 89 4:01 GMT
+ * Mon Jan 16 16:12 PDT 1989
+ * Mon Jan 16 16:12 +0130 1989
+ * 6 May 1992 16:41-JST (Wednesday)
+ * 22-AUG-1993 10:59:12.82
+ * 22-AUG-1993 10:59pm
+ * 22-AUG-1993 12:59am
+ * 22-AUG-1993 12:59 PM
+ * Friday, August 04, 1995 3:54 PM
+ * 06/21/95 04:24:34 PM
+ * 20/06/95 21:07
+ * 95-06-08 19:32:48 EDT
+ * 1995-06-17T23:11:25.342156Z
+ *
+ * If the input string doesn't contain a description of the timezone,
+ * we consult the `default_to_gmt' to decide whether the string should
+ * be interpreted relative to the local time zone (PR_FALSE) or GMT (PR_TRUE).
+ * The correct value for this argument depends on what standard specified
+ * the time string which you are parsing.
+ */
+
+/*
+ * Implementation notes: the input is scanned token by token. The first
+ * character of each token selects a switch case that tries to recognize
+ * a month/weekday name, a time-zone abbreviation, a numeric zone offset,
+ * or a numeric time/date component. Recognized pieces are accumulated in
+ * the locals below and assembled into |tm| at the end.
+ */
+PRStatus
+PR_ParseTimeString(
+ const char *string,
+ PRBool default_to_gmt,
+ PRTime *result_imploded)
+{
+ PRExplodedTime tm;
+ PRExplodedTime *result = &tm;
+ TIME_TOKEN dotw = TT_UNKNOWN;
+ TIME_TOKEN month = TT_UNKNOWN;
+ TIME_TOKEN zone = TT_UNKNOWN;
+ int zone_offset = -1;
+ int dst_offset = 0;
+ int date = -1;
+ PRInt32 year = -1;
+ int hour = -1;
+ int min = -1;
+ int sec = -1;
+ int usec = -1;
+
+ const char *rest = string;
+
+ int iterations = 0;
+
+ PR_ASSERT(string && result);
+ if (!string || !result) return PR_FAILURE;
+
+ while (*rest)
+ {
+
+ /* Defensive cap: malformed input must not keep us scanning forever. */
+ if (iterations++ > 1000)
+ {
+ return PR_FAILURE;
+ }
+
+ switch (*rest)
+ {
+ case 'a': case 'A':
+ if (month == TT_UNKNOWN &&
+ (rest[1] == 'p' || rest[1] == 'P') &&
+ (rest[2] == 'r' || rest[2] == 'R'))
+ month = TT_APR;
+ else if (zone == TT_UNKNOWN &&
+ (rest[1] == 's' || rest[1] == 'S') &&
+ (rest[2] == 't' || rest[2] == 'T'))
+ zone = TT_AST;
+ else if (month == TT_UNKNOWN &&
+ (rest[1] == 'u' || rest[1] == 'U') &&
+ (rest[2] == 'g' || rest[2] == 'G'))
+ month = TT_AUG;
+ break;
+ case 'b': case 'B':
+ if (zone == TT_UNKNOWN &&
+ (rest[1] == 's' || rest[1] == 'S') &&
+ (rest[2] == 't' || rest[2] == 'T'))
+ zone = TT_BST;
+ break;
+ case 'c': case 'C':
+ if (zone == TT_UNKNOWN &&
+ (rest[1] == 'd' || rest[1] == 'D') &&
+ (rest[2] == 't' || rest[2] == 'T'))
+ zone = TT_CDT;
+ else if (zone == TT_UNKNOWN &&
+ (rest[1] == 's' || rest[1] == 'S') &&
+ (rest[2] == 't' || rest[2] == 'T'))
+ zone = TT_CST;
+ break;
+ case 'd': case 'D':
+ if (month == TT_UNKNOWN &&
+ (rest[1] == 'e' || rest[1] == 'E') &&
+ (rest[2] == 'c' || rest[2] == 'C'))
+ month = TT_DEC;
+ break;
+ case 'e': case 'E':
+ if (zone == TT_UNKNOWN &&
+ (rest[1] == 'd' || rest[1] == 'D') &&
+ (rest[2] == 't' || rest[2] == 'T'))
+ zone = TT_EDT;
+ else if (zone == TT_UNKNOWN &&
+ (rest[1] == 'e' || rest[1] == 'E') &&
+ (rest[2] == 't' || rest[2] == 'T'))
+ zone = TT_EET;
+ else if (zone == TT_UNKNOWN &&
+ (rest[1] == 's' || rest[1] == 'S') &&
+ (rest[2] == 't' || rest[2] == 'T'))
+ zone = TT_EST;
+ break;
+ case 'f': case 'F':
+ if (month == TT_UNKNOWN &&
+ (rest[1] == 'e' || rest[1] == 'E') &&
+ (rest[2] == 'b' || rest[2] == 'B'))
+ month = TT_FEB;
+ else if (dotw == TT_UNKNOWN &&
+ (rest[1] == 'r' || rest[1] == 'R') &&
+ (rest[2] == 'i' || rest[2] == 'I'))
+ dotw = TT_FRI;
+ break;
+ case 'g': case 'G':
+ if (zone == TT_UNKNOWN &&
+ (rest[1] == 'm' || rest[1] == 'M') &&
+ (rest[2] == 't' || rest[2] == 'T'))
+ zone = TT_GMT;
+ break;
+ case 'j': case 'J':
+ if (month == TT_UNKNOWN &&
+ (rest[1] == 'a' || rest[1] == 'A') &&
+ (rest[2] == 'n' || rest[2] == 'N'))
+ month = TT_JAN;
+ else if (zone == TT_UNKNOWN &&
+ (rest[1] == 's' || rest[1] == 'S') &&
+ (rest[2] == 't' || rest[2] == 'T'))
+ zone = TT_JST;
+ else if (month == TT_UNKNOWN &&
+ (rest[1] == 'u' || rest[1] == 'U') &&
+ (rest[2] == 'l' || rest[2] == 'L'))
+ month = TT_JUL;
+ else if (month == TT_UNKNOWN &&
+ (rest[1] == 'u' || rest[1] == 'U') &&
+ (rest[2] == 'n' || rest[2] == 'N'))
+ month = TT_JUN;
+ break;
+ case 'm': case 'M':
+ if (month == TT_UNKNOWN &&
+ (rest[1] == 'a' || rest[1] == 'A') &&
+ (rest[2] == 'r' || rest[2] == 'R'))
+ month = TT_MAR;
+ else if (month == TT_UNKNOWN &&
+ (rest[1] == 'a' || rest[1] == 'A') &&
+ (rest[2] == 'y' || rest[2] == 'Y'))
+ month = TT_MAY;
+ else if (zone == TT_UNKNOWN &&
+ (rest[1] == 'd' || rest[1] == 'D') &&
+ (rest[2] == 't' || rest[2] == 'T'))
+ zone = TT_MDT;
+ else if (zone == TT_UNKNOWN &&
+ (rest[1] == 'e' || rest[1] == 'E') &&
+ (rest[2] == 't' || rest[2] == 'T'))
+ zone = TT_MET;
+ else if (dotw == TT_UNKNOWN &&
+ (rest[1] == 'o' || rest[1] == 'O') &&
+ (rest[2] == 'n' || rest[2] == 'N'))
+ dotw = TT_MON;
+ else if (zone == TT_UNKNOWN &&
+ (rest[1] == 's' || rest[1] == 'S') &&
+ (rest[2] == 't' || rest[2] == 'T'))
+ zone = TT_MST;
+ break;
+ case 'n': case 'N':
+ if (month == TT_UNKNOWN &&
+ (rest[1] == 'o' || rest[1] == 'O') &&
+ (rest[2] == 'v' || rest[2] == 'V'))
+ month = TT_NOV;
+ else if (zone == TT_UNKNOWN &&
+ (rest[1] == 's' || rest[1] == 'S') &&
+ (rest[2] == 't' || rest[2] == 'T'))
+ zone = TT_NST;
+ break;
+ case 'o': case 'O':
+ if (month == TT_UNKNOWN &&
+ (rest[1] == 'c' || rest[1] == 'C') &&
+ (rest[2] == 't' || rest[2] == 'T'))
+ month = TT_OCT;
+ break;
+ case 'p': case 'P':
+ if (zone == TT_UNKNOWN &&
+ (rest[1] == 'd' || rest[1] == 'D') &&
+ (rest[2] == 't' || rest[2] == 'T'))
+ zone = TT_PDT;
+ else if (zone == TT_UNKNOWN &&
+ (rest[1] == 's' || rest[1] == 'S') &&
+ (rest[2] == 't' || rest[2] == 'T'))
+ zone = TT_PST;
+ break;
+ case 's': case 'S':
+ if (dotw == TT_UNKNOWN &&
+ (rest[1] == 'a' || rest[1] == 'A') &&
+ (rest[2] == 't' || rest[2] == 'T'))
+ dotw = TT_SAT;
+ else if (month == TT_UNKNOWN &&
+ (rest[1] == 'e' || rest[1] == 'E') &&
+ (rest[2] == 'p' || rest[2] == 'P'))
+ month = TT_SEP;
+ else if (dotw == TT_UNKNOWN &&
+ (rest[1] == 'u' || rest[1] == 'U') &&
+ (rest[2] == 'n' || rest[2] == 'N'))
+ dotw = TT_SUN;
+ break;
+ case 't': case 'T':
+ if (dotw == TT_UNKNOWN &&
+ (rest[1] == 'h' || rest[1] == 'H') &&
+ (rest[2] == 'u' || rest[2] == 'U'))
+ dotw = TT_THU;
+ else if (dotw == TT_UNKNOWN &&
+ (rest[1] == 'u' || rest[1] == 'U') &&
+ (rest[2] == 'e' || rest[2] == 'E'))
+ dotw = TT_TUE;
+ break;
+ case 'u': case 'U':
+ if (zone == TT_UNKNOWN &&
+ (rest[1] == 't' || rest[1] == 'T') &&
+ !(rest[2] >= 'A' && rest[2] <= 'Z') &&
+ !(rest[2] >= 'a' && rest[2] <= 'z'))
+ /* UT is the same as GMT but UTx is not. */
+ zone = TT_GMT;
+ break;
+ case 'w': case 'W':
+ if (dotw == TT_UNKNOWN &&
+ (rest[1] == 'e' || rest[1] == 'E') &&
+ (rest[2] == 'd' || rest[2] == 'D'))
+ dotw = TT_WED;
+ break;
+
+ case '+': case '-':
+ {
+ /* Numeric zone offset: +H, +HH, or +HHMM (and '-' variants). */
+ const char *end;
+ int sign;
+ if (zone_offset != -1)
+ {
+ /* already got one... */
+ rest++;
+ break;
+ }
+ if (zone != TT_UNKNOWN && zone != TT_GMT)
+ {
+ /* GMT+0300 is legal, but PST+0300 is not. */
+ rest++;
+ break;
+ }
+
+ sign = ((*rest == '+') ? 1 : -1);
+ rest++; /* move over sign */
+ end = rest;
+ while (*end >= '0' && *end <= '9')
+ end++;
+ if (rest == end) /* no digits here */
+ break;
+
+ if ((end - rest) == 4)
+ /* offset in HHMM */
+ zone_offset = (((((rest[0]-'0')*10) + (rest[1]-'0')) * 60) +
+ (((rest[2]-'0')*10) + (rest[3]-'0')));
+ else if ((end - rest) == 2)
+ /* offset in hours */
+ zone_offset = (((rest[0]-'0')*10) + (rest[1]-'0')) * 60;
+ else if ((end - rest) == 1)
+ /* offset in hours */
+ zone_offset = (rest[0]-'0') * 60;
+ else
+ /* 3 or >4 */
+ break;
+
+ zone_offset *= sign;
+ zone = TT_GMT;
+ break;
+ }
+
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ {
+ /* A digit run: time-of-day, date, or year, decided by what follows. */
+ int tmp_hour = -1;
+ int tmp_min = -1;
+ int tmp_sec = -1;
+ int tmp_usec = -1;
+ const char *end = rest + 1;
+ while (*end >= '0' && *end <= '9')
+ end++;
+
+ /* end is now the first character after a range of digits. */
+
+ if (*end == ':')
+ {
+ if (hour >= 0 && min >= 0) /* already got it */
+ break;
+
+ /* We have seen "[0-9]+:", so this is probably HH:MM[:SS] */
+ if ((end - rest) > 2)
+ /* it is [0-9][0-9][0-9]+: */
+ break;
+ else if ((end - rest) == 2)
+ tmp_hour = ((rest[0]-'0')*10 +
+ (rest[1]-'0'));
+ else
+ tmp_hour = (rest[0]-'0');
+
+ /* move over the colon, and parse minutes */
+
+ rest = ++end;
+ while (*end >= '0' && *end <= '9')
+ end++;
+
+ if (end == rest)
+ /* no digits after first colon? */
+ break;
+ else if ((end - rest) > 2)
+ /* it is [0-9][0-9][0-9]+: */
+ break;
+ else if ((end - rest) == 2)
+ tmp_min = ((rest[0]-'0')*10 +
+ (rest[1]-'0'));
+ else
+ tmp_min = (rest[0]-'0');
+
+ /* now go for seconds */
+ rest = end;
+ if (*rest == ':')
+ rest++;
+ end = rest;
+ while (*end >= '0' && *end <= '9')
+ end++;
+
+ if (end == rest)
+ /* no digits after second colon - that's ok. */
+ ;
+ else if ((end - rest) > 2)
+ /* it is [0-9][0-9][0-9]+: */
+ break;
+ else if ((end - rest) == 2)
+ tmp_sec = ((rest[0]-'0')*10 +
+ (rest[1]-'0'));
+ else
+ tmp_sec = (rest[0]-'0');
+
+ /* fractional second */
+ rest = end;
+ if (*rest == '.')
+ {
+ rest++;
+ end++;
+ tmp_usec = 0;
+ /* use up to 6 digits, skip over the rest */
+ while (*end >= '0' && *end <= '9')
+ {
+ if (end - rest < 6)
+ tmp_usec = tmp_usec * 10 + *end - '0';
+ end++;
+ }
+ /* scale shorter fractions up to microseconds */
+ int ndigits = end - rest;
+ while (ndigits++ < 6)
+ tmp_usec *= 10;
+ rest = end;
+ }
+
+ if (*rest == 'Z')
+ {
+ zone = TT_GMT;
+ rest++;
+ }
+ else if (tmp_hour <= 12)
+ {
+ /* If we made it here, we've parsed hour and min,
+ and possibly sec, so the current token is a time.
+ Now skip over whitespace and see if there's an AM
+ or PM directly following the time.
+ */
+ const char *s = end;
+ while (*s && (*s == ' ' || *s == '\t'))
+ s++;
+ if ((s[0] == 'p' || s[0] == 'P') &&
+ (s[1] == 'm' || s[1] == 'M'))
+ /* 10:05pm == 22:05, and 12:05pm == 12:05 */
+ tmp_hour = (tmp_hour == 12 ? 12 : tmp_hour + 12);
+ else if (tmp_hour == 12 &&
+ (s[0] == 'a' || s[0] == 'A') &&
+ (s[1] == 'm' || s[1] == 'M'))
+ /* 12:05am == 00:05 */
+ tmp_hour = 0;
+ }
+
+ hour = tmp_hour;
+ min = tmp_min;
+ sec = tmp_sec;
+ usec = tmp_usec;
+ rest = end;
+ break;
+ }
+ else if ((*end == '/' || *end == '-') &&
+ end[1] >= '0' && end[1] <= '9')
+ {
+ /* Perhaps this is 6/16/95, 16/6/95, 6-16-95, or 16-6-95
+ or even 95-06-05 or 1995-06-22.
+ */
+ int n1, n2, n3;
+ const char *s;
+
+ if (month != TT_UNKNOWN)
+ /* if we saw a month name, this can't be. */
+ break;
+
+ s = rest;
+
+ n1 = (*s++ - '0'); /* first 1, 2 or 4 digits */
+ if (*s >= '0' && *s <= '9')
+ {
+ n1 = n1*10 + (*s++ - '0');
+
+ if (*s >= '0' && *s <= '9') /* optional digits 3 and 4 */
+ {
+ n1 = n1*10 + (*s++ - '0');
+ if (*s < '0' || *s > '9')
+ break;
+ n1 = n1*10 + (*s++ - '0');
+ }
+ }
+
+ if (*s != '/' && *s != '-') /* slash */
+ break;
+ s++;
+
+ if (*s < '0' || *s > '9') /* second 1 or 2 digits */
+ break;
+ n2 = (*s++ - '0');
+ if (*s >= '0' && *s <= '9')
+ n2 = n2*10 + (*s++ - '0');
+
+ if (*s != '/' && *s != '-') /* slash */
+ break;
+ s++;
+
+ if (*s < '0' || *s > '9') /* third 1, 2, 4, or 5 digits */
+ break;
+ n3 = (*s++ - '0');
+ if (*s >= '0' && *s <= '9')
+ n3 = n3*10 + (*s++ - '0');
+
+ if (*s >= '0' && *s <= '9') /* optional digits 3, 4, and 5 */
+ {
+ n3 = n3*10 + (*s++ - '0');
+ if (*s < '0' || *s > '9')
+ break;
+ n3 = n3*10 + (*s++ - '0');
+ if (*s >= '0' && *s <= '9')
+ n3 = n3*10 + (*s++ - '0');
+ }
+
+ if (*s == 'T' && s[1] >= '0' && s[1] <= '9')
+ /* followed by ISO 8601 T delimiter and number is ok */
+ ;
+ else if ((*s >= '0' && *s <= '9') ||
+ (*s >= 'A' && *s <= 'Z') ||
+ (*s >= 'a' && *s <= 'z'))
+ /* but other alphanumerics are not ok */
+ break;
+
+ /* Ok, we parsed three multi-digit numbers, with / or -
+ between them. Now decide what the hell they are
+ (DD/MM/YY or MM/DD/YY or [YY]YY/MM/DD.)
+ */
+
+ if (n1 > 31 || n1 == 0) /* must be [YY]YY/MM/DD */
+ {
+ if (n2 > 12) break;
+ if (n3 > 31) break;
+ year = n1;
+ if (year < 70)
+ year += 2000;
+ else if (year < 100)
+ year += 1900;
+ month = (TIME_TOKEN)(n2 + ((int)TT_JAN) - 1);
+ date = n3;
+ rest = s;
+ break;
+ }
+
+ if (n1 > 12 && n2 > 12) /* illegal */
+ {
+ rest = s;
+ break;
+ }
+
+ if (n3 < 70)
+ n3 += 2000;
+ else if (n3 < 100)
+ n3 += 1900;
+
+ if (n1 > 12) /* must be DD/MM/YY */
+ {
+ date = n1;
+ month = (TIME_TOKEN)(n2 + ((int)TT_JAN) - 1);
+ year = n3;
+ }
+ else /* assume MM/DD/YY */
+ {
+ /* #### In the ambiguous case, should we consult the
+ locale to find out the local default? */
+ month = (TIME_TOKEN)(n1 + ((int)TT_JAN) - 1);
+ date = n2;
+ year = n3;
+ }
+ rest = s;
+ }
+ else if ((*end >= 'A' && *end <= 'Z') ||
+ (*end >= 'a' && *end <= 'z'))
+ /* Digits followed by non-punctuation - what's that? */
+ ;
+ else if ((end - rest) == 5) /* five digits is a year */
+ year = (year < 0
+ ? ((rest[0]-'0')*10000L +
+ (rest[1]-'0')*1000L +
+ (rest[2]-'0')*100L +
+ (rest[3]-'0')*10L +
+ (rest[4]-'0'))
+ : year);
+ else if ((end - rest) == 4) /* four digits is a year */
+ year = (year < 0
+ ? ((rest[0]-'0')*1000L +
+ (rest[1]-'0')*100L +
+ (rest[2]-'0')*10L +
+ (rest[3]-'0'))
+ : year);
+ else if ((end - rest) == 2) /* two digits - date or year */
+ {
+ int n = ((rest[0]-'0')*10 +
+ (rest[1]-'0'));
+ /* If we don't have a date (day of the month) and we see a number
+ less than 32, then assume that is the date.
+
+ Otherwise, if we have a date and not a year, assume this is the
+ year. If it is less than 70, then assume it refers to the 21st
+ century. If it is two digits (>= 70), assume it refers to this
+ century. Otherwise, assume it refers to an unambiguous year.
+
+ The world will surely end soon.
+ */
+ if (date < 0 && n < 32)
+ date = n;
+ else if (year < 0)
+ {
+ if (n < 70)
+ year = 2000 + n;
+ else if (n < 100)
+ year = 1900 + n;
+ else
+ year = n;
+ }
+ /* else what the hell is this. */
+ }
+ else if ((end - rest) == 1) /* one digit - date */
+ date = (date < 0 ? (rest[0]-'0') : date);
+ /* else, three or more than five digits - what's that? */
+
+ break;
+ } /* case '0' .. '9' */
+ } /* switch */
+
+ /* Skip to the end of this token, whether we parsed it or not.
+ Tokens are delimited by whitespace, or ,;-+/()[] but explicitly not .:
+ 'T' is also treated as delimiter when followed by a digit (ISO 8601).
+ */
+ while (*rest &&
+ *rest != ' ' && *rest != '\t' &&
+ *rest != ',' && *rest != ';' &&
+ *rest != '-' && *rest != '+' &&
+ *rest != '/' &&
+ *rest != '(' && *rest != ')' && *rest != '[' && *rest != ']' &&
+ !(*rest == 'T' && rest[1] >= '0' && rest[1] <= '9')
+ )
+ rest++;
+ /* skip over uninteresting chars. */
+ SKIP_MORE:
+ while (*rest == ' ' || *rest == '\t' ||
+ *rest == ',' || *rest == ';' || *rest == '/' ||
+ *rest == '(' || *rest == ')' || *rest == '[' || *rest == ']')
+ rest++;
+
+ /* "-" is ignored at the beginning of a token if we have not yet
+ parsed a year (e.g., the second "-" in "30-AUG-1966"), or if
+ the character after the dash is not a digit. */
+ if (*rest == '-' && ((rest > string &&
+ isalpha((unsigned char)rest[-1]) && year < 0) ||
+ rest[1] < '0' || rest[1] > '9'))
+ {
+ rest++;
+ goto SKIP_MORE;
+ }
+
+ /* Skip T that may precede ISO 8601 time. */
+ if (*rest == 'T' && rest[1] >= '0' && rest[1] <= '9')
+ rest++;
+ } /* while */
+
+ /* Map a recognized zone abbreviation to its GMT offset in minutes
+ (plus a DST contribution for the daylight variants). */
+ if (zone != TT_UNKNOWN && zone_offset == -1)
+ {
+ switch (zone)
+ {
+ case TT_PST: zone_offset = -8 * 60; break;
+ case TT_PDT: zone_offset = -8 * 60; dst_offset = 1 * 60; break;
+ case TT_MST: zone_offset = -7 * 60; break;
+ case TT_MDT: zone_offset = -7 * 60; dst_offset = 1 * 60; break;
+ case TT_CST: zone_offset = -6 * 60; break;
+ case TT_CDT: zone_offset = -6 * 60; dst_offset = 1 * 60; break;
+ case TT_EST: zone_offset = -5 * 60; break;
+ case TT_EDT: zone_offset = -5 * 60; dst_offset = 1 * 60; break;
+ case TT_AST: zone_offset = -4 * 60; break;
+ case TT_NST: zone_offset = -3 * 60 - 30; break;
+ case TT_GMT: zone_offset = 0 * 60; break;
+ case TT_BST: zone_offset = 0 * 60; dst_offset = 1 * 60; break;
+ case TT_MET: zone_offset = 1 * 60; break;
+ case TT_EET: zone_offset = 2 * 60; break;
+ case TT_JST: zone_offset = 9 * 60; break;
+ default:
+ PR_ASSERT (0);
+ break;
+ }
+ }
+
+ /* If we didn't find a year, month, or day-of-the-month, we can't
+ possibly parse this, and in fact, mktime() will do something random
+ (I'm seeing it return "Tue Feb 5 06:28:16 2036", which is no doubt
+ a numerologically significant date... */
+ if (month == TT_UNKNOWN || date == -1 || year == -1 || year > PR_INT16_MAX)
+ return PR_FAILURE;
+
+ memset(result, 0, sizeof(*result));
+ if (usec != -1)
+ result->tm_usec = usec;
+ if (sec != -1)
+ result->tm_sec = sec;
+ if (min != -1)
+ result->tm_min = min;
+ if (hour != -1)
+ result->tm_hour = hour;
+ if (date != -1)
+ result->tm_mday = date;
+ if (month != TT_UNKNOWN)
+ result->tm_month = (((int)month) - ((int)TT_JAN));
+ if (year != -1)
+ result->tm_year = static_cast<PRInt16>(year);
+ if (dotw != TT_UNKNOWN)
+ result->tm_wday = static_cast<PRInt8>(((int)dotw) - ((int)TT_SUN));
+ /*
+ * Mainly to compute wday and yday, but normalized time is also required
+ * by the check below that works around a Visual C++ 2005 mktime problem.
+ */
+ PR_NormalizeTime(result, PR_GMTParameters);
+ /* The remaining work is to set the gmt and dst offsets in tm_params. */
+
+ if (zone == TT_UNKNOWN && default_to_gmt)
+ {
+ /* No zone was specified, so pretend the zone was GMT. */
+ zone = TT_GMT;
+ zone_offset = 0;
+ }
+
+ if (zone_offset == -1)
+ {
+ /* no zone was specified, and we're to assume that everything
+ is local. */
+ struct tm localTime;
+ time_t secs;
+
+ PR_ASSERT(result->tm_month > -1 &&
+ result->tm_mday > 0 &&
+ result->tm_hour > -1 &&
+ result->tm_min > -1 &&
+ result->tm_sec > -1);
+
+ /*
+ * To obtain time_t from a tm structure representing the local
+ * time, we call mktime(). However, we need to see if we are
+ * on 1-Jan-1970 or before. If we are, we can't call mktime()
+ * because mktime() will crash on win16. In that case, we
+ * calculate zone_offset based on the zone offset at
+ * 00:00:00, 2 Jan 1970 GMT, and subtract zone_offset from the
+ * date we are parsing to transform the date to GMT. We also
+ * do so if mktime() returns (time_t) -1 (time out of range).
+ */
+
+ /* month, day, hours, mins and secs are always non-negative
+ so we dont need to worry about them. */
+ if (result->tm_year >= 1970)
+ {
+ localTime.tm_sec = result->tm_sec;
+ localTime.tm_min = result->tm_min;
+ localTime.tm_hour = result->tm_hour;
+ localTime.tm_mday = result->tm_mday;
+ localTime.tm_mon = result->tm_month;
+ localTime.tm_year = result->tm_year - 1900;
+ /* Set this to -1 to tell mktime "I don't care". If you set
+ it to 0 or 1, you are making assertions about whether the
+ date you are handing it is in daylight savings mode or not;
+ and if you're wrong, it will "fix" it for you. */
+ localTime.tm_isdst = -1;
+
+#if _MSC_VER == 1400 /* 1400 = Visual C++ 2005 (8.0) */
+ /*
+ * mktime will return (time_t) -1 if the input is a date
+ * after 23:59:59, December 31, 3000, US Pacific Time (not
+ * UTC as documented):
+ * http://msdn.microsoft.com/en-us/library/d1y53h2a(VS.80).aspx
+ * But if the year is 3001, mktime also invokes the invalid
+ * parameter handler, causing the application to crash. This
+ * problem has been reported in
+ * http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?FeedbackID=266036.
+ * We avoid this crash by not calling mktime if the date is
+ * out of range. To use a simple test that works in any time
+ * zone, we consider year 3000 out of range as well. (See
+ * bug 480740.)
+ */
+ if (result->tm_year >= 3000) {
+ /* Emulate what mktime would have done. */
+ errno = EINVAL;
+ secs = (time_t) -1;
+ } else {
+ secs = mktime(&localTime);
+ }
+#else
+ secs = mktime(&localTime);
+#endif
+ if (secs != (time_t) -1)
+ {
+ *result_imploded = (PRInt64)secs * PR_USEC_PER_SEC;
+ *result_imploded += result->tm_usec;
+ return PR_SUCCESS;
+ }
+ }
+
+ /* So mktime() can't handle this case. We assume the
+ zone_offset for the date we are parsing is the same as
+ the zone offset on 00:00:00 2 Jan 1970 GMT. */
+ secs = 86400;
+ localtime_r(&secs, &localTime);
+ zone_offset = localTime.tm_min
+ + 60 * localTime.tm_hour
+ + 1440 * (localTime.tm_mday - 2);
+ }
+
+ result->tm_params.tp_gmt_offset = zone_offset * 60;
+ result->tm_params.tp_dst_offset = dst_offset * 60;
+
+ *result_imploded = PR_ImplodeTime(result);
+ return PR_SUCCESS;
+}
diff --git a/libchrome/base/third_party/nspr/prtime.h b/libchrome/base/third_party/nspr/prtime.h
new file mode 100644
index 0000000..20bae38
--- /dev/null
+++ b/libchrome/base/third_party/nspr/prtime.h
@@ -0,0 +1,263 @@
+/* Portions are Copyright (C) 2011 Google Inc */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape Portable Runtime (NSPR).
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998-2000
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ *---------------------------------------------------------------------------
+ *
+ * prtime.h --
+ *
+ * NSPR date and time functions
+ * CVS revision 3.10
+ * This file contains definitions of NSPR's basic types required by
+ * prtime.cc. These types have been copied over from the following NSPR
+ * files prtime.h, prtypes.h(CVS revision 3.35), prlong.h(CVS revision 3.13)
+ *
+ *---------------------------------------------------------------------------
+ */
+
+#ifndef BASE_PRTIME_H__
+#define BASE_PRTIME_H__
+
+#include <stdint.h>
+
+#include "base/base_export.h"
+
+/* Fixed-width NSPR integer types, mapped onto <stdint.h>. */
+typedef int8_t PRInt8;
+typedef int16_t PRInt16;
+typedef int32_t PRInt32;
+typedef int64_t PRInt64;
+typedef int PRIntn;
+
+typedef PRIntn PRBool;
+#define PR_TRUE 1
+#define PR_FALSE 0
+
+typedef enum { PR_FAILURE = -1, PR_SUCCESS = 0 } PRStatus;
+
+/* NSPR assertions map onto Chromium's DCHECK. */
+#define PR_ASSERT DCHECK
+#define PR_CALLBACK
+#define PR_INT16_MAX 32767
+#define NSPR_API(__type) extern __type
+
+/*
+ * Long-long (64-bit signed integer type) support macros used by
+ * PR_ImplodeTime().
+ * See http://lxr.mozilla.org/nspr/source/pr/include/prlong.h
+ */
+
+/* With native 64-bit arithmetic these degenerate to plain operators. */
+#define LL_I2L(l, i) ((l) = (PRInt64)(i))
+#define LL_MUL(r, a, b) ((r) = (a) * (b))
+#define LL_ADD(r, a, b) ((r) = (a) + (b))
+#define LL_SUB(r, a, b) ((r) = (a) - (b))
+
+/**********************************************************************/
+/************************* TYPES AND CONSTANTS ************************/
+/**********************************************************************/
+
+/* Unit-conversion factors between seconds, milli-, micro-, and nanoseconds. */
+#define PR_MSEC_PER_SEC 1000UL
+#define PR_USEC_PER_SEC 1000000UL
+#define PR_NSEC_PER_SEC 1000000000UL
+#define PR_USEC_PER_MSEC 1000UL
+#define PR_NSEC_PER_MSEC 1000000UL
+
+/*
+ * PRTime --
+ *
+ * NSPR represents basic time as 64-bit signed integers relative
+ * to midnight (00:00:00), January 1, 1970 Greenwich Mean Time (GMT).
+ * (GMT is also known as Coordinated Universal Time, UTC.)
+ * The units of time are in microseconds. Negative times are allowed
+ * to represent times prior to the January 1970 epoch. Such values are
+ * intended to be exported to other systems or converted to human
+ * readable form.
+ *
+ * Notes on porting: PRTime corresponds to time_t in ANSI C. NSPR 1.0
+ * simply uses PRInt64.
+ */
+
+typedef PRInt64 PRTime; /* microseconds since the 1 Jan 1970 GMT epoch (see above) */
+
+/*
+ * Time zone and daylight saving time corrections applied to GMT to
+ * obtain the local time of some geographic location
+ */
+
+/* Both offsets are added to GMT to obtain local time (see PR_NormalizeTime). */
+typedef struct PRTimeParameters {
+ PRInt32 tp_gmt_offset; /* the offset from GMT in seconds */
+ PRInt32 tp_dst_offset; /* contribution of DST in seconds */
+} PRTimeParameters;
+
+/*
+ * PRExplodedTime --
+ *
+ * Time broken down into human-readable components such as year, month,
+ * day, hour, minute, second, and microsecond. Time zone and daylight
+ * saving time corrections may be applied. If they are applied, the
+ * offsets from the GMT must be saved in the 'tm_params' field so that
+ * all the information is available to reconstruct GMT.
+ *
+ * Notes on porting: PRExplodedTime corrresponds to struct tm in
+ * ANSI C, with the following differences:
+ * - an additional field tm_usec;
+ * - replacing tm_isdst by tm_params;
+ * - the month field is spelled tm_month, not tm_mon;
+ * - we use absolute year, AD, not the year since 1900.
+ * The corresponding type in NSPR 1.0 is called PRTime. Below is
+ * a table of date/time type correspondence in the three APIs:
+ * API time since epoch time in components
+ * ANSI C time_t struct tm
+ * NSPR 1.0 PRInt64 PRTime
+ * NSPR 2.0 PRTime PRExplodedTime
+ */
+
+typedef struct PRExplodedTime {
+ PRInt32 tm_usec; /* microseconds past tm_sec (0-999999) */
+ PRInt32 tm_sec; /* seconds past tm_min (0-61, accommodating
+ up to two leap seconds) */
+ PRInt32 tm_min; /* minutes past tm_hour (0-59) */
+ PRInt32 tm_hour; /* hours past tm_day (0-23) */
+ PRInt32 tm_mday; /* days past tm_mon (1-31, note that it
+ starts from 1) */
+ PRInt32 tm_month; /* months past tm_year (0-11, Jan = 0) */
+ PRInt16 tm_year; /* absolute year, AD (note that we do not
+ count from 1900) */
+
+ PRInt8 tm_wday; /* calculated day of the week
+ (0-6, Sun = 0) */
+ PRInt16 tm_yday; /* calculated day of the year
+ (0-365, Jan 1 = 0) */
+
+ PRTimeParameters tm_params; /* time parameters used by conversion */
+} PRExplodedTime;
+
+/*
+ * PRTimeParamFn --
+ *
+ * A function of PRTimeParamFn type returns the time zone and
+ * daylight saving time corrections for some geographic location,
+ * given the current time in GMT. The input argument gmt should
+ * point to a PRExplodedTime that is in GMT, i.e., whose
+ * tm_params contains all 0's.
+ *
+ * For any time zone other than GMT, the computation is intended to
+ * consist of two steps:
+ * - Figure out the time zone correction, tp_gmt_offset. This number
+ * usually depends on the geographic location only. But it may
+ * also depend on the current time. For example, all of China
+ * is one time zone right now. But this situation may change
+ * in the future.
+ * - Figure out the daylight saving time correction, tp_dst_offset.
+ * This number depends on both the geographic location and the
+ * current time. Most of the DST rules are expressed in local
+ * current time. If so, one should apply the time zone correction
+ * to GMT before applying the DST rules.
+ */
+
+/* Returns the zone/DST corrections for |gmt|; see the discussion above. */
+typedef PRTimeParameters (PR_CALLBACK *PRTimeParamFn)(const PRExplodedTime *gmt);
+
+/**********************************************************************/
+/****************************** FUNCTIONS *****************************/
+/**********************************************************************/
+
+NSPR_API(PRTime)
+PR_ImplodeTime(const PRExplodedTime *exploded);
+
+/*
+ * Adjust exploded time to normalize field overflows after manipulation.
+ * Note that the following fields of PRExplodedTime should not be
+ * manipulated:
+ * - tm_month and tm_year: because the number of days in a month and
+ * number of days in a year are not constant, it is ambiguous to
+ * manipulate the month and year fields, although one may be tempted
+ * to. For example, what does "a month from January 31st" mean?
+ * - tm_wday and tm_yday: these fields are calculated by NSPR. Users
+ * should treat them as "read-only".
+ */
+
+NSPR_API(void) PR_NormalizeTime(
+ PRExplodedTime *exploded, PRTimeParamFn params);
+
+/**********************************************************************/
+/*********************** TIME PARAMETER FUNCTIONS *********************/
+/**********************************************************************/
+
+/* Time parameters that represent Greenwich Mean Time */
+NSPR_API(PRTimeParameters) PR_GMTParameters(const PRExplodedTime *gmt);
+
+/*
+ * This parses a time/date string into a PRTime
+ * (microseconds after "1-Jan-1970 00:00:00 GMT").
+ * It returns PR_SUCCESS on success, and PR_FAILURE
+ * if the time/date string can't be parsed.
+ *
+ * Many formats are handled, including:
+ *
+ * 14 Apr 89 03:20:12
+ * 14 Apr 89 03:20 GMT
+ * Fri, 17 Mar 89 4:01:33
+ * Fri, 17 Mar 89 4:01 GMT
+ * Mon Jan 16 16:12 PDT 1989
+ * Mon Jan 16 16:12 +0130 1989
+ * 6 May 1992 16:41-JST (Wednesday)
+ * 22-AUG-1993 10:59:12.82
+ * 22-AUG-1993 10:59pm
+ * 22-AUG-1993 12:59am
+ * 22-AUG-1993 12:59 PM
+ * Friday, August 04, 1995 3:54 PM
+ * 06/21/95 04:24:34 PM
+ * 20/06/95 21:07
+ * 95-06-08 19:32:48 EDT
+ * 1995-06-17T23:11:25.342156Z
+ *
+ * If the input string doesn't contain a description of the timezone,
+ * we consult the `default_to_gmt' to decide whether the string should
+ * be interpreted relative to the local time zone (PR_FALSE) or GMT (PR_TRUE).
+ * The correct value for this argument depends on what standard specified
+ * the time string which you are parsing.
+ */
+
+/*
+ * This is the only function that should be called from outside base, and only
+ * from the unit test.
+ */
+
+BASE_EXPORT PRStatus PR_ParseTimeString (
+ const char *string,
+ PRBool default_to_gmt,
+ PRTime *result);
+
+#endif // BASE_PRTIME_H__
diff --git a/libchrome/base/third_party/valgrind/memcheck.h b/libchrome/base/third_party/valgrind/memcheck.h
new file mode 100644
index 0000000..aac34fc
--- /dev/null
+++ b/libchrome/base/third_party/valgrind/memcheck.h
@@ -0,0 +1,282 @@
+#ifdef ANDROID
+ #include "memcheck/memcheck.h"
+#else
+/*
+ ----------------------------------------------------------------
+
+ Notice that the following BSD-style license applies to this one
+ file (memcheck.h) only. The rest of Valgrind is licensed under the
+ terms of the GNU General Public License, version 2, unless
+ otherwise indicated. See the COPYING file in the source
+ distribution for details.
+
+ ----------------------------------------------------------------
+
+ This file is part of MemCheck, a heavyweight Valgrind tool for
+ detecting memory errors.
+
+ Copyright (C) 2000-2010 Julian Seward. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
+ documentation would be appreciated but is not required.
+
+ 3. Altered source versions must be plainly marked as such, and must
+ not be misrepresented as being the original software.
+
+ 4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ----------------------------------------------------------------
+
+ Notice that the above BSD-style license applies to this one file
+ (memcheck.h) only. The entire rest of Valgrind is licensed under
+ the terms of the GNU General Public License, version 2. See the
+ COPYING file in the source distribution for details.
+
+ ----------------------------------------------------------------
+*/
+
+
+#ifndef __MEMCHECK_H
+#define __MEMCHECK_H
+
+
+/* This file is for inclusion into client (your!) code.
+
+ You can use these macros to manipulate and query memory permissions
+ inside your own programs.
+
+ See comment near the top of valgrind.h on how to use them.
+*/
+
+#include "valgrind.h"
+
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
+ This enum comprises an ABI exported by Valgrind to programs
+ which use client requests. DO NOT CHANGE THE ORDER OF THESE
+ ENTRIES, NOR DELETE ANY -- add new ones at the end. */
+typedef
+ enum {
+ VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
+ VG_USERREQ__MAKE_MEM_UNDEFINED,
+ VG_USERREQ__MAKE_MEM_DEFINED,
+ VG_USERREQ__DISCARD,
+ VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
+ VG_USERREQ__CHECK_MEM_IS_DEFINED,
+ VG_USERREQ__DO_LEAK_CHECK,
+ VG_USERREQ__COUNT_LEAKS,
+
+ VG_USERREQ__GET_VBITS,
+ VG_USERREQ__SET_VBITS,
+
+ VG_USERREQ__CREATE_BLOCK,
+
+ VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,
+
+ /* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
+ VG_USERREQ__COUNT_LEAK_BLOCKS,
+
+ /* This is just for memcheck's internal use - don't use it */
+ _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR
+ = VG_USERREQ_TOOL_BASE('M','C') + 256
+ } Vg_MemCheckClientRequest;
+
+
+
+/* Client-code macros to manipulate the state of memory. */
+
+/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__MAKE_MEM_NOACCESS, \
+ (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Similarly, mark memory at _qzz_addr as addressable but undefined
+ for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__MAKE_MEM_UNDEFINED, \
+ (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Similarly, mark memory at _qzz_addr as addressable and defined
+ for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__MAKE_MEM_DEFINED, \
+ (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is
+ not altered: bytes which are addressable are marked as defined,
+ but those which are not addressable are left unchanged. */
+#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
+ (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Create a block-description handle. The description is an ascii
+ string which is included in any messages pertaining to addresses
+ within the specified memory range. Has no other effect on the
+ properties of the memory range. */
+#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__CREATE_BLOCK, \
+ (_qzz_addr), (_qzz_len), (_qzz_desc), \
+ 0, 0)
+
+/* Discard a block-description-handle. Returns 1 for an
+ invalid handle, 0 for a valid handle. */
+#define VALGRIND_DISCARD(_qzz_blkindex) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__DISCARD, \
+ 0, (_qzz_blkindex), 0, 0, 0)
+
+
+/* Client-code macros to check the state of memory. */
+
+/* Check that memory at _qzz_addr is addressable for _qzz_len bytes.
+ If suitable addressibility is not established, Valgrind prints an
+ error message and returns the address of the first offending byte.
+ Otherwise it returns zero. */
+#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
+ VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE, \
+ (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Check that memory at _qzz_addr is addressable and defined for
+ _qzz_len bytes. If suitable addressibility and definedness are not
+ established, Valgrind prints an error message and returns the
+ address of the first offending byte. Otherwise it returns zero. */
+#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
+ VG_USERREQ__CHECK_MEM_IS_DEFINED, \
+ (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Use this macro to force the definedness and addressibility of an
+ lvalue to be checked. If suitable addressibility and definedness
+ are not established, Valgrind prints an error message and returns
+ the address of the first offending byte. Otherwise it returns
+ zero. */
+#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue) \
+ VALGRIND_CHECK_MEM_IS_DEFINED( \
+ (volatile unsigned char *)&(__lvalue), \
+ (unsigned long)(sizeof (__lvalue)))
+
+
+/* Do a full memory leak check (like --leak-check=full) mid-execution. */
+#define VALGRIND_DO_LEAK_CHECK \
+ {unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DO_LEAK_CHECK, \
+ 0, 0, 0, 0, 0); \
+ }
+
+/* Do a summary memory leak check (like --leak-check=summary) mid-execution. */
+#define VALGRIND_DO_QUICK_LEAK_CHECK \
+ {unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DO_LEAK_CHECK, \
+ 1, 0, 0, 0, 0); \
+ }
+
+/* Return number of leaked, dubious, reachable and suppressed bytes found by
+ all previous leak checks. They must be lvalues. */
+#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed) \
+ /* For safety on 64-bit platforms we assign the results to private
+ unsigned long variables, then assign these to the lvalues the user
+ specified, which works no matter what type 'leaked', 'dubious', etc
+ are. We also initialise '_qzz_leaked', etc because
+ VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
+ defined. */ \
+ {unsigned long _qzz_res; \
+ unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
+ unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__COUNT_LEAKS, \
+ &_qzz_leaked, &_qzz_dubious, \
+ &_qzz_reachable, &_qzz_suppressed, 0); \
+ leaked = _qzz_leaked; \
+ dubious = _qzz_dubious; \
+ reachable = _qzz_reachable; \
+ suppressed = _qzz_suppressed; \
+ }
+
+/* Return number of leaked, dubious, reachable and suppressed bytes found by
+ all previous leak checks. They must be lvalues. */
+#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \
+ /* For safety on 64-bit platforms we assign the results to private
+ unsigned long variables, then assign these to the lvalues the user
+ specified, which works no matter what type 'leaked', 'dubious', etc
+ are. We also initialise '_qzz_leaked', etc because
+ VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
+ defined. */ \
+ {unsigned long _qzz_res; \
+ unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
+ unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__COUNT_LEAK_BLOCKS, \
+ &_qzz_leaked, &_qzz_dubious, \
+ &_qzz_reachable, &_qzz_suppressed, 0); \
+ leaked = _qzz_leaked; \
+ dubious = _qzz_dubious; \
+ reachable = _qzz_reachable; \
+ suppressed = _qzz_suppressed; \
+ }
+
+
+/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it
+ into the provided zzvbits array. Return values:
+ 0 if not running on valgrind
+ 1 success
+ 2 [previously indicated unaligned arrays; these are now allowed]
+ 3 if any parts of zzsrc/zzvbits are not addressable.
+ The metadata is not copied in cases 0, 2 or 3 so it should be
+ impossible to segfault your system by using this call.
+*/
+#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
+ VG_USERREQ__GET_VBITS, \
+ (const char*)(zza), \
+ (char*)(zzvbits), \
+ (zznbytes), 0, 0)
+
+/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it
+ from the provided zzvbits array. Return values:
+ 0 if not running on valgrind
+ 1 success
+ 2 [previously indicated unaligned arrays; these are now allowed]
+ 3 if any parts of zza/zzvbits are not addressable.
+ The metadata is not copied in cases 0, 2 or 3 so it should be
+ impossible to segfault your system by using this call.
+*/
+#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
+ VG_USERREQ__SET_VBITS, \
+ (const char*)(zza), \
+ (const char*)(zzvbits), \
+ (zznbytes), 0, 0 )
+
+#endif
+
+#endif
diff --git a/libchrome/base/third_party/valgrind/valgrind.h b/libchrome/base/third_party/valgrind/valgrind.h
new file mode 100644
index 0000000..0668a71
--- /dev/null
+++ b/libchrome/base/third_party/valgrind/valgrind.h
@@ -0,0 +1,4797 @@
+#ifdef ANDROID
+ #include "include/valgrind.h"
+#else
+/* -*- c -*-
+ ----------------------------------------------------------------
+
+ Notice that the following BSD-style license applies to this one
+ file (valgrind.h) only. The rest of Valgrind is licensed under the
+ terms of the GNU General Public License, version 2, unless
+ otherwise indicated. See the COPYING file in the source
+ distribution for details.
+
+ ----------------------------------------------------------------
+
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2010 Julian Seward. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
+ documentation would be appreciated but is not required.
+
+ 3. Altered source versions must be plainly marked as such, and must
+ not be misrepresented as being the original software.
+
+ 4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ----------------------------------------------------------------
+
+ Notice that the above BSD-style license applies to this one file
+ (valgrind.h) only. The entire rest of Valgrind is licensed under
+ the terms of the GNU General Public License, version 2. See the
+ COPYING file in the source distribution for details.
+
+ ----------------------------------------------------------------
+*/
+
+
+/* This file is for inclusion into client (your!) code.
+
+ You can use these macros to manipulate and query Valgrind's
+ execution inside your own programs.
+
+ The resulting executables will still run without Valgrind, just a
+ little bit more slowly than they otherwise would, but otherwise
+ unchanged. When not running on valgrind, each client request
+ consumes very few (eg. 7) instructions, so the resulting performance
+ loss is negligible unless you plan to execute client requests
+ millions of times per second. Nevertheless, if that is still a
+ problem, you can compile with the NVALGRIND symbol defined (gcc
+ -DNVALGRIND) so that client requests are not even compiled in. */
+
+#ifndef __VALGRIND_H
+#define __VALGRIND_H
+
+
+/* ------------------------------------------------------------------ */
+/* VERSION NUMBER OF VALGRIND */
+/* ------------------------------------------------------------------ */
+
+/* Specify Valgrind's version number, so that user code can
+ conditionally compile based on our version number. Note that these
+ were introduced at version 3.6 and so do not exist in version 3.5
+ or earlier. The recommended way to use them to check for "version
+ X.Y or later" is (eg)
+
+#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
+ && (__VALGRIND_MAJOR__ > 3 \
+ || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
+*/
+#define __VALGRIND_MAJOR__ 3
+#define __VALGRIND_MINOR__ 6
+
+
+#include <stdarg.h>
+
+/* Nb: this file might be included in a file compiled with -ansi. So
+ we can't use C++ style "//" comments nor the "asm" keyword (instead
+ use "__asm__"). */
+
+/* Derive some tags indicating what the target platform is. Note
+ that in this file we're using the compiler's CPP symbols for
+ identifying architectures, which are different to the ones we use
+ within the rest of Valgrind. Note, __powerpc__ is active for both
+ 32 and 64-bit PPC, whereas __powerpc64__ is only active for the
+ latter (on Linux, that is).
+
+ Misc note: how to find out what's predefined in gcc by default:
+ gcc -Wp,-dM somefile.c
+*/
+#undef PLAT_ppc64_aix5
+#undef PLAT_ppc32_aix5
+#undef PLAT_x86_darwin
+#undef PLAT_amd64_darwin
+#undef PLAT_x86_win32
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_arm_linux
+
+#if defined(_AIX) && defined(__64BIT__)
+# define PLAT_ppc64_aix5 1
+#elif defined(_AIX) && !defined(__64BIT__)
+# define PLAT_ppc32_aix5 1
+#elif defined(__APPLE__) && defined(__i386__)
+# define PLAT_x86_darwin 1
+#elif defined(__APPLE__) && defined(__x86_64__)
+# define PLAT_amd64_darwin 1
+#elif defined(__MINGW32__) || defined(__CYGWIN32__) || defined(_WIN32) && defined(_M_IX86)
+# define PLAT_x86_win32 1
+#elif defined(__linux__) && defined(__i386__)
+# define PLAT_x86_linux 1
+#elif defined(__linux__) && defined(__x86_64__)
+# define PLAT_amd64_linux 1
+#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
+# define PLAT_ppc32_linux 1
+#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__)
+# define PLAT_ppc64_linux 1
+#elif defined(__linux__) && defined(__arm__)
+# define PLAT_arm_linux 1
+#else
+/* If we're not compiling for our target platform, don't generate
+ any inline asms. */
+# if !defined(NVALGRIND)
+# define NVALGRIND 1
+# endif
+#endif
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */
+/* in here of use to end-users -- skip to the next section. */
+/* ------------------------------------------------------------------ */
+
+#if defined(NVALGRIND)
+
+/* Define NVALGRIND to completely remove the Valgrind magic sequence
+ from the compiled code (analogous to NDEBUG's effects on
+ assert()) */
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ { \
+ (_zzq_rlval) = (_zzq_default); \
+ }
+
+#else /* ! NVALGRIND */
+
+/* The following defines the magic code sequences which the JITter
+ spots and handles magically. Don't look too closely at them as
+ they will rot your brain.
+
+ The assembly code sequences for all architectures is in this one
+ file. This is because this file must be stand-alone, and we don't
+ want to have multiple files.
+
+ For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
+ value gets put in the return slot, so that everything works when
+ this is executed not under Valgrind. Args are passed in a memory
+ block, and so there's no intrinsic limit to the number that could
+ be passed, but it's currently five.
+
+ The macro args are:
+ _zzq_rlval result lvalue
+ _zzq_default default value (result returned when running on real CPU)
+ _zzq_request request code
+ _zzq_arg1..5 request params
+
+ The other two macros are used to support function wrapping, and are
+ a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the
+ guest's NRADDR pseudo-register and whatever other information is
+ needed to safely run the call original from the wrapper: on
+ ppc64-linux, the R2 value at the divert point is also needed. This
+ information is abstracted into a user-visible type, OrigFn.
+
+ VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
+ guest, but guarantees that the branch instruction will not be
+ redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
+ branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a
+ complete inline asm, since it needs to be combined with more magic
+ inline asm stuff to be useful.
+*/
+
+/* ------------------------- x86-{linux,darwin} ---------------- */
+
+#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
+ || (defined(PLAT_x86_win32) && defined(__GNUC__))
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "roll $3, %%edi ; roll $13, %%edi\n\t" \
+ "roll $29, %%edi ; roll $19, %%edi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ { volatile unsigned int _zzq_args[6]; \
+ volatile unsigned int _zzq_result; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %EDX = client_request ( %EAX ) */ \
+ "xchgl %%ebx,%%ebx" \
+ : "=d" (_zzq_result) \
+ : "a" (&_zzq_args[0]), "0" (_zzq_default) \
+ : "cc", "memory" \
+ ); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %EAX = guest_NRADDR */ \
+ "xchgl %%ecx,%%ecx" \
+ : "=a" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_EAX \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* call-noredir *%EAX */ \
+ "xchgl %%edx,%%edx\n\t"
+#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) */
+
+/* ------------------------- x86-Win32 ------------------------- */
+
+#if defined(PLAT_x86_win32) && !defined(__GNUC__)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#if defined(_MSC_VER)
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ __asm rol edi, 3 __asm rol edi, 13 \
+ __asm rol edi, 29 __asm rol edi, 19
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ { volatile uintptr_t _zzq_args[6]; \
+ volatile unsigned int _zzq_result; \
+ _zzq_args[0] = (uintptr_t)(_zzq_request); \
+ _zzq_args[1] = (uintptr_t)(_zzq_arg1); \
+ _zzq_args[2] = (uintptr_t)(_zzq_arg2); \
+ _zzq_args[3] = (uintptr_t)(_zzq_arg3); \
+ _zzq_args[4] = (uintptr_t)(_zzq_arg4); \
+ _zzq_args[5] = (uintptr_t)(_zzq_arg5); \
+ __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %EDX = client_request ( %EAX ) */ \
+ __asm xchg ebx,ebx \
+ __asm mov _zzq_result, edx \
+ } \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned int __addr; \
+ __asm { __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %EAX = guest_NRADDR */ \
+ __asm xchg ecx,ecx \
+ __asm mov __addr, eax \
+ } \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_EAX ERROR
+
+#else
+#error Unsupported compiler.
+#endif
+
+#endif /* PLAT_x86_win32 */
+
+/* ------------------------ amd64-{linux,darwin} --------------- */
+
+#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
+
+typedef
+ struct {
+ unsigned long long int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
+ "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ { volatile unsigned long long int _zzq_args[6]; \
+ volatile unsigned long long int _zzq_result; \
+ _zzq_args[0] = (unsigned long long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %RDX = client_request ( %RAX ) */ \
+ "xchgq %%rbx,%%rbx" \
+ : "=d" (_zzq_result) \
+ : "a" (&_zzq_args[0]), "0" (_zzq_default) \
+ : "cc", "memory" \
+ ); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned long long int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %RAX = guest_NRADDR */ \
+ "xchgq %%rcx,%%rcx" \
+ : "=a" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_RAX \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* call-noredir *%RAX */ \
+ "xchgq %%rdx,%%rdx\n\t"
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
+ "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { unsigned int _zzq_args[6]; \
+ unsigned int _zzq_result; \
+ unsigned int* _zzq_ptr; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile("mr 3,%1\n\t" /*default*/ \
+ "mr 4,%2\n\t" /*ptr*/ \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = client_request ( %R4 ) */ \
+ "or 1,1,1\n\t" \
+ "mr %0,3" /*result*/ \
+ : "=b" (_zzq_result) \
+ : "b" (_zzq_default), "b" (_zzq_ptr) \
+ : "cc", "memory", "r3", "r4"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR */ \
+ "or 2,2,2\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "cc", "memory", "r3" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R11 */ \
+ "or 3,3,3\n\t"
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+typedef
+ struct {
+ unsigned long long int nraddr; /* where's the code? */
+ unsigned long long int r2; /* what tocptr do we need? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
+ "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { unsigned long long int _zzq_args[6]; \
+ register unsigned long long int _zzq_result __asm__("r3"); \
+ register unsigned long long int* _zzq_ptr __asm__("r4"); \
+ _zzq_args[0] = (unsigned long long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = client_request ( %R4 ) */ \
+ "or 1,1,1" \
+ : "=r" (_zzq_result) \
+ : "0" (_zzq_default), "r" (_zzq_ptr) \
+ : "cc", "memory"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ register unsigned long long int __addr __asm__("r3"); \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR */ \
+ "or 2,2,2" \
+ : "=r" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR_GPR2 */ \
+ "or 4,4,4" \
+ : "=r" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_orig->r2 = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R11 */ \
+ "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc64_linux */
+
+/* ------------------------- arm-linux ------------------------- */
+
+#if defined(PLAT_arm_linux)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \
+ "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { volatile unsigned int _zzq_args[6]; \
+ volatile unsigned int _zzq_result; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ __asm__ volatile("mov r3, %1\n\t" /*default*/ \
+ "mov r4, %2\n\t" /*ptr*/ \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* R3 = client_request ( R4 ) */ \
+ "orr r10, r10, r10\n\t" \
+ "mov %0, r3" /*result*/ \
+ : "=r" (_zzq_result) \
+ : "r" (_zzq_default), "r" (&_zzq_args[0]) \
+ : "cc","memory", "r3", "r4"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* R3 = guest_NRADDR */ \
+ "orr r11, r11, r11\n\t" \
+ "mov %0, r3" \
+ : "=r" (__addr) \
+ : \
+ : "cc", "memory", "r3" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R4 */ \
+ "orr r12, r12, r12\n\t"
+
+#endif /* PLAT_arm_linux */
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ unsigned int r2; /* what tocptr do we need? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
+ "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { unsigned int _zzq_args[7]; \
+ register unsigned int _zzq_result; \
+ register unsigned int* _zzq_ptr; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ _zzq_args[6] = (unsigned int)(_zzq_default); \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile("mr 4,%1\n\t" \
+ "lwz 3, 24(4)\n\t" \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = client_request ( %R4 ) */ \
+ "or 1,1,1\n\t" \
+ "mr %0,3" \
+ : "=b" (_zzq_result) \
+ : "b" (_zzq_ptr) \
+ : "r3", "r4", "cc", "memory"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ register unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR */ \
+ "or 2,2,2\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "r3", "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR_GPR2 */ \
+ "or 4,4,4\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "r3", "cc", "memory" \
+ ); \
+ _zzq_orig->r2 = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R11 */ \
+ "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+typedef
+ struct {
+ unsigned long long int nraddr; /* where's the code? */
+ unsigned long long int r2; /* what tocptr do we need? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
+ "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { unsigned long long int _zzq_args[7]; /* request code, 5 args, then default */ \
+ register unsigned long long int _zzq_result; \
+ register unsigned long long int* _zzq_ptr; \
+ _zzq_args[0] = (unsigned long long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
+ _zzq_args[6] = (unsigned long long int)(_zzq_default); /* returned as-is when not on valgrind */ \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile("mr 4,%1\n\t" /* &_zzq_args[0] -> r4 */ \
+ "ld 3, 48(4)\n\t" /* preload default (args[6], offset 6*8) into r3 */ \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = client_request ( %R4 ) */ \
+ "or 1,1,1\n\t" \
+ "mr %0,3" \
+ : "=b" (_zzq_result) \
+ : "b" (_zzq_ptr) \
+ : "r3", "r4", "cc", "memory"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ register unsigned long long int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR */ \
+ "or 2,2,2\n\t" /* magic no-op: fetch wrapped fn's address */ \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "r3", "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR_GPR2 */ \
+ "or 4,4,4\n\t" /* magic no-op: fetch wrapped fn's TOC pointer */ \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "r3", "cc", "memory" \
+ ); \
+ _zzq_orig->r2 = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R11 */ \
+ "or 3,3,3\n\t" /* magic no-op the JIT turns into an unredirected call */
+
+#endif /* PLAT_ppc64_aix5 */
+
+/* Insert assembly code for other platforms here... */
+
+#endif /* NVALGRIND */
+
+
+/* ------------------------------------------------------------------ */
+/* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */
+/* ugly. It's the least-worst tradeoff I can think of. */
+/* ------------------------------------------------------------------ */
+
+/* This section defines magic (a.k.a appalling-hack) macros for doing
+ guaranteed-no-redirection macros, so as to get from function
+ wrappers to the functions they are wrapping. The whole point is to
+ construct standard call sequences, but to do the call itself with a
+ special no-redirect call pseudo-instruction that the JIT
+ understands and handles specially. This section is long and
+ repetitious, and I can't see a way to make it shorter.
+
+ The naming scheme is as follows:
+
+ CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
+
+ 'W' stands for "word" and 'v' for "void". Hence there are
+ different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
+ and for each, the possibility of returning a word-typed result, or
+ no result.
+*/
+
+/* Use these to write the name of your wrapper. NOTE: duplicates
+ VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */
+
+/* Use an extra level of macroisation so as to ensure the soname/fnname
+ args are fully macro-expanded before pasting them together. */
+#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd /* paste after full expansion */
+
+#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \
+ VG_CONCAT4(_vgwZU_,soname,_,fnname) /* ZU: soname is Z-encoded, fnname unencoded */
+
+#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \
+ VG_CONCAT4(_vgwZZ_,soname,_,fnname) /* ZZ: both soname and fnname Z-encoded */
+
+/* Use this macro from within a wrapper function to collect the
+ context (address and possibly other info) of the original function.
+ Once you have that you can then use it in one of the CALL_FN_
+ macros. The type of the argument _lval is OrigFn. */
+#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval) /* alias; platform sections define the worker */
+
+/* Derivatives of the main macros below, for calling functions
+ returning void. */
+
+#define CALL_FN_v_v(fnptr) \
+ do { volatile unsigned long _junk; /* result discarded into _junk */ \
+ CALL_FN_W_v(_junk,fnptr); } while (0)
+
+#define CALL_FN_v_W(fnptr, arg1) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
+
+#define CALL_FN_v_WW(fnptr, arg1,arg2) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
+
+#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
+
+#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)
+
+#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)
+
+#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)
+
+#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0)
+
+/* ------------------------- x86-{linux,darwin} ---------------- */
+
+#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin)
+
+/* These regs are trashed by the hidden call. No need to mention eax
+ as gcc can already see that, plus causes gcc to bomb. */
+#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
+
+/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
+ long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX /* call *%eax with redirection disabled */ \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ "subl $12, %%esp\n\t" /* 12 pad + 4 arg = 16: keep %esp 16-aligned */ \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $16, %%esp\n" /* drop arg + padding */ \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ "subl $8, %%esp\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $16, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ "subl $4, %%esp\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $16, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ "pushl 16(%%eax)\n\t" /* 16 bytes of args: no padding needed */ \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $16, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ "subl $12, %%esp\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $32, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ "subl $8, %%esp\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $32, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ "subl $4, %%esp\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $32, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ "pushl 32(%%eax)\n\t" /* 32 bytes of args: no padding needed */ \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $32, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ "subl $12, %%esp\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $48, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ "subl $8, %%esp\n\t" \
+ "pushl 40(%%eax)\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $48, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ "subl $4, %%esp\n\t" \
+ "pushl 44(%%eax)\n\t" \
+ "pushl 40(%%eax)\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $48, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ "pushl 48(%%eax)\n\t" /* 48 bytes of args: no padding needed */ \
+ "pushl 44(%%eax)\n\t" \
+ "pushl 40(%%eax)\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $48, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_x86_linux || PLAT_x86_darwin */
+
+/* ------------------------ amd64-{linux,darwin} --------------- */
+
+#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
+
+/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
+ "rdi", "r8", "r9", "r10", "r11"
+
+/* This is all pretty complex. It's so as to make stack unwinding
+ work reliably. See bug 243270. The basic problem is the sub and
+ add of 128 of %rsp in all of the following macros. If gcc believes
+ the CFA is in %rsp, then unwinding may fail, because what's at the
+ CFA is not what gcc "expected" when it constructs the CFIs for the
+ places where the macros are instantiated.
+
+ But we can't just add a CFI annotation to increase the CFA offset
+ by 128, to match the sub of 128 from %rsp, because we don't know
+ whether gcc has chosen %rsp as the CFA at that point, or whether it
+ has chosen some other register (eg, %rbp). In the latter case,
+ adding a CFI annotation to change the CFA offset is simply wrong.
+
+ So the solution is to get hold of the CFA using
+ __builtin_dwarf_cfa(), put it in a known register, and add a
+ CFI annotation to say what the register is. We choose %rbp for
+ this (perhaps perversely), because:
+
+ (1) %rbp is already subject to unwinding. If a new register was
+ chosen then the unwinder would have to unwind it in all stack
+ traces, which is expensive, and
+
+ (2) %rbp is already subject to precise exception updates in the
+ JIT. If a new register was chosen, we'd have to have precise
+ exceptions for it too, which reduces performance of the
+ generated code.
+
+ However .. one extra complication. We can't just whack the result
+ of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
+ list of trashed registers at the end of the inline assembly
+ fragments; gcc won't allow %rbp to appear in that list. Hence
+ instead we need to stash %rbp in %r15 for the duration of the asm,
+ and say that %r15 is trashed instead. gcc seems happy to go with
+ that.
+
+ Oh .. and this all needs to be conditionalised so that it is
+ unchanged from before this commit, when compiled with older gccs
+ that don't support __builtin_dwarf_cfa. Furthermore, since
+ this header file is freestanding, it has to be independent of
+ config.h, and so the following conditionalisation cannot depend on
+ configure time checks.
+
+ Although it's not clear from
+ 'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
+ this expression excludes Darwin.
+ .cfi directives in Darwin assembly appear to be completely
+ different and I haven't investigated how they work.
+
+ For even more entertainment value, note we have to use the
+ completely undocumented __builtin_dwarf_cfa(), which appears to
+ really compute the CFA, whereas __builtin_frame_address(0) claims
+ to but actually doesn't. See
+ https://bugs.kde.org/show_bug.cgi?id=243270#c47
+*/
+#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
+# define __FRAME_POINTER \
+ ,"r"(__builtin_dwarf_cfa()) /* extra asm input %2: the true CFA */ \
+# define VALGRIND_CFI_PROLOGUE \
+ "movq %%rbp, %%r15\n\t" /* stash rbp: gcc forbids rbp in the clobber list */ \
+ "movq %2, %%rbp\n\t" /* CFA -> rbp */ \
+ ".cfi_remember_state\n\t" \
+ ".cfi_def_cfa rbp, 0\n\t" /* tell the unwinder the CFA now lives in rbp */
+# define VALGRIND_CFI_EPILOGUE \
+ "movq %%r15, %%rbp\n\t" \
+ ".cfi_restore_state\n\t"
+#else
+# define __FRAME_POINTER
+# define VALGRIND_CFI_PROLOGUE
+# define VALGRIND_CFI_EPILOGUE
+#endif
+
+
+/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
+ long) == 8. */
+
+/* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_
+ macros. In order not to trash the stack redzone, we need to drop
+ %rsp by 128 before the hidden call, and restore afterwards. The
+ nastiness is that it is only by luck that the stack still appears
+ to be unwindable during the hidden call - since then the behaviour
+ of any routine using this macro does not match what the CFI data
+ says. Sigh.
+
+ Why is this important? Imagine that a wrapper has a stack
+ allocated local, and passes to the hidden call, a pointer to it.
+ Because gcc does not know about the hidden call, it may allocate
+ that local in the redzone. Unfortunately the hidden call may then
+ trash it before it comes to use it. So we must step clear of the
+ redzone, for the duration of the hidden call, to make it safe.
+
+ Probably the same problem afflicts the other redzone-style ABIs too
+ (ppc64-linux, ppc32-aix5, ppc64-aix5); but for those, the stack is
+ self describing (none of this CFI nonsense) so at least messing
+ with the stack pointer doesn't give a danger of non-unwindable
+ stack. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" /* step clear of the red zone (see note above) */ \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX /* call *%rax with redirection disabled */ \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "movq 48(%%rax), %%r9\n\t" /* all 6 reg args in use */ \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $136,%%rsp\n\t" /* 128 red zone + 8 pad: 16-aligned after 1 push */ \
+ "pushq 56(%%rax)\n\t" /* arg7 goes on the stack */ \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $8, %%rsp\n" /* drop the stacked arg */ \
+ "addq $136,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" /* 2 pushes = 16 bytes: already aligned */ \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $16, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $136,%%rsp\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $24, %%rsp\n" \
+ "addq $136,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $32, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $136,%%rsp\n\t" \
+ "pushq 88(%%rax)\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $40, %%rsp\n" \
+ "addq $136,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $128,%%rsp\n\t" \
+ "pushq 96(%%rax)\n\t" \
+ "pushq 88(%%rax)\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $48, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+/* This is useful for finding out about the on-stack stuff:
+
+ extern int f9 ( int,int,int,int,int,int,int,int,int );
+ extern int f10 ( int,int,int,int,int,int,int,int,int,int );
+ extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
+ extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
+
+ int g9 ( void ) {
+ return f9(11,22,33,44,55,66,77,88,99);
+ }
+ int g10 ( void ) {
+ return f10(11,22,33,44,55,66,77,88,99,110);
+ }
+ int g11 ( void ) {
+ return f11(11,22,33,44,55,66,77,88,99,110,121);
+ }
+ int g12 ( void ) {
+ return f12(11,22,33,44,55,66,77,88,99,110,121,132);
+ }
+*/
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc32-linux,
+ sizeof(unsigned long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-16\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,16\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-16\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,12(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,16\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-32\n\t" \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,16(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,12(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,32\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ _argvec[12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-32\n\t" \
+ /* arg12 */ \
+ "lwz 3,48(11)\n\t" \
+ "stw 3,20(1)\n\t" \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,16(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,12(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,32\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
+ long) == 8. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-128\n\t" /* expand stack frame */ \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,128" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+10]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-128\n\t" /* expand stack frame */ \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,128" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { /* call 11-arg fn, word result; args 9-11 go to the stack */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+11]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] */ \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-144\n\t" /* expand stack frame */ \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] again (call trashed it) */ \
+ "mr %0,3\n\t" /* return value from r3 */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,144" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { /* call 12-arg fn, word result; args 9-12 go to the stack */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+12]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ _argvec[2+12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] */ \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-144\n\t" /* expand stack frame */ \
+ /* arg12 */ \
+ "ld 3,96(11)\n\t" \
+ "std 3,136(1)\n\t" \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] again (call trashed it) */ \
+ "mr %0,3\n\t" /* return value from r3 */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,144" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc64_linux */
+
+/* ------------------------- arm-linux ------------------------- */
+
+#if defined(PLAT_arm_linux)
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4","r14" /* r4 carries the target addr */
+
+/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
+ long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { /* call 0-arg fn, word result */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" /* return value */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { /* call 1-arg fn, word result */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #4] \n\t" /* arg1->r0 */ \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { /* call 2-arg fn, word result */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #4] \n\t" /* arg1->r0 */ \
+ "ldr r1, [%1, #8] \n\t" /* arg2->r1 */ \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { /* call 3-arg fn, word result */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #4] \n\t" /* arg1->r0 */ \
+ "ldr r1, [%1, #8] \n\t" /* arg2->r1 */ \
+ "ldr r2, [%1, #12] \n\t" /* arg3->r2 */ \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { /* call 4-arg fn, word result; all args in r0-r3 */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { /* call 5-arg fn, word result; arg5 passed on the stack */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #20] \n\t" \
+ "push {r0} \n\t" /* NOTE(review): 4-byte push leaves sp 8-byte misaligned at the call; AAPCS wants 8 - confirm acceptable */ \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #4 \n\t" /* drop stacked arg */ \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { /* call 6-arg fn, word result; args 5-6 on the stack */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "push {r0, r1} \n\t" /* args 5-6 */ \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #8 \n\t" /* drop stacked args */ \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { /* call 7-arg fn, word result; args 5-7 on the stack */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "push {r0, r1, r2} \n\t" /* NOTE(review): 12-byte push breaks AAPCS 8-byte sp alignment at the call - confirm acceptable */ \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #12 \n\t" /* drop stacked args */ \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { /* call 8-arg fn, word result; args 5-8 on the stack */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "push {r0, r1, r2, r3} \n\t" /* args 5-8 */ \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #16 \n\t" /* drop stacked args */ \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { /* call 9-arg fn, word result; args 5-9 on the stack */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" /* NOTE(review): 20-byte push breaks AAPCS 8-byte sp alignment at the call - confirm acceptable */ \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #20 \n\t" /* drop stacked args */ \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { /* call 10-arg fn, word result; args 5-10 on the stack */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #40] \n\t" \
+ "push {r0} \n\t" /* arg10 first (deepest on stack) */ \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" /* args 5-9 */ \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #24 \n\t" /* drop stacked args */ \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11) \
+ do { /* call 11-arg fn, word result; args 5-11 on the stack */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #40] \n\t" \
+ "ldr r1, [%1, #44] \n\t" \
+ "push {r0, r1} \n\t" /* NOTE(review): total 28-byte push breaks AAPCS 8-byte sp alignment at the call - confirm acceptable */ \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" /* args 5-9 */ \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #28 \n\t" /* drop stacked args */ \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory",__CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11,arg12) \
+ do { /* call 12-arg fn, word result; args 5-12 on the stack */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #40] \n\t" \
+ "ldr r1, [%1, #44] \n\t" \
+ "ldr r2, [%1, #48] \n\t" \
+ "push {r0, r1, r2} \n\t" /* args 10-12 (deepest) */ \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" /* args 5-9 */ \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #32 \n\t" /* drop stacked args */ \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_arm_linux */
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13" /* r11 carries the target addr; r2 is the TOC ptr */
+
+/* Expand the stack frame, copying enough info that unwinding
+ still works. Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
+ "addi 1,1,-" #_n_fr "\n\t" /* sp -= _n_fr */ \
+ "lwz 3," #_n_fr "(1)\n\t" /* fetch old back-chain word */ \
+ "stw 3,0(1)\n\t" /* re-plant it at new sp so unwinding works */
+
+#define VG_CONTRACT_FRAME_BY(_n_fr) \
+ "addi 1,1," #_n_fr "\n\t" /* sp += _n_fr: undo VG_EXPAND_FRAME_BY_trashes_r3 */
+
+/* These CALL_FN_ macros assume that on ppc32-aix5, sizeof(unsigned
+ long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { /* call 0-arg fn, word result */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] again (call trashed it) */ \
+ "mr %0,3\n\t" /* return value from r3 */ \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { /* call 1-arg fn, word result */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] again (call trashed it) */ \
+ "mr %0,3\n\t" /* return value from r3 */ \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { /* call 2-arg fn, word result */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] again (call trashed it) */ \
+ "mr %0,3\n\t" /* return value from r3 */ \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { /* call 3-arg fn, word result */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] again (call trashed it) */ \
+ "mr %0,3\n\t" /* return value from r3 */ \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { /* call 4-arg fn, word result */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] again (call trashed it) */ \
+ "mr %0,3\n\t" /* return value from r3 */ \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { /* call 5-arg fn, word result */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] again (call trashed it) */ \
+ "mr %0,3\n\t" /* return value from r3 */ \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { /* call 6-arg fn, word result */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] again (call trashed it) */ \
+ "mr %0,3\n\t" /* return value from r3 */ \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { /* call 7-arg fn, word result */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] again (call trashed it) */ \
+ "mr %0,3\n\t" /* return value from r3 */ \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { /* call 8-arg fn, word result; all args fit in r3-r10 */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] again (call trashed it) */ \
+ "mr %0,3\n\t" /* return value from r3 */ \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { /* call 9-arg fn, word result; arg9 spills to the stack */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(64) /* room for stacked args */ \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] again (call trashed it) */ \
+ "mr %0,3\n\t" /* return value from r3 */ \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(64) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { /* call 10-arg fn, word result; args 9-10 spill to the stack */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+10]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(64) /* room for stacked args */ \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,60(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] again (call trashed it) */ \
+ "mr %0,3\n\t" /* return value from r3 */ \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(64) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { /* call 11-arg fn, word result; args 9-11 spill to the stack */ \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+11]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(72) /* room for stacked args */ \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,64(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,60(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" /* r11 = &_argvec[2] again (call trashed it) */ \
+ "mr %0,3\n\t" /* return value from r3 */ \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(72) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+12]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ _argvec[2+12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(72) \
+ /* arg12 */ \
+ "lwz 3,48(11)\n\t" \
+ "stw 3,68(1)\n\t" \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,64(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,60(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(72) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* Expand the stack frame, copying enough info that unwinding
+ still works. Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
+ "addi 1,1,-" #_n_fr "\n\t" \
+ "ld 3," #_n_fr "(1)\n\t" \
+ "std 3,0(1)\n\t"
+
+#define VG_CONTRACT_FRAME_BY(_n_fr) \
+ "addi 1,1," #_n_fr "\n\t"
+
+/* These CALL_FN_ macros assume that on ppc64-aix5, sizeof(unsigned
+ long) == 8. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(128) \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(128) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+10]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(128) \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(128) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+11]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(144) \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(144) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+12]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ _argvec[2+12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(144) \
+ /* arg12 */ \
+ "ld 3,96(11)\n\t" \
+ "std 3,136(1)\n\t" \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(144) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc64_aix5 */
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
+/* */
+/* ------------------------------------------------------------------ */
+
+/* Some request codes. There are many more of these, but most are not
+ exposed to end-user view. These are the public ones, all of the
+ form 0x1000 + small_number.
+
+ Core ones are in the range 0x00000000--0x0000ffff. The non-public
+ ones start at 0x2000.
+*/
+
+/* These macros are used by tools -- they must be public, but don't
+ embed them into other programs. */
+#define VG_USERREQ_TOOL_BASE(a,b) \
+ ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
+#define VG_IS_TOOL_USERREQ(a, b, v) \
+ (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
+
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
+ This enum comprises an ABI exported by Valgrind to programs
+ which use client requests. DO NOT CHANGE THE ORDER OF THESE
+ ENTRIES, NOR DELETE ANY -- add new ones at the end. */
+typedef
+ enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
+ VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
+
+ /* These allow any function to be called from the simulated
+ CPU but run on the real CPU. Nb: the first arg passed to
+ the function is always the ThreadId of the running
+ thread! So CLIENT_CALL0 actually requires a 1 arg
+ function, etc. */
+ VG_USERREQ__CLIENT_CALL0 = 0x1101,
+ VG_USERREQ__CLIENT_CALL1 = 0x1102,
+ VG_USERREQ__CLIENT_CALL2 = 0x1103,
+ VG_USERREQ__CLIENT_CALL3 = 0x1104,
+
+ /* Can be useful in regression testing suites -- eg. can
+ send Valgrind's output to /dev/null and still count
+ errors. */
+ VG_USERREQ__COUNT_ERRORS = 0x1201,
+
+ /* These are useful and can be interpreted by any tool that
+ tracks malloc() et al, by using vg_replace_malloc.c. */
+ VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
+ VG_USERREQ__FREELIKE_BLOCK = 0x1302,
+ /* Memory pool support. */
+ VG_USERREQ__CREATE_MEMPOOL = 0x1303,
+ VG_USERREQ__DESTROY_MEMPOOL = 0x1304,
+ VG_USERREQ__MEMPOOL_ALLOC = 0x1305,
+ VG_USERREQ__MEMPOOL_FREE = 0x1306,
+ VG_USERREQ__MEMPOOL_TRIM = 0x1307,
+ VG_USERREQ__MOVE_MEMPOOL = 0x1308,
+ VG_USERREQ__MEMPOOL_CHANGE = 0x1309,
+ VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
+
+ /* Allow printfs to valgrind log. */
+ /* The first two pass the va_list argument by value, which
+ assumes it is the same size as or smaller than a UWord,
+ which generally isn't the case. Hence are deprecated.
+ The second two pass the vargs by reference and so are
+ immune to this problem. */
+ /* both :: char* fmt, va_list vargs (DEPRECATED) */
+ VG_USERREQ__PRINTF = 0x1401,
+ VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
+ /* both :: char* fmt, va_list* vargs */
+ VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403,
+ VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404,
+
+ /* Stack support. */
+ VG_USERREQ__STACK_REGISTER = 0x1501,
+ VG_USERREQ__STACK_DEREGISTER = 0x1502,
+ VG_USERREQ__STACK_CHANGE = 0x1503,
+
+ /* Wine support */
+ VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601,
+
+ /* Querying of debug info. */
+ VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701
+ } Vg_ClientRequest;
+
+#if !defined(__GNUC__)
+# define __extension__ /* */
+#endif
+
+
+/*
+ * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
+ * client request and whose value equals the client request result.
+ */
+
+#if defined(NVALGRIND)
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ (_zzq_default)
+
+#else /*defined(NVALGRIND)*/
+
+#if defined(_MSC_VER)
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ (vg_VALGRIND_DO_CLIENT_REQUEST_EXPR((uintptr_t)(_zzq_default), \
+ (_zzq_request), (uintptr_t)(_zzq_arg1), (uintptr_t)(_zzq_arg2), \
+ (uintptr_t)(_zzq_arg3), (uintptr_t)(_zzq_arg4), \
+ (uintptr_t)(_zzq_arg5)))
+
+static __inline unsigned
+vg_VALGRIND_DO_CLIENT_REQUEST_EXPR(uintptr_t _zzq_default,
+ unsigned _zzq_request, uintptr_t _zzq_arg1,
+ uintptr_t _zzq_arg2, uintptr_t _zzq_arg3,
+ uintptr_t _zzq_arg4, uintptr_t _zzq_arg5)
+{
+ unsigned _zzq_rlval;
+ VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, _zzq_request,
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5);
+ return _zzq_rlval;
+}
+
+#else /*defined(_MSC_VER)*/
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ (__extension__({unsigned int _zzq_rlval; \
+ VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ _zzq_rlval; \
+ }))
+
+#endif /*defined(_MSC_VER)*/
+
+#endif /*defined(NVALGRIND)*/
+
+
+/* Returns the number of Valgrinds this code is running under. That
+ is, 0 if running natively, 1 if running under Valgrind, 2 if
+ running under Valgrind which is running under another Valgrind,
+ etc. */
+#define RUNNING_ON_VALGRIND \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, \
+ VG_USERREQ__RUNNING_ON_VALGRIND, \
+ 0, 0, 0, 0, 0) \
+
+
+/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
+ _qzz_len - 1]. Useful if you are debugging a JITter or some such,
+ since it provides a way to make sure valgrind will retranslate the
+ invalidated area. Returns no value. */
+#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DISCARD_TRANSLATIONS, \
+ _qzz_addr, _qzz_len, 0, 0, 0); \
+ }
+
+
+/* These requests are for getting Valgrind itself to print something.
+ Possibly with a backtrace. This is a really ugly hack. The return value
+ is the number of characters printed, excluding the "**<pid>** " part at the
+ start and the backtrace (if present). */
+
+#if defined(NVALGRIND)
+
+# define VALGRIND_PRINTF(...)
+# define VALGRIND_PRINTF_BACKTRACE(...)
+
+#else /* NVALGRIND */
+
+#if !defined(_MSC_VER)
+/* Modern GCC will optimize the static routine out if unused,
+ and unused attribute will shut down warnings about it. */
+static int VALGRIND_PRINTF(const char *format, ...)
+ __attribute__((format(__printf__, 1, 2), __unused__));
+#endif
+static int
+#if defined(_MSC_VER)
+__inline
+#endif
+VALGRIND_PRINTF(const char *format, ...)
+{
+ unsigned long _qzz_res;
+ va_list vargs;
+ va_start(vargs, format);
+#if defined(_MSC_VER)
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+ VG_USERREQ__PRINTF_VALIST_BY_REF,
+ (uintptr_t)format,
+ (uintptr_t)&vargs,
+ 0, 0, 0);
+#else
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+ VG_USERREQ__PRINTF_VALIST_BY_REF,
+ (unsigned long)format,
+ (unsigned long)&vargs,
+ 0, 0, 0);
+#endif
+ va_end(vargs);
+ return (int)_qzz_res;
+}
+
+#if !defined(_MSC_VER)
+static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
+ __attribute__((format(__printf__, 1, 2), __unused__));
+#endif
+static int
+#if defined(_MSC_VER)
+__inline
+#endif
+VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
+{
+ unsigned long _qzz_res;
+ va_list vargs;
+ va_start(vargs, format);
+#if defined(_MSC_VER)
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+ VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+ (uintptr_t)format,
+ (uintptr_t)&vargs,
+ 0, 0, 0);
+#else
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+ VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+ (unsigned long)format,
+ (unsigned long)&vargs,
+ 0, 0, 0);
+#endif
+ va_end(vargs);
+ return (int)_qzz_res;
+}
+
+#endif /* NVALGRIND */
+
+
+/* These requests allow control to move from the simulated CPU to the
+   real CPU, calling an arbitrary function.
+
+ Note that the current ThreadId is inserted as the first argument.
+ So this call:
+
+ VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
+
+ requires f to have this signature:
+
+ Word f(Word tid, Word arg1, Word arg2)
+
+ where "Word" is a word-sized type.
+
+ Note that these client requests are not entirely reliable. For example,
+ if you call a function with them that subsequently calls printf(),
+ there's a high chance Valgrind will crash. Generally, your prospects of
+ these working are made higher if the called function does not refer to
+ any global variables, and does not refer to any libc or other functions
+ (printf et al). Any kind of entanglement with libc or dynamic linking is
+ likely to have a bad outcome, for tricky reasons which we've grappled
+ with a lot in the past.
+*/
+#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
+ __extension__ \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL0, \
+ _qyy_fn, \
+ 0, 0, 0, 0); \
+ _qyy_res; \
+ })
+
+#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
+ __extension__ \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL1, \
+ _qyy_fn, \
+ _qyy_arg1, 0, 0, 0); \
+ _qyy_res; \
+ })
+
+#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
+ __extension__ \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL2, \
+ _qyy_fn, \
+ _qyy_arg1, _qyy_arg2, 0, 0); \
+ _qyy_res; \
+ })
+
+#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
+ __extension__ \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL3, \
+ _qyy_fn, \
+ _qyy_arg1, _qyy_arg2, \
+ _qyy_arg3, 0); \
+ _qyy_res; \
+ })
+
+
+/* Counts the number of errors that have been recorded by a tool. Nb:
+ the tool must record the errors with VG_(maybe_record_error)() or
+ VG_(unique_error)() for them to be counted. */
+#define VALGRIND_COUNT_ERRORS \
+ __extension__ \
+ ({unsigned int _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__COUNT_ERRORS, \
+ 0, 0, 0, 0, 0); \
+ _qyy_res; \
+ })
+
+/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
+ when heap blocks are allocated in order to give accurate results. This
+ happens automatically for the standard allocator functions such as
+ malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
+ delete[], etc.
+
+ But if your program uses a custom allocator, this doesn't automatically
+ happen, and Valgrind will not do as well. For example, if you allocate
+   superblocks with mmap() and then allocate chunks of the superblocks, all
+ Valgrind's observations will be at the mmap() level and it won't know that
+ the chunks should be considered separate entities. In Memcheck's case,
+ that means you probably won't get heap block overrun detection (because
+ there won't be redzones marked as unaddressable) and you definitely won't
+ get any leak detection.
+
+ The following client requests allow a custom allocator to be annotated so
+ that it can be handled accurately by Valgrind.
+
+ VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
+ by a malloc()-like function. For Memcheck (an illustrative case), this
+ does two things:
+
+ - It records that the block has been allocated. This means any addresses
+ within the block mentioned in error messages will be
+ identified as belonging to the block. It also means that if the block
+ isn't freed it will be detected by the leak checker.
+
+ - It marks the block as being addressable and undefined (if 'is_zeroed' is
+ not set), or addressable and defined (if 'is_zeroed' is set). This
+ controls how accesses to the block by the program are handled.
+
+ 'addr' is the start of the usable block (ie. after any
+ redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
+ can apply redzones -- these are blocks of padding at the start and end of
+ each block. Adding redzones is recommended as it makes it much more likely
+ Valgrind will spot block overruns. `is_zeroed' indicates if the memory is
+ zeroed (or filled with another predictable value), as is the case for
+ calloc().
+
+ VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
+ heap block -- that will be used by the client program -- is allocated.
+ It's best to put it at the outermost level of the allocator if possible;
+ for example, if you have a function my_alloc() which calls
+ internal_alloc(), and the client request is put inside internal_alloc(),
+ stack traces relating to the heap block will contain entries for both
+ my_alloc() and internal_alloc(), which is probably not what you want.
+
+ For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
+ custom blocks from within a heap block, B, that has been allocated with
+ malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
+ -- the custom blocks will take precedence.
+
+ VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For
+ Memcheck, it does two things:
+
+ - It records that the block has been deallocated. This assumes that the
+ block was annotated as having been allocated via
+ VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
+
+ - It marks the block as being unaddressable.
+
+ VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
+ heap block is deallocated.
+
+ In many cases, these two client requests will not be enough to get your
+ allocator working well with Memcheck. More specifically, if your allocator
+ writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
+ will be necessary to mark the memory as addressable just before the zeroing
+ occurs, otherwise you'll get a lot of invalid write errors. For example,
+ you'll need to do this if your allocator recycles freed blocks, but it
+ zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
+ Alternatively, if your allocator reuses freed blocks for allocator-internal
+ data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
+
+ Really, what's happening is a blurring of the lines between the client
+ program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
+ memory should be considered unaddressable to the client program, but the
+ allocator knows more than the rest of the client program and so may be able
+ to safely access it. Extra client requests are necessary for Valgrind to
+ understand the distinction between the allocator and the rest of the
+ program.
+
+ Note: there is currently no VALGRIND_REALLOCLIKE_BLOCK client request; it
+ has to be emulated with MALLOCLIKE/FREELIKE and memory copying.
+
+ Ignored if addr == 0.
+*/
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MALLOCLIKE_BLOCK, \
+ addr, sizeB, rzB, is_zeroed, 0); \
+ }
+
+/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
+ Ignored if addr == 0.
+*/
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__FREELIKE_BLOCK, \
+ addr, rzB, 0, 0, 0); \
+ }
+
+/* Create a memory pool. */
+#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__CREATE_MEMPOOL, \
+ pool, rzB, is_zeroed, 0, 0); \
+ }
+
+/* Destroy a memory pool. */
+#define VALGRIND_DESTROY_MEMPOOL(pool) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DESTROY_MEMPOOL, \
+ pool, 0, 0, 0, 0); \
+ }
+
+/* Associate a piece of memory with a memory pool. */
+#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_ALLOC, \
+ pool, addr, size, 0, 0); \
+ }
+
+/* Disassociate a piece of memory from a memory pool. */
+#define VALGRIND_MEMPOOL_FREE(pool, addr) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_FREE, \
+ pool, addr, 0, 0, 0); \
+ }
+
+/* Disassociate any pieces outside a particular range. */
+#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_TRIM, \
+ pool, addr, size, 0, 0); \
+ }
+
+/* Resize and/or move a piece associated with a memory pool. */
+#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MOVE_MEMPOOL, \
+ poolA, poolB, 0, 0, 0); \
+ }
+
+/* Resize and/or move a piece associated with a memory pool. */
+#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_CHANGE, \
+ pool, addrA, addrB, size, 0); \
+ }
+
+/* Return 1 if a mempool exists, else 0. */
+#define VALGRIND_MEMPOOL_EXISTS(pool) \
+ __extension__ \
+ ({unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_EXISTS, \
+ pool, 0, 0, 0, 0); \
+ _qzz_res; \
+ })
+
+/* Mark a piece of memory as being a stack. Returns a stack id. */
+#define VALGRIND_STACK_REGISTER(start, end) \
+ __extension__ \
+ ({unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_REGISTER, \
+ start, end, 0, 0, 0); \
+ _qzz_res; \
+ })
+
+/* Unmark the piece of memory associated with a stack id as being a
+ stack. */
+#define VALGRIND_STACK_DEREGISTER(id) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_DEREGISTER, \
+ id, 0, 0, 0, 0); \
+ }
+
+/* Change the start and end address of the stack id. */
+#define VALGRIND_STACK_CHANGE(id, start, end) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_CHANGE, \
+ id, start, end, 0, 0); \
+ }
+
+/* Load PDB debug info for Wine PE image_map. */
+#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__LOAD_PDB_DEBUGINFO, \
+ fd, ptr, total_size, delta, 0); \
+ }
+
+/* Map a code address to a source file name and line number. buf64
+ must point to a 64-byte buffer in the caller's address space. The
+ result will be dumped in there and is guaranteed to be zero
+ terminated. If no info is found, the first byte is set to zero. */
+#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MAP_IP_TO_SRCLOC, \
+ addr, buf64, 0, 0, 0); \
+ }
+
+
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_arm_linux
+#undef PLAT_ppc32_aix5
+#undef PLAT_ppc64_aix5
+
+#endif /* __VALGRIND_H */
+
+#endif
diff --git a/libchrome/base/threading/OWNERS b/libchrome/base/threading/OWNERS
new file mode 100644
index 0000000..4198e99
--- /dev/null
+++ b/libchrome/base/threading/OWNERS
@@ -0,0 +1,2 @@
+# For thread_restrictions.*
+jam@chromium.org
diff --git a/libchrome/base/threading/non_thread_safe.h b/libchrome/base/threading/non_thread_safe.h
new file mode 100644
index 0000000..d41c086
--- /dev/null
+++ b/libchrome/base/threading/non_thread_safe.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_NON_THREAD_SAFE_H_
+#define BASE_THREADING_NON_THREAD_SAFE_H_
+
+// Classes deriving from NonThreadSafe may need to suppress MSVC warning 4275:
+// non dll-interface class 'Bar' used as base for dll-interface class 'Foo'.
+// There is a specific macro to do it: NON_EXPORTED_BASE(), defined in
+// compiler_specific.h
+#include "base/compiler_specific.h"
+
+// See comment at top of thread_checker.h
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
+#define ENABLE_NON_THREAD_SAFE 1
+#else
+#define ENABLE_NON_THREAD_SAFE 0
+#endif
+
+#include "base/threading/non_thread_safe_impl.h"
+
+namespace base {
+
+// Do nothing implementation of NonThreadSafe, for release mode.
+//
+// Note: You should almost always use the NonThreadSafe class to get
+// the right version of the class for your build configuration.
+class NonThreadSafeDoNothing {
+ public:
+ bool CalledOnValidThread() const {
+ return true;
+ }
+
+ protected:
+ ~NonThreadSafeDoNothing() {}
+ void DetachFromThread() {}
+};
+
+// NonThreadSafe is a helper class used to help verify that methods of a
+// class are called from the same thread. One can inherit from this class
+// and use CalledOnValidThread() to verify.
+//
+// This is intended to be used with classes that appear to be thread safe, but
+// aren't. For example, a service or a singleton like the preferences system.
+//
+// Example:
+// class MyClass : public base::NonThreadSafe {
+// public:
+// void Foo() {
+// DCHECK(CalledOnValidThread());
+// ... (do stuff) ...
+// }
+// }
+//
+// Note that base::ThreadChecker offers identical functionality to
+// NonThreadSafe, but does not require inheritance. In general, it is preferable
+// to have a base::ThreadChecker as a member, rather than inherit from
+// NonThreadSafe. For more details about when to choose one over the other, see
+// the documentation for base::ThreadChecker.
+#if ENABLE_NON_THREAD_SAFE
+typedef NonThreadSafeImpl NonThreadSafe;
+#else
+typedef NonThreadSafeDoNothing NonThreadSafe;
+#endif // ENABLE_NON_THREAD_SAFE
+
+#undef ENABLE_NON_THREAD_SAFE
+
+} // namespace base
+
+#endif // BASE_THREADING_NON_THREAD_SAFE_H_
diff --git a/libchrome/base/threading/non_thread_safe_impl.cc b/libchrome/base/threading/non_thread_safe_impl.cc
new file mode 100644
index 0000000..7e729d9
--- /dev/null
+++ b/libchrome/base/threading/non_thread_safe_impl.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/non_thread_safe_impl.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+bool NonThreadSafeImpl::CalledOnValidThread() const {
+ return thread_checker_.CalledOnValidThread();
+}
+
+NonThreadSafeImpl::~NonThreadSafeImpl() {
+ DCHECK(CalledOnValidThread());
+}
+
+void NonThreadSafeImpl::DetachFromThread() {
+ thread_checker_.DetachFromThread();
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/non_thread_safe_impl.h b/libchrome/base/threading/non_thread_safe_impl.h
new file mode 100644
index 0000000..a3a356d
--- /dev/null
+++ b/libchrome/base/threading/non_thread_safe_impl.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_NON_THREAD_SAFE_IMPL_H_
+#define BASE_THREADING_NON_THREAD_SAFE_IMPL_H_
+
+#include "base/base_export.h"
+#include "base/threading/thread_checker_impl.h"
+
+namespace base {
+
+// Full implementation of NonThreadSafe, for debug mode or for occasional
+// temporary use in release mode e.g. when you need to CHECK on a thread
+// bug that only occurs in the wild.
+//
+// Note: You should almost always use the NonThreadSafe class to get
+// the right version of the class for your build configuration.
+class BASE_EXPORT NonThreadSafeImpl {
+ public:
+ bool CalledOnValidThread() const;
+
+ protected:
+ ~NonThreadSafeImpl();
+
+ // Changes the thread that is checked for in CalledOnValidThread. The next
+ // call to CalledOnValidThread will attach this class to a new thread. It is
+ // up to the NonThreadSafe derived class to decide to expose this or not.
+ // This may be useful when an object may be created on one thread and then
+ // used exclusively on another thread.
+ void DetachFromThread();
+
+ private:
+ ThreadCheckerImpl thread_checker_;
+};
+
+} // namespace base
+
+#endif // BASE_THREADING_NON_THREAD_SAFE_IMPL_H_
diff --git a/libchrome/base/threading/non_thread_safe_unittest.cc b/libchrome/base/threading/non_thread_safe_unittest.cc
new file mode 100644
index 0000000..d523fc5
--- /dev/null
+++ b/libchrome/base/threading/non_thread_safe_unittest.cc
@@ -0,0 +1,165 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/non_thread_safe.h"
+
+#include <memory>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/threading/simple_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Duplicated from base/threading/non_thread_safe.h so that we can be
+// good citizens there and undef the macro.
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
+#define ENABLE_NON_THREAD_SAFE 1
+#else
+#define ENABLE_NON_THREAD_SAFE 0
+#endif
+
+namespace base {
+
+namespace {
+
+// Simple class to exercise the basics of NonThreadSafe.
+// Both the destructor and DoStuff should verify that they were
+// called on the same thread as the constructor.
+class NonThreadSafeClass : public NonThreadSafe {
+ public:
+ NonThreadSafeClass() {}
+
+ // Verifies that it was called on the same thread as the constructor.
+ void DoStuff() {
+ DCHECK(CalledOnValidThread());
+ }
+
+ void DetachFromThread() {
+ NonThreadSafe::DetachFromThread();
+ }
+
+ static void MethodOnDifferentThreadImpl();
+ static void DestructorOnDifferentThreadImpl();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(NonThreadSafeClass);
+};
+
+// Calls NonThreadSafeClass::DoStuff on another thread.
+class CallDoStuffOnThread : public SimpleThread {
+ public:
+ explicit CallDoStuffOnThread(NonThreadSafeClass* non_thread_safe_class)
+ : SimpleThread("call_do_stuff_on_thread"),
+ non_thread_safe_class_(non_thread_safe_class) {
+ }
+
+ void Run() override { non_thread_safe_class_->DoStuff(); }
+
+ private:
+ NonThreadSafeClass* non_thread_safe_class_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallDoStuffOnThread);
+};
+
+// Deletes NonThreadSafeClass on a different thread.
+class DeleteNonThreadSafeClassOnThread : public SimpleThread {
+ public:
+ explicit DeleteNonThreadSafeClassOnThread(
+ NonThreadSafeClass* non_thread_safe_class)
+ : SimpleThread("delete_non_thread_safe_class_on_thread"),
+ non_thread_safe_class_(non_thread_safe_class) {
+ }
+
+ void Run() override { non_thread_safe_class_.reset(); }
+
+ private:
+ std::unique_ptr<NonThreadSafeClass> non_thread_safe_class_;
+
+ DISALLOW_COPY_AND_ASSIGN(DeleteNonThreadSafeClassOnThread);
+};
+
+} // namespace
+
+TEST(NonThreadSafeTest, CallsAllowedOnSameThread) {
+ std::unique_ptr<NonThreadSafeClass> non_thread_safe_class(
+ new NonThreadSafeClass);
+
+ // Verify that DoStuff doesn't assert.
+ non_thread_safe_class->DoStuff();
+
+ // Verify that the destructor doesn't assert.
+ non_thread_safe_class.reset();
+}
+
+TEST(NonThreadSafeTest, DetachThenDestructOnDifferentThread) {
+ std::unique_ptr<NonThreadSafeClass> non_thread_safe_class(
+ new NonThreadSafeClass);
+
+ // Verify that the destructor doesn't assert when called on a different thread
+ // after a detach.
+ non_thread_safe_class->DetachFromThread();
+ DeleteNonThreadSafeClassOnThread delete_on_thread(
+ non_thread_safe_class.release());
+
+ delete_on_thread.Start();
+ delete_on_thread.Join();
+}
+
+#if GTEST_HAS_DEATH_TEST || !ENABLE_NON_THREAD_SAFE
+
+void NonThreadSafeClass::MethodOnDifferentThreadImpl() {
+ std::unique_ptr<NonThreadSafeClass> non_thread_safe_class(
+ new NonThreadSafeClass);
+
+ // Verify that DoStuff asserts in debug builds only when called
+ // on a different thread.
+ CallDoStuffOnThread call_on_thread(non_thread_safe_class.get());
+
+ call_on_thread.Start();
+ call_on_thread.Join();
+}
+
+#if ENABLE_NON_THREAD_SAFE
+TEST(NonThreadSafeDeathTest, MethodNotAllowedOnDifferentThreadInDebug) {
+ ASSERT_DEATH({
+ NonThreadSafeClass::MethodOnDifferentThreadImpl();
+ }, "");
+}
+#else
+TEST(NonThreadSafeTest, MethodAllowedOnDifferentThreadInRelease) {
+ NonThreadSafeClass::MethodOnDifferentThreadImpl();
+}
+#endif // ENABLE_NON_THREAD_SAFE
+
+void NonThreadSafeClass::DestructorOnDifferentThreadImpl() {
+ std::unique_ptr<NonThreadSafeClass> non_thread_safe_class(
+ new NonThreadSafeClass);
+
+ // Verify that the destructor asserts in debug builds only
+ // when called on a different thread.
+ DeleteNonThreadSafeClassOnThread delete_on_thread(
+ non_thread_safe_class.release());
+
+ delete_on_thread.Start();
+ delete_on_thread.Join();
+}
+
+#if ENABLE_NON_THREAD_SAFE
+TEST(NonThreadSafeDeathTest, DestructorNotAllowedOnDifferentThreadInDebug) {
+ ASSERT_DEATH({
+ NonThreadSafeClass::DestructorOnDifferentThreadImpl();
+ }, "");
+}
+#else
+TEST(NonThreadSafeTest, DestructorAllowedOnDifferentThreadInRelease) {
+ NonThreadSafeClass::DestructorOnDifferentThreadImpl();
+}
+#endif // ENABLE_NON_THREAD_SAFE
+
+#endif // GTEST_HAS_DEATH_TEST || !ENABLE_NON_THREAD_SAFE
+
+// Just in case we ever get lumped together with other compilation units.
+#undef ENABLE_NON_THREAD_SAFE
+
+} // namespace base
diff --git a/libchrome/base/threading/platform_thread.h b/libchrome/base/threading/platform_thread.h
new file mode 100644
index 0000000..9b217a9
--- /dev/null
+++ b/libchrome/base/threading/platform_thread.h
@@ -0,0 +1,204 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// WARNING: You should *NOT* be using this class directly. PlatformThread is
+// the low-level platform-specific abstraction to the OS's threading interface.
+// You should instead be using a message-loop driven Thread, see thread.h.
+
+#ifndef BASE_THREADING_PLATFORM_THREAD_H_
+#define BASE_THREADING_PLATFORM_THREAD_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_POSIX)
+#include <pthread.h>
+#include <unistd.h>
+#endif
+
+namespace base {
+
+// Used for logging. Always an integer value.
+#if defined(OS_WIN)
+typedef DWORD PlatformThreadId;
+#elif defined(OS_POSIX)
+typedef pid_t PlatformThreadId;
+#endif
+
+// Used for thread checking and debugging.
+// Meant to be as fast as possible.
+// These are produced by PlatformThread::CurrentRef(), and used to later
+// check if we are on the same thread or not by using ==. These are safe
+// to copy between threads, but can't be copied to another process as they
+// have no meaning there. Also, the internal identifier can be re-used
+// after a thread dies, so a PlatformThreadRef cannot be reliably used
+// to distinguish a new thread from an old, dead thread.
+class PlatformThreadRef {
+ public:
+#if defined(OS_WIN)
+ typedef DWORD RefType;
+#elif defined(OS_POSIX)
+ typedef pthread_t RefType;
+#endif
+ PlatformThreadRef()
+ : id_(0) {
+ }
+
+ explicit PlatformThreadRef(RefType id)
+ : id_(id) {
+ }
+
+ bool operator==(PlatformThreadRef other) const {
+ return id_ == other.id_;
+ }
+
+ bool is_null() const {
+ return id_ == 0;
+ }
+ private:
+ RefType id_;
+};
+
+// Used to operate on threads.
+class PlatformThreadHandle {
+ public:
+#if defined(OS_WIN)
+ typedef void* Handle;
+#elif defined(OS_POSIX)
+ typedef pthread_t Handle;
+#endif
+
+ PlatformThreadHandle() : handle_(0) {}
+
+ explicit PlatformThreadHandle(Handle handle) : handle_(handle) {}
+
+ bool is_equal(const PlatformThreadHandle& other) const {
+ return handle_ == other.handle_;
+ }
+
+ bool is_null() const {
+ return !handle_;
+ }
+
+ Handle platform_handle() const {
+ return handle_;
+ }
+
+ private:
+ Handle handle_;
+};
+
+const PlatformThreadId kInvalidThreadId(0);
+
+// Valid values for priority of Thread::Options and SimpleThread::Options, and
+// SetCurrentThreadPriority(), listed in increasing order of importance.
+enum class ThreadPriority : int {
+ // Suitable for threads that shouldn't disrupt high priority work.
+ BACKGROUND,
+ // Default priority level.
+ NORMAL,
+ // Suitable for threads which generate data for the display (at ~60Hz).
+ DISPLAY,
+ // Suitable for low-latency, glitch-resistant audio.
+ REALTIME_AUDIO,
+};
+
+// A namespace for low-level thread functions.
+class BASE_EXPORT PlatformThread {
+ public:
+ // Implement this interface to run code on a background thread. Your
+ // ThreadMain method will be called on the newly created thread.
+ class BASE_EXPORT Delegate {
+ public:
+ virtual void ThreadMain() = 0;
+
+ protected:
+ virtual ~Delegate() {}
+ };
+
+ // Gets the current thread id, which may be useful for logging purposes.
+ static PlatformThreadId CurrentId();
+
+ // Gets the current thread reference, which can be used to check if
+ // we're on the right thread quickly.
+ static PlatformThreadRef CurrentRef();
+
+ // Get the handle representing the current thread. On Windows, this is a
+ // pseudo handle constant which will always represent the thread using it and
+ // hence should not be shared with other threads nor be used to differentiate
+ // the current thread from another.
+ static PlatformThreadHandle CurrentHandle();
+
+ // Yield the current thread so another thread can be scheduled.
+ static void YieldCurrentThread();
+
+ // Sleeps for the specified duration.
+ static void Sleep(base::TimeDelta duration);
+
+ // Sets the thread name visible to debuggers/tools. This will try to
+ // initialize the context for current thread unless it's a WorkerThread.
+ static void SetName(const std::string& name);
+
+ // Gets the thread name, if previously set by SetName.
+ static const char* GetName();
+
+ // Creates a new thread. The |stack_size| parameter can be 0 to indicate
+ // that the default stack size should be used. Upon success,
+ // |*thread_handle| will be assigned a handle to the newly created thread,
+ // and |delegate|'s ThreadMain method will be executed on the newly created
+ // thread.
+ // NOTE: When you are done with the thread handle, you must call Join to
+ // release system resources associated with the thread. You must ensure that
+ // the Delegate object outlives the thread.
+ static bool Create(size_t stack_size,
+ Delegate* delegate,
+ PlatformThreadHandle* thread_handle) {
+ return CreateWithPriority(stack_size, delegate, thread_handle,
+ ThreadPriority::NORMAL);
+ }
+
+ // CreateWithPriority() does the same thing as Create() except the priority of
+ // the thread is set based on |priority|.
+ static bool CreateWithPriority(size_t stack_size, Delegate* delegate,
+ PlatformThreadHandle* thread_handle,
+ ThreadPriority priority);
+
+ // CreateNonJoinable() does the same thing as Create() except the thread
+ // cannot be Join()'d. Therefore, it also does not output a
+ // PlatformThreadHandle.
+ static bool CreateNonJoinable(size_t stack_size, Delegate* delegate);
+
+ // Joins with a thread created via the Create function. This function blocks
+ // the caller until the designated thread exits. This will invalidate
+ // |thread_handle|.
+ static void Join(PlatformThreadHandle thread_handle);
+
+ // Detaches and releases the thread handle. The thread is no longer joinable
+ // and |thread_handle| is invalidated after this call.
+ static void Detach(PlatformThreadHandle thread_handle);
+
+ // Toggles the current thread's priority at runtime. A thread may not be able
+ // to raise its priority back up after lowering it if the process does not
+ // have a proper permission, e.g. CAP_SYS_NICE on Linux. A thread may not be
+ // able to lower its priority back down after raising it to REALTIME_AUDIO.
+ // Since changing other threads' priority is not permitted in favor of
+ // security, this interface is restricted to change only the current thread
+ // priority (https://crbug.com/399473).
+ static void SetCurrentThreadPriority(ThreadPriority priority);
+
+ static ThreadPriority GetCurrentThreadPriority();
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PlatformThread);
+};
+
+} // namespace base
+
+#endif // BASE_THREADING_PLATFORM_THREAD_H_
diff --git a/libchrome/base/threading/platform_thread_internal_posix.cc b/libchrome/base/threading/platform_thread_internal_posix.cc
new file mode 100644
index 0000000..378a24d
--- /dev/null
+++ b/libchrome/base/threading/platform_thread_internal_posix.cc
@@ -0,0 +1,39 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/platform_thread_internal_posix.h"
+
+#include "base/containers/adapters.h"
+#include "base/logging.h"
+
+namespace base {
+
+namespace internal {
+
+int ThreadPriorityToNiceValue(ThreadPriority priority) {
+ for (const auto& pair : kThreadPriorityToNiceValueMap) {
+ if (pair.priority == priority)
+ return pair.nice_value;
+ }
+ NOTREACHED() << "Unknown ThreadPriority";
+ return 0;
+}
+
+ThreadPriority NiceValueToThreadPriority(int nice_value) {
+ // Try to find a priority that best describes |nice_value|. If there isn't
+ // an exact match, this method returns the closest priority whose nice value
+ // is higher (lower priority) than |nice_value|.
+ for (const auto& pair : Reversed(kThreadPriorityToNiceValueMap)) {
+ if (pair.nice_value >= nice_value)
+ return pair.priority;
+ }
+
+ // Reaching here means |nice_value| is more than any of the defined
+ // priorities. The lowest priority is suitable in this case.
+ return ThreadPriority::BACKGROUND;
+}
+
+} // namespace internal
+
+} // namespace base
diff --git a/libchrome/base/threading/platform_thread_internal_posix.h b/libchrome/base/threading/platform_thread_internal_posix.h
new file mode 100644
index 0000000..5f4a215
--- /dev/null
+++ b/libchrome/base/threading/platform_thread_internal_posix.h
@@ -0,0 +1,48 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
+#define BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
+
+#include "base/base_export.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+namespace internal {
+
+struct ThreadPriorityToNiceValuePair {
+ ThreadPriority priority;
+ int nice_value;
+};
+// The elements must be listed in the order of increasing priority (lowest
+// priority first), that is, in the order of decreasing nice values (highest
+// nice value first).
+BASE_EXPORT extern
+const ThreadPriorityToNiceValuePair kThreadPriorityToNiceValueMap[4];
+
+// Returns the nice value matching |priority| based on the platform-specific
+// implementation of kThreadPriorityToNiceValueMap.
+int ThreadPriorityToNiceValue(ThreadPriority priority);
+
+// Returns the ThreadPriority matching |nice_value| based on the platform-
+// specific implementation of kThreadPriorityToNiceValueMap.
+BASE_EXPORT ThreadPriority NiceValueToThreadPriority(int nice_value);
+
+// Allows platform specific tweaks to the generic POSIX solution for
+// SetCurrentThreadPriority. Returns true if the platform-specific
+// implementation handled this |priority| change, false if the generic
+// implementation should instead proceed.
+bool SetCurrentThreadPriorityForPlatform(ThreadPriority priority);
+
+// Returns true if there is a platform-specific ThreadPriority set on the
+// current thread (and returns the actual ThreadPriority via |priority|).
+// Returns false otherwise, leaving |priority| untouched.
+bool GetCurrentThreadPriorityForPlatform(ThreadPriority* priority);
+
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
diff --git a/libchrome/base/threading/platform_thread_linux.cc b/libchrome/base/threading/platform_thread_linux.cc
new file mode 100644
index 0000000..ab7c97e
--- /dev/null
+++ b/libchrome/base/threading/platform_thread_linux.cc
@@ -0,0 +1,107 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/platform_thread.h"
+
+#include <errno.h>
+#include <sched.h>
+#include <stddef.h>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/platform_thread_internal_posix.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "base/tracked_objects.h"
+#include "build/build_config.h"
+
+#if !defined(OS_NACL)
+#include <pthread.h>
+#include <sys/prctl.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+namespace base {
+
+namespace internal {
+
+namespace {
+#if !defined(OS_NACL)
+const struct sched_param kRealTimePrio = {8};
+#endif
+} // namespace
+
+const ThreadPriorityToNiceValuePair kThreadPriorityToNiceValueMap[4] = {
+ {ThreadPriority::BACKGROUND, 10},
+ {ThreadPriority::NORMAL, 0},
+ {ThreadPriority::DISPLAY, -8},
+ {ThreadPriority::REALTIME_AUDIO, -10},
+};
+
+bool SetCurrentThreadPriorityForPlatform(ThreadPriority priority) {
+#if !defined(OS_NACL)
+ return priority == ThreadPriority::REALTIME_AUDIO &&
+ pthread_setschedparam(pthread_self(), SCHED_RR, &kRealTimePrio) == 0;
+#else
+ return false;
+#endif
+}
+
+bool GetCurrentThreadPriorityForPlatform(ThreadPriority* priority) {
+#if !defined(OS_NACL)
+ int maybe_sched_rr = 0;
+ struct sched_param maybe_realtime_prio = {0};
+ if (pthread_getschedparam(pthread_self(), &maybe_sched_rr,
+ &maybe_realtime_prio) == 0 &&
+ maybe_sched_rr == SCHED_RR &&
+ maybe_realtime_prio.sched_priority == kRealTimePrio.sched_priority) {
+ *priority = ThreadPriority::REALTIME_AUDIO;
+ return true;
+ }
+#endif
+ return false;
+}
+
+} // namespace internal
+
+// static
+void PlatformThread::SetName(const std::string& name) {
+ ThreadIdNameManager::GetInstance()->SetName(CurrentId(), name);
+ tracked_objects::ThreadData::InitializeThreadContext(name);
+
+#if !defined(OS_NACL)
+ // On linux we can get the thread names to show up in the debugger by setting
+ // the process name for the LWP. We don't want to do this for the main
+ // thread because that would rename the process, causing tools like killall
+ // to stop working.
+ if (PlatformThread::CurrentId() == getpid())
+ return;
+
+ // http://0pointer.de/blog/projects/name-your-threads.html
+ // Set the name for the LWP (which gets truncated to 15 characters).
+ // Note that glibc also has a 'pthread_setname_np' api, but it may not be
+  // available everywhere and its only benefit over using prctl directly is
+ // that it can set the name of threads other than the current thread.
+ int err = prctl(PR_SET_NAME, name.c_str());
+ // We expect EPERM failures in sandboxed processes, just ignore those.
+ if (err < 0 && errno != EPERM)
+ DPLOG(ERROR) << "prctl(PR_SET_NAME)";
+#endif // !defined(OS_NACL)
+}
+
+void InitThreading() {}
+
+void TerminateOnThread() {}
+
+size_t GetDefaultThreadStackSize(const pthread_attr_t& /*attributes*/) {
+#if !defined(THREAD_SANITIZER)
+ return 0;
+#else
+  // ThreadSanitizer bloats the stack heavily. Experience has shown that the
+  // default stack size isn't enough for some browser tests.
+ return 2 * (1 << 23); // 2 times 8192K (the default stack size on Linux).
+#endif
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/platform_thread_mac.mm b/libchrome/base/threading/platform_thread_mac.mm
new file mode 100644
index 0000000..51f3621
--- /dev/null
+++ b/libchrome/base/threading/platform_thread_mac.mm
@@ -0,0 +1,247 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/platform_thread.h"
+
+#import <Foundation/Foundation.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <mach/thread_policy.h>
+#include <stddef.h>
+#include <sys/resource.h>
+
+#include <algorithm>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "base/mac/mach_logging.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "base/tracked_objects.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+NSString* const kThreadPriorityKey = @"CrThreadPriorityKey";
+} // namespace
+
+// If Cocoa is to be used on more than one thread, it must know that the
+// application is multithreaded. Since it's possible to enter Cocoa code
+// from threads created by pthread_thread_create, Cocoa won't necessarily
+// be aware that the application is multithreaded. Spawning an NSThread is
+// enough to get Cocoa to set up for multithreaded operation, so this is done
+// if necessary before pthread_thread_create spawns any threads.
+//
+// http://developer.apple.com/documentation/Cocoa/Conceptual/Multithreading/CreatingThreads/chapter_4_section_4.html
+void InitThreading() {
+  // Caches the answer on first call; later calls are cheap. Not thread-safe
+  // in itself, but it is invoked before the first thread is spawned.
+  static BOOL multithreaded = [NSThread isMultiThreaded];
+  if (!multithreaded) {
+    // Detaching any NSThread flips Cocoa into multithreaded mode.
+    // +[NSObject class] is idempotent.
+    [NSThread detachNewThreadSelector:@selector(class)
+                             toTarget:[NSObject class]
+                           withObject:nil];
+    multithreaded = YES;
+
+    DCHECK([NSThread isMultiThreaded]);
+  }
+}
+
+// static
+void PlatformThread::SetName(const std::string& name) {
+  // Register the name with process-local registries used by tracing and the
+  // task profiler, then push it to the OS for debugger visibility.
+  ThreadIdNameManager::GetInstance()->SetName(CurrentId(), name);
+  tracked_objects::ThreadData::InitializeThreadContext(name);
+
+  // Mac OS X does not expose the length limit of the name, so
+  // hardcode it.
+  const int kMaxNameLength = 63;
+  std::string shortened_name = name.substr(0, kMaxNameLength);
+  // pthread_setname() fails (harmlessly) in the sandbox, ignore when it does.
+  // See http://crbug.com/47058
+  // Note: the Darwin pthread_setname_np() only names the current thread,
+  // hence no thread argument.
+  pthread_setname_np(shortened_name.c_str());
+}
+
+namespace {
+
+// Restores the default (timeshare) Mach scheduling policy for the thread,
+// undoing any previously applied real-time constraints. Failure is logged
+// but otherwise ignored.
+void SetPriorityNormal(mach_port_t mach_thread_id) {
+  // Make thread standard policy.
+  // Please note that this call could fail in rare cases depending
+  // on runtime conditions.
+  thread_standard_policy policy;
+  kern_return_t result =
+      thread_policy_set(mach_thread_id,
+                        THREAD_STANDARD_POLICY,
+                        reinterpret_cast<thread_policy_t>(&policy),
+                        THREAD_STANDARD_POLICY_COUNT);
+
+  if (result != KERN_SUCCESS)
+    MACH_DVLOG(1, result) << "thread_policy_set";
+}
+
+// Enables time-constraint policy and priority suitable for low-latency,
+// glitch-resistant audio.
+void SetPriorityRealtimeAudio(mach_port_t mach_thread_id) {
+  // Increase thread priority to real-time.
+
+  // Please note that the thread_policy_set() calls may fail in
+  // rare cases if the kernel decides the system is under heavy load
+  // and is unable to handle boosting the thread priority.
+  // In these cases we just return early and go on with life.
+
+  // Step 1: make the thread fixed priority (opt out of timesharing).
+  thread_extended_policy_data_t policy;
+  policy.timeshare = 0;  // Set to 1 for a non-fixed thread.
+  kern_return_t result =
+      thread_policy_set(mach_thread_id,
+                        THREAD_EXTENDED_POLICY,
+                        reinterpret_cast<thread_policy_t>(&policy),
+                        THREAD_EXTENDED_POLICY_COUNT);
+  if (result != KERN_SUCCESS) {
+    MACH_DVLOG(1, result) << "thread_policy_set";
+    return;
+  }
+
+  // Step 2: set to relatively high priority.
+  thread_precedence_policy_data_t precedence;
+  precedence.importance = 63;
+  result = thread_policy_set(mach_thread_id,
+                             THREAD_PRECEDENCE_POLICY,
+                             reinterpret_cast<thread_policy_t>(&precedence),
+                             THREAD_PRECEDENCE_POLICY_COUNT);
+  if (result != KERN_SUCCESS) {
+    MACH_DVLOG(1, result) << "thread_policy_set";
+    return;
+  }
+
+  // Step 3 (most important): set real-time constraints.
+
+  // Define the guaranteed and max fraction of time for the audio thread.
+  // These "duty cycle" values can range from 0 to 1. A value of 0.5
+  // means the scheduler would give half the time to the thread.
+  // These values have empirically been found to yield good behavior.
+  // Good means that audio performance is high and other threads won't starve.
+  const double kGuaranteedAudioDutyCycle = 0.75;
+  const double kMaxAudioDutyCycle = 0.85;
+
+  // Define constants determining how much time the audio thread can
+  // use in a given time quantum. All times are in milliseconds.
+
+  // About 128 frames @44.1KHz
+  const double kTimeQuantum = 2.9;
+
+  // Time guaranteed each quantum.
+  const double kAudioTimeNeeded = kGuaranteedAudioDutyCycle * kTimeQuantum;
+
+  // Maximum time each quantum.
+  const double kMaxTimeAllowed = kMaxAudioDutyCycle * kTimeQuantum;
+
+  // Get the conversion factor from milliseconds to absolute time
+  // which is what the time-constraints call needs.
+  // mach_timebase_info gives numer/denom such that abs * numer / denom = ns,
+  // so ms -> abs is (denom / numer) * 1e6.
+  mach_timebase_info_data_t tb_info;
+  mach_timebase_info(&tb_info);
+  double ms_to_abs_time =
+      (static_cast<double>(tb_info.denom) / tb_info.numer) * 1000000;
+
+  thread_time_constraint_policy_data_t time_constraints;
+  time_constraints.period = kTimeQuantum * ms_to_abs_time;
+  time_constraints.computation = kAudioTimeNeeded * ms_to_abs_time;
+  time_constraints.constraint = kMaxTimeAllowed * ms_to_abs_time;
+  time_constraints.preemptible = 0;
+
+  result =
+      thread_policy_set(mach_thread_id,
+                        THREAD_TIME_CONSTRAINT_POLICY,
+                        reinterpret_cast<thread_policy_t>(&time_constraints),
+                        THREAD_TIME_CONSTRAINT_POLICY_COUNT);
+  MACH_DVLOG_IF(1, result != KERN_SUCCESS, result) << "thread_policy_set";
+
+  return;
+}
+
+} // anonymous namespace
+
+// static
+void PlatformThread::SetCurrentThreadPriority(ThreadPriority priority) {
+  // Convert from pthread_t to mach thread identifier.
+  mach_port_t mach_thread_id =
+      pthread_mach_thread_np(PlatformThread::CurrentHandle().platform_handle());
+
+  switch (priority) {
+    case ThreadPriority::NORMAL:
+    case ThreadPriority::BACKGROUND:
+    case ThreadPriority::DISPLAY:
+      // BACKGROUND and DISPLAY currently map to the standard policy.
+      // Add support for non-NORMAL thread priorities. https://crbug.com/554651
+      SetPriorityNormal(mach_thread_id);
+      break;
+    case ThreadPriority::REALTIME_AUDIO:
+      SetPriorityRealtimeAudio(mach_thread_id);
+      break;
+  }
+
+  // Remember the requested priority in the thread's dictionary; the kernel
+  // offers no way to read it back, so GetCurrentThreadPriority() relies on
+  // this stored value.
+  [[[NSThread currentThread] threadDictionary]
+      setObject:@(static_cast<int>(priority))
+         forKey:kThreadPriorityKey];
+}
+
+// static
+ThreadPriority PlatformThread::GetCurrentThreadPriority() {
+  // Read back the value stored by SetCurrentThreadPriority(). A thread whose
+  // priority was never set has no entry and reports NORMAL.
+  NSNumber* priority = base::mac::ObjCCast<NSNumber>([[[NSThread currentThread]
+      threadDictionary] objectForKey:kThreadPriorityKey]);
+
+  if (!priority)
+    return ThreadPriority::NORMAL;
+
+  ThreadPriority thread_priority =
+      static_cast<ThreadPriority>(priority.intValue);
+  // Validate the stored int before trusting the cast.
+  switch (thread_priority) {
+    case ThreadPriority::BACKGROUND:
+    case ThreadPriority::NORMAL:
+    case ThreadPriority::DISPLAY:
+    case ThreadPriority::REALTIME_AUDIO:
+      return thread_priority;
+    default:
+      NOTREACHED() << "Unknown priority.";
+      return ThreadPriority::NORMAL;
+  }
+}
+
+size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
+#if defined(OS_IOS)
+  return 0;
+#else
+  // The Mac OS X default for a pthread stack size is 512kB.
+  // Libc-594.1.4/pthreads/pthread.c's pthread_attr_init uses
+  // DEFAULT_STACK_SIZE for this purpose.
+  //
+  // 512kB isn't quite generous enough for some deeply recursive threads that
+  // otherwise request the default stack size by specifying 0. Here, adopt
+  // glibc's behavior as on Linux, which is to use the current stack size
+  // limit (ulimit -s) as the default stack size. See
+  // glibc-2.11.1/nptl/nptl-init.c's __pthread_initialize_minimal_internal. To
+  // avoid setting the limit below the Mac OS X default or the minimum usable
+  // stack size, these values are also considered. If any of these values
+  // can't be determined, or if stack size is unlimited (ulimit -s unlimited),
+  // stack_size is left at 0 to get the system default.
+  //
+  // Mac OS X normally only applies ulimit -s to the main thread stack. On
+  // contemporary OS X and Linux systems alike, this value is generally 8MB
+  // or in that neighborhood.
+  size_t default_stack_size = 0;
+  struct rlimit stack_rlimit;
+  if (pthread_attr_getstacksize(&attributes, &default_stack_size) == 0 &&
+      getrlimit(RLIMIT_STACK, &stack_rlimit) == 0 &&
+      stack_rlimit.rlim_cur != RLIM_INFINITY) {
+    // Take the largest of the attr default, PTHREAD_STACK_MIN, and ulimit -s.
+    default_stack_size =
+        std::max(std::max(default_stack_size,
+                          static_cast<size_t>(PTHREAD_STACK_MIN)),
+                 static_cast<size_t>(stack_rlimit.rlim_cur));
+  }
+  return default_stack_size;
+#endif
+}
+
+// Per-thread teardown hook invoked at the end of ThreadFunc(); no-op on Mac.
+void TerminateOnThread() {
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/platform_thread_posix.cc b/libchrome/base/threading/platform_thread_posix.cc
new file mode 100644
index 0000000..2321b3c
--- /dev/null
+++ b/libchrome/base/threading/platform_thread_posix.cc
@@ -0,0 +1,271 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/platform_thread.h"
+
+#include <errno.h>
+#include <pthread.h>
+#include <sched.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+
+#include <memory>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/platform_thread_internal_posix.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+#if defined(OS_LINUX)
+#include <sys/syscall.h>
+#elif defined(OS_ANDROID)
+#include <sys/types.h>
+#endif
+
+namespace base {
+
+void InitThreading();
+void TerminateOnThread();
+size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes);
+
+namespace {
+
+// Bundle of arguments handed (heap-allocated) from CreateThread() to
+// ThreadFunc(), which takes ownership and deletes it after unpacking.
+struct ThreadParams {
+  ThreadParams()
+      // nullptr for consistency with the rest of this file (ThreadFunc).
+      : delegate(nullptr), joinable(false), priority(ThreadPriority::NORMAL) {}
+
+  PlatformThread::Delegate* delegate;  // Not owned; outlives the new thread.
+  bool joinable;                       // False for detached threads.
+  ThreadPriority priority;             // Applied on the new thread at startup.
+};
+
+// pthread entry point for all threads created by CreateThread(). Takes
+// ownership of |params| (a heap-allocated ThreadParams), applies the thread
+// priority, registers the thread's name/id, runs the delegate, then cleans up.
+void* ThreadFunc(void* params) {
+  PlatformThread::Delegate* delegate = nullptr;
+
+  {
+    // Scope ensures ThreadParams is freed before the delegate runs.
+    std::unique_ptr<ThreadParams> thread_params(
+        static_cast<ThreadParams*>(params));
+
+    delegate = thread_params->delegate;
+    if (!thread_params->joinable)
+      base::ThreadRestrictions::SetSingletonAllowed(false);
+
+#if !defined(OS_NACL)
+    // Threads on linux/android may inherit their priority from the thread
+    // where they were created. This explicitly sets the priority of all new
+    // threads.
+    PlatformThread::SetCurrentThreadPriority(thread_params->priority);
+#endif
+  }
+
+  ThreadIdNameManager::GetInstance()->RegisterThread(
+      PlatformThread::CurrentHandle().platform_handle(),
+      PlatformThread::CurrentId());
+
+  delegate->ThreadMain();
+
+  ThreadIdNameManager::GetInstance()->RemoveName(
+      PlatformThread::CurrentHandle().platform_handle(),
+      PlatformThread::CurrentId());
+
+  base::TerminateOnThread();
+  // nullptr rather than NULL, matching the file's prevailing style.
+  return nullptr;
+}
+
+// Shared implementation behind CreateWithPriority() and CreateNonJoinable().
+// On success, ownership of a heap-allocated ThreadParams passes to the new
+// thread; on failure it is freed here and *thread_handle is set to a null
+// handle. Returns whether pthread_create succeeded.
+bool CreateThread(size_t stack_size,
+                  bool joinable,
+                  PlatformThread::Delegate* delegate,
+                  PlatformThreadHandle* thread_handle,
+                  ThreadPriority priority) {
+  DCHECK(thread_handle);
+  base::InitThreading();
+
+  pthread_attr_t attributes;
+  pthread_attr_init(&attributes);
+
+  // Pthreads are joinable by default, so only specify the detached
+  // attribute if the thread should be non-joinable.
+  if (!joinable)
+    pthread_attr_setdetachstate(&attributes, PTHREAD_CREATE_DETACHED);
+
+  // Get a better default if available.
+  if (stack_size == 0)
+    stack_size = base::GetDefaultThreadStackSize(attributes);
+
+  if (stack_size > 0)
+    pthread_attr_setstacksize(&attributes, stack_size);
+
+  std::unique_ptr<ThreadParams> params(new ThreadParams);
+  params->delegate = delegate;
+  params->joinable = joinable;
+  params->priority = priority;
+
+  pthread_t handle;
+  int err = pthread_create(&handle, &attributes, ThreadFunc, params.get());
+  bool success = !err;
+  if (success) {
+    // ThreadParams should be deleted on the created thread after used.
+    ignore_result(params.release());
+  } else {
+    // Value of |handle| is undefined if pthread_create fails.
+    handle = 0;
+    // pthread_create returns the error rather than setting errno.
+    errno = err;
+    PLOG(ERROR) << "pthread_create";
+  }
+  *thread_handle = PlatformThreadHandle(handle);
+
+  pthread_attr_destroy(&attributes);
+
+  return success;
+}
+
+} // namespace
+
+// static
+PlatformThreadId PlatformThread::CurrentId() {
+  // Pthreads doesn't have the concept of a thread ID, so we have to reach down
+  // into the kernel. Each branch yields a value unique per live thread on
+  // that platform.
+#if defined(OS_MACOSX)
+  return pthread_mach_thread_np(pthread_self());
+#elif defined(OS_LINUX)
+  // glibc has no gettid() wrapper here; issue the syscall directly.
+  return syscall(__NR_gettid);
+#elif defined(OS_ANDROID)
+  return gettid();
+#elif defined(OS_SOLARIS) || defined(OS_QNX)
+  return pthread_self();
+#elif defined(OS_NACL) && defined(__GLIBC__)
+  return pthread_self();
+#elif defined(OS_NACL) && !defined(__GLIBC__)
+  // Pointers are 32-bits in NaCl.
+  return reinterpret_cast<int32_t>(pthread_self());
+#elif defined(OS_POSIX)
+  return reinterpret_cast<int64_t>(pthread_self());
+#endif
+}
+
+// static
+// Returns an opaque, comparable reference to the current thread.
+PlatformThreadRef PlatformThread::CurrentRef() {
+  return PlatformThreadRef(pthread_self());
+}
+
+// static
+// Returns the joinable/detachable handle of the current thread.
+PlatformThreadHandle PlatformThread::CurrentHandle() {
+  return PlatformThreadHandle(pthread_self());
+}
+
+// static
+// Yields the processor, letting the scheduler pick another runnable thread.
+void PlatformThread::YieldCurrentThread() {
+  sched_yield();
+}
+
+// static
+void PlatformThread::Sleep(TimeDelta duration) {
+  struct timespec sleep_time, remaining;
+
+  // Break the duration into seconds and nanoseconds.
+  // NOTE: TimeDelta's microseconds are int64s while timespec's
+  // nanoseconds are longs, so this unpacking must prevent overflow.
+  sleep_time.tv_sec = duration.InSeconds();
+  duration -= TimeDelta::FromSeconds(sleep_time.tv_sec);
+  sleep_time.tv_nsec = duration.InMicroseconds() * 1000;  // nanoseconds
+
+  // Resume sleeping for the remaining time whenever a signal interrupts us,
+  // so the full duration elapses even under EINTR.
+  while (nanosleep(&sleep_time, &remaining) == -1 && errno == EINTR)
+    sleep_time = remaining;
+}
+
+// static
+// Returns the name previously registered for this thread via SetName(), or
+// an empty string if none was set.
+const char* PlatformThread::GetName() {
+  return ThreadIdNameManager::GetInstance()->GetName(CurrentId());
+}
+
+// static
+// Creates a joinable thread running |delegate| at |priority|; the caller must
+// eventually Join() the returned handle.
+bool PlatformThread::CreateWithPriority(size_t stack_size, Delegate* delegate,
+                                        PlatformThreadHandle* thread_handle,
+                                        ThreadPriority priority) {
+  return CreateThread(stack_size, true,  // joinable thread
+                      delegate, thread_handle, priority);
+}
+
+// static
+bool PlatformThread::CreateNonJoinable(size_t stack_size, Delegate* delegate) {
+  // A detached thread's handle is never joined, so it can be discarded
+  // immediately after creation.
+  PlatformThreadHandle unused;
+  return CreateThread(stack_size, false /* non-joinable thread */, delegate,
+                      &unused, ThreadPriority::NORMAL);
+}
+
+// static
+// Blocks until the thread behind |thread_handle| exits; CHECKs on failure
+// (e.g. joining an already-joined or detached handle).
+void PlatformThread::Join(PlatformThreadHandle thread_handle) {
+  // Joining another thread may block the current thread for a long time, since
+  // the thread referred to by |thread_handle| may still be running long-lived /
+  // blocking tasks.
+  base::ThreadRestrictions::AssertIOAllowed();
+  // nullptr (not NULL) for the unused retval out-param, matching file style.
+  CHECK_EQ(0, pthread_join(thread_handle.platform_handle(), nullptr));
+}
+
+// static
+// Marks the thread detached so its resources are reclaimed on exit; the
+// handle must not be joined afterwards. CHECKs on failure.
+void PlatformThread::Detach(PlatformThreadHandle thread_handle) {
+  CHECK_EQ(0, pthread_detach(thread_handle.platform_handle()));
+}
+
+// Mac has its own Set/GetCurrentThreadPriority() implementations.
+#if !defined(OS_MACOSX)
+
+// static
+void PlatformThread::SetCurrentThreadPriority(ThreadPriority priority) {
+#if defined(OS_NACL)
+  NOTIMPLEMENTED();
+#else
+  // Give the platform-specific hook (e.g. SCHED_RR for REALTIME_AUDIO on
+  // Linux) first crack; fall back to nice values otherwise.
+  if (internal::SetCurrentThreadPriorityForPlatform(priority))
+    return;
+
+  // setpriority(2) should change the whole thread group's (i.e. process)
+  // priority. However, as stated in the bugs section of
+  // http://man7.org/linux/man-pages/man2/getpriority.2.html: "under the current
+  // Linux/NPTL implementation of POSIX threads, the nice value is a per-thread
+  // attribute". Also, 0 is preferred to the current thread id since it is
+  // equivalent but makes sandboxing easier (https://crbug.com/399473).
+  const int nice_setting = internal::ThreadPriorityToNiceValue(priority);
+  if (setpriority(PRIO_PROCESS, 0, nice_setting)) {
+    DVPLOG(1) << "Failed to set nice value of thread ("
+              << PlatformThread::CurrentId() << ") to " << nice_setting;
+  }
+#endif  // defined(OS_NACL)
+}
+
+// static
+ThreadPriority PlatformThread::GetCurrentThreadPriority() {
+#if defined(OS_NACL)
+  NOTIMPLEMENTED();
+  return ThreadPriority::NORMAL;
+#else
+  // Mirrors SetCurrentThreadPriority()'s implementation: platform-specific
+  // check first, then translate the nice value back to a ThreadPriority.
+  ThreadPriority platform_specific_priority;
+  if (internal::GetCurrentThreadPriorityForPlatform(
+          &platform_specific_priority)) {
+    return platform_specific_priority;
+  }
+
+  // Need to clear errno before calling getpriority():
+  // http://man7.org/linux/man-pages/man2/getpriority.2.html
+  // (a legitimate return value can be -1, so errno disambiguates).
+  errno = 0;
+  int nice_value = getpriority(PRIO_PROCESS, 0);
+  if (errno != 0) {
+    DVPLOG(1) << "Failed to get nice value of thread ("
+              << PlatformThread::CurrentId() << ")";
+    return ThreadPriority::NORMAL;
+  }
+
+  return internal::NiceValueToThreadPriority(nice_value);
+#endif  // defined(OS_NACL)
+}
+
+#endif // !defined(OS_MACOSX)
+
+} // namespace base
diff --git a/libchrome/base/threading/platform_thread_unittest.cc b/libchrome/base/threading/platform_thread_unittest.cc
new file mode 100644
index 0000000..2d99ed8
--- /dev/null
+++ b/libchrome/base/threading/platform_thread_unittest.cc
@@ -0,0 +1,365 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_POSIX)
+#include <sys/types.h>
+#include <unistd.h>
+#include "base/threading/platform_thread_internal_posix.h"
+#elif defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+
+// Trivial tests that thread runs and doesn't crash on create, join, or detach -
+
+namespace {
+
+// Minimal delegate whose only observable behavior is signaling |run_event_|
+// from the new thread, letting tests confirm the thread actually ran.
+class TrivialThread : public PlatformThread::Delegate {
+ public:
+  TrivialThread() : run_event_(WaitableEvent::ResetPolicy::MANUAL,
+                               WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+  void ThreadMain() override { run_event_.Signal(); }
+
+  // Signaled once ThreadMain() has executed.
+  WaitableEvent& run_event() { return run_event_; }
+
+ private:
+  WaitableEvent run_event_;
+
+  DISALLOW_COPY_AND_ASSIGN(TrivialThread);
+};
+
+} // namespace
+
+// Creates one thread and joins it; the run event must be signaled by the
+// time Join() returns.
+TEST(PlatformThreadTest, TrivialJoin) {
+  TrivialThread thread;
+  PlatformThreadHandle handle;
+
+  ASSERT_FALSE(thread.run_event().IsSignaled());
+  ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+  PlatformThread::Join(handle);
+  ASSERT_TRUE(thread.run_event().IsSignaled());
+}
+
+// Same as TrivialJoin but with ten concurrent threads.
+TEST(PlatformThreadTest, TrivialJoinTimesTen) {
+  TrivialThread thread[10];
+  PlatformThreadHandle handle[arraysize(thread)];
+
+  for (size_t n = 0; n < arraysize(thread); n++)
+    ASSERT_FALSE(thread[n].run_event().IsSignaled());
+  for (size_t n = 0; n < arraysize(thread); n++)
+    ASSERT_TRUE(PlatformThread::Create(0, &thread[n], &handle[n]));
+  for (size_t n = 0; n < arraysize(thread); n++)
+    PlatformThread::Join(handle[n]);
+  for (size_t n = 0; n < arraysize(thread); n++)
+    ASSERT_TRUE(thread[n].run_event().IsSignaled());
+}
+
+// The following detach tests are by nature racy. The run_event approximates the
+// end and termination of the thread, but threads could persist shortly after
+// the test completes.
+// Detaches a thread and waits on its run event instead of joining.
+TEST(PlatformThreadTest, TrivialDetach) {
+  TrivialThread thread;
+  PlatformThreadHandle handle;
+
+  ASSERT_FALSE(thread.run_event().IsSignaled());
+  ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+  PlatformThread::Detach(handle);
+  thread.run_event().Wait();
+}
+
+// Same as TrivialDetach but with ten concurrent detached threads.
+TEST(PlatformThreadTest, TrivialDetachTimesTen) {
+  TrivialThread thread[10];
+  PlatformThreadHandle handle[arraysize(thread)];
+
+  for (size_t n = 0; n < arraysize(thread); n++)
+    ASSERT_FALSE(thread[n].run_event().IsSignaled());
+  for (size_t n = 0; n < arraysize(thread); n++) {
+    ASSERT_TRUE(PlatformThread::Create(0, &thread[n], &handle[n]));
+    PlatformThread::Detach(handle[n]);
+  }
+  for (size_t n = 0; n < arraysize(thread); n++)
+    thread[n].run_event().Wait();
+}
+
+// Tests of basic thread functions ---------------------------------------------
+
+namespace {
+
+// Delegate that records its thread id, optionally runs a subclass-provided
+// test (RunTest()) on the new thread, and then blocks until the test marks
+// it for termination. Lifecycle: Create -> WaitForTerminationReady ->
+// MarkForTermination -> Join.
+class FunctionTestThread : public PlatformThread::Delegate {
+ public:
+  FunctionTestThread()
+      : thread_id_(kInvalidThreadId),
+        termination_ready_(WaitableEvent::ResetPolicy::MANUAL,
+                           WaitableEvent::InitialState::NOT_SIGNALED),
+        terminate_thread_(WaitableEvent::ResetPolicy::MANUAL,
+                          WaitableEvent::InitialState::NOT_SIGNALED),
+        done_(false) {}
+  ~FunctionTestThread() override {
+    EXPECT_TRUE(terminate_thread_.IsSignaled())
+        << "Need to mark thread for termination and join the underlying thread "
+        << "before destroying a FunctionTestThread as it owns the "
+        << "WaitableEvent blocking the underlying thread's main.";
+  }
+
+  // Grabs |thread_id_|, runs an optional test on that thread, signals
+  // |termination_ready_|, and then waits for |terminate_thread_| to be
+  // signaled before exiting.
+  void ThreadMain() override {
+    thread_id_ = PlatformThread::CurrentId();
+    EXPECT_NE(thread_id_, kInvalidThreadId);
+
+    // Make sure that the thread ID is the same across calls.
+    EXPECT_EQ(thread_id_, PlatformThread::CurrentId());
+
+    // Run extra tests.
+    RunTest();
+
+    termination_ready_.Signal();
+    terminate_thread_.Wait();
+
+    done_ = true;
+  }
+
+  // Valid only after WaitForTerminationReady() has returned.
+  PlatformThreadId thread_id() const {
+    EXPECT_TRUE(termination_ready_.IsSignaled()) << "Thread ID still unknown";
+    return thread_id_;
+  }
+
+  // True from the ready signal until ThreadMain() finishes.
+  bool IsRunning() const {
+    return termination_ready_.IsSignaled() && !done_;
+  }
+
+  // Blocks until this thread is started and ready to be terminated.
+  void WaitForTerminationReady() { termination_ready_.Wait(); }
+
+  // Marks this thread for termination (callers must then join this thread to be
+  // guaranteed of termination).
+  void MarkForTermination() { terminate_thread_.Signal(); }
+
+ private:
+  // Runs an optional test on the newly created thread.
+  virtual void RunTest() {}
+
+  PlatformThreadId thread_id_;
+
+  // mutable: IsSignaled() is non-const on WaitableEvent while the accessors
+  // here are const.
+  mutable WaitableEvent termination_ready_;
+  WaitableEvent terminate_thread_;
+  bool done_;
+
+  DISALLOW_COPY_AND_ASSIGN(FunctionTestThread);
+};
+
+} // namespace
+
+// Verifies a created thread gets a distinct, stable thread id and that the
+// main thread's id is unaffected.
+TEST(PlatformThreadTest, Function) {
+  PlatformThreadId main_thread_id = PlatformThread::CurrentId();
+
+  FunctionTestThread thread;
+  PlatformThreadHandle handle;
+
+  ASSERT_FALSE(thread.IsRunning());
+  ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+  thread.WaitForTerminationReady();
+  ASSERT_TRUE(thread.IsRunning());
+  EXPECT_NE(thread.thread_id(), main_thread_id);
+
+  thread.MarkForTermination();
+  PlatformThread::Join(handle);
+  ASSERT_FALSE(thread.IsRunning());
+
+  // Make sure that the thread ID is the same across calls.
+  EXPECT_EQ(main_thread_id, PlatformThread::CurrentId());
+}
+
+// As Function, but with ten live threads; additionally verifies all ids are
+// pairwise distinct while the threads coexist.
+TEST(PlatformThreadTest, FunctionTimesTen) {
+  PlatformThreadId main_thread_id = PlatformThread::CurrentId();
+
+  FunctionTestThread thread[10];
+  PlatformThreadHandle handle[arraysize(thread)];
+
+  for (size_t n = 0; n < arraysize(thread); n++)
+    ASSERT_FALSE(thread[n].IsRunning());
+
+  for (size_t n = 0; n < arraysize(thread); n++)
+    ASSERT_TRUE(PlatformThread::Create(0, &thread[n], &handle[n]));
+  for (size_t n = 0; n < arraysize(thread); n++)
+    thread[n].WaitForTerminationReady();
+
+  for (size_t n = 0; n < arraysize(thread); n++) {
+    ASSERT_TRUE(thread[n].IsRunning());
+    EXPECT_NE(thread[n].thread_id(), main_thread_id);
+
+    // Make sure no two threads get the same ID.
+    for (size_t i = 0; i < n; ++i) {
+      EXPECT_NE(thread[i].thread_id(), thread[n].thread_id());
+    }
+  }
+
+  for (size_t n = 0; n < arraysize(thread); n++)
+    thread[n].MarkForTermination();
+  for (size_t n = 0; n < arraysize(thread); n++)
+    PlatformThread::Join(handle[n]);
+  for (size_t n = 0; n < arraysize(thread); n++)
+    ASSERT_FALSE(thread[n].IsRunning());
+
+  // Make sure that the thread ID is the same across calls.
+  EXPECT_EQ(main_thread_id, PlatformThread::CurrentId());
+}
+
+namespace {
+
+// Priorities exercised by ThreadPriorityCurrentThread, in the order applied.
+const ThreadPriority kThreadPriorityTestValues[] = {
+// The order should be higher to lower to cover as many cases as possible on
+// Linux trybots running without CAP_SYS_NICE permission.
+#if !defined(OS_ANDROID)
+    // PlatformThread::GetCurrentThreadPriority() on Android does not support
+    // REALTIME_AUDIO case. See http://crbug.com/505474.
+    ThreadPriority::REALTIME_AUDIO,
+#endif
+    ThreadPriority::DISPLAY,
+    // This redundant BACKGROUND priority is to test backgrounding from other
+    // priorities, and unbackgrounding.
+    ThreadPriority::BACKGROUND,
+    ThreadPriority::NORMAL,
+    ThreadPriority::BACKGROUND};
+
+// Whether this process may raise a thread's priority at all.
+bool IsBumpingPriorityAllowed() {
+#if defined(OS_POSIX)
+  // Only root can raise thread priority on POSIX environment. On Linux, users
+  // who have CAP_SYS_NICE permission also can raise the thread priority, but
+  // libcap.so would be needed to check the capability.
+  return geteuid() == 0;
+#else
+  return true;
+#endif
+}
+
+// FunctionTestThread subclass that, on the new thread, checks the initial
+// priority is NORMAL and then sets/verifies |priority_|.
+class ThreadPriorityTestThread : public FunctionTestThread {
+ public:
+  explicit ThreadPriorityTestThread(ThreadPriority priority)
+      : priority_(priority) {}
+  ~ThreadPriorityTestThread() override = default;
+
+ private:
+  void RunTest() override {
+    // Confirm that the current thread's priority is as expected.
+    EXPECT_EQ(ThreadPriority::NORMAL,
+              PlatformThread::GetCurrentThreadPriority());
+
+    // Alter and verify the current thread's priority.
+    PlatformThread::SetCurrentThreadPriority(priority_);
+    EXPECT_EQ(priority_, PlatformThread::GetCurrentThreadPriority());
+  }
+
+  const ThreadPriority priority_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadPriorityTestThread);
+};
+
+} // namespace
+
+// Test changing a created thread's priority (which has different semantics on
+// some platforms).
+TEST(PlatformThreadTest, ThreadPriorityCurrentThread) {
+  const bool bumping_priority_allowed = IsBumpingPriorityAllowed();
+  if (bumping_priority_allowed) {
+    // Bump the priority in order to verify that new threads are started with
+    // normal priority.
+    PlatformThread::SetCurrentThreadPriority(ThreadPriority::DISPLAY);
+  }
+
+  // Toggle each supported priority on the thread and confirm it affects it.
+  for (size_t i = 0; i < arraysize(kThreadPriorityTestValues); ++i) {
+    // Without the capability to raise priority, skip values above the
+    // current priority — only lowering would succeed.
+    if (!bumping_priority_allowed &&
+        kThreadPriorityTestValues[i] >
+            PlatformThread::GetCurrentThreadPriority()) {
+      continue;
+    }
+
+    ThreadPriorityTestThread thread(kThreadPriorityTestValues[i]);
+    PlatformThreadHandle handle;
+
+    ASSERT_FALSE(thread.IsRunning());
+    ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+    thread.WaitForTerminationReady();
+    ASSERT_TRUE(thread.IsRunning());
+
+    thread.MarkForTermination();
+    PlatformThread::Join(handle);
+    ASSERT_FALSE(thread.IsRunning());
+  }
+}
+
+// Test for a function defined in platform_thread_internal_posix.cc. On OSX and
+// iOS, platform_thread_internal_posix.cc is not compiled, so these platforms
+// are excluded here, too.
+#if defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_IOS)
+// Pins the layout of kThreadPriorityToNiceValueMap and checks that
+// NiceValueToThreadPriority maps each nice-value range back to the priority
+// whose nice value bounds it (higher nice value == lower priority).
+TEST(PlatformThreadTest, GetNiceValueToThreadPriority) {
+  using internal::NiceValueToThreadPriority;
+  using internal::kThreadPriorityToNiceValueMap;
+
+  // Map entries are expected in ascending priority order.
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            kThreadPriorityToNiceValueMap[0].priority);
+  EXPECT_EQ(ThreadPriority::NORMAL,
+            kThreadPriorityToNiceValueMap[1].priority);
+  EXPECT_EQ(ThreadPriority::DISPLAY,
+            kThreadPriorityToNiceValueMap[2].priority);
+  EXPECT_EQ(ThreadPriority::REALTIME_AUDIO,
+            kThreadPriorityToNiceValueMap[3].priority);
+
+  static const int kBackgroundNiceValue =
+      kThreadPriorityToNiceValueMap[0].nice_value;
+  static const int kNormalNiceValue =
+      kThreadPriorityToNiceValueMap[1].nice_value;
+  static const int kDisplayNiceValue =
+      kThreadPriorityToNiceValueMap[2].nice_value;
+  static const int kRealtimeAudioNiceValue =
+      kThreadPriorityToNiceValueMap[3].nice_value;
+
+  // The tests below assume the nice values specified in the map are within
+  // the range below (both ends exclusive).
+  static const int kHighestNiceValue = 19;
+  static const int kLowestNiceValue = -20;
+
+  EXPECT_GT(kHighestNiceValue, kBackgroundNiceValue);
+  EXPECT_GT(kBackgroundNiceValue, kNormalNiceValue);
+  EXPECT_GT(kNormalNiceValue, kDisplayNiceValue);
+  EXPECT_GT(kDisplayNiceValue, kRealtimeAudioNiceValue);
+  EXPECT_GT(kRealtimeAudioNiceValue, kLowestNiceValue);
+
+  // Boundary checks: each priority claims the half-open range down to (and
+  // including) its own nice value.
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            NiceValueToThreadPriority(kHighestNiceValue));
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            NiceValueToThreadPriority(kBackgroundNiceValue + 1));
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            NiceValueToThreadPriority(kBackgroundNiceValue));
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            NiceValueToThreadPriority(kNormalNiceValue + 1));
+  EXPECT_EQ(ThreadPriority::NORMAL,
+            NiceValueToThreadPriority(kNormalNiceValue));
+  EXPECT_EQ(ThreadPriority::NORMAL,
+            NiceValueToThreadPriority(kDisplayNiceValue + 1));
+  EXPECT_EQ(ThreadPriority::DISPLAY,
+            NiceValueToThreadPriority(kDisplayNiceValue));
+  EXPECT_EQ(ThreadPriority::DISPLAY,
+            NiceValueToThreadPriority(kRealtimeAudioNiceValue + 1));
+  EXPECT_EQ(ThreadPriority::REALTIME_AUDIO,
+            NiceValueToThreadPriority(kRealtimeAudioNiceValue));
+  EXPECT_EQ(ThreadPriority::REALTIME_AUDIO,
+            NiceValueToThreadPriority(kLowestNiceValue));
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/post_task_and_reply_impl.cc b/libchrome/base/threading/post_task_and_reply_impl.cc
new file mode 100644
index 0000000..c906866
--- /dev/null
+++ b/libchrome/base/threading/post_task_and_reply_impl.cc
@@ -0,0 +1,94 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/post_task_and_reply_impl.h"
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+namespace base {
+
+namespace {
+
+// This relay class remembers the MessageLoop that it was created on, and
+// ensures that both the |task| and |reply| Closures are deleted on this same
+// thread. Also, |task| is guaranteed to be deleted before |reply| is run or
+// deleted.
+//
+// If this is not possible because the originating MessageLoop is no longer
+// available, the |task| and |reply| Closures are leaked. Leaking is
+// considered preferable to having thread-safety violations caused by
+// invoking the Closure destructor on the wrong thread.
+class PostTaskAndReplyRelay {
+ public:
+  // Captures |task|, |reply|, and the originating task runner. Members are
+  // initialized in the constructor's init list (in declaration order) rather
+  // than default-constructed and then assigned in the body.
+  PostTaskAndReplyRelay(const tracked_objects::Location& from_here,
+                        const Closure& task,
+                        const Closure& reply)
+      : from_here_(from_here),
+        origin_task_runner_(ThreadTaskRunnerHandle::Get()),
+        reply_(reply),
+        task_(task) {}
+
+  // Must run on the originating thread so both Closures are destroyed there.
+  ~PostTaskAndReplyRelay() {
+    DCHECK(origin_task_runner_->BelongsToCurrentThread());
+    task_.Reset();
+    reply_.Reset();
+  }
+
+  // Runs |task_| on the destination thread, then posts the reply step back
+  // to the originating thread. |this| stays alive until the reply runs (or
+  // leaks if the origin loop is gone — see the class comment).
+  void Run() {
+    task_.Run();
+    origin_task_runner_->PostTask(
+        from_here_, Bind(&PostTaskAndReplyRelay::RunReplyAndSelfDestruct,
+                         base::Unretained(this)));
+  }
+
+ private:
+  void RunReplyAndSelfDestruct() {
+    DCHECK(origin_task_runner_->BelongsToCurrentThread());
+
+    // Force |task_| to be released before |reply_| is to ensure that no one
+    // accidentally depends on |task_| keeping one of its arguments alive while
+    // |reply_| is executing.
+    task_.Reset();
+
+    reply_.Run();
+
+    // Cue mission impossible theme.
+    delete this;
+  }
+
+  tracked_objects::Location from_here_;
+  scoped_refptr<SingleThreadTaskRunner> origin_task_runner_;
+  // |reply_| is declared before |task_| so reverse destruction order destroys
+  // |task_| first, matching the documented guarantee.
+  Closure reply_;
+  Closure task_;
+};
+
+} // namespace
+
+namespace internal {
+
+// Posts |task| via the subclass's PostTask() and arranges for |reply| to run
+// back on the current thread afterwards. Returns false (and runs neither
+// Closure) if the destination refuses the task.
+bool PostTaskAndReplyImpl::PostTaskAndReply(
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    const Closure& reply) {
+  // TODO(tzik): Use DCHECK here once the crash is gone. http://crbug.com/541319
+  CHECK(!task.is_null()) << from_here.ToString();
+  CHECK(!reply.is_null()) << from_here.ToString();
+  // The relay owns itself; it is reclaimed by RunReplyAndSelfDestruct(), or
+  // deleted here immediately if the initial post fails.
+  PostTaskAndReplyRelay* relay =
+      new PostTaskAndReplyRelay(from_here, task, reply);
+  if (!PostTask(from_here, Bind(&PostTaskAndReplyRelay::Run,
+                                Unretained(relay)))) {
+    delete relay;
+    return false;
+  }
+
+  return true;
+}
+
+} // namespace internal
+
+} // namespace base
diff --git a/libchrome/base/threading/post_task_and_reply_impl.h b/libchrome/base/threading/post_task_and_reply_impl.h
new file mode 100644
index 0000000..d21ab78
--- /dev/null
+++ b/libchrome/base/threading/post_task_and_reply_impl.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the implementation shared by
+// TaskRunner::PostTaskAndReply and WorkerPool::PostTaskAndReply.
+
+#ifndef BASE_THREADING_POST_TASK_AND_REPLY_IMPL_H_
+#define BASE_THREADING_POST_TASK_AND_REPLY_IMPL_H_
+
+#include "base/callback_forward.h"
+#include "base/location.h"
+
+namespace base {
+namespace internal {
+
+// Inherit from this in a class that implements PostTask appropriately
+// for sending to a destination thread.
+//
+// Note that 'reply' will always get posted back to your current
+// MessageLoop.
+//
+// If you're looking for a concrete implementation of
+// PostTaskAndReply, you probably want base::SingleThreadTaskRunner, or you
+// may want base::WorkerPool.
+class PostTaskAndReplyImpl {
+ public:
+  virtual ~PostTaskAndReplyImpl() = default;
+
+  // Implementation for TaskRunner::PostTaskAndReply and
+  // WorkerPool::PostTaskAndReply.
+  // Posts |task| to the destination via the virtual PostTask() below and
+  // arranges for |reply| to run on the calling thread afterwards. Returns
+  // false if the destination rejected |task|.
+  bool PostTaskAndReply(const tracked_objects::Location& from_here,
+                        const Closure& task,
+                        const Closure& reply);
+
+ private:
+  // Customization point: subclasses post |task| to their destination
+  // thread/pool, returning false on failure.
+  virtual bool PostTask(const tracked_objects::Location& from_here,
+                        const Closure& task) = 0;
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_THREADING_POST_TASK_AND_REPLY_IMPL_H_
diff --git a/libchrome/base/threading/sequenced_task_runner_handle.cc b/libchrome/base/threading/sequenced_task_runner_handle.cc
new file mode 100644
index 0000000..88b36a8
--- /dev/null
+++ b/libchrome/base/threading/sequenced_task_runner_handle.cc
@@ -0,0 +1,68 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/sequenced_task_runner_handle.h"
+
+#include <utility>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/sequenced_worker_pool.h"
+#include "base/threading/thread_local.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+namespace base {
+
+namespace {
+
+// Thread-local slot recording the SequencedTaskRunnerHandle (if any) bound
+// to the current thread. Leaky: intentionally never destroyed at shutdown.
+base::LazyInstance<base::ThreadLocalPointer<SequencedTaskRunnerHandle>>::Leaky
+    lazy_tls_ptr = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+// static
+// Resolution order: explicit handle in TLS, then the SequencedWorkerPool's
+// per-worker runner, then the thread's SingleThreadTaskRunner.
+scoped_refptr<SequencedTaskRunner> SequencedTaskRunnerHandle::Get() {
+  // Return the registered SequencedTaskRunner, if any.
+  const SequencedTaskRunnerHandle* handle = lazy_tls_ptr.Pointer()->Get();
+  if (handle) {
+    // Various modes of setting SequencedTaskRunnerHandle don't combine.
+    DCHECK(!base::ThreadTaskRunnerHandle::IsSet());
+    DCHECK(!SequencedWorkerPool::GetSequencedTaskRunnerForCurrentThread());
+    return handle->task_runner_;
+  }
+
+  // Return the SequencedTaskRunner obtained from SequencedWorkerPool, if any.
+  scoped_refptr<base::SequencedTaskRunner> task_runner =
+      SequencedWorkerPool::GetSequencedTaskRunnerForCurrentThread();
+  if (task_runner) {
+    DCHECK(!base::ThreadTaskRunnerHandle::IsSet());
+    return task_runner;
+  }
+
+  // Return the SingleThreadTaskRunner for the current thread otherwise.
+  return base::ThreadTaskRunnerHandle::Get();
+}
+
+// static
+bool SequencedTaskRunnerHandle::IsSet() {
+  return lazy_tls_ptr.Pointer()->Get() ||
+         SequencedWorkerPool::GetWorkerPoolForCurrentThread() ||
+         base::ThreadTaskRunnerHandle::IsSet();
+}
+
+// Binds |task_runner| to the current thread via TLS. The DCHECKs enforce
+// that |task_runner| runs tasks on this thread and that no other task-runner
+// source is already registered here.
+SequencedTaskRunnerHandle::SequencedTaskRunnerHandle(
+    scoped_refptr<SequencedTaskRunner> task_runner)
+    : task_runner_(std::move(task_runner)) {
+  DCHECK(task_runner_->RunsTasksOnCurrentThread());
+  DCHECK(!SequencedTaskRunnerHandle::IsSet());
+  lazy_tls_ptr.Pointer()->Set(this);
+}
+
+// Unbinds; DCHECKs that this handle is still the one registered in TLS.
+SequencedTaskRunnerHandle::~SequencedTaskRunnerHandle() {
+  DCHECK(task_runner_->RunsTasksOnCurrentThread());
+  DCHECK_EQ(lazy_tls_ptr.Pointer()->Get(), this);
+  lazy_tls_ptr.Pointer()->Set(nullptr);
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/sequenced_task_runner_handle.h b/libchrome/base/threading/sequenced_task_runner_handle.h
new file mode 100644
index 0000000..e6dec1e
--- /dev/null
+++ b/libchrome/base/threading/sequenced_task_runner_handle.h
@@ -0,0 +1,45 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_SEQUENCED_TASK_RUNNER_HANDLE_H_
+#define BASE_THREADING_SEQUENCED_TASK_RUNNER_HANDLE_H_
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
+
+namespace base {
+
+class BASE_EXPORT SequencedTaskRunnerHandle {
+ public:
+  // Returns a SequencedTaskRunner which guarantees that posted tasks will only
+  // run after the current task is finished and will satisfy a SequenceChecker.
+  // It should only be called if IsSet() returns true (see the comment there for
+  // the requirements).
+  static scoped_refptr<SequencedTaskRunner> Get();
+
+  // Returns true if one of the following conditions is fulfilled:
+  // a) A SequencedTaskRunner has been assigned to the current thread by
+  //    instantiating a SequencedTaskRunnerHandle.
+  // b) The current thread has a ThreadTaskRunnerHandle (which includes any
+  //    thread that has a MessageLoop associated with it), or
+  // c) The current thread is a worker thread belonging to a
+  //    SequencedWorkerPool.
+  static bool IsSet();
+
+  // Binds |task_runner| to the current thread.
+  explicit SequencedTaskRunnerHandle(
+      scoped_refptr<SequencedTaskRunner> task_runner);
+  // Unbinds. Must run on a thread where |task_runner| runs tasks (the
+  // destructor DCHECKs this).
+  ~SequencedTaskRunnerHandle();
+
+ private:
+  scoped_refptr<SequencedTaskRunner> task_runner_;
+
+  DISALLOW_COPY_AND_ASSIGN(SequencedTaskRunnerHandle);
+};
+
+} // namespace base
+
+#endif // BASE_THREADING_SEQUENCED_TASK_RUNNER_HANDLE_H_
diff --git a/libchrome/base/threading/sequenced_worker_pool.cc b/libchrome/base/threading/sequenced_worker_pool.cc
new file mode 100644
index 0000000..57961b5
--- /dev/null
+++ b/libchrome/base/threading/sequenced_worker_pool.cc
@@ -0,0 +1,1408 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/sequenced_worker_pool.h"
+
+#include <stdint.h>
+
+#include <list>
+#include <map>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "base/atomic_sequence_num.h"
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/critical_closure.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/stl_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "base/threading/thread_local.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+#include "base/trace_event/heap_profiler.h"
+#include "base/trace_event/trace_event.h"
+#include "base/tracked_objects.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/scoped_nsautorelease_pool.h"
+#elif defined(OS_WIN)
+#include "base/win/scoped_com_initializer.h"
+#endif
+
+#if !defined(OS_NACL)
+#include "base/metrics/histogram.h"
+#endif
+
+namespace base {
+
+namespace {
+
+// A posted task plus the bookkeeping the pool needs to schedule it.
+// Inherits TrackingInfo so tracked_objects can attribute the task to its
+// posting site.
+struct SequencedTask : public TrackingInfo {
+  SequencedTask()
+      : sequence_token_id(0),
+        trace_id(0),
+        sequence_task_number(0),
+        shutdown_behavior(SequencedWorkerPool::BLOCK_SHUTDOWN) {}
+
+  explicit SequencedTask(const tracked_objects::Location& from_here)
+      : base::TrackingInfo(from_here, TimeTicks()),
+        sequence_token_id(0),
+        trace_id(0),
+        sequence_task_number(0),
+        shutdown_behavior(SequencedWorkerPool::BLOCK_SHUTDOWN) {}
+
+  ~SequencedTask() {}
+
+  // 0 is the sentinel for "unsequenced" (see Inner::GetSequenceToken).
+  int sequence_token_id;
+  int trace_id;
+  int64_t sequence_task_number;
+  SequencedWorkerPool::WorkerShutdown shutdown_behavior;
+  tracked_objects::Location posted_from;
+  Closure task;
+
+  // Non-delayed tasks and delayed tasks are managed together by time-to-run
+  // order. We calculate the time by adding the posted time and the given delay.
+  TimeTicks time_to_run;
+};
+
+// Strict weak ordering for the pending-task set: earlier |time_to_run|
+// first, ties broken by posting order via |sequence_task_number|.
+struct SequencedTaskLessThan {
+ public:
+  bool operator()(const SequencedTask& lhs, const SequencedTask& rhs) const {
+    if (lhs.time_to_run < rhs.time_to_run)
+      return true;
+
+    if (lhs.time_to_run > rhs.time_to_run)
+      return false;
+
+    // If the times happen to match, then we use the sequence number to decide.
+    return lhs.sequence_task_number < rhs.sequence_task_number;
+  }
+};
+
+// SequencedWorkerPoolTaskRunner ---------------------------------------------
+// A TaskRunner which posts tasks to a SequencedWorkerPool with a
+// fixed ShutdownBehavior.
+//
+// Note that this class is RefCountedThreadSafe (inherited from TaskRunner).
+class SequencedWorkerPoolTaskRunner : public TaskRunner {
+ public:
+  SequencedWorkerPoolTaskRunner(
+      scoped_refptr<SequencedWorkerPool> pool,
+      SequencedWorkerPool::WorkerShutdown shutdown_behavior);
+
+  // TaskRunner implementation
+  bool PostDelayedTask(const tracked_objects::Location& from_here,
+                       const Closure& task,
+                       TimeDelta delay) override;
+  bool RunsTasksOnCurrentThread() const override;
+
+ private:
+  ~SequencedWorkerPoolTaskRunner() override;
+
+  const scoped_refptr<SequencedWorkerPool> pool_;
+
+  const SequencedWorkerPool::WorkerShutdown shutdown_behavior_;
+
+  DISALLOW_COPY_AND_ASSIGN(SequencedWorkerPoolTaskRunner);
+};
+
+SequencedWorkerPoolTaskRunner::SequencedWorkerPoolTaskRunner(
+    scoped_refptr<SequencedWorkerPool> pool,
+    SequencedWorkerPool::WorkerShutdown shutdown_behavior)
+    : pool_(std::move(pool)), shutdown_behavior_(shutdown_behavior) {}
+
+SequencedWorkerPoolTaskRunner::~SequencedWorkerPoolTaskRunner() {
+}
+
+// Zero-delay posts carry this runner's |shutdown_behavior_|; delayed posts
+// are forwarded to PostDelayedWorkerTask, which takes no shutdown behavior.
+bool SequencedWorkerPoolTaskRunner::PostDelayedTask(
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    TimeDelta delay) {
+  if (delay.is_zero()) {
+    return pool_->PostWorkerTaskWithShutdownBehavior(
+        from_here, task, shutdown_behavior_);
+  }
+  return pool_->PostDelayedWorkerTask(from_here, task, delay);
+}
+
+bool SequencedWorkerPoolTaskRunner::RunsTasksOnCurrentThread() const {
+  return pool_->RunsTasksOnCurrentThread();
+}
+
+// SequencedWorkerPoolSequencedTaskRunner ------------------------------------
+// A SequencedTaskRunner which posts tasks to a SequencedWorkerPool with a
+// fixed sequence token.
+//
+// Note that this class is RefCountedThreadSafe (inherited from TaskRunner).
+class SequencedWorkerPoolSequencedTaskRunner : public SequencedTaskRunner {
+ public:
+  SequencedWorkerPoolSequencedTaskRunner(
+      scoped_refptr<SequencedWorkerPool> pool,
+      SequencedWorkerPool::SequenceToken token,
+      SequencedWorkerPool::WorkerShutdown shutdown_behavior);
+
+  // TaskRunner implementation
+  bool PostDelayedTask(const tracked_objects::Location& from_here,
+                       const Closure& task,
+                       TimeDelta delay) override;
+  bool RunsTasksOnCurrentThread() const override;
+
+  // SequencedTaskRunner implementation
+  bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
+                                  const Closure& task,
+                                  TimeDelta delay) override;
+
+ private:
+  ~SequencedWorkerPoolSequencedTaskRunner() override;
+
+  const scoped_refptr<SequencedWorkerPool> pool_;
+
+  const SequencedWorkerPool::SequenceToken token_;
+
+  const SequencedWorkerPool::WorkerShutdown shutdown_behavior_;
+
+  DISALLOW_COPY_AND_ASSIGN(SequencedWorkerPoolSequencedTaskRunner);
+};
+
+SequencedWorkerPoolSequencedTaskRunner::SequencedWorkerPoolSequencedTaskRunner(
+    scoped_refptr<SequencedWorkerPool> pool,
+    SequencedWorkerPool::SequenceToken token,
+    SequencedWorkerPool::WorkerShutdown shutdown_behavior)
+    : pool_(std::move(pool)),
+      token_(token),
+      shutdown_behavior_(shutdown_behavior) {}
+
+SequencedWorkerPoolSequencedTaskRunner::
+~SequencedWorkerPoolSequencedTaskRunner() {
+}
+
+// Zero-delay posts carry this runner's |shutdown_behavior_|; delayed posts
+// are forwarded to PostDelayedSequencedWorkerTask, which takes no shutdown
+// behavior. Both paths preserve the sequence |token_|.
+bool SequencedWorkerPoolSequencedTaskRunner::PostDelayedTask(
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    TimeDelta delay) {
+  if (delay.is_zero()) {
+    return pool_->PostSequencedWorkerTaskWithShutdownBehavior(
+        token_, from_here, task, shutdown_behavior_);
+  }
+  return pool_->PostDelayedSequencedWorkerTask(token_, from_here, task, delay);
+}
+
+bool SequencedWorkerPoolSequencedTaskRunner::RunsTasksOnCurrentThread() const {
+  return pool_->IsRunningSequenceOnCurrentThread(token_);
+}
+
+bool SequencedWorkerPoolSequencedTaskRunner::PostNonNestableDelayedTask(
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    TimeDelta delay) {
+  // There's no way to run nested tasks, so simply forward to
+  // PostDelayedTask.
+  return PostDelayedTask(from_here, task, delay);
+}
+
+// Create a process-wide unique ID to represent this task in trace events. This
+// will be mangled with a Process ID hash to reduce the likelihood of colliding
+// with MessageLoop pointers on other processes.
+// Layout: |trace_id| in the high 32 bits, the pool's address in the low bits.
+uint64_t GetTaskTraceID(const SequencedTask& task, void* pool) {
+  return (static_cast<uint64_t>(task.trace_id) << 32) |
+         static_cast<uint64_t>(reinterpret_cast<intptr_t>(pool));
+}
+
+} // namespace
+
+// Worker ---------------------------------------------------------------------
+
+class SequencedWorkerPool::Worker : public SimpleThread {
+ public:
+  // Hold a (cyclic) ref to |worker_pool|, since we want to keep it
+  // around as long as we are running.
+  Worker(scoped_refptr<SequencedWorkerPool> worker_pool,
+         int thread_number,
+         const std::string& thread_name_prefix);
+  ~Worker() override;
+
+  // SimpleThread implementation. This actually runs the background thread.
+  void Run() override;
+
+  // Gets the worker for the current thread out of thread-local storage.
+  static Worker* GetForCurrentThread();
+
+  // Indicates that a task is about to be run. The parameters provide
+  // additional metainformation about the task being run.
+  void set_running_task_info(SequenceToken token,
+                             WorkerShutdown shutdown_behavior) {
+    is_processing_task_ = true;
+    task_sequence_token_ = token;
+    task_shutdown_behavior_ = shutdown_behavior;
+  }
+
+  // Indicates that the task has finished running.
+  void reset_running_task_info() { is_processing_task_ = false; }
+
+  // Whether the worker is processing a task.
+  bool is_processing_task() { return is_processing_task_; }
+
+  SequenceToken task_sequence_token() const {
+    DCHECK(is_processing_task_);
+    return task_sequence_token_;
+  }
+
+  WorkerShutdown task_shutdown_behavior() const {
+    DCHECK(is_processing_task_);
+    return task_shutdown_behavior_;
+  }
+
+  scoped_refptr<SequencedWorkerPool> worker_pool() const {
+    return worker_pool_;
+  }
+
+ private:
+  // Per-thread pointer back to the Worker running on that thread; set in
+  // Run() and read by GetForCurrentThread(). Leaky by design.
+  static LazyInstance<ThreadLocalPointer<SequencedWorkerPool::Worker>>::Leaky
+      lazy_tls_ptr_;
+
+  scoped_refptr<SequencedWorkerPool> worker_pool_;
+  // The sequence token of the task being processed. Only valid when
+  // is_processing_task_ is true.
+  SequenceToken task_sequence_token_;
+  // The shutdown behavior of the task being processed. Only valid when
+  // is_processing_task_ is true.
+  WorkerShutdown task_shutdown_behavior_;
+  // Whether the Worker is processing a task.
+  bool is_processing_task_;
+
+  DISALLOW_COPY_AND_ASSIGN(Worker);
+};
+
+// Inner ----------------------------------------------------------------------
+
+// Owns and coordinates all pool state (task queue, worker threads, shutdown
+// and cleanup bookkeeping). Unless a member's comment says otherwise, all
+// state below is guarded by |lock_|.
+class SequencedWorkerPool::Inner {
+ public:
+  // Take a raw pointer to |worker| to avoid cycles (since we're owned
+  // by it).
+  Inner(SequencedWorkerPool* worker_pool, size_t max_threads,
+        const std::string& thread_name_prefix,
+        TestingObserver* observer);
+
+  ~Inner();
+
+  static SequenceToken GetSequenceToken();
+
+  SequenceToken GetNamedSequenceToken(const std::string& name);
+
+  // This function accepts a name and an ID. If the name is null, the
+  // token ID is used. This allows us to implement the optional name lookup
+  // from a single function without having to enter the lock a separate time.
+  bool PostTask(const std::string* optional_token_name,
+                SequenceToken sequence_token,
+                WorkerShutdown shutdown_behavior,
+                const tracked_objects::Location& from_here,
+                const Closure& task,
+                TimeDelta delay);
+
+  bool RunsTasksOnCurrentThread() const;
+
+  bool IsRunningSequenceOnCurrentThread(SequenceToken sequence_token) const;
+
+  bool IsRunningSequence(SequenceToken sequence_token) const;
+
+  void SetRunningTaskInfoForCurrentThread(SequenceToken sequence_token,
+                                          WorkerShutdown shutdown_behavior);
+
+  void CleanupForTesting();
+
+  void SignalHasWorkForTesting();
+
+  int GetWorkSignalCountForTesting() const;
+
+  void Shutdown(int max_blocking_tasks_after_shutdown);
+
+  bool IsShutdownInProgress();
+
+  // Runs the worker loop on the background thread.
+  void ThreadLoop(Worker* this_worker);
+
+ private:
+  enum GetWorkStatus {
+    GET_WORK_FOUND,
+    GET_WORK_NOT_FOUND,
+    GET_WORK_WAIT,
+  };
+
+  enum CleanupState {
+    CLEANUP_REQUESTED,
+    CLEANUP_STARTING,
+    CLEANUP_RUNNING,
+    CLEANUP_FINISHING,
+    CLEANUP_DONE,
+  };
+
+  // Called from within the lock, this converts the given token name into a
+  // token ID, creating a new one if necessary.
+  int LockedGetNamedTokenID(const std::string& name);
+
+  // Called from within the lock, this returns the next sequence task number.
+  int64_t LockedGetNextSequenceTaskNumber();
+
+  // Gets new task. There are 3 cases depending on the return value:
+  //
+  // 1) If the return value is |GET_WORK_FOUND|, |task| is filled in and should
+  //    be run immediately.
+  // 2) If the return value is |GET_WORK_NOT_FOUND|, there are no tasks to run,
+  //    and |task| is not filled in. In this case, the caller should wait until
+  //    a task is posted.
+  // 3) If the return value is |GET_WORK_WAIT|, there are no tasks to run
+  //    immediately, and |task| is not filled in. Likewise, |wait_time| is
+  //    filled in the time to wait until the next task to run. In this case, the
+  //    caller should wait the time.
+  //
+  // In any case, the calling code should clear the given
+  // delete_these_outside_lock vector the next time the lock is released.
+  // See the implementation for a more detailed description.
+  GetWorkStatus GetWork(SequencedTask* task,
+                        TimeDelta* wait_time,
+                        std::vector<Closure>* delete_these_outside_lock);
+
+  void HandleCleanup();
+
+  // Performs init and cleanup around running the given task. WillRun...
+  // returns the value from PrepareToStartAdditionalThreadIfNecessary.
+  // The calling code should call FinishStartingAdditionalThread once the
+  // lock is released if the return values is nonzero.
+  int WillRunWorkerTask(const SequencedTask& task);
+  void DidRunWorkerTask(const SequencedTask& task);
+
+  // Returns true if there are no threads currently running the given
+  // sequence token.
+  bool IsSequenceTokenRunnable(int sequence_token_id) const;
+
+  // Checks if all threads are busy and the addition of one more could run an
+  // additional task waiting in the queue. This must be called from within
+  // the lock.
+  //
+  // If another thread is helpful, this will mark the thread as being in the
+  // process of starting and returns the index of the new thread which will be
+  // 0 or more. The caller should then call FinishStartingAdditionalThread to
+  // complete initialization once the lock is released.
+  //
+  // If another thread is not necessary, returns 0.
+  //
+  // See the implementation for more.
+  int PrepareToStartAdditionalThreadIfHelpful();
+
+  // The second part of thread creation after
+  // PrepareToStartAdditionalThreadIfHelpful with the thread number it
+  // generated. This actually creates the thread and should be called outside
+  // the lock to avoid blocking important work starting a thread in the lock.
+  void FinishStartingAdditionalThread(int thread_number);
+
+  // Signal |has_work_| and increment |has_work_signal_count_|.
+  void SignalHasWork();
+
+  // Checks whether there is work left that's blocking shutdown. Must be
+  // called inside the lock.
+  bool CanShutdown() const;
+
+  SequencedWorkerPool* const worker_pool_;
+
+  // The last sequence number used. Managed by GetSequenceToken, since this
+  // only does threadsafe increment operations, you do not need to hold the
+  // lock. This is class-static to make SequenceTokens issued by
+  // GetSequenceToken unique across SequencedWorkerPool instances.
+  static base::StaticAtomicSequenceNumber g_last_sequence_number_;
+
+  // This lock protects |everything in this class|. Do not read or modify
+  // anything without holding this lock. Do not block while holding this
+  // lock.
+  mutable Lock lock_;
+
+  // Condition variable that is waited on by worker threads until new
+  // tasks are posted or shutdown starts.
+  ConditionVariable has_work_cv_;
+
+  // Condition variable that is waited on by non-worker threads (in
+  // Shutdown()) until CanShutdown() goes to true.
+  ConditionVariable can_shutdown_cv_;
+
+  // The maximum number of worker threads we'll create.
+  const size_t max_threads_;
+
+  const std::string thread_name_prefix_;
+
+  // Associates all known sequence token names with their IDs.
+  std::map<std::string, int> named_sequence_tokens_;
+
+  // Owning pointers to all threads we've created so far, indexed by
+  // ID. Since we lazily create threads, this may be less than
+  // max_threads_ and will be initially empty.
+  using ThreadMap = std::map<PlatformThreadId, std::unique_ptr<Worker>>;
+  ThreadMap threads_;
+
+  // Set to true when we're in the process of creating another thread.
+  // See PrepareToStartAdditionalThreadIfHelpful for more.
+  bool thread_being_created_;
+
+  // Number of threads currently waiting for work.
+  size_t waiting_thread_count_;
+
+  // Number of threads currently running tasks that have the BLOCK_SHUTDOWN
+  // or SKIP_ON_SHUTDOWN flag set.
+  size_t blocking_shutdown_thread_count_;
+
+  // A set of all pending tasks in time-to-run order. These are tasks that are
+  // either waiting for a thread to run on, waiting for their time to run,
+  // or blocked on a previous task in their sequence. We have to iterate over
+  // the tasks by time-to-run order, so we use the set instead of the
+  // traditional priority_queue.
+  typedef std::set<SequencedTask, SequencedTaskLessThan> PendingTaskSet;
+  PendingTaskSet pending_tasks_;
+
+  // The next sequence number for a new sequenced task.
+  int64_t next_sequence_task_number_;
+
+  // Number of tasks in the pending_tasks_ list that are marked as blocking
+  // shutdown.
+  size_t blocking_shutdown_pending_task_count_;
+
+  // Lists all sequence tokens currently executing.
+  std::set<int> current_sequences_;
+
+  // An ID for each posted task to distinguish the task from others in traces.
+  int trace_id_;
+
+  // Set when Shutdown is called and no further tasks should be
+  // allowed, though we may still be running existing tasks.
+  bool shutdown_called_;
+
+  // The number of new BLOCK_SHUTDOWN tasks that may be posted after Shutdown()
+  // has been called.
+  int max_blocking_tasks_after_shutdown_;
+
+  // State used to cleanup for testing, all guarded by lock_.
+  CleanupState cleanup_state_;
+  size_t cleanup_idlers_;
+  ConditionVariable cleanup_cv_;
+
+  TestingObserver* const testing_observer_;
+
+  DISALLOW_COPY_AND_ASSIGN(Inner);
+};
+
+// Worker definitions ---------------------------------------------------------
+
+// Starts the underlying SimpleThread immediately; Run() executes on it.
+SequencedWorkerPool::Worker::Worker(
+    scoped_refptr<SequencedWorkerPool> worker_pool,
+    int thread_number,
+    const std::string& prefix)
+    : SimpleThread(prefix + StringPrintf("Worker%d", thread_number)),
+      worker_pool_(std::move(worker_pool)),
+      task_shutdown_behavior_(BLOCK_SHUTDOWN),
+      is_processing_task_(false) {
+  Start();
+}
+
+SequencedWorkerPool::Worker::~Worker() {
+}
+
+void SequencedWorkerPool::Worker::Run() {
+#if defined(OS_WIN)
+  win::ScopedCOMInitializer com_initializer;
+#endif
+
+  // Store a pointer to this worker in thread local storage for static function
+  // access.
+  DCHECK(!lazy_tls_ptr_.Get().Get());
+  lazy_tls_ptr_.Get().Set(this);
+
+  // Just jump back to the Inner object to run the thread, since it has all the
+  // tracking information and queues. It might be more natural to implement
+  // using DelegateSimpleThread and have Inner implement the Delegate to avoid
+  // having these worker objects at all, but that method lacks the ability to
+  // send thread-specific information easily to the thread loop.
+  worker_pool_->inner_->ThreadLoop(this);
+  // Release our cyclic reference once we're done.
+  worker_pool_ = nullptr;
+}
+
+// static
+// Returns null when the TLS slot was never created or the calling thread is
+// not a pool worker.
+SequencedWorkerPool::Worker*
+SequencedWorkerPool::Worker::GetForCurrentThread() {
+  // Don't construct lazy instance on check.
+  // NOTE(review): relies on LazyInstance's pointer-comparison operator to
+  // test creation without forcing construction -- confirm in lazy_instance.h.
+  if (lazy_tls_ptr_ == nullptr)
+    return nullptr;
+
+  return lazy_tls_ptr_.Get().Get();
+}
+
+// static
+LazyInstance<ThreadLocalPointer<SequencedWorkerPool::Worker>>::Leaky
+    SequencedWorkerPool::Worker::lazy_tls_ptr_ = LAZY_INSTANCE_INITIALIZER;
+
+// Inner definitions ---------------------------------------------------------
+
+// All counters start at zero and no threads exist yet: workers are created
+// lazily by PostTask via PrepareToStartAdditionalThreadIfHelpful.
+SequencedWorkerPool::Inner::Inner(
+    SequencedWorkerPool* worker_pool,
+    size_t max_threads,
+    const std::string& thread_name_prefix,
+    TestingObserver* observer)
+    : worker_pool_(worker_pool),
+      lock_(),
+      has_work_cv_(&lock_),
+      can_shutdown_cv_(&lock_),
+      max_threads_(max_threads),
+      thread_name_prefix_(thread_name_prefix),
+      thread_being_created_(false),
+      waiting_thread_count_(0),
+      blocking_shutdown_thread_count_(0),
+      next_sequence_task_number_(0),
+      blocking_shutdown_pending_task_count_(0),
+      trace_id_(0),
+      shutdown_called_(false),
+      max_blocking_tasks_after_shutdown_(0),
+      cleanup_state_(CLEANUP_DONE),
+      cleanup_idlers_(0),
+      cleanup_cv_(&lock_),
+      testing_observer_(observer) {}
+
+SequencedWorkerPool::Inner::~Inner() {
+  // You must call Shutdown() before destroying the pool.
+  DCHECK(shutdown_called_);
+
+  // Need to explicitly join with the threads before they're destroyed or else
+  // they will be running when our object is half torn down.
+  for (ThreadMap::iterator it = threads_.begin(); it != threads_.end(); ++it)
+    it->second->Join();
+  threads_.clear();
+
+  // Notify the test observer (if any) that teardown is complete.
+  if (testing_observer_)
+    testing_observer_->OnDestruct();
+}
+
+// static
+SequencedWorkerPool::SequenceToken
+SequencedWorkerPool::Inner::GetSequenceToken() {
+  // Need to add one because StaticAtomicSequenceNumber starts at zero, which
+  // is used as a sentinel value in SequenceTokens.
+  return SequenceToken(g_last_sequence_number_.GetNext() + 1);
+}
+
+// Looks up (or creates) the token ID registered for |name| under the lock.
+SequencedWorkerPool::SequenceToken
+SequencedWorkerPool::Inner::GetNamedSequenceToken(const std::string& name) {
+  AutoLock lock(lock_);
+  return SequenceToken(LockedGetNamedTokenID(name));
+}
+
+// Queues a task for execution. Returns false only when shutdown has started
+// and the post is disallowed by the rules below; on success, either finishes
+// starting a new worker thread or signals an existing one, outside the lock.
+bool SequencedWorkerPool::Inner::PostTask(
+    const std::string* optional_token_name,
+    SequenceToken sequence_token,
+    WorkerShutdown shutdown_behavior,
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    TimeDelta delay) {
+  // Delayed tasks may only use SKIP_ON_SHUTDOWN semantics.
+  DCHECK(delay.is_zero() || shutdown_behavior == SKIP_ON_SHUTDOWN);
+  SequencedTask sequenced(from_here);
+  sequenced.sequence_token_id = sequence_token.id_;
+  sequenced.shutdown_behavior = shutdown_behavior;
+  sequenced.posted_from = from_here;
+  // BLOCK_SHUTDOWN tasks are wrapped so the process isn't killed while they
+  // run (MakeCriticalClosure).
+  sequenced.task =
+      shutdown_behavior == BLOCK_SHUTDOWN ?
+      base::MakeCriticalClosure(task) : task;
+  sequenced.time_to_run = TimeTicks::Now() + delay;
+
+  int create_thread_id = 0;
+  {
+    AutoLock lock(lock_);
+    if (shutdown_called_) {
+      // Don't allow a new task to be posted if it doesn't block shutdown.
+      if (shutdown_behavior != BLOCK_SHUTDOWN)
+        return false;
+
+      // If the current thread is running a task, and that task doesn't block
+      // shutdown, then it shouldn't be allowed to post any more tasks.
+      ThreadMap::const_iterator found =
+          threads_.find(PlatformThread::CurrentId());
+      if (found != threads_.end() && found->second->is_processing_task() &&
+          found->second->task_shutdown_behavior() != BLOCK_SHUTDOWN) {
+        return false;
+      }
+
+      // Post-shutdown BLOCK_SHUTDOWN posts draw from a finite budget.
+      if (max_blocking_tasks_after_shutdown_ <= 0) {
+        DLOG(WARNING) << "BLOCK_SHUTDOWN task disallowed";
+        return false;
+      }
+      max_blocking_tasks_after_shutdown_ -= 1;
+    }
+
+    // The trace_id is used for identifying the task in about:tracing.
+    sequenced.trace_id = trace_id_++;
+
+    TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
+        "SequencedWorkerPool::Inner::PostTask",
+        TRACE_ID_MANGLE(GetTaskTraceID(sequenced, static_cast<void*>(this))),
+        TRACE_EVENT_FLAG_FLOW_OUT);
+
+    sequenced.sequence_task_number = LockedGetNextSequenceTaskNumber();
+
+    // Now that we have the lock, apply the named token rules.
+    if (optional_token_name)
+      sequenced.sequence_token_id = LockedGetNamedTokenID(*optional_token_name);
+
+    pending_tasks_.insert(sequenced);
+    if (shutdown_behavior == BLOCK_SHUTDOWN)
+      blocking_shutdown_pending_task_count_++;
+
+    create_thread_id = PrepareToStartAdditionalThreadIfHelpful();
+  }
+
+  // Actually start the additional thread or signal an existing one now that
+  // we're outside the lock.
+  if (create_thread_id)
+    FinishStartingAdditionalThread(create_thread_id);
+  else
+    SignalHasWork();
+
+  return true;
+}
+
+// True iff the calling thread is one of this pool's workers.
+bool SequencedWorkerPool::Inner::RunsTasksOnCurrentThread() const {
+  AutoLock lock(lock_);
+  return ContainsKey(threads_, PlatformThread::CurrentId());
+}
+
+// True iff the calling worker thread is currently executing a task that
+// carries |sequence_token|.
+bool SequencedWorkerPool::Inner::IsRunningSequenceOnCurrentThread(
+    SequenceToken sequence_token) const {
+  AutoLock lock(lock_);
+  ThreadMap::const_iterator found = threads_.find(PlatformThread::CurrentId());
+  if (found == threads_.end())
+    return false;
+  return found->second->is_processing_task() &&
+         sequence_token.Equals(found->second->task_sequence_token());
+}
+
+// True iff some worker (any thread) is currently executing |sequence_token|.
+bool SequencedWorkerPool::Inner::IsRunningSequence(
+    SequenceToken sequence_token) const {
+  DCHECK(sequence_token.IsValid());
+  AutoLock lock(lock_);
+  return !IsSequenceTokenRunnable(sequence_token.id_);
+}
+
+// Attaches |sequence_token| to the calling worker's in-flight task and marks
+// the token as in use. The worker must already be processing a task that has
+// no valid token yet (see the DCHECKs).
+void SequencedWorkerPool::Inner::SetRunningTaskInfoForCurrentThread(
+    SequenceToken sequence_token,
+    WorkerShutdown shutdown_behavior) {
+  AutoLock lock(lock_);
+  ThreadMap::const_iterator found = threads_.find(PlatformThread::CurrentId());
+  DCHECK(found != threads_.end());
+  DCHECK(found->second->is_processing_task());
+  DCHECK(!found->second->task_sequence_token().IsValid());
+  found->second->set_running_task_info(sequence_token, shutdown_behavior);
+
+  // Mark the sequence token as in use.
+  bool success = current_sequences_.insert(sequence_token.id_).second;
+  DCHECK(success);
+}
+
+// See https://code.google.com/p/chromium/issues/detail?id=168415
+// Blocks (from a non-worker thread) until the workers have drained the queue
+// and gone idle. No-op if shutdown already started or the pool is idle.
+void SequencedWorkerPool::Inner::CleanupForTesting() {
+  DCHECK(!RunsTasksOnCurrentThread());
+  base::ThreadRestrictions::ScopedAllowWait allow_wait;
+  AutoLock lock(lock_);
+  CHECK_EQ(CLEANUP_DONE, cleanup_state_);
+  if (shutdown_called_)
+    return;
+  if (pending_tasks_.empty() && waiting_thread_count_ == threads_.size())
+    return;
+  cleanup_state_ = CLEANUP_REQUESTED;
+  cleanup_idlers_ = 0;
+  has_work_cv_.Signal();
+  // Workers advance cleanup_state_ (see HandleCleanup); wait for completion.
+  while (cleanup_state_ != CLEANUP_DONE)
+    cleanup_cv_.Wait();
+}
+
+void SequencedWorkerPool::Inner::SignalHasWorkForTesting() {
+  SignalHasWork();
+}
+
+// Begins shutdown and blocks until nothing is left that blocks shutdown
+// (see CanShutdown()). Up to |max_new_blocking_tasks_after_shutdown|
+// BLOCK_SHUTDOWN tasks may still be posted by workers after this point.
+void SequencedWorkerPool::Inner::Shutdown(
+    int max_new_blocking_tasks_after_shutdown) {
+  DCHECK_GE(max_new_blocking_tasks_after_shutdown, 0);
+  {
+    AutoLock lock(lock_);
+    // Cleanup and Shutdown should not be called concurrently.
+    CHECK_EQ(CLEANUP_DONE, cleanup_state_);
+    if (shutdown_called_)
+      return;
+    shutdown_called_ = true;
+    max_blocking_tasks_after_shutdown_ = max_new_blocking_tasks_after_shutdown;
+
+    // Tickle the threads. This will wake up a waiting one so it will know that
+    // it can exit, which in turn will wake up any other waiting ones.
+    SignalHasWork();
+
+    // There are no pending or running tasks blocking shutdown, we're done.
+    if (CanShutdown())
+      return;
+  }
+
+  // If we're here, then something is blocking shutdown. So wait for
+  // CanShutdown() to go to true.
+
+  if (testing_observer_)
+    testing_observer_->WillWaitForShutdown();
+
+#if !defined(OS_NACL)
+  TimeTicks shutdown_wait_begin = TimeTicks::Now();
+#endif
+
+  {
+    // Shutdown is allowed to wait even on threads that normally forbid it.
+    base::ThreadRestrictions::ScopedAllowWait allow_wait;
+    AutoLock lock(lock_);
+    while (!CanShutdown())
+      can_shutdown_cv_.Wait();
+  }
+#if !defined(OS_NACL)
+  UMA_HISTOGRAM_TIMES("SequencedWorkerPool.ShutdownDelayTime",
+                      TimeTicks::Now() - shutdown_wait_begin);
+#endif
+}
+
+// True once Shutdown() has been called (it never resets).
+bool SequencedWorkerPool::Inner::IsShutdownInProgress() {
+  AutoLock lock(lock_);
+  return shutdown_called_;
+}
+
+// Main loop for each worker thread. Registers |this_worker| in |threads_|
+// (transferring ownership), then repeatedly pulls work while holding |lock_|,
+// running each task with the lock temporarily released. Exits only when
+// shutdown is in progress and no blocking-shutdown tasks remain.
+void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
+  {
+    AutoLock lock(lock_);
+    DCHECK(thread_being_created_);
+    thread_being_created_ = false;
+    auto result = threads_.insert(
+        std::make_pair(this_worker->tid(), WrapUnique(this_worker)));
+    DCHECK(result.second);
+
+    while (true) {
+#if defined(OS_MACOSX)
+      base::mac::ScopedNSAutoreleasePool autorelease_pool;
+#endif
+
+      HandleCleanup();
+
+      // See GetWork for what delete_these_outside_lock is doing.
+      SequencedTask task;
+      TimeDelta wait_time;
+      std::vector<Closure> delete_these_outside_lock;
+      GetWorkStatus status =
+          GetWork(&task, &wait_time, &delete_these_outside_lock);
+      if (status == GET_WORK_FOUND) {
+        TRACE_EVENT_WITH_FLOW2(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
+            "SequencedWorkerPool::Inner::ThreadLoop",
+            TRACE_ID_MANGLE(GetTaskTraceID(task, static_cast<void*>(this))),
+            TRACE_EVENT_FLAG_FLOW_IN,
+            "src_file", task.posted_from.file_name(),
+            "src_func", task.posted_from.function_name());
+        TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION task_event(
+            task.posted_from.file_name());
+        // Must be called with the lock held; may mark a new thread as being
+        // created (finished below, outside the lock).
+        int new_thread_id = WillRunWorkerTask(task);
+        {
+          AutoUnlock unlock(lock_);
+          // There may be more work available, so wake up another
+          // worker thread. (Technically not required, since we
+          // already get a signal for each new task, but it doesn't
+          // hurt.)
+          SignalHasWork();
+          delete_these_outside_lock.clear();
+
+          // Complete thread creation outside the lock if necessary.
+          if (new_thread_id)
+            FinishStartingAdditionalThread(new_thread_id);
+
+          this_worker->set_running_task_info(
+              SequenceToken(task.sequence_token_id), task.shutdown_behavior);
+
+          tracked_objects::TaskStopwatch stopwatch;
+          stopwatch.Start();
+          task.task.Run();
+          stopwatch.Stop();
+
+          tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(
+              task, stopwatch);
+
+          // Update the sequence token in case it has been set from within the
+          // task, so it can be removed from the set of currently running
+          // sequences in DidRunWorkerTask() below.
+          task.sequence_token_id = this_worker->task_sequence_token().id_;
+
+          // Make sure our task is erased outside the lock for the
+          // same reason we do this with delete_these_outside_lock.
+          // Also, do it before calling reset_running_task_info() so
+          // that sequence-checking from within the task's destructor
+          // still works.
+          task.task = Closure();
+
+          this_worker->reset_running_task_info();
+        }
+        DidRunWorkerTask(task);  // Must be done inside the lock.
+      } else if (cleanup_state_ == CLEANUP_RUNNING) {
+        switch (status) {
+          case GET_WORK_WAIT: {
+              // Deferred tasks were moved into delete_these_outside_lock by
+              // GetWork(); destroy them with the lock released.
+              AutoUnlock unlock(lock_);
+              delete_these_outside_lock.clear();
+            }
+            break;
+          case GET_WORK_NOT_FOUND:
+            CHECK(delete_these_outside_lock.empty());
+            cleanup_state_ = CLEANUP_FINISHING;
+            cleanup_cv_.Broadcast();
+            break;
+          default:
+            NOTREACHED();
+        }
+      } else {
+        // When we're terminating and there's no more work, we can
+        // shut down, other workers can complete any pending or new tasks.
+        // We can get additional tasks posted after shutdown_called_ is set
+        // but only worker threads are allowed to post tasks at that time, and
+        // the workers responsible for posting those tasks will be available
+        // to run them. Also, there may be some tasks stuck behind running
+        // ones with the same sequence token, but additional threads won't
+        // help this case.
+        if (shutdown_called_ && blocking_shutdown_pending_task_count_ == 0) {
+          AutoUnlock unlock(lock_);
+          delete_these_outside_lock.clear();
+          break;
+        }
+
+        // No work was found, but there are tasks that need deletion. The
+        // deletion must happen outside of the lock.
+        if (delete_these_outside_lock.size()) {
+          AutoUnlock unlock(lock_);
+          delete_these_outside_lock.clear();
+
+          // Since the lock has been released, |status| may no longer be
+          // accurate. It might read GET_WORK_WAIT even if there are tasks
+          // ready to perform work. Jump to the top of the loop to recalculate
+          // |status|.
+          continue;
+        }
+
+        waiting_thread_count_++;
+
+        switch (status) {
+          case GET_WORK_NOT_FOUND:
+            has_work_cv_.Wait();
+            break;
+          case GET_WORK_WAIT:
+            has_work_cv_.TimedWait(wait_time);
+            break;
+          default:
+            NOTREACHED();
+        }
+        waiting_thread_count_--;
+      }
+    }
+  }  // Release lock_.
+
+  // We noticed we should exit. Wake up the next worker so it knows it should
+  // exit as well (because the Shutdown() code only signals once).
+  SignalHasWork();
+
+  // Possibly unblock shutdown.
+  can_shutdown_cv_.Signal();
+}
+
+// Advances the CleanupForTesting() state machine from a worker thread.
+// Called at the top of every ThreadLoop iteration with |lock_| held; a no-op
+// unless a cleanup has been requested.
+void SequencedWorkerPool::Inner::HandleCleanup() {
+  lock_.AssertAcquired();
+  if (cleanup_state_ == CLEANUP_DONE)
+    return;
+  if (cleanup_state_ == CLEANUP_REQUESTED) {
+    // We win, we get to do the cleanup as soon as the others wise up and idle.
+    cleanup_state_ = CLEANUP_STARTING;
+    while (thread_being_created_ ||
+           cleanup_idlers_ != threads_.size() - 1) {
+      has_work_cv_.Signal();
+      cleanup_cv_.Wait();
+    }
+    cleanup_state_ = CLEANUP_RUNNING;
+    return;
+  }
+  if (cleanup_state_ == CLEANUP_STARTING) {
+    // Another worker thread is cleaning up, we idle here until that's done.
+    ++cleanup_idlers_;
+    cleanup_cv_.Broadcast();
+    while (cleanup_state_ != CLEANUP_FINISHING) {
+      cleanup_cv_.Wait();
+    }
+    --cleanup_idlers_;
+    cleanup_cv_.Broadcast();
+    return;
+  }
+  if (cleanup_state_ == CLEANUP_FINISHING) {
+    // We wait for all idlers to wake up prior to being DONE.
+    while (cleanup_idlers_ != 0) {
+      cleanup_cv_.Broadcast();
+      cleanup_cv_.Wait();
+    }
+    if (cleanup_state_ == CLEANUP_FINISHING) {
+      cleanup_state_ = CLEANUP_DONE;
+      cleanup_cv_.Signal();
+    }
+    return;
+  }
+}
+
+// Maps |name| to a stable sequence token id, creating a fresh token on first
+// use of the name. |lock_| must be held.
+int SequencedWorkerPool::Inner::LockedGetNamedTokenID(
+    const std::string& name) {
+  lock_.AssertAcquired();
+  DCHECK(!name.empty());
+
+  std::map<std::string, int>::const_iterator found =
+      named_sequence_tokens_.find(name);
+  if (found != named_sequence_tokens_.end())
+    return found->second;  // Got an existing one.
+
+  // Create a new one for this name.
+  SequenceToken result = GetSequenceToken();
+  named_sequence_tokens_.insert(std::make_pair(name, result.id_));
+  return result.id_;
+}
+
+// Returns a monotonically increasing number used to order tasks within a
+// sequence. |lock_| must be held.
+int64_t SequencedWorkerPool::Inner::LockedGetNextSequenceTaskNumber() {
+  lock_.AssertAcquired();
+  // We assume that we never create enough tasks to wrap around.
+  return next_sequence_task_number_++;
+}
+
+// Pops the next runnable task into |*task|. Returns GET_WORK_FOUND on
+// success; GET_WORK_WAIT (with |*wait_time| set) when the earliest eligible
+// task's time has not yet come; GET_WORK_NOT_FOUND otherwise. Tasks to be
+// discarded are appended to |delete_these_outside_lock| so the caller can
+// destroy them after releasing |lock_|. |lock_| must be held.
+SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork(
+    SequencedTask* task,
+    TimeDelta* wait_time,
+    std::vector<Closure>* delete_these_outside_lock) {
+  lock_.AssertAcquired();
+
+  // Find the next task with a sequence token that's not currently in use.
+  // If the token is in use, that means another thread is running something
+  // in that sequence, and we can't run it without going out-of-order.
+  //
+  // This algorithm is simple and fair, but inefficient in some cases. For
+  // example, say somebody schedules 1000 slow tasks with the same sequence
+  // number. We'll have to go through all those tasks each time we feel like
+  // there might be work to schedule. If this proves to be a problem, we
+  // should make this more efficient.
+  //
+  // One possible enhancement would be to keep a map from sequence ID to a
+  // list of pending but currently blocked SequencedTasks for that ID.
+  // When a worker finishes a task of one sequence token, it can pick up the
+  // next one from that token right away.
+  //
+  // This may lead to starvation if there are sufficient numbers of sequences
+  // in use. To alleviate this, we could add an incrementing priority counter
+  // to each SequencedTask. Then maintain a priority_queue of all runnable
+  // tasks, sorted by priority counter. When a sequenced task is completed
+  // we would pop the head element off of that task's pending list and add it
+  // to the priority queue. Then we would run the first item in the priority
+  // queue.
+
+  GetWorkStatus status = GET_WORK_NOT_FOUND;
+  int unrunnable_tasks = 0;
+  PendingTaskSet::iterator i = pending_tasks_.begin();
+  // We assume that the loop below doesn't take too long and so we can just do
+  // a single call to TimeTicks::Now().
+  const TimeTicks current_time = TimeTicks::Now();
+  while (i != pending_tasks_.end()) {
+    if (!IsSequenceTokenRunnable(i->sequence_token_id)) {
+      unrunnable_tasks++;
+      ++i;
+      continue;
+    }
+
+    if (shutdown_called_ && i->shutdown_behavior != BLOCK_SHUTDOWN) {
+      // We're shutting down and the task we just found isn't blocking
+      // shutdown. Delete it and get more work.
+      //
+      // Note that we do not want to delete unrunnable tasks. Deleting a task
+      // can have side effects (like freeing some objects) and deleting a
+      // task that's supposed to run after one that's currently running could
+      // cause an obscure crash.
+      //
+      // We really want to delete these tasks outside the lock in case the
+      // closures are holding refs to objects that want to post work from
+      // their destructors (which would deadlock). The closures are
+      // internally refcounted, so we just need to keep a copy of them alive
+      // until the lock is exited. The calling code can just clear() the
+      // vector they passed to us once the lock is exited to make this
+      // happen.
+      delete_these_outside_lock->push_back(i->task);
+      pending_tasks_.erase(i++);
+      continue;
+    }
+
+    if (i->time_to_run > current_time) {
+      // The time to run has not come yet.
+      *wait_time = i->time_to_run - current_time;
+      status = GET_WORK_WAIT;
+      if (cleanup_state_ == CLEANUP_RUNNING) {
+        // Deferred tasks are deleted when cleaning up, see Inner::ThreadLoop.
+        delete_these_outside_lock->push_back(i->task);
+        pending_tasks_.erase(i);
+      }
+      break;
+    }
+
+    // Found a runnable task.
+    *task = *i;
+    pending_tasks_.erase(i);
+    if (task->shutdown_behavior == BLOCK_SHUTDOWN) {
+      blocking_shutdown_pending_task_count_--;
+    }
+
+    status = GET_WORK_FOUND;
+    break;
+  }
+
+  return status;
+}
+
+// Bookkeeping done with |lock_| held just before running |task|: marks the
+// task's sequence token as in use and counts shutdown-blocking work. Returns
+// the id of an additional thread to start (0 for none); the caller must then
+// call FinishStartingAdditionalThread() outside the lock.
+int SequencedWorkerPool::Inner::WillRunWorkerTask(const SequencedTask& task) {
+  lock_.AssertAcquired();
+
+  // Mark the task's sequence number as in use.
+  if (task.sequence_token_id)
+    current_sequences_.insert(task.sequence_token_id);
+
+  // Ensure that threads running tasks posted with either SKIP_ON_SHUTDOWN
+  // or BLOCK_SHUTDOWN will prevent shutdown until that task or thread
+  // completes.
+  if (task.shutdown_behavior != CONTINUE_ON_SHUTDOWN)
+    blocking_shutdown_thread_count_++;
+
+  // We just picked up a task. Since StartAdditionalThreadIfHelpful only
+  // creates a new thread if there is no free one, there is a race when posting
+  // tasks that many tasks could have been posted before a thread started
+  // running them, so only one thread would have been created. So we also check
+  // whether we should create more threads after removing our task from the
+  // queue, which also has the nice side effect of creating the workers from
+  // background threads rather than the main thread of the app.
+  //
+  // If another thread wasn't created, we want to wake up an existing thread
+  // if there is one waiting to pick up the next task.
+  //
+  // Note that we really need to do this *before* running the task, not
+  // after. Otherwise, if more than one task is posted, the creation of the
+  // second thread (since we only create one at a time) will be blocked by
+  // the execution of the first task, which could be arbitrarily long.
+  return PrepareToStartAdditionalThreadIfHelpful();
+}
+
+// Reverses WillRunWorkerTask's bookkeeping after |task| has run: releases the
+// sequence token and decrements the shutdown-blocking count. |lock_| held.
+void SequencedWorkerPool::Inner::DidRunWorkerTask(const SequencedTask& task) {
+  lock_.AssertAcquired();
+
+  if (task.shutdown_behavior != CONTINUE_ON_SHUTDOWN) {
+    DCHECK_GT(blocking_shutdown_thread_count_, 0u);
+    blocking_shutdown_thread_count_--;
+  }
+
+  if (task.sequence_token_id)
+    current_sequences_.erase(task.sequence_token_id);
+}
+
+// A token is runnable when no worker is currently executing a task with that
+// token. Token id 0 means "unsequenced" and is always runnable. |lock_| held.
+bool SequencedWorkerPool::Inner::IsSequenceTokenRunnable(
+    int sequence_token_id) const {
+  lock_.AssertAcquired();
+  return !sequence_token_id ||
+      current_sequences_.find(sequence_token_id) ==
+          current_sequences_.end();
+}
+
+// Decides, with |lock_| held, whether starting one more worker would help.
+// If so, sets |thread_being_created_| and returns the new thread's number;
+// returns 0 otherwise. Actual thread creation happens outside the lock in
+// FinishStartingAdditionalThread().
+int SequencedWorkerPool::Inner::PrepareToStartAdditionalThreadIfHelpful() {
+  lock_.AssertAcquired();
+  // How thread creation works:
+  //
+  // We'd like to avoid creating threads with the lock held. However, we
+  // need to be sure that we have an accurate accounting of the threads for
+  // proper Joining and deletion on shutdown.
+  //
+  // We need to figure out if we need another thread with the lock held, which
+  // is what this function does. It then marks us as in the process of creating
+  // a thread. When we do shutdown, we wait until the thread_being_created_
+  // flag is cleared, which ensures that the new thread is properly added to
+  // all the data structures and we can't leak it. Once shutdown starts, we'll
+  // refuse to create more threads or they would be leaked.
+  //
+  // Note that this creates a mostly benign race condition on shutdown that
+  // will cause fewer workers to be created than one would expect. It isn't
+  // much of an issue in real life, but affects some tests. Since we only spawn
+  // one worker at a time, the following sequence of events can happen:
+  //
+  // 1. Main thread posts a bunch of unrelated tasks that would normally be
+  //    run on separate threads.
+  // 2. The first task post causes us to start a worker. Other tasks do not
+  //    cause a worker to start since one is pending.
+  // 3. Main thread initiates shutdown.
+  // 4. No more threads are created since the shutdown_called_ flag is set.
+  //
+  // The result is that one may expect that max_threads_ workers to be created
+  // given the workload, but in reality fewer may be created because the
+  // sequence of thread creation on the background threads is racing with the
+  // shutdown call.
+  if (!shutdown_called_ &&
+      !thread_being_created_ &&
+      cleanup_state_ == CLEANUP_DONE &&
+      threads_.size() < max_threads_ &&
+      waiting_thread_count_ == 0) {
+    // We could use an additional thread if there's work to be done.
+    for (PendingTaskSet::const_iterator i = pending_tasks_.begin();
+         i != pending_tasks_.end(); ++i) {
+      if (IsSequenceTokenRunnable(i->sequence_token_id)) {
+        // Found a runnable task, mark the thread as being started.
+        thread_being_created_ = true;
+        return static_cast<int>(threads_.size() + 1);
+      }
+    }
+  }
+  return 0;
+}
+
+// Second half of thread creation; must be called outside the lock after
+// PrepareToStartAdditionalThreadIfHelpful() returned |thread_number|.
+void SequencedWorkerPool::Inner::FinishStartingAdditionalThread(
+    int thread_number) {
+  // Called outside of the lock.
+  DCHECK_GT(thread_number, 0);
+
+  // The worker is assigned to the list when the thread actually starts, which
+  // will manage the memory of the pointer (threads_.insert in ThreadLoop), so
+  // the naked new here is not a leak.
+  new Worker(worker_pool_, thread_number, thread_name_prefix_);
+}
+
+// Wakes one waiting worker and notifies the testing observer, if any.
+void SequencedWorkerPool::Inner::SignalHasWork() {
+  has_work_cv_.Signal();
+  if (testing_observer_) {
+    testing_observer_->OnHasWork();
+  }
+}
+
+// True when nothing remains that blocks shutdown: no thread mid-creation, no
+// running SKIP/BLOCK_SHUTDOWN task, no pending BLOCK_SHUTDOWN task.
+bool SequencedWorkerPool::Inner::CanShutdown() const {
+  lock_.AssertAcquired();
+  // See PrepareToStartAdditionalThreadIfHelpful for how thread creation works.
+  return !thread_being_created_ &&
+      blocking_shutdown_thread_count_ == 0 &&
+      blocking_shutdown_pending_task_count_ == 0;
+}
+
+// Process-wide counter backing GetSequenceToken(); tokens are never reused.
+base::StaticAtomicSequenceNumber
+SequencedWorkerPool::Inner::g_last_sequence_number_;
+
+// SequencedWorkerPool --------------------------------------------------------
+
+// Debug-friendly rendering of a token, e.g. "[42]".
+std::string SequencedWorkerPool::SequenceToken::ToString() const {
+  return base::StringPrintf("[%d]", id_);
+}
+
+// static
+// Returns an invalid (default) token when this thread is not a pool worker
+// or is running an unsequenced task.
+SequencedWorkerPool::SequenceToken
+SequencedWorkerPool::GetSequenceTokenForCurrentThread() {
+  Worker* worker = Worker::GetForCurrentThread();
+  if (!worker)
+    return SequenceToken();
+
+  return worker->task_sequence_token();
+}
+
+// static
+// Returns the pool that owns this worker thread, or null if the current
+// thread is not a SequencedWorkerPool worker.
+scoped_refptr<SequencedWorkerPool>
+SequencedWorkerPool::GetWorkerPoolForCurrentThread() {
+  Worker* worker = Worker::GetForCurrentThread();
+  if (!worker)
+    return nullptr;
+
+  return worker->worker_pool();
+}
+
+// static
+// Returns a SequencedTaskRunner bound to the sequence the current worker is
+// running; binds the thread to a fresh token first if the running task was
+// unsequenced. Null when called off a worker thread.
+scoped_refptr<SequencedTaskRunner>
+SequencedWorkerPool::GetSequencedTaskRunnerForCurrentThread() {
+  Worker* worker = Worker::GetForCurrentThread();
+
+  // If there is no worker, this thread is not a worker thread. Otherwise, it is
+  // currently running a task (sequenced or unsequenced).
+  if (!worker)
+    return nullptr;
+
+  scoped_refptr<SequencedWorkerPool> pool = worker->worker_pool();
+  SequenceToken sequence_token = worker->task_sequence_token();
+  WorkerShutdown shutdown_behavior = worker->task_shutdown_behavior();
+  if (!sequence_token.IsValid()) {
+    // Create a new sequence token and bind this thread to it, to make sure that
+    // a task posted to the SequencedTaskRunner we are going to return is not
+    // immediately going to run on a different thread.
+    sequence_token = Inner::GetSequenceToken();
+    pool->inner_->SetRunningTaskInfoForCurrentThread(sequence_token,
+                                                     shutdown_behavior);
+  }
+
+  DCHECK(pool->IsRunningSequenceOnCurrentThread(sequence_token));
+  return new SequencedWorkerPoolSequencedTaskRunner(
+      std::move(pool), sequence_token, shutdown_behavior);
+}
+
+// Requires a ThreadTaskRunnerHandle on the constructing thread (captured for
+// self-deletion in OnDestruct()).
+SequencedWorkerPool::SequencedWorkerPool(size_t max_threads,
+                                         const std::string& thread_name_prefix)
+    : constructor_task_runner_(ThreadTaskRunnerHandle::Get()),
+      inner_(new Inner(this, max_threads, thread_name_prefix, NULL)) {
+}
+
+// Same as above, with a test observer (not owned).
+SequencedWorkerPool::SequencedWorkerPool(size_t max_threads,
+                                         const std::string& thread_name_prefix,
+                                         TestingObserver* observer)
+    : constructor_task_runner_(ThreadTaskRunnerHandle::Get()),
+      inner_(new Inner(this, max_threads, thread_name_prefix, observer)) {
+}
+
+SequencedWorkerPool::~SequencedWorkerPool() {}
+
+// Called by RefCountedThreadSafe when the last reference drops.
+void SequencedWorkerPool::OnDestruct() const {
+  // Avoid deleting ourselves on a worker thread (which would deadlock).
+  if (RunsTasksOnCurrentThread()) {
+    constructor_task_runner_->DeleteSoon(FROM_HERE, this);
+  } else {
+    delete this;
+  }
+}
+
+// static
+SequencedWorkerPool::SequenceToken SequencedWorkerPool::GetSequenceToken() {
+  return Inner::GetSequenceToken();
+}
+
+SequencedWorkerPool::SequenceToken SequencedWorkerPool::GetNamedSequenceToken(
+    const std::string& name) {
+  return inner_->GetNamedSequenceToken(name);
+}
+
+// Convenience overload: BLOCK_SHUTDOWN behavior.
+scoped_refptr<SequencedTaskRunner> SequencedWorkerPool::GetSequencedTaskRunner(
+    SequenceToken token) {
+  return GetSequencedTaskRunnerWithShutdownBehavior(token, BLOCK_SHUTDOWN);
+}
+
+scoped_refptr<SequencedTaskRunner>
+SequencedWorkerPool::GetSequencedTaskRunnerWithShutdownBehavior(
+    SequenceToken token, WorkerShutdown shutdown_behavior) {
+  return new SequencedWorkerPoolSequencedTaskRunner(
+      this, token, shutdown_behavior);
+}
+
+scoped_refptr<TaskRunner>
+SequencedWorkerPool::GetTaskRunnerWithShutdownBehavior(
+    WorkerShutdown shutdown_behavior) {
+  return new SequencedWorkerPoolTaskRunner(this, shutdown_behavior);
+}
+
+// The Post* variants below all forward to Inner::PostTask; they differ only
+// in sequence token (none, explicit, or named) and shutdown behavior. Each
+// returns false if posting fails (e.g. during shutdown).
+
+bool SequencedWorkerPool::PostWorkerTask(
+    const tracked_objects::Location& from_here,
+    const Closure& task) {
+  return inner_->PostTask(NULL, SequenceToken(), BLOCK_SHUTDOWN,
+                          from_here, task, TimeDelta());
+}
+
+bool SequencedWorkerPool::PostDelayedWorkerTask(
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    TimeDelta delay) {
+  // Delayed tasks are downgraded to SKIP_ON_SHUTDOWN so a long delay cannot
+  // hang shutdown.
+  WorkerShutdown shutdown_behavior =
+      delay.is_zero() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
+  return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior,
+                          from_here, task, delay);
+}
+
+bool SequencedWorkerPool::PostWorkerTaskWithShutdownBehavior(
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    WorkerShutdown shutdown_behavior) {
+  return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior,
+                          from_here, task, TimeDelta());
+}
+
+bool SequencedWorkerPool::PostSequencedWorkerTask(
+    SequenceToken sequence_token,
+    const tracked_objects::Location& from_here,
+    const Closure& task) {
+  return inner_->PostTask(NULL, sequence_token, BLOCK_SHUTDOWN,
+                          from_here, task, TimeDelta());
+}
+
+bool SequencedWorkerPool::PostDelayedSequencedWorkerTask(
+    SequenceToken sequence_token,
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    TimeDelta delay) {
+  // Same shutdown-behavior downgrade as PostDelayedWorkerTask.
+  WorkerShutdown shutdown_behavior =
+      delay.is_zero() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
+  return inner_->PostTask(NULL, sequence_token, shutdown_behavior,
+                          from_here, task, delay);
+}
+
+bool SequencedWorkerPool::PostNamedSequencedWorkerTask(
+    const std::string& token_name,
+    const tracked_objects::Location& from_here,
+    const Closure& task) {
+  DCHECK(!token_name.empty());
+  return inner_->PostTask(&token_name, SequenceToken(), BLOCK_SHUTDOWN,
+                          from_here, task, TimeDelta());
+}
+
+bool SequencedWorkerPool::PostSequencedWorkerTaskWithShutdownBehavior(
+    SequenceToken sequence_token,
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    WorkerShutdown shutdown_behavior) {
+  return inner_->PostTask(NULL, sequence_token, shutdown_behavior,
+                          from_here, task, TimeDelta());
+}
+
+// TaskRunner implementation.
+bool SequencedWorkerPool::PostDelayedTask(
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    TimeDelta delay) {
+  return PostDelayedWorkerTask(from_here, task, delay);
+}
+
+// Thin forwarders to the ref-counted Inner, which holds all pool state.
+
+bool SequencedWorkerPool::RunsTasksOnCurrentThread() const {
+  return inner_->RunsTasksOnCurrentThread();
+}
+
+bool SequencedWorkerPool::IsRunningSequenceOnCurrentThread(
+    SequenceToken sequence_token) const {
+  return inner_->IsRunningSequenceOnCurrentThread(sequence_token);
+}
+
+bool SequencedWorkerPool::IsRunningSequence(
+    SequenceToken sequence_token) const {
+  return inner_->IsRunningSequence(sequence_token);
+}
+
+// "Flush" is implemented as the test-only cleanup: wait until the pool is
+// quiescent.
+void SequencedWorkerPool::FlushForTesting() {
+  inner_->CleanupForTesting();
+}
+
+void SequencedWorkerPool::SignalHasWorkForTesting() {
+  inner_->SignalHasWorkForTesting();
+}
+
+// Must be called on the thread that constructed the pool.
+void SequencedWorkerPool::Shutdown(int max_new_blocking_tasks_after_shutdown) {
+  DCHECK(constructor_task_runner_->BelongsToCurrentThread());
+  inner_->Shutdown(max_new_blocking_tasks_after_shutdown);
+}
+
+bool SequencedWorkerPool::IsShutdownInProgress() {
+  return inner_->IsShutdownInProgress();
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/sequenced_worker_pool.h b/libchrome/base/threading/sequenced_worker_pool.h
new file mode 100644
index 0000000..cbec395
--- /dev/null
+++ b/libchrome/base/threading/sequenced_worker_pool.h
@@ -0,0 +1,384 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_SEQUENCED_WORKER_POOL_H_
+#define BASE_THREADING_SEQUENCED_WORKER_POOL_H_
+
+#include <stddef.h>
+
+#include <cstddef>
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/callback_forward.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
+#include "base/task_runner.h"
+
+namespace tracked_objects {
+class Location;
+} // namespace tracked_objects
+
+namespace base {
+
+class SingleThreadTaskRunner;
+
+template <class T> class DeleteHelper;
+
+class SequencedTaskRunner;
+
+// A worker thread pool that enforces ordering between sets of tasks. It also
+// allows you to specify what should happen to your tasks on shutdown.
+//
+// To enforce ordering, get a unique sequence token from the pool and post all
+// tasks you want to order with the token. All tasks with the same token are
+// guaranteed to execute serially, though not necessarily on the same thread.
+// This means that:
+//
+// - No two tasks with the same token will run at the same time.
+//
+// - Given two tasks T1 and T2 with the same token such that T2 will
+// run after T1, then T2 will start after T1 is destroyed.
+//
+// - If T2 will run after T1, then all memory changes in T1 and T1's
+// destruction will be visible to T2.
+//
+// Example:
+// SequencedWorkerPool::SequenceToken token =
+// SequencedWorkerPool::GetSequenceToken();
+// pool.PostSequencedWorkerTask(token, SequencedWorkerPool::SKIP_ON_SHUTDOWN,
+// FROM_HERE, base::Bind(...));
+// pool.PostSequencedWorkerTask(token, SequencedWorkerPool::SKIP_ON_SHUTDOWN,
+// FROM_HERE, base::Bind(...));
+//
+// You can make named sequence tokens to make it easier to share a token
+// across different components.
+//
+// You can also post tasks to the pool without ordering using PostWorkerTask.
+// These will be executed in an unspecified order. The order of execution
+// between tasks with different sequence tokens is also unspecified.
+//
+// This class may be leaked on shutdown to facilitate fast shutdown. The
+// expected usage, however, is to call Shutdown(), which correctly accounts
+// for CONTINUE_ON_SHUTDOWN behavior and is required for BLOCK_SHUTDOWN
+// behavior.
+//
+// Implementation note: This does not use a base::WorkerPool since that does
+// not enforce shutdown semantics or allow us to specify how many worker
+// threads to run. For the typical use case of random background work, we don't
+// necessarily want to be super aggressive about creating threads.
+//
+// Note that SequencedWorkerPool is RefCountedThreadSafe (inherited
+// from TaskRunner).
+//
+// Test-only code should wrap this in a base::SequencedWorkerPoolOwner to avoid
+// memory leaks. See http://crbug.com/273800
+class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
+ public:
+ // Defines what should happen to a task posted to the worker pool on
+ // shutdown.
+ enum WorkerShutdown {
+ // Tasks posted with this mode which have not run at shutdown will be
+ // deleted rather than run, and any tasks with this mode running at
+ // shutdown will be ignored (the worker thread will not be joined).
+ //
+ // This option provides a nice way to post stuff you don't want blocking
+ // shutdown. For example, you might be doing a slow DNS lookup and if it's
+ // blocked on the OS, you may not want to stop shutdown, since the result
+ // doesn't really matter at that point.
+ //
+ // However, you need to be very careful what you do in your callback when
+ // you use this option. Since the thread will continue to run until the OS
+ // terminates the process, the app can be in the process of tearing down
+ // when you're running. This means any singletons or global objects you
+ // use may suddenly become invalid out from under you. For this reason,
+ // it's best to use this only for slow but simple operations like the DNS
+ // example.
+ CONTINUE_ON_SHUTDOWN,
+
+ // Tasks posted with this mode that have not started executing at
+ // shutdown will be deleted rather than executed. However, any tasks that
+ // have already begun executing when shutdown is called will be allowed
+ // to continue, and will block shutdown until completion.
+ //
+ // Note: Because Shutdown() may block while these tasks are executing,
+ // care must be taken to ensure that they do not block on the thread that
+ // called Shutdown(), as this may lead to deadlock.
+ SKIP_ON_SHUTDOWN,
+
+ // Tasks posted with this mode will block shutdown until they're
+ // executed. Since this can have significant performance implications,
+ // use sparingly.
+ //
+ // Generally, this should be used only for user data, for example, a task
+ // writing a preference file.
+ //
+ // If a task is posted during shutdown, it will not get run since the
+ // workers may already be stopped. In this case, the post operation will
+ // fail (return false) and the task will be deleted.
+ BLOCK_SHUTDOWN,
+ };
+
+ // Opaque identifier that defines sequencing of tasks posted to the worker
+ // pool.
+ class BASE_EXPORT SequenceToken {
+ public:
+ SequenceToken() : id_(0) {}
+ ~SequenceToken() {}
+
+ bool Equals(const SequenceToken& other) const {
+ return id_ == other.id_;
+ }
+
+ // Returns false if current thread is executing an unsequenced task.
+ bool IsValid() const {
+ return id_ != 0;
+ }
+
+ // Returns a string representation of this token. This method should only be
+ // used for debugging.
+ std::string ToString() const;
+
+ private:
+ friend class SequencedWorkerPool;
+
+ explicit SequenceToken(int id) : id_(id) {}
+
+ int id_;
+ };
+
+ // Allows tests to perform certain actions.
+ class TestingObserver {
+ public:
+ virtual ~TestingObserver() {}
+ virtual void OnHasWork() = 0;
+ virtual void WillWaitForShutdown() = 0;
+ virtual void OnDestruct() = 0;
+ };
+
+  // Gets the SequenceToken of the current thread.
+ // If current thread is not a SequencedWorkerPool worker thread or is running
+ // an unsequenced task, returns an invalid SequenceToken.
+ static SequenceToken GetSequenceTokenForCurrentThread();
+
+ // Gets a SequencedTaskRunner for the current thread. If the current thread is
+ // running an unsequenced task, a new SequenceToken will be generated and set,
+ // so that the returned SequencedTaskRunner is guaranteed to run tasks after
+ // the current task has finished running.
+ static scoped_refptr<SequencedTaskRunner>
+ GetSequencedTaskRunnerForCurrentThread();
+
+ // Returns a unique token that can be used to sequence tasks posted to
+ // PostSequencedWorkerTask(). Valid tokens are always nonzero.
+ // TODO(bauerb): Rename this to better differentiate from
+ // GetSequenceTokenForCurrentThread().
+ static SequenceToken GetSequenceToken();
+
+ // Returns the SequencedWorkerPool that owns this thread, or null if the
+ // current thread is not a SequencedWorkerPool worker thread.
+ static scoped_refptr<SequencedWorkerPool> GetWorkerPoolForCurrentThread();
+
+ // When constructing a SequencedWorkerPool, there must be a
+ // ThreadTaskRunnerHandle on the current thread unless you plan to
+ // deliberately leak it.
+
+ // Pass the maximum number of threads (they will be lazily created as needed)
+ // and a prefix for the thread name to aid in debugging.
+ SequencedWorkerPool(size_t max_threads,
+ const std::string& thread_name_prefix);
+
+ // Like above, but with |observer| for testing. Does not take ownership of
+ // |observer|.
+ SequencedWorkerPool(size_t max_threads,
+ const std::string& thread_name_prefix,
+ TestingObserver* observer);
+
+ // Returns the sequence token associated with the given name. Calling this
+ // function multiple times with the same string will always produce the
+ // same sequence token. If the name has not been used before, a new token
+ // will be created.
+ SequenceToken GetNamedSequenceToken(const std::string& name);
+
+ // Returns a SequencedTaskRunner wrapper which posts to this
+ // SequencedWorkerPool using the given sequence token. Tasks with nonzero
+ // delay are posted with SKIP_ON_SHUTDOWN behavior and tasks with zero delay
+ // are posted with BLOCK_SHUTDOWN behavior.
+ scoped_refptr<SequencedTaskRunner> GetSequencedTaskRunner(
+ SequenceToken token);
+
+ // Returns a SequencedTaskRunner wrapper which posts to this
+ // SequencedWorkerPool using the given sequence token. Tasks with nonzero
+ // delay are posted with SKIP_ON_SHUTDOWN behavior and tasks with zero delay
+ // are posted with the given shutdown behavior.
+ scoped_refptr<SequencedTaskRunner> GetSequencedTaskRunnerWithShutdownBehavior(
+ SequenceToken token,
+ WorkerShutdown shutdown_behavior);
+
+ // Returns a TaskRunner wrapper which posts to this SequencedWorkerPool using
+ // the given shutdown behavior. Tasks with nonzero delay are posted with
+ // SKIP_ON_SHUTDOWN behavior and tasks with zero delay are posted with the
+ // given shutdown behavior.
+ scoped_refptr<TaskRunner> GetTaskRunnerWithShutdownBehavior(
+ WorkerShutdown shutdown_behavior);
+
+ // Posts the given task for execution in the worker pool. Tasks posted with
+ // this function will execute in an unspecified order on a background thread.
+ // Returns true if the task was posted. If your tasks have ordering
+ // requirements, see PostSequencedWorkerTask().
+ //
+ // This class will attempt to delete tasks that aren't run
+ // (non-block-shutdown semantics) but can't guarantee that this happens. If
+ // all worker threads are busy running CONTINUE_ON_SHUTDOWN tasks, there
+ // will be no workers available to delete these tasks. And there may be
+ // tasks with the same sequence token behind those CONTINUE_ON_SHUTDOWN
+ // tasks. Deleting those tasks before the previous one has completed could
+ // cause nondeterministic crashes because the task could be keeping some
+ // objects alive which do work in their destructor, which could violate the
+ // assumptions of the running task.
+ //
+ // The task will be guaranteed to run to completion before shutdown
+ // (BLOCK_SHUTDOWN semantics).
+ //
+ // Returns true if the task was posted successfully. This may fail during
+ // shutdown regardless of the specified ShutdownBehavior.
+ bool PostWorkerTask(const tracked_objects::Location& from_here,
+ const Closure& task);
+
+ // Same as PostWorkerTask but allows a delay to be specified (although doing
+ // so changes the shutdown behavior). The task will be run after the given
+ // delay has elapsed.
+ //
+ // If the delay is nonzero, the task won't be guaranteed to run to completion
+ // before shutdown (SKIP_ON_SHUTDOWN semantics) to avoid shutdown hangs.
+ // If the delay is zero, this behaves exactly like PostWorkerTask, i.e. the
+ // task will be guaranteed to run to completion before shutdown
+ // (BLOCK_SHUTDOWN semantics).
+ bool PostDelayedWorkerTask(const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay);
+
+ // Same as PostWorkerTask but allows specification of the shutdown behavior.
+ bool PostWorkerTaskWithShutdownBehavior(
+ const tracked_objects::Location& from_here,
+ const Closure& task,
+ WorkerShutdown shutdown_behavior);
+
+ // Like PostWorkerTask above, but provides sequencing semantics. This means
+ // that tasks posted with the same sequence token (see GetSequenceToken())
+ // are guaranteed to execute in order. This is useful in cases where you're
+ // doing operations that may depend on previous ones, like appending to a
+ // file.
+ //
+ // The task will be guaranteed to run to completion before shutdown
+ // (BLOCK_SHUTDOWN semantics).
+ //
+ // Returns true if the task was posted successfully. This may fail during
+ // shutdown regardless of the specified ShutdownBehavior.
+ bool PostSequencedWorkerTask(SequenceToken sequence_token,
+ const tracked_objects::Location& from_here,
+ const Closure& task);
+
+ // Like PostSequencedWorkerTask above, but allows you to specify a named
+ // token, which saves an extra call to GetNamedSequenceToken.
+ bool PostNamedSequencedWorkerTask(const std::string& token_name,
+ const tracked_objects::Location& from_here,
+ const Closure& task);
+
+ // Same as PostSequencedWorkerTask but allows a delay to be specified
+ // (although doing so changes the shutdown behavior). The task will be run
+ // after the given delay has elapsed.
+ //
+ // If the delay is nonzero, the task won't be guaranteed to run to completion
+ // before shutdown (SKIP_ON_SHUTDOWN semantics) to avoid shutdown hangs.
+ // If the delay is zero, this behaves exactly like PostSequencedWorkerTask,
+ // i.e. the task will be guaranteed to run to completion before shutdown
+ // (BLOCK_SHUTDOWN semantics).
+ bool PostDelayedSequencedWorkerTask(
+ SequenceToken sequence_token,
+ const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay);
+
+ // Same as PostSequencedWorkerTask but allows specification of the shutdown
+ // behavior.
+ bool PostSequencedWorkerTaskWithShutdownBehavior(
+ SequenceToken sequence_token,
+ const tracked_objects::Location& from_here,
+ const Closure& task,
+ WorkerShutdown shutdown_behavior);
+
+ // TaskRunner implementation. Forwards to PostDelayedWorkerTask().
+ bool PostDelayedTask(const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay) override;
+ bool RunsTasksOnCurrentThread() const override;
+
+ // Returns true if the current thread is processing a task with the given
+ // sequence_token.
+ bool IsRunningSequenceOnCurrentThread(SequenceToken sequence_token) const;
+
+ // Returns true if any thread is currently processing a task with the given
+ // sequence token. Should only be called with a valid sequence token.
+ bool IsRunningSequence(SequenceToken sequence_token) const;
+
+ // Blocks until all pending tasks are complete. This should only be called in
+ // unit tests when you want to validate something that should have happened.
+ // This will not flush delayed tasks; delayed tasks get deleted.
+ //
+ // Note that calling this will not prevent other threads from posting work to
+ // the queue while the calling thread is waiting on Flush(). In this case,
+ // Flush will return only when there's no more work in the queue. Normally,
+ // this doesn't come up since in a test, all the work is being posted from
+ // the main thread.
+ void FlushForTesting();
+
+ // Spuriously signal that there is work to be done.
+ void SignalHasWorkForTesting();
+
+ // Implements the worker pool shutdown. This should be called during app
+ // shutdown, and will discard/join with appropriate tasks before returning.
+ // After this call, subsequent calls to post tasks will fail.
+ //
+ // Must be called from the same thread this object was constructed on.
+ void Shutdown() { Shutdown(0); }
+
+ // A variant that allows an arbitrary number of new blocking tasks to be
+ // posted during shutdown. The tasks cannot be posted within the execution
+ // context of tasks whose shutdown behavior is not BLOCK_SHUTDOWN. Once
+ // the limit is reached, subsequent calls to post task fail in all cases.
+ // Must be called from the same thread this object was constructed on.
+ void Shutdown(int max_new_blocking_tasks_after_shutdown);
+
+ // Check if Shutdown was called for given threading pool. This method is used
+ // for aborting time consuming operation to avoid blocking shutdown.
+ //
+ // Can be called from any thread.
+ bool IsShutdownInProgress();
+
+ protected:
+ ~SequencedWorkerPool() override;
+
+ void OnDestruct() const override;
+
+ private:
+ friend class RefCountedThreadSafe<SequencedWorkerPool>;
+ friend class DeleteHelper<SequencedWorkerPool>;
+
+ class Inner;
+ class Worker;
+
+ const scoped_refptr<SingleThreadTaskRunner> constructor_task_runner_;
+
+ // Avoid pulling in too many headers by putting (almost) everything
+ // into |inner_|.
+ const std::unique_ptr<Inner> inner_;
+
+ DISALLOW_COPY_AND_ASSIGN(SequencedWorkerPool);
+};
+
+} // namespace base
+
+#endif // BASE_THREADING_SEQUENCED_WORKER_POOL_H_
diff --git a/libchrome/base/threading/simple_thread.cc b/libchrome/base/threading/simple_thread.cc
new file mode 100644
index 0000000..6c64a17
--- /dev/null
+++ b/libchrome/base/threading/simple_thread.cc
@@ -0,0 +1,174 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/simple_thread.h"
+
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+SimpleThread::SimpleThread(const std::string& name_prefix)
+ : name_prefix_(name_prefix),
+ name_(name_prefix),
+ thread_(),
+ event_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ tid_(0),
+ joined_(false) {}
+
+SimpleThread::SimpleThread(const std::string& name_prefix,
+ const Options& options)
+ : name_prefix_(name_prefix),
+ name_(name_prefix),
+ options_(options),
+ thread_(),
+ event_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ tid_(0),
+ joined_(false) {}
+
+SimpleThread::~SimpleThread() {
+ DCHECK(HasBeenStarted()) << "SimpleThread was never started.";
+ DCHECK(HasBeenJoined()) << "SimpleThread destroyed without being Join()ed.";
+}
+
+void SimpleThread::Start() {
+ DCHECK(!HasBeenStarted()) << "Tried to Start a thread multiple times.";
+ bool success;
+ if (options_.priority() == ThreadPriority::NORMAL) {
+ success = PlatformThread::Create(options_.stack_size(), this, &thread_);
+ } else {
+ success = PlatformThread::CreateWithPriority(options_.stack_size(), this,
+ &thread_, options_.priority());
+ }
+ DCHECK(success);
+ base::ThreadRestrictions::ScopedAllowWait allow_wait;
+ event_.Wait(); // Wait for the thread to complete initialization.
+}
+
+void SimpleThread::Join() {
+ DCHECK(HasBeenStarted()) << "Tried to Join a never-started thread.";
+ DCHECK(!HasBeenJoined()) << "Tried to Join a thread multiple times.";
+ PlatformThread::Join(thread_);
+ joined_ = true;
+}
+
+bool SimpleThread::HasBeenStarted() {
+ base::ThreadRestrictions::ScopedAllowWait allow_wait;
+ return event_.IsSignaled();
+}
+
+void SimpleThread::ThreadMain() {
+ tid_ = PlatformThread::CurrentId();
+ // Construct our full name of the form "name_prefix_/TID".
+ name_.push_back('/');
+ name_.append(IntToString(tid_));
+ PlatformThread::SetName(name_);
+
+ // We've initialized our new thread, signal that we're done to Start().
+ event_.Signal();
+
+ Run();
+}
+
+DelegateSimpleThread::DelegateSimpleThread(Delegate* delegate,
+ const std::string& name_prefix)
+ : SimpleThread(name_prefix),
+ delegate_(delegate) {
+}
+
+DelegateSimpleThread::DelegateSimpleThread(Delegate* delegate,
+ const std::string& name_prefix,
+ const Options& options)
+ : SimpleThread(name_prefix, options),
+ delegate_(delegate) {
+}
+
+DelegateSimpleThread::~DelegateSimpleThread() {
+}
+
+void DelegateSimpleThread::Run() {
+ DCHECK(delegate_) << "Tried to call Run without a delegate (called twice?)";
+ delegate_->Run();
+ delegate_ = NULL;
+}
+
+DelegateSimpleThreadPool::DelegateSimpleThreadPool(
+ const std::string& name_prefix,
+ int num_threads)
+ : name_prefix_(name_prefix),
+ num_threads_(num_threads),
+ dry_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+DelegateSimpleThreadPool::~DelegateSimpleThreadPool() {
+ DCHECK(threads_.empty());
+ DCHECK(delegates_.empty());
+ DCHECK(!dry_.IsSignaled());
+}
+
+void DelegateSimpleThreadPool::Start() {
+ DCHECK(threads_.empty()) << "Start() called with outstanding threads.";
+ for (int i = 0; i < num_threads_; ++i) {
+ DelegateSimpleThread* thread = new DelegateSimpleThread(this, name_prefix_);
+ thread->Start();
+ threads_.push_back(thread);
+ }
+}
+
+void DelegateSimpleThreadPool::JoinAll() {
+ DCHECK(!threads_.empty()) << "JoinAll() called with no outstanding threads.";
+
+ // Tell all our threads to quit their worker loop.
+ AddWork(NULL, num_threads_);
+
+ // Join and destroy all the worker threads.
+ for (int i = 0; i < num_threads_; ++i) {
+ threads_[i]->Join();
+ delete threads_[i];
+ }
+ threads_.clear();
+ DCHECK(delegates_.empty());
+}
+
+void DelegateSimpleThreadPool::AddWork(Delegate* delegate, int repeat_count) {
+ AutoLock locked(lock_);
+ for (int i = 0; i < repeat_count; ++i)
+ delegates_.push(delegate);
+ // If we were empty, signal that we have work now.
+ if (!dry_.IsSignaled())
+ dry_.Signal();
+}
+
+void DelegateSimpleThreadPool::Run() {
+ Delegate* work = NULL;
+
+ while (true) {
+ dry_.Wait();
+ {
+ AutoLock locked(lock_);
+ if (!dry_.IsSignaled())
+ continue;
+
+ DCHECK(!delegates_.empty());
+ work = delegates_.front();
+ delegates_.pop();
+
+ // Signal to any other threads that we're currently out of work.
+ if (delegates_.empty())
+ dry_.Reset();
+ }
+
+ // A NULL delegate pointer signals us to quit.
+ if (!work)
+ break;
+
+ work->Run();
+ }
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/simple_thread.h b/libchrome/base/threading/simple_thread.h
new file mode 100644
index 0000000..3deeb10
--- /dev/null
+++ b/libchrome/base/threading/simple_thread.h
@@ -0,0 +1,193 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// WARNING: You should probably be using Thread (thread.h) instead. Thread is
+// Chrome's message-loop based Thread abstraction, and if you are a
+// thread running in the browser, there will likely be assumptions
+// that your thread will have an associated message loop.
+//
+// This is a simple thread interface that backs to a native operating system
+// thread. You should use this only when you want a thread that does not have
+// an associated MessageLoop. Unittesting is the best example of this.
+//
+// The simplest interface to use is DelegateSimpleThread, which will create
+// a new thread, and execute the Delegate's virtual Run() in this new thread
+// until it has completed, exiting the thread.
+//
+// NOTE: You *MUST* call Join on the thread to clean up the underlying thread
+// resources. You are also responsible for destructing the SimpleThread object.
+// It is invalid to destroy a SimpleThread while it is running, or without
+// Start() having been called (and a thread never created). The Delegate
+// object should live as long as a DelegateSimpleThread.
+//
+// Thread Safety: A SimpleThread is not completely thread safe. It is safe to
+// access it from the creating thread or from the newly created thread. This
+// implies that the creator thread should be the thread that calls Join.
+//
+// Example:
+// class MyThreadRunner : public DelegateSimpleThread::Delegate { ... };
+// MyThreadRunner runner;
+// DelegateSimpleThread thread(&runner, "good_name_here");
+// thread.Start();
+// // Start will return after the Thread has been successfully started and
+// // initialized. The newly created thread will invoke runner->Run(), and
+// // run until it returns.
+// thread.Join(); // Wait until the thread has exited. You *MUST* Join!
+// // The SimpleThread object is still valid, however you may not call Join
+// // or Start again.
+
+#ifndef BASE_THREADING_SIMPLE_THREAD_H_
+#define BASE_THREADING_SIMPLE_THREAD_H_
+
+#include <stddef.h>
+
+#include <queue>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+// This is the base SimpleThread. You can derive from it and implement the
+// virtual Run method, or you can use the DelegateSimpleThread interface.
+class BASE_EXPORT SimpleThread : public PlatformThread::Delegate {
+ public:
+ class BASE_EXPORT Options {
+ public:
+ Options() : stack_size_(0), priority_(ThreadPriority::NORMAL) {}
+ explicit Options(ThreadPriority priority)
+ : stack_size_(0), priority_(priority) {}
+ ~Options() {}
+
+ // We use the standard compiler-supplied copy constructor.
+
+ // A custom stack size, or 0 for the system default.
+ void set_stack_size(size_t size) { stack_size_ = size; }
+ size_t stack_size() const { return stack_size_; }
+
+ // A custom thread priority.
+ void set_priority(ThreadPriority priority) { priority_ = priority; }
+ ThreadPriority priority() const { return priority_; }
+ private:
+ size_t stack_size_;
+ ThreadPriority priority_;
+ };
+
+ // Create a SimpleThread. |options| should be used to manage any specific
+ // configuration involving the thread creation and management.
+ // Every thread has a name, in the form of |name_prefix|/TID, for example
+ // "my_thread/321". The thread will not be created until Start() is called.
+ explicit SimpleThread(const std::string& name_prefix);
+ SimpleThread(const std::string& name_prefix, const Options& options);
+
+ ~SimpleThread() override;
+
+ virtual void Start();
+ virtual void Join();
+
+ // Subclasses should override the Run method.
+ virtual void Run() = 0;
+
+ // Return the thread name prefix, or "unnamed" if none was supplied.
+ std::string name_prefix() { return name_prefix_; }
+
+ // Return the completed name including TID, only valid after Start().
+ std::string name() { return name_; }
+
+ // Return the thread id, only valid after Start().
+ PlatformThreadId tid() { return tid_; }
+
+ // Return True if Start() has ever been called.
+ bool HasBeenStarted();
+
+ // Return True if Join() has ever been called.
+ bool HasBeenJoined() { return joined_; }
+
+ // Overridden from PlatformThread::Delegate:
+ void ThreadMain() override;
+
+ private:
+ const std::string name_prefix_;
+ std::string name_;
+ const Options options_;
+ PlatformThreadHandle thread_; // PlatformThread handle, invalid after Join!
+ WaitableEvent event_; // Signaled if Start() was ever called.
+ PlatformThreadId tid_; // The backing thread's id.
+ bool joined_; // True if Join has been called.
+};
+
+class BASE_EXPORT DelegateSimpleThread : public SimpleThread {
+ public:
+ class BASE_EXPORT Delegate {
+ public:
+ Delegate() { }
+ virtual ~Delegate() { }
+ virtual void Run() = 0;
+ };
+
+ DelegateSimpleThread(Delegate* delegate,
+ const std::string& name_prefix);
+ DelegateSimpleThread(Delegate* delegate,
+ const std::string& name_prefix,
+ const Options& options);
+
+ ~DelegateSimpleThread() override;
+ void Run() override;
+
+ private:
+ Delegate* delegate_;
+};
+
+// DelegateSimpleThreadPool allows you to start up a fixed number of threads,
+// and then add jobs which will be dispatched to the threads. This is
+// convenient when you have a lot of small work that you want done
+// multi-threaded, but don't want to spawn a thread for each small bit of work.
+//
+// You just call AddWork() to add a delegate to the list of work to be done.
+// JoinAll() will make sure that all outstanding work is processed, and wait
+// for everything to finish. You can reuse a pool, so you can call Start()
+// again after you've called JoinAll().
+class BASE_EXPORT DelegateSimpleThreadPool
+ : public DelegateSimpleThread::Delegate {
+ public:
+ typedef DelegateSimpleThread::Delegate Delegate;
+
+ DelegateSimpleThreadPool(const std::string& name_prefix, int num_threads);
+ ~DelegateSimpleThreadPool() override;
+
+ // Start up all of the underlying threads, and start processing work if we
+ // have any.
+ void Start();
+
+ // Make sure all outstanding work is finished, and wait for and destroy all
+ // of the underlying threads in the pool.
+ void JoinAll();
+
+ // It is safe to AddWork() any time, before or after Start().
+ // Delegate* should always be a valid pointer, NULL is reserved internally.
+ void AddWork(Delegate* work, int repeat_count);
+ void AddWork(Delegate* work) {
+ AddWork(work, 1);
+ }
+
+ // We implement the Delegate interface, for running our internal threads.
+ void Run() override;
+
+ private:
+ const std::string name_prefix_;
+ int num_threads_;
+ std::vector<DelegateSimpleThread*> threads_;
+ std::queue<Delegate*> delegates_;
+ base::Lock lock_; // Locks delegates_
+ WaitableEvent dry_; // Not signaled when there is no work to do.
+};
+
+} // namespace base
+
+#endif // BASE_THREADING_SIMPLE_THREAD_H_
diff --git a/libchrome/base/threading/simple_thread_unittest.cc b/libchrome/base/threading/simple_thread_unittest.cc
new file mode 100644
index 0000000..14dd459
--- /dev/null
+++ b/libchrome/base/threading/simple_thread_unittest.cc
@@ -0,0 +1,169 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/atomic_sequence_num.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/simple_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class SetIntRunner : public DelegateSimpleThread::Delegate {
+ public:
+ SetIntRunner(int* ptr, int val) : ptr_(ptr), val_(val) { }
+ ~SetIntRunner() override {}
+
+ void Run() override { *ptr_ = val_; }
+
+ private:
+ int* ptr_;
+ int val_;
+};
+
+class WaitEventRunner : public DelegateSimpleThread::Delegate {
+ public:
+ explicit WaitEventRunner(WaitableEvent* event) : event_(event) { }
+ ~WaitEventRunner() override {}
+
+ void Run() override {
+ EXPECT_FALSE(event_->IsSignaled());
+ event_->Signal();
+ EXPECT_TRUE(event_->IsSignaled());
+ }
+ private:
+ WaitableEvent* event_;
+};
+
+class SeqRunner : public DelegateSimpleThread::Delegate {
+ public:
+ explicit SeqRunner(AtomicSequenceNumber* seq) : seq_(seq) { }
+ void Run() override { seq_->GetNext(); }
+
+ private:
+ AtomicSequenceNumber* seq_;
+};
+
+// We count up on a sequence number, firing on the event when we've hit our
+// expected amount, otherwise we wait on the event. This will ensure that we
+// have all threads outstanding until we hit our expected thread pool size.
+class VerifyPoolRunner : public DelegateSimpleThread::Delegate {
+ public:
+ VerifyPoolRunner(AtomicSequenceNumber* seq,
+ int total, WaitableEvent* event)
+ : seq_(seq), total_(total), event_(event) { }
+
+ void Run() override {
+ if (seq_->GetNext() == total_) {
+ event_->Signal();
+ } else {
+ event_->Wait();
+ }
+ }
+
+ private:
+ AtomicSequenceNumber* seq_;
+ int total_;
+ WaitableEvent* event_;
+};
+
+} // namespace
+
+TEST(SimpleThreadTest, CreateAndJoin) {
+ int stack_int = 0;
+
+ SetIntRunner runner(&stack_int, 7);
+ EXPECT_EQ(0, stack_int);
+
+ DelegateSimpleThread thread(&runner, "int_setter");
+ EXPECT_FALSE(thread.HasBeenStarted());
+ EXPECT_FALSE(thread.HasBeenJoined());
+ EXPECT_EQ(0, stack_int);
+
+ thread.Start();
+ EXPECT_TRUE(thread.HasBeenStarted());
+ EXPECT_FALSE(thread.HasBeenJoined());
+
+ thread.Join();
+ EXPECT_TRUE(thread.HasBeenStarted());
+ EXPECT_TRUE(thread.HasBeenJoined());
+ EXPECT_EQ(7, stack_int);
+}
+
+TEST(SimpleThreadTest, WaitForEvent) {
+ // Create a thread, and wait for it to signal us.
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+
+ WaitEventRunner runner(&event);
+ DelegateSimpleThread thread(&runner, "event_waiter");
+
+ EXPECT_FALSE(event.IsSignaled());
+ thread.Start();
+ event.Wait();
+ EXPECT_TRUE(event.IsSignaled());
+ thread.Join();
+}
+
+TEST(SimpleThreadTest, NamedWithOptions) {
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+
+ WaitEventRunner runner(&event);
+ SimpleThread::Options options;
+ DelegateSimpleThread thread(&runner, "event_waiter", options);
+ EXPECT_EQ(thread.name_prefix(), "event_waiter");
+ EXPECT_FALSE(event.IsSignaled());
+
+ thread.Start();
+ EXPECT_EQ(thread.name_prefix(), "event_waiter");
+ EXPECT_EQ(thread.name(),
+ std::string("event_waiter/") + IntToString(thread.tid()));
+ event.Wait();
+
+ EXPECT_TRUE(event.IsSignaled());
+ thread.Join();
+
+ // We keep the name and tid, even after the thread is gone.
+ EXPECT_EQ(thread.name_prefix(), "event_waiter");
+ EXPECT_EQ(thread.name(),
+ std::string("event_waiter/") + IntToString(thread.tid()));
+}
+
+TEST(SimpleThreadTest, ThreadPool) {
+ AtomicSequenceNumber seq;
+ SeqRunner runner(&seq);
+ DelegateSimpleThreadPool pool("seq_runner", 10);
+
+ // Add work before we're running.
+ pool.AddWork(&runner, 300);
+
+ EXPECT_EQ(seq.GetNext(), 0);
+ pool.Start();
+
+ // Add work while we're running.
+ pool.AddWork(&runner, 300);
+
+ pool.JoinAll();
+
+ EXPECT_EQ(seq.GetNext(), 601);
+
+ // We can reuse our pool. Verify that all 10 threads can actually run in
+ // parallel, so this test will only pass if there are actually 10 threads.
+ AtomicSequenceNumber seq2;
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ // Changing 9 to 10, for example, would cause us JoinAll() to never return.
+ VerifyPoolRunner verifier(&seq2, 9, &event);
+ pool.Start();
+
+ pool.AddWork(&verifier, 10);
+
+ pool.JoinAll();
+ EXPECT_EQ(seq2.GetNext(), 10);
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/thread.cc b/libchrome/base/threading/thread.cc
new file mode 100644
index 0000000..9cdc691
--- /dev/null
+++ b/libchrome/base/threading/thread.cc
@@ -0,0 +1,280 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread.h"
+
+#include "base/bind.h"
+#include "base/lazy_instance.h"
+#include "base/location.h"
+#include "base/run_loop.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "base/threading/thread_local.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_com_initializer.h"
+#endif
+
+namespace base {
+
+namespace {
+
+// We use this thread-local variable to record whether or not a thread exited
+// because its Stop method was called. This allows us to catch cases where
+// MessageLoop::QuitWhenIdle() is called directly, which is unexpected when
+// using a Thread to setup and run a MessageLoop.
+base::LazyInstance<base::ThreadLocalBoolean> lazy_tls_bool =
+ LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+// This is used to trigger the message loop to exit.
+void ThreadQuitHelper() {
+ MessageLoop::current()->QuitWhenIdle();
+ Thread::SetThreadWasQuitProperly(true);
+}
+
+Thread::Options::Options()
+ : message_loop_type(MessageLoop::TYPE_DEFAULT),
+ timer_slack(TIMER_SLACK_NONE),
+ stack_size(0),
+ priority(ThreadPriority::NORMAL) {
+}
+
+Thread::Options::Options(MessageLoop::Type type,
+ size_t size)
+ : message_loop_type(type),
+ timer_slack(TIMER_SLACK_NONE),
+ stack_size(size),
+ priority(ThreadPriority::NORMAL) {
+}
+
+Thread::Options::Options(const Options& other) = default;
+
+Thread::Options::~Options() {
+}
+
+Thread::Thread(const std::string& name)
+ :
+#if defined(OS_WIN)
+ com_status_(NONE),
+#endif
+ stopping_(false),
+ running_(false),
+ thread_(0),
+ id_(kInvalidThreadId),
+ id_event_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ message_loop_(nullptr),
+ message_loop_timer_slack_(TIMER_SLACK_NONE),
+ name_(name),
+ start_event_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {
+}
+
+Thread::~Thread() {
+ Stop();
+}
+
+bool Thread::Start() {
+ Options options;
+#if defined(OS_WIN)
+ if (com_status_ == STA)
+ options.message_loop_type = MessageLoop::TYPE_UI;
+#endif
+ return StartWithOptions(options);
+}
+
+bool Thread::StartWithOptions(const Options& options) {
+ DCHECK(!message_loop_);
+#if defined(OS_WIN)
+ DCHECK((com_status_ != STA) ||
+ (options.message_loop_type == MessageLoop::TYPE_UI));
+#endif
+
+ // Reset |id_| here to support restarting the thread.
+ id_event_.Reset();
+ id_ = kInvalidThreadId;
+
+ SetThreadWasQuitProperly(false);
+
+ MessageLoop::Type type = options.message_loop_type;
+ if (!options.message_pump_factory.is_null())
+ type = MessageLoop::TYPE_CUSTOM;
+
+ message_loop_timer_slack_ = options.timer_slack;
+ std::unique_ptr<MessageLoop> message_loop =
+ MessageLoop::CreateUnbound(type, options.message_pump_factory);
+ message_loop_ = message_loop.get();
+ start_event_.Reset();
+
+ // Hold the thread_lock_ while starting a new thread, so that we can make sure
+ // that thread_ is populated before the newly created thread accesses it.
+ {
+ AutoLock lock(thread_lock_);
+ if (!PlatformThread::CreateWithPriority(options.stack_size, this, &thread_,
+ options.priority)) {
+ DLOG(ERROR) << "failed to create thread";
+ message_loop_ = nullptr;
+ return false;
+ }
+ }
+
+ // The ownership of message_loop is managed by the newly created thread
+ // within the ThreadMain.
+ ignore_result(message_loop.release());
+
+ DCHECK(message_loop_);
+ return true;
+}
+
+bool Thread::StartAndWaitForTesting() {
+ bool result = Start();
+ if (!result)
+ return false;
+ WaitUntilThreadStarted();
+ return true;
+}
+
+bool Thread::WaitUntilThreadStarted() const {
+ if (!message_loop_)
+ return false;
+ base::ThreadRestrictions::ScopedAllowWait allow_wait;
+ start_event_.Wait();
+ return true;
+}
+
+void Thread::Stop() {
+ AutoLock lock(thread_lock_);
+ if (thread_.is_null())
+ return;
+
+ StopSoon();
+
+ // Wait for the thread to exit.
+ //
+ // TODO(darin): Unfortunately, we need to keep message_loop_ around until
+ // the thread exits. Some consumers are abusing the API. Make them stop.
+ //
+ PlatformThread::Join(thread_);
+ thread_ = base::PlatformThreadHandle();
+
+ // The thread should nullify message_loop_ on exit.
+ DCHECK(!message_loop_);
+
+ stopping_ = false;
+}
+
+void Thread::StopSoon() {
+ // We should only be called on the same thread that started us.
+
+ DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
+
+ if (stopping_ || !message_loop_)
+ return;
+
+ stopping_ = true;
+ task_runner()->PostTask(FROM_HERE, base::Bind(&ThreadQuitHelper));
+}
+
+PlatformThreadId Thread::GetThreadId() const {
+ // If the thread is created but not started yet, wait for |id_| being ready.
+ base::ThreadRestrictions::ScopedAllowWait allow_wait;
+ id_event_.Wait();
+ return id_;
+}
+
+bool Thread::IsRunning() const {
+ // If the thread's already started (i.e. message_loop_ is non-null) and
+ // not yet requested to stop (i.e. stopping_ is false) we can just return
+ // true. (Note that stopping_ is touched only on the same thread that
+ // starts / started the new thread so we need no locking here.)
+ if (message_loop_ && !stopping_)
+ return true;
+ // Otherwise check the running_ flag, which is set to true by the new thread
+ // only while it is inside Run().
+ AutoLock lock(running_lock_);
+ return running_;
+}
+
+void Thread::Run(MessageLoop*) {
+ RunLoop().Run();
+}
+
+void Thread::SetThreadWasQuitProperly(bool flag) {
+ lazy_tls_bool.Pointer()->Set(flag);
+}
+
+bool Thread::GetThreadWasQuitProperly() {
+ bool quit_properly = true;
+#ifndef NDEBUG
+ quit_properly = lazy_tls_bool.Pointer()->Get();
+#endif
+ return quit_properly;
+}
+
+void Thread::ThreadMain() {
+ // First, make GetThreadId() available to avoid deadlocks. It could be called
+ // any place in the following thread initialization code.
+ id_ = PlatformThread::CurrentId();
+ DCHECK_NE(kInvalidThreadId, id_);
+ id_event_.Signal();
+
+ // Complete the initialization of our Thread object.
+ PlatformThread::SetName(name_.c_str());
+
+ // Lazily initialize the message_loop so that it can run on this thread.
+ DCHECK(message_loop_);
+ std::unique_ptr<MessageLoop> message_loop(message_loop_);
+ message_loop_->BindToCurrentThread();
+ message_loop_->SetTimerSlack(message_loop_timer_slack_);
+
+#if defined(OS_WIN)
+ std::unique_ptr<win::ScopedCOMInitializer> com_initializer;
+ if (com_status_ != NONE) {
+ com_initializer.reset((com_status_ == STA) ?
+ new win::ScopedCOMInitializer() :
+ new win::ScopedCOMInitializer(win::ScopedCOMInitializer::kMTA));
+ }
+#endif
+
+ // Let the thread do extra initialization.
+ Init();
+
+ {
+ AutoLock lock(running_lock_);
+ running_ = true;
+ }
+
+ start_event_.Signal();
+
+ Run(message_loop_);
+
+ {
+ AutoLock lock(running_lock_);
+ running_ = false;
+ }
+
+ // Let the thread do extra cleanup.
+ CleanUp();
+
+#if defined(OS_WIN)
+ com_initializer.reset();
+#endif
+
+ if (message_loop->type() != MessageLoop::TYPE_CUSTOM) {
+ // Assert that MessageLoop::QuitWhenIdle was called by ThreadQuitHelper.
+ // Don't check for custom message pumps, because their shutdown might not
+ // allow this.
+ DCHECK(GetThreadWasQuitProperly());
+ }
+
+ // We can't receive messages anymore.
+ // (The message loop is destructed at the end of this block)
+ message_loop_ = nullptr;
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/thread.h b/libchrome/base/threading/thread.h
new file mode 100644
index 0000000..c9a77d7
--- /dev/null
+++ b/libchrome/base/threading/thread.h
@@ -0,0 +1,262 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_H_
+#define BASE_THREADING_THREAD_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/timer_slack.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class MessagePump;
+
+// A simple thread abstraction that establishes a MessageLoop on a new thread.
+// The consumer uses the MessageLoop of the thread to cause code to execute on
+// the thread. When this object is destroyed the thread is terminated. All
+// pending tasks queued on the thread's message loop will run to completion
+// before the thread is terminated.
+//
+// WARNING! SUBCLASSES MUST CALL Stop() IN THEIR DESTRUCTORS! See ~Thread().
+//
+// After the thread is stopped, the destruction sequence is:
+//
+// (1) Thread::CleanUp()
+// (2) MessageLoop::~MessageLoop
+// (3.b) MessageLoop::DestructionObserver::WillDestroyCurrentMessageLoop
+class BASE_EXPORT Thread : PlatformThread::Delegate {
+ public:
+ struct BASE_EXPORT Options {
+ typedef Callback<std::unique_ptr<MessagePump>()> MessagePumpFactory;
+
+ Options();
+ Options(MessageLoop::Type type, size_t size);
+ Options(const Options& other);
+ ~Options();
+
+ // Specifies the type of message loop that will be allocated on the thread.
+ // This is ignored if message_pump_factory.is_null() is false.
+ MessageLoop::Type message_loop_type;
+
+ // Specifies timer slack for thread message loop.
+ TimerSlack timer_slack;
+
+ // Used to create the MessagePump for the MessageLoop. The callback is Run()
+ // on the thread. If message_pump_factory.is_null(), then a MessagePump
+ // appropriate for |message_loop_type| is created. Setting this forces the
+ // MessageLoop::Type to TYPE_CUSTOM.
+ MessagePumpFactory message_pump_factory;
+
+ // Specifies the maximum stack size that the thread is allowed to use.
+ // This does not necessarily correspond to the thread's initial stack size.
+ // A value of 0 indicates that the default maximum should be used.
+ size_t stack_size;
+
+ // Specifies the initial thread priority.
+ ThreadPriority priority;
+ };
+
+ // Constructor.
+ // name is a display string to identify the thread.
+ explicit Thread(const std::string& name);
+
+ // Destroys the thread, stopping it if necessary.
+ //
+ // NOTE: ALL SUBCLASSES OF Thread MUST CALL Stop() IN THEIR DESTRUCTORS (or
+ // guarantee Stop() is explicitly called before the subclass is destroyed).
+ // This is required to avoid a data race between the destructor modifying the
+ // vtable, and the thread's ThreadMain calling the virtual method Run(). It
+ // also ensures that the CleanUp() virtual method is called on the subclass
+ // before it is destructed.
+ ~Thread() override;
+
+#if defined(OS_WIN)
+ // Causes the thread to initialize COM. This must be called before calling
+ // Start() or StartWithOptions(). If |use_mta| is false, the thread is also
+ // started with a TYPE_UI message loop. It is an error to call
+ // init_com_with_mta(false) and then StartWithOptions() with any message loop
+ // type other than TYPE_UI.
+ void init_com_with_mta(bool use_mta) {
+ DCHECK(!message_loop_);
+ com_status_ = use_mta ? MTA : STA;
+ }
+#endif
+
+ // Starts the thread. Returns true if the thread was successfully started;
+ // otherwise, returns false. Upon successful return, the message_loop()
+ // getter will return non-null.
+ //
+ // Note: This function can't be called on Windows with the loader lock held;
+ // i.e. during a DllMain, global object construction or destruction, atexit()
+ // callback.
+ bool Start();
+
+ // Starts the thread. Behaves exactly like Start in addition to allow to
+ // override the default options.
+ //
+ // Note: This function can't be called on Windows with the loader lock held;
+ // i.e. during a DllMain, global object construction or destruction, atexit()
+ // callback.
+ bool StartWithOptions(const Options& options);
+
+ // Starts the thread and wait for the thread to start and run initialization
+ // before returning. It's same as calling Start() and then
+ // WaitUntilThreadStarted().
+ // Note that using this (instead of Start() or StartWithOptions()) causes
+ // jank on the calling thread, should be used only in testing code.
+ bool StartAndWaitForTesting();
+
+ // Blocks until the thread starts running. Called within StartAndWait().
+ // Note that calling this causes jank on the calling thread, must be used
+ // carefully for production code.
+ bool WaitUntilThreadStarted() const;
+
+ // Signals the thread to exit and returns once the thread has exited. After
+ // this method returns, the Thread object is completely reset and may be used
+ // as if it were newly constructed (i.e., Start may be called again).
+ //
+ // Stop may be called multiple times and is simply ignored if the thread is
+ // already stopped.
+ //
+ // NOTE: If you are a consumer of Thread, it is not necessary to call this
+ // before deleting your Thread objects, as the destructor will do it.
+ // IF YOU ARE A SUBCLASS OF Thread, YOU MUST CALL THIS IN YOUR DESTRUCTOR.
+ void Stop();
+
+ // Signals the thread to exit in the near future.
+ //
+ // WARNING: This function is not meant to be commonly used. Use at your own
+ // risk. Calling this function will cause message_loop() to become invalid in
+ // the near future. This function was created to workaround a specific
+ // deadlock on Windows with printer worker thread. In any other case, Stop()
+ // should be used.
+ //
+ // StopSoon should not be called multiple times as it is risky to do so. It
+ // could cause a timing issue in message_loop() access. Call Stop() to reset
+ // the thread object once it is known that the thread has quit.
+ void StopSoon();
+
+ // Returns the message loop for this thread. Use the MessageLoop's
+ // PostTask methods to execute code on the thread. This only returns
+ // non-null after a successful call to Start. After Stop has been called,
+ // this will return nullptr.
+ //
+ // NOTE: You must not call this MessageLoop's Quit method directly. Use
+ // the Thread's Stop method instead.
+ //
+ MessageLoop* message_loop() const { return message_loop_; }
+
+ // Returns a TaskRunner for this thread. Use the TaskRunner's PostTask
+ // methods to execute code on the thread. Returns nullptr if the thread is not
+ // running (e.g. before Start or after Stop have been called). Callers can
+ // hold on to this even after the thread is gone; in this situation, attempts
+ // to PostTask() will fail.
+ scoped_refptr<SingleThreadTaskRunner> task_runner() const {
+ return message_loop_ ? message_loop_->task_runner() : nullptr;
+ }
+
+ // Returns the name of this thread (for display in debugger too).
+ const std::string& thread_name() const { return name_; }
+
+ // The native thread handle.
+ PlatformThreadHandle thread_handle() { return thread_; }
+
+ // Returns the thread ID. Should not be called before the first Start*()
+ // call. Keeps on returning the same ID even after a Stop() call. The next
+ // Start*() call renews the ID.
+ //
+ // WARNING: This function will block if the thread hasn't started yet.
+ //
+ PlatformThreadId GetThreadId() const;
+
+ // Returns true if the thread has been started, and not yet stopped.
+ bool IsRunning() const;
+
+ protected:
+ // Called just prior to starting the message loop
+ virtual void Init() {}
+
+ // Called to start the message loop
+ virtual void Run(MessageLoop* message_loop);
+
+ // Called just after the message loop ends
+ virtual void CleanUp() {}
+
+ static void SetThreadWasQuitProperly(bool flag);
+ static bool GetThreadWasQuitProperly();
+
+ void set_message_loop(MessageLoop* message_loop) {
+ message_loop_ = message_loop;
+ }
+
+ private:
+#if defined(OS_WIN)
+ enum ComStatus {
+ NONE,
+ STA,
+ MTA,
+ };
+#endif
+
+ // PlatformThread::Delegate methods:
+ void ThreadMain() override;
+
+#if defined(OS_WIN)
+ // Whether this thread needs to initialize COM, and if so, in what mode.
+ ComStatus com_status_;
+#endif
+
+ // If true, we're in the middle of stopping, and shouldn't access
+ // |message_loop_|. It may be non-nullptr but invalid.
+ // Should only be written on the thread that created this thread; reads
+ // from other threads may observe a stale value.
+ bool stopping_;
+
+ // True while inside of Run().
+ bool running_;
+ mutable base::Lock running_lock_; // Protects |running_|.
+
+ // The thread's handle.
+ PlatformThreadHandle thread_;
+ mutable base::Lock thread_lock_; // Protects |thread_|.
+
+ // The thread's id once it has started.
+ PlatformThreadId id_;
+ mutable WaitableEvent id_event_; // Protects |id_|.
+
+ // The thread's message loop. Valid only while the thread is alive. Set
+ // by the created thread.
+ MessageLoop* message_loop_;
+
+ // Stores Options::timer_slack_ until the message loop has been bound to
+ // a thread.
+ TimerSlack message_loop_timer_slack_;
+
+ // The name of the thread. Used for debugging purposes.
+ std::string name_;
+
+ // Signaled when the created thread gets ready to use the message loop.
+ mutable WaitableEvent start_event_;
+
+ friend void ThreadQuitHelper();
+
+ DISALLOW_COPY_AND_ASSIGN(Thread);
+};
+
+} // namespace base
+
+#endif // BASE_THREADING_THREAD_H_
diff --git a/libchrome/base/threading/thread_checker.h b/libchrome/base/threading/thread_checker.h
new file mode 100644
index 0000000..1d970f0
--- /dev/null
+++ b/libchrome/base/threading/thread_checker.h
@@ -0,0 +1,79 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_CHECKER_H_
+#define BASE_THREADING_THREAD_CHECKER_H_
+
+#include "base/logging.h"
+#include "base/threading/thread_checker_impl.h"
+
+// Apart from debug builds, we also enable the thread checker in
+// builds with DCHECK_ALWAYS_ON so that trybots and waterfall bots
+// with this define will get the same level of thread checking as
+// debug bots.
+#if DCHECK_IS_ON()
+#define ENABLE_THREAD_CHECKER 1
+#else
+#define ENABLE_THREAD_CHECKER 0
+#endif
+
+namespace base {
+
+// Do nothing implementation, for use in release mode.
+//
+// Note: You should almost always use the ThreadChecker class to get the
+// right version for your build configuration.
+class ThreadCheckerDoNothing {
+ public:
+ bool CalledOnValidThread() const WARN_UNUSED_RESULT {
+ return true;
+ }
+
+ void DetachFromThread() {}
+};
+
+// ThreadChecker is a helper class used to help verify that some methods of a
+// class are called from the same thread. It provides identical functionality to
+// base::NonThreadSafe, but it is meant to be held as a member variable, rather
+// than inherited from base::NonThreadSafe.
+//
+// While inheriting from base::NonThreadSafe may give a clear indication about
+// the thread-safety of a class, it may also lead to violations of the style
+// guide with regard to multiple inheritance. The choice between having a
+// ThreadChecker member and inheriting from base::NonThreadSafe should be based
+// on whether:
+// - Derived classes need to know the thread they belong to, as opposed to
+// having that functionality fully encapsulated in the base class.
+// - Derived classes should be able to reassign the base class to another
+// thread, via DetachFromThread.
+//
+// If neither of these are true, then having a ThreadChecker member and calling
+// CalledOnValidThread is the preferable solution.
+//
+// Example:
+// class MyClass {
+// public:
+// void Foo() {
+// DCHECK(thread_checker_.CalledOnValidThread());
+// ... (do stuff) ...
+// }
+//
+// private:
+// ThreadChecker thread_checker_;
+// }
+//
+// In Release mode, CalledOnValidThread will always return true.
+#if ENABLE_THREAD_CHECKER
+class ThreadChecker : public ThreadCheckerImpl {
+};
+#else
+class ThreadChecker : public ThreadCheckerDoNothing {
+};
+#endif // ENABLE_THREAD_CHECKER
+
+#undef ENABLE_THREAD_CHECKER
+
+} // namespace base
+
+#endif // BASE_THREADING_THREAD_CHECKER_H_
diff --git a/libchrome/base/threading/thread_checker_impl.cc b/libchrome/base/threading/thread_checker_impl.cc
new file mode 100644
index 0000000..eb87bae
--- /dev/null
+++ b/libchrome/base/threading/thread_checker_impl.cc
@@ -0,0 +1,34 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_checker_impl.h"
+
+namespace base {
+
+ThreadCheckerImpl::ThreadCheckerImpl()
+ : valid_thread_id_() {
+ EnsureThreadIdAssigned();
+}
+
+ThreadCheckerImpl::~ThreadCheckerImpl() {}
+
+bool ThreadCheckerImpl::CalledOnValidThread() const {
+ EnsureThreadIdAssigned();
+ AutoLock auto_lock(lock_);
+ return valid_thread_id_ == PlatformThread::CurrentRef();
+}
+
+void ThreadCheckerImpl::DetachFromThread() {
+ AutoLock auto_lock(lock_);
+ valid_thread_id_ = PlatformThreadRef();
+}
+
+void ThreadCheckerImpl::EnsureThreadIdAssigned() const {
+ AutoLock auto_lock(lock_);
+ if (valid_thread_id_.is_null()) {
+ valid_thread_id_ = PlatformThread::CurrentRef();
+ }
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/thread_checker_impl.h b/libchrome/base/threading/thread_checker_impl.h
new file mode 100644
index 0000000..c92e143
--- /dev/null
+++ b/libchrome/base/threading/thread_checker_impl.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_CHECKER_IMPL_H_
+#define BASE_THREADING_THREAD_CHECKER_IMPL_H_
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+// Real implementation of ThreadChecker, for use in debug mode, or
+// for temporary use in release mode (e.g. to CHECK on a threading issue
+// seen only in the wild).
+//
+// Note: You should almost always use the ThreadChecker class to get the
+// right version for your build configuration.
+class BASE_EXPORT ThreadCheckerImpl {
+ public:
+ ThreadCheckerImpl();
+ ~ThreadCheckerImpl();
+
+ bool CalledOnValidThread() const WARN_UNUSED_RESULT;
+
+ // Changes the thread that is checked for in CalledOnValidThread. This may
+ // be useful when an object may be created on one thread and then used
+ // exclusively on another thread.
+ void DetachFromThread();
+
+ private:
+ void EnsureThreadIdAssigned() const;
+
+ mutable base::Lock lock_;
+ // This is mutable so that CalledOnValidThread can set it.
+ // It's guarded by |lock_|.
+ mutable PlatformThreadRef valid_thread_id_;
+};
+
+} // namespace base
+
+#endif // BASE_THREADING_THREAD_CHECKER_IMPL_H_
diff --git a/libchrome/base/threading/thread_checker_unittest.cc b/libchrome/base/threading/thread_checker_unittest.cc
new file mode 100644
index 0000000..bc5b1e4
--- /dev/null
+++ b/libchrome/base/threading/thread_checker_unittest.cc
@@ -0,0 +1,181 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_checker.h"
+
+#include <memory>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/threading/simple_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Duplicated from base/threading/thread_checker.h so that we can be
+// good citizens there and undef the macro.
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#define ENABLE_THREAD_CHECKER 1
+#else
+#define ENABLE_THREAD_CHECKER 0
+#endif
+
+namespace base {
+
+namespace {
+
+// Simple class to exercise the basics of ThreadChecker.
+// Both the destructor and DoStuff should verify that they were
+// called on the same thread as the constructor.
+class ThreadCheckerClass : public ThreadChecker {
+ public:
+ ThreadCheckerClass() {}
+
+ // Verifies that it was called on the same thread as the constructor.
+ void DoStuff() {
+ DCHECK(CalledOnValidThread());
+ }
+
+ void DetachFromThread() {
+ ThreadChecker::DetachFromThread();
+ }
+
+ static void MethodOnDifferentThreadImpl();
+ static void DetachThenCallFromDifferentThreadImpl();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ThreadCheckerClass);
+};
+
+// Calls ThreadCheckerClass::DoStuff on another thread.
+class CallDoStuffOnThread : public base::SimpleThread {
+ public:
+ explicit CallDoStuffOnThread(ThreadCheckerClass* thread_checker_class)
+ : SimpleThread("call_do_stuff_on_thread"),
+ thread_checker_class_(thread_checker_class) {
+ }
+
+ void Run() override { thread_checker_class_->DoStuff(); }
+
+ private:
+ ThreadCheckerClass* thread_checker_class_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallDoStuffOnThread);
+};
+
+// Deletes ThreadCheckerClass on a different thread.
+class DeleteThreadCheckerClassOnThread : public base::SimpleThread {
+ public:
+ explicit DeleteThreadCheckerClassOnThread(
+ ThreadCheckerClass* thread_checker_class)
+ : SimpleThread("delete_thread_checker_class_on_thread"),
+ thread_checker_class_(thread_checker_class) {
+ }
+
+ void Run() override { thread_checker_class_.reset(); }
+
+ private:
+ std::unique_ptr<ThreadCheckerClass> thread_checker_class_;
+
+ DISALLOW_COPY_AND_ASSIGN(DeleteThreadCheckerClassOnThread);
+};
+
+} // namespace
+
+TEST(ThreadCheckerTest, CallsAllowedOnSameThread) {
+ std::unique_ptr<ThreadCheckerClass> thread_checker_class(
+ new ThreadCheckerClass);
+
+ // Verify that DoStuff doesn't assert.
+ thread_checker_class->DoStuff();
+
+ // Verify that the destructor doesn't assert.
+ thread_checker_class.reset();
+}
+
+TEST(ThreadCheckerTest, DestructorAllowedOnDifferentThread) {
+ std::unique_ptr<ThreadCheckerClass> thread_checker_class(
+ new ThreadCheckerClass);
+
+ // Verify that the destructor doesn't assert
+ // when called on a different thread.
+ DeleteThreadCheckerClassOnThread delete_on_thread(
+ thread_checker_class.release());
+
+ delete_on_thread.Start();
+ delete_on_thread.Join();
+}
+
+TEST(ThreadCheckerTest, DetachFromThread) {
+ std::unique_ptr<ThreadCheckerClass> thread_checker_class(
+ new ThreadCheckerClass);
+
+ // Verify that DoStuff doesn't assert when called on a different thread after
+ // a call to DetachFromThread.
+ thread_checker_class->DetachFromThread();
+ CallDoStuffOnThread call_on_thread(thread_checker_class.get());
+
+ call_on_thread.Start();
+ call_on_thread.Join();
+}
+
+#if GTEST_HAS_DEATH_TEST || !ENABLE_THREAD_CHECKER
+
+void ThreadCheckerClass::MethodOnDifferentThreadImpl() {
+ std::unique_ptr<ThreadCheckerClass> thread_checker_class(
+ new ThreadCheckerClass);
+
+ // DoStuff should assert in debug builds only when called on a
+ // different thread.
+ CallDoStuffOnThread call_on_thread(thread_checker_class.get());
+
+ call_on_thread.Start();
+ call_on_thread.Join();
+}
+
+#if ENABLE_THREAD_CHECKER
+TEST(ThreadCheckerDeathTest, MethodNotAllowedOnDifferentThreadInDebug) {
+ ASSERT_DEATH({
+ ThreadCheckerClass::MethodOnDifferentThreadImpl();
+ }, "");
+}
+#else
+TEST(ThreadCheckerTest, MethodAllowedOnDifferentThreadInRelease) {
+ ThreadCheckerClass::MethodOnDifferentThreadImpl();
+}
+#endif // ENABLE_THREAD_CHECKER
+
+void ThreadCheckerClass::DetachThenCallFromDifferentThreadImpl() {
+ std::unique_ptr<ThreadCheckerClass> thread_checker_class(
+ new ThreadCheckerClass);
+
+ // DoStuff doesn't assert when called on a different thread
+ // after a call to DetachFromThread.
+ thread_checker_class->DetachFromThread();
+ CallDoStuffOnThread call_on_thread(thread_checker_class.get());
+
+ call_on_thread.Start();
+ call_on_thread.Join();
+
+ // DoStuff should assert in debug builds only after moving to
+ // another thread.
+ thread_checker_class->DoStuff();
+}
+
+#if ENABLE_THREAD_CHECKER
+TEST(ThreadCheckerDeathTest, DetachFromThreadInDebug) {
+ ASSERT_DEATH({
+ ThreadCheckerClass::DetachThenCallFromDifferentThreadImpl();
+ }, "");
+}
+#else
+TEST(ThreadCheckerTest, DetachFromThreadInRelease) {
+ ThreadCheckerClass::DetachThenCallFromDifferentThreadImpl();
+}
+#endif // ENABLE_THREAD_CHECKER
+
+#endif // GTEST_HAS_DEATH_TEST || !ENABLE_THREAD_CHECKER
+
+// Just in case we ever get lumped together with other compilation units.
+#undef ENABLE_THREAD_CHECKER
+
+} // namespace base
diff --git a/libchrome/base/threading/thread_collision_warner.cc b/libchrome/base/threading/thread_collision_warner.cc
new file mode 100644
index 0000000..547e11c
--- /dev/null
+++ b/libchrome/base/threading/thread_collision_warner.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_collision_warner.h"
+
+#include "base/logging.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+void DCheckAsserter::warn() {
+ NOTREACHED() << "Thread Collision";
+}
+
+static subtle::Atomic32 CurrentThread() {
+ const PlatformThreadId current_thread_id = PlatformThread::CurrentId();
+ // We need to get the thread id into an atomic data type. This might be a
+ // truncating conversion, but any loss-of-information just increases the
+ // chance of a false negative, not a false positive.
+ const subtle::Atomic32 atomic_thread_id =
+ static_cast<subtle::Atomic32>(current_thread_id);
+
+ return atomic_thread_id;
+}
+
+void ThreadCollisionWarner::EnterSelf() {
+ // If the active thread is 0 then we write the current thread ID;
+ // if two or more threads arrive here, only one will succeed in
+ // writing the current thread ID to valid_thread_id_.
+ subtle::Atomic32 current_thread_id = CurrentThread();
+
+ int previous_value = subtle::NoBarrier_CompareAndSwap(&valid_thread_id_,
+ 0,
+ current_thread_id);
+ if (previous_value != 0 && previous_value != current_thread_id) {
+ // gotcha! a thread is trying to use the same class and that is
+ // not current thread.
+ asserter_->warn();
+ }
+
+ subtle::NoBarrier_AtomicIncrement(&counter_, 1);
+}
+
+void ThreadCollisionWarner::Enter() {
+ subtle::Atomic32 current_thread_id = CurrentThread();
+
+ if (subtle::NoBarrier_CompareAndSwap(&valid_thread_id_,
+ 0,
+ current_thread_id) != 0) {
+ // gotcha! another thread is trying to use the same class.
+ asserter_->warn();
+ }
+
+ subtle::NoBarrier_AtomicIncrement(&counter_, 1);
+}
+
+void ThreadCollisionWarner::Leave() {
+ if (subtle::Barrier_AtomicIncrement(&counter_, -1) == 0) {
+ subtle::NoBarrier_Store(&valid_thread_id_, 0);
+ }
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/thread_collision_warner.h b/libchrome/base/threading/thread_collision_warner.h
new file mode 100644
index 0000000..4699a91
--- /dev/null
+++ b/libchrome/base/threading/thread_collision_warner.h
@@ -0,0 +1,245 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_COLLISION_WARNER_H_
+#define BASE_THREADING_THREAD_COLLISION_WARNER_H_
+
+#include <memory>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+
+// A helper class alongside macros to be used to verify assumptions about thread
+// safety of a class.
+//
+// Example: Queue implementation non thread-safe but still usable if clients
+// are synchronized somehow.
+//
+// In this case the macro DFAKE_SCOPED_LOCK has to be
+// used, it checks that if a thread is inside the push/pop then
+// no one else is still inside the pop/push
+//
+// class NonThreadSafeQueue {
+// public:
+// ...
+// void push(int) { DFAKE_SCOPED_LOCK(push_pop_); ... }
+// int pop() { DFAKE_SCOPED_LOCK(push_pop_); ... }
+// ...
+// private:
+// DFAKE_MUTEX(push_pop_);
+// };
+//
+//
+// Example: Queue implementation non thread-safe but still usable if clients
+// are synchronized somehow, it calls a method to "protect" from
+// a "protected" method
+//
+// In this case the macro DFAKE_SCOPED_RECURSIVE_LOCK
+// has to be used, it checks that if a thread is inside the push/pop
+// then no one else is still inside the pop/push
+//
+// class NonThreadSafeQueue {
+// public:
+// void push(int) {
+// DFAKE_SCOPED_LOCK(push_pop_);
+// ...
+// }
+// int pop() {
+// DFAKE_SCOPED_RECURSIVE_LOCK(push_pop_);
+// bar();
+// ...
+// }
+// void bar() { DFAKE_SCOPED_RECURSIVE_LOCK(push_pop_); ... }
+// ...
+// private:
+// DFAKE_MUTEX(push_pop_);
+// };
+//
+//
+// Example: Queue implementation not usable even if clients are synchronized,
+// so only one thread in the class life cycle can use the two members
+// push/pop.
+//
+// In this case the macro DFAKE_SCOPED_LOCK_THREAD_LOCKED pins the
+// specified critical section the first time a thread enters push or pop;
+// from that time on only that thread is allowed to execute push or
+// pop.
+//
+// class NonThreadSafeQueue {
+// public:
+// ...
+// void push(int) { DFAKE_SCOPED_LOCK_THREAD_LOCKED(push_pop_); ... }
+// int pop() { DFAKE_SCOPED_LOCK_THREAD_LOCKED(push_pop_); ... }
+// ...
+// private:
+// DFAKE_MUTEX(push_pop_);
+// };
+//
+//
+// Example: Class that has to be constructed/destroyed on the same thread;
+// a "shareable" method (with external synchronization) and a not
+// shareable method (even with external synchronization).
+//
+// In this case 3 Critical sections have to be defined
+//
+// class ExoticClass {
+// public:
+// ExoticClass() { DFAKE_SCOPED_LOCK_THREAD_LOCKED(ctor_dtor_); ... }
+// ~ExoticClass() { DFAKE_SCOPED_LOCK_THREAD_LOCKED(ctor_dtor_); ... }
+//
+// void Shareable() { DFAKE_SCOPED_LOCK(shareable_section_); ... }
+// void NotShareable() { DFAKE_SCOPED_LOCK_THREAD_LOCKED(ctor_dtor_); ... }
+// ...
+// private:
+// DFAKE_MUTEX(ctor_dtor_);
+// DFAKE_MUTEX(shareable_section_);
+// };
+
+
+#if !defined(NDEBUG)
+
+// Defines a class member that acts like a mutex. It is used only as a
+// verification tool.
+#define DFAKE_MUTEX(obj) \
+ mutable base::ThreadCollisionWarner obj
+// Asserts the call is never called simultaneously in two threads. Used at
+// member function scope.
+#define DFAKE_SCOPED_LOCK(obj) \
+ base::ThreadCollisionWarner::ScopedCheck s_check_##obj(&obj)
+// Asserts the call is never called simultaneously in two threads. Used at
+// member function scope. Same as DFAKE_SCOPED_LOCK but allows recursive locks.
+#define DFAKE_SCOPED_RECURSIVE_LOCK(obj) \
+ base::ThreadCollisionWarner::ScopedRecursiveCheck sr_check_##obj(&obj)
+// Asserts the code is always executed in the same thread.
+#define DFAKE_SCOPED_LOCK_THREAD_LOCKED(obj) \
+ base::ThreadCollisionWarner::Check check_##obj(&obj)
+
+#else
+
+#define DFAKE_MUTEX(obj) typedef void InternalFakeMutexType##obj
+#define DFAKE_SCOPED_LOCK(obj) ((void)0)
+#define DFAKE_SCOPED_RECURSIVE_LOCK(obj) ((void)0)
+#define DFAKE_SCOPED_LOCK_THREAD_LOCKED(obj) ((void)0)
+
+#endif
+
+namespace base {
+
+// The class ThreadCollisionWarner uses an Asserter to notify the collision
+// AsserterBase is the interfaces and DCheckAsserter is the default asserter
+// used. During the unit tests another class is used that doesn't "DCHECK"
+// in case of collision (check thread_collision_warner_unittests.cc)
+struct BASE_EXPORT AsserterBase {
+ virtual ~AsserterBase() {}
+ virtual void warn() = 0;
+};
+
+struct BASE_EXPORT DCheckAsserter : public AsserterBase {
+ ~DCheckAsserter() override {}
+ void warn() override;
+};
+
+class BASE_EXPORT ThreadCollisionWarner {
+ public:
+ // The parameter asserter is there only for test purpose
+ explicit ThreadCollisionWarner(AsserterBase* asserter = new DCheckAsserter())
+ : valid_thread_id_(0),
+ counter_(0),
+ asserter_(asserter) {}
+
+ ~ThreadCollisionWarner() {
+ delete asserter_;
+ }
+
+ // This class is meant to be used through the macro
+ // DFAKE_SCOPED_LOCK_THREAD_LOCKED
+ // it doesn't leave the critical section, as opposed to ScopedCheck,
+ // because the critical section being pinned is allowed to be used only
+ // from one thread
+ class BASE_EXPORT Check {
+ public:
+ explicit Check(ThreadCollisionWarner* warner)
+ : warner_(warner) {
+ warner_->EnterSelf();
+ }
+
+ ~Check() {}
+
+ private:
+ ThreadCollisionWarner* warner_;
+
+ DISALLOW_COPY_AND_ASSIGN(Check);
+ };
+
+ // This class is meant to be used through the macro
+ // DFAKE_SCOPED_LOCK
+ class BASE_EXPORT ScopedCheck {
+ public:
+ explicit ScopedCheck(ThreadCollisionWarner* warner)
+ : warner_(warner) {
+ warner_->Enter();
+ }
+
+ ~ScopedCheck() {
+ warner_->Leave();
+ }
+
+ private:
+ ThreadCollisionWarner* warner_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedCheck);
+ };
+
+ // This class is meant to be used through the macro
+ // DFAKE_SCOPED_RECURSIVE_LOCK
+ class BASE_EXPORT ScopedRecursiveCheck {
+ public:
+ explicit ScopedRecursiveCheck(ThreadCollisionWarner* warner)
+ : warner_(warner) {
+ warner_->EnterSelf();
+ }
+
+ ~ScopedRecursiveCheck() {
+ warner_->Leave();
+ }
+
+ private:
+ ThreadCollisionWarner* warner_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedRecursiveCheck);
+ };
+
+ private:
+ // This method stores the current thread identifier and does a DCHECK
+ // if another thread has already done it; it is safe if the same thread
+ // calls this multiple time (recursion allowed).
+ void EnterSelf();
+
+ // Same as EnterSelf but recursion is not allowed.
+ void Enter();
+
+ // Removes the thread_id stored in order to allow other threads to
+ // call EnterSelf or Enter.
+ void Leave();
+
+ // This stores the thread id that is inside the critical section, if the
+ // value is 0 then no thread is inside.
+ volatile subtle::Atomic32 valid_thread_id_;
+
+ // Counter to trace how many times a critical section was "pinned"
+ // (when allowed) in order to unpin it when counter_ reaches 0.
+ volatile subtle::Atomic32 counter_;
+
+ // Here only for class unit tests purpose, during the test I need to not
+ // DCHECK but notify the collision with something else.
+ AsserterBase* asserter_;
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadCollisionWarner);
+};
+
+} // namespace base
+
+#endif // BASE_THREADING_THREAD_COLLISION_WARNER_H_
diff --git a/libchrome/base/threading/thread_collision_warner_unittest.cc b/libchrome/base/threading/thread_collision_warner_unittest.cc
new file mode 100644
index 0000000..71447ef
--- /dev/null
+++ b/libchrome/base/threading/thread_collision_warner_unittest.cc
@@ -0,0 +1,382 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_collision_warner.h"
+
+#include <memory>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// '' : local class member function does not have a body
+MSVC_PUSH_DISABLE_WARNING(4822)
+
+
+#if defined(NDEBUG)
+
+// Would cause a memory leak otherwise.
+#undef DFAKE_MUTEX
+#define DFAKE_MUTEX(obj) std::unique_ptr<base::AsserterBase> obj
+
+// In Release, we expect the AsserterBase::warn() to not happen.
+#define EXPECT_NDEBUG_FALSE_DEBUG_TRUE EXPECT_FALSE
+
+#else
+
+// In Debug, we expect the AsserterBase::warn() to happen.
+#define EXPECT_NDEBUG_FALSE_DEBUG_TRUE EXPECT_TRUE
+
+#endif
+
+
+namespace {
+
+// This is the asserter used with ThreadCollisionWarner instead of the default
+// DCheckAsserter. The method fail_state is used to know if a collision took
+// place.
+class AssertReporter : public base::AsserterBase {
+ public:
+  AssertReporter()
+      : failed_(false) {}
+
+  // Called by ThreadCollisionWarner instead of DCHECKing; latches the
+  // failure until reset().
+  void warn() override { failed_ = true; }
+
+  ~AssertReporter() override {}
+
+  // True iff warn() has been called since construction or the last reset().
+  bool fail_state() const { return failed_; }
+  void reset() { failed_ = false; }
+
+ private:
+  bool failed_;
+};
+
+} // namespace
+
+// Nested DFAKE_SCOPED_LOCK_THREAD_LOCKED on the same thread must not report
+// a collision (the warner stays pinned to this thread).
+// NOTE(review): local_reporter appears to be owned by the warner — confirm
+// against thread_collision_warner.h before assuming no leak here.
+TEST(ThreadCollisionTest, BookCriticalSection) {
+  AssertReporter* local_reporter = new AssertReporter();
+
+  base::ThreadCollisionWarner warner(local_reporter);
+  EXPECT_FALSE(local_reporter->fail_state());
+
+  { // Pin section.
+    DFAKE_SCOPED_LOCK_THREAD_LOCKED(warner);
+    EXPECT_FALSE(local_reporter->fail_state());
+    { // Pin section.
+      DFAKE_SCOPED_LOCK_THREAD_LOCKED(warner);
+      EXPECT_FALSE(local_reporter->fail_state());
+    }
+  }
+}
+
+// DFAKE_SCOPED_RECURSIVE_LOCK allows same-thread recursion, and fully
+// unpins the section once all scopes exit.
+TEST(ThreadCollisionTest, ScopedRecursiveBookCriticalSection) {
+  AssertReporter* local_reporter = new AssertReporter();
+
+  base::ThreadCollisionWarner warner(local_reporter);
+  EXPECT_FALSE(local_reporter->fail_state());
+
+  { // Pin section.
+    DFAKE_SCOPED_RECURSIVE_LOCK(warner);
+    EXPECT_FALSE(local_reporter->fail_state());
+    { // Pin section again (allowed by DFAKE_SCOPED_RECURSIVE_LOCK)
+      DFAKE_SCOPED_RECURSIVE_LOCK(warner);
+      EXPECT_FALSE(local_reporter->fail_state());
+    } // Unpin section.
+  } // Unpin section.
+
+  // Check that section is not pinned
+  { // Pin section.
+    DFAKE_SCOPED_LOCK(warner);
+    EXPECT_FALSE(local_reporter->fail_state());
+  } // Unpin section.
+}
+
+// DFAKE_SCOPED_LOCK forbids recursion: re-pinning on the same thread must
+// trip the asserter in debug builds (no-op in release, hence the
+// EXPECT_NDEBUG_FALSE_DEBUG_TRUE macro).
+TEST(ThreadCollisionTest, ScopedBookCriticalSection) {
+  AssertReporter* local_reporter = new AssertReporter();
+
+  base::ThreadCollisionWarner warner(local_reporter);
+  EXPECT_FALSE(local_reporter->fail_state());
+
+  { // Pin section.
+    DFAKE_SCOPED_LOCK(warner);
+    EXPECT_FALSE(local_reporter->fail_state());
+  } // Unpin section.
+
+  { // Pin section.
+    DFAKE_SCOPED_LOCK(warner);
+    EXPECT_FALSE(local_reporter->fail_state());
+    {
+      // Pin section again (not allowed by DFAKE_SCOPED_LOCK)
+      DFAKE_SCOPED_LOCK(warner);
+      EXPECT_NDEBUG_FALSE_DEBUG_TRUE(local_reporter->fail_state());
+      // Reset the status of warner for further tests.
+      local_reporter->reset();
+    } // Unpin section.
+  } // Unpin section.
+
+  {
+    // Pin section.
+    DFAKE_SCOPED_LOCK(warner);
+    EXPECT_FALSE(local_reporter->fail_state());
+  } // Unpin section.
+}
+
+// Two threads using a DFAKE_SCOPED_LOCK_THREAD_LOCKED-guarded object is a
+// collision by definition (the section is pinned to the first thread), so
+// the asserter must fire in debug builds.
+TEST(ThreadCollisionTest, MTBookCriticalSectionTest) {
+  class NonThreadSafeQueue {
+   public:
+    explicit NonThreadSafeQueue(base::AsserterBase* asserter)
+        : push_pop_(asserter) {
+    }
+
+    void push(int value) {
+      DFAKE_SCOPED_LOCK_THREAD_LOCKED(push_pop_);
+    }
+
+    int pop() {
+      DFAKE_SCOPED_LOCK_THREAD_LOCKED(push_pop_);
+      return 0;
+    }
+
+   private:
+    DFAKE_MUTEX(push_pop_);
+
+    DISALLOW_COPY_AND_ASSIGN(NonThreadSafeQueue);
+  };
+
+  class QueueUser : public base::DelegateSimpleThread::Delegate {
+   public:
+    explicit QueueUser(NonThreadSafeQueue* queue) : queue_(queue) {}
+
+    void Run() override {
+      queue_->push(0);
+      queue_->pop();
+    }
+
+   private:
+    NonThreadSafeQueue* queue_;
+  };
+
+  AssertReporter* local_reporter = new AssertReporter();
+
+  NonThreadSafeQueue queue(local_reporter);
+
+  QueueUser queue_user_a(&queue);
+  QueueUser queue_user_b(&queue);
+
+  base::DelegateSimpleThread thread_a(&queue_user_a, "queue_user_thread_a");
+  base::DelegateSimpleThread thread_b(&queue_user_b, "queue_user_thread_b");
+
+  thread_a.Start();
+  thread_b.Start();
+
+  thread_a.Join();
+  thread_b.Join();
+
+  EXPECT_NDEBUG_FALSE_DEBUG_TRUE(local_reporter->fail_state());
+}
+
+TEST(ThreadCollisionTest, MTScopedBookCriticalSectionTest) {
+  // Queue with a 5 seconds push execution time, hopefully the two used
+  // threads in the test will enter the push at same time.
+  class NonThreadSafeQueue {
+   public:
+    explicit NonThreadSafeQueue(base::AsserterBase* asserter)
+        : push_pop_(asserter) {
+    }
+
+    void push(int value) {
+      DFAKE_SCOPED_LOCK(push_pop_);
+      // Long sleep so both threads are inside push() concurrently.
+      base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(5));
+    }
+
+    int pop() {
+      DFAKE_SCOPED_LOCK(push_pop_);
+      return 0;
+    }
+
+   private:
+    DFAKE_MUTEX(push_pop_);
+
+    DISALLOW_COPY_AND_ASSIGN(NonThreadSafeQueue);
+  };
+
+  class QueueUser : public base::DelegateSimpleThread::Delegate {
+   public:
+    explicit QueueUser(NonThreadSafeQueue* queue) : queue_(queue) {}
+
+    void Run() override {
+      queue_->push(0);
+      queue_->pop();
+    }
+
+   private:
+    NonThreadSafeQueue* queue_;
+  };
+
+  AssertReporter* local_reporter = new AssertReporter();
+
+  NonThreadSafeQueue queue(local_reporter);
+
+  QueueUser queue_user_a(&queue);
+  QueueUser queue_user_b(&queue);
+
+  base::DelegateSimpleThread thread_a(&queue_user_a, "queue_user_thread_a");
+  base::DelegateSimpleThread thread_b(&queue_user_b, "queue_user_thread_b");
+
+  thread_a.Start();
+  thread_b.Start();
+
+  thread_a.Join();
+  thread_b.Join();
+
+  // Unsynchronized concurrent access should be detected (debug builds only).
+  EXPECT_NDEBUG_FALSE_DEBUG_TRUE(local_reporter->fail_state());
+}
+
+TEST(ThreadCollisionTest, MTSynchedScopedBookCriticalSectionTest) {
+  // Queue with a 2 seconds push execution time, hopefully the two used
+  // threads in the test will enter the push at same time.
+  class NonThreadSafeQueue {
+   public:
+    explicit NonThreadSafeQueue(base::AsserterBase* asserter)
+        : push_pop_(asserter) {
+    }
+
+    void push(int value) {
+      DFAKE_SCOPED_LOCK(push_pop_);
+      base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(2));
+    }
+
+    int pop() {
+      DFAKE_SCOPED_LOCK(push_pop_);
+      return 0;
+    }
+
+   private:
+    DFAKE_MUTEX(push_pop_);
+
+    DISALLOW_COPY_AND_ASSIGN(NonThreadSafeQueue);
+  };
+
+  // This time the QueueUser class protects the non thread safe queue with
+  // a lock.
+  class QueueUser : public base::DelegateSimpleThread::Delegate {
+   public:
+    QueueUser(NonThreadSafeQueue* queue, base::Lock* lock)
+        : queue_(queue), lock_(lock) {}
+
+    void Run() override {
+      {
+        base::AutoLock auto_lock(*lock_);
+        queue_->push(0);
+      }
+      {
+        base::AutoLock auto_lock(*lock_);
+        queue_->pop();
+      }
+    }
+   private:
+    NonThreadSafeQueue* queue_;
+    base::Lock* lock_;
+  };
+
+  AssertReporter* local_reporter = new AssertReporter();
+
+  NonThreadSafeQueue queue(local_reporter);
+
+  base::Lock lock;
+
+  QueueUser queue_user_a(&queue, &lock);
+  QueueUser queue_user_b(&queue, &lock);
+
+  base::DelegateSimpleThread thread_a(&queue_user_a, "queue_user_thread_a");
+  base::DelegateSimpleThread thread_b(&queue_user_b, "queue_user_thread_b");
+
+  thread_a.Start();
+  thread_b.Start();
+
+  thread_a.Join();
+  thread_b.Join();
+
+  // Real-lock-protected access must never be flagged as a collision.
+  EXPECT_FALSE(local_reporter->fail_state());
+}
+
+TEST(ThreadCollisionTest, MTSynchedScopedRecursiveBookCriticalSectionTest) {
+  // Queue with a 2 seconds push execution time, hopefully the two used
+  // threads in the test will enter the push at same time.
+  class NonThreadSafeQueue {
+   public:
+    explicit NonThreadSafeQueue(base::AsserterBase* asserter)
+        : push_pop_(asserter) {
+    }
+
+    void push(int) {
+      DFAKE_SCOPED_RECURSIVE_LOCK(push_pop_);
+      // Recursive pin from the same thread: must not be flagged.
+      bar();
+      base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(2));
+    }
+
+    int pop() {
+      DFAKE_SCOPED_RECURSIVE_LOCK(push_pop_);
+      return 0;
+    }
+
+    void bar() {
+      DFAKE_SCOPED_RECURSIVE_LOCK(push_pop_);
+    }
+
+   private:
+    DFAKE_MUTEX(push_pop_);
+
+    DISALLOW_COPY_AND_ASSIGN(NonThreadSafeQueue);
+  };
+
+  // This time the QueueUser class protects the non thread safe queue with
+  // a lock.
+  class QueueUser : public base::DelegateSimpleThread::Delegate {
+   public:
+    QueueUser(NonThreadSafeQueue* queue, base::Lock* lock)
+        : queue_(queue), lock_(lock) {}
+
+    void Run() override {
+      {
+        base::AutoLock auto_lock(*lock_);
+        queue_->push(0);
+      }
+      {
+        base::AutoLock auto_lock(*lock_);
+        queue_->bar();
+      }
+      {
+        base::AutoLock auto_lock(*lock_);
+        queue_->pop();
+      }
+    }
+   private:
+    NonThreadSafeQueue* queue_;
+    base::Lock* lock_;
+  };
+
+  AssertReporter* local_reporter = new AssertReporter();
+
+  NonThreadSafeQueue queue(local_reporter);
+
+  base::Lock lock;
+
+  QueueUser queue_user_a(&queue, &lock);
+  QueueUser queue_user_b(&queue, &lock);
+
+  base::DelegateSimpleThread thread_a(&queue_user_a, "queue_user_thread_a");
+  base::DelegateSimpleThread thread_b(&queue_user_b, "queue_user_thread_b");
+
+  thread_a.Start();
+  thread_b.Start();
+
+  thread_a.Join();
+  thread_b.Join();
+
+  // Lock-protected recursive access must never be flagged as a collision.
+  EXPECT_FALSE(local_reporter->fail_state());
+}
diff --git a/libchrome/base/threading/thread_id_name_manager.cc b/libchrome/base/threading/thread_id_name_manager.cc
new file mode 100644
index 0000000..107e0dc
--- /dev/null
+++ b/libchrome/base/threading/thread_id_name_manager.cc
@@ -0,0 +1,123 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_id_name_manager.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "base/logging.h"
+#include "base/memory/singleton.h"
+#include "base/strings/string_util.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+
+namespace base {
+namespace {
+
+static const char kDefaultName[] = "";
+// Interned default (empty) name; allocated once in the ThreadIdNameManager
+// constructor and intentionally never freed (the manager is a leaky
+// singleton).
+static std::string* g_default_name;
+
+}  // namespace
+
+ThreadIdNameManager::ThreadIdNameManager()
+    : main_process_name_(NULL),
+      main_process_id_(kInvalidThreadId) {
+  // Interned once for the process lifetime; never deleted.
+  g_default_name = new std::string(kDefaultName);
+
+  AutoLock locked(lock_);
+  name_to_interned_name_[kDefaultName] = g_default_name;
+}
+
+// Never run in practice: the singleton uses LeakySingletonTraits.
+ThreadIdNameManager::~ThreadIdNameManager() {
+}
+
+// Leaky singleton so the instance stays usable during process shutdown.
+ThreadIdNameManager* ThreadIdNameManager::GetInstance() {
+  return Singleton<ThreadIdNameManager,
+      LeakySingletonTraits<ThreadIdNameManager> >::get();
+}
+
+// NOTE(review): reads g_default_name without lock_; safe only if
+// GetInstance() has already run (which allocates it) — confirm callers.
+const char* ThreadIdNameManager::GetDefaultInternedString() {
+  return g_default_name->c_str();
+}
+
+void ThreadIdNameManager::RegisterThread(PlatformThreadHandle::Handle handle,
+                                         PlatformThreadId id) {
+  AutoLock locked(lock_);
+  thread_id_to_handle_[id] = handle;
+  // New threads start with the interned default (empty) name.
+  thread_handle_to_interned_name_[handle] =
+      name_to_interned_name_[kDefaultName];
+}
+
+void ThreadIdNameManager::SetName(PlatformThreadId id,
+                                  const std::string& name) {
+  // Interned names live for the whole process; "leaked" on purpose.
+  std::string* leaked_str = NULL;
+  {
+    AutoLock locked(lock_);
+    NameToInternedNameMap::iterator iter = name_to_interned_name_.find(name);
+    if (iter != name_to_interned_name_.end()) {
+      leaked_str = iter->second;
+    } else {
+      leaked_str = new std::string(name);
+      name_to_interned_name_[name] = leaked_str;
+    }
+
+    ThreadIdToHandleMap::iterator id_to_handle_iter =
+        thread_id_to_handle_.find(id);
+
+    // The main thread of a process will not be created as a Thread object
+    // which means there is no PlatformThreadHandle registered.
+    if (id_to_handle_iter == thread_id_to_handle_.end()) {
+      main_process_name_ = leaked_str;
+      main_process_id_ = id;
+      return;
+    }
+    thread_handle_to_interned_name_[id_to_handle_iter->second] = leaked_str;
+  }
+
+  // Add the leaked thread name to heap profiler context tracker. The name added
+  // is valid for the lifetime of the process. AllocationContextTracker cannot
+  // call GetName (which holds a lock) during the first allocation because it
+  // can cause a deadlock when the first allocation happens in the
+  // ThreadIdNameManager itself when holding the lock.
+  trace_event::AllocationContextTracker::SetCurrentThreadName(
+      leaked_str->c_str());
+}
+
+const char* ThreadIdNameManager::GetName(PlatformThreadId id) {
+  AutoLock locked(lock_);
+
+  // The main thread has no registered handle; it is tracked separately.
+  if (id == main_process_id_)
+    return main_process_name_->c_str();
+
+  ThreadIdToHandleMap::iterator id_to_handle_iter =
+      thread_id_to_handle_.find(id);
+  // Unknown ids map to the interned default (empty) name.
+  if (id_to_handle_iter == thread_id_to_handle_.end())
+    return name_to_interned_name_[kDefaultName]->c_str();
+
+  ThreadHandleToInternedNameMap::iterator handle_to_name_iter =
+      thread_handle_to_interned_name_.find(id_to_handle_iter->second);
+  return handle_to_name_iter->second->c_str();
+}
+
+// Drops the handle->name and id->handle bookkeeping for a terminating
+// thread. The interned name strings themselves are never freed.
+void ThreadIdNameManager::RemoveName(PlatformThreadHandle::Handle handle,
+                                     PlatformThreadId id) {
+  AutoLock locked(lock_);
+  ThreadHandleToInternedNameMap::iterator handle_to_name_iter =
+      thread_handle_to_interned_name_.find(handle);
+
+  DCHECK(handle_to_name_iter != thread_handle_to_interned_name_.end());
+  thread_handle_to_interned_name_.erase(handle_to_name_iter);
+
+  ThreadIdToHandleMap::iterator id_to_handle_iter =
+      thread_id_to_handle_.find(id);
+  DCHECK(id_to_handle_iter != thread_id_to_handle_.end());
+  // The given |id| may have been re-used by the system. Make sure the
+  // mapping points to the provided |handle| before removal.
+  if (id_to_handle_iter->second != handle)
+    return;
+
+  thread_id_to_handle_.erase(id_to_handle_iter);
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/thread_id_name_manager.h b/libchrome/base/threading/thread_id_name_manager.h
new file mode 100644
index 0000000..f469b60
--- /dev/null
+++ b/libchrome/base/threading/thread_id_name_manager.h
@@ -0,0 +1,68 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_ID_NAME_MANAGER_H_
+#define BASE_THREADING_THREAD_ID_NAME_MANAGER_H_
+
+#include <map>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+
+template <typename T>
+struct DefaultSingletonTraits;
+
+// Process-wide registry mapping thread ids/handles to interned thread
+// names. All mutating and querying methods take lock_, so the instance is
+// safe to use from multiple threads.
+class BASE_EXPORT ThreadIdNameManager {
+ public:
+  static ThreadIdNameManager* GetInstance();
+
+  // Returns the interned default (empty) name string.
+  static const char* GetDefaultInternedString();
+
+  // Register the mapping between a thread |id| and |handle|.
+  void RegisterThread(PlatformThreadHandle::Handle handle, PlatformThreadId id);
+
+  // Set the name for the given id.
+  void SetName(PlatformThreadId id, const std::string& name);
+
+  // Get the name for the given id.
+  const char* GetName(PlatformThreadId id);
+
+  // Remove the name for the given id.
+  void RemoveName(PlatformThreadHandle::Handle handle, PlatformThreadId id);
+
+ private:
+  friend struct DefaultSingletonTraits<ThreadIdNameManager>;
+
+  typedef std::map<PlatformThreadId, PlatformThreadHandle::Handle>
+      ThreadIdToHandleMap;
+  typedef std::map<PlatformThreadHandle::Handle, std::string*>
+      ThreadHandleToInternedNameMap;
+  typedef std::map<std::string, std::string*> NameToInternedNameMap;
+
+  ThreadIdNameManager();
+  ~ThreadIdNameManager();
+
+  // lock_ protects the name_to_interned_name_, thread_id_to_handle_ and
+  // thread_handle_to_interned_name_ maps.
+  Lock lock_;
+
+  NameToInternedNameMap name_to_interned_name_;
+  ThreadIdToHandleMap thread_id_to_handle_;
+  ThreadHandleToInternedNameMap thread_handle_to_interned_name_;
+
+  // Treat the main process specially as there is no PlatformThreadHandle.
+  std::string* main_process_name_;
+  PlatformThreadId main_process_id_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadIdNameManager);
+};
+
+} // namespace base
+
+#endif // BASE_THREADING_THREAD_ID_NAME_MANAGER_H_
diff --git a/libchrome/base/threading/thread_id_name_manager_unittest.cc b/libchrome/base/threading/thread_id_name_manager_unittest.cc
new file mode 100644
index 0000000..350dc0f
--- /dev/null
+++ b/libchrome/base/threading/thread_id_name_manager_unittest.cc
@@ -0,0 +1,93 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_id_name_manager.h"
+
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+typedef PlatformTest ThreadIdNameManagerTest;
+
+namespace {
+
+const char kAThread[] = "a thread";
+const char kBThread[] = "b thread";
+
+// Started threads are queryable by their registered names.
+TEST_F(ThreadIdNameManagerTest, AddThreads) {
+  base::ThreadIdNameManager* manager = base::ThreadIdNameManager::GetInstance();
+  base::Thread thread_a(kAThread);
+  base::Thread thread_b(kBThread);
+
+  thread_a.StartAndWaitForTesting();
+  thread_b.StartAndWaitForTesting();
+
+  EXPECT_STREQ(kAThread, manager->GetName(thread_a.GetThreadId()));
+  EXPECT_STREQ(kBThread, manager->GetName(thread_b.GetThreadId()));
+
+  thread_b.Stop();
+  thread_a.Stop();
+}
+
+// Stopping a thread removes its name; other threads' names are unaffected.
+TEST_F(ThreadIdNameManagerTest, RemoveThreads) {
+  base::ThreadIdNameManager* manager = base::ThreadIdNameManager::GetInstance();
+  base::Thread thread_a(kAThread);
+
+  thread_a.StartAndWaitForTesting();
+  {
+    base::Thread thread_b(kBThread);
+    thread_b.StartAndWaitForTesting();
+    thread_b.Stop();
+  }
+  EXPECT_STREQ(kAThread, manager->GetName(thread_a.GetThreadId()));
+
+  thread_a.Stop();
+  // A stopped thread's id resolves to the default (empty) name.
+  EXPECT_STREQ("", manager->GetName(thread_a.GetThreadId()));
+}
+
+// Restarting a Thread assigns a new thread id; the old id must no longer
+// resolve to the thread's name.
+TEST_F(ThreadIdNameManagerTest, RestartThread) {
+  base::ThreadIdNameManager* manager = base::ThreadIdNameManager::GetInstance();
+  base::Thread thread_a(kAThread);
+
+  thread_a.StartAndWaitForTesting();
+  base::PlatformThreadId a_id = thread_a.GetThreadId();
+  EXPECT_STREQ(kAThread, manager->GetName(a_id));
+  thread_a.Stop();
+
+  thread_a.StartAndWaitForTesting();
+  EXPECT_STREQ("", manager->GetName(a_id));
+  EXPECT_STREQ(kAThread, manager->GetName(thread_a.GetThreadId()));
+  thread_a.Stop();
+}
+
+// Setting a new name yields a different interned string pointer/value.
+TEST_F(ThreadIdNameManagerTest, ThreadNameInterning) {
+  base::ThreadIdNameManager* manager = base::ThreadIdNameManager::GetInstance();
+
+  base::PlatformThreadId a_id = base::PlatformThread::CurrentId();
+  base::PlatformThread::SetName("First Name");
+  std::string version = manager->GetName(a_id);
+
+  base::PlatformThread::SetName("New name");
+  EXPECT_NE(version, manager->GetName(a_id));
+  base::PlatformThread::SetName("");
+}
+
+// Re-setting a previously used name resolves to the same interned value.
+TEST_F(ThreadIdNameManagerTest, ResettingNameKeepsCorrectInternedValue) {
+  base::ThreadIdNameManager* manager = base::ThreadIdNameManager::GetInstance();
+
+  base::PlatformThreadId a_id = base::PlatformThread::CurrentId();
+  base::PlatformThread::SetName("Test Name");
+  std::string version = manager->GetName(a_id);
+
+  base::PlatformThread::SetName("New name");
+  EXPECT_NE(version, manager->GetName(a_id));
+
+  base::PlatformThread::SetName("Test Name");
+  EXPECT_EQ(version, manager->GetName(a_id));
+
+  base::PlatformThread::SetName("");
+}
+
+} // namespace
diff --git a/libchrome/base/threading/thread_local.h b/libchrome/base/threading/thread_local.h
new file mode 100644
index 0000000..f40420c
--- /dev/null
+++ b/libchrome/base/threading/thread_local.h
@@ -0,0 +1,134 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// WARNING: Thread local storage is a bit tricky to get right. Please make
+// sure that this is really the proper solution for what you're trying to
+// achieve. Don't prematurely optimize, most likely you can just use a Lock.
+//
+// These classes implement a wrapper around the platform's TLS storage
+// mechanism. On construction, they will allocate a TLS slot, and free the
+// TLS slot on destruction. No memory management (creation or destruction) is
+// handled. This means for uses of ThreadLocalPointer, you must correctly
+// manage the memory yourself, these classes will not destroy the pointer for
+// you. There are no at-thread-exit actions taken by these classes.
+//
+// ThreadLocalPointer<Type> wraps a Type*. It performs no creation or
+// destruction, so memory management must be handled elsewhere. The first call
+// to Get() on a thread will return NULL. You can update the pointer with a
+// call to Set().
+//
+// ThreadLocalBoolean wraps a bool. It will default to false if it has never
+// been set otherwise with Set().
+//
+// Thread Safety: An instance of ThreadLocalStorage is completely thread safe
+// once it has been created. If you want to dynamically create an instance,
+// you must of course properly deal with safety and race conditions. This
+// means a function-level static initializer is generally inappropriate.
+//
+// In Android, the system TLS is limited, the implementation is backed with
+// ThreadLocalStorage.
+//
+// Example usage:
+// // My class is logically attached to a single thread. We cache a pointer
+// // on the thread it was created on, so we can implement current().
+// MyClass::MyClass() {
+// DCHECK(Singleton<ThreadLocalPointer<MyClass> >::get()->Get() == NULL);
+// Singleton<ThreadLocalPointer<MyClass> >::get()->Set(this);
+// }
+//
+// MyClass::~MyClass() {
+// DCHECK(Singleton<ThreadLocalPointer<MyClass> >::get()->Get() != NULL);
+// Singleton<ThreadLocalPointer<MyClass> >::get()->Set(NULL);
+// }
+//
+// // Return the current MyClass associated with the calling thread, can be
+// // NULL if there isn't a MyClass associated.
+// MyClass* MyClass::current() {
+// return Singleton<ThreadLocalPointer<MyClass> >::get()->Get();
+// }
+
+#ifndef BASE_THREADING_THREAD_LOCAL_H_
+#define BASE_THREADING_THREAD_LOCAL_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/threading/thread_local_storage.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+#include <pthread.h>
+#endif
+
+namespace base {
+namespace internal {
+
+// Helper functions that abstract the cross-platform APIs. Do not use directly.
+struct BASE_EXPORT ThreadLocalPlatform {
+#if defined(OS_WIN)
+  typedef unsigned long SlotType;
+#elif defined(OS_ANDROID)
+  // Android's system TLS is limited, so slots are backed by
+  // ThreadLocalStorage (see file header comment).
+  typedef ThreadLocalStorage::StaticSlot SlotType;
+#elif defined(OS_POSIX)
+  typedef pthread_key_t SlotType;
+#endif
+
+  // Allocates / frees a platform TLS slot. No per-value cleanup is done.
+  static void AllocateSlot(SlotType* slot);
+  static void FreeSlot(SlotType slot);
+  // Reads / writes the calling thread's value for |slot|.
+  static void* GetValueFromSlot(SlotType slot);
+  static void SetValueInSlot(SlotType slot, void* value);
+};
+
+} // namespace internal
+
+// Per-thread Type* slot. Performs no memory management: Get() returns NULL
+// on a thread until that thread calls Set(), and the destructor frees only
+// the slot, never the pointed-to objects.
+template <typename Type>
+class ThreadLocalPointer {
+ public:
+  ThreadLocalPointer() : slot_() {
+    internal::ThreadLocalPlatform::AllocateSlot(&slot_);
+  }
+
+  ~ThreadLocalPointer() {
+    internal::ThreadLocalPlatform::FreeSlot(slot_);
+  }
+
+  // Returns the calling thread's value, or NULL if never Set() on it.
+  Type* Get() {
+    return static_cast<Type*>(
+        internal::ThreadLocalPlatform::GetValueFromSlot(slot_));
+  }
+
+  void Set(Type* ptr) {
+    // Double cast strips constness so Type may itself be const-qualified.
+    internal::ThreadLocalPlatform::SetValueInSlot(
+        slot_, const_cast<void*>(static_cast<const void*>(ptr)));
+  }
+
+ private:
+  typedef internal::ThreadLocalPlatform::SlotType SlotType;
+
+  SlotType slot_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadLocalPointer<Type>);
+};
+
+// Per-thread bool, defaulting to false. Implemented on top of
+// ThreadLocalPointer by storing a non-NULL sentinel (|this|) for true.
+class ThreadLocalBoolean {
+ public:
+  ThreadLocalBoolean() {}
+  ~ThreadLocalBoolean() {}
+
+  bool Get() {
+    return tlp_.Get() != NULL;
+  }
+
+  void Set(bool val) {
+    // Any non-NULL pointer means true; |this| is a convenient one.
+    tlp_.Set(val ? this : NULL);
+  }
+
+ private:
+  ThreadLocalPointer<void> tlp_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadLocalBoolean);
+};
+
+} // namespace base
+
+#endif // BASE_THREADING_THREAD_LOCAL_H_
diff --git a/libchrome/base/threading/thread_local_posix.cc b/libchrome/base/threading/thread_local_posix.cc
new file mode 100644
index 0000000..8bc46ad
--- /dev/null
+++ b/libchrome/base/threading/thread_local_posix.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_local.h"
+
+#include <pthread.h>
+
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if !defined(OS_ANDROID)
+
+namespace base {
+namespace internal {
+
+// static
+// CHECK (not DCHECK): running out of pthread keys is fatal in all builds.
+void ThreadLocalPlatform::AllocateSlot(SlotType* slot) {
+  int error = pthread_key_create(slot, NULL);
+  CHECK_EQ(error, 0);
+}
+
+// static
+// Frees the pthread key. Uses (actual, expected) argument order for
+// consistency with AllocateSlot and SetValueInSlot above/below.
+void ThreadLocalPlatform::FreeSlot(SlotType slot) {
+  int error = pthread_key_delete(slot);
+  DCHECK_EQ(error, 0);
+}
+
+// static
+// Returns NULL if the calling thread never stored a value in |slot|.
+void* ThreadLocalPlatform::GetValueFromSlot(SlotType slot) {
+  return pthread_getspecific(slot);
+}
+
+// static
+void ThreadLocalPlatform::SetValueInSlot(SlotType slot, void* value) {
+  int error = pthread_setspecific(slot, value);
+  DCHECK_EQ(error, 0);
+}
+
+} // namespace internal
+} // namespace base
+
+#endif // !defined(OS_ANDROID)
diff --git a/libchrome/base/threading/thread_local_storage.cc b/libchrome/base/threading/thread_local_storage.cc
new file mode 100644
index 0000000..a7eb527
--- /dev/null
+++ b/libchrome/base/threading/thread_local_storage.cc
@@ -0,0 +1,252 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_local_storage.h"
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+
+using base::internal::PlatformThreadLocalStorage;
+
+namespace {
+// In order to make TLS destructors work, we need to keep around a function
+// pointer to the destructor for each slot. We keep this array of pointers in a
+// global (static) array.
+// We use the single OS-level TLS slot (giving us one pointer per thread) to
+// hold a pointer to a per-thread array (table) of slots that we allocate to
+// Chromium consumers.
+
+// g_native_tls_key is the one native TLS that we use. It stores our table.
+base::subtle::Atomic32 g_native_tls_key =
+ PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES;
+
+// g_last_used_tls_key is the high-water-mark of allocated thread local storage.
+// Each allocation is an index into our g_tls_destructors[]. Each such index is
+// assigned to the instance variable slot_ in a ThreadLocalStorage::Slot
+// instance. We reserve the value slot_ == 0 to indicate that the corresponding
+// instance of ThreadLocalStorage::Slot has been freed (i.e., destructor called,
+// etc.). This reserved use of 0 is then stated as the initial value of
+// g_last_used_tls_key, so that the first issued index will be 1.
+base::subtle::Atomic32 g_last_used_tls_key = 0;
+
+// The maximum number of 'slots' in our thread local storage stack.
+const int kThreadLocalStorageSize = 256;
+
+// The maximum number of times to try to clear slots by calling destructors.
+// Use pthread naming convention for clarity.
+const int kMaxDestructorIterations = kThreadLocalStorageSize;
+
+// An array of destructor function pointers for the slots. If a slot has a
+// destructor, it will be stored in its corresponding entry in this array.
+// The elements are volatile to ensure that when the compiler reads the value
+// to potentially call the destructor, it does so once, and that value is tested
+// for null-ness and then used. Yes, that would be a weird de-optimization,
+// but I can imagine some register machines where it was just as easy to
+// re-fetch an array element, and I want to be sure a call to free the key
+// (i.e., null out the destructor entry) that happens on a separate thread can't
+// hurt the racy calls to the destructors on another thread.
+volatile base::ThreadLocalStorage::TLSDestructorFunc
+ g_tls_destructors[kThreadLocalStorageSize];
+
+// This function is called to initialize our entire Chromium TLS system.
+// It may be called very early, and we need to complete most all of the setup
+// (initialization) before calling *any* memory allocator functions, which may
+// recursively depend on this initialization.
+// As a result, we use Atomics, and avoid anything (like a singleton) that might
+// require memory allocations.
+// Lazily creates the native TLS key (racing threads resolve via CAS) and
+// installs this thread's slot vector, returning it.
+// NOTE(review): uses memset/memcpy but no <string.h> include is visible in
+// this file — presumably pulled in transitively; confirm.
+void** ConstructTlsVector() {
+  PlatformThreadLocalStorage::TLSKey key =
+      base::subtle::NoBarrier_Load(&g_native_tls_key);
+  if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) {
+    CHECK(PlatformThreadLocalStorage::AllocTLS(&key));
+
+    // TLS_KEY_OUT_OF_INDEXES is used to find out whether the key is set or
+    // not in NoBarrier_CompareAndSwap, but Posix has no invalid key, so we
+    // define an almost-impossible value for it.
+    // If we really get TLS_KEY_OUT_OF_INDEXES as value of key, just alloc
+    // another TLS slot.
+    if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) {
+      PlatformThreadLocalStorage::TLSKey tmp = key;
+      CHECK(PlatformThreadLocalStorage::AllocTLS(&key) &&
+            key != PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES);
+      PlatformThreadLocalStorage::FreeTLS(tmp);
+    }
+    // Atomically test-and-set the tls_key. If the key is
+    // TLS_KEY_OUT_OF_INDEXES, go ahead and set it. Otherwise, do nothing, as
+    // another thread already did our dirty work.
+    if (PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES !=
+        static_cast<PlatformThreadLocalStorage::TLSKey>(
+            base::subtle::NoBarrier_CompareAndSwap(
+                &g_native_tls_key,
+                PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES, key))) {
+      // We've been shortcut. Another thread replaced g_native_tls_key first so
+      // we need to destroy our index and use the one the other thread got
+      // first.
+      PlatformThreadLocalStorage::FreeTLS(key);
+      key = base::subtle::NoBarrier_Load(&g_native_tls_key);
+    }
+  }
+  CHECK(!PlatformThreadLocalStorage::GetTLSValue(key));
+
+  // Some allocators, such as TCMalloc, make use of thread local storage.
+  // As a result, any attempt to call new (or malloc) will lazily cause such a
+  // system to initialize, which will include registering for a TLS key. If we
+  // are not careful here, then that request to create a key will call new back,
+  // and we'll have an infinite loop. We avoid that as follows:
+  // Use a stack allocated vector, so that we don't have dependence on our
+  // allocator until our service is in place. (i.e., don't even call new until
+  // after we're setup)
+  void* stack_allocated_tls_data[kThreadLocalStorageSize];
+  memset(stack_allocated_tls_data, 0, sizeof(stack_allocated_tls_data));
+  // Ensure that any re-entrant calls change the temp version.
+  PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data);
+
+  // Allocate an array to store our data.
+  void** tls_data = new void*[kThreadLocalStorageSize];
+  memcpy(tls_data, stack_allocated_tls_data, sizeof(stack_allocated_tls_data));
+  PlatformThreadLocalStorage::SetTLSValue(key, tls_data);
+  return tls_data;
+}
+
+void OnThreadExitInternal(void* value) {
+ DCHECK(value);
+ void** tls_data = static_cast<void**>(value);
+ // Some allocators, such as TCMalloc, use TLS. As a result, when a thread
+ // terminates, one of the destructor calls we make may be to shut down an
+ // allocator. We have to be careful that after we've shutdown all of the
+ // known destructors (perchance including an allocator), that we don't call
+ // the allocator and cause it to resurrect itself (with no possibly destructor
+ // call to follow). We handle this problem as follows:
+ // Switch to using a stack allocated vector, so that we don't have dependence
+ // on our allocator after we have called all g_tls_destructors. (i.e., don't
+ // even call delete[] after we're done with destructors.)
+ void* stack_allocated_tls_data[kThreadLocalStorageSize];
+ memcpy(stack_allocated_tls_data, tls_data, sizeof(stack_allocated_tls_data));
+ // Ensure that any re-entrant calls change the temp version.
+ PlatformThreadLocalStorage::TLSKey key =
+ base::subtle::NoBarrier_Load(&g_native_tls_key);
+ PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data);
+ delete[] tls_data; // Our last dependence on an allocator.
+
+ int remaining_attempts = kMaxDestructorIterations;
+ bool need_to_scan_destructors = true;
+ while (need_to_scan_destructors) {
+ need_to_scan_destructors = false;
+ // Try to destroy the first-created-slot (which is slot 1) in our last
+ // destructor call. That user was able to function, and define a slot with
+ // no other services running, so perhaps it is a basic service (like an
+ // allocator) and should also be destroyed last. If we get the order wrong,
+  // then we'll iterate several more times, so it is really not that
+ // critical (but it might help).
+ base::subtle::Atomic32 last_used_tls_key =
+ base::subtle::NoBarrier_Load(&g_last_used_tls_key);
+ for (int slot = last_used_tls_key; slot > 0; --slot) {
+ void* tls_value = stack_allocated_tls_data[slot];
+ if (tls_value == NULL)
+ continue;
+
+ base::ThreadLocalStorage::TLSDestructorFunc destructor =
+ g_tls_destructors[slot];
+ if (destructor == NULL)
+ continue;
+ stack_allocated_tls_data[slot] = NULL; // pre-clear the slot.
+ destructor(tls_value);
+ // Any destructor might have called a different service, which then set
+ // a different slot to a non-NULL value. Hence we need to check
+ // the whole vector again. This is a pthread standard.
+ need_to_scan_destructors = true;
+ }
+ if (--remaining_attempts <= 0) {
+ NOTREACHED(); // Destructors might not have been called.
+ break;
+ }
+ }
+
+ // Remove our stack allocated vector.
+ PlatformThreadLocalStorage::SetTLSValue(key, NULL);
+}
+
+} // namespace
+
+namespace base {
+
+namespace internal {
+
+#if defined(OS_WIN)
+void PlatformThreadLocalStorage::OnThreadExit() {
+ PlatformThreadLocalStorage::TLSKey key =
+ base::subtle::NoBarrier_Load(&g_native_tls_key);
+ if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES)
+ return;
+ void *tls_data = GetTLSValue(key);
+ // Maybe we have never initialized TLS for this thread.
+ if (!tls_data)
+ return;
+ OnThreadExitInternal(tls_data);
+}
+#elif defined(OS_POSIX)
+void PlatformThreadLocalStorage::OnThreadExit(void* value) {
+ OnThreadExitInternal(value);
+}
+#endif // defined(OS_WIN)
+
+} // namespace internal
+
+ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) {
+ slot_ = 0;
+ base::subtle::Release_Store(&initialized_, 0);
+ Initialize(destructor);
+}
+
+void ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) {
+ PlatformThreadLocalStorage::TLSKey key =
+ base::subtle::NoBarrier_Load(&g_native_tls_key);
+ if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES ||
+ !PlatformThreadLocalStorage::GetTLSValue(key))
+ ConstructTlsVector();
+
+ // Grab a new slot.
+ slot_ = base::subtle::NoBarrier_AtomicIncrement(&g_last_used_tls_key, 1);
+ DCHECK_GT(slot_, 0);
+ CHECK_LT(slot_, kThreadLocalStorageSize);
+
+ // Setup our destructor.
+ g_tls_destructors[slot_] = destructor;
+ base::subtle::Release_Store(&initialized_, 1);
+}
+
+void ThreadLocalStorage::StaticSlot::Free() {
+ // At this time, we don't reclaim old indices for TLS slots.
+ // So all we need to do is wipe the destructor.
+ DCHECK_GT(slot_, 0);
+ DCHECK_LT(slot_, kThreadLocalStorageSize);
+ g_tls_destructors[slot_] = NULL;
+ slot_ = 0;
+ base::subtle::Release_Store(&initialized_, 0);
+}
+
+void* ThreadLocalStorage::StaticSlot::Get() const {
+ void** tls_data = static_cast<void**>(
+ PlatformThreadLocalStorage::GetTLSValue(
+ base::subtle::NoBarrier_Load(&g_native_tls_key)));
+ if (!tls_data)
+ tls_data = ConstructTlsVector();
+ DCHECK_GT(slot_, 0);
+ DCHECK_LT(slot_, kThreadLocalStorageSize);
+ return tls_data[slot_];
+}
+
+void ThreadLocalStorage::StaticSlot::Set(void* value) {
+ void** tls_data = static_cast<void**>(
+ PlatformThreadLocalStorage::GetTLSValue(
+ base::subtle::NoBarrier_Load(&g_native_tls_key)));
+ if (!tls_data)
+ tls_data = ConstructTlsVector();
+ DCHECK_GT(slot_, 0);
+ DCHECK_LT(slot_, kThreadLocalStorageSize);
+ tls_data[slot_] = value;
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/thread_local_storage.h b/libchrome/base/threading/thread_local_storage.h
new file mode 100644
index 0000000..0c7a692
--- /dev/null
+++ b/libchrome/base/threading/thread_local_storage.h
@@ -0,0 +1,148 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_LOCAL_STORAGE_H_
+#define BASE_THREADING_THREAD_LOCAL_STORAGE_H_
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_POSIX)
+#include <pthread.h>
+#endif
+
+namespace base {
+
+namespace internal {
+
+// WARNING: You should *NOT* be using this class directly.
+// PlatformThreadLocalStorage is low-level abstraction to the OS's TLS
+// interface, you should instead be using ThreadLocalStorage::StaticSlot/Slot.
+class BASE_EXPORT PlatformThreadLocalStorage {
+ public:
+
+#if defined(OS_WIN)
+ typedef unsigned long TLSKey;
+ enum : unsigned { TLS_KEY_OUT_OF_INDEXES = TLS_OUT_OF_INDEXES };
+#elif defined(OS_POSIX)
+ typedef pthread_key_t TLSKey;
+ // The following is a "reserved key" which is used in our generic Chromium
+ // ThreadLocalStorage implementation. We expect that an OS will not return
+ // such a key, but if it is returned (i.e., the OS tries to allocate it) we
+ // will just request another key.
+ enum { TLS_KEY_OUT_OF_INDEXES = 0x7FFFFFFF };
+#endif
+
+ // The following methods need to be supported on each OS platform, so that
+ // the Chromium ThreadLocalStore functionality can be constructed.
+ // Chromium will use these methods to acquire a single OS slot, and then use
+ // that to support a much larger number of Chromium slots (independent of the
+ // OS restrictions).
+ // The following returns true if it successfully is able to return an OS
+ // key in |key|.
+ static bool AllocTLS(TLSKey* key);
+  // Note: FreeTLS() doesn't have to be called; it is fine to leak the slot. The
+ // might not reuse released slot, you might just reset the TLS value with
+ // SetTLSValue().
+ static void FreeTLS(TLSKey key);
+ static void SetTLSValue(TLSKey key, void* value);
+ static void* GetTLSValue(TLSKey key);
+
+ // Each platform (OS implementation) is required to call this method on each
+ // terminating thread when the thread is about to terminate. This method
+ // will then call all registered destructors for slots in Chromium
+ // ThreadLocalStorage, until there are no slot values remaining as having
+ // been set on this thread.
+ // Destructors may end up being called multiple times on a terminating
+ // thread, as other destructors may re-set slots that were previously
+ // destroyed.
+#if defined(OS_WIN)
+  // Since Windows doesn't support TLS destructors, the implementation
+ // should use GetTLSValue() to retrieve the value of TLS slot.
+ static void OnThreadExit();
+#elif defined(OS_POSIX)
+  // |value| is the data stored in the TLS slot. The implementation can't use
+ // GetTLSValue() to retrieve the value of slot as it has already been reset
+ // in Posix.
+ static void OnThreadExit(void* value);
+#endif
+};
+
+} // namespace internal
+
+// Wrapper for thread local storage. This class doesn't do much except provide
+// an API for portability.
+class BASE_EXPORT ThreadLocalStorage {
+ public:
+
+ // Prototype for the TLS destructor function, which can be optionally used to
+ // cleanup thread local storage on thread exit. 'value' is the data that is
+ // stored in thread local storage.
+ typedef void (*TLSDestructorFunc)(void* value);
+
+ // StaticSlot uses its own struct initializer-list style static
+ // initialization, as base's LINKER_INITIALIZED requires a constructor and on
+ // some compilers (notably gcc 4.4) this still ends up needing runtime
+ // initialization.
+ #define TLS_INITIALIZER {false, 0}
+
+ // A key representing one value stored in TLS.
+ // Initialize like
+ // ThreadLocalStorage::StaticSlot my_slot = TLS_INITIALIZER;
+ // If you're not using a static variable, use the convenience class
+ // ThreadLocalStorage::Slot (below) instead.
+ struct BASE_EXPORT StaticSlot {
+ // Set up the TLS slot. Called by the constructor.
+ // 'destructor' is a pointer to a function to perform per-thread cleanup of
+ // this object. If set to NULL, no cleanup is done for this TLS slot.
+ void Initialize(TLSDestructorFunc destructor);
+
+ // Free a previously allocated TLS 'slot'.
+ // If a destructor was set for this slot, removes
+ // the destructor so that remaining threads exiting
+ // will not free data.
+ void Free();
+
+ // Get the thread-local value stored in slot 'slot'.
+ // Values are guaranteed to initially be zero.
+ void* Get() const;
+
+ // Set the thread-local value stored in slot 'slot' to
+ // value 'value'.
+ void Set(void* value);
+
+ bool initialized() const {
+ return base::subtle::Acquire_Load(&initialized_) != 0;
+ }
+
+ // The internals of this struct should be considered private.
+ base::subtle::Atomic32 initialized_;
+ int slot_;
+ };
+
+ // A convenience wrapper around StaticSlot with a constructor. Can be used
+ // as a member variable.
+ class BASE_EXPORT Slot : public StaticSlot {
+ public:
+ // Calls StaticSlot::Initialize().
+ explicit Slot(TLSDestructorFunc destructor = NULL);
+
+ private:
+ using StaticSlot::initialized_;
+ using StaticSlot::slot_;
+
+ DISALLOW_COPY_AND_ASSIGN(Slot);
+ };
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ThreadLocalStorage);
+};
+
+} // namespace base
+
+#endif // BASE_THREADING_THREAD_LOCAL_STORAGE_H_
diff --git a/libchrome/base/threading/thread_local_storage_posix.cc b/libchrome/base/threading/thread_local_storage_posix.cc
new file mode 100644
index 0000000..ebaf400
--- /dev/null
+++ b/libchrome/base/threading/thread_local_storage_posix.cc
@@ -0,0 +1,34 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_local_storage.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+namespace internal {
+
+bool PlatformThreadLocalStorage::AllocTLS(TLSKey* key) {
+ return !pthread_key_create(key,
+ base::internal::PlatformThreadLocalStorage::OnThreadExit);
+}
+
+void PlatformThreadLocalStorage::FreeTLS(TLSKey key) {
+ int ret = pthread_key_delete(key);
+ DCHECK_EQ(ret, 0);
+}
+
+void* PlatformThreadLocalStorage::GetTLSValue(TLSKey key) {
+ return pthread_getspecific(key);
+}
+
+void PlatformThreadLocalStorage::SetTLSValue(TLSKey key, void* value) {
+ int ret = pthread_setspecific(key, value);
+ DCHECK_EQ(ret, 0);
+}
+
+} // namespace internal
+
+} // namespace base
diff --git a/libchrome/base/threading/thread_local_storage_unittest.cc b/libchrome/base/threading/thread_local_storage_unittest.cc
new file mode 100644
index 0000000..322524b
--- /dev/null
+++ b/libchrome/base/threading/thread_local_storage_unittest.cc
@@ -0,0 +1,130 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include <process.h>
+#endif
+
+#include "base/macros.h"
+#include "base/threading/simple_thread.h"
+#include "base/threading/thread_local_storage.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+// Ignore warnings about ptr->int conversions that we use when
+// storing ints into ThreadLocalStorage.
+#pragma warning(disable : 4311 4312)
+#endif
+
+namespace base {
+
+namespace {
+
+const int kInitialTlsValue = 0x5555;
+const int kFinalTlsValue = 0x7777;
+// How many times must a destructor be called before we really are done.
+const int kNumberDestructorCallRepetitions = 3;
+
+static ThreadLocalStorage::StaticSlot tls_slot = TLS_INITIALIZER;
+
+class ThreadLocalStorageRunner : public DelegateSimpleThread::Delegate {
+ public:
+ explicit ThreadLocalStorageRunner(int* tls_value_ptr)
+ : tls_value_ptr_(tls_value_ptr) {}
+
+ ~ThreadLocalStorageRunner() override {}
+
+ void Run() override {
+ *tls_value_ptr_ = kInitialTlsValue;
+ tls_slot.Set(tls_value_ptr_);
+
+ int *ptr = static_cast<int*>(tls_slot.Get());
+ EXPECT_EQ(ptr, tls_value_ptr_);
+ EXPECT_EQ(*ptr, kInitialTlsValue);
+ *tls_value_ptr_ = 0;
+
+ ptr = static_cast<int*>(tls_slot.Get());
+ EXPECT_EQ(ptr, tls_value_ptr_);
+ EXPECT_EQ(*ptr, 0);
+
+ *ptr = kFinalTlsValue + kNumberDestructorCallRepetitions;
+ }
+
+ private:
+ int* tls_value_ptr_;
+ DISALLOW_COPY_AND_ASSIGN(ThreadLocalStorageRunner);
+};
+
+
+void ThreadLocalStorageCleanup(void *value) {
+ int *ptr = reinterpret_cast<int*>(value);
+ // Destructors should never be called with a NULL.
+ ASSERT_NE(reinterpret_cast<int*>(NULL), ptr);
+ if (*ptr == kFinalTlsValue)
+ return; // We've been called enough times.
+ ASSERT_LT(kFinalTlsValue, *ptr);
+ ASSERT_GE(kFinalTlsValue + kNumberDestructorCallRepetitions, *ptr);
+ --*ptr; // Move closer to our target.
+ // Tell tls that we're not done with this thread, and still need destruction.
+ tls_slot.Set(value);
+}
+
+} // namespace
+
+TEST(ThreadLocalStorageTest, Basics) {
+ ThreadLocalStorage::Slot slot;
+ slot.Set(reinterpret_cast<void*>(123));
+ int value = reinterpret_cast<intptr_t>(slot.Get());
+ EXPECT_EQ(value, 123);
+}
+
+#if defined(THREAD_SANITIZER) || \
+ (defined(OS_WIN) && defined(ARCH_CPU_X86_64) && !defined(NDEBUG))
+// Do not run the test under ThreadSanitizer. Because this test iterates its
+// own TSD destructor for the maximum possible number of times, TSan can't jump
+// in after the last destructor invocation, therefore the destructor remains
+// unsynchronized with the following users of the same TSD slot. This results
+// in race reports between the destructor and functions in other tests.
+//
+// It is disabled on Win x64 with incremental linking (i.e. "Debug") pending
+// resolution of http://crbug.com/251251.
+#define MAYBE_TLSDestructors DISABLED_TLSDestructors
+#else
+#define MAYBE_TLSDestructors TLSDestructors
+#endif
+TEST(ThreadLocalStorageTest, MAYBE_TLSDestructors) {
+ // Create a TLS index with a destructor. Create a set of
+ // threads that set the TLS, while the destructor cleans it up.
+ // After the threads finish, verify that the value is cleaned up.
+ const int kNumThreads = 5;
+ int values[kNumThreads];
+ ThreadLocalStorageRunner* thread_delegates[kNumThreads];
+ DelegateSimpleThread* threads[kNumThreads];
+
+ tls_slot.Initialize(ThreadLocalStorageCleanup);
+
+ // Spawn the threads.
+ for (int index = 0; index < kNumThreads; index++) {
+ values[index] = kInitialTlsValue;
+ thread_delegates[index] = new ThreadLocalStorageRunner(&values[index]);
+ threads[index] = new DelegateSimpleThread(thread_delegates[index],
+ "tls thread");
+ threads[index]->Start();
+ }
+
+ // Wait for the threads to finish.
+ for (int index = 0; index < kNumThreads; index++) {
+ threads[index]->Join();
+ delete threads[index];
+ delete thread_delegates[index];
+
+ // Verify that the destructor was called and that we reset.
+ EXPECT_EQ(values[index], kFinalTlsValue);
+ }
+ tls_slot.Free(); // Stop doing callbacks to cleanup threads.
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/thread_local_unittest.cc b/libchrome/base/threading/thread_local_unittest.cc
new file mode 100644
index 0000000..cdc1ca6
--- /dev/null
+++ b/libchrome/base/threading/thread_local_unittest.cc
@@ -0,0 +1,169 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/threading/simple_thread.h"
+#include "base/threading/thread_local.h"
+#include "base/synchronization/waitable_event.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class ThreadLocalTesterBase : public base::DelegateSimpleThreadPool::Delegate {
+ public:
+ typedef base::ThreadLocalPointer<char> TLPType;
+
+ ThreadLocalTesterBase(TLPType* tlp, base::WaitableEvent* done)
+ : tlp_(tlp),
+ done_(done) {
+ }
+ ~ThreadLocalTesterBase() override {}
+
+ protected:
+ TLPType* tlp_;
+ base::WaitableEvent* done_;
+};
+
+class SetThreadLocal : public ThreadLocalTesterBase {
+ public:
+ SetThreadLocal(TLPType* tlp, base::WaitableEvent* done)
+ : ThreadLocalTesterBase(tlp, done),
+ val_(NULL) {
+ }
+ ~SetThreadLocal() override {}
+
+ void set_value(char* val) { val_ = val; }
+
+ void Run() override {
+ DCHECK(!done_->IsSignaled());
+ tlp_->Set(val_);
+ done_->Signal();
+ }
+
+ private:
+ char* val_;
+};
+
+class GetThreadLocal : public ThreadLocalTesterBase {
+ public:
+ GetThreadLocal(TLPType* tlp, base::WaitableEvent* done)
+ : ThreadLocalTesterBase(tlp, done),
+ ptr_(NULL) {
+ }
+ ~GetThreadLocal() override {}
+
+ void set_ptr(char** ptr) { ptr_ = ptr; }
+
+ void Run() override {
+ DCHECK(!done_->IsSignaled());
+ *ptr_ = tlp_->Get();
+ done_->Signal();
+ }
+
+ private:
+ char** ptr_;
+};
+
+} // namespace
+
+// In this test, we start 2 threads which will access a ThreadLocalPointer. We
+// make sure the default is NULL, and the pointers are unique to the threads.
+TEST(ThreadLocalTest, Pointer) {
+ base::DelegateSimpleThreadPool tp1("ThreadLocalTest tp1", 1);
+ base::DelegateSimpleThreadPool tp2("ThreadLocalTest tp1", 1);
+ tp1.Start();
+ tp2.Start();
+
+ base::ThreadLocalPointer<char> tlp;
+
+ static char* const kBogusPointer = reinterpret_cast<char*>(0x1234);
+
+ char* tls_val;
+ base::WaitableEvent done(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+
+ GetThreadLocal getter(&tlp, &done);
+ getter.set_ptr(&tls_val);
+
+ // Check that both threads defaulted to NULL.
+ tls_val = kBogusPointer;
+ done.Reset();
+ tp1.AddWork(&getter);
+ done.Wait();
+ EXPECT_EQ(static_cast<char*>(NULL), tls_val);
+
+ tls_val = kBogusPointer;
+ done.Reset();
+ tp2.AddWork(&getter);
+ done.Wait();
+ EXPECT_EQ(static_cast<char*>(NULL), tls_val);
+
+
+ SetThreadLocal setter(&tlp, &done);
+ setter.set_value(kBogusPointer);
+
+ // Have thread 1 set their pointer value to kBogusPointer.
+ done.Reset();
+ tp1.AddWork(&setter);
+ done.Wait();
+
+ tls_val = NULL;
+ done.Reset();
+ tp1.AddWork(&getter);
+ done.Wait();
+ EXPECT_EQ(kBogusPointer, tls_val);
+
+ // Make sure thread 2 is still NULL
+ tls_val = kBogusPointer;
+ done.Reset();
+ tp2.AddWork(&getter);
+ done.Wait();
+ EXPECT_EQ(static_cast<char*>(NULL), tls_val);
+
+ // Set thread 2 to kBogusPointer + 1.
+ setter.set_value(kBogusPointer + 1);
+
+ done.Reset();
+ tp2.AddWork(&setter);
+ done.Wait();
+
+ tls_val = NULL;
+ done.Reset();
+ tp2.AddWork(&getter);
+ done.Wait();
+ EXPECT_EQ(kBogusPointer + 1, tls_val);
+
+ // Make sure thread 1 is still kBogusPointer.
+ tls_val = NULL;
+ done.Reset();
+ tp1.AddWork(&getter);
+ done.Wait();
+ EXPECT_EQ(kBogusPointer, tls_val);
+
+ tp1.JoinAll();
+ tp2.JoinAll();
+}
+
+TEST(ThreadLocalTest, Boolean) {
+ {
+ base::ThreadLocalBoolean tlb;
+ EXPECT_FALSE(tlb.Get());
+
+ tlb.Set(false);
+ EXPECT_FALSE(tlb.Get());
+
+ tlb.Set(true);
+ EXPECT_TRUE(tlb.Get());
+ }
+
+ // Our slot should have been freed, we're all reset.
+ {
+ base::ThreadLocalBoolean tlb;
+ EXPECT_FALSE(tlb.Get());
+ }
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/thread_restrictions.cc b/libchrome/base/threading/thread_restrictions.cc
new file mode 100644
index 0000000..00306c5
--- /dev/null
+++ b/libchrome/base/threading/thread_restrictions.cc
@@ -0,0 +1,85 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_restrictions.h"
+
+#if ENABLE_THREAD_RESTRICTIONS
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+
+namespace {
+
+LazyInstance<ThreadLocalBoolean>::Leaky
+ g_io_disallowed = LAZY_INSTANCE_INITIALIZER;
+
+LazyInstance<ThreadLocalBoolean>::Leaky
+ g_singleton_disallowed = LAZY_INSTANCE_INITIALIZER;
+
+LazyInstance<ThreadLocalBoolean>::Leaky
+ g_wait_disallowed = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+// static
+bool ThreadRestrictions::SetIOAllowed(bool allowed) {
+ bool previous_disallowed = g_io_disallowed.Get().Get();
+ g_io_disallowed.Get().Set(!allowed);
+ return !previous_disallowed;
+}
+
+// static
+void ThreadRestrictions::AssertIOAllowed() {
+ if (g_io_disallowed.Get().Get()) {
+ LOG(FATAL) <<
+ "Function marked as IO-only was called from a thread that "
+ "disallows IO! If this thread really should be allowed to "
+ "make IO calls, adjust the call to "
+ "base::ThreadRestrictions::SetIOAllowed() in this thread's "
+ "startup.";
+ }
+}
+
+// static
+bool ThreadRestrictions::SetSingletonAllowed(bool allowed) {
+ bool previous_disallowed = g_singleton_disallowed.Get().Get();
+ g_singleton_disallowed.Get().Set(!allowed);
+ return !previous_disallowed;
+}
+
+// static
+void ThreadRestrictions::AssertSingletonAllowed() {
+ if (g_singleton_disallowed.Get().Get()) {
+ LOG(FATAL) << "LazyInstance/Singleton is not allowed to be used on this "
+ << "thread. Most likely it's because this thread is not "
+ << "joinable, so AtExitManager may have deleted the object "
+ << "on shutdown, leading to a potential shutdown crash.";
+ }
+}
+
+// static
+void ThreadRestrictions::DisallowWaiting() {
+ g_wait_disallowed.Get().Set(true);
+}
+
+// static
+void ThreadRestrictions::AssertWaitAllowed() {
+ if (g_wait_disallowed.Get().Get()) {
+ LOG(FATAL) << "Waiting is not allowed to be used on this thread to prevent "
+ << "jank and deadlock.";
+ }
+}
+
+bool ThreadRestrictions::SetWaitAllowed(bool allowed) {
+ bool previous_disallowed = g_wait_disallowed.Get().Get();
+ g_wait_disallowed.Get().Set(!allowed);
+ return !previous_disallowed;
+}
+
+} // namespace base
+
+#endif // ENABLE_THREAD_RESTRICTIONS
diff --git a/libchrome/base/threading/thread_restrictions.h b/libchrome/base/threading/thread_restrictions.h
new file mode 100644
index 0000000..4212a4b
--- /dev/null
+++ b/libchrome/base/threading/thread_restrictions.h
@@ -0,0 +1,270 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_RESTRICTIONS_H_
+#define BASE_THREADING_THREAD_RESTRICTIONS_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+// See comment at top of thread_checker.h
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
+#define ENABLE_THREAD_RESTRICTIONS 1
+#else
+#define ENABLE_THREAD_RESTRICTIONS 0
+#endif
+
+class BrowserProcessImpl;
+class HistogramSynchronizer;
+class NativeBackendKWallet;
+class ScopedAllowWaitForLegacyWebViewApi;
+
+namespace cc {
+class CompletionEvent;
+class SingleThreadTaskGraphRunner;
+}
+namespace chromeos {
+class BlockingMethodCaller;
+namespace system {
+class StatisticsProviderImpl;
+}
+}
+namespace chrome_browser_net {
+class Predictor;
+}
+namespace content {
+class BrowserGpuChannelHostFactory;
+class BrowserGpuMemoryBufferManager;
+class BrowserShutdownProfileDumper;
+class BrowserSurfaceViewManager;
+class BrowserTestBase;
+class NestedMessagePumpAndroid;
+class ScopedAllowWaitForAndroidLayoutTests;
+class ScopedAllowWaitForDebugURL;
+class SoftwareOutputDeviceMus;
+class TextInputClientMac;
+class CategorizedWorkerPool;
+} // namespace content
+namespace dbus {
+class Bus;
+}
+namespace disk_cache {
+class BackendImpl;
+class InFlightIO;
+}
+namespace gpu {
+class GpuChannelHost;
+}
+namespace mojo {
+namespace common {
+class MessagePumpMojo;
+}
+class SyncCallRestrictions;
+}
+namespace ui {
+class CommandBufferClientImpl;
+class CommandBufferLocal;
+class GpuState;
+}
+namespace net {
+class NetworkChangeNotifierMac;
+namespace internal {
+class AddressTrackerLinux;
+}
+}
+
+namespace remoting {
+class AutoThread;
+}
+
+namespace ui {
+class WindowResizeHelperMac;
+}
+
+namespace views {
+class ScreenMus;
+}
+
+namespace base {
+
+namespace android {
+class JavaHandlerThread;
+}
+
+class SequencedWorkerPool;
+class SimpleThread;
+class Thread;
+class ThreadTestHelper;
+
+// Certain behavior is disallowed on certain threads. ThreadRestrictions helps
+// enforce these rules. Examples of such rules:
+//
+// * Do not do blocking IO (makes the thread janky)
+// * Do not access Singleton/LazyInstance (may lead to shutdown crashes)
+//
+// Here's more about how the protection works:
+//
+// 1) If a thread should not be allowed to make IO calls, mark it:
+// base::ThreadRestrictions::SetIOAllowed(false);
+// By default, threads *are* allowed to make IO calls.
+// In Chrome browser code, IO calls should be proxied to the File thread.
+//
+// 2) If a function makes a call that will go out to disk, check whether the
+// current thread is allowed:
+// base::ThreadRestrictions::AssertIOAllowed();
+//
+//
+// Style tip: where should you put AssertIOAllowed checks? It's best
+// if you put them as close to the disk access as possible, at the
+// lowest level. This rule is simple to follow and helps catch all
+// callers. For example, if your function GoDoSomeBlockingDiskCall()
+// only calls other functions in Chrome and not fopen(), you should go
+// add the AssertIOAllowed checks in the helper functions.
+
+class BASE_EXPORT ThreadRestrictions {
+ public:
+ // Constructing a ScopedAllowIO temporarily allows IO for the current
+ // thread. Doing this is almost certainly always incorrect.
+ class BASE_EXPORT ScopedAllowIO {
+ public:
+ ScopedAllowIO() { previous_value_ = SetIOAllowed(true); }
+ ~ScopedAllowIO() { SetIOAllowed(previous_value_); }
+ private:
+ // Whether IO is allowed when the ScopedAllowIO was constructed.
+ bool previous_value_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedAllowIO);
+ };
+
+ // Constructing a ScopedAllowSingleton temporarily allows accessing for the
+ // current thread. Doing this is almost always incorrect.
+ class BASE_EXPORT ScopedAllowSingleton {
+ public:
+ ScopedAllowSingleton() { previous_value_ = SetSingletonAllowed(true); }
+ ~ScopedAllowSingleton() { SetSingletonAllowed(previous_value_); }
+ private:
+ // Whether singleton use is allowed when the ScopedAllowSingleton was
+ // constructed.
+ bool previous_value_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedAllowSingleton);
+ };
+
+#if ENABLE_THREAD_RESTRICTIONS
+  // Set whether the current thread is allowed to make IO calls.
+ // Threads start out in the *allowed* state.
+ // Returns the previous value.
+ static bool SetIOAllowed(bool allowed);
+
+ // Check whether the current thread is allowed to make IO calls,
+ // and DCHECK if not. See the block comment above the class for
+ // a discussion of where to add these checks.
+ static void AssertIOAllowed();
+
+ // Set whether the current thread can use singletons. Returns the previous
+ // value.
+ static bool SetSingletonAllowed(bool allowed);
+
+ // Check whether the current thread is allowed to use singletons (Singleton /
+ // LazyInstance). DCHECKs if not.
+ static void AssertSingletonAllowed();
+
+ // Disable waiting on the current thread. Threads start out in the *allowed*
+ // state. Returns the previous value.
+ static void DisallowWaiting();
+
+ // Check whether the current thread is allowed to wait, and DCHECK if not.
+ static void AssertWaitAllowed();
+#else
+ // Inline the empty definitions of these functions so that they can be
+ // compiled out.
+ static bool SetIOAllowed(bool) { return true; }
+ static void AssertIOAllowed() {}
+ static bool SetSingletonAllowed(bool) { return true; }
+ static void AssertSingletonAllowed() {}
+ static void DisallowWaiting() {}
+ static void AssertWaitAllowed() {}
+#endif
+
+ private:
+ // DO NOT ADD ANY OTHER FRIEND STATEMENTS, talk to jam or brettw first.
+ // BEGIN ALLOWED USAGE.
+ friend class content::BrowserShutdownProfileDumper;
+ friend class content::BrowserSurfaceViewManager;
+ friend class content::BrowserTestBase;
+ friend class content::NestedMessagePumpAndroid;
+ friend class content::ScopedAllowWaitForAndroidLayoutTests;
+ friend class content::ScopedAllowWaitForDebugURL;
+ friend class ::HistogramSynchronizer;
+ friend class ::ScopedAllowWaitForLegacyWebViewApi;
+ friend class cc::CompletionEvent;
+ friend class cc::SingleThreadTaskGraphRunner;
+ friend class content::CategorizedWorkerPool;
+ friend class remoting::AutoThread;
+ friend class ui::WindowResizeHelperMac;
+ friend class MessagePumpDefault;
+ friend class SequencedWorkerPool;
+ friend class SimpleThread;
+ friend class Thread;
+ friend class ThreadTestHelper;
+ friend class PlatformThread;
+ friend class android::JavaHandlerThread;
+ friend class mojo::common::MessagePumpMojo;
+ friend class mojo::SyncCallRestrictions;
+ friend class ui::CommandBufferClientImpl;
+ friend class ui::CommandBufferLocal;
+ friend class ui::GpuState;
+
+ // END ALLOWED USAGE.
+ // BEGIN USAGE THAT NEEDS TO BE FIXED.
+ friend class ::chromeos::BlockingMethodCaller; // http://crbug.com/125360
+ friend class ::chromeos::system::StatisticsProviderImpl; // http://crbug.com/125385
+ friend class chrome_browser_net::Predictor; // http://crbug.com/78451
+ friend class
+ content::BrowserGpuChannelHostFactory; // http://crbug.com/125248
+ friend class
+ content::BrowserGpuMemoryBufferManager; // http://crbug.com/420368
+ friend class content::TextInputClientMac; // http://crbug.com/121917
+ friend class dbus::Bus; // http://crbug.com/125222
+ friend class disk_cache::BackendImpl; // http://crbug.com/74623
+ friend class disk_cache::InFlightIO; // http://crbug.com/74623
+ friend class gpu::GpuChannelHost; // http://crbug.com/125264
+ friend class net::internal::AddressTrackerLinux; // http://crbug.com/125097
+ friend class net::NetworkChangeNotifierMac; // http://crbug.com/125097
+ friend class ::BrowserProcessImpl; // http://crbug.com/125207
+ friend class ::NativeBackendKWallet; // http://crbug.com/125331
+#if !defined(OFFICIAL_BUILD)
+ friend class content::SoftwareOutputDeviceMus; // Interim non-production code
+#endif
+ friend class views::ScreenMus;
+// END USAGE THAT NEEDS TO BE FIXED.
+
+#if ENABLE_THREAD_RESTRICTIONS
+ static bool SetWaitAllowed(bool allowed);
+#else
+ static bool SetWaitAllowed(bool) { return true; }
+#endif
+
+ // Constructing a ScopedAllowWait temporarily allows waiting on the current
+ // thread. Doing this is almost always incorrect, which is why we limit who
+ // can use this through friend. If you find yourself needing to use this, find
+ // another way. Talk to jam or brettw.
+ class BASE_EXPORT ScopedAllowWait {
+ public:
+ ScopedAllowWait() { previous_value_ = SetWaitAllowed(true); }
+ ~ScopedAllowWait() { SetWaitAllowed(previous_value_); }
+ private:
+ // Whether singleton use is allowed when the ScopedAllowWait was
+ // constructed.
+ bool previous_value_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedAllowWait);
+ };
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ThreadRestrictions);
+};
+
+} // namespace base
+
+#endif // BASE_THREADING_THREAD_RESTRICTIONS_H_
diff --git a/libchrome/base/threading/thread_task_runner_handle.cc b/libchrome/base/threading/thread_task_runner_handle.cc
new file mode 100644
index 0000000..190e18f
--- /dev/null
+++ b/libchrome/base/threading/thread_task_runner_handle.cc
@@ -0,0 +1,51 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_task_runner_handle.h"
+
+#include <utility>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+
+namespace {
+
+// Leaky thread-local slot holding the handle bound to the current thread,
+// or null when no ThreadTaskRunnerHandle is alive on this thread.
+base::LazyInstance<base::ThreadLocalPointer<ThreadTaskRunnerHandle>>::Leaky
+    lazy_tls_ptr = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+// static
+scoped_refptr<SingleThreadTaskRunner> ThreadTaskRunnerHandle::Get() {
+  ThreadTaskRunnerHandle* current = lazy_tls_ptr.Pointer()->Get();
+  // DCHECKs rather than returning null: callers may assume a usable runner.
+  DCHECK(current);
+  return current->task_runner_;
+}
+
+// static
+bool ThreadTaskRunnerHandle::IsSet() {
+  return !!lazy_tls_ptr.Pointer()->Get();
+}
+
+ThreadTaskRunnerHandle::ThreadTaskRunnerHandle(
+    scoped_refptr<SingleThreadTaskRunner> task_runner)
+    : task_runner_(std::move(task_runner)) {
+  DCHECK(task_runner_->BelongsToCurrentThread());
+  // No SequencedTaskRunnerHandle (which includes ThreadTaskRunnerHandles)
+  // should already be set for this thread.
+  DCHECK(!SequencedTaskRunnerHandle::IsSet());
+  lazy_tls_ptr.Pointer()->Set(this);
+}
+
+ThreadTaskRunnerHandle::~ThreadTaskRunnerHandle() {
+  // Must be destroyed on the thread it was bound to, while still being the
+  // handle registered in this thread's TLS slot.
+  DCHECK(task_runner_->BelongsToCurrentThread());
+  DCHECK_EQ(lazy_tls_ptr.Pointer()->Get(), this);
+  lazy_tls_ptr.Pointer()->Set(nullptr);
+}
+
+}  // namespace base
diff --git a/libchrome/base/threading/thread_task_runner_handle.h b/libchrome/base/threading/thread_task_runner_handle.h
new file mode 100644
index 0000000..c8e5893
--- /dev/null
+++ b/libchrome/base/threading/thread_task_runner_handle.h
@@ -0,0 +1,43 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_THREAD_TASK_RUNNER_HANDLE_H_
+#define BASE_THREADING_THREAD_TASK_RUNNER_HANDLE_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
+
+namespace base {
+
+// ThreadTaskRunnerHandle stores a reference to a thread's TaskRunner
+// in thread-local storage. Callers can then retrieve the TaskRunner
+// for the current thread by calling ThreadTaskRunnerHandle::Get().
+// At most one TaskRunner may be bound to each thread at a time.
+// Prefer SequenceTaskRunnerHandle to this unless thread affinity is required.
+class BASE_EXPORT ThreadTaskRunnerHandle {
+ public:
+  // Gets the SingleThreadTaskRunner for the current thread. A handle must
+  // already be bound (DCHECKs otherwise); call IsSet() first when unsure.
+  static scoped_refptr<SingleThreadTaskRunner> Get();
+
+  // Returns true if the SingleThreadTaskRunner is already created for
+  // the current thread.
+  static bool IsSet();
+
+  // Binds |task_runner| to the current thread. |task_runner| must belong
+  // to the current thread for this to succeed.
+  explicit ThreadTaskRunnerHandle(
+      scoped_refptr<SingleThreadTaskRunner> task_runner);
+  // Unbinds the handle; must run on the same thread as the constructor.
+  ~ThreadTaskRunnerHandle();
+
+ private:
+  scoped_refptr<SingleThreadTaskRunner> task_runner_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadTaskRunnerHandle);
+};
+
+} // namespace base
+
+#endif // BASE_THREADING_THREAD_TASK_RUNNER_HANDLE_H_
diff --git a/libchrome/base/threading/thread_unittest.cc b/libchrome/base/threading/thread_unittest.cc
new file mode 100644
index 0000000..b0fd265
--- /dev/null
+++ b/libchrome/base/threading/thread_unittest.cc
@@ -0,0 +1,299 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread.h"
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+using base::Thread;
+
+typedef PlatformTest ThreadTest;
+
+namespace {
+
+// Flips |*value|; posted to threads to prove a task actually ran.
+void ToggleValue(bool* value) {
+  *value = !*value;
+}
+
+// Thread whose Init() sleeps before setting a flag; used to verify that
+// thread startup waits for Init() to complete.
+class SleepInsideInitThread : public Thread {
+ public:
+  SleepInsideInitThread() : Thread("none") {
+    init_called_ = false;
+  }
+  ~SleepInsideInitThread() override { Stop(); }
+
+  void Init() override {
+    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(500));
+    init_called_ = true;
+  }
+  // True once Init() has finished running.
+  bool InitCalled() { return init_called_; }
+ private:
+  bool init_called_;
+};
+
+// Lifecycle events recorded by the tests below.
+enum ThreadEvent {
+  // Thread::Init() was called.
+  THREAD_EVENT_INIT = 0,
+
+  // The MessageLoop for the thread was deleted.
+  THREAD_EVENT_MESSAGE_LOOP_DESTROYED,
+
+  // Thread::CleanUp() was called.
+  THREAD_EVENT_CLEANUP,
+
+  // Keep at end of list.
+  THREAD_NUM_EVENTS
+};
+
+typedef std::vector<ThreadEvent> EventList;
+
+class CaptureToEventList : public Thread {
+ public:
+  // This Thread pushes events into the vector |event_list| to show
+  // the order they occurred in. |event_list| must remain valid for the
+  // lifetime of this thread.
+  explicit CaptureToEventList(EventList* event_list)
+      : Thread("none"),
+        event_list_(event_list) {
+  }
+
+  ~CaptureToEventList() override { Stop(); }
+
+  void Init() override { event_list_->push_back(THREAD_EVENT_INIT); }
+
+  void CleanUp() override { event_list_->push_back(THREAD_EVENT_CLEANUP); }
+
+ private:
+  EventList* event_list_;
+};
+
+// Observer that writes a value into |event_list| when a message loop has been
+// destroyed.
+class CapturingDestructionObserver
+    : public base::MessageLoop::DestructionObserver {
+ public:
+  // |event_list| must remain valid throughout the observer's lifetime.
+  explicit CapturingDestructionObserver(EventList* event_list)
+      : event_list_(event_list) {
+  }
+
+  // DestructionObserver implementation:
+  void WillDestroyCurrentMessageLoop() override {
+    event_list_->push_back(THREAD_EVENT_MESSAGE_LOOP_DESTROYED);
+    // Clear the pointer: the list must not be touched after the loop dies.
+    event_list_ = NULL;
+  }
+
+ private:
+  EventList* event_list_;
+};
+
+// Task that adds a destruction observer to the current message loop.
+void RegisterDestructionObserver(
+    base::MessageLoop::DestructionObserver* observer) {
+  base::MessageLoop::current()->AddDestructionObserver(observer);
+}
+
+// Task that calls GetThreadId() of |thread|, stores the result into |id|,
+// then signals |event|.
+void ReturnThreadId(base::Thread* thread,
+                    base::PlatformThreadId* id,
+                    base::WaitableEvent* event) {
+  *id = thread->GetThreadId();
+  event->Signal();
+}
+
+}  // namespace
+
+// A Thread tolerates Stop() before Start(), can be started and stopped
+// repeatedly, and tolerates redundant Stop() calls.
+TEST_F(ThreadTest, Restart) {
+  Thread a("Restart");
+  a.Stop();
+  EXPECT_FALSE(a.message_loop());
+  EXPECT_FALSE(a.IsRunning());
+  EXPECT_TRUE(a.Start());
+  EXPECT_TRUE(a.message_loop());
+  EXPECT_TRUE(a.IsRunning());
+  a.Stop();
+  EXPECT_FALSE(a.message_loop());
+  EXPECT_FALSE(a.IsRunning());
+  EXPECT_TRUE(a.Start());
+  EXPECT_TRUE(a.message_loop());
+  EXPECT_TRUE(a.IsRunning());
+  a.Stop();
+  EXPECT_FALSE(a.message_loop());
+  EXPECT_FALSE(a.IsRunning());
+  a.Stop();
+  EXPECT_FALSE(a.message_loop());
+  EXPECT_FALSE(a.IsRunning());
+}
+
+// A thread started with a small custom stack size still processes tasks.
+TEST_F(ThreadTest, StartWithOptions_StackSize) {
+  Thread a("StartWithStackSize");
+  // Ensure that the thread can work with only 12 kb and still process a
+  // message.
+  Thread::Options options;
+#if defined(ADDRESS_SANITIZER)
+  // ASan bloats the stack variables and overflows the 12 kb stack.
+  options.stack_size = 24*1024;
+#else
+  options.stack_size = 12*1024;
+#endif
+  EXPECT_TRUE(a.StartWithOptions(options));
+  EXPECT_TRUE(a.message_loop());
+  EXPECT_TRUE(a.IsRunning());
+
+  bool was_invoked = false;
+  a.task_runner()->PostTask(FROM_HERE, base::Bind(&ToggleValue, &was_invoked));
+
+  // wait for the task to run (we could use a kernel event here
+  // instead to avoid busy waiting, but this is sufficient for
+  // testing purposes).
+  for (int i = 100; i >= 0 && !was_invoked; --i) {
+    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(10));
+  }
+  EXPECT_TRUE(was_invoked);
+}
+
+// Destroying a Thread runs all tasks posted before destruction.
+TEST_F(ThreadTest, TwoTasks) {
+  bool was_invoked = false;
+  {
+    Thread a("TwoTasks");
+    EXPECT_TRUE(a.Start());
+    EXPECT_TRUE(a.message_loop());
+
+    // Test that all events are dispatched before the Thread object is
+    // destroyed. We do this by dispatching a sleep event before the
+    // event that will toggle our sentinel value.
+    a.task_runner()->PostTask(
+        FROM_HERE, base::Bind(static_cast<void (*)(base::TimeDelta)>(
+                                  &base::PlatformThread::Sleep),
+                              base::TimeDelta::FromMilliseconds(20)));
+    a.task_runner()->PostTask(FROM_HERE,
+                              base::Bind(&ToggleValue, &was_invoked));
+  }
+  EXPECT_TRUE(was_invoked);
+}
+
+// StopSoon() may be called repeatedly and followed by Stop().
+TEST_F(ThreadTest, StopSoon) {
+  Thread a("StopSoon");
+  EXPECT_TRUE(a.Start());
+  EXPECT_TRUE(a.message_loop());
+  EXPECT_TRUE(a.IsRunning());
+  a.StopSoon();
+  a.StopSoon();
+  a.Stop();
+  EXPECT_FALSE(a.message_loop());
+  EXPECT_FALSE(a.IsRunning());
+}
+
+// thread_name() reflects the name passed to the constructor.
+TEST_F(ThreadTest, ThreadName) {
+  Thread a("ThreadName");
+  EXPECT_TRUE(a.Start());
+  EXPECT_EQ("ThreadName", a.thread_name());
+}
+
+// GetThreadId() is consistent whether queried from the owning thread or from
+// the thread itself; started threads have valid, distinct ids.
+TEST_F(ThreadTest, ThreadId) {
+  Thread a("ThreadId0");
+  Thread b("ThreadId1");
+  a.Start();
+  b.Start();
+
+  // Post a task that calls GetThreadId() on the created thread.
+  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+                            base::WaitableEvent::InitialState::NOT_SIGNALED);
+  base::PlatformThreadId id_from_new_thread;
+  a.task_runner()->PostTask(
+      FROM_HERE, base::Bind(ReturnThreadId, &a, &id_from_new_thread, &event));
+
+  // Call GetThreadId() on the current thread before calling event.Wait() so
+  // that this test can find a race issue with TSAN.
+  base::PlatformThreadId id_from_current_thread = a.GetThreadId();
+
+  // Check if GetThreadId() returns consistent value in both threads.
+  event.Wait();
+  EXPECT_EQ(id_from_current_thread, id_from_new_thread);
+
+  // A started thread should have a valid ID.
+  EXPECT_NE(base::kInvalidThreadId, a.GetThreadId());
+  EXPECT_NE(base::kInvalidThreadId, b.GetThreadId());
+
+  // Each thread should have a different thread ID.
+  EXPECT_NE(a.GetThreadId(), b.GetThreadId());
+}
+
+// Each Start() after a Stop() yields a fresh thread id.
+TEST_F(ThreadTest, ThreadIdWithRestart) {
+  Thread a("ThreadIdWithRestart");
+  base::PlatformThreadId previous_id = base::kInvalidThreadId;
+
+  for (size_t i = 0; i < 16; ++i) {
+    EXPECT_TRUE(a.Start());
+    base::PlatformThreadId current_id = a.GetThreadId();
+    EXPECT_NE(previous_id, current_id);
+    previous_id = current_id;
+    a.Stop();
+  }
+}
+
+// Make sure Init() is called after Start() and before
+// WaitUntilThreadInitialized() returns.
+TEST_F(ThreadTest, SleepInsideInit) {
+  SleepInsideInitThread t;
+  EXPECT_FALSE(t.InitCalled());
+  t.StartAndWaitForTesting();
+  EXPECT_TRUE(t.InitCalled());
+}
+
+// Make sure that the destruction sequence is:
+//
+//  (1) Thread::CleanUp()
+//  (2) MessageLoop::~MessageLoop()
+//      MessageLoop::DestructionObservers called.
+TEST_F(ThreadTest, CleanUp) {
+  EventList captured_events;
+  CapturingDestructionObserver loop_destruction_observer(&captured_events);
+
+  {
+    // Start a thread which writes its event into |captured_events|.
+    CaptureToEventList t(&captured_events);
+    EXPECT_TRUE(t.Start());
+    EXPECT_TRUE(t.message_loop());
+    EXPECT_TRUE(t.IsRunning());
+
+    // Register an observer that writes into |captured_events| once the
+    // thread's message loop is destroyed.
+    t.task_runner()->PostTask(
+        FROM_HERE, base::Bind(&RegisterDestructionObserver,
+                              base::Unretained(&loop_destruction_observer)));
+
+    // Upon leaving this scope, the thread is deleted.
+  }
+
+  // Check the order of events during shutdown.
+  ASSERT_EQ(static_cast<size_t>(THREAD_NUM_EVENTS), captured_events.size());
+  EXPECT_EQ(THREAD_EVENT_INIT, captured_events[0]);
+  EXPECT_EQ(THREAD_EVENT_CLEANUP, captured_events[1]);
+  EXPECT_EQ(THREAD_EVENT_MESSAGE_LOOP_DESTROYED, captured_events[2]);
+}
+
+// An unstarted thread exposes no task runner.
+TEST_F(ThreadTest, ThreadNotStarted) {
+  Thread a("Inert");
+  EXPECT_FALSE(a.task_runner());
+}
+
+TEST_F(ThreadTest, MultipleWaitUntilThreadStarted) {
+  Thread a("MultipleWaitUntilThreadStarted");
+  EXPECT_TRUE(a.Start());
+  // It's OK to call WaitUntilThreadStarted() multiple times.
+  EXPECT_TRUE(a.WaitUntilThreadStarted());
+  EXPECT_TRUE(a.WaitUntilThreadStarted());
+}
diff --git a/libchrome/base/threading/worker_pool.cc b/libchrome/base/threading/worker_pool.cc
new file mode 100644
index 0000000..0b7bf8e
--- /dev/null
+++ b/libchrome/base/threading/worker_pool.cc
@@ -0,0 +1,123 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/worker_pool.h"
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/lazy_instance.h"
+#include "base/macros.h"
+#include "base/task_runner.h"
+#include "base/threading/post_task_and_reply_impl.h"
+#include "base/tracked_objects.h"
+
+namespace base {
+
+namespace {
+
+// PostTaskAndReplyImpl that routes |task| to the WorkerPool with a fixed
+// |task_is_slow| setting; reply handling comes from the base class.
+class PostTaskAndReplyWorkerPool : public internal::PostTaskAndReplyImpl {
+ public:
+  explicit PostTaskAndReplyWorkerPool(bool task_is_slow)
+      : task_is_slow_(task_is_slow) {
+  }
+  ~PostTaskAndReplyWorkerPool() override = default;
+
+ private:
+  bool PostTask(const tracked_objects::Location& from_here,
+                const Closure& task) override {
+    return WorkerPool::PostTask(from_here, task, task_is_slow_);
+  }
+
+  bool task_is_slow_;
+};
+
+// WorkerPoolTaskRunner ---------------------------------------------
+// A TaskRunner which posts tasks to a WorkerPool with a
+// fixed |tasks_are_slow| setting.
+//
+// Note that this class is RefCountedThreadSafe (inherited from TaskRunner).
+class WorkerPoolTaskRunner : public TaskRunner {
+ public:
+  explicit WorkerPoolTaskRunner(bool tasks_are_slow);
+
+  // TaskRunner implementation
+  bool PostDelayedTask(const tracked_objects::Location& from_here,
+                       const Closure& task,
+                       TimeDelta delay) override;
+  bool RunsTasksOnCurrentThread() const override;
+
+ private:
+  ~WorkerPoolTaskRunner() override;
+
+  // Helper function for posting a delayed task. Asserts that the delay is
+  // zero because non-zero delays are not supported.
+  bool PostDelayedTaskAssertZeroDelay(
+      const tracked_objects::Location& from_here,
+      const Closure& task,
+      base::TimeDelta delay);
+
+  const bool tasks_are_slow_;
+
+  DISALLOW_COPY_AND_ASSIGN(WorkerPoolTaskRunner);
+};
+
+WorkerPoolTaskRunner::WorkerPoolTaskRunner(bool tasks_are_slow)
+    : tasks_are_slow_(tasks_are_slow) {
+}
+
+WorkerPoolTaskRunner::~WorkerPoolTaskRunner() {
+}
+
+bool WorkerPoolTaskRunner::PostDelayedTask(
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    TimeDelta delay) {
+  return PostDelayedTaskAssertZeroDelay(from_here, task, delay);
+}
+
+bool WorkerPoolTaskRunner::RunsTasksOnCurrentThread() const {
+  return WorkerPool::RunsTasksOnCurrentThread();
+}
+
+bool WorkerPoolTaskRunner::PostDelayedTaskAssertZeroDelay(
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    base::TimeDelta delay) {
+  DCHECK_EQ(delay.InMillisecondsRoundedUp(), 0)
+      << "WorkerPoolTaskRunner does not support non-zero delays";
+  return WorkerPool::PostTask(from_here, task, tasks_are_slow_);
+}
+
+// Holds the two shared TaskRunner wrappers, indexed by |tasks_are_slow|
+// (0 = fast tasks, 1 = slow tasks).
+struct TaskRunnerHolder {
+  TaskRunnerHolder() {
+    taskrunners_[0] = new WorkerPoolTaskRunner(false);
+    taskrunners_[1] = new WorkerPoolTaskRunner(true);
+  }
+  scoped_refptr<TaskRunner> taskrunners_[2];
+};
+
+// Leaky: the runners live for the remainder of the process.
+base::LazyInstance<TaskRunnerHolder>::Leaky
+    g_taskrunners = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+// Runs |task| on a worker thread, then |reply| back on the posting sequence.
+bool WorkerPool::PostTaskAndReply(const tracked_objects::Location& from_here,
+                                  const Closure& task,
+                                  const Closure& reply,
+                                  bool task_is_slow) {
+  // Do not report PostTaskAndReplyRelay leaks in tests. There's nothing we can
+  // do about them because WorkerPool doesn't have a flushing API.
+  // http://crbug.com/248513
+  // http://crbug.com/290897
+  return PostTaskAndReplyWorkerPool(task_is_slow).PostTaskAndReply(
+      from_here, task, reply);
+}
+
+// static
+// Returns one of the two process-wide runners from the leaky holder; the
+// bool directly indexes the holder's array.
+const scoped_refptr<TaskRunner>&
+WorkerPool::GetTaskRunner(bool tasks_are_slow) {
+  return g_taskrunners.Get().taskrunners_[tasks_are_slow];
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/worker_pool.h b/libchrome/base/threading/worker_pool.h
new file mode 100644
index 0000000..a52a414
--- /dev/null
+++ b/libchrome/base/threading/worker_pool.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_WORKER_POOL_H_
+#define BASE_THREADING_WORKER_POOL_H_
+
+#include "base/base_export.h"
+#include "base/callback_forward.h"
+#include "base/memory/ref_counted.h"
+
+class Task;
+
+namespace tracked_objects {
+class Location;
+} // namespace tracked_objects
+
+namespace base {
+
+class TaskRunner;
+
+// This is a facility that runs tasks that don't require a specific thread or
+// a message loop.
+//
+// WARNING: This shouldn't be used unless absolutely necessary. We don't wait
+// for the worker pool threads to finish on shutdown, so the tasks running
+// inside the pool must be extremely careful about other objects they access
+// (MessageLoops, Singletons, etc). During shutdown these object may no longer
+// exist.
+class BASE_EXPORT WorkerPool {
+ public:
+  // This function posts |task| to run on a worker thread. |task_is_slow|
+  // should be used for tasks that will take a long time to execute. Returns
+  // false if |task| could not be posted to a worker thread. Regardless of
+  // return value, ownership of |task| is transferred to the worker pool.
+  static bool PostTask(const tracked_objects::Location& from_here,
+                       const base::Closure& task, bool task_is_slow);
+
+  // Just like TaskRunner::PostTaskAndReply, except the destination
+  // for |task| is a worker thread and you can specify |task_is_slow| just
+  // like you can for PostTask above.
+  static bool PostTaskAndReply(const tracked_objects::Location& from_here,
+                               const Closure& task,
+                               const Closure& reply,
+                               bool task_is_slow);
+
+  // Return true if the current thread is one that this WorkerPool runs tasks
+  // on. (Note that if the Windows worker pool is used without going through
+  // this WorkerPool interface, RunsTasksOnCurrentThread would return false on
+  // those threads.)
+  static bool RunsTasksOnCurrentThread();
+
+  // Get a TaskRunner wrapper which posts to the WorkerPool using the given
+  // |task_is_slow| behavior. The returned runner does not support delayed
+  // tasks: non-zero delays are rejected with a DCHECK.
+  static const scoped_refptr<TaskRunner>& GetTaskRunner(bool task_is_slow);
+};
+
+} // namespace base
+
+#endif // BASE_THREADING_WORKER_POOL_H_
diff --git a/libchrome/base/threading/worker_pool_posix.cc b/libchrome/base/threading/worker_pool_posix.cc
new file mode 100644
index 0000000..6b4c42f
--- /dev/null
+++ b/libchrome/base/threading/worker_pool_posix.cc
@@ -0,0 +1,193 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/worker_pool_posix.h"
+
+#include <stddef.h>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_local.h"
+#include "base/threading/worker_pool.h"
+#include "base/trace_event/trace_event.h"
+#include "base/tracked_objects.h"
+
+using tracked_objects::TrackedTime;
+
+namespace base {
+
+namespace {
+
+// Thread-local flag: true on threads spawned by this pool (set at the top of
+// WorkerThread::ThreadMain), backing WorkerPool::RunsTasksOnCurrentThread().
+base::LazyInstance<ThreadLocalBoolean>::Leaky
+    g_worker_pool_running_on_this_thread = LAZY_INSTANCE_INITIALIZER;
+
+// Idle workers give up and exit after 10 minutes without work.
+const int kIdleSecondsBeforeExit = 10 * 60;
+
+// Owns the process-wide PosixDynamicThreadPool and terminates it on
+// destruction. Note |task_is_slow| is accepted but ignored on POSIX.
+class WorkerPoolImpl {
+ public:
+  WorkerPoolImpl();
+  ~WorkerPoolImpl();
+
+  void PostTask(const tracked_objects::Location& from_here,
+                const base::Closure& task,
+                bool task_is_slow);
+
+ private:
+  scoped_refptr<base::PosixDynamicThreadPool> pool_;
+};
+
+WorkerPoolImpl::WorkerPoolImpl()
+    : pool_(new base::PosixDynamicThreadPool("WorkerPool",
+                                             kIdleSecondsBeforeExit)) {}
+
+WorkerPoolImpl::~WorkerPoolImpl() {
+  pool_->Terminate();
+}
+
+void WorkerPoolImpl::PostTask(const tracked_objects::Location& from_here,
+                              const base::Closure& task,
+                              bool /*task_is_slow*/) {
+  pool_->PostTask(from_here, task);
+}
+
+base::LazyInstance<WorkerPoolImpl> g_lazy_worker_pool =
+    LAZY_INSTANCE_INITIALIZER;
+
+// PlatformThread delegate that pulls tasks from |pool_| until it receives a
+// null task, then deletes itself (the thread is non-joinable).
+class WorkerThread : public PlatformThread::Delegate {
+ public:
+  WorkerThread(const std::string& name_prefix,
+               base::PosixDynamicThreadPool* pool)
+      : name_prefix_(name_prefix), pool_(pool) {}
+
+  void ThreadMain() override;
+
+ private:
+  const std::string name_prefix_;
+  // Keeps the pool alive for as long as this worker is running.
+  scoped_refptr<base::PosixDynamicThreadPool> pool_;
+
+  DISALLOW_COPY_AND_ASSIGN(WorkerThread);
+};
+
+void WorkerThread::ThreadMain() {
+  g_worker_pool_running_on_this_thread.Get().Set(true);
+  const std::string name = base::StringPrintf("%s/%d", name_prefix_.c_str(),
+                                              PlatformThread::CurrentId());
+  // Note |name.c_str()| must remain valid for the whole life of the thread.
+  PlatformThread::SetName(name);
+
+  for (;;) {
+    PendingTask pending_task = pool_->WaitForTask();
+    // A null task means the pool is terminated or the idle timeout expired.
+    if (pending_task.task.is_null())
+      break;
+    TRACE_TASK_EXECUTION("WorkerThread::ThreadMain::Run", pending_task);
+
+    tracked_objects::TaskStopwatch stopwatch;
+    stopwatch.Start();
+    pending_task.task.Run();
+    stopwatch.Stop();
+
+    tracked_objects::ThreadData::TallyRunOnWorkerThreadIfTracking(
+        pending_task.birth_tally, pending_task.time_posted, stopwatch);
+  }
+
+  // The WorkerThread is non-joinable, so it deletes itself.
+  delete this;
+}
+
+}  // namespace
+
+// static
+bool WorkerPool::PostTask(const tracked_objects::Location& from_here,
+                          const base::Closure& task,
+                          bool task_is_slow) {
+  // The lazily-created global pool never rejects work, so this always
+  // reports success.
+  g_lazy_worker_pool.Pointer()->PostTask(from_here, task, task_is_slow);
+  return true;
+}
+
+// static
+bool WorkerPool::RunsTasksOnCurrentThread() {
+  // Set to true in WorkerThread::ThreadMain() for pool-owned threads only.
+  return g_worker_pool_running_on_this_thread.Get().Get();
+}
+
+PosixDynamicThreadPool::PosixDynamicThreadPool(const std::string& name_prefix,
+                                               int idle_seconds_before_exit)
+    : name_prefix_(name_prefix),
+      idle_seconds_before_exit_(idle_seconds_before_exit),
+      pending_tasks_available_cv_(&lock_),
+      num_idle_threads_(0),
+      terminated_(false) {}
+
+PosixDynamicThreadPool::~PosixDynamicThreadPool() {
+  // Drop any queued tasks that never ran, one at a time in FIFO order.
+  while (!pending_tasks_.empty())
+    pending_tasks_.pop();
+}
+
+void PosixDynamicThreadPool::Terminate() {
+  {
+    AutoLock locked(lock_);
+    DCHECK(!terminated_) << "Thread pool is already terminated.";
+    terminated_ = true;
+  }
+  // Broadcast outside the lock: wake every idle worker so it can observe
+  // |terminated_| in WaitForTask() and exit.
+  pending_tasks_available_cv_.Broadcast();
+}
+
+void PosixDynamicThreadPool::PostTask(
+    const tracked_objects::Location& from_here,
+    const base::Closure& task) {
+  PendingTask pending_task(from_here, task);
+  AddTask(&pending_task);
+}
+
+void PosixDynamicThreadPool::AddTask(PendingTask* pending_task) {
+  AutoLock locked(lock_);
+  DCHECK(!terminated_)
+      << "This thread pool is already terminated. Do not post new tasks.";
+
+  pending_tasks_.push(std::move(*pending_task));
+
+  // Enough idle threads to cover the queue: wake one of them rather than
+  // spawning a new worker.
+  if (static_cast<size_t>(num_idle_threads_) >= pending_tasks_.size()) {
+    pending_tasks_available_cv_.Signal();
+  } else {
+    // The new PlatformThread will take ownership of the WorkerThread object,
+    // which will delete itself on exit.
+    WorkerThread* worker = new WorkerThread(name_prefix_, this);
+    PlatformThread::CreateNonJoinable(0, worker);
+  }
+}
+
+PendingTask PosixDynamicThreadPool::WaitForTask() {
+  AutoLock locked(lock_);
+
+  // A terminated pool hands out empty tasks, telling workers to exit.
+  if (terminated_)
+    return PendingTask(FROM_HERE, base::Closure());
+
+  if (pending_tasks_.empty()) {  // No work available, wait for work.
+    num_idle_threads_++;
+    // |num_idle_threads_cv_| is only non-null in tests (see the header).
+    if (num_idle_threads_cv_.get())
+      num_idle_threads_cv_->Signal();
+    // Woken by AddTask()/Terminate(), or gives up after the idle timeout.
+    pending_tasks_available_cv_.TimedWait(
+        TimeDelta::FromSeconds(idle_seconds_before_exit_));
+    num_idle_threads_--;
+    if (num_idle_threads_cv_.get())
+      num_idle_threads_cv_->Signal();
+    if (pending_tasks_.empty()) {
+      // We waited for work, but there's still no work. Return an empty task
+      // to signal the thread to terminate.
+      return PendingTask(FROM_HERE, base::Closure());
+    }
+  }
+
+  PendingTask pending_task = std::move(pending_tasks_.front());
+  pending_tasks_.pop();
+  return pending_task;
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/worker_pool_posix.h b/libchrome/base/threading/worker_pool_posix.h
new file mode 100644
index 0000000..628e2b6
--- /dev/null
+++ b/libchrome/base/threading/worker_pool_posix.h
@@ -0,0 +1,98 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// The thread pool used in the POSIX implementation of WorkerPool dynamically
+// adds threads as necessary to handle all tasks. It keeps old threads around
+// for a period of time to allow them to be reused. After this waiting period,
+// the threads exit. This thread pool uses non-joinable threads, therefore
+// worker threads are not joined during process shutdown. This means that
+// potentially long running tasks (such as DNS lookup) do not block process
+// shutdown, but also means that process shutdown may "leak" objects. Note that
+// although PosixDynamicThreadPool spawns the worker threads and manages the
+// task queue, it does not own the worker threads. The worker threads ask the
+// PosixDynamicThreadPool for work and eventually clean themselves up. The
+// worker threads all maintain scoped_refptrs to the PosixDynamicThreadPool
+// instance, which prevents PosixDynamicThreadPool from disappearing before all
+// worker threads exit. The owner of PosixDynamicThreadPool should likewise
+// maintain a scoped_refptr to the PosixDynamicThreadPool instance.
+//
+// NOTE: The classes defined in this file are only meant for use by the POSIX
+// implementation of WorkerPool. No one else should be using these classes.
+// These symbols are exported in a header purely for testing purposes.
+
+#ifndef BASE_THREADING_WORKER_POOL_POSIX_H_
+#define BASE_THREADING_WORKER_POOL_POSIX_H_
+
+#include <memory>
+#include <queue>
+#include <string>
+
+#include "base/callback_forward.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/pending_task.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+#include "base/tracked_objects.h"
+
+class Task;
+
+namespace base {
+
+class BASE_EXPORT PosixDynamicThreadPool
+    : public RefCountedThreadSafe<PosixDynamicThreadPool> {
+ public:
+  class PosixDynamicThreadPoolPeer;
+
+  // All worker threads will share the same |name_prefix|. They will exit after
+  // |idle_seconds_before_exit|.
+  PosixDynamicThreadPool(const std::string& name_prefix,
+                         int idle_seconds_before_exit);
+
+  // Indicates that the thread pool is going away. Stops handing out tasks to
+  // worker threads. Wakes up all the idle threads to let them exit.
+  void Terminate();
+
+  // Adds |task| to the thread pool.
+  void PostTask(const tracked_objects::Location& from_here,
+                const Closure& task);
+
+  // Worker thread method to wait for up to |idle_seconds_before_exit| for more
+  // work from the thread pool. Returns a PendingTask whose |task| is null if
+  // no work became available (or the pool was terminated).
+  PendingTask WaitForTask();
+
+ private:
+  friend class RefCountedThreadSafe<PosixDynamicThreadPool>;
+  friend class PosixDynamicThreadPoolPeer;
+
+  ~PosixDynamicThreadPool();
+
+  // Adds pending_task to the thread pool. This function will clear
+  // |pending_task->task|.
+  void AddTask(PendingTask* pending_task);
+
+  const std::string name_prefix_;
+  const int idle_seconds_before_exit_;
+
+  Lock lock_;  // Protects all the variables below.
+
+  // Signal()s worker threads to let them know more tasks are available.
+  // Also used for Broadcast()'ing to worker threads to let them know the pool
+  // is being deleted and they can exit.
+  ConditionVariable pending_tasks_available_cv_;
+  int num_idle_threads_;
+  TaskQueue pending_tasks_;
+  bool terminated_;
+  // Only used for tests to ensure correct thread ordering. It will always be
+  // NULL in non-test code.
+  std::unique_ptr<ConditionVariable> num_idle_threads_cv_;
+
+  DISALLOW_COPY_AND_ASSIGN(PosixDynamicThreadPool);
+};
+
+} // namespace base
+
+#endif // BASE_THREADING_WORKER_POOL_POSIX_H_
diff --git a/libchrome/base/threading/worker_pool_posix_unittest.cc b/libchrome/base/threading/worker_pool_posix_unittest.cc
new file mode 100644
index 0000000..6cefeed
--- /dev/null
+++ b/libchrome/base/threading/worker_pool_posix_unittest.cc
@@ -0,0 +1,256 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/worker_pool_posix.h"
+
+#include <set>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Peer class to provide passthrough access to PosixDynamicThreadPool internals.
+class PosixDynamicThreadPool::PosixDynamicThreadPoolPeer {
+ public:
+ explicit PosixDynamicThreadPoolPeer(PosixDynamicThreadPool* pool)
+ : pool_(pool) {}
+
+ Lock* lock() { return &pool_->lock_; }
+ ConditionVariable* pending_tasks_available_cv() {
+ return &pool_->pending_tasks_available_cv_;
+ }
+ const std::queue<PendingTask>& pending_tasks() const {
+ return pool_->pending_tasks_;
+ }
+ int num_idle_threads() const { return pool_->num_idle_threads_; }
+ ConditionVariable* num_idle_threads_cv() {
+ return pool_->num_idle_threads_cv_.get();
+ }
+ void set_num_idle_threads_cv(ConditionVariable* cv) {
+ pool_->num_idle_threads_cv_.reset(cv);
+ }
+
+ private:
+ PosixDynamicThreadPool* pool_;
+
+ DISALLOW_COPY_AND_ASSIGN(PosixDynamicThreadPoolPeer);
+};
+
+namespace {
+
+// IncrementingTask's main purpose is to increment a counter. It also updates a
+// set of unique thread ids, and signals a ConditionVariable on completion.
+// Note that since it does not block, there is no way to control the number of
+// threads used if more than one IncrementingTask is consecutively posted to the
+// thread pool, since the first one might finish executing before the subsequent
+// PostTask() calls get invoked.
+void IncrementingTask(Lock* counter_lock,
+ int* counter,
+ Lock* unique_threads_lock,
+ std::set<PlatformThreadId>* unique_threads) {
+ {
+ base::AutoLock locked(*unique_threads_lock);
+ unique_threads->insert(PlatformThread::CurrentId());
+ }
+ base::AutoLock locked(*counter_lock);
+ (*counter)++;
+}
+
+// BlockingIncrementingTask is a simple wrapper around IncrementingTask that
+// allows for waiting at the start of Run() for a WaitableEvent to be signalled.
+struct BlockingIncrementingTaskArgs {
+ Lock* counter_lock;
+ int* counter;
+ Lock* unique_threads_lock;
+ std::set<PlatformThreadId>* unique_threads;
+ Lock* num_waiting_to_start_lock;
+ int* num_waiting_to_start;
+ ConditionVariable* num_waiting_to_start_cv;
+ base::WaitableEvent* start;
+};
+
+void BlockingIncrementingTask(const BlockingIncrementingTaskArgs& args) {
+ {
+ base::AutoLock num_waiting_to_start_locked(*args.num_waiting_to_start_lock);
+ (*args.num_waiting_to_start)++;
+ }
+ args.num_waiting_to_start_cv->Signal();
+ args.start->Wait();
+ IncrementingTask(args.counter_lock, args.counter, args.unique_threads_lock,
+ args.unique_threads);
+}
+
+class PosixDynamicThreadPoolTest : public testing::Test {
+ protected:
+ PosixDynamicThreadPoolTest()
+ : pool_(new base::PosixDynamicThreadPool("dynamic_pool", 60 * 60)),
+ peer_(pool_.get()),
+ counter_(0),
+ num_waiting_to_start_(0),
+ num_waiting_to_start_cv_(&num_waiting_to_start_lock_),
+ start_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+ void SetUp() override {
+ peer_.set_num_idle_threads_cv(new ConditionVariable(peer_.lock()));
+ }
+
+ void TearDown() override {
+ // Wake up the idle threads so they can terminate.
+ if (pool_.get())
+ pool_->Terminate();
+ }
+
+ void WaitForTasksToStart(int num_tasks) {
+ base::AutoLock num_waiting_to_start_locked(num_waiting_to_start_lock_);
+ while (num_waiting_to_start_ < num_tasks) {
+ num_waiting_to_start_cv_.Wait();
+ }
+ }
+
+ void WaitForIdleThreads(int num_idle_threads) {
+ base::AutoLock pool_locked(*peer_.lock());
+ while (peer_.num_idle_threads() < num_idle_threads) {
+ peer_.num_idle_threads_cv()->Wait();
+ }
+ }
+
+ base::Closure CreateNewIncrementingTaskCallback() {
+ return base::Bind(&IncrementingTask, &counter_lock_, &counter_,
+ &unique_threads_lock_, &unique_threads_);
+ }
+
+ base::Closure CreateNewBlockingIncrementingTaskCallback() {
+ BlockingIncrementingTaskArgs args = {
+ &counter_lock_, &counter_, &unique_threads_lock_, &unique_threads_,
+ &num_waiting_to_start_lock_, &num_waiting_to_start_,
+ &num_waiting_to_start_cv_, &start_
+ };
+ return base::Bind(&BlockingIncrementingTask, args);
+ }
+
+ scoped_refptr<base::PosixDynamicThreadPool> pool_;
+ base::PosixDynamicThreadPool::PosixDynamicThreadPoolPeer peer_;
+ Lock counter_lock_;
+ int counter_;
+ Lock unique_threads_lock_;
+ std::set<PlatformThreadId> unique_threads_;
+ Lock num_waiting_to_start_lock_;
+ int num_waiting_to_start_;
+ ConditionVariable num_waiting_to_start_cv_;
+ base::WaitableEvent start_;
+};
+
+} // namespace
+
+TEST_F(PosixDynamicThreadPoolTest, Basic) {
+ EXPECT_EQ(0, peer_.num_idle_threads());
+ EXPECT_EQ(0U, unique_threads_.size());
+ EXPECT_EQ(0U, peer_.pending_tasks().size());
+
+ // Add one task and wait for it to be completed.
+ pool_->PostTask(FROM_HERE, CreateNewIncrementingTaskCallback());
+
+ WaitForIdleThreads(1);
+
+ EXPECT_EQ(1U, unique_threads_.size()) <<
+ "There should be only one thread allocated for one task.";
+ EXPECT_EQ(1, counter_);
+}
+
+TEST_F(PosixDynamicThreadPoolTest, ReuseIdle) {
+ // Add one task and wait for it to be completed.
+ pool_->PostTask(FROM_HERE, CreateNewIncrementingTaskCallback());
+
+ WaitForIdleThreads(1);
+
+ // Add another 2 tasks. One should reuse the existing worker thread.
+ pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());
+ pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());
+
+ WaitForTasksToStart(2);
+ start_.Signal();
+ WaitForIdleThreads(2);
+
+ EXPECT_EQ(2U, unique_threads_.size());
+ EXPECT_EQ(2, peer_.num_idle_threads());
+ EXPECT_EQ(3, counter_);
+}
+
+TEST_F(PosixDynamicThreadPoolTest, TwoActiveTasks) {
+ // Add two blocking tasks.
+ pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());
+ pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());
+
+ EXPECT_EQ(0, counter_) << "Blocking tasks should not have started yet.";
+
+ WaitForTasksToStart(2);
+ start_.Signal();
+ WaitForIdleThreads(2);
+
+ EXPECT_EQ(2U, unique_threads_.size());
+ EXPECT_EQ(2, peer_.num_idle_threads()) << "Existing threads are now idle.";
+ EXPECT_EQ(2, counter_);
+}
+
+TEST_F(PosixDynamicThreadPoolTest, Complex) {
+ // Add one non-blocking task and wait for it to finish.
+ pool_->PostTask(FROM_HERE, CreateNewIncrementingTaskCallback());
+
+ WaitForIdleThreads(1);
+
+ // Add two blocking tasks, start them simultaneously, and wait for them to
+ // finish.
+ pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());
+ pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());
+
+ WaitForTasksToStart(2);
+ start_.Signal();
+ WaitForIdleThreads(2);
+
+ EXPECT_EQ(3, counter_);
+ EXPECT_EQ(2, peer_.num_idle_threads());
+ EXPECT_EQ(2U, unique_threads_.size());
+
+ // Wake up all idle threads so they can exit.
+ {
+ base::AutoLock locked(*peer_.lock());
+ while (peer_.num_idle_threads() > 0) {
+ peer_.pending_tasks_available_cv()->Signal();
+ peer_.num_idle_threads_cv()->Wait();
+ }
+ }
+
+ // Add another non blocking task. There are no threads to reuse.
+ pool_->PostTask(FROM_HERE, CreateNewIncrementingTaskCallback());
+ WaitForIdleThreads(1);
+
+ // The POSIX implementation of PlatformThread::CurrentId() uses pthread_self()
+ // which is not guaranteed to be unique after a thread joins. The OS X
+ // implementation of pthread_self() returns the address of the pthread_t, which
+ // is merely a malloc()ed pointer stored in the first TLS slot. When a thread
+ // joins and that structure is freed, the block of memory can be put on the
+ // OS free list, meaning the same address could be reused in a subsequent
+ // allocation. This in fact happens when allocating in a loop as this test
+ // does.
+ //
+ // Because there are two concurrent threads, there's at least the guarantee
+ // of having two unique thread IDs in the set. But after those two threads are
+ // joined, the next-created thread can get a re-used ID if the allocation of
+ // the pthread_t structure is taken from the free list. Therefore, there can
+ // be either 2 or 3 unique thread IDs in the set at this stage in the test.
+ EXPECT_TRUE(unique_threads_.size() >= 2 && unique_threads_.size() <= 3)
+ << "unique_threads_.size() = " << unique_threads_.size();
+ EXPECT_EQ(1, peer_.num_idle_threads());
+ EXPECT_EQ(4, counter_);
+}
+
+} // namespace base
diff --git a/libchrome/base/threading/worker_pool_unittest.cc b/libchrome/base/threading/worker_pool_unittest.cc
new file mode 100644
index 0000000..ef4bed1
--- /dev/null
+++ b/libchrome/base/threading/worker_pool_unittest.cc
@@ -0,0 +1,118 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/worker_pool.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/location.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/thread_checker_impl.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+typedef PlatformTest WorkerPoolTest;
+
+namespace base {
+
+namespace {
+
+class PostTaskAndReplyTester
+ : public base::RefCountedThreadSafe<PostTaskAndReplyTester> {
+ public:
+ PostTaskAndReplyTester()
+ : finished_(false),
+ test_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+ void RunTest() {
+ ASSERT_TRUE(thread_checker_.CalledOnValidThread());
+ WorkerPool::PostTaskAndReply(
+ FROM_HERE,
+ base::Bind(&PostTaskAndReplyTester::OnWorkerThread, this),
+ base::Bind(&PostTaskAndReplyTester::OnOriginalThread, this),
+ false);
+
+ test_event_.Wait();
+ }
+
+ void OnWorkerThread() {
+ // We're not on the original thread.
+ EXPECT_FALSE(thread_checker_.CalledOnValidThread());
+
+ test_event_.Signal();
+ }
+
+ void OnOriginalThread() {
+ EXPECT_TRUE(thread_checker_.CalledOnValidThread());
+ finished_ = true;
+ }
+
+ bool finished() const {
+ return finished_;
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<PostTaskAndReplyTester>;
+ ~PostTaskAndReplyTester() {}
+
+ bool finished_;
+ WaitableEvent test_event_;
+
+ // The Impl version performs its checks even in release builds.
+ ThreadCheckerImpl thread_checker_;
+};
+
+} // namespace
+
+TEST_F(WorkerPoolTest, PostTask) {
+ WaitableEvent test_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ WaitableEvent long_test_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+
+ WorkerPool::PostTask(FROM_HERE,
+ base::Bind(&WaitableEvent::Signal,
+ base::Unretained(&test_event)),
+ false);
+ WorkerPool::PostTask(FROM_HERE,
+ base::Bind(&WaitableEvent::Signal,
+ base::Unretained(&long_test_event)),
+ true);
+
+ test_event.Wait();
+ long_test_event.Wait();
+}
+
+#if defined(OS_WIN) || defined(OS_LINUX)
+// Flaky on Windows and Linux (http://crbug.com/130337)
+#define MAYBE_PostTaskAndReply DISABLED_PostTaskAndReply
+#else
+#define MAYBE_PostTaskAndReply PostTaskAndReply
+#endif
+
+TEST_F(WorkerPoolTest, MAYBE_PostTaskAndReply) {
+ MessageLoop message_loop;
+ scoped_refptr<PostTaskAndReplyTester> tester(new PostTaskAndReplyTester());
+ tester->RunTest();
+
+ const TimeDelta kMaxDuration = TestTimeouts::tiny_timeout();
+ TimeTicks start = TimeTicks::Now();
+ while (!tester->finished() && TimeTicks::Now() - start < kMaxDuration) {
+#if defined(OS_IOS)
+ // Ensure that the other thread has a chance to run even on a single-core
+ // device.
+ pthread_yield_np();
+#endif
+ RunLoop().RunUntilIdle();
+ }
+ EXPECT_TRUE(tester->finished());
+}
+
+} // namespace base
diff --git a/libchrome/base/time/clock.cc b/libchrome/base/time/clock.cc
new file mode 100644
index 0000000..34dc37e
--- /dev/null
+++ b/libchrome/base/time/clock.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/clock.h"
+
+namespace base {
+
+Clock::~Clock() {}
+
+} // namespace base
diff --git a/libchrome/base/time/clock.h b/libchrome/base/time/clock.h
new file mode 100644
index 0000000..507a850
--- /dev/null
+++ b/libchrome/base/time/clock.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIME_CLOCK_H_
+#define BASE_TIME_CLOCK_H_
+
+#include "base/base_export.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// A Clock is an interface for objects that vend Times. It is
+// intended to be able to test the behavior of classes with respect to
+// time.
+//
+// See DefaultClock (base/time/default_clock.h) for the default
+// implementation that simply uses Time::Now().
+//
+// (An implementation that uses Time::SystemTime() should be added as
+// needed.)
+//
+// See SimpleTestClock (base/test/simple_test_clock.h) for a simple
+// test implementation.
+//
+// See TickClock (base/time/tick_clock.h) for the equivalent interface for
+// TimeTicks.
+class BASE_EXPORT Clock {
+ public:
+ virtual ~Clock();
+
+ // Now() must be safe to call from any thread. The caller cannot
+ // make any ordering assumptions about the returned Time. For
+ // example, the system clock may change to an earlier time.
+ virtual Time Now() = 0;
+};
+
+} // namespace base
+
+#endif // BASE_TIME_CLOCK_H_
diff --git a/libchrome/base/time/default_clock.cc b/libchrome/base/time/default_clock.cc
new file mode 100644
index 0000000..5f70114
--- /dev/null
+++ b/libchrome/base/time/default_clock.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/default_clock.h"
+
+namespace base {
+
+DefaultClock::~DefaultClock() {}
+
+Time DefaultClock::Now() {
+ return Time::Now();
+}
+
+} // namespace base
diff --git a/libchrome/base/time/default_clock.h b/libchrome/base/time/default_clock.h
new file mode 100644
index 0000000..0b8250e
--- /dev/null
+++ b/libchrome/base/time/default_clock.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIME_DEFAULT_CLOCK_H_
+#define BASE_TIME_DEFAULT_CLOCK_H_
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/time/clock.h"
+
+namespace base {
+
+// DefaultClock is a Clock implementation that uses Time::Now().
+class BASE_EXPORT DefaultClock : public Clock {
+ public:
+ ~DefaultClock() override;
+
+ // Simply returns Time::Now().
+ Time Now() override;
+};
+
+} // namespace base
+
+#endif // BASE_TIME_DEFAULT_CLOCK_H_
diff --git a/libchrome/base/time/default_tick_clock.cc b/libchrome/base/time/default_tick_clock.cc
new file mode 100644
index 0000000..ce62fcc
--- /dev/null
+++ b/libchrome/base/time/default_tick_clock.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/default_tick_clock.h"
+
+namespace base {
+
+DefaultTickClock::~DefaultTickClock() {}
+
+TimeTicks DefaultTickClock::NowTicks() {
+ return TimeTicks::Now();
+}
+
+} // namespace base
diff --git a/libchrome/base/time/default_tick_clock.h b/libchrome/base/time/default_tick_clock.h
new file mode 100644
index 0000000..cb041e6
--- /dev/null
+++ b/libchrome/base/time/default_tick_clock.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIME_DEFAULT_TICK_CLOCK_H_
+#define BASE_TIME_DEFAULT_TICK_CLOCK_H_
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/time/tick_clock.h"
+
+namespace base {
+
+ // DefaultTickClock is a TickClock implementation that uses TimeTicks::Now().
+class BASE_EXPORT DefaultTickClock : public TickClock {
+ public:
+ ~DefaultTickClock() override;
+
+ // Simply returns TimeTicks::Now().
+ TimeTicks NowTicks() override;
+};
+
+} // namespace base
+
+#endif // BASE_TIME_DEFAULT_TICK_CLOCK_H_
diff --git a/libchrome/base/time/pr_time_unittest.cc b/libchrome/base/time/pr_time_unittest.cc
new file mode 100644
index 0000000..3f1a348
--- /dev/null
+++ b/libchrome/base/time/pr_time_unittest.cc
@@ -0,0 +1,290 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+#include <time.h>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/third_party/nspr/prtime.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::Time;
+
+namespace {
+
+// time_t representation of 15th Oct 2007 12:45:00 PDT
+PRTime comparison_time_pdt = 1192477500 * Time::kMicrosecondsPerSecond;
+
+// Time with positive tz offset and fractional seconds:
+// 2013-07-08T11:28:12.441381+02:00
+PRTime comparison_time_2 = INT64_C(1373275692441381); // represented as GMT
+
+// Specialized test fixture allowing time strings without timezones to be
+// tested by comparing them to a known time in the local zone.
+class PRTimeTest : public testing::Test {
+ protected:
+ void SetUp() override {
+ // Use mktime to get a time_t, and turn it into a PRTime by converting
+ // seconds to microseconds. Use 15th Oct 2007 12:45:00 local. This
+ // must be a time guaranteed to be outside of a DST fallback hour in
+ // any timezone.
+ struct tm local_comparison_tm = {
+ 0, // second
+ 45, // minute
+ 12, // hour
+ 15, // day of month
+ 10 - 1, // month
+ 2007 - 1900, // year
+ 0, // day of week (ignored, output only)
+ 0, // day of year (ignored, output only)
+ -1 // DST in effect, -1 tells mktime to figure it out
+ };
+ comparison_time_local_ =
+ mktime(&local_comparison_tm) * Time::kMicrosecondsPerSecond;
+ ASSERT_GT(comparison_time_local_, 0);
+
+ const int microseconds = 441381;
+ struct tm local_comparison_tm_2 = {
+ 12, // second
+ 28, // minute
+ 11, // hour
+ 8, // day of month
+ 7 - 1, // month
+ 2013 - 1900, // year
+ 0, // day of week (ignored, output only)
+ 0, // day of year (ignored, output only)
+ -1 // DST in effect, -1 tells mktime to figure it out
+ };
+ comparison_time_local_2_ =
+ mktime(&local_comparison_tm_2) * Time::kMicrosecondsPerSecond;
+ ASSERT_GT(comparison_time_local_2_, 0);
+ comparison_time_local_2_ += microseconds;
+ }
+
+ PRTime comparison_time_local_;
+ PRTime comparison_time_local_2_;
+};
+
+// Tests the PR_ParseTimeString nspr helper function for
+// a variety of time strings.
+TEST_F(PRTimeTest, ParseTimeTest1) {
+ time_t current_time = 0;
+ time(&current_time);
+
+ const int BUFFER_SIZE = 64;
+ struct tm local_time = {0};
+ char time_buf[BUFFER_SIZE] = {0};
+#if defined(OS_WIN)
+ localtime_s(&local_time, &current_time);
+ asctime_s(time_buf, arraysize(time_buf), &local_time);
+#elif defined(OS_POSIX)
+ localtime_r(&current_time, &local_time);
+ asctime_r(&local_time, time_buf);
+#endif
+
+ PRTime current_time64 = static_cast<PRTime>(current_time) * PR_USEC_PER_SEC;
+
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString(time_buf, PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(current_time64, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest2) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("Mon, 15 Oct 2007 19:45:00 GMT",
+ PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_pdt, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest3) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("15 Oct 07 12:45:00", PR_FALSE,
+ &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest4) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("15 Oct 07 19:45 GMT", PR_FALSE,
+ &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_pdt, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest5) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("Mon Oct 15 12:45 PDT 2007",
+ PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_pdt, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest6) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("Monday, Oct 15, 2007 12:45 PM",
+ PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest7) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("10/15/07 12:45:00 PM", PR_FALSE,
+ &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest8) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("10/15/07 12:45:00. PM", PR_FALSE,
+ &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest9) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("10/15/07 12:45:00.0 PM", PR_FALSE,
+ &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest10) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("15-OCT-2007 12:45pm", PR_FALSE,
+ &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTest11) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("16 Oct 2007 4:45-JST (Tuesday)",
+ PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_pdt, parsed_time);
+}
+
+// hh:mm timezone offset.
+TEST_F(PRTimeTest, ParseTimeTest12) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("2013-07-08T11:28:12.441381+02:00",
+ PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_2, parsed_time);
+}
+
+// hhmm timezone offset.
+TEST_F(PRTimeTest, ParseTimeTest13) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("2013-07-08T11:28:12.441381+0200",
+ PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_2, parsed_time);
+}
+
+// hh timezone offset.
+TEST_F(PRTimeTest, ParseTimeTest14) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("2013-07-08T11:28:12.4413819+02",
+ PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_2, parsed_time);
+}
+
+// 5 digits fractional second.
+TEST_F(PRTimeTest, ParseTimeTest15) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("2013-07-08T09:28:12.44138Z",
+ PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_2-1, parsed_time);
+}
+
+// Fractional seconds, local timezone.
+TEST_F(PRTimeTest, ParseTimeTest16) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("2013-07-08T11:28:12.441381",
+ PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_local_2_, parsed_time);
+}
+
+// "Z" (=GMT) timezone.
+TEST_F(PRTimeTest, ParseTimeTest17) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("2013-07-08T09:28:12.441381Z",
+ PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_2, parsed_time);
+}
+
+// "T" delimiter replaced by space.
+TEST_F(PRTimeTest, ParseTimeTest18) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("2013-07-08 09:28:12.441381Z",
+ PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_2, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTestInvalid1) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("201-07-08T09:28:12.441381Z",
+ PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_FAILURE, result);
+}
+
+TEST_F(PRTimeTest, ParseTimeTestInvalid2) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("2013-007-08T09:28:12.441381Z",
+ PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_FAILURE, result);
+}
+
+TEST_F(PRTimeTest, ParseTimeTestInvalid3) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("2013-07-008T09:28:12.441381Z",
+ PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_FAILURE, result);
+}
+
+// This test should not crash when compiled with Visual C++ 2005 (see
+// http://crbug.com/4387).
+TEST_F(PRTimeTest, ParseTimeTestOutOfRange) {
+ PRTime parsed_time = 0;
+ // Note the lack of timezone in the time string. The year has to be 3001.
+ // The date has to be after 23:59:59, December 31, 3000, US Pacific Time, so
+ // we use January 2, 3001 to make sure it's after the magic maximum in any
+ // timezone.
+ PRStatus result = PR_ParseTimeString("Sun Jan 2 00:00:00 3001",
+ PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+}
+
+TEST_F(PRTimeTest, ParseTimeTestNotNormalized1) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("Mon Oct 15 12:44:60 PDT 2007",
+ PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_pdt, parsed_time);
+}
+
+TEST_F(PRTimeTest, ParseTimeTestNotNormalized2) {
+ PRTime parsed_time = 0;
+ PRStatus result = PR_ParseTimeString("Sun Oct 14 36:45 PDT 2007",
+ PR_FALSE, &parsed_time);
+ EXPECT_EQ(PR_SUCCESS, result);
+ EXPECT_EQ(comparison_time_pdt, parsed_time);
+}
+
+} // namespace
diff --git a/libchrome/base/time/tick_clock.cc b/libchrome/base/time/tick_clock.cc
new file mode 100644
index 0000000..495805c
--- /dev/null
+++ b/libchrome/base/time/tick_clock.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/tick_clock.h"
+
+namespace base {
+
+TickClock::~TickClock() {}
+
+} // namespace base
diff --git a/libchrome/base/time/tick_clock.h b/libchrome/base/time/tick_clock.h
new file mode 100644
index 0000000..f7aba53
--- /dev/null
+++ b/libchrome/base/time/tick_clock.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIME_TICK_CLOCK_H_
+#define BASE_TIME_TICK_CLOCK_H_
+
+#include "base/base_export.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// A TickClock is an interface for objects that vend TimeTicks. It is
+// intended to be able to test the behavior of classes with respect to
+// non-decreasing time.
+//
+// See DefaultTickClock (base/time/default_tick_clock.h) for the default
+// implementation that simply uses TimeTicks::Now().
+//
+// (Other implementations that use TimeTicks::NowFromSystemTime() should
+// be added as needed.)
+//
+// See SimpleTestTickClock (base/test/simple_test_tick_clock.h) for a
+// simple test implementation.
+//
+// See Clock (base/time/clock.h) for the equivalent interface for Times.
+class BASE_EXPORT TickClock {
+ public:
+ virtual ~TickClock();
+
+ // NowTicks() must be safe to call from any thread. The caller may
+ // assume that NowTicks() is monotonic (but not strictly monotonic).
+ // In other words, the returned TimeTicks will never decrease with
+ // time, although they might "stand still".
+ virtual TimeTicks NowTicks() = 0;
+};
+
+} // namespace base
+
+#endif // BASE_TIME_TICK_CLOCK_H_
diff --git a/libchrome/base/time/time.cc b/libchrome/base/time/time.cc
new file mode 100644
index 0000000..3670f55
--- /dev/null
+++ b/libchrome/base/time/time.cc
@@ -0,0 +1,350 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+
+#include <cmath>
+#include <ios>
+#include <limits>
+#include <ostream>
+#include <sstream>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/stringprintf.h"
+#include "base/third_party/nspr/prtime.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// TimeDelta ------------------------------------------------------------------
+
+// static
+TimeDelta TimeDelta::Max() {
+  // Sentinel "infinite" delta; is_max() compares against this exact value.
+  return TimeDelta(std::numeric_limits<int64_t>::max());
+}
+
+int TimeDelta::InDays() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<int>::max();
+  }
+  // Integer division: truncates toward zero.
+  return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
+}
+
+int TimeDelta::InHours() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<int>::max();
+  }
+  // Integer division: truncates toward zero.
+  return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
+}
+
+int TimeDelta::InMinutes() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<int>::max();
+  }
+  // Integer division: truncates toward zero.
+  return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
+}
+
+double TimeDelta::InSecondsF() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow; maps to +infinity in floating point.
+    return std::numeric_limits<double>::infinity();
+  }
+  return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
+}
+
+int64_t TimeDelta::InSeconds() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<int64_t>::max();
+  }
+  // Integer division: truncates toward zero.
+  return delta_ / Time::kMicrosecondsPerSecond;
+}
+
+double TimeDelta::InMillisecondsF() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow; maps to +infinity in floating point.
+    return std::numeric_limits<double>::infinity();
+  }
+  return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
+}
+
+int64_t TimeDelta::InMilliseconds() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<int64_t>::max();
+  }
+  // Integer division: truncates toward zero.
+  return delta_ / Time::kMicrosecondsPerMillisecond;
+}
+
+int64_t TimeDelta::InMillisecondsRoundedUp() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<int64_t>::max();
+  }
+  // Classic ceiling division: add (divisor - 1) before dividing so any
+  // sub-millisecond remainder rounds up to the next full millisecond.
+  return (delta_ + Time::kMicrosecondsPerMillisecond - 1) /
+      Time::kMicrosecondsPerMillisecond;
+}
+
+int64_t TimeDelta::InMicroseconds() const {
+  if (is_max()) {
+    // Preserve max to prevent overflow.
+    return std::numeric_limits<int64_t>::max();
+  }
+  // The internal representation is already microseconds.
+  return delta_;
+}
+
+namespace time_internal {
+
+// Returns |delta| (in microseconds) + |value|, clamped on overflow/underflow
+// via FromCheckedNumeric() below.
+int64_t SaturatedAdd(TimeDelta delta, int64_t value) {
+  CheckedNumeric<int64_t> rv(delta.delta_);
+  rv += value;
+  return FromCheckedNumeric(rv);
+}
+
+// Returns |delta| (in microseconds) - |value|, clamped on overflow/underflow
+// via FromCheckedNumeric() below.
+int64_t SaturatedSub(TimeDelta delta, int64_t value) {
+  CheckedNumeric<int64_t> rv(delta.delta_);
+  rv -= value;
+  return FromCheckedNumeric(rv);
+}
+
+// Unwraps |value|, clamping to int64 max on overflow and -max (not min) on
+// underflow so the result stays symmetric around zero.
+int64_t FromCheckedNumeric(const CheckedNumeric<int64_t> value) {
+  if (value.IsValid())
+    return value.ValueUnsafe();
+
+  // We could return max/min but we don't really expose what the maximum delta
+  // is. Instead, return max/(-max), which is something that clients can reason
+  // about.
+  // TODO(rvargas) crbug.com/332611: don't use internal values.
+  int64_t limit = std::numeric_limits<int64_t>::max();
+  if (value.validity() == internal::RANGE_UNDERFLOW)
+    limit = -limit;
+  return value.ValueOrDefault(limit);
+}
+
+} // namespace time_internal
+
+// For logging only: prints the delta as fractional seconds, e.g. "1.5s".
+std::ostream& operator<<(std::ostream& os, TimeDelta time_delta) {
+  return os << time_delta.InSecondsF() << "s";
+}
+
+// Time -----------------------------------------------------------------------
+
+// static
+Time Time::FromTimeT(time_t tt) {
+  if (tt == 0)
+    return Time();  // Preserve 0 so we can tell it doesn't exist.
+  if (tt == std::numeric_limits<time_t>::max())
+    return Max();
+  // Shift from the Unix epoch (1970) to the internal Windows epoch (1601).
+  return Time(kTimeTToMicrosecondsOffset) + TimeDelta::FromSeconds(tt);
+}
+
+time_t Time::ToTimeT() const {
+  if (is_null())
+    return 0;  // Preserve 0 so we can tell it doesn't exist.
+  if (is_max()) {
+    // Preserve max without offset to prevent overflow.
+    return std::numeric_limits<time_t>::max();
+  }
+  // Guard against overflow when subtracting the epoch offset (e.g. on
+  // platforms with 32-bit time_t, or internal values near int64 max).
+  if (std::numeric_limits<int64_t>::max() - kTimeTToMicrosecondsOffset <= us_) {
+    DLOG(WARNING) << "Overflow when converting base::Time with internal " <<
+                     "value " << us_ << " to time_t.";
+    return std::numeric_limits<time_t>::max();
+  }
+  return (us_ - kTimeTToMicrosecondsOffset) / kMicrosecondsPerSecond;
+}
+
+// static
+Time Time::FromDoubleT(double dt) {
+  // NaN inputs also map to the null Time, not to a garbage value.
+  if (dt == 0 || std::isnan(dt))
+    return Time();  // Preserve 0 so we can tell it doesn't exist.
+  // Shift from the Unix epoch (1970) to the internal Windows epoch (1601).
+  return Time(kTimeTToMicrosecondsOffset) + TimeDelta::FromSecondsD(dt);
+}
+
+double Time::ToDoubleT() const {
+  if (is_null())
+    return 0;  // Preserve 0 so we can tell it doesn't exist.
+  if (is_max()) {
+    // Preserve max without offset to prevent overflow.
+    return std::numeric_limits<double>::infinity();
+  }
+  // Seconds (fractional) since the Unix epoch.
+  return (static_cast<double>(us_ - kTimeTToMicrosecondsOffset) /
+          static_cast<double>(kMicrosecondsPerSecond));
+}
+
+#if defined(OS_POSIX)
+// static
+// static
+// Converts a POSIX timespec (seconds + nanoseconds) by folding the
+// nanosecond part into a fractional-seconds double.
+Time Time::FromTimeSpec(const timespec& ts) {
+  return FromDoubleT(ts.tv_sec +
+                     static_cast<double>(ts.tv_nsec) /
+                         base::Time::kNanosecondsPerSecond);
+}
+#endif
+
+// static
+Time Time::FromJsTime(double ms_since_epoch) {
+  // The epoch is a valid time, so this constructor doesn't interpret
+  // 0 as the null time.
+  return Time(kTimeTToMicrosecondsOffset) +
+         TimeDelta::FromMillisecondsD(ms_since_epoch);
+}
+
+double Time::ToJsTime() const {
+  if (is_null()) {
+    // Preserve 0 so the invalid result doesn't depend on the platform.
+    return 0;
+  }
+  if (is_max()) {
+    // Preserve max without offset to prevent overflow.
+    return std::numeric_limits<double>::infinity();
+  }
+  // Milliseconds (fractional) since the Unix epoch, per the JS Date API.
+  return (static_cast<double>(us_ - kTimeTToMicrosecondsOffset) /
+          kMicrosecondsPerMillisecond);
+}
+
+int64_t Time::ToJavaTime() const {
+  if (is_null()) {
+    // Preserve 0 so the invalid result doesn't depend on the platform.
+    return 0;
+  }
+  if (is_max()) {
+    // Preserve max without offset to prevent overflow.
+    return std::numeric_limits<int64_t>::max();
+  }
+  // Whole milliseconds since the Unix epoch (truncated), per java.util.Date.
+  return ((us_ - kTimeTToMicrosecondsOffset) /
+          kMicrosecondsPerMillisecond);
+}
+
+// static
+// static
+// Sets us_ directly (bypassing the delta math) so the result is exactly the
+// 1970 epoch expressed in the internal 1601-based timebase.
+Time Time::UnixEpoch() {
+  Time time;
+  time.us_ = kTimeTToMicrosecondsOffset;
+  return time;
+}
+
+Time Time::LocalMidnight() const {
+  // Explode in local time, zero the time-of-day fields, and re-assemble;
+  // the date fields (year/month/day) are kept as-is.
+  Exploded exploded;
+  LocalExplode(&exploded);
+  exploded.hour = 0;
+  exploded.minute = 0;
+  exploded.second = 0;
+  exploded.millisecond = 0;
+  return FromLocalExploded(exploded);
+}
+
+// static
+// static
+// Parses |time_string| with NSPR's PR_ParseTimeString and converts the
+// resulting Unix-epoch microseconds to the internal 1601-based timebase.
+// Returns false on an empty or unparseable string.
+bool Time::FromStringInternal(const char* time_string,
+                              bool is_local,
+                              Time* parsed_time) {
+  DCHECK((time_string != NULL) && (parsed_time != NULL));
+
+  if (time_string[0] == '\0')
+    return false;
+
+  PRTime result_time = 0;
+  // NSPR's second argument is "default to GMT", hence the inversion of
+  // |is_local|.
+  PRStatus result = PR_ParseTimeString(time_string,
+                                       is_local ? PR_FALSE : PR_TRUE,
+                                       &result_time);
+  if (PR_SUCCESS != result)
+    return false;
+
+  result_time += kTimeTToMicrosecondsOffset;
+  *parsed_time = Time(result_time);
+  return true;
+}
+
+// static
+// static
+// "Mostly" equal: compares every field except |day_of_week|, which is
+// derived from the date and may be stale in caller-built Exploded values.
+bool Time::ExplodedMostlyEquals(const Exploded& lhs, const Exploded& rhs) {
+  return lhs.year == rhs.year && lhs.month == rhs.month &&
+         lhs.day_of_month == rhs.day_of_month && lhs.hour == rhs.hour &&
+         lhs.minute == rhs.minute && lhs.second == rhs.second &&
+         lhs.millisecond == rhs.millisecond;
+}
+
+// For logging only: prints the time as "YYYY-MM-DD HH:MM:SS.mmm UTC".
+std::ostream& operator<<(std::ostream& os, Time time) {
+  Time::Exploded exploded;
+  time.UTCExplode(&exploded);
+  // Use StringPrintf because iostreams formatting is painful.
+  return os << StringPrintf("%04d-%02d-%02d %02d:%02d:%02d.%03d UTC",
+                            exploded.year,
+                            exploded.month,
+                            exploded.day_of_month,
+                            exploded.hour,
+                            exploded.minute,
+                            exploded.second,
+                            exploded.millisecond);
+}
+
+// Local helper class to hold the conversion from Time to TickTime at the
+// time of the Unix epoch.
+class UnixEpochSingleton {
+ public:
+  // Samples Time::Now() and TimeTicks::Now() once, at construction, to pin
+  // the tick value that corresponds to the Unix epoch.
+  UnixEpochSingleton()
+      : unix_epoch_(TimeTicks::Now() - (Time::Now() - Time::UnixEpoch())) {}
+
+  TimeTicks unix_epoch() const { return unix_epoch_; }
+
+ private:
+  // Immutable after construction; safe to read from any thread.
+  const TimeTicks unix_epoch_;
+
+  DISALLOW_COPY_AND_ASSIGN(UnixEpochSingleton);
+};
+
+static LazyInstance<UnixEpochSingleton>::Leaky
+ leaky_unix_epoch_singleton_instance = LAZY_INSTANCE_INITIALIZER;
+
+// Static
+// Returns the tick count captured (once, lazily) at the Unix epoch mapping;
+// the Leaky singleton is never destroyed.
+TimeTicks TimeTicks::UnixEpoch() {
+  return leaky_unix_epoch_singleton_instance.Get().unix_epoch();
+}
+
+TimeTicks TimeTicks::SnappedToNextTick(TimeTicks tick_phase,
+                                       TimeDelta tick_interval) const {
+  // |interval_offset| is the offset from |this| to the next multiple of
+  // |tick_interval| after |tick_phase|, possibly negative if in the past.
+  TimeDelta interval_offset = (tick_phase - *this) % tick_interval;
+  // If |this| is exactly on the interval (i.e. offset==0), don't adjust.
+  // Otherwise, if |tick_phase| was in the past, adjust forward to the next
+  // tick after |this|.
+  if (!interval_offset.is_zero() && tick_phase < *this)
+    interval_offset += tick_interval;
+  return *this + interval_offset;
+}
+
+std::ostream& operator<<(std::ostream& os, TimeTicks time_ticks) {
+  // This function formats a TimeTicks object as "bogo-microseconds".
+  // The origin and granularity of the count are platform-specific, and may vary
+  // from run to run. Although bogo-microseconds usually roughly correspond to
+  // real microseconds, the only real guarantee is that the number never goes
+  // down during a single run.
+  const TimeDelta as_time_delta = time_ticks - TimeTicks();
+  return os << as_time_delta.InMicroseconds() << " bogo-microseconds";
+}
+
+// For logging only: like the TimeTicks printer above, the origin and
+// granularity are platform-specific ("bogo" units).
+std::ostream& operator<<(std::ostream& os, ThreadTicks thread_ticks) {
+  const TimeDelta as_time_delta = thread_ticks - ThreadTicks();
+  return os << as_time_delta.InMicroseconds() << " bogo-thread-microseconds";
+}
+
+// Time::Exploded -------------------------------------------------------------
+
+// Returns true if lo <= value <= hi (both bounds inclusive).
+inline bool is_in_range(int value, int lo, int hi) {
+  return lo <= value && value <= hi;
+}
+
+bool Time::Exploded::HasValidValues() const {
+  // Note: |second| may be 60 to accommodate leap seconds, and |day_of_month|
+  // is not checked against the actual month length (see header comment: a
+  // 'true' result does not guarantee convertibility to a Time).
+  return is_in_range(month, 1, 12) &&
+         is_in_range(day_of_week, 0, 6) &&
+         is_in_range(day_of_month, 1, 31) &&
+         is_in_range(hour, 0, 23) &&
+         is_in_range(minute, 0, 59) &&
+         is_in_range(second, 0, 60) &&
+         is_in_range(millisecond, 0, 999);
+}
+
+} // namespace base
diff --git a/libchrome/base/time/time.h b/libchrome/base/time/time.h
new file mode 100644
index 0000000..efece96
--- /dev/null
+++ b/libchrome/base/time/time.h
@@ -0,0 +1,832 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Time represents an absolute point in coordinated universal time (UTC),
+// internally represented as microseconds (s/1,000,000) since the Windows epoch
+// (1601-01-01 00:00:00 UTC). System-dependent clock interface routines are
+// defined in time_PLATFORM.cc. Note that values for Time may skew and jump
+// around as the operating system makes adjustments to synchronize (e.g., with
+// NTP servers). Thus, client code that uses the Time class must account for
+// this.
+//
+// TimeDelta represents a duration of time, internally represented in
+// microseconds.
+//
+// TimeTicks and ThreadTicks represent an abstract time that is most of the time
+// incrementing, for use in measuring time durations. Internally, they are
+// represented in microseconds. They can not be converted to a human-readable
+// time, but are guaranteed not to decrease (unlike the Time class). Note that
+// TimeTicks may "stand still" (e.g., if the computer is suspended), and
+// ThreadTicks will "stand still" whenever the thread has been de-scheduled by
+// the operating system.
+//
+// All time classes are copyable, assignable, and occupy 64-bits per
+// instance. Thus, they can be efficiently passed by-value (as opposed to
+// by-reference).
+//
+// Definitions of operator<< are provided to make these types work with
+// DCHECK_EQ() and other log macros. For human-readable formatting, see
+// "base/i18n/time_formatting.h".
+//
+// So many choices! Which time class should you use? Examples:
+//
+// Time: Interpreting the wall-clock time provided by a remote
+// system. Detecting whether cached resources have
+// expired. Providing the user with a display of the current date
+// and time. Determining the amount of time between events across
+// re-boots of the machine.
+//
+// TimeTicks: Tracking the amount of time a task runs. Executing delayed
+// tasks at the right time. Computing presentation timestamps.
+// Synchronizing audio and video using TimeTicks as a common
+// reference clock (lip-sync). Measuring network round-trip
+// latency.
+//
+// ThreadTicks: Benchmarking how long the current thread has been doing actual
+// work.
+
+#ifndef BASE_TIME_TIME_H_
+#define BASE_TIME_TIME_H_
+
+#include <stdint.h>
+#include <time.h>
+
+#include <iosfwd>
+#include <limits>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/numerics/safe_math.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include <CoreFoundation/CoreFoundation.h>
+// Avoid Mac system header macro leak.
+#undef TYPE_BOOL
+#endif
+
+#if defined(OS_POSIX)
+#include <unistd.h>
+#include <sys/time.h>
+#endif
+
+#if defined(OS_WIN)
+// For FILETIME in FromFileTime, until it moves to a new converter class.
+// See TODO(iyengar) below.
+#include <windows.h>
+#include "base/gtest_prod_util.h"
+#endif
+
+namespace base {
+
+class PlatformThreadHandle;
+class TimeDelta;
+
+// The functions in the time_internal namespace are meant to be used only by the
+// time classes and functions. Please use the math operators defined in the
+// time classes instead.
+namespace time_internal {
+
+// Add or subtract |value| from a TimeDelta. The int64_t argument and return
+// value are in terms of a microsecond timebase.
+BASE_EXPORT int64_t SaturatedAdd(TimeDelta delta, int64_t value);
+BASE_EXPORT int64_t SaturatedSub(TimeDelta delta, int64_t value);
+
+// Clamp |value| on overflow and underflow conditions. The int64_t argument and
+// return value are in terms of a microsecond timebase.
+BASE_EXPORT int64_t FromCheckedNumeric(const CheckedNumeric<int64_t> value);
+
+} // namespace time_internal
+
+// TimeDelta ------------------------------------------------------------------
+
+class BASE_EXPORT TimeDelta {
+ public:
+  // Constructs a zero-length delta.
+  TimeDelta() : delta_(0) {
+  }
+
+  // Converts units of time to TimeDeltas.
+  static constexpr TimeDelta FromDays(int days);
+  static constexpr TimeDelta FromHours(int hours);
+  static constexpr TimeDelta FromMinutes(int minutes);
+  static constexpr TimeDelta FromSeconds(int64_t secs);
+  static constexpr TimeDelta FromMilliseconds(int64_t ms);
+  static constexpr TimeDelta FromSecondsD(double secs);
+  static constexpr TimeDelta FromMillisecondsD(double ms);
+  static constexpr TimeDelta FromMicroseconds(int64_t us);
+#if defined(OS_WIN)
+  static TimeDelta FromQPCValue(LONGLONG qpc_value);
+#endif
+
+  // Converts an integer value representing TimeDelta to a class. This is used
+  // when deserializing a |TimeDelta| structure, using a value known to be
+  // compatible. It is not provided as a constructor because the integer type
+  // may be unclear from the perspective of a caller.
+  static TimeDelta FromInternalValue(int64_t delta) { return TimeDelta(delta); }
+
+  // Returns the maximum time delta, which should be greater than any reasonable
+  // time delta we might compare it to. Adding or subtracting the maximum time
+  // delta to a time or another time delta has an undefined result.
+  static TimeDelta Max();
+
+  // Returns the internal numeric value of the TimeDelta object. Please don't
+  // use this and do arithmetic on it, as it is more error prone than using the
+  // provided operators.
+  // For serializing, use FromInternalValue to reconstitute.
+  int64_t ToInternalValue() const { return delta_; }
+
+  // Returns the magnitude (absolute value) of this TimeDelta.
+  TimeDelta magnitude() const {
+    // Some toolchains provide an incomplete C++11 implementation and lack an
+    // int64_t overload for std::abs().  The following is a simple branchless
+    // implementation:
+    const int64_t mask = delta_ >> (sizeof(delta_) * 8 - 1);
+    return TimeDelta((delta_ + mask) ^ mask);
+  }
+
+  // Returns true if the time delta is zero.
+  bool is_zero() const {
+    return delta_ == 0;
+  }
+
+  // Returns true if the time delta is the maximum time delta.
+  bool is_max() const { return delta_ == std::numeric_limits<int64_t>::max(); }
+
+#if defined(OS_POSIX)
+  struct timespec ToTimeSpec() const;
+#endif
+
+  // Returns the time delta in some unit. The F versions return a floating
+  // point value, the "regular" versions return a rounded-down value.
+  //
+  // InMillisecondsRoundedUp() instead returns an integer that is rounded up
+  // to the next full millisecond.
+  int InDays() const;
+  int InHours() const;
+  int InMinutes() const;
+  double InSecondsF() const;
+  int64_t InSeconds() const;
+  double InMillisecondsF() const;
+  int64_t InMilliseconds() const;
+  int64_t InMillisecondsRoundedUp() const;
+  int64_t InMicroseconds() const;
+
+  TimeDelta& operator=(TimeDelta other) {
+    delta_ = other.delta_;
+    return *this;
+  }
+
+  // Computations with other deltas. Addition and subtraction saturate at the
+  // int64 limits instead of overflowing.
+  TimeDelta operator+(TimeDelta other) const {
+    return TimeDelta(time_internal::SaturatedAdd(*this, other.delta_));
+  }
+  TimeDelta operator-(TimeDelta other) const {
+    return TimeDelta(time_internal::SaturatedSub(*this, other.delta_));
+  }
+
+  TimeDelta& operator+=(TimeDelta other) {
+    return *this = (*this + other);
+  }
+  TimeDelta& operator-=(TimeDelta other) {
+    return *this = (*this - other);
+  }
+  TimeDelta operator-() const {
+    return TimeDelta(-delta_);
+  }
+
+  // Computations with numeric types. Multiplication and division clamp on
+  // overflow/underflow via FromCheckedNumeric().
+  template<typename T>
+  TimeDelta operator*(T a) const {
+    CheckedNumeric<int64_t> rv(delta_);
+    rv *= a;
+    return TimeDelta(time_internal::FromCheckedNumeric(rv));
+  }
+  template<typename T>
+  TimeDelta operator/(T a) const {
+    CheckedNumeric<int64_t> rv(delta_);
+    rv /= a;
+    return TimeDelta(time_internal::FromCheckedNumeric(rv));
+  }
+  template<typename T>
+  TimeDelta& operator*=(T a) {
+    return *this = (*this * a);
+  }
+  template<typename T>
+  TimeDelta& operator/=(T a) {
+    return *this = (*this / a);
+  }
+
+  // Ratio and remainder of two deltas; plain integer division (truncating,
+  // no overflow clamping).
+  int64_t operator/(TimeDelta a) const { return delta_ / a.delta_; }
+  TimeDelta operator%(TimeDelta a) const {
+    return TimeDelta(delta_ % a.delta_);
+  }
+
+  // Comparison operators.
+  constexpr bool operator==(TimeDelta other) const {
+    return delta_ == other.delta_;
+  }
+  constexpr bool operator!=(TimeDelta other) const {
+    return delta_ != other.delta_;
+  }
+  constexpr bool operator<(TimeDelta other) const {
+    return delta_ < other.delta_;
+  }
+  constexpr bool operator<=(TimeDelta other) const {
+    return delta_ <= other.delta_;
+  }
+  constexpr bool operator>(TimeDelta other) const {
+    return delta_ > other.delta_;
+  }
+  constexpr bool operator>=(TimeDelta other) const {
+    return delta_ >= other.delta_;
+  }
+
+ private:
+  friend int64_t time_internal::SaturatedAdd(TimeDelta delta, int64_t value);
+  friend int64_t time_internal::SaturatedSub(TimeDelta delta, int64_t value);
+
+  // Constructs a delta given the duration in microseconds. This is private
+  // to avoid confusion by callers with an integer constructor. Use
+  // FromSeconds, FromMilliseconds, etc. instead.
+  constexpr explicit TimeDelta(int64_t delta_us) : delta_(delta_us) {}
+
+  // Private method to build a delta from a double.
+  static constexpr TimeDelta FromDouble(double value);
+
+  // Private method to build a delta from the product of a user-provided value
+  // and a known-positive value.
+  static constexpr TimeDelta FromProduct(int64_t value, int64_t positive_value);
+
+  // Delta in microseconds.
+  int64_t delta_;
+};
+
+// Free-function overload so scalar * TimeDelta works symmetrically with
+// TimeDelta * scalar.
+template<typename T>
+inline TimeDelta operator*(T a, TimeDelta td) {
+  return td * a;
+}
+
+// For logging use only.
+BASE_EXPORT std::ostream& operator<<(std::ostream& os, TimeDelta time_delta);
+
+// Do not reference the time_internal::TimeBase template class directly. Please
+// use one of the time subclasses instead, and only reference the public
+// TimeBase members via those classes.
+namespace time_internal {
+
+// TimeBase--------------------------------------------------------------------
+
+// Provides value storage and comparison/math operations common to all time
+// classes. Each subclass provides for strong type-checking to ensure
+// semantically meaningful comparison/math of time values from the same clock
+// source or timeline.
+template<class TimeClass>
+class TimeBase {
+ public:
+  static const int64_t kHoursPerDay = 24;
+  static const int64_t kMillisecondsPerSecond = 1000;
+  static const int64_t kMillisecondsPerDay =
+      kMillisecondsPerSecond * 60 * 60 * kHoursPerDay;
+  static const int64_t kMicrosecondsPerMillisecond = 1000;
+  static const int64_t kMicrosecondsPerSecond =
+      kMicrosecondsPerMillisecond * kMillisecondsPerSecond;
+  static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
+  static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
+  static const int64_t kMicrosecondsPerDay =
+      kMicrosecondsPerHour * kHoursPerDay;
+  static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
+  static const int64_t kNanosecondsPerMicrosecond = 1000;
+  static const int64_t kNanosecondsPerSecond =
+      kNanosecondsPerMicrosecond * kMicrosecondsPerSecond;
+
+  // Returns true if this object has not been initialized.
+  //
+  // Warning: Be careful when writing code that performs math on time values,
+  // since it's possible to produce a valid "zero" result that should not be
+  // interpreted as a "null" value.
+  bool is_null() const {
+    return us_ == 0;
+  }
+
+  // Returns true if this object represents the maximum time.
+  bool is_max() const { return us_ == std::numeric_limits<int64_t>::max(); }
+
+  // Returns the maximum time, which should be greater than any reasonable time
+  // with which we might compare it.
+  static TimeClass Max() {
+    return TimeClass(std::numeric_limits<int64_t>::max());
+  }
+
+  // For serializing only. Use FromInternalValue() to reconstitute. Please don't
+  // use this and do arithmetic on it, as it is more error prone than using the
+  // provided operators.
+  int64_t ToInternalValue() const { return us_; }
+
+  TimeClass& operator=(TimeClass other) {
+    us_ = other.us_;
+    return *(static_cast<TimeClass*>(this));
+  }
+
+  // Compute the difference between two times.
+  TimeDelta operator-(TimeClass other) const {
+    return TimeDelta::FromMicroseconds(us_ - other.us_);
+  }
+
+  // Return a new time modified by some delta. Both directions saturate at the
+  // int64 limits via the time_internal helpers.
+  TimeClass operator+(TimeDelta delta) const {
+    return TimeClass(time_internal::SaturatedAdd(delta, us_));
+  }
+  TimeClass operator-(TimeDelta delta) const {
+    // SaturatedSub(delta, us_) computes (delta - us_); negating yields
+    // (us_ - delta) while reusing the saturating helper.
+    return TimeClass(-time_internal::SaturatedSub(delta, us_));
+  }
+
+  // Modify by some time delta.
+  TimeClass& operator+=(TimeDelta delta) {
+    return static_cast<TimeClass&>(*this = (*this + delta));
+  }
+  TimeClass& operator-=(TimeDelta delta) {
+    return static_cast<TimeClass&>(*this = (*this - delta));
+  }
+
+  // Comparison operators
+  bool operator==(TimeClass other) const {
+    return us_ == other.us_;
+  }
+  bool operator!=(TimeClass other) const {
+    return us_ != other.us_;
+  }
+  bool operator<(TimeClass other) const {
+    return us_ < other.us_;
+  }
+  bool operator<=(TimeClass other) const {
+    return us_ <= other.us_;
+  }
+  bool operator>(TimeClass other) const {
+    return us_ > other.us_;
+  }
+  bool operator>=(TimeClass other) const {
+    return us_ >= other.us_;
+  }
+
+  // Converts an integer value representing TimeClass to a class. This is used
+  // when deserializing a |TimeClass| structure, using a value known to be
+  // compatible. It is not provided as a constructor because the integer type
+  // may be unclear from the perspective of a caller.
+  static TimeClass FromInternalValue(int64_t us) { return TimeClass(us); }
+
+ protected:
+  explicit TimeBase(int64_t us) : us_(us) {}
+
+  // Time value in a microsecond timebase.
+  int64_t us_;
+};
+
+} // namespace time_internal
+
+// Free-function overload so TimeDelta + TimeClass works symmetrically with
+// TimeClass + TimeDelta.
+template<class TimeClass>
+inline TimeClass operator+(TimeDelta delta, TimeClass t) {
+  return t + delta;
+}
+
+// Time -----------------------------------------------------------------------
+
+// Represents a wall clock time in UTC. Values are not guaranteed to be
+// monotonically non-decreasing and are subject to large amounts of skew.
+class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
+ public:
+  // The representation of Jan 1, 1970 UTC in microseconds since the
+  // platform-dependent epoch.
+  static const int64_t kTimeTToMicrosecondsOffset;
+
+#if !defined(OS_WIN)
+  // On Mac & Linux, this value is the delta from the Windows epoch of 1601 to
+  // the Posix delta of 1970. This is used for migrating between the old
+  // 1970-based epochs to the new 1601-based ones. It should be removed from
+  // this global header and put in the platform-specific ones when we remove the
+  // migration code.
+  static const int64_t kWindowsEpochDeltaMicroseconds;
+#else
+  // To avoid overflow in QPC to Microseconds calculations, since we multiply
+  // by kMicrosecondsPerSecond, then the QPC value should not exceed
+  // (2^63 - 1) / 1E6. If it exceeds that threshold, we divide then multiply.
+  enum : int64_t{kQPCOverflowThreshold = 0x8637BD05AF7};
+#endif
+
+  // Represents an exploded time that can be formatted nicely. This is kind of
+  // like the Win32 SYSTEMTIME structure or the Unix "struct tm" with a few
+  // additions and changes to prevent errors.
+  struct BASE_EXPORT Exploded {
+    int year;          // Four digit year "2007"
+    int month;         // 1-based month (values 1 = January, etc.)
+    int day_of_week;   // 0-based day of week (0 = Sunday, etc.)
+    int day_of_month;  // 1-based day of month (1-31)
+    int hour;          // Hour within the current day (0-23)
+    int minute;        // Minute within the current hour (0-59)
+    int second;        // Second within the current minute (0-59 plus leap
+                       //   seconds which may take it up to 60).
+    int millisecond;   // Milliseconds within the current second (0-999)
+
+    // A cursory test for whether the data members are within their
+    // respective ranges. A 'true' return value does not guarantee the
+    // Exploded value can be successfully converted to a Time value.
+    bool HasValidValues() const;
+  };
+
+  // Contains the NULL time. Use Time::Now() to get the current time.
+  Time() : TimeBase(0) {
+  }
+
+  // Returns the time for epoch in Unix-like system (Jan 1, 1970).
+  static Time UnixEpoch();
+
+  // Returns the current time. Watch out, the system might adjust its clock
+  // in which case time will actually go backwards. We don't guarantee that
+  // times are increasing, or that two calls to Now() won't be the same.
+  static Time Now();
+
+  // Returns the current time. Same as Now() except that this function always
+  // uses system time so that there are no discrepancies between the returned
+  // time and system time even on virtual environments including our test bot.
+  // For timing sensitive unittests, this function should be used.
+  static Time NowFromSystemTime();
+
+  // Converts to/from time_t in UTC and a Time class.
+  // TODO(brettw) this should be removed once everybody starts using the |Time|
+  // class.
+  static Time FromTimeT(time_t tt);
+  time_t ToTimeT() const;
+
+  // Converts time to/from a double which is the number of seconds since epoch
+  // (Jan 1, 1970).  Webkit uses this format to represent time.
+  // Because WebKit initializes double time value to 0 to indicate "not
+  // initialized", we map it to empty Time object that also means "not
+  // initialized".
+  static Time FromDoubleT(double dt);
+  double ToDoubleT() const;
+
+#if defined(OS_POSIX)
+  // Converts the timespec structure to time. MacOS X 10.8.3 (and tentatively,
+  // earlier versions) will have the |ts|'s tv_nsec component zeroed out,
+  // having a 1 second resolution, which agrees with
+  // https://developer.apple.com/legacy/library/#technotes/tn/tn1150.html#HFSPlusDates.
+  static Time FromTimeSpec(const timespec& ts);
+#endif
+
+  // Converts to/from the Javascript convention for times, a number of
+  // milliseconds since the epoch:
+  // https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Date/getTime.
+  static Time FromJsTime(double ms_since_epoch);
+  double ToJsTime() const;
+
+  // Converts to Java convention for times, a number of
+  // milliseconds since the epoch.
+  int64_t ToJavaTime() const;
+
+#if defined(OS_POSIX)
+  static Time FromTimeVal(struct timeval t);
+  struct timeval ToTimeVal() const;
+#endif
+
+#if defined(OS_MACOSX)
+  static Time FromCFAbsoluteTime(CFAbsoluteTime t);
+  CFAbsoluteTime ToCFAbsoluteTime() const;
+#endif
+
+#if defined(OS_WIN)
+  static Time FromFileTime(FILETIME ft);
+  FILETIME ToFileTime() const;
+
+  // The minimum time of a low resolution timer.  This is basically a windows
+  // constant of ~15.6ms.  While it does vary on some older OS versions, we'll
+  // treat it as static across all windows versions.
+  static const int kMinLowResolutionThresholdMs = 16;
+
+  // Enable or disable Windows high resolution timer.
+  static void EnableHighResolutionTimer(bool enable);
+
+  // Activates or deactivates the high resolution timer based on the |activate|
+  // flag.  If the HighResolutionTimer is not Enabled (see
+  // EnableHighResolutionTimer), this function will return false.  Otherwise
+  // returns true.  Each successful activate call must be paired with a
+  // subsequent deactivate call.
+  // All callers to activate the high resolution timer must eventually call
+  // this function to deactivate the high resolution timer.
+  static bool ActivateHighResolutionTimer(bool activate);
+
+  // Returns true if the high resolution timer is both enabled and activated.
+  // This is provided for testing only, and is not tracked in a thread-safe
+  // way.
+  static bool IsHighResolutionTimerInUse();
+#endif
+
+  // Converts an exploded structure representing either the local time or UTC
+  // into a Time class. These legacy wrappers discard the success/failure
+  // result of the two-argument overloads below.
+  // TODO(maksims): Get rid of these in favor of the methods below when
+  // all the callers stop using these ones.
+  static Time FromUTCExploded(const Exploded& exploded) {
+    base::Time time;
+    ignore_result(FromUTCExploded(exploded, &time));
+    return time;
+  }
+  static Time FromLocalExploded(const Exploded& exploded) {
+    base::Time time;
+    ignore_result(FromLocalExploded(exploded, &time));
+    return time;
+  }
+
+  // Converts an exploded structure representing either the local time or UTC
+  // into a Time class. Returns false on a failure when, for example, a day of
+  // month is set to 31 on a 28-30 day month.
+  static bool FromUTCExploded(const Exploded& exploded,
+                              Time* time) WARN_UNUSED_RESULT {
+    return FromExploded(false, exploded, time);
+  }
+  static bool FromLocalExploded(const Exploded& exploded,
+                                Time* time) WARN_UNUSED_RESULT {
+    return FromExploded(true, exploded, time);
+  }
+
+  // Converts a string representation of time to a Time object.
+  // An example of a time string which is converted is as below:-
+  // "Tue, 15 Nov 1994 12:45:26 GMT". If the timezone is not specified
+  // in the input string, FromString assumes local time and FromUTCString
+  // assumes UTC. A timezone that cannot be parsed (e.g. "UTC" which is not
+  // specified in RFC822) is treated as if the timezone is not specified.
+  // TODO(iyengar) Move the FromString/FromTimeT/ToTimeT/FromFileTime to
+  // a new time converter class.
+  static bool FromString(const char* time_string, Time* parsed_time) {
+    return FromStringInternal(time_string, true, parsed_time);
+  }
+  static bool FromUTCString(const char* time_string, Time* parsed_time) {
+    return FromStringInternal(time_string, false, parsed_time);
+  }
+
+  // Fills the given exploded structure with either the local time or UTC from
+  // this time structure (containing UTC).
+  void UTCExplode(Exploded* exploded) const {
+    return Explode(false, exploded);
+  }
+  void LocalExplode(Exploded* exploded) const {
+    return Explode(true, exploded);
+  }
+
+  // Rounds this time down to the nearest day in local time. It will represent
+  // midnight on that day.
+  Time LocalMidnight() const;
+
+ private:
+  friend class time_internal::TimeBase<Time>;
+
+  // |us| is microseconds since the Windows epoch (1601-01-01 00:00:00 UTC).
+  explicit Time(int64_t us) : TimeBase(us) {}
+
+  // Explodes the given time to either local time |is_local = true| or UTC
+  // |is_local = false|.
+  void Explode(bool is_local, Exploded* exploded) const;
+
+  // Unexplodes a given time assuming the source is either local time
+  // |is_local = true| or UTC |is_local = false|. Function returns false on
+  // failure and sets |time| to Time(0). Otherwise returns true and sets |time|
+  // to non-exploded time.
+  static bool FromExploded(bool is_local,
+                           const Exploded& exploded,
+                           Time* time) WARN_UNUSED_RESULT;
+
+  // Converts a string representation of time to a Time object.
+  // An example of a time string which is converted is as below:-
+  // "Tue, 15 Nov 1994 12:45:26 GMT". If the timezone is not specified
+  // in the input string, local time |is_local = true| or
+  // UTC |is_local = false| is assumed. A timezone that cannot be parsed
+  // (e.g. "UTC" which is not specified in RFC822) is treated as if the
+  // timezone is not specified.
+  static bool FromStringInternal(const char* time_string,
+                                 bool is_local,
+                                 Time* parsed_time);
+
+  // Comparison does not consider |day_of_week| when doing the operation.
+  static bool ExplodedMostlyEquals(const Exploded& lhs, const Exploded& rhs);
+};
+
+// static
+constexpr TimeDelta TimeDelta::FromDays(int days) {
+ return days == std::numeric_limits<int>::max()
+ ? Max()
+ : TimeDelta(days * Time::kMicrosecondsPerDay);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromHours(int hours) {
+ return hours == std::numeric_limits<int>::max()
+ ? Max()
+ : TimeDelta(hours * Time::kMicrosecondsPerHour);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromMinutes(int minutes) {
+ return minutes == std::numeric_limits<int>::max()
+ ? Max()
+ : TimeDelta(minutes * Time::kMicrosecondsPerMinute);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromSeconds(int64_t secs) {
+ return FromProduct(secs, Time::kMicrosecondsPerSecond);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromMilliseconds(int64_t ms) {
+ return FromProduct(ms, Time::kMicrosecondsPerMillisecond);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromSecondsD(double secs) {
+ return FromDouble(secs * Time::kMicrosecondsPerSecond);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromMillisecondsD(double ms) {
+ return FromDouble(ms * Time::kMicrosecondsPerMillisecond);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromMicroseconds(int64_t us) {
+ return TimeDelta(us);
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromDouble(double value) {
+ // TODO(crbug.com/612601): Use saturated_cast<int64_t>(value) once we sort out
+ // the Min() behavior.
+ return value > std::numeric_limits<int64_t>::max()
+ ? Max()
+ : value < -std::numeric_limits<int64_t>::max()
+ ? -Max()
+ : TimeDelta(static_cast<int64_t>(value));
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromProduct(int64_t value,
+ int64_t positive_value) {
+ return (
+#if !defined(_PREFAST_) || !defined(OS_WIN)
+ // Avoid internal compiler errors in /analyze builds with VS 2015
+ // update 3.
+ // https://connect.microsoft.com/VisualStudio/feedback/details/2870865
+ DCHECK(positive_value > 0),
+#endif
+ value > std::numeric_limits<int64_t>::max() / positive_value
+ ? Max()
+ : value < -std::numeric_limits<int64_t>::max() / positive_value
+ ? -Max()
+ : TimeDelta(value * positive_value));
+}
+
+// For logging use only.
+BASE_EXPORT std::ostream& operator<<(std::ostream& os, Time time);
+
+// TimeTicks ------------------------------------------------------------------
+
+// Represents monotonically non-decreasing clock time.
+class BASE_EXPORT TimeTicks : public time_internal::TimeBase<TimeTicks> {
+ public:
+ // The underlying clock used to generate new TimeTicks.
+ enum class Clock {
+ LINUX_CLOCK_MONOTONIC,
+ IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME,
+ MAC_MACH_ABSOLUTE_TIME,
+ WIN_QPC,
+ WIN_ROLLOVER_PROTECTED_TIME_GET_TIME
+ };
+
+ TimeTicks() : TimeBase(0) {
+ }
+
+ // Platform-dependent tick count representing "right now." When
+ // IsHighResolution() returns false, the resolution of the clock could be
+ // as coarse as ~15.6ms. Otherwise, the resolution should be no worse than one
+ // microsecond.
+ static TimeTicks Now();
+
+ // Returns true if the high resolution clock is working on this system and
+ // Now() will return high resolution values. Note that, on systems where the
+ // high resolution clock works but is deemed inefficient, the low resolution
+ // clock will be used instead.
+ static bool IsHighResolution();
+
+#if defined(OS_WIN)
+ // Translates an absolute QPC timestamp into a TimeTicks value. The returned
+ // value has the same origin as Now(). Do NOT attempt to use this if
+ // IsHighResolution() returns false.
+ static TimeTicks FromQPCValue(LONGLONG qpc_value);
+#endif
+
+ // Get an estimate of the TimeTick value at the time of the UnixEpoch. Because
+ // Time and TimeTicks respond differently to user-set time and NTP
+ // adjustments, this number is only an estimate. Nevertheless, this can be
+ // useful when you need to relate the value of TimeTicks to a real time and
+ // date. Note: Upon first invocation, this function takes a snapshot of the
+ // realtime clock to establish a reference point. This function will return
+ // the same value for the duration of the application, but will be different
+ // in future application runs.
+ static TimeTicks UnixEpoch();
+
+ // Returns |this| snapped to the next tick, given a |tick_phase| and
+ // repeating |tick_interval| in both directions. |this| may be before,
+ // after, or equal to the |tick_phase|.
+ TimeTicks SnappedToNextTick(TimeTicks tick_phase,
+ TimeDelta tick_interval) const;
+
+ // Returns an enum indicating the underlying clock being used to generate
+ // TimeTicks timestamps. This function should only be used for debugging and
+ // logging purposes.
+ static Clock GetClock();
+
+#if defined(OS_WIN)
+ protected:
+ typedef DWORD (*TickFunctionType)(void);
+ static TickFunctionType SetMockTickFunction(TickFunctionType ticker);
+#endif
+
+ private:
+ friend class time_internal::TimeBase<TimeTicks>;
+
+ // Please use Now() to create a new object. This is for internal use
+ // and testing.
+ explicit TimeTicks(int64_t us) : TimeBase(us) {}
+};
+
+// For logging use only.
+BASE_EXPORT std::ostream& operator<<(std::ostream& os, TimeTicks time_ticks);
+
+// ThreadTicks ----------------------------------------------------------------
+
+// Represents a clock, specific to a particular thread, that runs only while the
+// thread is running.
+class BASE_EXPORT ThreadTicks : public time_internal::TimeBase<ThreadTicks> {
+ public:
+ ThreadTicks() : TimeBase(0) {
+ }
+
+ // Returns true if ThreadTicks::Now() is supported on this system.
+ static bool IsSupported() {
+#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
+ (defined(OS_MACOSX) && !defined(OS_IOS)) || defined(OS_ANDROID)
+ return true;
+#elif defined(OS_WIN)
+ return IsSupportedWin();
+#else
+ return false;
+#endif
+ }
+
+ // Waits until the initialization is completed. Needs to be guarded with a
+ // call to IsSupported().
+ static void WaitUntilInitialized() {
+#if defined(OS_WIN)
+ WaitUntilInitializedWin();
+#endif
+ }
+
+ // Returns thread-specific CPU-time on systems that support this feature.
+ // Needs to be guarded with a call to IsSupported(). Use this timer
+ // to (approximately) measure how much time the calling thread spent doing
+ // actual work vs. being de-scheduled. May return bogus results if the thread
+ // migrates to another CPU between two calls. Returns an empty ThreadTicks
+ // object until the initialization is completed. If a clock reading is
+ // absolutely needed, call WaitUntilInitialized() before this method.
+ static ThreadTicks Now();
+
+#if defined(OS_WIN)
+ // Similar to Now() above except this returns thread-specific CPU time for an
+ // arbitrary thread. All comments for Now() method above apply to this
+ // method as well.
+ static ThreadTicks GetForThread(const PlatformThreadHandle& thread_handle);
+#endif
+
+ private:
+ friend class time_internal::TimeBase<ThreadTicks>;
+
+ // Please use Now() or GetForThread() to create a new object. This is for
+ // internal use and testing.
+ explicit ThreadTicks(int64_t us) : TimeBase(us) {}
+
+#if defined(OS_WIN)
+ FRIEND_TEST_ALL_PREFIXES(TimeTicks, TSCTicksPerSecond);
+
+ // Returns the frequency of the TSC in ticks per second, or 0 if it hasn't
+ // been measured yet. Needs to be guarded with a call to IsSupported().
+ // This method is declared here rather than in the anonymous namespace to
+ // allow testing.
+ static double TSCTicksPerSecond();
+
+ static bool IsSupportedWin();
+ static void WaitUntilInitializedWin();
+#endif
+};
+
+// For logging use only.
+BASE_EXPORT std::ostream& operator<<(std::ostream& os, ThreadTicks time_ticks);
+
+} // namespace base
+
+#endif // BASE_TIME_TIME_H_
diff --git a/libchrome/base/time/time_mac.cc b/libchrome/base/time/time_mac.cc
new file mode 100644
index 0000000..373ec3a
--- /dev/null
+++ b/libchrome/base/time/time_mac.cc
@@ -0,0 +1,275 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+
+#include <CoreFoundation/CFDate.h>
+#include <CoreFoundation/CFTimeZone.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/sysctl.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <time.h>
+
+#include "base/logging.h"
+#include "base/mac/mach_logging.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/macros.h"
+#include "base/numerics/safe_conversions.h"
+#include "build/build_config.h"
+
+namespace {
+
+int64_t ComputeCurrentTicks() {
+#if defined(OS_IOS)
+ // On iOS mach_absolute_time stops while the device is sleeping. Instead use
+ // now - KERN_BOOTTIME to get a time difference that is not impacted by clock
+ // changes. KERN_BOOTTIME will be updated by the system whenever the system
+ // clock changes.
+ struct timeval boottime;
+ int mib[2] = {CTL_KERN, KERN_BOOTTIME};
+ size_t size = sizeof(boottime);
+ int kr = sysctl(mib, arraysize(mib), &boottime, &size, nullptr, 0);
+ DCHECK_EQ(KERN_SUCCESS, kr);
+ base::TimeDelta time_difference = base::Time::Now() -
+ (base::Time::FromTimeT(boottime.tv_sec) +
+ base::TimeDelta::FromMicroseconds(boottime.tv_usec));
+ return time_difference.InMicroseconds();
+#else
+ static mach_timebase_info_data_t timebase_info;
+ if (timebase_info.denom == 0) {
+ // Zero-initialization of statics guarantees that denom will be 0 before
+ // calling mach_timebase_info. mach_timebase_info will never set denom to
+ // 0 as that would be invalid, so the zero-check can be used to determine
+ // whether mach_timebase_info has already been called. This is
+ // recommended by Apple's QA1398.
+ kern_return_t kr = mach_timebase_info(&timebase_info);
+ MACH_DCHECK(kr == KERN_SUCCESS, kr) << "mach_timebase_info";
+ }
+
+ // mach_absolute_time is it when it comes to ticks on the Mac. Other calls
+ // with less precision (such as TickCount) just call through to
+ // mach_absolute_time.
+
+ // timebase_info converts absolute time tick units into nanoseconds. Convert
+ // to microseconds up front to stave off overflows.
+ base::CheckedNumeric<uint64_t> result(
+ mach_absolute_time() / base::Time::kNanosecondsPerMicrosecond);
+ result *= timebase_info.numer;
+ result /= timebase_info.denom;
+
+ // Don't bother with the rollover handling that the Windows version does.
+ // With numer and denom = 1 (the expected case), the 64-bit absolute time
+ // reported in nanoseconds is enough to last nearly 585 years.
+ return base::checked_cast<int64_t>(result.ValueOrDie());
+#endif // defined(OS_IOS)
+}
+
+int64_t ComputeThreadTicks() {
+#if defined(OS_IOS)
+ NOTREACHED();
+ return 0;
+#else
+ base::mac::ScopedMachSendRight thread(mach_thread_self());
+ mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
+ thread_basic_info_data_t thread_info_data;
+
+ if (thread.get() == MACH_PORT_NULL) {
+ DLOG(ERROR) << "Failed to get mach_thread_self()";
+ return 0;
+ }
+
+ kern_return_t kr = thread_info(
+ thread.get(),
+ THREAD_BASIC_INFO,
+ reinterpret_cast<thread_info_t>(&thread_info_data),
+ &thread_info_count);
+ MACH_DCHECK(kr == KERN_SUCCESS, kr) << "thread_info";
+
+ base::CheckedNumeric<int64_t> absolute_micros(
+ thread_info_data.user_time.seconds +
+ thread_info_data.system_time.seconds);
+ absolute_micros *= base::Time::kMicrosecondsPerSecond;
+ absolute_micros += (thread_info_data.user_time.microseconds +
+ thread_info_data.system_time.microseconds);
+ return absolute_micros.ValueOrDie();
+#endif // defined(OS_IOS)
+}
+
+} // namespace
+
+namespace base {
+
+// The Time routines in this file use Mach and CoreFoundation APIs, since the
+// POSIX definition of time_t in Mac OS X wraps around after 2038--and
+// there are already cookie expiration dates, etc., past that time out in
+// the field. Using CFDate prevents that problem, and using mach_absolute_time
+// for TimeTicks gives us nice high-resolution interval timing.
+
+// Time -----------------------------------------------------------------------
+
+// Core Foundation uses a double second count since 2001-01-01 00:00:00 UTC.
+// The UNIX epoch is 1970-01-01 00:00:00 UTC.
+// Windows uses a Gregorian epoch of 1601. We need to match this internally
+// so that our time representations match across all platforms. See bug 14734.
+// irb(main):010:0> Time.at(0).getutc()
+// => Thu Jan 01 00:00:00 UTC 1970
+// irb(main):011:0> Time.at(-11644473600).getutc()
+// => Mon Jan 01 00:00:00 UTC 1601
+static const int64_t kWindowsEpochDeltaSeconds = INT64_C(11644473600);
+
+// static
+const int64_t Time::kWindowsEpochDeltaMicroseconds =
+ kWindowsEpochDeltaSeconds * Time::kMicrosecondsPerSecond;
+
+// Some functions in time.cc use time_t directly, so we provide an offset
+// to convert from time_t (Unix epoch) and internal (Windows epoch).
+// static
+const int64_t Time::kTimeTToMicrosecondsOffset = kWindowsEpochDeltaMicroseconds;
+
+// static
+Time Time::Now() {
+ return FromCFAbsoluteTime(CFAbsoluteTimeGetCurrent());
+}
+
+// static
+Time Time::FromCFAbsoluteTime(CFAbsoluteTime t) {
+ static_assert(std::numeric_limits<CFAbsoluteTime>::has_infinity,
+ "CFAbsoluteTime must have an infinity value");
+ if (t == 0)
+ return Time(); // Consider 0 as a null Time.
+ if (t == std::numeric_limits<CFAbsoluteTime>::infinity())
+ return Max();
+ return Time(static_cast<int64_t>((t + kCFAbsoluteTimeIntervalSince1970) *
+ kMicrosecondsPerSecond) +
+ kWindowsEpochDeltaMicroseconds);
+}
+
+CFAbsoluteTime Time::ToCFAbsoluteTime() const {
+ static_assert(std::numeric_limits<CFAbsoluteTime>::has_infinity,
+ "CFAbsoluteTime must have an infinity value");
+ if (is_null())
+ return 0; // Consider 0 as a null Time.
+ if (is_max())
+ return std::numeric_limits<CFAbsoluteTime>::infinity();
+ return (static_cast<CFAbsoluteTime>(us_ - kWindowsEpochDeltaMicroseconds) /
+ kMicrosecondsPerSecond) - kCFAbsoluteTimeIntervalSince1970;
+}
+
+// static
+Time Time::NowFromSystemTime() {
+ // Just use Now() because Now() returns the system time.
+ return Now();
+}
+
+// static
+bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
+ base::ScopedCFTypeRef<CFTimeZoneRef> time_zone(
+ is_local
+ ? CFTimeZoneCopySystem()
+ : CFTimeZoneCreateWithTimeIntervalFromGMT(kCFAllocatorDefault, 0));
+ base::ScopedCFTypeRef<CFCalendarRef> gregorian(CFCalendarCreateWithIdentifier(
+ kCFAllocatorDefault, kCFGregorianCalendar));
+ CFCalendarSetTimeZone(gregorian, time_zone);
+ CFAbsoluteTime absolute_time;
+ // 'S' is not defined in componentDesc in Apple documentation, but can be
+ // found at http://www.opensource.apple.com/source/CF/CF-855.17/CFCalendar.c
+ CFCalendarComposeAbsoluteTime(
+ gregorian, &absolute_time, "yMdHmsS", exploded.year, exploded.month,
+ exploded.day_of_month, exploded.hour, exploded.minute, exploded.second,
+ exploded.millisecond);
+ CFAbsoluteTime seconds = absolute_time + kCFAbsoluteTimeIntervalSince1970;
+
+ base::Time converted_time =
+ Time(static_cast<int64_t>(seconds * kMicrosecondsPerSecond) +
+ kWindowsEpochDeltaMicroseconds);
+
+ // If |exploded.day_of_month| is set to 31
+ // on a 28-30 day month, it will return the first day of the next month.
+ // Thus round-trip the time and compare the initial |exploded| with
+ // |utc_to_exploded| time.
+ base::Time::Exploded to_exploded;
+ if (!is_local)
+ converted_time.UTCExplode(&to_exploded);
+ else
+ converted_time.LocalExplode(&to_exploded);
+
+ if (ExplodedMostlyEquals(to_exploded, exploded)) {
+ *time = converted_time;
+ return true;
+ }
+
+ *time = Time(0);
+ return false;
+}
+
+void Time::Explode(bool is_local, Exploded* exploded) const {
+ // Avoid rounding issues, by only putting the integral number of seconds
+ // (rounded towards -infinity) into a |CFAbsoluteTime| (which is a |double|).
+ int64_t microsecond = us_ % kMicrosecondsPerSecond;
+ if (microsecond < 0)
+ microsecond += kMicrosecondsPerSecond;
+ CFAbsoluteTime seconds = ((us_ - microsecond) / kMicrosecondsPerSecond) -
+ kWindowsEpochDeltaSeconds -
+ kCFAbsoluteTimeIntervalSince1970;
+
+ base::ScopedCFTypeRef<CFTimeZoneRef> time_zone(
+ is_local
+ ? CFTimeZoneCopySystem()
+ : CFTimeZoneCreateWithTimeIntervalFromGMT(kCFAllocatorDefault, 0));
+ base::ScopedCFTypeRef<CFCalendarRef> gregorian(CFCalendarCreateWithIdentifier(
+ kCFAllocatorDefault, kCFGregorianCalendar));
+ CFCalendarSetTimeZone(gregorian, time_zone);
+ int second, day_of_week;
+ // 'E' sets the day of week, but is not defined in componentDesc in Apple
+ // documentation. It can be found in open source code here:
+ // http://www.opensource.apple.com/source/CF/CF-855.17/CFCalendar.c
+ CFCalendarDecomposeAbsoluteTime(gregorian, seconds, "yMdHmsE",
+ &exploded->year, &exploded->month,
+ &exploded->day_of_month, &exploded->hour,
+ &exploded->minute, &second, &day_of_week);
+ // Make sure seconds are rounded down towards -infinity.
+ exploded->second = floor(second);
+ // |Exploded|'s convention for day of week is 0 = Sunday, i.e. different
+ // from CF's 1 = Sunday.
+ exploded->day_of_week = (day_of_week - 1) % 7;
+ // Calculate milliseconds ourselves, since we rounded the |seconds|, making
+ // sure to round towards -infinity.
+ exploded->millisecond =
+ (microsecond >= 0) ? microsecond / kMicrosecondsPerMillisecond :
+ (microsecond - kMicrosecondsPerMillisecond + 1) /
+ kMicrosecondsPerMillisecond;
+}
+
+// TimeTicks ------------------------------------------------------------------
+
+// static
+TimeTicks TimeTicks::Now() {
+ return TimeTicks(ComputeCurrentTicks());
+}
+
+// static
+bool TimeTicks::IsHighResolution() {
+ return true;
+}
+
+// static
+TimeTicks::Clock TimeTicks::GetClock() {
+#if defined(OS_IOS)
+ return Clock::IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME;
+#else
+ return Clock::MAC_MACH_ABSOLUTE_TIME;
+#endif // defined(OS_IOS)
+}
+
+// static
+ThreadTicks ThreadTicks::Now() {
+ return ThreadTicks(ComputeThreadTicks());
+}
+
+} // namespace base
diff --git a/libchrome/base/time/time_posix.cc b/libchrome/base/time/time_posix.cc
new file mode 100644
index 0000000..495e249
--- /dev/null
+++ b/libchrome/base/time/time_posix.cc
@@ -0,0 +1,386 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+
+#include <stdint.h>
+#include <sys/time.h>
+#include <time.h>
+#if defined(OS_ANDROID) && !defined(__LP64__)
+#include <time64.h>
+#endif
+#include <unistd.h>
+
+#include <limits>
+#include <ostream>
+
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include "base/os_compat_android.h"
+#elif defined(OS_NACL)
+#include "base/os_compat_nacl.h"
+#endif
+
+#if !defined(OS_MACOSX)
+#include "base/lazy_instance.h"
+#include "base/synchronization/lock.h"
+#endif
+
+namespace {
+
+#if !defined(OS_MACOSX)
+// This prevents a crash on traversing the environment global and looking up
+// the 'TZ' variable in libc. See: crbug.com/390567.
+base::LazyInstance<base::Lock>::Leaky
+ g_sys_time_to_time_struct_lock = LAZY_INSTANCE_INITIALIZER;
+
+// Define a system-specific SysTime that wraps either to a time_t or
+// a time64_t depending on the host system, and associated conversion.
+// See crbug.com/162007
+#if defined(OS_ANDROID) && !defined(__LP64__)
+typedef time64_t SysTime;
+
+SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
+ base::AutoLock locked(g_sys_time_to_time_struct_lock.Get());
+ if (is_local)
+ return mktime64(timestruct);
+ else
+ return timegm64(timestruct);
+}
+
+void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
+ base::AutoLock locked(g_sys_time_to_time_struct_lock.Get());
+ if (is_local)
+ localtime64_r(&t, timestruct);
+ else
+ gmtime64_r(&t, timestruct);
+}
+
+#else // OS_ANDROID && !__LP64__
+typedef time_t SysTime;
+
+SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
+ base::AutoLock locked(g_sys_time_to_time_struct_lock.Get());
+ if (is_local)
+ return mktime(timestruct);
+ else
+ return timegm(timestruct);
+}
+
+void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
+ base::AutoLock locked(g_sys_time_to_time_struct_lock.Get());
+ if (is_local)
+ localtime_r(&t, timestruct);
+ else
+ gmtime_r(&t, timestruct);
+}
+#endif // OS_ANDROID
+
+int64_t ConvertTimespecToMicros(const struct timespec& ts) {
+ base::CheckedNumeric<int64_t> result(ts.tv_sec);
+ result *= base::Time::kMicrosecondsPerSecond;
+ result += (ts.tv_nsec / base::Time::kNanosecondsPerMicrosecond);
+ return result.ValueOrDie();
+}
+
+// Helper function to get results from clock_gettime() and convert to a
+// microsecond timebase. Minimum requirement is MONOTONIC_CLOCK to be supported
+// on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
+// _POSIX_MONOTONIC_CLOCK to -1.
+#if (defined(OS_POSIX) && \
+ defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0) || \
+ defined(OS_BSD) || defined(OS_ANDROID)
+int64_t ClockNow(clockid_t clk_id) {
+ struct timespec ts;
+ if (clock_gettime(clk_id, &ts) != 0) {
+ NOTREACHED() << "clock_gettime(" << clk_id << ") failed.";
+ return 0;
+ }
+ return ConvertTimespecToMicros(ts);
+}
+#else // _POSIX_MONOTONIC_CLOCK
+#error No usable tick clock function on this platform.
+#endif // _POSIX_MONOTONIC_CLOCK
+#endif // !defined(OS_MACOSX)
+
+} // namespace
+
+namespace base {
+
+struct timespec TimeDelta::ToTimeSpec() const {
+ int64_t microseconds = InMicroseconds();
+ time_t seconds = 0;
+ if (microseconds >= Time::kMicrosecondsPerSecond) {
+ seconds = InSeconds();
+ microseconds -= seconds * Time::kMicrosecondsPerSecond;
+ }
+ struct timespec result =
+ {seconds,
+ static_cast<long>(microseconds * Time::kNanosecondsPerMicrosecond)};
+ return result;
+}
+
+#if !defined(OS_MACOSX)
+// The Time routines in this file use standard POSIX routines, or almost-
+// standard routines in the case of timegm. We need to use a Mach-specific
+// function for TimeTicks::Now() on Mac OS X.
+
+// Time -----------------------------------------------------------------------
+
+// Windows uses a Gregorian epoch of 1601. We need to match this internally
+// so that our time representations match across all platforms. See bug 14734.
+// irb(main):010:0> Time.at(0).getutc()
+// => Thu Jan 01 00:00:00 UTC 1970
+// irb(main):011:0> Time.at(-11644473600).getutc()
+// => Mon Jan 01 00:00:00 UTC 1601
+static const int64_t kWindowsEpochDeltaSeconds = INT64_C(11644473600);
+
+// static
+const int64_t Time::kWindowsEpochDeltaMicroseconds =
+ kWindowsEpochDeltaSeconds * Time::kMicrosecondsPerSecond;
+
+// Some functions in time.cc use time_t directly, so we provide an offset
+// to convert from time_t (Unix epoch) and internal (Windows epoch).
+// static
+const int64_t Time::kTimeTToMicrosecondsOffset = kWindowsEpochDeltaMicroseconds;
+
+// static
+Time Time::Now() {
+ struct timeval tv;
+ struct timezone tz = { 0, 0 }; // UTC
+ if (gettimeofday(&tv, &tz) != 0) {
+ DCHECK(0) << "Could not determine time of day";
+ PLOG(ERROR) << "Call to gettimeofday failed.";
+ // Return null instead of uninitialized |tv| value, which contains random
+ // garbage data. This may result in the crash seen in crbug.com/147570.
+ return Time();
+ }
+ // Combine seconds and microseconds in a 64-bit field containing microseconds
+ // since the epoch. That's enough for nearly 600 centuries. Adjust from
+ // Unix (1970) to Windows (1601) epoch.
+ return Time((tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec) +
+ kWindowsEpochDeltaMicroseconds);
+}
+
+// static
+Time Time::NowFromSystemTime() {
+ // Just use Now() because Now() returns the system time.
+ return Now();
+}
+
+void Time::Explode(bool is_local, Exploded* exploded) const {
+ // Time stores times with microsecond resolution, but Exploded only carries
+ // millisecond resolution, so begin by being lossy. Adjust from Windows
+ // epoch (1601) to Unix epoch (1970);
+ int64_t microseconds = us_ - kWindowsEpochDeltaMicroseconds;
+ // The following values are all rounded towards -infinity.
+ int64_t milliseconds; // Milliseconds since epoch.
+ SysTime seconds; // Seconds since epoch.
+ int millisecond; // Exploded millisecond value (0-999).
+ if (microseconds >= 0) {
+ // Rounding towards -infinity <=> rounding towards 0, in this case.
+ milliseconds = microseconds / kMicrosecondsPerMillisecond;
+ seconds = milliseconds / kMillisecondsPerSecond;
+ millisecond = milliseconds % kMillisecondsPerSecond;
+ } else {
+ // Round these *down* (towards -infinity).
+ milliseconds = (microseconds - kMicrosecondsPerMillisecond + 1) /
+ kMicrosecondsPerMillisecond;
+ seconds = (milliseconds - kMillisecondsPerSecond + 1) /
+ kMillisecondsPerSecond;
+ // Make this nonnegative (and between 0 and 999 inclusive).
+ millisecond = milliseconds % kMillisecondsPerSecond;
+ if (millisecond < 0)
+ millisecond += kMillisecondsPerSecond;
+ }
+
+ struct tm timestruct;
+ SysTimeToTimeStruct(seconds, ×truct, is_local);
+
+ exploded->year = timestruct.tm_year + 1900;
+ exploded->month = timestruct.tm_mon + 1;
+ exploded->day_of_week = timestruct.tm_wday;
+ exploded->day_of_month = timestruct.tm_mday;
+ exploded->hour = timestruct.tm_hour;
+ exploded->minute = timestruct.tm_min;
+ exploded->second = timestruct.tm_sec;
+ exploded->millisecond = millisecond;
+}
+
+// static
+bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
+ struct tm timestruct;
+ timestruct.tm_sec = exploded.second;
+ timestruct.tm_min = exploded.minute;
+ timestruct.tm_hour = exploded.hour;
+ timestruct.tm_mday = exploded.day_of_month;
+ timestruct.tm_mon = exploded.month - 1;
+ timestruct.tm_year = exploded.year - 1900;
+ timestruct.tm_wday = exploded.day_of_week; // mktime/timegm ignore this
+ timestruct.tm_yday = 0; // mktime/timegm ignore this
+ timestruct.tm_isdst = -1; // attempt to figure it out
+#if !defined(OS_NACL) && !defined(OS_SOLARIS)
+ timestruct.tm_gmtoff = 0; // not a POSIX field, so mktime/timegm ignore
+ timestruct.tm_zone = NULL; // not a POSIX field, so mktime/timegm ignore
+#endif
+
+ int64_t milliseconds;
+ SysTime seconds;
+
+ // Certain exploded dates do not really exist due to daylight saving times,
+ // and this causes mktime() to return implementation-defined values when
+ // tm_isdst is set to -1. On Android, the function will return -1, while the
+ // C libraries of other platforms typically return a liberally-chosen value.
+ // Handling this requires the special code below.
+
+ // SysTimeFromTimeStruct() modifies the input structure, save current value.
+ struct tm timestruct0 = timestruct;
+
+ seconds = SysTimeFromTimeStruct(×truct, is_local);
+ if (seconds == -1) {
+ // Get the time values with tm_isdst == 0 and 1, then select the closest one
+ // to UTC 00:00:00 that isn't -1.
+ timestruct = timestruct0;
+ timestruct.tm_isdst = 0;
+ int64_t seconds_isdst0 = SysTimeFromTimeStruct(×truct, is_local);
+
+ timestruct = timestruct0;
+ timestruct.tm_isdst = 1;
+ int64_t seconds_isdst1 = SysTimeFromTimeStruct(×truct, is_local);
+
+ // seconds_isdst0 or seconds_isdst1 can be -1 for some timezones.
+ // E.g. "CLST" (Chile Summer Time) returns -1 for 'tm_isdst == 1'.
+ if (seconds_isdst0 < 0)
+ seconds = seconds_isdst1;
+ else if (seconds_isdst1 < 0)
+ seconds = seconds_isdst0;
+ else
+ seconds = std::min(seconds_isdst0, seconds_isdst1);
+ }
+
+ // Handle overflow. Clamping the range to what mktime and timegm might
+ // return is the best that can be done here. It's not ideal, but it's better
+ // than failing here or ignoring the overflow case and treating each time
+ // overflow as one second prior to the epoch.
+ if (seconds == -1 &&
+ (exploded.year < 1969 || exploded.year > 1970)) {
+ // If exploded.year is 1969 or 1970, take -1 as correct, with the
+ // time indicating 1 second prior to the epoch. (1970 is allowed to handle
+ // time zone and DST offsets.) Otherwise, return the most future or past
+ // time representable. Assumes the time_t epoch is 1970-01-01 00:00:00 UTC.
+ //
+ // The minimum and maximum representable times that mktime and timegm could
+ // return are used here instead of values outside that range to allow for
+ // proper round-tripping between exploded and counter-type time
+ // representations in the presence of possible truncation to time_t by
+ // division and use with other functions that accept time_t.
+ //
+ // When representing the most distant time in the future, add in an extra
+ // 999ms to avoid the time being less than any other possible value that
+ // this function can return.
+
+ // On Android, SysTime is int64_t, special care must be taken to avoid
+ // overflows.
+ const int64_t min_seconds = (sizeof(SysTime) < sizeof(int64_t))
+ ? std::numeric_limits<SysTime>::min()
+ : std::numeric_limits<int32_t>::min();
+ const int64_t max_seconds = (sizeof(SysTime) < sizeof(int64_t))
+ ? std::numeric_limits<SysTime>::max()
+ : std::numeric_limits<int32_t>::max();
+ if (exploded.year < 1969) {
+ milliseconds = min_seconds * kMillisecondsPerSecond;
+ } else {
+ milliseconds = max_seconds * kMillisecondsPerSecond;
+ milliseconds += (kMillisecondsPerSecond - 1);
+ }
+ } else {
+ milliseconds = seconds * kMillisecondsPerSecond + exploded.millisecond;
+ }
+
+ // Adjust from Unix (1970) to Windows (1601) epoch.
+ base::Time converted_time =
+ Time((milliseconds * kMicrosecondsPerMillisecond) +
+ kWindowsEpochDeltaMicroseconds);
+
+ // If |exploded.day_of_month| is set to 31 on a 28-30 day month, it will
+ // return the first day of the next month. Thus round-trip the time and
+ // compare the initial |exploded| with |utc_to_exploded| time.
+ base::Time::Exploded to_exploded;
+ if (!is_local)
+ converted_time.UTCExplode(&to_exploded);
+ else
+ converted_time.LocalExplode(&to_exploded);
+
+ if (ExplodedMostlyEquals(to_exploded, exploded)) {
+ *time = converted_time;
+ return true;
+ }
+
+ *time = Time(0);
+ return false;
+}
+
+// TimeTicks ------------------------------------------------------------------
+// static
+TimeTicks TimeTicks::Now() {
+ return TimeTicks(ClockNow(CLOCK_MONOTONIC));
+}
+
+// static
+TimeTicks::Clock TimeTicks::GetClock() {
+ return Clock::LINUX_CLOCK_MONOTONIC;
+}
+
+// static
+bool TimeTicks::IsHighResolution() {
+ return true;
+}
+
+// static
+ThreadTicks ThreadTicks::Now() {
+#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
+ defined(OS_ANDROID)
+ return ThreadTicks(ClockNow(CLOCK_THREAD_CPUTIME_ID));
+#else
+ NOTREACHED();
+ return ThreadTicks();
+#endif
+}
+
+#endif // !OS_MACOSX
+
+// static
+Time Time::FromTimeVal(struct timeval t) {
+ DCHECK_LT(t.tv_usec, static_cast<int>(Time::kMicrosecondsPerSecond));
+ DCHECK_GE(t.tv_usec, 0);
+ if (t.tv_usec == 0 && t.tv_sec == 0)
+ return Time();
+ if (t.tv_usec == static_cast<suseconds_t>(Time::kMicrosecondsPerSecond) - 1 &&
+ t.tv_sec == std::numeric_limits<time_t>::max())
+ return Max();
+ return Time((static_cast<int64_t>(t.tv_sec) * Time::kMicrosecondsPerSecond) +
+ t.tv_usec + kTimeTToMicrosecondsOffset);
+}
+
+struct timeval Time::ToTimeVal() const {
+ struct timeval result;
+ if (is_null()) {
+ result.tv_sec = 0;
+ result.tv_usec = 0;
+ return result;
+ }
+ if (is_max()) {
+ result.tv_sec = std::numeric_limits<time_t>::max();
+ result.tv_usec = static_cast<suseconds_t>(Time::kMicrosecondsPerSecond) - 1;
+ return result;
+ }
+ int64_t us = us_ - kTimeTToMicrosecondsOffset;
+ result.tv_sec = us / Time::kMicrosecondsPerSecond;
+ result.tv_usec = us % Time::kMicrosecondsPerSecond;
+ return result;
+}
+
+} // namespace base
diff --git a/libchrome/base/time/time_unittest.cc b/libchrome/base/time/time_unittest.cc
new file mode 100644
index 0000000..4f47d56
--- /dev/null
+++ b/libchrome/base/time/time_unittest.cc
@@ -0,0 +1,1182 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time/time.h"
+
+#include <stdint.h>
+#include <time.h>
+#include <limits>
+#include <sstream>
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+TEST(TimeTestOutOfBounds, FromExplodedOutOfBoundsTime) {
+  // FromUTCExploded()/FromLocalExploded() must fail (return false and set the
+  // output to the null Time) for every entry below.  Note that
+  // HasValidValues() only range-checks each field individually, so Feb 31 and
+  // Apr 31 still report is_valid == true even though conversion fails.
+
+  const struct DateTestData {
+    Time::Exploded explode;
+    bool is_valid;
+  } kDateTestData[] = {
+      // 31st of February
+      {{2016, 2, 0, 31, 12, 30, 0, 0}, true},
+      // 31st of April
+      {{2016, 4, 0, 31, 8, 43, 0, 0}, true},
+      // Negative month
+      {{2016, -5, 0, 2, 4, 10, 0, 0}, false},
+      // Negative date of month
+      {{2016, 6, 0, -15, 2, 50, 0, 0}, false},
+      // Negative hours
+      {{2016, 7, 0, 10, -11, 29, 0, 0}, false},
+      // Negative minutes
+      {{2016, 3, 0, 14, 10, -29, 0, 0}, false},
+      // Negative seconds
+      {{2016, 10, 0, 25, 7, 47, -30, 0}, false},
+      // Negative milliseconds
+      {{2016, 10, 0, 25, 7, 47, 20, -500}, false},
+      // Hours are too large
+      {{2016, 7, 0, 10, 26, 29, 0, 0}, false},
+      // Minutes are too large
+      {{2016, 3, 0, 14, 10, 78, 0, 0}, false},
+      // Seconds are too large
+      {{2016, 10, 0, 25, 7, 47, 234, 0}, false},
+      // Milliseconds are too large
+      {{2016, 10, 0, 25, 6, 31, 23, 1643}, false},
+  };
+
+  for (const auto& test : kDateTestData) {
+    EXPECT_EQ(test.explode.HasValidValues(), test.is_valid);
+
+    base::Time result;
+    EXPECT_FALSE(base::Time::FromUTCExploded(test.explode, &result));
+    EXPECT_TRUE(result.is_null());
+    EXPECT_FALSE(base::Time::FromLocalExploded(test.explode, &result));
+    EXPECT_TRUE(result.is_null());
+  }
+}
+
+// Specialized test fixture allowing time strings without timezones to be
+// tested by comparing them to a known time in the local zone.
+// See also pr_time_unittests.cc
+class TimeTest : public testing::Test {
+ protected:
+  void SetUp() override {
+    // Use mktime to get a time_t, and turn it into a PRTime by converting
+    // seconds to microseconds. Use 15th Oct 2007 12:45:00 local. This
+    // must be a time guaranteed to be outside of a DST fallback hour in
+    // any timezone.
+    struct tm local_comparison_tm = {
+        0,            // second
+        45,           // minute
+        12,           // hour
+        15,           // day of month
+        10 - 1,       // month
+        2007 - 1900,  // year
+        0,            // day of week (ignored, output only)
+        0,            // day of year (ignored, output only)
+        -1            // DST in effect, -1 tells mktime to figure it out
+    };
+
+    time_t converted_time = mktime(&local_comparison_tm);
+    ASSERT_GT(converted_time, 0);
+    comparison_time_local_ = Time::FromTimeT(converted_time);
+
+    // time_t representation of 15th Oct 2007 12:45:00 PDT
+    comparison_time_pdt_ = Time::FromTimeT(1192477500);
+  }
+
+  // 15 Oct 2007 12:45:00 in the machine's local time zone.
+  Time comparison_time_local_;
+  // 15 Oct 2007 12:45:00 PDT (19:45:00 GMT).
+  Time comparison_time_pdt_;
+};
+
+// Test conversions to/from time_t and exploding/unexploding.
+TEST_F(TimeTest, TimeT) {
+  // C library time and exploded time.
+  time_t now_t_1 = time(NULL);
+  struct tm tms;
+#if defined(OS_WIN)
+  localtime_s(&tms, &now_t_1);
+#elif defined(OS_POSIX)
+  localtime_r(&now_t_1, &tms);
+#endif
+
+  // Convert to ours.
+  Time our_time_1 = Time::FromTimeT(now_t_1);
+  Time::Exploded exploded;
+  our_time_1.LocalExplode(&exploded);
+
+  // This will test both our exploding and our time_t -> Time conversion.
+  EXPECT_EQ(tms.tm_year + 1900, exploded.year);
+  EXPECT_EQ(tms.tm_mon + 1, exploded.month);
+  EXPECT_EQ(tms.tm_mday, exploded.day_of_month);
+  EXPECT_EQ(tms.tm_hour, exploded.hour);
+  EXPECT_EQ(tms.tm_min, exploded.minute);
+  EXPECT_EQ(tms.tm_sec, exploded.second);
+
+  // Convert exploded back to the time struct.
+  Time our_time_2;
+  EXPECT_TRUE(Time::FromLocalExploded(exploded, &our_time_2));
+  EXPECT_TRUE(our_time_1 == our_time_2);
+
+  time_t now_t_2 = our_time_2.ToTimeT();
+  EXPECT_EQ(now_t_1, now_t_2);
+
+  // FromTimeT() is static; Time() below is merely a way to reach it.
+  EXPECT_EQ(10, Time().FromTimeT(10).ToTimeT());
+  EXPECT_EQ(10.0, Time().FromTimeT(10).ToDoubleT());
+
+  // Conversions of 0 should stay 0.
+  EXPECT_EQ(0, Time().ToTimeT());
+  EXPECT_EQ(0, Time::FromTimeT(0).ToInternalValue());
+}
+
+// Test conversions to/from javascript time (milliseconds since the Unix
+// epoch, expressed as a double).
+TEST_F(TimeTest, JsTime) {
+  Time epoch = Time::FromJsTime(0.0);
+  EXPECT_EQ(epoch, Time::UnixEpoch());
+  Time t = Time::FromJsTime(700000.3);
+  EXPECT_EQ(700.0003, t.ToDoubleT());
+  t = Time::FromDoubleT(800.73);
+  EXPECT_EQ(800730.0, t.ToJsTime());
+}
+
+#if defined(OS_POSIX)
+TEST_F(TimeTest, FromTimeVal) {
+  // struct timeval carries whole microseconds, so this round-trip is exact.
+  Time now = Time::Now();
+  Time also_now = Time::FromTimeVal(now.ToTimeVal());
+  EXPECT_EQ(now, also_now);
+}
+#endif  // OS_POSIX
+
+TEST_F(TimeTest, FromExplodedWithMilliseconds) {
+  // Some platform implementations of FromExploded are liable to drop
+  // milliseconds if we aren't careful.
+  Time now = Time::NowFromSystemTime();
+  Time::Exploded exploded1 = {0};
+  now.UTCExplode(&exploded1);
+  exploded1.millisecond = 500;
+  Time time;
+  EXPECT_TRUE(Time::FromUTCExploded(exploded1, &time));
+  Time::Exploded exploded2 = {0};
+  time.UTCExplode(&exploded2);
+  EXPECT_EQ(exploded1.millisecond, exploded2.millisecond);
+}
+
+TEST_F(TimeTest, ZeroIsSymmetric) {
+  // time_t 0 and the null Time must map to each other in both directions.
+  Time zero_time(Time::FromTimeT(0));
+  EXPECT_EQ(0, zero_time.ToTimeT());
+
+  EXPECT_EQ(0.0, zero_time.ToDoubleT());
+}
+
+TEST_F(TimeTest, LocalExplode) {
+  Time a = Time::Now();
+  Time::Exploded exploded;
+  a.LocalExplode(&exploded);
+
+  Time b;
+  EXPECT_TRUE(Time::FromLocalExploded(exploded, &b));
+
+  // The exploded structure doesn't have microseconds, and on Mac & Linux, the
+  // internal OS conversion uses seconds, which will cause truncation. So we
+  // can only make sure that the delta is within one second.
+  EXPECT_TRUE((a - b) < TimeDelta::FromSeconds(1));
+}
+
+TEST_F(TimeTest, UTCExplode) {
+  // Same round-trip as LocalExplode above, but in UTC.
+  Time a = Time::Now();
+  Time::Exploded exploded;
+  a.UTCExplode(&exploded);
+
+  Time b;
+  EXPECT_TRUE(Time::FromUTCExploded(exploded, &b));
+  EXPECT_TRUE((a - b) < TimeDelta::FromSeconds(1));
+}
+
+TEST_F(TimeTest, LocalMidnight) {
+  // LocalMidnight() must zero out all the time-of-day fields.
+  Time::Exploded exploded;
+  Time::Now().LocalMidnight().LocalExplode(&exploded);
+  EXPECT_EQ(0, exploded.hour);
+  EXPECT_EQ(0, exploded.minute);
+  EXPECT_EQ(0, exploded.second);
+  EXPECT_EQ(0, exploded.millisecond);
+}
+
+TEST_F(TimeTest, ParseTimeTest1) {
+  // Round-trip: format the current local time with asctime() and verify that
+  // Time::FromString() parses it back to the same time_t.
+  //
+  // NOTE: the original patch text had "&current_time" mojibake-corrupted to
+  // "¤t_time" ("&curren" read as an HTML entity); restored here.
+  time_t current_time = 0;
+  time(&current_time);
+
+  const int BUFFER_SIZE = 64;
+  struct tm local_time = {0};
+  char time_buf[BUFFER_SIZE] = {0};
+#if defined(OS_WIN)
+  localtime_s(&local_time, &current_time);
+  asctime_s(time_buf, arraysize(time_buf), &local_time);
+#elif defined(OS_POSIX)
+  localtime_r(&current_time, &local_time);
+  asctime_r(&local_time, time_buf);
+#endif
+
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString(time_buf, &parsed_time));
+  EXPECT_EQ(current_time, parsed_time.ToTimeT());
+}
+
+// Spot-check Exploded::day_of_week: 0 = Sunday through 6 = Saturday.
+TEST_F(TimeTest, DayOfWeekSunday) {
+  Time time;
+  EXPECT_TRUE(Time::FromString("Sun, 06 May 2012 12:00:00 GMT", &time));
+  Time::Exploded exploded;
+  time.UTCExplode(&exploded);
+  EXPECT_EQ(0, exploded.day_of_week);
+}
+
+TEST_F(TimeTest, DayOfWeekWednesday) {
+  Time time;
+  EXPECT_TRUE(Time::FromString("Wed, 09 May 2012 12:00:00 GMT", &time));
+  Time::Exploded exploded;
+  time.UTCExplode(&exploded);
+  EXPECT_EQ(3, exploded.day_of_week);
+}
+
+TEST_F(TimeTest, DayOfWeekSaturday) {
+  Time time;
+  EXPECT_TRUE(Time::FromString("Sat, 12 May 2012 12:00:00 GMT", &time));
+  Time::Exploded exploded;
+  time.UTCExplode(&exploded);
+  EXPECT_EQ(6, exploded.day_of_week);
+}
+
+// The FromString() cases below exercise a variety of date formats, comparing
+// against the fixture's two reference times for 15 Oct 2007 12:45:00
+// (local and PDT).
+TEST_F(TimeTest, ParseTimeTest2) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("Mon, 15 Oct 2007 19:45:00 GMT", &parsed_time));
+  EXPECT_EQ(comparison_time_pdt_, parsed_time);
+}
+
+TEST_F(TimeTest, ParseTimeTest3) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("15 Oct 07 12:45:00", &parsed_time));
+  EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(TimeTest, ParseTimeTest4) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("15 Oct 07 19:45 GMT", &parsed_time));
+  EXPECT_EQ(comparison_time_pdt_, parsed_time);
+}
+
+TEST_F(TimeTest, ParseTimeTest5) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("Mon Oct 15 12:45 PDT 2007", &parsed_time));
+  EXPECT_EQ(comparison_time_pdt_, parsed_time);
+}
+
+TEST_F(TimeTest, ParseTimeTest6) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("Monday, Oct 15, 2007 12:45 PM", &parsed_time));
+  EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(TimeTest, ParseTimeTest7) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("10/15/07 12:45:00 PM", &parsed_time));
+  EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(TimeTest, ParseTimeTest8) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("15-OCT-2007 12:45pm", &parsed_time));
+  EXPECT_EQ(comparison_time_local_, parsed_time);
+}
+
+TEST_F(TimeTest, ParseTimeTest9) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("16 Oct 2007 4:45-JST (Tuesday)", &parsed_time));
+  EXPECT_EQ(comparison_time_pdt_, parsed_time);
+}
+
+TEST_F(TimeTest, ParseTimeTest10) {
+  Time parsed_time;
+  EXPECT_TRUE(Time::FromString("15/10/07 12:45", &parsed_time));
+  EXPECT_EQ(parsed_time, comparison_time_local_);
+}
+
+// Test edge cases around the Unix epoch: 0, +/-1 and +/-2 seconds, a pre-1970
+// date, and post-2038 overflow of a 32-bit time_t.
+TEST_F(TimeTest, ParseTimeTestEpoch0) {
+  Time parsed_time;
+
+  // time_t == epoch == 0
+  EXPECT_TRUE(Time::FromString("Thu Jan 01 01:00:00 +0100 1970",
+                               &parsed_time));
+  EXPECT_EQ(0, parsed_time.ToTimeT());
+  EXPECT_TRUE(Time::FromString("Thu Jan 01 00:00:00 GMT 1970",
+                               &parsed_time));
+  EXPECT_EQ(0, parsed_time.ToTimeT());
+}
+
+TEST_F(TimeTest, ParseTimeTestEpoch1) {
+  Time parsed_time;
+
+  // time_t == 1 second after epoch == 1
+  EXPECT_TRUE(Time::FromString("Thu Jan 01 01:00:01 +0100 1970",
+                               &parsed_time));
+  EXPECT_EQ(1, parsed_time.ToTimeT());
+  EXPECT_TRUE(Time::FromString("Thu Jan 01 00:00:01 GMT 1970",
+                               &parsed_time));
+  EXPECT_EQ(1, parsed_time.ToTimeT());
+}
+
+TEST_F(TimeTest, ParseTimeTestEpoch2) {
+  Time parsed_time;
+
+  // time_t == 2 seconds after epoch == 2
+  EXPECT_TRUE(Time::FromString("Thu Jan 01 01:00:02 +0100 1970",
+                               &parsed_time));
+  EXPECT_EQ(2, parsed_time.ToTimeT());
+  EXPECT_TRUE(Time::FromString("Thu Jan 01 00:00:02 GMT 1970",
+                               &parsed_time));
+  EXPECT_EQ(2, parsed_time.ToTimeT());
+}
+
+TEST_F(TimeTest, ParseTimeTestEpochNeg1) {
+  Time parsed_time;
+
+  // time_t == 1 second before epoch == -1
+  EXPECT_TRUE(Time::FromString("Thu Jan 01 00:59:59 +0100 1970",
+                               &parsed_time));
+  EXPECT_EQ(-1, parsed_time.ToTimeT());
+  EXPECT_TRUE(Time::FromString("Wed Dec 31 23:59:59 GMT 1969",
+                               &parsed_time));
+  EXPECT_EQ(-1, parsed_time.ToTimeT());
+}
+
+// If time_t is 32 bits, a date after year 2038 will overflow time_t and
+// cause timegm() to return -1. The parsed time should not be 1 second
+// before epoch.
+TEST_F(TimeTest, ParseTimeTestEpochNotNeg1) {
+  Time parsed_time;
+
+  EXPECT_TRUE(Time::FromString("Wed Dec 31 23:59:59 GMT 2100",
+                               &parsed_time));
+  EXPECT_NE(-1, parsed_time.ToTimeT());
+}
+
+TEST_F(TimeTest, ParseTimeTestEpochNeg2) {
+  Time parsed_time;
+
+  // time_t == 2 seconds before epoch == -2
+  EXPECT_TRUE(Time::FromString("Thu Jan 01 00:59:58 +0100 1970",
+                               &parsed_time));
+  EXPECT_EQ(-2, parsed_time.ToTimeT());
+  EXPECT_TRUE(Time::FromString("Wed Dec 31 23:59:58 GMT 1969",
+                               &parsed_time));
+  EXPECT_EQ(-2, parsed_time.ToTimeT());
+}
+
+TEST_F(TimeTest, ParseTimeTestEpoch1960) {
+  Time parsed_time;
+
+  // time_t before Epoch, in 1960
+  EXPECT_TRUE(Time::FromString("Wed Jun 29 19:40:01 +0100 1960",
+                               &parsed_time));
+  EXPECT_EQ(-299999999, parsed_time.ToTimeT());
+  EXPECT_TRUE(Time::FromString("Wed Jun 29 18:40:01 GMT 1960",
+                               &parsed_time));
+  EXPECT_EQ(-299999999, parsed_time.ToTimeT());
+  EXPECT_TRUE(Time::FromString("Wed Jun 29 17:40:01 GMT 1960",
+                               &parsed_time));
+  EXPECT_EQ(-300003599, parsed_time.ToTimeT());
+}
+
+TEST_F(TimeTest, ParseTimeTestEmpty) {
+  // An empty string must be rejected rather than parsed as some default.
+  Time parsed_time;
+  EXPECT_FALSE(Time::FromString("", &parsed_time));
+}
+
+TEST_F(TimeTest, ParseTimeTestInvalidString) {
+  Time parsed_time;
+  EXPECT_FALSE(Time::FromString("Monday morning 2000", &parsed_time));
+}
+
+TEST_F(TimeTest, ExplodeBeforeUnixEpoch) {
+  static const int kUnixEpochYear = 1970;  // In case this changes (ha!).
+  Time t;
+  Time::Exploded exploded;
+
+  // Times before the epoch must still explode to in-range field values:
+  // sub-second amounts borrow from the previous second rather than going
+  // negative.
+  t = Time::UnixEpoch() - TimeDelta::FromMicroseconds(1);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1969-12-31 23:59:59 999 milliseconds (and 999 microseconds).
+  EXPECT_EQ(kUnixEpochYear - 1, exploded.year);
+  EXPECT_EQ(12, exploded.month);
+  EXPECT_EQ(31, exploded.day_of_month);
+  EXPECT_EQ(23, exploded.hour);
+  EXPECT_EQ(59, exploded.minute);
+  EXPECT_EQ(59, exploded.second);
+  EXPECT_EQ(999, exploded.millisecond);
+
+  t = Time::UnixEpoch() - TimeDelta::FromMicroseconds(1000);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1969-12-31 23:59:59 999 milliseconds.
+  EXPECT_EQ(kUnixEpochYear - 1, exploded.year);
+  EXPECT_EQ(12, exploded.month);
+  EXPECT_EQ(31, exploded.day_of_month);
+  EXPECT_EQ(23, exploded.hour);
+  EXPECT_EQ(59, exploded.minute);
+  EXPECT_EQ(59, exploded.second);
+  EXPECT_EQ(999, exploded.millisecond);
+
+  t = Time::UnixEpoch() - TimeDelta::FromMicroseconds(1001);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1969-12-31 23:59:59 998 milliseconds (and 999 microseconds).
+  EXPECT_EQ(kUnixEpochYear - 1, exploded.year);
+  EXPECT_EQ(12, exploded.month);
+  EXPECT_EQ(31, exploded.day_of_month);
+  EXPECT_EQ(23, exploded.hour);
+  EXPECT_EQ(59, exploded.minute);
+  EXPECT_EQ(59, exploded.second);
+  EXPECT_EQ(998, exploded.millisecond);
+
+  t = Time::UnixEpoch() - TimeDelta::FromMilliseconds(1000);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1969-12-31 23:59:59.
+  EXPECT_EQ(kUnixEpochYear - 1, exploded.year);
+  EXPECT_EQ(12, exploded.month);
+  EXPECT_EQ(31, exploded.day_of_month);
+  EXPECT_EQ(23, exploded.hour);
+  EXPECT_EQ(59, exploded.minute);
+  EXPECT_EQ(59, exploded.second);
+  EXPECT_EQ(0, exploded.millisecond);
+
+  t = Time::UnixEpoch() - TimeDelta::FromMilliseconds(1001);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1969-12-31 23:59:58 999 milliseconds.
+  EXPECT_EQ(kUnixEpochYear - 1, exploded.year);
+  EXPECT_EQ(12, exploded.month);
+  EXPECT_EQ(31, exploded.day_of_month);
+  EXPECT_EQ(23, exploded.hour);
+  EXPECT_EQ(59, exploded.minute);
+  EXPECT_EQ(58, exploded.second);
+  EXPECT_EQ(999, exploded.millisecond);
+
+  // Make sure we still handle at/after Unix epoch correctly.
+  t = Time::UnixEpoch();
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1970-01-01 00:00:00 0 milliseconds.
+  EXPECT_EQ(kUnixEpochYear, exploded.year);
+  EXPECT_EQ(1, exploded.month);
+  EXPECT_EQ(1, exploded.day_of_month);
+  EXPECT_EQ(0, exploded.hour);
+  EXPECT_EQ(0, exploded.minute);
+  EXPECT_EQ(0, exploded.second);
+  EXPECT_EQ(0, exploded.millisecond);
+
+  t = Time::UnixEpoch() + TimeDelta::FromMicroseconds(1);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1970-01-01 00:00:00 0 milliseconds (and 1 microsecond).
+  EXPECT_EQ(kUnixEpochYear, exploded.year);
+  EXPECT_EQ(1, exploded.month);
+  EXPECT_EQ(1, exploded.day_of_month);
+  EXPECT_EQ(0, exploded.hour);
+  EXPECT_EQ(0, exploded.minute);
+  EXPECT_EQ(0, exploded.second);
+  EXPECT_EQ(0, exploded.millisecond);
+
+  t = Time::UnixEpoch() + TimeDelta::FromMicroseconds(1000);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1970-01-01 00:00:00 1 millisecond.
+  EXPECT_EQ(kUnixEpochYear, exploded.year);
+  EXPECT_EQ(1, exploded.month);
+  EXPECT_EQ(1, exploded.day_of_month);
+  EXPECT_EQ(0, exploded.hour);
+  EXPECT_EQ(0, exploded.minute);
+  EXPECT_EQ(0, exploded.second);
+  EXPECT_EQ(1, exploded.millisecond);
+
+  t = Time::UnixEpoch() + TimeDelta::FromMilliseconds(1000);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1970-01-01 00:00:01.
+  EXPECT_EQ(kUnixEpochYear, exploded.year);
+  EXPECT_EQ(1, exploded.month);
+  EXPECT_EQ(1, exploded.day_of_month);
+  EXPECT_EQ(0, exploded.hour);
+  EXPECT_EQ(0, exploded.minute);
+  EXPECT_EQ(1, exploded.second);
+  EXPECT_EQ(0, exploded.millisecond);
+
+  t = Time::UnixEpoch() + TimeDelta::FromMilliseconds(1001);
+  t.UTCExplode(&exploded);
+  EXPECT_TRUE(exploded.HasValidValues());
+  // Should be 1970-01-01 00:00:01 1 millisecond.
+  EXPECT_EQ(kUnixEpochYear, exploded.year);
+  EXPECT_EQ(1, exploded.month);
+  EXPECT_EQ(1, exploded.day_of_month);
+  EXPECT_EQ(0, exploded.hour);
+  EXPECT_EQ(0, exploded.minute);
+  EXPECT_EQ(1, exploded.second);
+  EXPECT_EQ(1, exploded.millisecond);
+}
+
+TEST_F(TimeTest, Max) {
+  // Time::Max() must compare greater than any finite Time.
+  Time max = Time::Max();
+  EXPECT_TRUE(max.is_max());
+  EXPECT_EQ(max, Time::Max());
+  EXPECT_GT(max, Time::Now());
+  EXPECT_GT(max, Time());
+}
+
+TEST_F(TimeTest, MaxConversions) {
+  // Time::Max() must survive a round-trip through every platform-specific
+  // conversion below, saturating rather than overflowing.
+  Time t = Time::Max();
+  EXPECT_EQ(std::numeric_limits<int64_t>::max(), t.ToInternalValue());
+
+  t = Time::FromDoubleT(std::numeric_limits<double>::infinity());
+  EXPECT_TRUE(t.is_max());
+  EXPECT_EQ(std::numeric_limits<double>::infinity(), t.ToDoubleT());
+
+  t = Time::FromJsTime(std::numeric_limits<double>::infinity());
+  EXPECT_TRUE(t.is_max());
+  EXPECT_EQ(std::numeric_limits<double>::infinity(), t.ToJsTime());
+
+  t = Time::FromTimeT(std::numeric_limits<time_t>::max());
+  EXPECT_TRUE(t.is_max());
+  EXPECT_EQ(std::numeric_limits<time_t>::max(), t.ToTimeT());
+
+#if defined(OS_POSIX)
+  struct timeval tval;
+  tval.tv_sec = std::numeric_limits<time_t>::max();
+  tval.tv_usec = static_cast<suseconds_t>(Time::kMicrosecondsPerSecond) - 1;
+  t = Time::FromTimeVal(tval);
+  EXPECT_TRUE(t.is_max());
+  tval = t.ToTimeVal();
+  EXPECT_EQ(std::numeric_limits<time_t>::max(), tval.tv_sec);
+  EXPECT_EQ(static_cast<suseconds_t>(Time::kMicrosecondsPerSecond) - 1,
+            tval.tv_usec);
+#endif
+
+#if defined(OS_MACOSX)
+  t = Time::FromCFAbsoluteTime(std::numeric_limits<CFAbsoluteTime>::infinity());
+  EXPECT_TRUE(t.is_max());
+  EXPECT_EQ(std::numeric_limits<CFAbsoluteTime>::infinity(),
+            t.ToCFAbsoluteTime());
+#endif
+
+#if defined(OS_WIN)
+  FILETIME ftime;
+  ftime.dwHighDateTime = std::numeric_limits<DWORD>::max();
+  ftime.dwLowDateTime = std::numeric_limits<DWORD>::max();
+  t = Time::FromFileTime(ftime);
+  EXPECT_TRUE(t.is_max());
+  ftime = t.ToFileTime();
+  EXPECT_EQ(std::numeric_limits<DWORD>::max(), ftime.dwHighDateTime);
+  EXPECT_EQ(std::numeric_limits<DWORD>::max(), ftime.dwLowDateTime);
+#endif
+}
+
+#if defined(OS_MACOSX)
+TEST_F(TimeTest, TimeTOverflow) {
+  // A near-max internal value must clamp to time_t's max, not wrap around.
+  Time t = Time::FromInternalValue(std::numeric_limits<int64_t>::max() - 1);
+  EXPECT_FALSE(t.is_max());
+  EXPECT_EQ(std::numeric_limits<time_t>::max(), t.ToTimeT());
+}
+#endif
+
+#if defined(OS_ANDROID)
+TEST_F(TimeTest, FromLocalExplodedCrashOnAndroid) {
+  // This crashed inside Time::FromLocalExploded() on Android 4.1.2.
+  // See http://crbug.com/287821
+  Time::Exploded midnight = {2013,  // year
+                             10,    // month
+                             0,     // day_of_week
+                             13,    // day_of_month
+                             0,     // hour
+                             0,     // minute
+                             0,     // second
+                             // millisecond is omitted; aggregate
+                             // initialization zero-initializes it.
+  };
+  // The string passed to putenv() must be a char* and the documentation states
+  // that it 'becomes part of the environment', so use a static buffer.
+  static char buffer[] = "TZ=America/Santiago";
+  putenv(buffer);
+  tzset();
+  Time t;
+  EXPECT_TRUE(Time::FromLocalExploded(midnight, &t));
+  EXPECT_EQ(1381633200, t.ToTimeT());
+}
+#endif  // OS_ANDROID
+
+TEST(TimeTicks, Deltas) {
+  // A 10ms sleep must register as roughly 10ms of elapsed TimeTicks.
+  for (int index = 0; index < 50; index++) {
+    TimeTicks ticks_start = TimeTicks::Now();
+    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(10));
+    TimeTicks ticks_stop = TimeTicks::Now();
+    TimeDelta delta = ticks_stop - ticks_start;
+    // Note:  Although we asked for a 10ms sleep, if the
+    // time clock has a finer granularity than the Sleep()
+    // clock, it is quite possible to wakeup early.  Here
+    // is how that works:
+    //      Time(ms timer)      Time(us timer)
+    //          5                   5010
+    //          6                   6010
+    //          7                   7010
+    //          8                   8010
+    //          9                   9000
+    // Elapsed  4ms                 3990us
+    //
+    // Unfortunately, our InMilliseconds() function truncates
+    // rather than rounds.  We should consider fixing this
+    // so that our averages come out better.
+    EXPECT_GE(delta.InMilliseconds(), 9);
+    EXPECT_GE(delta.InMicroseconds(), 9000);
+    EXPECT_EQ(delta.InSeconds(), 0);
+  }
+}
+
+// Spins until |GetTicks| is observed to advance, and requires at least one
+// such minimal step to be below 15ms.
+static void HighResClockTest(TimeTicks (*GetTicks)()) {
+  // IsHighResolution() is false on some systems.  Since the product still works
+  // even if it's false, it makes this entire test questionable.
+  if (!TimeTicks::IsHighResolution())
+    return;
+
+  // Why do we loop here?
+  // We're trying to measure that intervals increment in a VERY small amount
+  // of time --  less than 15ms.  Unfortunately, if we happen to have a
+  // context switch in the middle of our test, the context switch could easily
+  // exceed our limit.  So, we iterate on this several times.  As long as we're
+  // able to detect the fine-granularity timers at least once, then the test
+  // has succeeded.
+
+  const int kTargetGranularityUs = 15000;  // 15ms
+
+  bool success = false;
+  int retries = 100;  // Arbitrary.
+  TimeDelta delta;
+  while (!success && retries--) {
+    TimeTicks ticks_start = GetTicks();
+    // Loop until we can detect that the clock has changed.  Non-HighRes timers
+    // will increment in chunks, e.g. 15ms.  By spinning until we see a clock
+    // change, we detect the minimum time between measurements.
+    do {
+      delta = GetTicks() - ticks_start;
+    } while (delta.InMilliseconds() == 0);
+
+    if (delta.InMicroseconds() <= kTargetGranularityUs)
+      success = true;
+  }
+
+  // In high resolution mode, we expect to see the clock increment
+  // in intervals less than 15ms.
+  EXPECT_TRUE(success);
+}
+
+TEST(TimeTicks, HighRes) {
+  HighResClockTest(&TimeTicks::Now);
+}
+
+// Fails frequently on Android http://crbug.com/352633 with:
+// Expected: (delta_thread.InMicroseconds()) > (0), actual: 0 vs 0
+#if defined(OS_ANDROID)
+#define MAYBE_ThreadNow DISABLED_ThreadNow
+#else
+#define MAYBE_ThreadNow ThreadNow
+#endif
+TEST(ThreadTicks, MAYBE_ThreadNow) {
+  // ThreadTicks should advance while the thread runs but not while it sleeps.
+  if (ThreadTicks::IsSupported()) {
+    ThreadTicks::WaitUntilInitialized();
+    TimeTicks begin = TimeTicks::Now();
+    ThreadTicks begin_thread = ThreadTicks::Now();
+    // Make sure that ThreadNow value is non-zero.
+    EXPECT_GT(begin_thread, ThreadTicks());
+    // Sleep for 10 milliseconds to get the thread de-scheduled.
+    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(10));
+    ThreadTicks end_thread = ThreadTicks::Now();
+    TimeTicks end = TimeTicks::Now();
+    TimeDelta delta = end - begin;
+    TimeDelta delta_thread = end_thread - begin_thread;
+    // Make sure that some thread time has elapsed.
+    EXPECT_GT(delta_thread.InMicroseconds(), 0);
+    // But the thread time is at least 9ms less than clock time.
+    TimeDelta difference = delta - delta_thread;
+    EXPECT_GE(difference.InMicroseconds(), 9000);
+  }
+}
+
+TEST(TimeTicks, SnappedToNextTickBasic) {
+  // SnappedToNextTick(phase, interval) rounds a timestamp up to the next
+  // point of the form phase + k * interval; timestamps already on such a
+  // point are returned unchanged.
+  base::TimeTicks phase = base::TimeTicks::FromInternalValue(4000);
+  base::TimeDelta interval = base::TimeDelta::FromMicroseconds(1000);
+  base::TimeTicks timestamp;
+
+  // Timestamp in previous interval.
+  timestamp = base::TimeTicks::FromInternalValue(3500);
+  EXPECT_EQ(4000,
+            timestamp.SnappedToNextTick(phase, interval).ToInternalValue());
+
+  // Timestamp in next interval.
+  timestamp = base::TimeTicks::FromInternalValue(4500);
+  EXPECT_EQ(5000,
+            timestamp.SnappedToNextTick(phase, interval).ToInternalValue());
+
+  // Timestamp multiple intervals before.
+  timestamp = base::TimeTicks::FromInternalValue(2500);
+  EXPECT_EQ(3000,
+            timestamp.SnappedToNextTick(phase, interval).ToInternalValue());
+
+  // Timestamp multiple intervals after.
+  timestamp = base::TimeTicks::FromInternalValue(6500);
+  EXPECT_EQ(7000,
+            timestamp.SnappedToNextTick(phase, interval).ToInternalValue());
+
+  // Timestamp on previous interval.
+  timestamp = base::TimeTicks::FromInternalValue(3000);
+  EXPECT_EQ(3000,
+            timestamp.SnappedToNextTick(phase, interval).ToInternalValue());
+
+  // Timestamp on next interval.
+  timestamp = base::TimeTicks::FromInternalValue(5000);
+  EXPECT_EQ(5000,
+            timestamp.SnappedToNextTick(phase, interval).ToInternalValue());
+
+  // Timestamp equal to phase.
+  timestamp = base::TimeTicks::FromInternalValue(4000);
+  EXPECT_EQ(4000,
+            timestamp.SnappedToNextTick(phase, interval).ToInternalValue());
+}
+
+TEST(TimeTicks, SnappedToNextTickOverflow) {
+  // int(big_timestamp / interval) < 0, so this causes a crash if the number of
+  // intervals elapsed is attempted to be stored in an int.
+  base::TimeTicks phase = base::TimeTicks::FromInternalValue(0);
+  base::TimeDelta interval = base::TimeDelta::FromMicroseconds(4000);
+  base::TimeTicks big_timestamp =
+      base::TimeTicks::FromInternalValue(8635916564000);
+
+  // |big_timestamp| lies exactly on a tick for both phases, so it must snap
+  // to itself.
+  EXPECT_EQ(8635916564000,
+            big_timestamp.SnappedToNextTick(phase, interval).ToInternalValue());
+  EXPECT_EQ(8635916564000,
+            big_timestamp.SnappedToNextTick(big_timestamp, interval)
+                .ToInternalValue());
+}
+
+TEST(TimeDelta, FromAndIn) {
+  // static_assert also checks that the contained expression is a constant
+  // expression, meaning all its components are suitable for initializing global
+  // variables.
+  static_assert(TimeDelta::FromDays(2) == TimeDelta::FromHours(48), "");
+  static_assert(TimeDelta::FromHours(3) == TimeDelta::FromMinutes(180), "");
+  static_assert(TimeDelta::FromMinutes(2) == TimeDelta::FromSeconds(120), "");
+  static_assert(TimeDelta::FromSeconds(2) == TimeDelta::FromMilliseconds(2000),
+                "");
+  static_assert(
+      TimeDelta::FromMilliseconds(2) == TimeDelta::FromMicroseconds(2000), "");
+  static_assert(
+      TimeDelta::FromSecondsD(2.3) == TimeDelta::FromMilliseconds(2300), "");
+  static_assert(
+      TimeDelta::FromMillisecondsD(2.5) == TimeDelta::FromMicroseconds(2500),
+      "");
+  // The integral In*() accessors truncate toward zero (13.1s -> 13s below).
+  EXPECT_EQ(13, TimeDelta::FromDays(13).InDays());
+  EXPECT_EQ(13, TimeDelta::FromHours(13).InHours());
+  EXPECT_EQ(13, TimeDelta::FromMinutes(13).InMinutes());
+  EXPECT_EQ(13, TimeDelta::FromSeconds(13).InSeconds());
+  EXPECT_EQ(13.0, TimeDelta::FromSeconds(13).InSecondsF());
+  EXPECT_EQ(13, TimeDelta::FromMilliseconds(13).InMilliseconds());
+  EXPECT_EQ(13.0, TimeDelta::FromMilliseconds(13).InMillisecondsF());
+  EXPECT_EQ(13, TimeDelta::FromSecondsD(13.1).InSeconds());
+  EXPECT_EQ(13.1, TimeDelta::FromSecondsD(13.1).InSecondsF());
+  EXPECT_EQ(13, TimeDelta::FromMillisecondsD(13.3).InMilliseconds());
+  EXPECT_EQ(13.3, TimeDelta::FromMillisecondsD(13.3).InMillisecondsF());
+  EXPECT_EQ(13, TimeDelta::FromMicroseconds(13).InMicroseconds());
+  EXPECT_EQ(3.456, TimeDelta::FromMillisecondsD(3.45678).InMillisecondsF());
+}
+
+#if defined(OS_POSIX)
+TEST(TimeDelta, TimeSpecConversion) {
+  // ToTimeSpec() splits the delta into whole seconds plus nanoseconds.
+  struct timespec result = TimeDelta::FromSeconds(0).ToTimeSpec();
+  EXPECT_EQ(result.tv_sec, 0);
+  EXPECT_EQ(result.tv_nsec, 0);
+
+  result = TimeDelta::FromSeconds(1).ToTimeSpec();
+  EXPECT_EQ(result.tv_sec, 1);
+  EXPECT_EQ(result.tv_nsec, 0);
+
+  result = TimeDelta::FromMicroseconds(1).ToTimeSpec();
+  EXPECT_EQ(result.tv_sec, 0);
+  EXPECT_EQ(result.tv_nsec, 1000);
+
+  result = TimeDelta::FromMicroseconds(
+      Time::kMicrosecondsPerSecond + 1).ToTimeSpec();
+  EXPECT_EQ(result.tv_sec, 1);
+  EXPECT_EQ(result.tv_nsec, 1000);
+}
+#endif  // OS_POSIX
+
+// Our internal time format is serialized in things like databases, so it's
+// important that it's consistent across all our platforms.  We use the 1601
+// Windows epoch as the internal format across all platforms.
+TEST(TimeDelta, WindowsEpoch) {
+  Time::Exploded exploded;
+  exploded.year = 1970;
+  exploded.month = 1;
+  exploded.day_of_week = 0;  // Should be unused.
+  exploded.day_of_month = 1;
+  exploded.hour = 0;
+  exploded.minute = 0;
+  exploded.second = 0;
+  exploded.millisecond = 0;
+  Time t;
+  EXPECT_TRUE(Time::FromUTCExploded(exploded, &t));
+  // Unix 1970 epoch: 11644473600 seconds (in microseconds) after 1601.
+  EXPECT_EQ(INT64_C(11644473600000000), t.ToInternalValue());
+
+  // We can't test 1601 epoch, since the system time functions on Linux
+  // only compute years starting from 1900.
+}
+
+// We could define this separately for Time, TimeTicks and TimeDelta but the
+// definitions would be identical anyway.
+template <class Any>
+std::string AnyToString(Any any) {
+  // Streams |any| through operator<< and returns the resulting text.
+  // (std::ostringstream requires <sstream>.)
+  std::ostringstream oss;
+  oss << any;
+  return oss.str();
+}
+
+TEST(TimeDelta, Magnitude) {
+  // magnitude() is the absolute value of the delta.
+  const int64_t zero = 0;
+  EXPECT_EQ(TimeDelta::FromMicroseconds(zero),
+            TimeDelta::FromMicroseconds(zero).magnitude());
+
+  const int64_t one = 1;
+  const int64_t negative_one = -1;
+  EXPECT_EQ(TimeDelta::FromMicroseconds(one),
+            TimeDelta::FromMicroseconds(one).magnitude());
+  EXPECT_EQ(TimeDelta::FromMicroseconds(one),
+            TimeDelta::FromMicroseconds(negative_one).magnitude());
+
+  // In two's complement |min + 2| == max - 1, so the magnitudes below match.
+  const int64_t max_int64_minus_one = std::numeric_limits<int64_t>::max() - 1;
+  const int64_t min_int64_plus_two = std::numeric_limits<int64_t>::min() + 2;
+  EXPECT_EQ(TimeDelta::FromMicroseconds(max_int64_minus_one),
+            TimeDelta::FromMicroseconds(max_int64_minus_one).magnitude());
+  EXPECT_EQ(TimeDelta::FromMicroseconds(max_int64_minus_one),
+            TimeDelta::FromMicroseconds(min_int64_plus_two).magnitude());
+}
+
+TEST(TimeDelta, Max) {
+  // TimeDelta::Max() must compare greater than any ordinary delta.
+  TimeDelta max = TimeDelta::Max();
+  EXPECT_TRUE(max.is_max());
+  EXPECT_EQ(max, TimeDelta::Max());
+  EXPECT_GT(max, TimeDelta::FromDays(100 * 365));
+  EXPECT_GT(max, TimeDelta());
+}
+
+// Returns true iff |delta| is the minimum (most negative) TimeDelta, i.e. the
+// value whose negation is TimeDelta::Max().
+bool IsMin(TimeDelta delta) {
+  return (-delta).is_max();
+}
+
+TEST(TimeDelta, MaxConversions) {
+  // Saturated (max/min) deltas must stay saturated through every unit
+  // conversion, in both the positive and negative directions.
+  TimeDelta t = TimeDelta::Max();
+  EXPECT_EQ(std::numeric_limits<int64_t>::max(), t.ToInternalValue());
+
+  EXPECT_EQ(std::numeric_limits<int>::max(), t.InDays());
+  EXPECT_EQ(std::numeric_limits<int>::max(), t.InHours());
+  EXPECT_EQ(std::numeric_limits<int>::max(), t.InMinutes());
+  EXPECT_EQ(std::numeric_limits<double>::infinity(), t.InSecondsF());
+  EXPECT_EQ(std::numeric_limits<int64_t>::max(), t.InSeconds());
+  EXPECT_EQ(std::numeric_limits<double>::infinity(), t.InMillisecondsF());
+  EXPECT_EQ(std::numeric_limits<int64_t>::max(), t.InMilliseconds());
+  EXPECT_EQ(std::numeric_limits<int64_t>::max(), t.InMillisecondsRoundedUp());
+
+  t = TimeDelta::FromDays(std::numeric_limits<int>::max());
+  EXPECT_TRUE(t.is_max());
+
+  t = TimeDelta::FromHours(std::numeric_limits<int>::max());
+  EXPECT_TRUE(t.is_max());
+
+  t = TimeDelta::FromMinutes(std::numeric_limits<int>::max());
+  EXPECT_TRUE(t.is_max());
+
+  int64_t max_int = std::numeric_limits<int64_t>::max();
+
+  // One unit past the largest representable amount must saturate to max...
+  t = TimeDelta::FromSeconds(max_int / Time::kMicrosecondsPerSecond + 1);
+  EXPECT_TRUE(t.is_max());
+
+  t = TimeDelta::FromMilliseconds(max_int / Time::kMillisecondsPerSecond + 1);
+  EXPECT_TRUE(t.is_max());
+
+  t = TimeDelta::FromMicroseconds(max_int);
+  EXPECT_TRUE(t.is_max());
+
+  // ...and symmetrically to min in the negative direction.
+  t = TimeDelta::FromSeconds(-max_int / Time::kMicrosecondsPerSecond - 1);
+  EXPECT_TRUE(IsMin(t));
+
+  t = TimeDelta::FromMilliseconds(-max_int / Time::kMillisecondsPerSecond - 1);
+  EXPECT_TRUE(IsMin(t));
+
+  t = TimeDelta::FromMicroseconds(-max_int);
+  EXPECT_TRUE(IsMin(t));
+
+  t = -TimeDelta::FromMicroseconds(std::numeric_limits<int64_t>::min());
+  EXPECT_FALSE(IsMin(t));
+
+  t = TimeDelta::FromSecondsD(std::numeric_limits<double>::infinity());
+  EXPECT_TRUE(t.is_max());
+
+  double max_d = max_int;
+
+  t = TimeDelta::FromSecondsD(max_d / Time::kMicrosecondsPerSecond + 1);
+  EXPECT_TRUE(t.is_max());
+
+  t = TimeDelta::FromMillisecondsD(std::numeric_limits<double>::infinity());
+  EXPECT_TRUE(t.is_max());
+
+  t = TimeDelta::FromMillisecondsD(max_d / Time::kMillisecondsPerSecond * 2);
+  EXPECT_TRUE(t.is_max());
+
+  t = TimeDelta::FromSecondsD(-max_d / Time::kMicrosecondsPerSecond - 1);
+  EXPECT_TRUE(IsMin(t));
+
+  t = TimeDelta::FromMillisecondsD(-max_d / Time::kMillisecondsPerSecond * 2);
+  EXPECT_TRUE(IsMin(t));
+}
+
+TEST(TimeDelta, NumericOperators) {
+ double d = 0.5;
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ TimeDelta::FromMilliseconds(1000) * d);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ TimeDelta::FromMilliseconds(1000) / d);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ TimeDelta::FromMilliseconds(1000) *= d);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ TimeDelta::FromMilliseconds(1000) /= d);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ d * TimeDelta::FromMilliseconds(1000));
+
+ float f = 0.5;
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ TimeDelta::FromMilliseconds(1000) * f);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ TimeDelta::FromMilliseconds(1000) / f);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ TimeDelta::FromMilliseconds(1000) *= f);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ TimeDelta::FromMilliseconds(1000) /= f);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ f * TimeDelta::FromMilliseconds(1000));
+
+ int i = 2;
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ TimeDelta::FromMilliseconds(1000) * i);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ TimeDelta::FromMilliseconds(1000) / i);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ TimeDelta::FromMilliseconds(1000) *= i);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ TimeDelta::FromMilliseconds(1000) /= i);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ i * TimeDelta::FromMilliseconds(1000));
+
+ int64_t i64 = 2;
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ TimeDelta::FromMilliseconds(1000) * i64);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ TimeDelta::FromMilliseconds(1000) / i64);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ TimeDelta::FromMilliseconds(1000) *= i64);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ TimeDelta::FromMilliseconds(1000) /= i64);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ i64 * TimeDelta::FromMilliseconds(1000));
+
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ TimeDelta::FromMilliseconds(1000) * 0.5);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ TimeDelta::FromMilliseconds(1000) / 0.5);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ TimeDelta::FromMilliseconds(1000) *= 0.5);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ TimeDelta::FromMilliseconds(1000) /= 0.5);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ 0.5 * TimeDelta::FromMilliseconds(1000));
+
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ TimeDelta::FromMilliseconds(1000) * 2);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ TimeDelta::FromMilliseconds(1000) / 2);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ TimeDelta::FromMilliseconds(1000) *= 2);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(500),
+ TimeDelta::FromMilliseconds(1000) /= 2);
+ EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
+ 2 * TimeDelta::FromMilliseconds(1000));
+}
+
+TEST(TimeDelta, Overflows) {
+ // Some sanity checks.
+ EXPECT_TRUE(TimeDelta::Max().is_max());
+ EXPECT_TRUE(IsMin(-TimeDelta::Max()));
+ EXPECT_GT(TimeDelta(), -TimeDelta::Max());
+
+ TimeDelta large_delta = TimeDelta::Max() - TimeDelta::FromMilliseconds(1);
+ TimeDelta large_negative = -large_delta;
+ EXPECT_GT(TimeDelta(), large_negative);
+ EXPECT_FALSE(large_delta.is_max());
+ EXPECT_FALSE(IsMin(-large_negative));
+ TimeDelta one_second = TimeDelta::FromSeconds(1);
+
+ // Test +, -, * and / operators.
+ EXPECT_TRUE((large_delta + one_second).is_max());
+ EXPECT_TRUE(IsMin(large_negative + (-one_second)));
+ EXPECT_TRUE(IsMin(large_negative - one_second));
+ EXPECT_TRUE((large_delta - (-one_second)).is_max());
+ EXPECT_TRUE((large_delta * 2).is_max());
+ EXPECT_TRUE(IsMin(large_delta * -2));
+ EXPECT_TRUE((large_delta / 0.5).is_max());
+ EXPECT_TRUE(IsMin(large_delta / -0.5));
+
+ // Test +=, -=, *= and /= operators.
+ TimeDelta delta = large_delta;
+ delta += one_second;
+ EXPECT_TRUE(delta.is_max());
+ delta = large_negative;
+ delta += -one_second;
+ EXPECT_TRUE(IsMin(delta));
+
+ delta = large_negative;
+ delta -= one_second;
+ EXPECT_TRUE(IsMin(delta));
+ delta = large_delta;
+ delta -= -one_second;
+ EXPECT_TRUE(delta.is_max());
+
+ delta = large_delta;
+ delta *= 2;
+ EXPECT_TRUE(delta.is_max());
+ delta = large_negative;
+ delta *= 1.5;
+ EXPECT_TRUE(IsMin(delta));
+
+ delta = large_delta;
+ delta /= 0.5;
+ EXPECT_TRUE(delta.is_max());
+ delta = large_negative;
+ delta /= 0.5;
+ EXPECT_TRUE(IsMin(delta));
+
+ // Test operations with Time and TimeTicks.
+ EXPECT_TRUE((large_delta + Time::Now()).is_max());
+ EXPECT_TRUE((large_delta + TimeTicks::Now()).is_max());
+ EXPECT_TRUE((Time::Now() + large_delta).is_max());
+ EXPECT_TRUE((TimeTicks::Now() + large_delta).is_max());
+
+ Time time_now = Time::Now();
+ EXPECT_EQ(one_second, (time_now + one_second) - time_now);
+ EXPECT_EQ(-one_second, (time_now - one_second) - time_now);
+
+ TimeTicks ticks_now = TimeTicks::Now();
+ EXPECT_EQ(-one_second, (ticks_now - one_second) - ticks_now);
+ EXPECT_EQ(one_second, (ticks_now + one_second) - ticks_now);
+}
+
+TEST(TimeDeltaLogging, DCheckEqCompiles) {
+ DCHECK_EQ(TimeDelta(), TimeDelta());
+}
+
+TEST(TimeDeltaLogging, EmptyIsZero) {
+ TimeDelta zero;
+ EXPECT_EQ("0s", AnyToString(zero));
+}
+
+TEST(TimeDeltaLogging, FiveHundredMs) {
+ TimeDelta five_hundred_ms = TimeDelta::FromMilliseconds(500);
+ EXPECT_EQ("0.5s", AnyToString(five_hundred_ms));
+}
+
+TEST(TimeDeltaLogging, MinusTenSeconds) {
+ TimeDelta minus_ten_seconds = TimeDelta::FromSeconds(-10);
+ EXPECT_EQ("-10s", AnyToString(minus_ten_seconds));
+}
+
+TEST(TimeDeltaLogging, DoesNotMessUpFormattingFlags) {
+ std::ostringstream oss;
+ std::ios_base::fmtflags flags_before = oss.flags();
+ oss << TimeDelta();
+ EXPECT_EQ(flags_before, oss.flags());
+}
+
+TEST(TimeDeltaLogging, DoesNotMakeStreamBad) {
+ std::ostringstream oss;
+ oss << TimeDelta();
+ EXPECT_TRUE(oss.good());
+}
+
+TEST(TimeLogging, DCheckEqCompiles) {
+ DCHECK_EQ(Time(), Time());
+}
+
+TEST(TimeLogging, ChromeBirthdate) {
+ Time birthdate;
+ ASSERT_TRUE(Time::FromString("Tue, 02 Sep 2008 09:42:18 GMT", &birthdate));
+ EXPECT_EQ("2008-09-02 09:42:18.000 UTC", AnyToString(birthdate));
+}
+
+TEST(TimeLogging, DoesNotMessUpFormattingFlags) {
+ std::ostringstream oss;
+ std::ios_base::fmtflags flags_before = oss.flags();
+ oss << Time();
+ EXPECT_EQ(flags_before, oss.flags());
+}
+
+TEST(TimeLogging, DoesNotMakeStreamBad) {
+ std::ostringstream oss;
+ oss << Time();
+ EXPECT_TRUE(oss.good());
+}
+
+TEST(TimeTicksLogging, DCheckEqCompiles) {
+ DCHECK_EQ(TimeTicks(), TimeTicks());
+}
+
+TEST(TimeTicksLogging, ZeroTime) {
+ TimeTicks zero;
+ EXPECT_EQ("0 bogo-microseconds", AnyToString(zero));
+}
+
+TEST(TimeTicksLogging, FortyYearsLater) {
+ TimeTicks forty_years_later =
+ TimeTicks() + TimeDelta::FromDays(365.25 * 40);
+ EXPECT_EQ("1262304000000000 bogo-microseconds",
+ AnyToString(forty_years_later));
+}
+
+TEST(TimeTicksLogging, DoesNotMessUpFormattingFlags) {
+ std::ostringstream oss;
+ std::ios_base::fmtflags flags_before = oss.flags();
+ oss << TimeTicks();
+ EXPECT_EQ(flags_before, oss.flags());
+}
+
+TEST(TimeTicksLogging, DoesNotMakeStreamBad) {
+ std::ostringstream oss;
+ oss << TimeTicks();
+ EXPECT_TRUE(oss.good());
+}
+
+} // namespace
+
+} // namespace base
diff --git a/libchrome/base/timer/elapsed_timer.cc b/libchrome/base/timer/elapsed_timer.cc
new file mode 100644
index 0000000..f2a2f71
--- /dev/null
+++ b/libchrome/base/timer/elapsed_timer.cc
@@ -0,0 +1,17 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/timer/elapsed_timer.h"
+
+namespace base {
+
+ElapsedTimer::ElapsedTimer() {
+ begin_ = TimeTicks::Now();
+}
+
+TimeDelta ElapsedTimer::Elapsed() const {
+ return TimeTicks::Now() - begin_;
+}
+
+} // namespace base
diff --git a/libchrome/base/timer/elapsed_timer.h b/libchrome/base/timer/elapsed_timer.h
new file mode 100644
index 0000000..592858a
--- /dev/null
+++ b/libchrome/base/timer/elapsed_timer.h
@@ -0,0 +1,30 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIMER_ELAPSED_TIMER_H_
+#define BASE_TIMER_ELAPSED_TIMER_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// A simple wrapper around TimeTicks::Now().
+class BASE_EXPORT ElapsedTimer {
+ public:
+ ElapsedTimer();
+
+ // Returns the time elapsed since object construction.
+ TimeDelta Elapsed() const;
+
+ private:
+ TimeTicks begin_;
+
+ DISALLOW_COPY_AND_ASSIGN(ElapsedTimer);
+};
+
+} // namespace base
+
+#endif // BASE_TIMER_ELAPSED_TIMER_H_
diff --git a/libchrome/base/timer/hi_res_timer_manager.h b/libchrome/base/timer/hi_res_timer_manager.h
new file mode 100644
index 0000000..21cdfaf
--- /dev/null
+++ b/libchrome/base/timer/hi_res_timer_manager.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIMER_HI_RES_TIMER_MANAGER_H_
+#define BASE_TIMER_HI_RES_TIMER_MANAGER_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/power_monitor/power_observer.h"
+
+namespace base {
+
+// Ensures that the Windows high resolution timer is only used
+// when not running on battery power.
+class BASE_EXPORT HighResolutionTimerManager : public base::PowerObserver {
+ public:
+ HighResolutionTimerManager();
+ ~HighResolutionTimerManager() override;
+
+ // base::PowerObserver method.
+ void OnPowerStateChange(bool on_battery_power) override;
+
+ // Returns true if the hi resolution clock could be used right now.
+ bool hi_res_clock_available() const { return hi_res_clock_available_; }
+
+ private:
+ // Enable or disable the faster multimedia timer.
+ void UseHiResClock(bool use);
+
+ bool hi_res_clock_available_;
+
+ DISALLOW_COPY_AND_ASSIGN(HighResolutionTimerManager);
+};
+
+} // namespace base
+
+#endif // BASE_TIMER_HI_RES_TIMER_MANAGER_H_
diff --git a/libchrome/base/timer/hi_res_timer_manager_posix.cc b/libchrome/base/timer/hi_res_timer_manager_posix.cc
new file mode 100644
index 0000000..d2f152c
--- /dev/null
+++ b/libchrome/base/timer/hi_res_timer_manager_posix.cc
@@ -0,0 +1,24 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/timer/hi_res_timer_manager.h"
+
+// On POSIX we don't need to do anything special with the system timer.
+
+namespace base {
+
+HighResolutionTimerManager::HighResolutionTimerManager()
+ : hi_res_clock_available_(false) {
+}
+
+HighResolutionTimerManager::~HighResolutionTimerManager() {
+}
+
+void HighResolutionTimerManager::OnPowerStateChange(bool on_battery_power) {
+}
+
+void HighResolutionTimerManager::UseHiResClock(bool use) {
+}
+
+} // namespace base
diff --git a/libchrome/base/timer/hi_res_timer_manager_unittest.cc b/libchrome/base/timer/hi_res_timer_manager_unittest.cc
new file mode 100644
index 0000000..a0b0f93
--- /dev/null
+++ b/libchrome/base/timer/hi_res_timer_manager_unittest.cc
@@ -0,0 +1,61 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/timer/hi_res_timer_manager.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/message_loop/message_loop.h"
+#include "base/power_monitor/power_monitor.h"
+#include "base/power_monitor/power_monitor_device_source.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+#if defined(OS_WIN)
+TEST(HiResTimerManagerTest, ToggleOnOff) {
+ // The power monitor creates Window to receive power notifications from
+ // Windows, which makes this test flaky if you run while the machine
+ // goes in or out of AC power.
+ base::MessageLoop loop(base::MessageLoop::TYPE_UI);
+ std::unique_ptr<base::PowerMonitorSource> power_monitor_source(
+ new base::PowerMonitorDeviceSource());
+ std::unique_ptr<base::PowerMonitor> power_monitor(
+ new base::PowerMonitor(std::move(power_monitor_source)));
+
+ HighResolutionTimerManager manager;
+  // Simulate an on-AC power event to get to a known initial state.
+ manager.OnPowerStateChange(false);
+
+ // Loop a few times to test power toggling.
+ for (int times = 0; times != 3; ++times) {
+ // The manager has the high resolution clock enabled now.
+ EXPECT_TRUE(manager.hi_res_clock_available());
+ // But the Time class has it off, because it hasn't been activated.
+ EXPECT_FALSE(base::Time::IsHighResolutionTimerInUse());
+
+ // Activate the high resolution timer.
+ base::Time::ActivateHighResolutionTimer(true);
+ EXPECT_TRUE(base::Time::IsHighResolutionTimerInUse());
+
+    // Simulate an on-battery power event.
+ manager.OnPowerStateChange(true);
+ EXPECT_FALSE(manager.hi_res_clock_available());
+ EXPECT_FALSE(base::Time::IsHighResolutionTimerInUse());
+
+ // Back to on-AC power.
+ manager.OnPowerStateChange(false);
+ EXPECT_TRUE(manager.hi_res_clock_available());
+ EXPECT_TRUE(base::Time::IsHighResolutionTimerInUse());
+
+ // De-activate the high resolution timer.
+ base::Time::ActivateHighResolutionTimer(false);
+ }
+}
+#endif // defined(OS_WIN)
+
+} // namespace base
diff --git a/libchrome/base/timer/mock_timer.cc b/libchrome/base/timer/mock_timer.cc
new file mode 100644
index 0000000..296071e
--- /dev/null
+++ b/libchrome/base/timer/mock_timer.cc
@@ -0,0 +1,63 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/timer/mock_timer.h"
+
+namespace base {
+
+MockTimer::MockTimer(bool retain_user_task, bool is_repeating)
+ : Timer(retain_user_task, is_repeating),
+ is_running_(false) {
+}
+
+MockTimer::MockTimer(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task,
+ bool is_repeating)
+ : Timer(true, is_repeating),
+ delay_(delay),
+ is_running_(false) {
+}
+
+MockTimer::~MockTimer() {
+}
+
+bool MockTimer::IsRunning() const {
+ return is_running_;
+}
+
+base::TimeDelta MockTimer::GetCurrentDelay() const {
+ return delay_;
+}
+
+void MockTimer::Start(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task) {
+ delay_ = delay;
+ user_task_ = user_task;
+ Reset();
+}
+
+void MockTimer::Stop() {
+ is_running_ = false;
+ if (!retain_user_task())
+ user_task_.Reset();
+}
+
+void MockTimer::Reset() {
+ DCHECK(!user_task_.is_null());
+ is_running_ = true;
+}
+
+void MockTimer::Fire() {
+ DCHECK(is_running_);
+ base::Closure old_task = user_task_;
+ if (is_repeating())
+ Reset();
+ else
+ Stop();
+ old_task.Run();
+}
+
+} // namespace base
diff --git a/libchrome/base/timer/mock_timer.h b/libchrome/base/timer/mock_timer.h
new file mode 100644
index 0000000..e18a5c0
--- /dev/null
+++ b/libchrome/base/timer/mock_timer.h
@@ -0,0 +1,41 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TIMER_MOCK_TIMER_H_
+#define BASE_TIMER_MOCK_TIMER_H_
+
+#include "base/timer/timer.h"
+
+namespace base {
+
+class BASE_EXPORT MockTimer : public Timer {
+ public:
+ MockTimer(bool retain_user_task, bool is_repeating);
+ MockTimer(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task,
+ bool is_repeating);
+ ~MockTimer() override;
+
+ // base::Timer implementation.
+ bool IsRunning() const override;
+ base::TimeDelta GetCurrentDelay() const override;
+ void Start(const tracked_objects::Location& posted_from,
+ base::TimeDelta delay,
+ const base::Closure& user_task) override;
+ void Stop() override;
+ void Reset() override;
+
+ // Testing methods.
+ void Fire();
+
+ private:
+ base::Closure user_task_;
+ TimeDelta delay_;
+ bool is_running_;
+};
+
+} // namespace base
+
+#endif // BASE_TIMER_MOCK_TIMER_H_
diff --git a/libchrome/base/timer/mock_timer_unittest.cc b/libchrome/base/timer/mock_timer_unittest.cc
new file mode 100644
index 0000000..a389815
--- /dev/null
+++ b/libchrome/base/timer/mock_timer_unittest.cc
@@ -0,0 +1,83 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/timer/mock_timer.h"
+
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+void CallMeMaybe(int *number) {
+ (*number)++;
+}
+
+TEST(MockTimerTest, FiresOnce) {
+ int calls = 0;
+ base::MockTimer timer(false, false);
+ base::TimeDelta delay = base::TimeDelta::FromSeconds(2);
+ timer.Start(FROM_HERE, delay,
+ base::Bind(&CallMeMaybe,
+ base::Unretained(&calls)));
+ EXPECT_EQ(delay, timer.GetCurrentDelay());
+ EXPECT_TRUE(timer.IsRunning());
+ timer.Fire();
+ EXPECT_FALSE(timer.IsRunning());
+ EXPECT_EQ(1, calls);
+}
+
+TEST(MockTimerTest, FiresRepeatedly) {
+ int calls = 0;
+ base::MockTimer timer(true, true);
+ base::TimeDelta delay = base::TimeDelta::FromSeconds(2);
+ timer.Start(FROM_HERE, delay,
+ base::Bind(&CallMeMaybe,
+ base::Unretained(&calls)));
+ timer.Fire();
+ EXPECT_TRUE(timer.IsRunning());
+ timer.Fire();
+ timer.Fire();
+ EXPECT_TRUE(timer.IsRunning());
+ EXPECT_EQ(3, calls);
+}
+
+TEST(MockTimerTest, Stops) {
+ int calls = 0;
+ base::MockTimer timer(true, true);
+ base::TimeDelta delay = base::TimeDelta::FromSeconds(2);
+ timer.Start(FROM_HERE, delay,
+ base::Bind(&CallMeMaybe,
+ base::Unretained(&calls)));
+ EXPECT_TRUE(timer.IsRunning());
+ timer.Stop();
+ EXPECT_FALSE(timer.IsRunning());
+}
+
+class HasWeakPtr : public base::SupportsWeakPtr<HasWeakPtr> {
+ public:
+ HasWeakPtr() {}
+ virtual ~HasWeakPtr() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HasWeakPtr);
+};
+
+void DoNothingWithWeakPtr(HasWeakPtr* has_weak_ptr) {
+}
+
+TEST(MockTimerTest, DoesNotRetainClosure) {
+ HasWeakPtr *has_weak_ptr = new HasWeakPtr();
+ base::WeakPtr<HasWeakPtr> weak_ptr(has_weak_ptr->AsWeakPtr());
+ base::MockTimer timer(false, false);
+ base::TimeDelta delay = base::TimeDelta::FromSeconds(2);
+ ASSERT_TRUE(weak_ptr.get());
+ timer.Start(FROM_HERE, delay,
+ base::Bind(&DoNothingWithWeakPtr,
+ base::Owned(has_weak_ptr)));
+ ASSERT_TRUE(weak_ptr.get());
+ timer.Fire();
+ ASSERT_FALSE(weak_ptr.get());
+}
+
+} // namespace
diff --git a/libchrome/base/timer/timer.cc b/libchrome/base/timer/timer.cc
new file mode 100644
index 0000000..e554905
--- /dev/null
+++ b/libchrome/base/timer/timer.cc
@@ -0,0 +1,218 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/timer/timer.h"
+
+#include <stddef.h>
+
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+namespace base {
+
+// BaseTimerTaskInternal is a simple delegate for scheduling a callback to
+// Timer in the thread's default task runner. It also handles the following
+// edge cases:
+// - deleted by the task runner.
+// - abandoned (orphaned) by Timer.
+class BaseTimerTaskInternal {
+ public:
+ explicit BaseTimerTaskInternal(Timer* timer)
+ : timer_(timer) {
+ }
+
+ ~BaseTimerTaskInternal() {
+ // This task may be getting cleared because the task runner has been
+ // destructed. If so, don't leave Timer with a dangling pointer
+ // to this.
+ if (timer_)
+ timer_->StopAndAbandon();
+ }
+
+ void Run() {
+ // timer_ is NULL if we were abandoned.
+ if (!timer_)
+ return;
+
+ // *this will be deleted by the task runner, so Timer needs to
+ // forget us:
+ timer_->scheduled_task_ = NULL;
+
+ // Although Timer should not call back into *this, let's clear
+ // the timer_ member first to be pedantic.
+ Timer* timer = timer_;
+ timer_ = NULL;
+ timer->RunScheduledTask();
+ }
+
+ // The task remains in the MessageLoop queue, but nothing will happen when it
+ // runs.
+ void Abandon() {
+ timer_ = NULL;
+ }
+
+ private:
+ Timer* timer_;
+};
+
+Timer::Timer(bool retain_user_task, bool is_repeating)
+ : scheduled_task_(NULL),
+ thread_id_(0),
+ is_repeating_(is_repeating),
+ retain_user_task_(retain_user_task),
+ is_running_(false) {
+}
+
+Timer::Timer(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task,
+ bool is_repeating)
+ : scheduled_task_(NULL),
+ posted_from_(posted_from),
+ delay_(delay),
+ user_task_(user_task),
+ thread_id_(0),
+ is_repeating_(is_repeating),
+ retain_user_task_(true),
+ is_running_(false) {
+}
+
+Timer::~Timer() {
+ StopAndAbandon();
+}
+
+bool Timer::IsRunning() const {
+ return is_running_;
+}
+
+TimeDelta Timer::GetCurrentDelay() const {
+ return delay_;
+}
+
+void Timer::SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner) {
+ // Do not allow changing the task runner once something has been scheduled.
+ DCHECK_EQ(thread_id_, 0);
+ task_runner_.swap(task_runner);
+}
+
+void Timer::Start(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task) {
+ SetTaskInfo(posted_from, delay, user_task);
+ Reset();
+}
+
+void Timer::Stop() {
+ is_running_ = false;
+ if (!retain_user_task_)
+ user_task_.Reset();
+}
+
+void Timer::Reset() {
+ DCHECK(!user_task_.is_null());
+
+ // If there's no pending task, start one up and return.
+ if (!scheduled_task_) {
+ PostNewScheduledTask(delay_);
+ return;
+ }
+
+ // Set the new desired_run_time_.
+ if (delay_ > TimeDelta::FromMicroseconds(0))
+ desired_run_time_ = TimeTicks::Now() + delay_;
+ else
+ desired_run_time_ = TimeTicks();
+
+ // We can use the existing scheduled task if it arrives before the new
+ // desired_run_time_.
+ if (desired_run_time_ >= scheduled_run_time_) {
+ is_running_ = true;
+ return;
+ }
+
+ // We can't reuse the scheduled_task_, so abandon it and post a new one.
+ AbandonScheduledTask();
+ PostNewScheduledTask(delay_);
+}
+
+void Timer::SetTaskInfo(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task) {
+ posted_from_ = posted_from;
+ delay_ = delay;
+ user_task_ = user_task;
+}
+
+void Timer::PostNewScheduledTask(TimeDelta delay) {
+ DCHECK(scheduled_task_ == NULL);
+ is_running_ = true;
+ scheduled_task_ = new BaseTimerTaskInternal(this);
+ if (delay > TimeDelta::FromMicroseconds(0)) {
+ GetTaskRunner()->PostDelayedTask(posted_from_,
+ base::Bind(&BaseTimerTaskInternal::Run, base::Owned(scheduled_task_)),
+ delay);
+ scheduled_run_time_ = desired_run_time_ = TimeTicks::Now() + delay;
+ } else {
+ GetTaskRunner()->PostTask(posted_from_,
+ base::Bind(&BaseTimerTaskInternal::Run, base::Owned(scheduled_task_)));
+ scheduled_run_time_ = desired_run_time_ = TimeTicks();
+ }
+ // Remember the thread ID that posts the first task -- this will be verified
+ // later when the task is abandoned to detect misuse from multiple threads.
+ if (!thread_id_) {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ thread_id_ = static_cast<int>(PlatformThread::CurrentId());
+ }
+}
+
+scoped_refptr<SingleThreadTaskRunner> Timer::GetTaskRunner() {
+ return task_runner_.get() ? task_runner_ : ThreadTaskRunnerHandle::Get();
+}
+
+void Timer::AbandonScheduledTask() {
+ DCHECK(thread_id_ == 0 ||
+ thread_id_ == static_cast<int>(PlatformThread::CurrentId()));
+ if (scheduled_task_) {
+ scheduled_task_->Abandon();
+ scheduled_task_ = NULL;
+ }
+}
+
+void Timer::RunScheduledTask() {
+ // Task may have been disabled.
+ if (!is_running_)
+ return;
+
+ // First check if we need to delay the task because of a new target time.
+ if (desired_run_time_ > scheduled_run_time_) {
+ // TimeTicks::Now() can be expensive, so only call it if we know the user
+ // has changed the desired_run_time_.
+ TimeTicks now = TimeTicks::Now();
+ // Task runner may have called us late anyway, so only post a continuation
+ // task if the desired_run_time_ is in the future.
+ if (desired_run_time_ > now) {
+ // Post a new task to span the remaining time.
+ PostNewScheduledTask(desired_run_time_ - now);
+ return;
+ }
+ }
+
+ // Make a local copy of the task to run. The Stop method will reset the
+ // user_task_ member if retain_user_task_ is false.
+ base::Closure task = user_task_;
+
+ if (is_repeating_)
+ PostNewScheduledTask(delay_);
+ else
+ Stop();
+
+ task.Run();
+
+ // No more member accesses here: *this could be deleted at this point.
+}
+
+} // namespace base
diff --git a/libchrome/base/timer/timer.h b/libchrome/base/timer/timer.h
new file mode 100644
index 0000000..661829b
--- /dev/null
+++ b/libchrome/base/timer/timer.h
@@ -0,0 +1,279 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// OneShotTimer and RepeatingTimer provide a simple timer API. As the names
+// suggest, OneShotTimer calls you back once after a time delay expires.
+// RepeatingTimer on the other hand calls you back periodically with the
+// prescribed time interval.
+//
+// OneShotTimer and RepeatingTimer both cancel the timer when they go out of
+// scope, which makes it easy to ensure that you do not get called when your
+// object has gone out of scope. Just instantiate a OneShotTimer or
+// RepeatingTimer as a member variable of the class for which you wish to
+// receive timer events.
+//
+// Sample RepeatingTimer usage:
+//
+// class MyClass {
+// public:
+// void StartDoingStuff() {
+// timer_.Start(FROM_HERE, TimeDelta::FromSeconds(1),
+// this, &MyClass::DoStuff);
+// }
+// void StopDoingStuff() {
+// timer_.Stop();
+// }
+// private:
+// void DoStuff() {
+// // This method is called every second to do stuff.
+// ...
+// }
+// base::RepeatingTimer timer_;
+// };
+//
+// Both OneShotTimer and RepeatingTimer also support a Reset method, which
+// allows you to easily defer the timer event until the timer delay passes once
+// again. So, in the above example, if 0.5 seconds have already passed,
+// calling Reset on timer_ would postpone DoStuff by another 1 second. In
+// other words, Reset is shorthand for calling Stop and then Start again with
+// the same arguments.
+//
+// NOTE: These APIs are not thread safe. Always call from the same thread.
+
+#ifndef BASE_TIMER_TIMER_H_
+#define BASE_TIMER_TIMER_H_
+
+// IMPORTANT: If you change timer code, make sure that all tests (including
+// disabled ones) from timer_unittests.cc pass locally. Some are disabled
+// because they're flaky on the buildbot, but when you run them locally you
+// should be able to tell the difference.
+
+#include "base/base_export.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class BaseTimerTaskInternal;
+class SingleThreadTaskRunner;
+
+//-----------------------------------------------------------------------------
+// This class wraps MessageLoop::PostDelayedTask to manage delayed and repeating
+// tasks. It must be destructed on the same thread that starts tasks. There are
+// DCHECKs in place to verify this.
+//
+class BASE_EXPORT Timer {
+ public:
+ // Construct a timer in repeating or one-shot mode. Start or SetTaskInfo must
+ // be called later to set task info. |retain_user_task| determines whether the
+ // user_task is retained or reset when it runs or stops.
+ Timer(bool retain_user_task, bool is_repeating);
+
+ // Construct a timer with retained task info.
+ Timer(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task,
+ bool is_repeating);
+
+ virtual ~Timer();
+
+ // Returns true if the timer is running (i.e., not stopped).
+ virtual bool IsRunning() const;
+
+ // Returns the current delay for this timer.
+ virtual TimeDelta GetCurrentDelay() const;
+
+ // Set the task runner on which the task should be scheduled. This method can
+ // only be called before any tasks have been scheduled. The task runner must
+ // run tasks on the same thread the timer is used on.
+ virtual void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner);
+
+ // Start the timer to run at the given |delay| from now. If the timer is
+ // already running, it will be replaced to call the given |user_task|.
+ virtual void Start(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task);
+
+ // Call this method to stop and cancel the timer. It is a no-op if the timer
+ // is not running.
+ virtual void Stop();
+
+ // Call this method to reset the timer delay. The user_task_ must be set. If
+ // the timer is not running, this will start it by posting a task.
+ virtual void Reset();
+
+ const base::Closure& user_task() const { return user_task_; }
+ const TimeTicks& desired_run_time() const { return desired_run_time_; }
+
+ protected:
+  // Records the task info (posted_from, delay and user_task) for a task to be
+  // scheduled later via Reset(); it does not itself modify scheduled_task_.
+ void SetTaskInfo(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task);
+
+ void set_user_task(const Closure& task) { user_task_ = task; }
+ void set_desired_run_time(TimeTicks desired) { desired_run_time_ = desired; }
+ void set_is_running(bool running) { is_running_ = running; }
+
+ const tracked_objects::Location& posted_from() const { return posted_from_; }
+ bool retain_user_task() const { return retain_user_task_; }
+ bool is_repeating() const { return is_repeating_; }
+ bool is_running() const { return is_running_; }
+
+ private:
+ friend class BaseTimerTaskInternal;
+
+ // Allocates a new scheduled_task_ and posts it on the current MessageLoop
+ // with the given |delay|. scheduled_task_ must be NULL. scheduled_run_time_
+ // and desired_run_time_ are reset to Now() + delay.
+ void PostNewScheduledTask(TimeDelta delay);
+
+ // Returns the task runner on which the task should be scheduled. If the
+ // corresponding task_runner_ field is null, the task runner for the current
+ // thread is returned.
+ scoped_refptr<SingleThreadTaskRunner> GetTaskRunner();
+
+ // Disable scheduled_task_ and abandon it so that it no longer refers back to
+ // this object.
+ void AbandonScheduledTask();
+
+ // Called by BaseTimerTaskInternal when the MessageLoop runs it.
+ void RunScheduledTask();
+
+ // Stop running task (if any) and abandon scheduled task (if any).
+ void StopAndAbandon() {
+ Stop();
+ AbandonScheduledTask();
+ }
+
+ // When non-NULL, the scheduled_task_ is waiting in the MessageLoop to call
+ // RunScheduledTask() at scheduled_run_time_.
+ BaseTimerTaskInternal* scheduled_task_;
+
+ // The task runner on which the task should be scheduled. If it is null, the
+ // task runner for the current thread should be used.
+ scoped_refptr<SingleThreadTaskRunner> task_runner_;
+
+ // Location in user code.
+ tracked_objects::Location posted_from_;
+ // Delay requested by user.
+ TimeDelta delay_;
+ // user_task_ is what the user wants to be run at desired_run_time_.
+ base::Closure user_task_;
+
+ // The estimated time that the MessageLoop will run the scheduled_task_ that
+ // will call RunScheduledTask(). This time can be a "zero" TimeTicks if the
+ // task must be run immediately.
+ TimeTicks scheduled_run_time_;
+
+ // The desired run time of user_task_. The user may update this at any time,
+ // even if their previous request has not run yet. If desired_run_time_ is
+ // greater than scheduled_run_time_, a continuation task will be posted to
+ // wait for the remaining time. This allows us to reuse the pending task so as
+ // not to flood the MessageLoop with orphaned tasks when the user code
+ // excessively Stops and Starts the timer. This time can be a "zero" TimeTicks
+ // if the task must be run immediately.
+ TimeTicks desired_run_time_;
+
+ // Thread ID of current MessageLoop for verifying single-threaded usage.
+ int thread_id_;
+
+ // Repeating timers automatically post the task again before calling the task
+ // callback.
+ const bool is_repeating_;
+
+ // If true, hold on to the user_task_ closure object for reuse.
+ const bool retain_user_task_;
+
+ // If true, user_task_ is scheduled to run sometime in the future.
+ bool is_running_;
+
+ DISALLOW_COPY_AND_ASSIGN(Timer);
+};
+
+//-----------------------------------------------------------------------------
+// This class is an implementation detail of OneShotTimer and RepeatingTimer.
+// Please do not use this class directly.
+class BaseTimerMethodPointer : public Timer {
+ public:
+ // This is here to work around the fact that Timer::Start is "hidden" by the
+ // Start definition below, rather than being overloaded.
+ // TODO(tim): We should remove uses of BaseTimerMethodPointer::Start below
+ // and convert callers to use the base::Closure version in Timer::Start,
+ // see bug 148832.
+ using Timer::Start;
+
+ enum RepeatMode { ONE_SHOT, REPEATING };
+ BaseTimerMethodPointer(RepeatMode mode)
+ : Timer(mode == REPEATING, mode == REPEATING) {}
+
+ // Start the timer to run at the given |delay| from now. If the timer is
+ // already running, it will be replaced to call a task formed from
+  // |receiver->*method|.
+ template <class Receiver>
+ void Start(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ Receiver* receiver,
+ void (Receiver::*method)()) {
+ Timer::Start(posted_from, delay,
+ base::Bind(method, base::Unretained(receiver)));
+ }
+};
+
+//-----------------------------------------------------------------------------
+// A simple, one-shot timer. See usage notes at the top of the file.
+class OneShotTimer : public BaseTimerMethodPointer {
+ public:
+ OneShotTimer() : BaseTimerMethodPointer(ONE_SHOT) {}
+};
+
+//-----------------------------------------------------------------------------
+// A simple, repeating timer. See usage notes at the top of the file.
+class RepeatingTimer : public BaseTimerMethodPointer {
+ public:
+ RepeatingTimer() : BaseTimerMethodPointer(REPEATING) {}
+};
+
+//-----------------------------------------------------------------------------
+// A Delay timer is like The Button from Lost. Once started, you have to keep
+// calling Reset otherwise it will call the given method in the MessageLoop
+// thread.
+//
+// Once created, it is inactive until Reset is called. Once |delay| seconds have
+// passed since the last call to Reset, the callback is made. Once the callback
+// has been made, it's inactive until Reset is called again.
+//
+// If destroyed, the timeout is canceled and will not occur even if already
+// inflight.
+class DelayTimer : protected Timer {
+ public:
+ template <class Receiver>
+ DelayTimer(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ Receiver* receiver,
+ void (Receiver::*method)())
+ : Timer(posted_from,
+ delay,
+ base::Bind(method, base::Unretained(receiver)),
+ false) {}
+
+ void Reset() override;
+};
+
+// This class has a templated method so it can not be exported without failing
+// to link in MSVC. But clang-plugin does not allow inline definitions of
+// virtual methods, so the inline definition lives in the header file here
+// to satisfy both.
+inline void DelayTimer::Reset() {
+ Timer::Reset();
+}
+
+} // namespace base
+
+#endif // BASE_TIMER_TIMER_H_
diff --git a/libchrome/base/timer/timer_unittest.cc b/libchrome/base/timer/timer_unittest.cc
new file mode 100644
index 0000000..6fcd25b
--- /dev/null
+++ b/libchrome/base/timer/timer_unittest.cc
@@ -0,0 +1,539 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/timer/timer.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/test/test_simple_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::TimeDelta;
+using base::SingleThreadTaskRunner;
+
+namespace {
+
+// The message loops on which each timer should be tested.
+const base::MessageLoop::Type testing_message_loops[] = {
+ base::MessageLoop::TYPE_DEFAULT,
+ base::MessageLoop::TYPE_IO,
+#if !defined(OS_IOS) // iOS does not allow direct running of the UI loop.
+ base::MessageLoop::TYPE_UI,
+#endif
+};
+
+const int kNumTestingMessageLoops = arraysize(testing_message_loops);
+
+class OneShotTimerTester {
+ public:
+ explicit OneShotTimerTester(bool* did_run, unsigned milliseconds = 10)
+ : did_run_(did_run),
+ delay_ms_(milliseconds),
+ quit_message_loop_(true) {
+ }
+
+ void Start() {
+ timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(delay_ms_), this,
+ &OneShotTimerTester::Run);
+ }
+
+ void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner) {
+ quit_message_loop_ = false;
+ timer_.SetTaskRunner(task_runner);
+ }
+
+ private:
+ void Run() {
+ *did_run_ = true;
+ if (quit_message_loop_) {
+ base::MessageLoop::current()->QuitWhenIdle();
+ }
+ }
+
+ bool* did_run_;
+ base::OneShotTimer timer_;
+ const unsigned delay_ms_;
+ bool quit_message_loop_;
+};
+
+class OneShotSelfDeletingTimerTester {
+ public:
+ explicit OneShotSelfDeletingTimerTester(bool* did_run)
+ : did_run_(did_run), timer_(new base::OneShotTimer()) {}
+
+ void Start() {
+ timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(10), this,
+ &OneShotSelfDeletingTimerTester::Run);
+ }
+
+ private:
+ void Run() {
+ *did_run_ = true;
+ timer_.reset();
+ base::MessageLoop::current()->QuitWhenIdle();
+ }
+
+ bool* did_run_;
+ std::unique_ptr<base::OneShotTimer> timer_;
+};
+
+class RepeatingTimerTester {
+ public:
+ explicit RepeatingTimerTester(bool* did_run, const TimeDelta& delay)
+ : did_run_(did_run), counter_(10), delay_(delay) {
+ }
+
+ void Start() {
+ timer_.Start(FROM_HERE, delay_, this, &RepeatingTimerTester::Run);
+ }
+
+ private:
+ void Run() {
+ if (--counter_ == 0) {
+ *did_run_ = true;
+ timer_.Stop();
+ base::MessageLoop::current()->QuitWhenIdle();
+ }
+ }
+
+ bool* did_run_;
+ int counter_;
+ TimeDelta delay_;
+ base::RepeatingTimer timer_;
+};
+
+void RunTest_OneShotTimer(base::MessageLoop::Type message_loop_type) {
+ base::MessageLoop loop(message_loop_type);
+
+ bool did_run = false;
+ OneShotTimerTester f(&did_run);
+ f.Start();
+
+ base::RunLoop().Run();
+
+ EXPECT_TRUE(did_run);
+}
+
+void RunTest_OneShotTimer_Cancel(base::MessageLoop::Type message_loop_type) {
+ base::MessageLoop loop(message_loop_type);
+
+ bool did_run_a = false;
+ OneShotTimerTester* a = new OneShotTimerTester(&did_run_a);
+
+ // This should run before the timer expires.
+ base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
+
+ // Now start the timer.
+ a->Start();
+
+ bool did_run_b = false;
+ OneShotTimerTester b(&did_run_b);
+ b.Start();
+
+ base::RunLoop().Run();
+
+ EXPECT_FALSE(did_run_a);
+ EXPECT_TRUE(did_run_b);
+}
+
+void RunTest_OneShotSelfDeletingTimer(
+ base::MessageLoop::Type message_loop_type) {
+ base::MessageLoop loop(message_loop_type);
+
+ bool did_run = false;
+ OneShotSelfDeletingTimerTester f(&did_run);
+ f.Start();
+
+ base::RunLoop().Run();
+
+ EXPECT_TRUE(did_run);
+}
+
+void RunTest_RepeatingTimer(base::MessageLoop::Type message_loop_type,
+ const TimeDelta& delay) {
+ base::MessageLoop loop(message_loop_type);
+
+ bool did_run = false;
+ RepeatingTimerTester f(&did_run, delay);
+ f.Start();
+
+ base::RunLoop().Run();
+
+ EXPECT_TRUE(did_run);
+}
+
+void RunTest_RepeatingTimer_Cancel(base::MessageLoop::Type message_loop_type,
+ const TimeDelta& delay) {
+ base::MessageLoop loop(message_loop_type);
+
+ bool did_run_a = false;
+ RepeatingTimerTester* a = new RepeatingTimerTester(&did_run_a, delay);
+
+ // This should run before the timer expires.
+ base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
+
+ // Now start the timer.
+ a->Start();
+
+ bool did_run_b = false;
+ RepeatingTimerTester b(&did_run_b, delay);
+ b.Start();
+
+ base::RunLoop().Run();
+
+ EXPECT_FALSE(did_run_a);
+ EXPECT_TRUE(did_run_b);
+}
+
+class DelayTimerTarget {
+ public:
+ bool signaled() const { return signaled_; }
+
+ void Signal() {
+ ASSERT_FALSE(signaled_);
+ signaled_ = true;
+ }
+
+ private:
+ bool signaled_ = false;
+};
+
+void RunTest_DelayTimer_NoCall(base::MessageLoop::Type message_loop_type) {
+ base::MessageLoop loop(message_loop_type);
+
+ // If Delay is never called, the timer shouldn't go off.
+ DelayTimerTarget target;
+ base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
+ &DelayTimerTarget::Signal);
+
+ bool did_run = false;
+ OneShotTimerTester tester(&did_run);
+ tester.Start();
+ base::RunLoop().Run();
+
+ ASSERT_FALSE(target.signaled());
+}
+
+void RunTest_DelayTimer_OneCall(base::MessageLoop::Type message_loop_type) {
+ base::MessageLoop loop(message_loop_type);
+
+ DelayTimerTarget target;
+ base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
+ &DelayTimerTarget::Signal);
+ timer.Reset();
+
+ bool did_run = false;
+ OneShotTimerTester tester(&did_run, 100 /* milliseconds */);
+ tester.Start();
+ base::RunLoop().Run();
+
+ ASSERT_TRUE(target.signaled());
+}
+
+struct ResetHelper {
+ ResetHelper(base::DelayTimer* timer, DelayTimerTarget* target)
+ : timer_(timer), target_(target) {}
+
+ void Reset() {
+ ASSERT_FALSE(target_->signaled());
+ timer_->Reset();
+ }
+
+ private:
+ base::DelayTimer* const timer_;
+ DelayTimerTarget* const target_;
+};
+
+void RunTest_DelayTimer_Reset(base::MessageLoop::Type message_loop_type) {
+ base::MessageLoop loop(message_loop_type);
+
+ // If Delay is never called, the timer shouldn't go off.
+ DelayTimerTarget target;
+ base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
+ &DelayTimerTarget::Signal);
+ timer.Reset();
+
+ ResetHelper reset_helper(&timer, &target);
+
+ base::OneShotTimer timers[20];
+ for (size_t i = 0; i < arraysize(timers); ++i) {
+ timers[i].Start(FROM_HERE, TimeDelta::FromMilliseconds(i * 10),
+ &reset_helper, &ResetHelper::Reset);
+ }
+
+ bool did_run = false;
+ OneShotTimerTester tester(&did_run, 300);
+ tester.Start();
+ base::RunLoop().Run();
+
+ ASSERT_TRUE(target.signaled());
+}
+
+class DelayTimerFatalTarget {
+ public:
+ void Signal() {
+ ASSERT_TRUE(false);
+ }
+};
+
+
+void RunTest_DelayTimer_Deleted(base::MessageLoop::Type message_loop_type) {
+ base::MessageLoop loop(message_loop_type);
+
+ DelayTimerFatalTarget target;
+
+ {
+ base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
+ &DelayTimerFatalTarget::Signal);
+ timer.Reset();
+ }
+
+ // When the timer is deleted, the DelayTimerFatalTarget should never be
+ // called.
+ base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(100));
+}
+
+} // namespace
+
+//-----------------------------------------------------------------------------
+// Each test is run against each type of MessageLoop. That way we are sure
+// that timers work properly in all configurations.
+
+TEST(TimerTest, OneShotTimer) {
+ for (int i = 0; i < kNumTestingMessageLoops; i++) {
+ RunTest_OneShotTimer(testing_message_loops[i]);
+ }
+}
+
+TEST(TimerTest, OneShotTimer_Cancel) {
+ for (int i = 0; i < kNumTestingMessageLoops; i++) {
+ RunTest_OneShotTimer_Cancel(testing_message_loops[i]);
+ }
+}
+
+// If the underlying timer does not handle this properly, we will crash or
+// fail in a full page-heap environment.
+TEST(TimerTest, OneShotSelfDeletingTimer) {
+ for (int i = 0; i < kNumTestingMessageLoops; i++) {
+ RunTest_OneShotSelfDeletingTimer(testing_message_loops[i]);
+ }
+}
+
+TEST(TimerTest, OneShotTimer_CustomTaskRunner) {
+ scoped_refptr<base::TestSimpleTaskRunner> task_runner =
+ new base::TestSimpleTaskRunner();
+
+ bool did_run = false;
+ OneShotTimerTester f(&did_run);
+ f.SetTaskRunner(task_runner);
+ f.Start();
+
+ EXPECT_FALSE(did_run);
+ task_runner->RunUntilIdle();
+ EXPECT_TRUE(did_run);
+}
+
+TEST(TimerTest, RepeatingTimer) {
+ for (int i = 0; i < kNumTestingMessageLoops; i++) {
+ RunTest_RepeatingTimer(testing_message_loops[i],
+ TimeDelta::FromMilliseconds(10));
+ }
+}
+
+TEST(TimerTest, RepeatingTimer_Cancel) {
+ for (int i = 0; i < kNumTestingMessageLoops; i++) {
+ RunTest_RepeatingTimer_Cancel(testing_message_loops[i],
+ TimeDelta::FromMilliseconds(10));
+ }
+}
+
+TEST(TimerTest, RepeatingTimerZeroDelay) {
+ for (int i = 0; i < kNumTestingMessageLoops; i++) {
+ RunTest_RepeatingTimer(testing_message_loops[i],
+ TimeDelta::FromMilliseconds(0));
+ }
+}
+
+TEST(TimerTest, RepeatingTimerZeroDelay_Cancel) {
+ for (int i = 0; i < kNumTestingMessageLoops; i++) {
+ RunTest_RepeatingTimer_Cancel(testing_message_loops[i],
+ TimeDelta::FromMilliseconds(0));
+ }
+}
+
+TEST(TimerTest, DelayTimer_NoCall) {
+ for (int i = 0; i < kNumTestingMessageLoops; i++) {
+ RunTest_DelayTimer_NoCall(testing_message_loops[i]);
+ }
+}
+
+TEST(TimerTest, DelayTimer_OneCall) {
+ for (int i = 0; i < kNumTestingMessageLoops; i++) {
+ RunTest_DelayTimer_OneCall(testing_message_loops[i]);
+ }
+}
+
+// It's flaky on the buildbot, http://crbug.com/25038.
+TEST(TimerTest, DISABLED_DelayTimer_Reset) {
+ for (int i = 0; i < kNumTestingMessageLoops; i++) {
+ RunTest_DelayTimer_Reset(testing_message_loops[i]);
+ }
+}
+
+TEST(TimerTest, DelayTimer_Deleted) {
+ for (int i = 0; i < kNumTestingMessageLoops; i++) {
+ RunTest_DelayTimer_Deleted(testing_message_loops[i]);
+ }
+}
+
+TEST(TimerTest, MessageLoopShutdown) {
+ // This test is designed to verify that shutdown of the
+ // message loop does not cause crashes if there were pending
+ // timers not yet fired. It may only trigger exceptions
+ // if debug heap checking is enabled.
+ bool did_run = false;
+ {
+ OneShotTimerTester a(&did_run);
+ OneShotTimerTester b(&did_run);
+ OneShotTimerTester c(&did_run);
+ OneShotTimerTester d(&did_run);
+ {
+ base::MessageLoop loop;
+ a.Start();
+ b.Start();
+ } // MessageLoop destructs by falling out of scope.
+ } // OneShotTimers destruct. SHOULD NOT CRASH, of course.
+
+ EXPECT_FALSE(did_run);
+}
+
+void TimerTestCallback() {
+}
+
+TEST(TimerTest, NonRepeatIsRunning) {
+ {
+ base::MessageLoop loop;
+ base::Timer timer(false, false);
+ EXPECT_FALSE(timer.IsRunning());
+ timer.Start(FROM_HERE, TimeDelta::FromDays(1),
+ base::Bind(&TimerTestCallback));
+ EXPECT_TRUE(timer.IsRunning());
+ timer.Stop();
+ EXPECT_FALSE(timer.IsRunning());
+ EXPECT_TRUE(timer.user_task().is_null());
+ }
+
+ {
+ base::Timer timer(true, false);
+ base::MessageLoop loop;
+ EXPECT_FALSE(timer.IsRunning());
+ timer.Start(FROM_HERE, TimeDelta::FromDays(1),
+ base::Bind(&TimerTestCallback));
+ EXPECT_TRUE(timer.IsRunning());
+ timer.Stop();
+ EXPECT_FALSE(timer.IsRunning());
+ ASSERT_FALSE(timer.user_task().is_null());
+ timer.Reset();
+ EXPECT_TRUE(timer.IsRunning());
+ }
+}
+
+TEST(TimerTest, NonRepeatMessageLoopDeath) {
+ base::Timer timer(false, false);
+ {
+ base::MessageLoop loop;
+ EXPECT_FALSE(timer.IsRunning());
+ timer.Start(FROM_HERE, TimeDelta::FromDays(1),
+ base::Bind(&TimerTestCallback));
+ EXPECT_TRUE(timer.IsRunning());
+ }
+ EXPECT_FALSE(timer.IsRunning());
+ EXPECT_TRUE(timer.user_task().is_null());
+}
+
+TEST(TimerTest, RetainRepeatIsRunning) {
+ base::MessageLoop loop;
+ base::Timer timer(FROM_HERE, TimeDelta::FromDays(1),
+ base::Bind(&TimerTestCallback), true);
+ EXPECT_FALSE(timer.IsRunning());
+ timer.Reset();
+ EXPECT_TRUE(timer.IsRunning());
+ timer.Stop();
+ EXPECT_FALSE(timer.IsRunning());
+ timer.Reset();
+ EXPECT_TRUE(timer.IsRunning());
+}
+
+TEST(TimerTest, RetainNonRepeatIsRunning) {
+ base::MessageLoop loop;
+ base::Timer timer(FROM_HERE, TimeDelta::FromDays(1),
+ base::Bind(&TimerTestCallback), false);
+ EXPECT_FALSE(timer.IsRunning());
+ timer.Reset();
+ EXPECT_TRUE(timer.IsRunning());
+ timer.Stop();
+ EXPECT_FALSE(timer.IsRunning());
+ timer.Reset();
+ EXPECT_TRUE(timer.IsRunning());
+}
+
+namespace {
+
+bool g_callback_happened1 = false;
+bool g_callback_happened2 = false;
+
+void ClearAllCallbackHappened() {
+ g_callback_happened1 = false;
+ g_callback_happened2 = false;
+}
+
+void SetCallbackHappened1() {
+ g_callback_happened1 = true;
+ base::MessageLoop::current()->QuitWhenIdle();
+}
+
+void SetCallbackHappened2() {
+ g_callback_happened2 = true;
+ base::MessageLoop::current()->QuitWhenIdle();
+}
+
+TEST(TimerTest, ContinuationStopStart) {
+ {
+ ClearAllCallbackHappened();
+ base::MessageLoop loop;
+ base::Timer timer(false, false);
+ timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(10),
+ base::Bind(&SetCallbackHappened1));
+ timer.Stop();
+ timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(40),
+ base::Bind(&SetCallbackHappened2));
+ base::RunLoop().Run();
+ EXPECT_FALSE(g_callback_happened1);
+ EXPECT_TRUE(g_callback_happened2);
+ }
+}
+
+TEST(TimerTest, ContinuationReset) {
+ {
+ ClearAllCallbackHappened();
+ base::MessageLoop loop;
+ base::Timer timer(false, false);
+ timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(10),
+ base::Bind(&SetCallbackHappened1));
+ timer.Reset();
+ // Since Reset happened before task ran, the user_task must not be cleared:
+ ASSERT_FALSE(timer.user_task().is_null());
+ base::RunLoop().Run();
+ EXPECT_TRUE(g_callback_happened1);
+ }
+}
+
+} // namespace
diff --git a/libchrome/base/trace_event/OWNERS b/libchrome/base/trace_event/OWNERS
new file mode 100644
index 0000000..9160267
--- /dev/null
+++ b/libchrome/base/trace_event/OWNERS
@@ -0,0 +1,6 @@
+dsinclair@chromium.org
+nduca@chromium.org
+oysteine@chromium.org
+primiano@chromium.org
+simonhatch@chromium.org
+per-file trace_event_android.cc=wangxianzhu@chromium.org
diff --git a/libchrome/base/trace_event/common/trace_event_common.h b/libchrome/base/trace_event/common/trace_event_common.h
new file mode 100644
index 0000000..0a04d62
--- /dev/null
+++ b/libchrome/base/trace_event/common/trace_event_common.h
@@ -0,0 +1,1093 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header file defines the set of trace_event macros without specifying
+// how the events actually get collected and stored. If you need to expose trace
+// events to some other universe, you can copy-and-paste this file as well as
+// trace_event.h, modifying the macros contained there as necessary for the
+// target platform. The end result is that multiple libraries can funnel events
+// through to a shared trace event collector.
+
+// IMPORTANT: To avoid conflicts, if you need to modify this file for a library,
+// land your change in base/ first, and then copy-and-paste it.
+
+// Trace events are for tracking application performance and resource usage.
+// Macros are provided to track:
+// Begin and end of function calls
+// Counters
+//
+// Events are issued against categories. Whereas LOG's
+// categories are statically defined, TRACE categories are created
+// implicitly with a string. For example:
+// TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent",
+// TRACE_EVENT_SCOPE_THREAD)
+//
+// It is often the case that one trace may belong in multiple categories at the
+// same time. The first argument to the trace can be a comma-separated list of
+// categories, forming a category group, like:
+//
+// TRACE_EVENT_INSTANT0("input,views", "OnMouseOver", TRACE_EVENT_SCOPE_THREAD)
+//
+// We can enable/disable tracing of OnMouseOver by enabling/disabling either
+// category.
+//
+// Events can be INSTANT, or can be pairs of BEGIN and END in the same scope:
+// TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly")
+// doSomethingCostly()
+// TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly")
+// Note: our tools can't always determine the correct BEGIN/END pairs unless
+// these are used in the same scope. Use ASYNC_BEGIN/ASYNC_END macros if you
+// need them to be in separate scopes.
+//
+// A common use case is to trace entire function scopes. This
+// issues a trace BEGIN and END automatically:
+// void doSomethingCostly() {
+// TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
+// ...
+// }
+//
+// Additional parameters can be associated with an event:
+// void doSomethingCostly2(int howMuch) {
+// TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly",
+// "howMuch", howMuch);
+// ...
+// }
+//
+// The trace system will automatically add to this information the
+// current process id, thread id, and a timestamp in microseconds.
+//
+// To trace an asynchronous procedure such as an IPC send/receive, use
+// ASYNC_BEGIN and ASYNC_END:
+// [single threaded sender code]
+// static int send_count = 0;
+// ++send_count;
+// TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count);
+// Send(new MyMessage(send_count));
+// [receive code]
+// void OnMyMessage(send_count) {
+// TRACE_EVENT_ASYNC_END0("ipc", "message", send_count);
+// }
+// The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs.
+// ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process.
+// Pointers can be used for the ID parameter, and they will be mangled
+// internally so that the same pointer on two different processes will not
+// match. For example:
+// class MyTracedClass {
+// public:
+// MyTracedClass() {
+// TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this);
+// }
+// ~MyTracedClass() {
+// TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this);
+// }
+// }
+//
+// Trace event also supports counters, which is a way to track a quantity
+// as it varies over time. Counters are created with the following macro:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue);
+//
+// Counters are process-specific. The macro itself can be issued from any
+// thread, however.
+//
+// Sometimes, you want to track two counters at once. You can do this with two
+// counter macros:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]);
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]);
+// Or you can do it with a combined macro:
+// TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter",
+// "bytesPinned", g_myCounterValue[0],
+// "bytesAllocated", g_myCounterValue[1]);
+// This indicates to the tracing UI that these counters should be displayed
+// in a single graph, as a summed area chart.
+//
+// Since counters are in a global namespace, you may want to disambiguate with a
+// unique ID, by using the TRACE_COUNTER_ID* variations.
+//
+// By default, trace collection is compiled in, but turned off at runtime.
+// Collecting trace data is the responsibility of the embedding
+// application. In Chrome's case, navigating to about:tracing will turn on
+// tracing and display data collected across all active processes.
+//
+//
+// Memory scoping note:
+// Tracing copies the pointers, not the string content, of the strings passed
+// in for category_group, name, and arg_names. Thus, the following code will
+// cause problems:
+// char* str = strdup("importantName");
+// TRACE_EVENT_INSTANT0("SUBSYSTEM", str); // BAD!
+// free(str); // Trace system now has dangling pointer
+//
+// To avoid this issue with the |name| and |arg_name| parameters, use the
+// TRACE_EVENT_COPY_XXX overloads of the macros at additional runtime overhead.
+// Notes: The category must always be in a long-lived char* (i.e. static const).
+// The |arg_values|, when used, are always deep copied with the _COPY
+// macros.
+//
+// When are string argument values copied:
+// const char* arg_values are only referenced by default:
+// TRACE_EVENT1("category", "name",
+// "arg1", "literal string is only referenced");
+// Use TRACE_STR_COPY to force copying of a const char*:
+// TRACE_EVENT1("category", "name",
+// "arg1", TRACE_STR_COPY("string will be copied"));
+// std::string arg_values are always copied:
+// TRACE_EVENT1("category", "name",
+// "arg1", std::string("string will be copied"));
+//
+//
+// Convertable notes:
+// Converting a large data type to a string can be costly. To help with this,
+// the trace framework provides an interface ConvertableToTraceFormat. If you
+// inherit from it and implement the AppendAsTraceFormat method the trace
+// framework will call back to your object to convert a trace output time. This
+// means, if the category for the event is disabled, the conversion will not
+// happen.
+//
+// class MyData : public base::trace_event::ConvertableToTraceFormat {
+// public:
+// MyData() {}
+// void AppendAsTraceFormat(std::string* out) const override {
+// out->append("{\"foo\":1}");
+// }
+// private:
+// ~MyData() override {}
+// DISALLOW_COPY_AND_ASSIGN(MyData);
+// };
+//
+// TRACE_EVENT1("foo", "bar", "data",
+// std::unique_ptr<ConvertableToTraceFormat>(new MyData()));
+//
+// The trace framework will take ownership of the passed pointer and it will
+// be free'd when the trace buffer is flushed.
+//
+// Note, we only do the conversion when the buffer is flushed, so the provided
+// data object should not be modified after it's passed to the trace framework.
+//
+//
+// Thread Safety:
+// A thread safe singleton and mutex are used for thread safety. Category
+// enabled flags are used to limit the performance impact when the system
+// is not enabled.
+//
+// TRACE_EVENT macros first cache a pointer to a category. The categories are
+// statically allocated and safe at all times, even after exit. Fetching a
+// category is protected by the TraceLog::lock_. Multiple threads initializing
+// the static variable is safe, as they will be serialized by the lock and
+// multiple calls will return the same pointer to the category.
+//
+// Then the category_group_enabled flag is checked. This is an unsigned char,
+// not intended to be multithread safe. It optimizes access to AddTraceEvent
+// which is threadsafe internally via TraceLog::lock_. The enabled flag may
+// cause some threads to incorrectly call or skip calling AddTraceEvent near
+// the time of the system being enabled or disabled. This is acceptable as
+// we tolerate some data loss while the system is being enabled/disabled and
+// because AddTraceEvent is threadsafe internally and checks the enabled state
+// again under lock.
+//
+// Without the use of these static category pointers and enabled flags all
+// trace points would carry a significant performance cost of acquiring a lock
+// and resolving the category.
+
+#if defined(TRACE_EVENT0)
+#error "Another copy of this file has already been included."
+#endif
+
+// This will mark the trace event as disabled by default. The user will need
+// to explicitly enable the event.
+#define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name
+
+// Records a pair of begin and end events called "name" for the current
+// scope, with 0, 1 or 2 associated arguments. If the category is not
+// enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name)
+#define TRACE_EVENT_WITH_FLOW0(category_group, name, bind_id, flow_flags) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+ flow_flags)
+#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val)
+#define TRACE_EVENT_WITH_FLOW1(category_group, name, bind_id, flow_flags, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+ flow_flags, arg1_name, arg1_val)
+#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, \
+ arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_WITH_FLOW2(category_group, name, bind_id, flow_flags, \
+ arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+ flow_flags, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not
+// included in official builds.
+
+#if OFFICIAL_BUILD
+#undef TRACING_IS_OFFICIAL_BUILD
+#define TRACING_IS_OFFICIAL_BUILD 1
+#elif !defined(TRACING_IS_OFFICIAL_BUILD)
+#define TRACING_IS_OFFICIAL_BUILD 0
+#endif
+
+#if TRACING_IS_OFFICIAL_BUILD
+#define UNSHIPPED_TRACE_EVENT0(category_group, name) (void)0
+#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ (void)0
+#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
+ arg1_val) \
+ (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ (void)0
+#else
+#define UNSHIPPED_TRACE_EVENT0(category_group, name) \
+ TRACE_EVENT0(category_group, name)
+#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) \
+ TRACE_EVENT_INSTANT0(category_group, name, scope)
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
+ arg1_val) \
+ TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#endif
+
+// Records a single event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_INSTANT0(category_group, name, scope) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope)
+#define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val)
+#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_INSTANT0(category_group, name, scope) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_COPY | scope)
+#define TRACE_EVENT_COPY_INSTANT1(category_group, name, scope, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_INSTANT2(category_group, name, scope, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+#define TRACE_EVENT_INSTANT_WITH_TIMESTAMP0(category_group, name, scope, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_INSTANT, category_group, name, 0, 0, timestamp, \
+ TRACE_EVENT_FLAG_NONE | scope)
+
+// Syntactic sugars for the sampling tracing in the main thread.
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
+ TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_GET_SAMPLING_STATE() \
+ TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
+#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
+ TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(categoryAndName) \
+ TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, categoryAndName)
+
+// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_BEGIN0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_BEGIN1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_BEGIN2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_BEGIN0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_BEGINx but with a custom |at| timestamp provided.
+// - |id| is used to match the _BEGIN event with the _END event.
+// Events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+#define TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
+ thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0( \
+ category_group, name, id, thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP1( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP2( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+
+// Records a single END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_END0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_END1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_END2(category_group, name, arg1_name, arg1_val, arg2_name, \
+ arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_END0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+#define TRACE_EVENT_MARK_WITH_TIMESTAMP1(category_group, name, timestamp, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_MARK, category_group, name, 0, 0, timestamp, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+
+#define TRACE_EVENT_COPY_MARK(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_MARK, category_group, name, \
+ TRACE_EVENT_FLAG_COPY)
+
+#define TRACE_EVENT_COPY_MARK_WITH_TIMESTAMP(category_group, name, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_MARK, category_group, name, 0, 0, timestamp, \
+ TRACE_EVENT_FLAG_COPY)
+
+// Like TRACE_EVENT_ENDx but emits an ASYNC_END-phase event at |timestamp|.
+// - |id| is used to match the _BEGIN event with the _END event.
+// Events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+#define TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
+ thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0( \
+ category_group, name, id, thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP1( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP2( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_COUNTER1(category_group, name, value) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, "value", \
+ static_cast<int>(value))
+#define TRACE_COPY_COUNTER1(category_group, name, value) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, "value", \
+ static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_COUNTER2(category_group, name, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, value1_name, \
+ static_cast<int>(value1_val), value2_name, \
+ static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER2(category_group, name, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, value1_name, \
+ static_cast<int>(value1_val), value2_name, \
+ static_cast<int>(value2_val))
+
+// Similar to TRACE_COUNTERx, but with a custom |timestamp| provided.
+#define TRACE_COUNTER_WITH_TIMESTAMP1(category_group, name, timestamp, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp, \
+ TRACE_EVENT_FLAG_NONE, "value", static_cast<int>(value))
+
+#define TRACE_COUNTER_WITH_TIMESTAMP2(category_group, name, timestamp, \
+ value1_name, value1_val, value2_name, \
+ value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp, \
+ TRACE_EVENT_FLAG_NONE, value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+// will be xored with a hash of the process ID so that the same pointer on
+// two different processes will not collide.
+#define TRACE_COUNTER_ID1(category_group, name, id, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, "value", \
+ static_cast<int>(value))
+#define TRACE_COPY_COUNTER_ID1(category_group, name, id, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, "value", \
+ static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+// will be xored with a hash of the process ID so that the same pointer on
+// two different processes will not collide.
+#define TRACE_COUNTER_ID2(category_group, name, id, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, \
+ value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER_ID2(category_group, name, id, value1_name, \
+ value1_val, value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, \
+ value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+
+// TRACE_EVENT_SAMPLE_* events are injected by the sampling profiler.
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP0(category_group, name, \
+ thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+ TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP1( \
+ category_group, name, thread_id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP2(category_group, name, \
+ thread_id, timestamp, \
+ arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// ASYNC_STEP_* APIs should be only used by legacy code. New code should
+// consider using NESTABLE_ASYNC_* APIs to describe substeps within an async
+// event.
+// Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to match the ASYNC_BEGIN event with the ASYNC_END event. ASYNC
+// events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+//
+// An asynchronous operation can consist of multiple phases. The first phase is
+// defined by the ASYNC_BEGIN calls. Additional phases can be defined using the
+// ASYNC_STEP_INTO or ASYNC_STEP_PAST macros. The ASYNC_STEP_INTO macro will
+// annotate the block following the call. The ASYNC_STEP_PAST macro will
+// annotate the block prior to the call. Note that any particular event must use
+// only STEP_INTO or STEP_PAST macros; they can not mix and match. When the
+// operation completes, call ASYNC_END.
+//
+// An ASYNC trace typically occurs on a single thread (if not, they will only be
+// drawn on the thread defined in the ASYNC_BEGIN event), but all events in that
+// operation must use the same |name| and |id|. Each step can have its own
+// args.
+#define TRACE_EVENT_ASYNC_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_ASYNC_BEGINx but with a custom |at| timestamp
+// provided.
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP1( \
+ category_group, name, id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+
+// Records a single ASYNC_STEP_INTO event for |step| immediately. If the
+// category is not enabled, then this does nothing. The |name| and |id| must
+// match the ASYNC_BEGIN event above. The |step| param identifies this step
+// within the async event. This should be called at the beginning of the next
+// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
+// ASYNC_STEP_PAST events.
+#define TRACE_EVENT_ASYNC_STEP_INTO0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_INTO1(category_group, name, id, step, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+
+// Similar to TRACE_EVENT_ASYNC_STEP_INTOx but with a custom |at| timestamp
+// provided.
+#define TRACE_EVENT_ASYNC_STEP_INTO_WITH_TIMESTAMP0(category_group, name, id, \
+ step, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ "step", step)
+
+// Records a single ASYNC_STEP_PAST event for |step| immediately. If the
+// category is not enabled, then this does nothing. The |name| and |id| must
+// match the ASYNC_BEGIN event above. The |step| param identifies this step
+// within the async event. This should be called at the beginning of the next
+// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
+// ASYNC_STEP_INTO events.
+#define TRACE_EVENT_ASYNC_STEP_PAST0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_PAST1(category_group, name, id, step, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_STEP_PAST, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+
+// Records a single ASYNC_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_ASYNC_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_END1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_ASYNC_ENDx but with a custom |at| timestamp provided.
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP0(category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP1(category_group, name, id, \
+ timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val)
+
+// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
+// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
+// events.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - A pair of NESTABLE_ASYNC_BEGIN event and NESTABLE_ASYNC_END event is
+// considered as a match if their category_group, name and id all match.
+// - |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+// - |id| is used to match a child NESTABLE_ASYNC event with its parent
+// NESTABLE_ASYNC event. Therefore, events in the same nested event tree must
+// be logged using the same id and category_group.
+//
+// Unmatched NESTABLE_ASYNC_END event will be parsed as an event that starts
+// at the first NESTABLE_ASYNC event of that id, and unmatched
+// NESTABLE_ASYNC_BEGIN event will be parsed as an event that ends at the last
+// NESTABLE_ASYNC event of that id. Corresponding warning messages for
+// unmatched events will be shown in the analysis view.
+
+// Records a single NESTABLE_ASYNC_BEGIN event called "name" immediately, with
+// 0, 1 or 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+// Records a single NESTABLE_ASYNC_END event called "name" immediately, with
+// 0, 1 or 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 1
+// associated argument. If the category is not enabled, then this does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_END1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
+// with one associated argument. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT1(category_group, name, id, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
+// with 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TTS2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TTS2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_NESTABLE_ASYNC_{BEGIN,END}x but with a custom
+// |timestamp| provided.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, \
+ id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(category_group, name, \
+ id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0( \
+ category_group, name, id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TIMESTAMP0( \
+ category_group, name, id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+
+// NOTE(review): the definition below is a byte-identical redefinition of
+// TRACE_EVENT_NESTABLE_ASYNC_INSTANT2 from earlier in this header. Identical
+// replacement lists make it legal, but the duplicate should be removed upstream.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW
+// events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+// FLOW events are different from ASYNC events in how they are drawn by the
+// tracing UI. A FLOW defines asynchronous data flow, such as posting a task
+// (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be
+// drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar
+// to ASYNC, a FLOW can consist of multiple phases. The first phase is defined
+// by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP
+// macros. When the operation completes, call FLOW_END. An async operation can
+// span threads and processes, but all events in that operation must use the
+// same |name| and |id|. Each event can have its own args.
+#define TRACE_EVENT_FLOW_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_BEGIN1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_FLOW_BEGIN2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single FLOW_STEP event for |step| immediately. If the category
+// is not enabled, then this does nothing. The |name| and |id| must match the
+// FLOW_BEGIN event above. The |step| param identifies this step within the
+// async event. This should be called at the beginning of the next phase of an
+// asynchronous operation.
+#define TRACE_EVENT_FLOW_STEP0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_FLOW_STEP1(category_group, name, id, step, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_STEP0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, "step", step)
+#define TRACE_EVENT_COPY_FLOW_STEP1(category_group, name, id, step, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, "step", step, arg1_name, arg1_val)
+
+// Records a single FLOW_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_FLOW_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, \
+ TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
+#define TRACE_EVENT_FLOW_END1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+ arg1_val)
+#define TRACE_EVENT_FLOW_END2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+ arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_END1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
+ arg1_val)
+#define TRACE_EVENT_COPY_FLOW_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
+ arg1_val, arg2_name, arg2_val)
+
+// Special trace event macro to trace task execution with the location where it
+// was posted from.
+#define TRACE_TASK_EXECUTION(run_function, task) \
+ INTERNAL_TRACE_TASK_EXECUTION(run_function, task)
+
+// TRACE_EVENT_METADATA* events are information related to other
+// injected events, not events in their own right.
+#define TRACE_EVENT_METADATA1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, arg1_name, arg1_val)
+
+// Records a clock sync event.
+#define TRACE_EVENT_CLOCK_SYNC_RECEIVER(sync_id) \
+ INTERNAL_TRACE_EVENT_ADD( \
+ TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync", \
+ TRACE_EVENT_FLAG_NONE, "sync_id", sync_id)
+#define TRACE_EVENT_CLOCK_SYNC_ISSUER(sync_id, issue_ts, issue_end_ts) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync", \
+ issue_end_ts.ToInternalValue(), TRACE_EVENT_FLAG_NONE, \
+ "sync_id", sync_id, "issue_ts", issue_ts.ToInternalValue())
+
+// Macros to track the life time and value of arbitrary client objects.
+// See also TraceTrackableObject.
+#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, \
+ snapshot) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP( \
+ category_group, name, id, timestamp, snapshot) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, \
+ TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+
+// Records entering and leaving trace event contexts. |category_group| and
+// |name| specify the context category and type. |context| is a
+// snapshotted context object id.
+#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, \
+ TRACE_ID_DONT_MANGLE(context))
+
+// Macro to efficiently determine if a given category group is enabled.
+#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ *ret = true; \
+ } else { \
+ *ret = false; \
+ } \
+ } while (0)
+
+// Macro to explicitly warm up a given category group. This could be useful in
+// cases where we want to initialize a category group before any trace events
+// for that category group are reported. For example, to have a category group
+// always show up in the "record categories" list for manually selecting
+// settings in about://tracing.
+#define TRACE_EVENT_WARMUP_CATEGORY(category_group) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group)
+
+// Macro to efficiently determine, through polling, if a new trace has begun.
+#define TRACE_EVENT_IS_NEW_TRACE(ret) \
+ do { \
+ static int INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = 0; \
+ int num_traces_recorded = TRACE_EVENT_API_GET_NUM_TRACES_RECORDED(); \
+ if (num_traces_recorded != -1 && \
+ num_traces_recorded != \
+ INTERNAL_TRACE_EVENT_UID(lastRecordingNumber)) { \
+ INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = num_traces_recorded; \
+ *ret = true; \
+ } else { \
+ *ret = false; \
+ } \
+ } while (0)
+
+// Notes regarding the following definitions:
+// New values can be added and propagated to third party libraries, but existing
+// definitions must never be changed, because third party libraries may use old
+// definitions.
+
+// Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
+#define TRACE_EVENT_PHASE_BEGIN ('B')
+#define TRACE_EVENT_PHASE_END ('E')
+#define TRACE_EVENT_PHASE_COMPLETE ('X')
+#define TRACE_EVENT_PHASE_INSTANT ('I')
+#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_INTO ('T')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_PAST ('p')
+#define TRACE_EVENT_PHASE_ASYNC_END ('F')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN ('b')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_END ('e')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT ('n')
+#define TRACE_EVENT_PHASE_FLOW_BEGIN ('s')
+#define TRACE_EVENT_PHASE_FLOW_STEP ('t')
+#define TRACE_EVENT_PHASE_FLOW_END ('f')
+#define TRACE_EVENT_PHASE_METADATA ('M')
+#define TRACE_EVENT_PHASE_COUNTER ('C')
+#define TRACE_EVENT_PHASE_SAMPLE ('P')
+#define TRACE_EVENT_PHASE_CREATE_OBJECT ('N')
+#define TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ('O')
+#define TRACE_EVENT_PHASE_DELETE_OBJECT ('D')
+#define TRACE_EVENT_PHASE_MEMORY_DUMP ('v')
+#define TRACE_EVENT_PHASE_MARK ('R')
+#define TRACE_EVENT_PHASE_CLOCK_SYNC ('c')
+#define TRACE_EVENT_PHASE_ENTER_CONTEXT ('(')
+#define TRACE_EVENT_PHASE_LEAVE_CONTEXT (')')
+
+// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
+#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
+#define TRACE_EVENT_FLAG_COPY (static_cast<unsigned int>(1 << 0))
+#define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned int>(1 << 1))
+#define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned int>(1 << 2))
+#define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned int>(1 << 3))
+#define TRACE_EVENT_FLAG_SCOPE_EXTRA (static_cast<unsigned int>(1 << 4))
+#define TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP (static_cast<unsigned int>(1 << 5))
+#define TRACE_EVENT_FLAG_ASYNC_TTS (static_cast<unsigned int>(1 << 6))
+#define TRACE_EVENT_FLAG_BIND_TO_ENCLOSING (static_cast<unsigned int>(1 << 7))
+#define TRACE_EVENT_FLAG_FLOW_IN (static_cast<unsigned int>(1 << 8))
+#define TRACE_EVENT_FLAG_FLOW_OUT (static_cast<unsigned int>(1 << 9))
+#define TRACE_EVENT_FLAG_HAS_CONTEXT_ID (static_cast<unsigned int>(1 << 10))
+#define TRACE_EVENT_FLAG_HAS_PROCESS_ID (static_cast<unsigned int>(1 << 11))
+
+#define TRACE_EVENT_FLAG_SCOPE_MASK \
+ (static_cast<unsigned int>(TRACE_EVENT_FLAG_SCOPE_OFFSET | \
+ TRACE_EVENT_FLAG_SCOPE_EXTRA))
+
+// Type values for identifying types in the TraceValue union.
+#define TRACE_VALUE_TYPE_BOOL (static_cast<unsigned char>(1))
+#define TRACE_VALUE_TYPE_UINT (static_cast<unsigned char>(2))
+#define TRACE_VALUE_TYPE_INT (static_cast<unsigned char>(3))
+#define TRACE_VALUE_TYPE_DOUBLE (static_cast<unsigned char>(4))
+#define TRACE_VALUE_TYPE_POINTER (static_cast<unsigned char>(5))
+#define TRACE_VALUE_TYPE_STRING (static_cast<unsigned char>(6))
+#define TRACE_VALUE_TYPE_COPY_STRING (static_cast<unsigned char>(7))
+#define TRACE_VALUE_TYPE_CONVERTABLE (static_cast<unsigned char>(8))
+
+// Enum reflecting the scope of an INSTANT event. Must fit within
+// TRACE_EVENT_FLAG_SCOPE_MASK.
+#define TRACE_EVENT_SCOPE_GLOBAL (static_cast<unsigned char>(0 << 3))
+#define TRACE_EVENT_SCOPE_PROCESS (static_cast<unsigned char>(1 << 3))
+#define TRACE_EVENT_SCOPE_THREAD (static_cast<unsigned char>(2 << 3))
+
+#define TRACE_EVENT_SCOPE_NAME_GLOBAL ('g')
+#define TRACE_EVENT_SCOPE_NAME_PROCESS ('p')
+#define TRACE_EVENT_SCOPE_NAME_THREAD ('t')
diff --git a/libchrome/base/trace_event/etw_manifest/BUILD.gn b/libchrome/base/trace_event/etw_manifest/BUILD.gn
new file mode 100644
index 0000000..19c4ecf
--- /dev/null
+++ b/libchrome/base/trace_event/etw_manifest/BUILD.gn
@@ -0,0 +1,29 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/win/message_compiler.gni")
+
+assert(is_win, "This only runs on Windows.")
+
+message_compiler("chrome_events_win") {
+ visibility = [
+ "//base/*",
+ "//chrome:main_dll",
+ ]
+
+ sources = [
+ "chrome_events_win.man",
+ ]
+
+ user_mode_logging = true
+
+ # The only code generated from chrome_events_win.man is a header file that
+ # is included by trace_event_etw_export_win.cc, so there is no need to
+ # compile any generated code. The other thing which compile_generated_code
+ # controls in this context is linking in the .res file generated from the
+ # manifest. However this is only needed for ETW provider registration which
+ # is done by UIforETW (https://github.com/google/UIforETW) and therefore the
+ # manifest resource can be skipped in Chrome.
+ compile_generated_code = false
+}
diff --git a/libchrome/base/trace_event/etw_manifest/chrome_events_win.man b/libchrome/base/trace_event/etw_manifest/chrome_events_win.man
new file mode 100644
index 0000000..10a8ddf
--- /dev/null
+++ b/libchrome/base/trace_event/etw_manifest/chrome_events_win.man
@@ -0,0 +1,84 @@
+<?xml version='1.0' encoding='utf-8' standalone='yes'?>
+<instrumentationManifest
+ xmlns="http://schemas.microsoft.com/win/2004/08/events"
+ xmlns:win="http://manifests.microsoft.com/win/2004/08/windows/events"
+ xmlns:xs="http://www.w3.org/2001/XMLSchema"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://schemas.microsoft.com/win/2004/08/events eventman.xsd"
+ >
+ <instrumentation>
+ <events>
+ <provider
+ guid="{D2D578D9-2936-45B6-A09f-30E32715F42D}"
+ messageFileName="chrome.dll"
+ name="Chrome"
+ resourceFileName="chrome.dll"
+ symbol="CHROME"
+ >
+ <channels>
+ <importChannel
+ chid="SYSTEM"
+ name="System"
+ />
+ </channels>
+ <templates>
+ <template tid="tid_chrome_event">
+ <data
+ inType="win:AnsiString"
+ name="Name"
+ />
+ <data
+ inType="win:AnsiString"
+ name="Phase"
+ />
+ <data
+ inType="win:AnsiString"
+ name="Arg Name 1"
+ />
+ <data
+ inType="win:AnsiString"
+ name="Arg Value 1"
+ />
+ <data
+ inType="win:AnsiString"
+ name="Arg Name 2"
+ />
+ <data
+ inType="win:AnsiString"
+ name="Arg Value 2"
+ />
+ <data
+ inType="win:AnsiString"
+ name="Arg Name 3"
+ />
+ <data
+ inType="win:AnsiString"
+ name="Arg Value 3"
+ />
+ </template>
+ </templates>
+ <events>
+ <event
+ channel="SYSTEM"
+ level="win:Informational"
+ message="$(string.ChromeEvent.EventMessage)"
+ opcode="win:Info"
+ symbol="ChromeEvent"
+ template="tid_chrome_event"
+ value="1"
+ />
+ </events>
+ </provider>
+ </events>
+ </instrumentation>
+ <localization xmlns="http://schemas.microsoft.com/win/2004/08/events">
+ <resources culture="en-US">
+ <stringTable>
+ <string
+ id="ChromeEvent.EventMessage"
+ value="Chrome Event: %1 (%2)"
+ />
+ </stringTable>
+ </resources>
+ </localization>
+</instrumentationManifest>
diff --git a/libchrome/base/trace_event/etw_manifest/etw_manifest.gyp b/libchrome/base/trace_event/etw_manifest/etw_manifest.gyp
new file mode 100644
index 0000000..b2f0eb8
--- /dev/null
+++ b/libchrome/base/trace_event/etw_manifest/etw_manifest.gyp
@@ -0,0 +1,41 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ # GN version: //base/trace_event/etw_manifest/BUILD.gn
+ 'target_name': 'etw_manifest',
+ 'type': 'none',
+ 'toolsets': ['host', 'target'],
+ 'hard_dependency': 1,
+ 'conditions': [
+ ['OS=="win"', {
+ 'sources': [
+ 'chrome_events_win.man',
+ ],
+ 'variables': {
+ 'man_output_dir': '<(SHARED_INTERMEDIATE_DIR)/base/trace_event/etw_manifest',
+ },
+ 'rules': [{
+ # Rule to run the message compiler.
+ 'rule_name': 'message_compiler',
+ 'extension': 'man',
+ 'outputs': [
+ '<(man_output_dir)/chrome_events_win.h',
+ '<(man_output_dir)/chrome_events_win.rc',
+ ],
+ 'action': [
+ 'mc.exe',
+ '-h', '<(man_output_dir)',
+ '-r', '<(man_output_dir)/.',
+ '-um',
+ '<(RULE_INPUT_PATH)',
+ ],
+ 'message': 'Running message compiler on <(RULE_INPUT_PATH)',
+ }],
+ }],
+ ],
+ }
+ ]
+}
diff --git a/libchrome/base/trace_event/heap_profiler.h b/libchrome/base/trace_event/heap_profiler.h
new file mode 100644
index 0000000..cf57524
--- /dev/null
+++ b/libchrome/base/trace_event/heap_profiler.h
@@ -0,0 +1,89 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_H
+#define BASE_TRACE_EVENT_HEAP_PROFILER_H
+
+#include "base/compiler_specific.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+
+// This header file defines the set of macros that are used to track memory
+// usage in the heap profiler. This is in addition to the macros defined in
+// trace_event.h and is specific to the heap profiler. This file also defines
+// implementation details of these macros.
+
+// Implementation detail: heap profiler macros create temporary variables to
+// keep instrumentation overhead low. These macros give each temporary variable
+// a unique name based on the line number to prevent name collisions.
+#define INTERNAL_HEAP_PROFILER_UID3(a, b) heap_profiler_unique_##a##b
+#define INTERNAL_HEAP_PROFILER_UID2(a, b) INTERNAL_HEAP_PROFILER_UID3(a, b)
+#define INTERNAL_HEAP_PROFILER_UID(name_prefix) \
+ INTERNAL_HEAP_PROFILER_UID2(name_prefix, __LINE__)
+
+// Scoped tracker for task execution context in the heap profiler.
+#define TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION \
+ trace_event_internal::HeapProfilerScopedTaskExecutionTracker
+
+// A scoped ignore event used to tell heap profiler to ignore all the
+// allocations in the scope. It is useful to exclude allocations made for
+// tracing from the heap profiler dumps.
+#define HEAP_PROFILER_SCOPED_IGNORE \
+ trace_event_internal::HeapProfilerScopedIgnore INTERNAL_HEAP_PROFILER_UID( \
+ scoped_ignore)
+
+namespace trace_event_internal {
+
+// HeapProfilerScopedTaskExecutionTracker records the current task's context in
+// the heap profiler.
+class HeapProfilerScopedTaskExecutionTracker {
+ public:
+ inline explicit HeapProfilerScopedTaskExecutionTracker(
+ const char* task_context)
+ : context_(task_context) {
+ using base::trace_event::AllocationContextTracker;
+ if (UNLIKELY(AllocationContextTracker::capture_mode() !=
+ AllocationContextTracker::CaptureMode::DISABLED)) {
+ AllocationContextTracker::GetInstanceForCurrentThread()
+ ->PushCurrentTaskContext(context_);
+ }
+ }
+
+ inline ~HeapProfilerScopedTaskExecutionTracker() {
+ using base::trace_event::AllocationContextTracker;
+ if (UNLIKELY(AllocationContextTracker::capture_mode() !=
+ AllocationContextTracker::CaptureMode::DISABLED)) {
+ AllocationContextTracker::GetInstanceForCurrentThread()
+ ->PopCurrentTaskContext(context_);
+ }
+ }
+
+ private:
+ const char* context_;
+};
+
+class BASE_EXPORT HeapProfilerScopedIgnore {
+ public:
+ inline HeapProfilerScopedIgnore() {
+ using base::trace_event::AllocationContextTracker;
+ if (UNLIKELY(
+ AllocationContextTracker::capture_mode() !=
+ AllocationContextTracker::CaptureMode::DISABLED)) {
+ AllocationContextTracker::GetInstanceForCurrentThread()
+ ->begin_ignore_scope();
+ }
+ }
+ inline ~HeapProfilerScopedIgnore() {
+ using base::trace_event::AllocationContextTracker;
+ if (UNLIKELY(
+ AllocationContextTracker::capture_mode() !=
+ AllocationContextTracker::CaptureMode::DISABLED)) {
+ AllocationContextTracker::GetInstanceForCurrentThread()
+ ->end_ignore_scope();
+ }
+ }
+};
+
+} // namespace trace_event_internal
+
+#endif // BASE_TRACE_EVENT_HEAP_PROFILER_H
diff --git a/libchrome/base/trace_event/heap_profiler_allocation_context.cc b/libchrome/base/trace_event/heap_profiler_allocation_context.cc
new file mode 100644
index 0000000..0f330a8
--- /dev/null
+++ b/libchrome/base/trace_event/heap_profiler_allocation_context.cc
@@ -0,0 +1,88 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_allocation_context.h"
+
+#include <cstring>
+
+#include "base/hash.h"
+#include "base/macros.h"
+
+namespace base {
+namespace trace_event {
+
+bool operator < (const StackFrame& lhs, const StackFrame& rhs) {
+ return lhs.value < rhs.value;
+}
+
+bool operator == (const StackFrame& lhs, const StackFrame& rhs) {
+ return lhs.value == rhs.value;
+}
+
+bool operator != (const StackFrame& lhs, const StackFrame& rhs) {
+ return !(lhs.value == rhs.value);
+}
+
+Backtrace::Backtrace(): frame_count(0) {}
+
+bool operator==(const Backtrace& lhs, const Backtrace& rhs) {
+ if (lhs.frame_count != rhs.frame_count) return false;
+ return std::equal(lhs.frames, lhs.frames + lhs.frame_count, rhs.frames);
+}
+
+bool operator!=(const Backtrace& lhs, const Backtrace& rhs) {
+ return !(lhs == rhs);
+}
+
+AllocationContext::AllocationContext(): type_name(nullptr) {}
+
+AllocationContext::AllocationContext(const Backtrace& backtrace,
+ const char* type_name)
+ : backtrace(backtrace), type_name(type_name) {}
+
+bool operator==(const AllocationContext& lhs, const AllocationContext& rhs) {
+ return (lhs.backtrace == rhs.backtrace) && (lhs.type_name == rhs.type_name);
+}
+
+bool operator!=(const AllocationContext& lhs, const AllocationContext& rhs) {
+ return !(lhs == rhs);
+}
+} // namespace trace_event
+} // namespace base
+
+namespace BASE_HASH_NAMESPACE {
+using base::trace_event::AllocationContext;
+using base::trace_event::Backtrace;
+using base::trace_event::StackFrame;
+
+size_t hash<StackFrame>::operator()(const StackFrame& frame) const {
+ return hash<const void*>()(frame.value);
+}
+
+size_t hash<Backtrace>::operator()(const Backtrace& backtrace) const {
+ const void* values[Backtrace::kMaxFrameCount];
+ for (size_t i = 0; i != backtrace.frame_count; ++i) {
+ values[i] = backtrace.frames[i].value;
+ }
+ return base::SuperFastHash(
+ reinterpret_cast<const char*>(values),
+ static_cast<int>(backtrace.frame_count * sizeof(*values)));
+}
+
+size_t hash<AllocationContext>::operator()(const AllocationContext& ctx) const {
+ size_t backtrace_hash = hash<Backtrace>()(ctx.backtrace);
+
+ // Multiplicative hash from [Knuth 1998]. Works best if |size_t| is 32 bits,
+ // because the magic number is a prime very close to 2^32 / golden ratio, but
+ // will still redistribute keys bijectively on 64-bit architectures because
+ // the magic number is coprime to 2^64.
+ size_t type_hash = reinterpret_cast<size_t>(ctx.type_name) * 2654435761;
+
+ // Multiply one side to break the commutativity of +. Multiplication with a
+ // number coprime to |numeric_limits<size_t>::max() + 1| is bijective so
+ // randomness is preserved.
+ return (backtrace_hash * 3) + type_hash;
+}
+
+} // BASE_HASH_NAMESPACE
diff --git a/libchrome/base/trace_event/heap_profiler_allocation_context.h b/libchrome/base/trace_event/heap_profiler_allocation_context.h
new file mode 100644
index 0000000..24e2dec
--- /dev/null
+++ b/libchrome/base/trace_event/heap_profiler_allocation_context.h
@@ -0,0 +1,131 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/containers/hash_tables.h"
+
+namespace base {
+namespace trace_event {
+
+// When heap profiling is enabled, tracing keeps track of the allocation
+// context for each allocation intercepted. It is generated by the
+// |AllocationContextTracker| which keeps stacks of context in TLS.
+// The tracker is initialized lazily.
+
+// The backtrace in the allocation context is a snapshot of the stack. For now,
+// this is the pseudo stack where frames are created by trace event macros. In
+// the future, we might add the option to use the native call stack. In that
+// case, |Backtrace| and |AllocationContextTracker::GetContextSnapshot| might
+// have different implementations that can be selected by a compile time flag.
+
+// The number of stack frames stored in the backtrace is a trade off between
+// memory used for tracing and accuracy. Measurements done on a prototype
+// revealed that:
+//
+// - In 60 percent of the cases, pseudo stack depth <= 7.
+// - In 87 percent of the cases, pseudo stack depth <= 9.
+// - In 95 percent of the cases, pseudo stack depth <= 11.
+//
+// See the design doc (https://goo.gl/4s7v7b) for more details.
+
+// Represents (pseudo) stack frame. Used in Backtrace class below.
+//
+// Conceptually stack frame is identified by its value, and type is used
+// mostly to properly format the value. Value is expected to be a valid
+// pointer from process' address space.
+struct BASE_EXPORT StackFrame {
+ enum class Type {
+ TRACE_EVENT_NAME, // const char* string
+ THREAD_NAME, // const char* thread name
+ PROGRAM_COUNTER, // as returned by stack tracing (e.g. by StackTrace)
+ };
+
+ static StackFrame FromTraceEventName(const char* name) {
+ return {Type::TRACE_EVENT_NAME, name};
+ }
+ static StackFrame FromThreadName(const char* name) {
+ return {Type::THREAD_NAME, name};
+ }
+ static StackFrame FromProgramCounter(const void* pc) {
+ return {Type::PROGRAM_COUNTER, pc};
+ }
+
+ Type type;
+ const void* value;
+};
+
+bool BASE_EXPORT operator < (const StackFrame& lhs, const StackFrame& rhs);
+bool BASE_EXPORT operator == (const StackFrame& lhs, const StackFrame& rhs);
+bool BASE_EXPORT operator != (const StackFrame& lhs, const StackFrame& rhs);
+
+struct BASE_EXPORT Backtrace {
+ Backtrace();
+
+ // If the stack is higher than what can be stored here, the bottom frames
+ // (the ones closer to main()) are stored. Depth of 12 is enough for most
+ // pseudo traces (see above), but not for native traces, where we need more.
+ enum { kMaxFrameCount = 48 };
+ StackFrame frames[kMaxFrameCount];
+ size_t frame_count;
+};
+
+bool BASE_EXPORT operator==(const Backtrace& lhs, const Backtrace& rhs);
+bool BASE_EXPORT operator!=(const Backtrace& lhs, const Backtrace& rhs);
+
+// The |AllocationContext| is context metadata that is kept for every allocation
+// when heap profiling is enabled. To simplify memory management for book-
+// keeping, this struct has a fixed size.
+struct BASE_EXPORT AllocationContext {
+ AllocationContext();
+ AllocationContext(const Backtrace& backtrace, const char* type_name);
+
+ Backtrace backtrace;
+
+ // Type name of the type stored in the allocated memory. A null pointer
+ // indicates "unknown type". Grouping is done by comparing pointers, not by
+ // deep string comparison. In a component build, where a type name can have a
+ // string literal in several dynamic libraries, this may distort grouping.
+ const char* type_name;
+};
+
+bool BASE_EXPORT operator==(const AllocationContext& lhs,
+ const AllocationContext& rhs);
+bool BASE_EXPORT operator!=(const AllocationContext& lhs,
+ const AllocationContext& rhs);
+
+// Struct to store the size and count of the allocations.
+struct AllocationMetrics {
+ size_t size;
+ size_t count;
+};
+
+} // namespace trace_event
+} // namespace base
+
+namespace BASE_HASH_NAMESPACE {
+
+template <>
+struct BASE_EXPORT hash<base::trace_event::StackFrame> {
+ size_t operator()(const base::trace_event::StackFrame& frame) const;
+};
+
+template <>
+struct BASE_EXPORT hash<base::trace_event::Backtrace> {
+ size_t operator()(const base::trace_event::Backtrace& backtrace) const;
+};
+
+template <>
+struct BASE_EXPORT hash<base::trace_event::AllocationContext> {
+ size_t operator()(const base::trace_event::AllocationContext& context) const;
+};
+
+} // BASE_HASH_NAMESPACE
+
+#endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_H_
diff --git a/libchrome/base/trace_event/heap_profiler_allocation_context_tracker.cc b/libchrome/base/trace_event/heap_profiler_allocation_context_tracker.cc
new file mode 100644
index 0000000..31f311a
--- /dev/null
+++ b/libchrome/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -0,0 +1,246 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+
+#include <algorithm>
+#include <iterator>
+
+#include "base/atomicops.h"
+#include "base/debug/leak_annotations.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_local_storage.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+#include <sys/prctl.h>
+#endif
+
+namespace base {
+namespace trace_event {
+
+subtle::Atomic32 AllocationContextTracker::capture_mode_ =
+ static_cast<int32_t>(AllocationContextTracker::CaptureMode::DISABLED);
+
+namespace {
+
+const size_t kMaxStackDepth = 128u;
+const size_t kMaxTaskDepth = 16u;
+AllocationContextTracker* const kInitializingSentinel =
+ reinterpret_cast<AllocationContextTracker*>(-1);
+const char kTracingOverhead[] = "tracing_overhead";
+
+ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER;
+
+// This function is added to the TLS slot to clean up the instance when the
+// thread exits.
+void DestructAllocationContextTracker(void* alloc_ctx_tracker) {
+ delete static_cast<AllocationContextTracker*>(alloc_ctx_tracker);
+}
+
+// Cannot call ThreadIdNameManager::GetName because it holds a lock and causes
+// deadlock when lock is already held by ThreadIdNameManager before the current
+// allocation. Gets the thread name from kernel if available or returns a string
+// with id. This function intentionally leaks the allocated strings since they
+// are used to tag allocations even after the thread dies.
+const char* GetAndLeakThreadName() {
+ char name[16];
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+ // If the thread name is not set, try to get it from prctl. Thread name might
+ // not be set in cases where the thread started before heap profiling was
+ // enabled.
+ int err = prctl(PR_GET_NAME, name);
+ if (!err) {
+ return strdup(name);
+ }
+#endif // defined(OS_LINUX) || defined(OS_ANDROID)
+
+ // Use tid if we don't have a thread name.
+ snprintf(name, sizeof(name), "%lu",
+ static_cast<unsigned long>(PlatformThread::CurrentId()));
+ return strdup(name);
+}
+
+} // namespace
+
+// static
+AllocationContextTracker*
+AllocationContextTracker::GetInstanceForCurrentThread() {
+ AllocationContextTracker* tracker =
+ static_cast<AllocationContextTracker*>(g_tls_alloc_ctx_tracker.Get());
+ if (tracker == kInitializingSentinel)
+ return nullptr; // Re-entrancy case.
+
+ if (!tracker) {
+ g_tls_alloc_ctx_tracker.Set(kInitializingSentinel);
+ tracker = new AllocationContextTracker();
+ g_tls_alloc_ctx_tracker.Set(tracker);
+ }
+
+ return tracker;
+}
+
+AllocationContextTracker::AllocationContextTracker()
+ : thread_name_(nullptr), ignore_scope_depth_(0) {
+ pseudo_stack_.reserve(kMaxStackDepth);
+ task_contexts_.reserve(kMaxTaskDepth);
+}
+AllocationContextTracker::~AllocationContextTracker() {}
+
+// static
+void AllocationContextTracker::SetCurrentThreadName(const char* name) {
+ if (name && capture_mode() != CaptureMode::DISABLED) {
+ GetInstanceForCurrentThread()->thread_name_ = name;
+ }
+}
+
+// static
+void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
+ // When enabling capturing, also initialize the TLS slot. This does not create
+ // a TLS instance yet.
+ if (mode != CaptureMode::DISABLED && !g_tls_alloc_ctx_tracker.initialized())
+ g_tls_alloc_ctx_tracker.Initialize(DestructAllocationContextTracker);
+
+ // Release ordering ensures that when a thread observes |capture_mode_| to
+ // be true through an acquire load, the TLS slot has been initialized.
+ subtle::Release_Store(&capture_mode_, static_cast<int32_t>(mode));
+}
+
+void AllocationContextTracker::PushPseudoStackFrame(
+ const char* trace_event_name) {
+ // Impose a limit on the height to verify that every push is popped, because
+ // in practice the pseudo stack never grows higher than ~20 frames.
+ if (pseudo_stack_.size() < kMaxStackDepth)
+ pseudo_stack_.push_back(trace_event_name);
+ else
+ NOTREACHED();
+}
+
+void AllocationContextTracker::PopPseudoStackFrame(
+ const char* trace_event_name) {
+ // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
+ // scope, the frame was never pushed, so it is possible that pop is called
+ // on an empty stack.
+ if (pseudo_stack_.empty())
+ return;
+
+ // Assert that pushes and pops are nested correctly. This DCHECK can be
+ // hit if some TRACE_EVENT macro is unbalanced (a TRACE_EVENT_END* call
+ // without a corresponding TRACE_EVENT_BEGIN).
+ DCHECK_EQ(trace_event_name, pseudo_stack_.back())
+ << "Encountered an unmatched TRACE_EVENT_END";
+
+ pseudo_stack_.pop_back();
+}
+
+void AllocationContextTracker::PushCurrentTaskContext(const char* context) {
+ DCHECK(context);
+ if (task_contexts_.size() < kMaxTaskDepth)
+ task_contexts_.push_back(context);
+ else
+ NOTREACHED();
+}
+
+void AllocationContextTracker::PopCurrentTaskContext(const char* context) {
+ // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
+ // scope, the context was never pushed, so it is possible that pop is called
+ // on an empty stack.
+ if (task_contexts_.empty())
+ return;
+
+ DCHECK_EQ(context, task_contexts_.back())
+ << "Encountered an unmatched context end";
+ task_contexts_.pop_back();
+}
+
+// static
+AllocationContext AllocationContextTracker::GetContextSnapshot() {
+ AllocationContext ctx;
+
+ if (ignore_scope_depth_) {
+ ctx.backtrace.frames[0] = StackFrame::FromTraceEventName(kTracingOverhead);
+ ctx.type_name = kTracingOverhead;
+ ctx.backtrace.frame_count = 1;
+ return ctx;
+ }
+
+ CaptureMode mode = static_cast<CaptureMode>(
+ subtle::NoBarrier_Load(&capture_mode_));
+
+ auto* backtrace = std::begin(ctx.backtrace.frames);
+ auto* backtrace_end = std::end(ctx.backtrace.frames);
+
+ if (!thread_name_) {
+ // Ignore the string allocation made by GetAndLeakThreadName to avoid
+ // reentrancy.
+ ignore_scope_depth_++;
+ thread_name_ = GetAndLeakThreadName();
+ ANNOTATE_LEAKING_OBJECT_PTR(thread_name_);
+ DCHECK(thread_name_);
+ ignore_scope_depth_--;
+ }
+
+ // Add the thread name as the first entry in pseudo stack.
+ if (thread_name_) {
+ *backtrace++ = StackFrame::FromThreadName(thread_name_);
+ }
+
+ switch (mode) {
+ case CaptureMode::DISABLED:
+ {
+ break;
+ }
+ case CaptureMode::PSEUDO_STACK:
+ {
+ for (const char* event_name: pseudo_stack_) {
+ if (backtrace == backtrace_end) {
+ break;
+ }
+ *backtrace++ = StackFrame::FromTraceEventName(event_name);
+ }
+ break;
+ }
+ case CaptureMode::NATIVE_STACK:
+ {
+ // Backtrace contract requires us to return bottom frames, i.e.
+ // from main() and up. Stack unwinding produces top frames, i.e.
+ // from this point and up until main(). We request many frames to
+ // make sure we reach main(), and then copy bottom portion of them.
+ const void* frames[128];
+ static_assert(arraysize(frames) >= Backtrace::kMaxFrameCount,
+ "not requesting enough frames to fill Backtrace");
+#if HAVE_TRACE_STACK_FRAME_POINTERS && !defined(OS_NACL)
+ size_t frame_count = debug::TraceStackFramePointers(
+ frames,
+ arraysize(frames),
+ 1 /* exclude this function from the trace */ );
+#else
+ size_t frame_count = 0;
+ NOTREACHED();
+#endif
+
+ // Copy frames backwards
+ size_t backtrace_capacity = backtrace_end - backtrace;
+ size_t top_frame_index = (backtrace_capacity >= frame_count) ?
+ 0 :
+ frame_count - backtrace_capacity;
+ for (size_t i = frame_count; i > top_frame_index;) {
+ const void* frame = frames[--i];
+ *backtrace++ = StackFrame::FromProgramCounter(frame);
+ }
+ break;
+ }
+ }
+
+ ctx.backtrace.frame_count = backtrace - std::begin(ctx.backtrace.frames);
+
+ // TODO(ssid): Fix crbug.com/594803 to add file name as 3rd dimension
+ // (component name) in the heap profiler and not piggy back on the type name.
+ ctx.type_name = task_contexts_.empty() ? nullptr : task_contexts_.back();
+
+ return ctx;
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/heap_profiler_allocation_context_tracker.h b/libchrome/base/trace_event/heap_profiler_allocation_context_tracker.h
new file mode 100644
index 0000000..454200c
--- /dev/null
+++ b/libchrome/base/trace_event/heap_profiler_allocation_context_tracker.h
@@ -0,0 +1,110 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_
+
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/debug/stack_trace.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+
+namespace base {
+namespace trace_event {
+
+// The allocation context tracker keeps track of thread-local context for heap
+// profiling. It includes a pseudo stack of trace events. On every allocation
+// the tracker provides a snapshot of its context in the form of an
+// |AllocationContext| that is to be stored together with the allocation
+// details.
+class BASE_EXPORT AllocationContextTracker {
+ public:
+ enum class CaptureMode: int32_t {
+ DISABLED, // Don't capture anything
+ PSEUDO_STACK, // GetContextSnapshot() returns pseudo stack trace
+ NATIVE_STACK // GetContextSnapshot() returns native (real) stack trace
+ };
+
+ // Globally sets capturing mode.
+ // TODO(primiano): How to guard against *_STACK -> DISABLED -> *_STACK?
+ static void SetCaptureMode(CaptureMode mode);
+
+ // Returns global capturing mode.
+ inline static CaptureMode capture_mode() {
+ // A little lag after heap profiling is enabled or disabled is fine, it is
+ // more important that the check is as cheap as possible when capturing is
+ // not enabled, so do not issue a memory barrier in the fast path.
+ if (subtle::NoBarrier_Load(&capture_mode_) ==
+ static_cast<int32_t>(CaptureMode::DISABLED))
+ return CaptureMode::DISABLED;
+
+ // In the slow path, an acquire load is required to pair with the release
+ // store in |SetCaptureMode|. This is to ensure that the TLS slot for
+ // the thread-local allocation context tracker has been initialized if
+ // |capture_mode| returns something other than DISABLED.
+ return static_cast<CaptureMode>(subtle::Acquire_Load(&capture_mode_));
+ }
+
+ // Returns the thread-local instance, creating one if necessary. Returns
+ // always a valid instance, unless it is called re-entrantly, in which case
+ // returns nullptr in the nested calls.
+ static AllocationContextTracker* GetInstanceForCurrentThread();
+
+ // Set the thread name in the AllocationContextTracker of the current thread
+ // if capture is enabled.
+ static void SetCurrentThreadName(const char* name);
+
+ // Starts and ends a new ignore scope between which the allocations are
+ // ignored in the heap profiler. A dummy context that short circuits to
+ // "tracing_overhead" is returned for these allocations.
+ void begin_ignore_scope() { ignore_scope_depth_++; }
+ void end_ignore_scope() {
+ if (ignore_scope_depth_)
+ ignore_scope_depth_--;
+ }
+
+ // Pushes a frame onto the thread-local pseudo stack.
+ void PushPseudoStackFrame(const char* trace_event_name);
+
+ // Pops a frame from the thread-local pseudo stack.
+ void PopPseudoStackFrame(const char* trace_event_name);
+
+ // Push and pop current task's context. A stack is used to support nested
+ // tasks and the top of the stack will be used in allocation context.
+ void PushCurrentTaskContext(const char* context);
+ void PopCurrentTaskContext(const char* context);
+
+ // Returns a snapshot of the current thread-local context.
+ AllocationContext GetContextSnapshot();
+
+ ~AllocationContextTracker();
+
+ private:
+ AllocationContextTracker();
+
+ static subtle::Atomic32 capture_mode_;
+
+ // The pseudo stack where frames are |TRACE_EVENT| names.
+ std::vector<const char*> pseudo_stack_;
+
+ // The thread name is used as the first entry in the pseudo stack.
+ const char* thread_name_;
+
+ // Stack of tasks' contexts. Context serves as a different dimension than
+ // pseudo stack to cluster allocations.
+ std::vector<const char*> task_contexts_;
+
+ uint32_t ignore_scope_depth_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationContextTracker);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_
diff --git a/libchrome/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/libchrome/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
new file mode 100644
index 0000000..3064a6a
--- /dev/null
+++ b/libchrome/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -0,0 +1,285 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <iterator>
+
+#include "base/memory/ref_counted.h"
+#include "base/pending_task.h"
+#include "base/trace_event/heap_profiler.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/trace_event.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+// Define all strings once, because the pseudo stack requires pointer equality,
+// and string interning is unreliable.
+const char kThreadName[] = "TestThread";
+const char kCupcake[] = "Cupcake";
+const char kDonut[] = "Donut";
+const char kEclair[] = "Eclair";
+const char kFroyo[] = "Froyo";
+const char kGingerbread[] = "Gingerbread";
+
+// Asserts that the fixed-size array |expected_backtrace| matches the backtrace
+// in |AllocationContextTracker::GetContextSnapshot|.
+template <size_t N>
+void AssertBacktraceEquals(const StackFrame(&expected_backtrace)[N]) {
+ AllocationContext ctx =
+ AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot();
+
+ auto* actual = std::begin(ctx.backtrace.frames);
+ auto* actual_bottom = actual + ctx.backtrace.frame_count;
+ auto expected = std::begin(expected_backtrace);
+ auto expected_bottom = std::end(expected_backtrace);
+
+ // Note that this requires the pointers to be equal, this is not doing a deep
+ // string comparison.
+ for (; actual != actual_bottom && expected != expected_bottom;
+ actual++, expected++)
+ ASSERT_EQ(*expected, *actual);
+
+ // Ensure that the height of the stacks is the same.
+ ASSERT_EQ(actual, actual_bottom);
+ ASSERT_EQ(expected, expected_bottom);
+}
+
+void AssertBacktraceContainsOnlyThreadName() {
+ StackFrame t = StackFrame::FromThreadName(kThreadName);
+ AllocationContext ctx =
+ AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot();
+
+ ASSERT_EQ(1u, ctx.backtrace.frame_count);
+ ASSERT_EQ(t, ctx.backtrace.frames[0]);
+}
+
+class AllocationContextTrackerTest : public testing::Test {
+ public:
+ void SetUp() override {
+ TraceConfig config("");
+ TraceLog::GetInstance()->SetEnabled(config, TraceLog::RECORDING_MODE);
+ AllocationContextTracker::SetCaptureMode(
+ AllocationContextTracker::CaptureMode::PSEUDO_STACK);
+ AllocationContextTracker::SetCurrentThreadName(kThreadName);
+ }
+
+ void TearDown() override {
+ AllocationContextTracker::SetCaptureMode(
+ AllocationContextTracker::CaptureMode::DISABLED);
+ TraceLog::GetInstance()->SetDisabled();
+ }
+};
+
+// Check that |TRACE_EVENT| macros push and pop to the pseudo stack correctly.
+TEST_F(AllocationContextTrackerTest, PseudoStackScopedTrace) {
+ StackFrame t = StackFrame::FromThreadName(kThreadName);
+ StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+ StackFrame d = StackFrame::FromTraceEventName(kDonut);
+ StackFrame e = StackFrame::FromTraceEventName(kEclair);
+ StackFrame f = StackFrame::FromTraceEventName(kFroyo);
+
+ AssertBacktraceContainsOnlyThreadName();
+
+ {
+ TRACE_EVENT0("Testing", kCupcake);
+ StackFrame frame_c[] = {t, c};
+ AssertBacktraceEquals(frame_c);
+
+ {
+ TRACE_EVENT0("Testing", kDonut);
+ StackFrame frame_cd[] = {t, c, d};
+ AssertBacktraceEquals(frame_cd);
+ }
+
+ AssertBacktraceEquals(frame_c);
+
+ {
+ TRACE_EVENT0("Testing", kEclair);
+ StackFrame frame_ce[] = {t, c, e};
+ AssertBacktraceEquals(frame_ce);
+ }
+
+ AssertBacktraceEquals(frame_c);
+ }
+
+ AssertBacktraceContainsOnlyThreadName();
+
+ {
+ TRACE_EVENT0("Testing", kFroyo);
+ StackFrame frame_f[] = {t, f};
+ AssertBacktraceEquals(frame_f);
+ }
+
+ AssertBacktraceContainsOnlyThreadName();
+}
+
+// Same as |PseudoStackScopedTrace|, but now test the |TRACE_EVENT_BEGIN| and
+// |TRACE_EVENT_END| macros.
+TEST_F(AllocationContextTrackerTest, PseudoStackBeginEndTrace) {
+ StackFrame t = StackFrame::FromThreadName(kThreadName);
+ StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+ StackFrame d = StackFrame::FromTraceEventName(kDonut);
+ StackFrame e = StackFrame::FromTraceEventName(kEclair);
+ StackFrame f = StackFrame::FromTraceEventName(kFroyo);
+
+ StackFrame frame_c[] = {t, c};
+ StackFrame frame_cd[] = {t, c, d};
+ StackFrame frame_ce[] = {t, c, e};
+ StackFrame frame_f[] = {t, f};
+
+ AssertBacktraceContainsOnlyThreadName();
+
+ TRACE_EVENT_BEGIN0("Testing", kCupcake);
+ AssertBacktraceEquals(frame_c);
+
+ TRACE_EVENT_BEGIN0("Testing", kDonut);
+ AssertBacktraceEquals(frame_cd);
+ TRACE_EVENT_END0("Testing", kDonut);
+
+ AssertBacktraceEquals(frame_c);
+
+ TRACE_EVENT_BEGIN0("Testing", kEclair);
+ AssertBacktraceEquals(frame_ce);
+ TRACE_EVENT_END0("Testing", kEclair);
+
+ AssertBacktraceEquals(frame_c);
+ TRACE_EVENT_END0("Testing", kCupcake);
+
+ AssertBacktraceContainsOnlyThreadName();
+
+ TRACE_EVENT_BEGIN0("Testing", kFroyo);
+ AssertBacktraceEquals(frame_f);
+ TRACE_EVENT_END0("Testing", kFroyo);
+
+ AssertBacktraceContainsOnlyThreadName();
+}
+
+TEST_F(AllocationContextTrackerTest, PseudoStackMixedTrace) {
+ StackFrame t = StackFrame::FromThreadName(kThreadName);
+ StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+ StackFrame d = StackFrame::FromTraceEventName(kDonut);
+ StackFrame e = StackFrame::FromTraceEventName(kEclair);
+ StackFrame f = StackFrame::FromTraceEventName(kFroyo);
+
+ StackFrame frame_c[] = {t, c};
+ StackFrame frame_cd[] = {t, c, d};
+ StackFrame frame_e[] = {t, e};
+ StackFrame frame_ef[] = {t, e, f};
+
+ AssertBacktraceContainsOnlyThreadName();
+
+ TRACE_EVENT_BEGIN0("Testing", kCupcake);
+ AssertBacktraceEquals(frame_c);
+
+ {
+ TRACE_EVENT0("Testing", kDonut);
+ AssertBacktraceEquals(frame_cd);
+ }
+
+ AssertBacktraceEquals(frame_c);
+ TRACE_EVENT_END0("Testing", kCupcake);
+ AssertBacktraceContainsOnlyThreadName();
+
+ {
+ TRACE_EVENT0("Testing", kEclair);
+ AssertBacktraceEquals(frame_e);
+
+ TRACE_EVENT_BEGIN0("Testing", kFroyo);
+ AssertBacktraceEquals(frame_ef);
+ TRACE_EVENT_END0("Testing", kFroyo);
+ AssertBacktraceEquals(frame_e);
+ }
+
+ AssertBacktraceContainsOnlyThreadName();
+}
+
+TEST_F(AllocationContextTrackerTest, BacktraceTakesTop) {
+ StackFrame t = StackFrame::FromThreadName(kThreadName);
+ StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+ StackFrame f = StackFrame::FromTraceEventName(kFroyo);
+
+ // Push 11 events onto the pseudo stack.
+ TRACE_EVENT0("Testing", kCupcake);
+ TRACE_EVENT0("Testing", kCupcake);
+ TRACE_EVENT0("Testing", kCupcake);
+
+ TRACE_EVENT0("Testing", kCupcake);
+ TRACE_EVENT0("Testing", kCupcake);
+ TRACE_EVENT0("Testing", kCupcake);
+ TRACE_EVENT0("Testing", kCupcake);
+
+ TRACE_EVENT0("Testing", kCupcake);
+ TRACE_EVENT0("Testing", kDonut);
+ TRACE_EVENT0("Testing", kEclair);
+ TRACE_EVENT0("Testing", kFroyo);
+
+ {
+ TRACE_EVENT0("Testing", kGingerbread);
+ AllocationContext ctx =
+ AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot();
+
+ // The pseudo stack relies on pointer equality, not deep string comparisons.
+ ASSERT_EQ(t, ctx.backtrace.frames[0]);
+ ASSERT_EQ(c, ctx.backtrace.frames[1]);
+ ASSERT_EQ(f, ctx.backtrace.frames[11]);
+ }
+
+ {
+ AllocationContext ctx =
+ AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot();
+ ASSERT_EQ(t, ctx.backtrace.frames[0]);
+ ASSERT_EQ(c, ctx.backtrace.frames[1]);
+ ASSERT_EQ(f, ctx.backtrace.frames[11]);
+ }
+}
+
+TEST_F(AllocationContextTrackerTest, TrackTaskContext) {
+ const char kContext1[] = "context1";
+ const char kContext2[] = "context2";
+ {
+ // The context from the scoped task event should be used as type name.
+ TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION event1(kContext1);
+ AllocationContext ctx1 =
+ AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot();
+ ASSERT_EQ(kContext1, ctx1.type_name);
+
+ // In case of nested events, the last event's context should be used.
+ TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION event2(kContext2);
+ AllocationContext ctx2 =
+ AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot();
+ ASSERT_EQ(kContext2, ctx2.type_name);
+ }
+
+ // Type should be nullptr without task event.
+ AllocationContext ctx =
+ AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot();
+ ASSERT_FALSE(ctx.type_name);
+}
+
+TEST_F(AllocationContextTrackerTest, IgnoreAllocationTest) {
+ TRACE_EVENT0("Testing", kCupcake);
+ TRACE_EVENT0("Testing", kDonut);
+ HEAP_PROFILER_SCOPED_IGNORE;
+ AllocationContext ctx =
+ AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot();
+ const StringPiece kTracingOverhead("tracing_overhead");
+ ASSERT_EQ(kTracingOverhead,
+ static_cast<const char*>(ctx.backtrace.frames[0].value));
+ ASSERT_EQ(1u, ctx.backtrace.frame_count);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/heap_profiler_allocation_register.cc b/libchrome/base/trace_event/heap_profiler_allocation_register.cc
new file mode 100644
index 0000000..2c2cd37
--- /dev/null
+++ b/libchrome/base/trace_event/heap_profiler_allocation_register.cc
@@ -0,0 +1,180 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_allocation_register.h"
+
+#include <algorithm>
+
+#include "base/trace_event/trace_event_memory_overhead.h"
+
+namespace base {
+namespace trace_event {
+
+AllocationRegister::ConstIterator::ConstIterator(
+ const AllocationRegister& alloc_register, AllocationIndex index)
+ : register_(alloc_register),
+ index_(index) {}
+
+void AllocationRegister::ConstIterator::operator++() {
+ index_ = register_.allocations_.Next(index_ + 1);
+}
+
+bool AllocationRegister::ConstIterator::operator!=(
+ const ConstIterator& other) const {
+ return index_ != other.index_;
+}
+
+AllocationRegister::Allocation
+AllocationRegister::ConstIterator::operator*() const {
+ return register_.GetAllocation(index_);
+}
+
+size_t AllocationRegister::BacktraceHasher::operator () (
+ const Backtrace& backtrace) const {
+ const size_t kSampleLength = 10;
+
+ uintptr_t total_value = 0;
+
+ size_t head_end = std::min(backtrace.frame_count, kSampleLength);
+ for (size_t i = 0; i != head_end; ++i) {
+ total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
+ }
+
+ size_t tail_start = backtrace.frame_count -
+ std::min(backtrace.frame_count - head_end, kSampleLength);
+ for (size_t i = tail_start; i != backtrace.frame_count; ++i) {
+ total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
+ }
+
+ total_value += backtrace.frame_count;
+
+ // These magic constants give best results in terms of average collisions
+ // per backtrace. They were found by replaying real backtraces from Linux
+ // and Android against different hash functions.
+ return (total_value * 131101) >> 14;
+}
+
+size_t AllocationRegister::AddressHasher::operator () (
+ const void* address) const {
+ // The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
+// been chosen carefully based on measurements with real-world data (addresses
+ // recorded from a Chrome trace run). It is the first prime after 2^17. For
+ // |shift|, 13, 14 and 15 yield good results. These values are tuned to 2^18
+ // buckets. Microbenchmarks show that this simple scheme outperforms fancy
+ // hashes like Murmur3 by 20 to 40 percent.
+ const uintptr_t key = reinterpret_cast<uintptr_t>(address);
+ const uintptr_t a = 131101;
+ const uintptr_t shift = 14;
+ const uintptr_t h = (key * a) >> shift;
+ return h;
+}
+
+AllocationRegister::AllocationRegister()
+ : AllocationRegister(kAllocationCapacity, kBacktraceCapacity) {}
+
+AllocationRegister::AllocationRegister(size_t allocation_capacity,
+ size_t backtrace_capacity)
+ : allocations_(allocation_capacity),
+ backtraces_(backtrace_capacity) {}
+
+AllocationRegister::~AllocationRegister() {
+}
+
+void AllocationRegister::Insert(const void* address,
+ size_t size,
+ const AllocationContext& context) {
+ DCHECK(address != nullptr);
+ if (size == 0) {
+ return;
+ }
+
+ AllocationInfo info = {
+ size,
+ context.type_name,
+ InsertBacktrace(context.backtrace)
+ };
+
+ // Try to insert the allocation.
+ auto index_and_flag = allocations_.Insert(address, info);
+ if (!index_and_flag.second) {
+ // |address| is already there - overwrite the allocation info.
+ auto& old_info = allocations_.Get(index_and_flag.first).second;
+ RemoveBacktrace(old_info.backtrace_index);
+ old_info = info;
+ }
+}
+
+void AllocationRegister::Remove(const void* address) {
+ auto index = allocations_.Find(address);
+ if (index == AllocationMap::kInvalidKVIndex) {
+ return;
+ }
+
+ const AllocationInfo& info = allocations_.Get(index).second;
+ RemoveBacktrace(info.backtrace_index);
+ allocations_.Remove(index);
+}
+
+bool AllocationRegister::Get(const void* address,
+ Allocation* out_allocation) const {
+ auto index = allocations_.Find(address);
+ if (index == AllocationMap::kInvalidKVIndex) {
+ return false;
+ }
+
+ if (out_allocation) {
+ *out_allocation = GetAllocation(index);
+ }
+ return true;
+}
+
+AllocationRegister::ConstIterator AllocationRegister::begin() const {
+ return ConstIterator(*this, allocations_.Next(0));
+}
+
+AllocationRegister::ConstIterator AllocationRegister::end() const {
+ return ConstIterator(*this, AllocationMap::kInvalidKVIndex);
+}
+
+void AllocationRegister::EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) const {
+ size_t allocated = sizeof(AllocationRegister);
+ size_t resident = sizeof(AllocationRegister)
+ + allocations_.EstimateUsedMemory()
+ + backtraces_.EstimateUsedMemory();
+ overhead->Add("AllocationRegister", allocated, resident);
+}
+
+AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
+ const Backtrace& backtrace) {
+ auto index = backtraces_.Insert(backtrace, 0).first;
+ auto& backtrace_and_count = backtraces_.Get(index);
+ backtrace_and_count.second++;
+ return index;
+}
+
+void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
+ auto& backtrace_and_count = backtraces_.Get(index);
+ if (--backtrace_and_count.second == 0) {
+ // Backtrace is not referenced anymore - remove it.
+ backtraces_.Remove(index);
+ }
+}
+
+AllocationRegister::Allocation AllocationRegister::GetAllocation(
+ AllocationMap::KVIndex index) const {
+ const auto& address_and_info = allocations_.Get(index);
+ const auto& backtrace_and_count = backtraces_.Get(
+ address_and_info.second.backtrace_index);
+ return {
+ address_and_info.first,
+ address_and_info.second.size,
+ AllocationContext(
+ backtrace_and_count.first,
+ address_and_info.second.type_name)
+ };
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/heap_profiler_allocation_register.h b/libchrome/base/trace_event/heap_profiler_allocation_register.h
new file mode 100644
index 0000000..86e2721
--- /dev/null
+++ b/libchrome/base/trace_event/heap_profiler_allocation_register.h
@@ -0,0 +1,356 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+
+#include "base/bits.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/process/process_metrics.h"
+#include "base/template_util.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+
+namespace base {
+namespace trace_event {
+
+class AllocationRegisterTest;
+
+namespace internal {
+
+// Allocates a region of virtual address space of |size| rounded up to the
+// system page size. The memory is zeroed by the system. A guard page is
+// added after the end.
+void* AllocateGuardedVirtualMemory(size_t size);
+
+// Frees a region of virtual address space allocated by a call to
+// |AllocateGuardedVirtualMemory|.
+void FreeGuardedVirtualMemory(void* address, size_t allocated_size);
+
+// Hash map that mmaps memory only once in the constructor. Its API is
+// similar to std::unordered_map, but entries are addressed by index (KVIndex).
+template <size_t NumBuckets, class Key, class Value, class KeyHasher>
+class FixedHashMap {
+ // To keep things simple we don't call destructors.
+ static_assert(is_trivially_destructible<Key>::value &&
+ is_trivially_destructible<Value>::value,
+ "Key and Value shouldn't have destructors");
+ public:
+ using KVPair = std::pair<const Key, Value>;
+
+ // For implementation simplicity API uses integer index instead
+  // of iterators. Most operations (except Next) on KVIndex
+ // are O(1).
+ using KVIndex = size_t;
+ static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
+
+ // Capacity controls how many items this hash map can hold, and largely
+ // affects memory footprint.
+ FixedHashMap(size_t capacity)
+ : num_cells_(capacity),
+ cells_(static_cast<Cell*>(
+ AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
+ buckets_(static_cast<Bucket*>(
+ AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
+ free_list_(nullptr),
+ next_unused_cell_(0) {}
+
+ ~FixedHashMap() {
+ FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell));
+ FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket));
+ }
+
+ std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) {
+ Cell** p_cell = Lookup(key);
+ Cell* cell = *p_cell;
+ if (cell) {
+ return {static_cast<KVIndex>(cell - cells_), false}; // not inserted
+ }
+
+ // Get a free cell and link it.
+ *p_cell = cell = GetFreeCell();
+ cell->p_prev = p_cell;
+ cell->next = nullptr;
+
+ // Initialize key/value pair. Since key is 'const Key' this is the
+ // only way to initialize it.
+ new (&cell->kv) KVPair(key, value);
+
+ return {static_cast<KVIndex>(cell - cells_), true}; // inserted
+ }
+
+ void Remove(KVIndex index) {
+ DCHECK_LT(index, next_unused_cell_);
+
+ Cell* cell = &cells_[index];
+
+ // Unlink the cell.
+ *cell->p_prev = cell->next;
+ if (cell->next) {
+ cell->next->p_prev = cell->p_prev;
+ }
+ cell->p_prev = nullptr; // mark as free
+
+ // Add it to the free list.
+ cell->next = free_list_;
+ free_list_ = cell;
+ }
+
+ KVIndex Find(const Key& key) const {
+ Cell* cell = *Lookup(key);
+ return cell ? static_cast<KVIndex>(cell - cells_) : kInvalidKVIndex;
+ }
+
+ KVPair& Get(KVIndex index) {
+ return cells_[index].kv;
+ }
+
+ const KVPair& Get(KVIndex index) const {
+ return cells_[index].kv;
+ }
+
+ // Finds next index that has a KVPair associated with it. Search starts
+ // with the specified index. Returns kInvalidKVIndex if nothing was found.
+ // To find the first valid index, call this function with 0. Continue
+ // calling with the last_index + 1 until kInvalidKVIndex is returned.
+ KVIndex Next(KVIndex index) const {
+ for (;index < next_unused_cell_; ++index) {
+ if (cells_[index].p_prev) {
+ return index;
+ }
+ }
+ return kInvalidKVIndex;
+ }
+
+ // Estimates number of bytes used in allocated memory regions.
+ size_t EstimateUsedMemory() const {
+ size_t page_size = base::GetPageSize();
+ // |next_unused_cell_| is the first cell that wasn't touched, i.e.
+ // it's the number of touched cells.
+ return bits::Align(sizeof(Cell) * next_unused_cell_, page_size) +
+ bits::Align(sizeof(Bucket) * NumBuckets, page_size);
+ }
+
+ private:
+ friend base::trace_event::AllocationRegisterTest;
+
+ struct Cell {
+ KVPair kv;
+ Cell* next;
+
+ // Conceptually this is |prev| in a doubly linked list. However, buckets
+ // also participate in the bucket's cell list - they point to the list's
+ // head and also need to be linked / unlinked properly. To treat these two
+ // cases uniformly, instead of |prev| we're storing "pointer to a Cell*
+ // that points to this Cell" kind of thing. So |p_prev| points to a bucket
+ // for the first cell in a list, and points to |next| of the previous cell
+ // for any other cell. With that Lookup() is the only function that handles
+ // buckets / cells differently.
+ // If |p_prev| is nullptr, the cell is in the free list.
+ Cell** p_prev;
+ };
+
+ using Bucket = Cell*;
+
+ // Returns a pointer to the cell that contains or should contain the entry
+ // for |key|. The pointer may point at an element of |buckets_| or at the
+ // |next| member of an element of |cells_|.
+ Cell** Lookup(const Key& key) const {
+ // The list head is in |buckets_| at the hash offset.
+ Cell** p_cell = &buckets_[Hash(key)];
+
+ // Chase down the list until the cell that holds |key| is found,
+ // or until the list ends.
+ while (*p_cell && (*p_cell)->kv.first != key) {
+ p_cell = &(*p_cell)->next;
+ }
+
+ return p_cell;
+ }
+
+ // Returns a cell that is not being used to store an entry (either by
+ // recycling from the free list or by taking a fresh cell).
+ Cell* GetFreeCell() {
+ // First try to re-use a cell from the free list.
+ if (free_list_) {
+ Cell* cell = free_list_;
+ free_list_ = cell->next;
+ return cell;
+ }
+
+ // Otherwise pick the next cell that has not been touched before.
+ size_t idx = next_unused_cell_;
+ next_unused_cell_++;
+
+ // If the hash table has too little capacity (when too little address space
+ // was reserved for |cells_|), |next_unused_cell_| can be an index outside
+ // of the allocated storage. A guard page is allocated there to crash the
+ // program in that case. There are alternative solutions:
+ // - Deal with it, increase capacity by reallocating |cells_|.
+ // - Refuse to insert and let the caller deal with it.
+ // Because free cells are re-used before accessing fresh cells with a higher
+ // index, and because reserving address space without touching it is cheap,
+ // the simplest solution is to just allocate a humongous chunk of address
+ // space.
+
+ DCHECK_LT(next_unused_cell_, num_cells_ + 1);
+
+ return &cells_[idx];
+ }
+
+ // Returns a value in the range [0, NumBuckets - 1] (inclusive).
+ size_t Hash(const Key& key) const {
+ if (NumBuckets == (NumBuckets & ~(NumBuckets - 1))) {
+ // NumBuckets is a power of 2.
+ return KeyHasher()(key) & (NumBuckets - 1);
+ } else {
+ return KeyHasher()(key) % NumBuckets;
+ }
+ }
+
+ // Number of cells.
+ size_t const num_cells_;
+
+ // The array of cells. This array is backed by mmapped memory. Lower indices
+ // are accessed first, higher indices are accessed only when the |free_list_|
+ // is empty. This is to minimize the amount of resident memory used.
+ Cell* const cells_;
+
+ // The array of buckets (pointers into |cells_|). |buckets_[Hash(key)]| will
+ // contain the pointer to the linked list of cells for |Hash(key)|.
+ // This array is backed by mmapped memory.
+ mutable Bucket* buckets_;
+
+ // The head of the free list.
+ Cell* free_list_;
+
+ // The index of the first element of |cells_| that has not been used before.
+ // If the free list is empty and a new cell is needed, the cell at this index
+ // is used. This is the high water mark for the number of entries stored.
+ size_t next_unused_cell_;
+
+ DISALLOW_COPY_AND_ASSIGN(FixedHashMap);
+};
+
+} // namespace internal
+
+class TraceEventMemoryOverhead;
+
+// The allocation register keeps track of all allocations that have not been
+// freed. Internally it has two hashtables: one for Backtraces and one for
+// actual allocations. Sizes of both hashtables are fixed, and this class
+// allocates (mmaps) only in its constructor.
+class BASE_EXPORT AllocationRegister {
+ public:
+ // Details about an allocation.
+ struct Allocation {
+ const void* address;
+ size_t size;
+ AllocationContext context;
+ };
+
+ // An iterator that iterates entries in no particular order.
+ class BASE_EXPORT ConstIterator {
+ public:
+ void operator++();
+ bool operator!=(const ConstIterator& other) const;
+ Allocation operator*() const;
+
+ private:
+ friend class AllocationRegister;
+ using AllocationIndex = size_t;
+
+ ConstIterator(const AllocationRegister& alloc_register,
+ AllocationIndex index);
+
+ const AllocationRegister& register_;
+ AllocationIndex index_;
+ };
+
+ AllocationRegister();
+ AllocationRegister(size_t allocation_capacity, size_t backtrace_capacity);
+
+ ~AllocationRegister();
+
+ // Inserts allocation details into the table. If the address was present
+ // already, its details are updated. |address| must not be null.
+ void Insert(const void* address,
+ size_t size,
+ const AllocationContext& context);
+
+ // Removes the address from the table if it is present. It is ok to call this
+ // with a null pointer.
+ void Remove(const void* address);
+
+ // Finds allocation for the address and fills |out_allocation|.
+ bool Get(const void* address, Allocation* out_allocation) const;
+
+ ConstIterator begin() const;
+ ConstIterator end() const;
+
+ // Estimates memory overhead including |sizeof(AllocationRegister)|.
+ void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const;
+
+ private:
+ friend AllocationRegisterTest;
+
+ // Expect max 1.5M allocations. Number of buckets is 2^18 for optimal
+ // hashing and should be changed together with AddressHasher.
+ static const size_t kAllocationBuckets = 1 << 18;
+ static const size_t kAllocationCapacity = 1500000;
+
+ // Expect max 2^15 unique backtraces. Can be changed to 2^16 without
+ // needing to tweak BacktraceHasher implementation.
+ static const size_t kBacktraceBuckets = 1 << 15;
+ static const size_t kBacktraceCapacity = kBacktraceBuckets;
+
+ struct BacktraceHasher {
+ size_t operator () (const Backtrace& backtrace) const;
+ };
+
+ using BacktraceMap = internal::FixedHashMap<
+ kBacktraceBuckets,
+ Backtrace,
+ size_t, // Number of references to the backtrace (the key). Incremented
+ // when an allocation that references the backtrace is inserted,
+ // and decremented when the allocation is removed. When the
+ // number drops to zero, the backtrace is removed from the map.
+ BacktraceHasher>;
+
+ struct AllocationInfo {
+ size_t size;
+ const char* type_name;
+ BacktraceMap::KVIndex backtrace_index;
+ };
+
+ struct AddressHasher {
+ size_t operator () (const void* address) const;
+ };
+
+ using AllocationMap = internal::FixedHashMap<
+ kAllocationBuckets,
+ const void*,
+ AllocationInfo,
+ AddressHasher>;
+
+ BacktraceMap::KVIndex InsertBacktrace(const Backtrace& backtrace);
+ void RemoveBacktrace(BacktraceMap::KVIndex index);
+
+ Allocation GetAllocation(AllocationMap::KVIndex) const;
+
+ AllocationMap allocations_;
+ BacktraceMap backtraces_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
diff --git a/libchrome/base/trace_event/heap_profiler_allocation_register_posix.cc b/libchrome/base/trace_event/heap_profiler_allocation_register_posix.cc
new file mode 100644
index 0000000..94eeb4d
--- /dev/null
+++ b/libchrome/base/trace_event/heap_profiler_allocation_register_posix.cc
@@ -0,0 +1,58 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_allocation_register.h"
+
+#include <stddef.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "base/bits.h"
+#include "base/logging.h"
+#include "base/process/process_metrics.h"
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+namespace base {
+namespace trace_event {
+namespace internal {
+
+namespace {
+size_t GetGuardSize() {  // The inaccessible guard region is one page.
+  return GetPageSize();
+}
+}  // namespace
+
+void* AllocateGuardedVirtualMemory(size_t size) {
+  size = bits::Align(size, GetPageSize());  // Round up to whole pages.
+
+  // Add space for a guard page at the end.
+  size_t map_size = size + GetGuardSize();
+
+  void* addr = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
+                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+  PCHECK(addr != MAP_FAILED);
+
+  // Mark the last page of the allocated address space as inaccessible
+  // (PROT_NONE). The read/write accessible space is still at least |size|
+  // bytes.
+  void* guard_addr =
+      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + size);
+  int result = mprotect(guard_addr, GetGuardSize(), PROT_NONE);
+  PCHECK(result == 0);
+
+  return addr;
+}
+
+void FreeGuardedVirtualMemory(void* address, size_t allocated_size) {
+  size_t size = bits::Align(allocated_size, GetPageSize()) + GetGuardSize();
+  munmap(address, size);  // Also unmaps the trailing guard page.
+}
+
+} // namespace internal
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/heap_profiler_heap_dump_writer.cc b/libchrome/base/trace_event/heap_profiler_heap_dump_writer.cc
new file mode 100644
index 0000000..1bf06db
--- /dev/null
+++ b/libchrome/base/trace_event/heap_profiler_heap_dump_writer.cc
@@ -0,0 +1,323 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_heap_dump_writer.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <iterator>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "base/format_macros.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
+#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
+#include "base/trace_event/memory_dump_session_state.h"
+#include "base/trace_event/trace_config.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "base/trace_event/trace_log.h"
+
+// Most of what the |HeapDumpWriter| does is aggregating detailed information
+// about the heap and deciding what to dump. The input to this process is a list
+// of |AllocationContext|s and size pairs.
+//
+// The pairs are grouped into |Bucket|s. A bucket is a group of (context, size)
+// pairs where the properties of the contexts share a prefix. (Type name is
+// considered a list of length one here.) First all pairs are put into one
+// bucket that represents the entire heap. Then this bucket is recursively
+// broken down into smaller buckets. Each bucket keeps track of whether further
+// breakdown is possible.
+
+namespace base {
+namespace trace_event {
+namespace internal {
+namespace {
+
+// Denotes a property of |AllocationContext| to break down by.
+enum class BreakDownMode { kByBacktrace, kByTypeName };
+
+// A group of bytes for which the context shares a prefix.
+struct Bucket {
+  Bucket()
+      : size(0),
+        count(0),
+        backtrace_cursor(0),
+        is_broken_down_by_type_name(false) {}
+
+  std::vector<std::pair<const AllocationContext*, AllocationMetrics>>
+      metrics_by_context;
+
+  // The sum of the sizes of |metrics_by_context|.
+  size_t size;
+
+  // The sum of the allocation counts of |metrics_by_context|.
+  size_t count;
+
+  // The index of the stack frame that has not yet been broken down by. For all
+  // elements in this bucket, the stack frames 0 up to (but not including) the
+  // cursor, must be equal.
+  size_t backtrace_cursor;
+
+  // When true, the type name for all elements in this bucket must be equal.
+  bool is_broken_down_by_type_name;
+};
+
+// Comparison operator to order buckets by their size.
+bool operator<(const Bucket& lhs, const Bucket& rhs) {
+ return lhs.size < rhs.size;
+}
+
+// Groups the allocations in the bucket by |break_by|. The buckets in the
+// returned list will have |backtrace_cursor| advanced or
+// |is_broken_down_by_type_name| set depending on the property to group by.
+std::vector<Bucket> GetSubbuckets(const Bucket& bucket,
+                                  BreakDownMode break_by) {
+  base::hash_map<const void*, Bucket> breakdown;
+
+  // Aggregate either by the stack frame at the cursor, or by type name.
+  if (break_by == BreakDownMode::kByBacktrace) {
+    for (const auto& context_and_metrics : bucket.metrics_by_context) {
+      const Backtrace& backtrace = context_and_metrics.first->backtrace;
+      const StackFrame* begin = std::begin(backtrace.frames);
+      const StackFrame* end = begin + backtrace.frame_count;
+      const StackFrame* cursor = begin + bucket.backtrace_cursor;
+
+      DCHECK_LE(cursor, end);
+
+      if (cursor != end) {
+        Bucket& subbucket = breakdown[cursor->value];
+        subbucket.size += context_and_metrics.second.size;
+        subbucket.count += context_and_metrics.second.count;
+        subbucket.metrics_by_context.push_back(context_and_metrics);
+        subbucket.backtrace_cursor = bucket.backtrace_cursor + 1;
+        subbucket.is_broken_down_by_type_name =
+            bucket.is_broken_down_by_type_name;
+        DCHECK_GT(subbucket.size, 0u);
+        DCHECK_GT(subbucket.count, 0u);
+      }
+    }
+  } else if (break_by == BreakDownMode::kByTypeName) {
+    if (!bucket.is_broken_down_by_type_name) {
+      for (const auto& context_and_metrics : bucket.metrics_by_context) {
+        const AllocationContext* context = context_and_metrics.first;
+        Bucket& subbucket = breakdown[context->type_name];
+        subbucket.size += context_and_metrics.second.size;
+        subbucket.count += context_and_metrics.second.count;
+        subbucket.metrics_by_context.push_back(context_and_metrics);
+        subbucket.backtrace_cursor = bucket.backtrace_cursor;
+        subbucket.is_broken_down_by_type_name = true;
+        DCHECK_GT(subbucket.size, 0u);
+        DCHECK_GT(subbucket.count, 0u);
+      }
+    }
+  }
+
+  std::vector<Bucket> buckets;
+  buckets.reserve(breakdown.size());
+  // Iterate by const reference: a Bucket owns a vector, so taking map values
+  // by value here would copy-allocate once per subbucket.
+  for (const auto& key_bucket : breakdown)
+    buckets.push_back(key_bucket.second);
+  return buckets;
+}
+
+// Breaks down the bucket by |break_by|. Returns only buckets that contribute
+// more than |min_size_bytes| to the total size. The long tail is omitted.
+std::vector<Bucket> BreakDownBy(const Bucket& bucket,
+                                BreakDownMode break_by,
+                                size_t min_size_bytes) {
+  std::vector<Bucket> buckets = GetSubbuckets(bucket, break_by);
+
+  // Ensure that |buckets| is a max-heap (the data structure, not memory heap),
+  // so its front contains the largest bucket. Buckets should be iterated
+  // ordered by size, but sorting the vector is overkill because the long tail
+  // of small buckets will be discarded. By using a max-heap, the optimal case
+  // where all but the first bucket are discarded is O(n). The worst case where
+  // no bucket is discarded is doing a heap sort, which is O(n log n).
+  std::make_heap(buckets.begin(), buckets.end());
+
+  // Move out the largest buckets while the largest remaining bucket still
+  // contributes at least |min_size_bytes|. The large buckets end up in
+  // [it, end()); [begin(), it) is the part that still contains the max-heap
+  // of small buckets.
+  std::vector<Bucket>::iterator it;
+  for (it = buckets.end(); it != buckets.begin(); --it) {
+    if (buckets.front().size < min_size_bytes)
+      break;
+
+    // Put the largest bucket in [begin, it) at |it - 1| and max-heapify
+    // [begin, it - 1). This puts the next largest bucket at |buckets.front()|.
+    std::pop_heap(buckets.begin(), it);
+  }
+
+  // At this point, |buckets| looks like this (numbers are bucket sizes):
+  //
+  //   <-- max-heap of small buckets --->
+  //                                     <-- large buckets by ascending size -->
+  // [ 19 | 11 | 13 | 7 | 2 | 5 | ... | 83 | 89 | 97 ]
+  //   ^                  ^                       ^
+  //   |                  |                       |
+  //   begin()            it                      end()
+
+  // Discard the long tail of buckets below the |min_size_bytes| threshold.
+  buckets.erase(buckets.begin(), it);
+
+  return buckets;
+}
+
+} // namespace
+
+bool operator<(Entry lhs, Entry rhs) {
+ // There is no need to compare |size|. If the backtrace and type name are
+ // equal then the sizes must be equal as well.
+ return std::tie(lhs.stack_frame_id, lhs.type_id) <
+ std::tie(rhs.stack_frame_id, rhs.type_id);
+}
+
+HeapDumpWriter::HeapDumpWriter(StackFrameDeduplicator* stack_frame_deduplicator,
+ TypeNameDeduplicator* type_name_deduplicator,
+ uint32_t breakdown_threshold_bytes)
+ : stack_frame_deduplicator_(stack_frame_deduplicator),
+ type_name_deduplicator_(type_name_deduplicator),
+ breakdown_threshold_bytes_(breakdown_threshold_bytes) {
+}
+
+HeapDumpWriter::~HeapDumpWriter() {}
+
+bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) {
+ // The contexts in the bucket are all different, but the [begin, cursor) range
+ // is equal for all contexts in the bucket, and the type names are the same if
+ // |is_broken_down_by_type_name| is set.
+ DCHECK(!bucket.metrics_by_context.empty());
+
+ const AllocationContext* context = bucket.metrics_by_context.front().first;
+
+ const StackFrame* backtrace_begin = std::begin(context->backtrace.frames);
+ const StackFrame* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
+ DCHECK_LE(bucket.backtrace_cursor, arraysize(context->backtrace.frames));
+
+ Entry entry;
+ entry.stack_frame_id = stack_frame_deduplicator_->Insert(
+ backtrace_begin, backtrace_end);
+
+ // Deduplicate the type name, or use ID -1 if type name is not set.
+ entry.type_id = bucket.is_broken_down_by_type_name
+ ? type_name_deduplicator_->Insert(context->type_name)
+ : -1;
+
+ entry.size = bucket.size;
+ entry.count = bucket.count;
+
+ auto position_and_inserted = entries_.insert(entry);
+ return position_and_inserted.second;
+}
+
+void HeapDumpWriter::BreakDown(const Bucket& bucket) {
+ auto by_backtrace = BreakDownBy(bucket,
+ BreakDownMode::kByBacktrace,
+ breakdown_threshold_bytes_);
+ auto by_type_name = BreakDownBy(bucket,
+ BreakDownMode::kByTypeName,
+ breakdown_threshold_bytes_);
+
+ // Insert entries for the buckets. If a bucket was not present before, it has
+ // not been broken down before, so recursively continue breaking down in that
+ // case. There might be multiple routes to the same entry (first break down
+ // by type name, then by backtrace, or first by backtrace and then by type),
+ // so a set is used to avoid dumping and breaking down entries more than once.
+
+ for (const Bucket& subbucket : by_backtrace)
+ if (AddEntryForBucket(subbucket))
+ BreakDown(subbucket);
+
+ for (const Bucket& subbucket : by_type_name)
+ if (AddEntryForBucket(subbucket))
+ BreakDown(subbucket);
+}
+
+const std::set<Entry>& HeapDumpWriter::Summarize(
+ const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context) {
+ // Start with one bucket that represents the entire heap. Iterate by
+ // reference, because the allocation contexts are going to point to allocation
+ // contexts stored in |metrics_by_context|.
+ Bucket root_bucket;
+ for (const auto& context_and_metrics : metrics_by_context) {
+ DCHECK_GT(context_and_metrics.second.size, 0u);
+ DCHECK_GT(context_and_metrics.second.count, 0u);
+ const AllocationContext* context = &context_and_metrics.first;
+ root_bucket.metrics_by_context.push_back(
+ std::make_pair(context, context_and_metrics.second));
+ root_bucket.size += context_and_metrics.second.size;
+ root_bucket.count += context_and_metrics.second.count;
+ }
+
+ AddEntryForBucket(root_bucket);
+
+ // Recursively break down the heap and fill |entries_| with entries to dump.
+ BreakDown(root_bucket);
+
+ return entries_;
+}
+
+std::unique_ptr<TracedValue> Serialize(const std::set<Entry>& entries) {
+ std::string buffer;
+ std::unique_ptr<TracedValue> traced_value(new TracedValue);
+
+ traced_value->BeginArray("entries");
+
+ for (const Entry& entry : entries) {
+ traced_value->BeginDictionary();
+
+ // Format size as hexadecimal string into |buffer|.
+ SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.size));
+ traced_value->SetString("size", buffer);
+
+ SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.count));
+ traced_value->SetString("count", buffer);
+
+ if (entry.stack_frame_id == -1) {
+ // An empty backtrace (which will have ID -1) is represented by the empty
+ // string, because there is no leaf frame to reference in |stackFrames|.
+ traced_value->SetString("bt", "");
+ } else {
+ // Format index of the leaf frame as a string, because |stackFrames| is a
+ // dictionary, not an array.
+ SStringPrintf(&buffer, "%i", entry.stack_frame_id);
+ traced_value->SetString("bt", buffer);
+ }
+
+ // Type ID -1 (cumulative size for all types) is represented by the absence
+ // of the "type" key in the dictionary.
+ if (entry.type_id != -1) {
+ // Format the type ID as a string.
+ SStringPrintf(&buffer, "%i", entry.type_id);
+ traced_value->SetString("type", buffer);
+ }
+
+ traced_value->EndDictionary();
+ }
+
+ traced_value->EndArray(); // "entries"
+ return traced_value;
+}
+
+} // namespace internal
+
+std::unique_ptr<TracedValue> ExportHeapDump(
+ const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context,
+ const MemoryDumpSessionState& session_state) {
+ internal::HeapDumpWriter writer(
+ session_state.stack_frame_deduplicator(),
+ session_state.type_name_deduplicator(),
+ session_state.memory_dump_config().heap_profiler_options
+ .breakdown_threshold_bytes);
+ return Serialize(writer.Summarize(metrics_by_context));
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/heap_profiler_heap_dump_writer.h b/libchrome/base/trace_event/heap_profiler_heap_dump_writer.h
new file mode 100644
index 0000000..6e9d29d
--- /dev/null
+++ b/libchrome/base/trace_event/heap_profiler_heap_dump_writer.h
@@ -0,0 +1,113 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_HEAP_DUMP_WRITER_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_HEAP_DUMP_WRITER_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <set>
+
+#include "base/base_export.h"
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+
+namespace base {
+namespace trace_event {
+
+class MemoryDumpSessionState;
+class StackFrameDeduplicator;
+class TracedValue;
+class TypeNameDeduplicator;
+
+// Aggregates |metrics_by_context|, recursively breaks down the heap, and
+// returns a traced value with an "entries" array that can be dumped in the
+// trace log, following the format described in https://goo.gl/KY7zVE. The
+// number of entries is kept reasonable because long tails are not included.
+BASE_EXPORT std::unique_ptr<TracedValue> ExportHeapDump(
+ const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context,
+ const MemoryDumpSessionState& session_state);
+
+namespace internal {
+
+namespace {
+struct Bucket;
+}
+
+// An entry in the "entries" array as described in https://goo.gl/KY7zVE.
+struct BASE_EXPORT Entry {
+ size_t size;
+ size_t count;
+
+ // References a backtrace in the stack frame deduplicator. -1 means empty
+ // backtrace (the root of the tree).
+ int stack_frame_id;
+
+ // References a type name in the type name deduplicator. -1 indicates that
+ // the size is the cumulative size for all types (the root of the tree).
+ int type_id;
+};
+
+// Comparison operator to enable putting |Entry| in a |std::set|.
+BASE_EXPORT bool operator<(Entry lhs, Entry rhs);
+
+// Serializes entries to an "entries" array in a traced value.
+BASE_EXPORT std::unique_ptr<TracedValue> Serialize(const std::set<Entry>& dump);
+
+// Helper class to dump a snapshot of an |AllocationRegister| or other heap
+// bookkeeping structure into a |TracedValue|. This class is intended to be
+// used as a one-shot local instance on the stack.
+class BASE_EXPORT HeapDumpWriter {
+ public:
+ // The |stack_frame_deduplicator| and |type_name_deduplicator| are not owned.
+ // The heap dump writer assumes exclusive access to them during the lifetime
+ // of the dump writer. The heap dumps are broken down for allocations bigger
+ // than |breakdown_threshold_bytes|.
+ HeapDumpWriter(StackFrameDeduplicator* stack_frame_deduplicator,
+ TypeNameDeduplicator* type_name_deduplicator,
+ uint32_t breakdown_threshold_bytes);
+
+ ~HeapDumpWriter();
+
+ // Aggregates allocations to compute the total size of the heap, then breaks
+ // down the heap recursively. This produces the values that should be dumped
+ // in the "entries" array. The number of entries is kept reasonable because
+ // long tails are not included. Use |Serialize| to convert to a traced value.
+ const std::set<Entry>& Summarize(
+ const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context);
+
+ private:
+ // Inserts an |Entry| for |Bucket| into |entries_|. Returns false if the
+ // entry was present before, true if it was not.
+ bool AddEntryForBucket(const Bucket& bucket);
+
+ // Recursively breaks down a bucket into smaller buckets and adds entries for
+ // the buckets worth dumping to |entries_|.
+ void BreakDown(const Bucket& bucket);
+
+ // The collection of entries that is filled by |Summarize|.
+ std::set<Entry> entries_;
+
+ // Helper for generating the |stackFrames| dictionary. Not owned, must outlive
+ // this heap dump writer instance.
+ StackFrameDeduplicator* const stack_frame_deduplicator_;
+
+ // Helper for converting type names to IDs. Not owned, must outlive this heap
+ // dump writer instance.
+ TypeNameDeduplicator* const type_name_deduplicator_;
+
+ // Minimum size of an allocation for which an allocation bucket will be
+ // broken down with children.
+ uint32_t breakdown_threshold_bytes_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapDumpWriter);
+};
+
+} // namespace internal
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_HEAP_PROFILER_HEAP_DUMP_WRITER_H_
diff --git a/libchrome/base/trace_event/heap_profiler_stack_frame_deduplicator.cc b/libchrome/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
new file mode 100644
index 0000000..49a2350
--- /dev/null
+++ b/libchrome/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
@@ -0,0 +1,135 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
+
+#include <inttypes.h>
+#include <stddef.h>
+
+#include <string>
+#include <utility>
+
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "base/trace_event/trace_event_memory_overhead.h"
+
+namespace base {
+namespace trace_event {
+
+StackFrameDeduplicator::FrameNode::FrameNode(StackFrame frame,
+ int parent_frame_index)
+ : frame(frame), parent_frame_index(parent_frame_index) {}
+StackFrameDeduplicator::FrameNode::FrameNode(const FrameNode& other) = default;
+StackFrameDeduplicator::FrameNode::~FrameNode() {}
+
+StackFrameDeduplicator::StackFrameDeduplicator() {}
+StackFrameDeduplicator::~StackFrameDeduplicator() {}
+
+int StackFrameDeduplicator::Insert(const StackFrame* beginFrame,
+                                   const StackFrame* endFrame) {
+  int frame_index = -1;
+  std::map<StackFrame, int>* nodes = &roots_;
+
+  // Walk the frames bottom-up; each frame descends one level into the trie.
+  for (const StackFrame* it = beginFrame; it != endFrame; it++) {
+    StackFrame frame = *it;
+
+    auto node = nodes->find(frame);
+    if (node == nodes->end()) {
+      // There is no tree node for this frame yet, create it. The parent node
+      // is the node associated with the previous frame.
+      FrameNode frame_node(frame, frame_index);
+
+      // The new frame node will be appended, so its index is the current size
+      // of the vector.
+      frame_index = static_cast<int>(frames_.size());
+
+      // Add the node to the trie so it will be found next time.
+      nodes->insert(std::make_pair(frame, frame_index));
+
+      // Append the node after modifying |nodes|, because the |frames_| vector
+      // might need to resize, and this invalidates the |nodes| pointer.
+      frames_.push_back(frame_node);
+    } else {
+      // A tree node for this frame exists. Look for the next one.
+      frame_index = node->second;
+    }
+
+    nodes = &frames_[frame_index].children;
+  }
+
+  return frame_index;
+}
+
+void StackFrameDeduplicator::AppendAsTraceFormat(std::string* out) const {
+ out->append("{"); // Begin the |stackFrames| dictionary.
+
+ int i = 0;
+ auto frame_node = begin();
+ auto it_end = end();
+ std::string stringify_buffer;
+
+ while (frame_node != it_end) {
+ // The |stackFrames| format is a dictionary, not an array, so the
+ // keys are stringified indices. Write the index manually, then use
+ // |TracedValue| to format the object. This is to avoid building the
+ // entire dictionary as a |TracedValue| in memory.
+ SStringPrintf(&stringify_buffer, "\"%d\":", i);
+ out->append(stringify_buffer);
+
+ std::unique_ptr<TracedValue> frame_node_value(new TracedValue);
+ const StackFrame& frame = frame_node->frame;
+ switch (frame.type) {
+ case StackFrame::Type::TRACE_EVENT_NAME:
+ frame_node_value->SetString(
+ "name", static_cast<const char*>(frame.value));
+ break;
+ case StackFrame::Type::THREAD_NAME:
+ SStringPrintf(&stringify_buffer,
+ "[Thread: %s]",
+ static_cast<const char*>(frame.value));
+ frame_node_value->SetString("name", stringify_buffer);
+ break;
+ case StackFrame::Type::PROGRAM_COUNTER:
+ SStringPrintf(&stringify_buffer,
+ "pc:%" PRIxPTR,
+ reinterpret_cast<uintptr_t>(frame.value));
+ frame_node_value->SetString("name", stringify_buffer);
+ break;
+ }
+ if (frame_node->parent_frame_index >= 0) {
+ SStringPrintf(&stringify_buffer, "%d", frame_node->parent_frame_index);
+ frame_node_value->SetString("parent", stringify_buffer);
+ }
+ frame_node_value->AppendAsTraceFormat(out);
+
+ i++;
+ frame_node++;
+
+ if (frame_node != it_end)
+ out->append(",");
+ }
+
+ out->append("}"); // End the |stackFrames| dictionary.
+}
+
+void StackFrameDeduplicator::EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) {
+ // The sizes here are only estimates; they fail to take into account the
+ // overhead of the tree nodes for the map, but as an estimate this should be
+ // fine.
+ size_t maps_size = roots_.size() * sizeof(std::pair<StackFrame, int>);
+ size_t frames_allocated = frames_.capacity() * sizeof(FrameNode);
+ size_t frames_resident = frames_.size() * sizeof(FrameNode);
+
+ for (const FrameNode& node : frames_)
+ maps_size += node.children.size() * sizeof(std::pair<StackFrame, int>);
+
+ overhead->Add("StackFrameDeduplicator",
+ sizeof(StackFrameDeduplicator) + maps_size + frames_allocated,
+ sizeof(StackFrameDeduplicator) + maps_size + frames_resident);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/heap_profiler_stack_frame_deduplicator.h b/libchrome/base/trace_event/heap_profiler_stack_frame_deduplicator.h
new file mode 100644
index 0000000..4932534
--- /dev/null
+++ b/libchrome/base/trace_event/heap_profiler_stack_frame_deduplicator.h
@@ -0,0 +1,79 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_STACK_FRAME_DEDUPLICATOR_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_STACK_FRAME_DEDUPLICATOR_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceEventMemoryOverhead;
+
+// A data structure that allows grouping a set of backtraces in a space-
+// efficient manner by creating a call tree and writing it as a set of (node,
+// parent) pairs. The tree nodes reference both parent and children. The parent
+// is referenced by index into |frames_|. The children are referenced via a map
+// of |StackFrame|s to index into |frames_|. So there is a trie for bottom-up
+// lookup of a backtrace for deduplication, and a tree for compact storage in
+// the trace log.
+class BASE_EXPORT StackFrameDeduplicator : public ConvertableToTraceFormat {
+ public:
+ // A node in the call tree.
+ struct FrameNode {
+ FrameNode(StackFrame frame, int parent_frame_index);
+ FrameNode(const FrameNode& other);
+ ~FrameNode();
+
+ StackFrame frame;
+
+ // The index of the parent stack frame in |frames_|, or -1 if there is no
+ // parent frame (when it is at the bottom of the call stack).
+ int parent_frame_index;
+
+ // Indices into |frames_| of frames called from the current frame.
+ std::map<StackFrame, int> children;
+ };
+
+ using ConstIterator = std::vector<FrameNode>::const_iterator;
+
+ StackFrameDeduplicator();
+ ~StackFrameDeduplicator() override;
+
+ // Inserts a backtrace where |beginFrame| is a pointer to the bottom frame
+ // (e.g. main) and |endFrame| is a pointer past the top frame (most recently
+ // called function), and returns the index of its leaf node in |frames_|.
+ // Returns -1 if the backtrace is empty.
+ int Insert(const StackFrame* beginFrame, const StackFrame* endFrame);
+
+ // Iterators over the frame nodes in the call tree.
+ ConstIterator begin() const { return frames_.begin(); }
+ ConstIterator end() const { return frames_.end(); }
+
+ // Writes the |stackFrames| dictionary as defined in https://goo.gl/GerkV8 to
+ // the trace log.
+ void AppendAsTraceFormat(std::string* out) const override;
+
+ // Estimates memory overhead including |sizeof(StackFrameDeduplicator)|.
+ void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) override;
+
+ private:
+ std::map<StackFrame, int> roots_;
+ std::vector<FrameNode> frames_;
+
+ DISALLOW_COPY_AND_ASSIGN(StackFrameDeduplicator);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_HEAP_PROFILER_STACK_FRAME_DEDUPLICATOR_H_
diff --git a/libchrome/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc b/libchrome/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc
new file mode 100644
index 0000000..2215ede
--- /dev/null
+++ b/libchrome/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc
@@ -0,0 +1,152 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
+
+#include <iterator>
+#include <memory>
+
+#include "base/macros.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+// Define all strings once, because the deduplicator requires pointer equality,
+// and string interning is unreliable.
+StackFrame kBrowserMain = StackFrame::FromTraceEventName("BrowserMain");
+StackFrame kRendererMain = StackFrame::FromTraceEventName("RendererMain");
+StackFrame kCreateWidget = StackFrame::FromTraceEventName("CreateWidget");
+StackFrame kInitialize = StackFrame::FromTraceEventName("Initialize");
+StackFrame kMalloc = StackFrame::FromTraceEventName("malloc");
+
+TEST(StackFrameDeduplicatorTest, SingleBacktrace) {
+ StackFrame bt[] = {kBrowserMain, kCreateWidget, kMalloc};
+
+ // The call tree should look like this (index in brackets).
+ //
+ // BrowserMain [0]
+ // CreateWidget [1]
+ // malloc [2]
+
+ std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
+ ASSERT_EQ(2, dedup->Insert(std::begin(bt), std::end(bt)));
+
+ auto iter = dedup->begin();
+ ASSERT_EQ(kBrowserMain, (iter + 0)->frame);
+ ASSERT_EQ(-1, (iter + 0)->parent_frame_index);
+
+ ASSERT_EQ(kCreateWidget, (iter + 1)->frame);
+ ASSERT_EQ(0, (iter + 1)->parent_frame_index);
+
+ ASSERT_EQ(kMalloc, (iter + 2)->frame);
+ ASSERT_EQ(1, (iter + 2)->parent_frame_index);
+
+ ASSERT_EQ(iter + 3, dedup->end());
+}
+
+TEST(StackFrameDeduplicatorTest, SingleBacktraceWithNull) {
+ StackFrame null_frame = StackFrame::FromTraceEventName(nullptr);
+ StackFrame bt[] = {kBrowserMain, null_frame, kMalloc};
+
+ // Deduplicator doesn't care about what's inside StackFrames,
+ // and handles nullptr StackFrame values as any other.
+ //
+ // So the call tree should look like this (index in brackets).
+ //
+ // BrowserMain [0]
+ // (null) [1]
+ // malloc [2]
+
+ std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
+ ASSERT_EQ(2, dedup->Insert(std::begin(bt), std::end(bt)));
+
+ auto iter = dedup->begin();
+ ASSERT_EQ(kBrowserMain, (iter + 0)->frame);
+ ASSERT_EQ(-1, (iter + 0)->parent_frame_index);
+
+ ASSERT_EQ(null_frame, (iter + 1)->frame);
+ ASSERT_EQ(0, (iter + 1)->parent_frame_index);
+
+ ASSERT_EQ(kMalloc, (iter + 2)->frame);
+ ASSERT_EQ(1, (iter + 2)->parent_frame_index);
+
+ ASSERT_EQ(iter + 3, dedup->end());
+}
+
+// Test that there can be different call trees (there can be multiple bottom
+// frames). Also verify that frames with the same name but a different caller
+// are represented as distinct nodes.
+TEST(StackFrameDeduplicatorTest, MultipleRoots) {
+ StackFrame bt0[] = {kBrowserMain, kCreateWidget};
+ StackFrame bt1[] = {kRendererMain, kCreateWidget};
+
+ // The call tree should look like this (index in brackets).
+ //
+ // BrowserMain [0]
+ // CreateWidget [1]
+ // RendererMain [2]
+ // CreateWidget [3]
+ //
+ // Note that there will be two instances of CreateWidget,
+ // with different parents.
+
+ std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
+ ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0)));
+ ASSERT_EQ(3, dedup->Insert(std::begin(bt1), std::end(bt1)));
+
+ auto iter = dedup->begin();
+ ASSERT_EQ(kBrowserMain, (iter + 0)->frame);
+ ASSERT_EQ(-1, (iter + 0)->parent_frame_index);
+
+ ASSERT_EQ(kCreateWidget, (iter + 1)->frame);
+ ASSERT_EQ(0, (iter + 1)->parent_frame_index);
+
+ ASSERT_EQ(kRendererMain, (iter + 2)->frame);
+ ASSERT_EQ(-1, (iter + 2)->parent_frame_index);
+
+ ASSERT_EQ(kCreateWidget, (iter + 3)->frame);
+ ASSERT_EQ(2, (iter + 3)->parent_frame_index);
+
+ ASSERT_EQ(iter + 4, dedup->end());
+}
+
+TEST(StackFrameDeduplicatorTest, Deduplication) {
+ StackFrame bt0[] = {kBrowserMain, kCreateWidget};
+ StackFrame bt1[] = {kBrowserMain, kInitialize};
+
+ // The call tree should look like this (index in brackets).
+ //
+ // BrowserMain [0]
+ // CreateWidget [1]
+ // Initialize [2]
+ //
+ // Note that BrowserMain will be re-used.
+
+ std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
+ ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0)));
+ ASSERT_EQ(2, dedup->Insert(std::begin(bt1), std::end(bt1)));
+
+ auto iter = dedup->begin();
+ ASSERT_EQ(kBrowserMain, (iter + 0)->frame);
+ ASSERT_EQ(-1, (iter + 0)->parent_frame_index);
+
+ ASSERT_EQ(kCreateWidget, (iter + 1)->frame);
+ ASSERT_EQ(0, (iter + 1)->parent_frame_index);
+
+ ASSERT_EQ(kInitialize, (iter + 2)->frame);
+ ASSERT_EQ(0, (iter + 2)->parent_frame_index);
+
+ ASSERT_EQ(iter + 3, dedup->end());
+
+ // Inserting the same backtrace again should return the index of the existing
+ // node.
+ ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0)));
+ ASSERT_EQ(2, dedup->Insert(std::begin(bt1), std::end(bt1)));
+ ASSERT_EQ(dedup->begin() + 3, dedup->end());
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/heap_profiler_type_name_deduplicator.cc b/libchrome/base/trace_event/heap_profiler_type_name_deduplicator.cc
new file mode 100644
index 0000000..055f86a
--- /dev/null
+++ b/libchrome/base/trace_event/heap_profiler_type_name_deduplicator.cc
@@ -0,0 +1,107 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string>
+#include <utility>
+
+#include "base/json/string_escape.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/trace_event_memory_overhead.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+// Extracts the directory name if |type_name| is a file name. Otherwise,
+// returns |type_name|.
+StringPiece ExtractDirNameFromFileName(const char* type_name) {
+ StringPiece result(type_name);
+ size_t last_seperator = result.find_last_of("\\/");
+
+  // If |type_name| was not a file path, the separator will not be found, so
+  // the whole type name is returned.
+ if (last_seperator == StringPiece::npos)
+ return result;
+
+ // Remove the file name from the path.
+ result.remove_suffix(result.length() - last_seperator);
+
+ // Remove the parent directory references.
+ const char kParentDirectory[] = "..";
+ const size_t kParentDirectoryLength = 3; // '../' or '..\'.
+ while (result.starts_with(kParentDirectory)) {
+ result.remove_prefix(kParentDirectoryLength);
+ }
+ return result;
+}
+
+} // namespace
+
+TypeNameDeduplicator::TypeNameDeduplicator() {
+  // A null pointer has type ID 0 ("unknown type").
+ type_ids_.insert(std::make_pair(nullptr, 0));
+}
+
+TypeNameDeduplicator::~TypeNameDeduplicator() {}
+
+int TypeNameDeduplicator::Insert(const char* type_name) {
+ auto result = type_ids_.insert(std::make_pair(type_name, 0));
+ auto& elem = result.first;
+ bool did_not_exist_before = result.second;
+
+ if (did_not_exist_before) {
+ // The type IDs are assigned sequentially and they are zero-based, so
+ // |size() - 1| is the ID of the new element.
+ elem->second = static_cast<int>(type_ids_.size() - 1);
+ }
+
+ return elem->second;
+}
+
+void TypeNameDeduplicator::AppendAsTraceFormat(std::string* out) const {
+ out->append("{"); // Begin the type names dictionary.
+
+ auto it = type_ids_.begin();
+ std::string buffer;
+
+ // Write the first entry manually; the null pointer must not be dereferenced.
+ // (The first entry is the null pointer because a |std::map| is ordered.)
+ it++;
+ out->append("\"0\":\"[unknown]\"");
+
+ for (; it != type_ids_.end(); it++) {
+ // Type IDs in the trace are strings, write them as stringified keys of
+ // a dictionary.
+ SStringPrintf(&buffer, ",\"%d\":", it->second);
+
+ // TODO(ssid): crbug.com/594803 the type name is misused for file name in
+ // some cases.
+ StringPiece type_info = ExtractDirNameFromFileName(it->first);
+
+ // |EscapeJSONString| appends, it does not overwrite |buffer|.
+ bool put_in_quotes = true;
+ EscapeJSONString(type_info, put_in_quotes, &buffer);
+ out->append(buffer);
+ }
+
+ out->append("}"); // End the type names dictionary.
+}
+
+void TypeNameDeduplicator::EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) {
+ // The size here is only an estimate; it fails to take into account the size
+ // of the tree nodes for the map, but as an estimate this should be fine.
+ size_t map_size = type_ids_.size() * sizeof(std::pair<const char*, int>);
+
+ overhead->Add("TypeNameDeduplicator",
+ sizeof(TypeNameDeduplicator) + map_size);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/heap_profiler_type_name_deduplicator.h b/libchrome/base/trace_event/heap_profiler_type_name_deduplicator.h
new file mode 100644
index 0000000..2d26c73
--- /dev/null
+++ b/libchrome/base/trace_event/heap_profiler_type_name_deduplicator.h
@@ -0,0 +1,45 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_TYPE_NAME_DEDUPLICATOR_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_TYPE_NAME_DEDUPLICATOR_H_
+
+#include <map>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceEventMemoryOverhead;
+
+// Data structure that assigns a unique numeric ID to |const char*|s.
+class BASE_EXPORT TypeNameDeduplicator : public ConvertableToTraceFormat {
+ public:
+ TypeNameDeduplicator();
+ ~TypeNameDeduplicator() override;
+
+ // Inserts a type name and returns its ID.
+ int Insert(const char* type_name);
+
+ // Writes the type ID -> type name mapping to the trace log.
+ void AppendAsTraceFormat(std::string* out) const override;
+
+ // Estimates memory overhead including |sizeof(TypeNameDeduplicator)|.
+ void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) override;
+
+ private:
+ // Map from type name to type ID.
+ std::map<const char*, int> type_ids_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeNameDeduplicator);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_HEAP_PROFILER_TYPE_NAME_DEDUPLICATOR_H_
diff --git a/libchrome/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc b/libchrome/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
new file mode 100644
index 0000000..b2e681a
--- /dev/null
+++ b/libchrome/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
@@ -0,0 +1,96 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include <string>
+
+#include "base/json/json_reader.h"
+#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+// Define all strings once, because the deduplicator requires pointer equality,
+// and string interning is unreliable.
+const char kInt[] = "int";
+const char kBool[] = "bool";
+const char kString[] = "string";
+const char kNeedsEscape[] = "\"quotes\"";
+
+#if defined(OS_POSIX)
+const char kTaskFileName[] = "../../base/trace_event/trace_log.cc";
+const char kTaskPath[] = "base/trace_event";
+#else
+const char kTaskFileName[] = "..\\..\\base\\memory\\memory_win.cc";
+const char kTaskPath[] = "base\\memory";
+#endif
+
+std::unique_ptr<Value> DumpAndReadBack(
+ const TypeNameDeduplicator& deduplicator) {
+ std::string json;
+ deduplicator.AppendAsTraceFormat(&json);
+ return JSONReader::Read(json);
+}
+
+// Inserts a single type name into a new TypeNameDeduplicator instance and
+// checks if the value gets inserted and the exported value for |type_name| is
+// the same as |expected_value|.
+void TestInsertTypeAndReadback(const char* type_name,
+ const char* expected_value) {
+ std::unique_ptr<TypeNameDeduplicator> dedup(new TypeNameDeduplicator);
+ ASSERT_EQ(1, dedup->Insert(type_name));
+
+ std::unique_ptr<Value> type_names = DumpAndReadBack(*dedup);
+ ASSERT_NE(nullptr, type_names);
+
+ const DictionaryValue* dictionary;
+ ASSERT_TRUE(type_names->GetAsDictionary(&dictionary));
+
+ // When the type name was inserted, it got ID 1. The exported key "1"
+ // should be equal to |expected_value|.
+ std::string value;
+ ASSERT_TRUE(dictionary->GetString("1", &value));
+ ASSERT_EQ(expected_value, value);
+}
+
+} // namespace
+
+TEST(TypeNameDeduplicatorTest, Deduplication) {
+ // The type IDs should be like this:
+ // 0: [unknown]
+ // 1: int
+ // 2: bool
+ // 3: string
+
+ std::unique_ptr<TypeNameDeduplicator> dedup(new TypeNameDeduplicator);
+ ASSERT_EQ(1, dedup->Insert(kInt));
+ ASSERT_EQ(2, dedup->Insert(kBool));
+ ASSERT_EQ(3, dedup->Insert(kString));
+
+ // Inserting again should return the same IDs.
+ ASSERT_EQ(2, dedup->Insert(kBool));
+ ASSERT_EQ(1, dedup->Insert(kInt));
+ ASSERT_EQ(3, dedup->Insert(kString));
+
+ // A null pointer should yield type ID 0.
+ ASSERT_EQ(0, dedup->Insert(nullptr));
+}
+
+TEST(TypeNameDeduplicatorTest, EscapeTypeName) {
+ // Reading json should not fail, because the type name should have been
+ // escaped properly and exported value should contain quotes.
+ TestInsertTypeAndReadback(kNeedsEscape, kNeedsEscape);
+}
+
+TEST(TypeNameDeduplicatorTest, TestExtractFileName) {
+ // The exported value for passed file name should be the folders in the path.
+ TestInsertTypeAndReadback(kTaskFileName, kTaskPath);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/malloc_dump_provider.cc b/libchrome/base/trace_event/malloc_dump_provider.cc
new file mode 100644
index 0000000..c3d3258
--- /dev/null
+++ b/libchrome/base/trace_event/malloc_dump_provider.cc
@@ -0,0 +1,258 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/malloc_dump_provider.h"
+
+#include <stddef.h>
+
+#include "base/allocator/allocator_extension.h"
+#include "base/allocator/allocator_shim.h"
+#include "base/allocator/features.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/heap_profiler_allocation_register.h"
+#include "base/trace_event/heap_profiler_heap_dump_writer.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include <malloc/malloc.h>
+#else
+#include <malloc.h>
+#endif
+
+namespace base {
+namespace trace_event {
+
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+namespace {
+
+using allocator::AllocatorDispatch;
+
+void* HookAlloc(const AllocatorDispatch* self, size_t size) {
+ const AllocatorDispatch* const next = self->next;
+ void* ptr = next->alloc_function(next, size);
+ if (ptr)
+ MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
+ return ptr;
+}
+
+void* HookZeroInitAlloc(const AllocatorDispatch* self, size_t n, size_t size) {
+ const AllocatorDispatch* const next = self->next;
+ void* ptr = next->alloc_zero_initialized_function(next, n, size);
+ if (ptr)
+ MallocDumpProvider::GetInstance()->InsertAllocation(ptr, n * size);
+ return ptr;
+}
+
+void* HookllocAligned(const AllocatorDispatch* self,
+ size_t alignment,
+ size_t size) {
+ const AllocatorDispatch* const next = self->next;
+ void* ptr = next->alloc_aligned_function(next, alignment, size);
+ if (ptr)
+ MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
+ return ptr;
+}
+
+void* HookRealloc(const AllocatorDispatch* self, void* address, size_t size) {
+ const AllocatorDispatch* const next = self->next;
+ void* ptr = next->realloc_function(next, address, size);
+ MallocDumpProvider::GetInstance()->RemoveAllocation(address);
+ if (size > 0) // realloc(size == 0) means free().
+ MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
+ return ptr;
+}
+
+void HookFree(const AllocatorDispatch* self, void* address) {
+ if (address)
+ MallocDumpProvider::GetInstance()->RemoveAllocation(address);
+ const AllocatorDispatch* const next = self->next;
+ next->free_function(next, address);
+}
+
+AllocatorDispatch g_allocator_hooks = {
+ &HookAlloc, /* alloc_function */
+ &HookZeroInitAlloc, /* alloc_zero_initialized_function */
+ &HookllocAligned, /* alloc_aligned_function */
+ &HookRealloc, /* realloc_function */
+ &HookFree, /* free_function */
+ nullptr, /* next */
+};
+
+} // namespace
+#endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+
+// static
+const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects";
+
+// static
+MallocDumpProvider* MallocDumpProvider::GetInstance() {
+ return Singleton<MallocDumpProvider,
+ LeakySingletonTraits<MallocDumpProvider>>::get();
+}
+
+MallocDumpProvider::MallocDumpProvider()
+ : heap_profiler_enabled_(false), tid_dumping_heap_(kInvalidThreadId) {}
+
+MallocDumpProvider::~MallocDumpProvider() {}
+
+// Called at trace dump point time. Creates a snapshot of the memory counters
+// for the current process.
+bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) {
+ size_t total_virtual_size = 0;
+ size_t resident_size = 0;
+ size_t allocated_objects_size = 0;
+#if defined(USE_TCMALLOC)
+ bool res =
+ allocator::GetNumericProperty("generic.heap_size", &total_virtual_size);
+ DCHECK(res);
+ res = allocator::GetNumericProperty("generic.total_physical_bytes",
+ &resident_size);
+ DCHECK(res);
+ res = allocator::GetNumericProperty("generic.current_allocated_bytes",
+ &allocated_objects_size);
+ DCHECK(res);
+#elif defined(OS_MACOSX) || defined(OS_IOS)
+ malloc_statistics_t stats;
+ memset(&stats, 0, sizeof(stats));
+ malloc_zone_statistics(nullptr, &stats);
+ total_virtual_size = stats.size_allocated;
+ allocated_objects_size = stats.size_in_use;
+
+ // The resident size is approximated to the max size in use, which would count
+ // the total size of all regions other than the free bytes at the end of each
+ // region. In each allocation region the allocations are rounded off to a
+ // fixed quantum, so the excess region will not be resident.
+ // See crrev.com/1531463004 for detailed explanation.
+ resident_size = stats.max_size_in_use;
+#else
+ struct mallinfo info = mallinfo();
+ DCHECK_GE(info.arena + info.hblkhd, info.uordblks);
+
+ // In case of Android's jemalloc |arena| is 0 and the outer pages size is
+ // reported by |hblkhd|. In case of dlmalloc the total is given by
+ // |arena| + |hblkhd|. For more details see link: http://goo.gl/fMR8lF.
+ total_virtual_size = info.arena + info.hblkhd;
+ resident_size = info.uordblks;
+ allocated_objects_size = info.uordblks;
+#endif
+
+ MemoryAllocatorDump* outer_dump = pmd->CreateAllocatorDump("malloc");
+ outer_dump->AddScalar("virtual_size", MemoryAllocatorDump::kUnitsBytes,
+ total_virtual_size);
+ outer_dump->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes, resident_size);
+
+ // Total allocated space is given by |uordblks|.
+ MemoryAllocatorDump* inner_dump = pmd->CreateAllocatorDump(kAllocatedObjects);
+ inner_dump->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes,
+ allocated_objects_size);
+
+ if (resident_size - allocated_objects_size > 0) {
+ // Explicitly specify why is extra memory resident. In tcmalloc it accounts
+ // for free lists and caches. In mac and ios it accounts for the
+ // fragmentation and metadata.
+ MemoryAllocatorDump* other_dump =
+ pmd->CreateAllocatorDump("malloc/metadata_fragmentation_caches");
+ other_dump->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes,
+ resident_size - allocated_objects_size);
+ }
+
+ // Heap profiler dumps.
+ if (!heap_profiler_enabled_)
+ return true;
+
+ // The dumps of the heap profiler should be created only when heap profiling
+ // was enabled (--enable-heap-profiling) AND a DETAILED dump is requested.
+ // However, when enabled, the overhead of the heap profiler should be always
+ // reported to avoid oscillations of the malloc total in LIGHT dumps.
+
+ tid_dumping_heap_ = PlatformThread::CurrentId();
+ // At this point the Insert/RemoveAllocation hooks will ignore this thread.
+  // Enclosing all the temporary data structures in a scope, so that the heap
+  // profiler does not see unbalanced malloc/free calls from these containers.
+ {
+ TraceEventMemoryOverhead overhead;
+ hash_map<AllocationContext, AllocationMetrics> metrics_by_context;
+ {
+ AutoLock lock(allocation_register_lock_);
+ if (allocation_register_) {
+ if (args.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
+ for (const auto& alloc_size : *allocation_register_) {
+ AllocationMetrics& metrics = metrics_by_context[alloc_size.context];
+ metrics.size += alloc_size.size;
+ metrics.count++;
+ }
+ }
+ allocation_register_->EstimateTraceMemoryOverhead(&overhead);
+ }
+ } // lock(allocation_register_lock_)
+ pmd->DumpHeapUsage(metrics_by_context, overhead, "malloc");
+ }
+ tid_dumping_heap_ = kInvalidThreadId;
+
+ return true;
+}
+
+void MallocDumpProvider::OnHeapProfilingEnabled(bool enabled) {
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+ if (enabled) {
+ {
+ AutoLock lock(allocation_register_lock_);
+ allocation_register_.reset(new AllocationRegister());
+ }
+ allocator::InsertAllocatorDispatch(&g_allocator_hooks);
+ } else {
+ AutoLock lock(allocation_register_lock_);
+ allocation_register_.reset();
+ // Insert/RemoveAllocation below will no-op if the register is torn down.
+    // Once disabled, heap profiling will not be re-enabled for the
+    // lifetime of the process.
+ }
+#endif
+ heap_profiler_enabled_ = enabled;
+}
+
+void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
+ // CurrentId() can be a slow operation (crbug.com/497226). This apparently
+ // redundant condition short circuits the CurrentID() calls when unnecessary.
+ if (tid_dumping_heap_ != kInvalidThreadId &&
+ tid_dumping_heap_ == PlatformThread::CurrentId())
+ return;
+
+  // AllocationContextTracker will return nullptr when called re-entrantly.
+ // This is the case of GetInstanceForCurrentThread() being called for the
+ // first time, which causes a new() inside the tracker which re-enters the
+ // heap profiler, in which case we just want to early out.
+ auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread();
+ if (!tracker)
+ return;
+ AllocationContext context = tracker->GetContextSnapshot();
+
+ AutoLock lock(allocation_register_lock_);
+ if (!allocation_register_)
+ return;
+
+ allocation_register_->Insert(address, size, context);
+}
+
+void MallocDumpProvider::RemoveAllocation(void* address) {
+  // No re-entrancy is expected here as none of the calls below should
+  // cause any free() calls (|allocation_register_| does its own heap
+ if (tid_dumping_heap_ != kInvalidThreadId &&
+ tid_dumping_heap_ == PlatformThread::CurrentId())
+ return;
+ AutoLock lock(allocation_register_lock_);
+ if (!allocation_register_)
+ return;
+ allocation_register_->Remove(address);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/malloc_dump_provider.h b/libchrome/base/trace_event/malloc_dump_provider.h
new file mode 100644
index 0000000..4746cf5
--- /dev/null
+++ b/libchrome/base/trace_event/malloc_dump_provider.h
@@ -0,0 +1,69 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MALLOC_DUMP_PROVIDER_H_
+#define BASE_TRACE_EVENT_MALLOC_DUMP_PROVIDER_H_
+
+#include <istream>
+#include <memory>
+
+#include "base/macros.h"
+#include "base/memory/singleton.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "build/build_config.h"
+
+#if defined(OS_LINUX) || defined(OS_ANDROID) || \
+ (defined(OS_MACOSX) && !defined(OS_IOS))
+#define MALLOC_MEMORY_TRACING_SUPPORTED
+#endif
+
+namespace base {
+namespace trace_event {
+
+class AllocationRegister;
+
+// Dump provider which collects process-wide memory stats.
+class BASE_EXPORT MallocDumpProvider : public MemoryDumpProvider {
+ public:
+ // Name of the allocated_objects dump. Use this to declare suballocator dumps
+ // from other dump providers.
+ static const char kAllocatedObjects[];
+
+ static MallocDumpProvider* GetInstance();
+
+ // MemoryDumpProvider implementation.
+ bool OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) override;
+
+ void OnHeapProfilingEnabled(bool enabled) override;
+
+ // For heap profiling.
+ void InsertAllocation(void* address, size_t size);
+ void RemoveAllocation(void* address);
+
+ private:
+ friend struct DefaultSingletonTraits<MallocDumpProvider>;
+
+ MallocDumpProvider();
+ ~MallocDumpProvider() override;
+
+ // For heap profiling.
+ bool heap_profiler_enabled_;
+ std::unique_ptr<AllocationRegister> allocation_register_;
+ Lock allocation_register_lock_;
+
+ // When in OnMemoryDump(), this contains the current thread ID.
+  // This is to prevent re-entrancy in the heap profiler when the heap dump
+  // generation is malloc/new-ing for its own bookkeeping data structures.
+ PlatformThreadId tid_dumping_heap_;
+
+ DISALLOW_COPY_AND_ASSIGN(MallocDumpProvider);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MALLOC_DUMP_PROVIDER_H_
diff --git a/libchrome/base/trace_event/memory_allocator_dump.cc b/libchrome/base/trace_event/memory_allocator_dump.cc
new file mode 100644
index 0000000..7583763
--- /dev/null
+++ b/libchrome/base/trace_event/memory_allocator_dump.cc
@@ -0,0 +1,107 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_allocator_dump.h"
+
+#include "base/format_macros.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "base/values.h"
+
+namespace base {
+namespace trace_event {
+
+const char MemoryAllocatorDump::kNameSize[] = "size";
+const char MemoryAllocatorDump::kNameObjectCount[] = "object_count";
+const char MemoryAllocatorDump::kTypeScalar[] = "scalar";
+const char MemoryAllocatorDump::kTypeString[] = "string";
+const char MemoryAllocatorDump::kUnitsBytes[] = "bytes";
+const char MemoryAllocatorDump::kUnitsObjects[] = "objects";
+
+MemoryAllocatorDump::MemoryAllocatorDump(const std::string& absolute_name,
+ ProcessMemoryDump* process_memory_dump,
+ const MemoryAllocatorDumpGuid& guid)
+ : absolute_name_(absolute_name),
+ process_memory_dump_(process_memory_dump),
+ attributes_(new TracedValue),
+ guid_(guid),
+ flags_(Flags::DEFAULT) {
+ // The |absolute_name| cannot be empty.
+ DCHECK(!absolute_name.empty());
+
+ // The |absolute_name| can contain slash separator, but not leading or
+ // trailing ones.
+ DCHECK(absolute_name[0] != '/' && *absolute_name.rbegin() != '/');
+}
+
+// If the caller didn't provide a guid, make one up by hashing the
+// absolute_name with the current PID.
+// Rationale: |absolute_name| is already supposed to be unique within a
+// process, the pid will make it unique among all processes.
+MemoryAllocatorDump::MemoryAllocatorDump(const std::string& absolute_name,
+ ProcessMemoryDump* process_memory_dump)
+ : MemoryAllocatorDump(absolute_name,
+ process_memory_dump,
+ MemoryAllocatorDumpGuid(StringPrintf(
+ "%d:%s",
+ TraceLog::GetInstance()->process_id(),
+ absolute_name.c_str()))) {
+ string_conversion_buffer_.reserve(16);
+}
+
+MemoryAllocatorDump::~MemoryAllocatorDump() {
+}
+
+void MemoryAllocatorDump::AddScalar(const char* name,
+ const char* units,
+ uint64_t value) {
+ SStringPrintf(&string_conversion_buffer_, "%" PRIx64, value);
+ attributes_->BeginDictionary(name);
+ attributes_->SetString("type", kTypeScalar);
+ attributes_->SetString("units", units);
+ attributes_->SetString("value", string_conversion_buffer_);
+ attributes_->EndDictionary();
+}
+
+void MemoryAllocatorDump::AddScalarF(const char* name,
+ const char* units,
+ double value) {
+ attributes_->BeginDictionary(name);
+ attributes_->SetString("type", kTypeScalar);
+ attributes_->SetString("units", units);
+ attributes_->SetDouble("value", value);
+ attributes_->EndDictionary();
+}
+
+void MemoryAllocatorDump::AddString(const char* name,
+ const char* units,
+ const std::string& value) {
+ // String attributes are disabled in background mode.
+ if (process_memory_dump_->dump_args().level_of_detail ==
+ MemoryDumpLevelOfDetail::BACKGROUND) {
+ NOTREACHED();
+ return;
+ }
+
+ attributes_->BeginDictionary(name);
+ attributes_->SetString("type", kTypeString);
+ attributes_->SetString("units", units);
+ attributes_->SetString("value", value);
+ attributes_->EndDictionary();
+}
+
+void MemoryAllocatorDump::AsValueInto(TracedValue* value) const {
+ value->BeginDictionaryWithCopiedName(absolute_name_);
+ value->SetString("guid", guid_.ToString());
+ value->SetValue("attrs", *attributes_);
+ if (flags_)
+ value->SetInteger("flags", flags_);
+ value->EndDictionary(); // "allocator_name/heap_subheap": { ... }
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/memory_allocator_dump.h b/libchrome/base/trace_event/memory_allocator_dump.h
new file mode 100644
index 0000000..7d10236
--- /dev/null
+++ b/libchrome/base/trace_event/memory_allocator_dump.h
@@ -0,0 +1,110 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_H_
+#define BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/values.h"
+
+namespace base {
+namespace trace_event {
+
+class MemoryDumpManager;
+class ProcessMemoryDump;
+class TracedValue;
+
+// Data model for user-land memory allocator dumps.
+class BASE_EXPORT MemoryAllocatorDump {
+ public:
+ enum Flags {
+ DEFAULT = 0,
+
+ // A dump marked weak will be discarded by TraceViewer.
+ WEAK = 1 << 0,
+ };
+
+ // MemoryAllocatorDump is owned by ProcessMemoryDump.
+ MemoryAllocatorDump(const std::string& absolute_name,
+ ProcessMemoryDump* process_memory_dump,
+ const MemoryAllocatorDumpGuid& guid);
+ MemoryAllocatorDump(const std::string& absolute_name,
+ ProcessMemoryDump* process_memory_dump);
+ ~MemoryAllocatorDump();
+
+ // Standard attribute |name|s for the AddScalar and AddString() methods.
+ static const char kNameSize[]; // To represent allocated space.
+ static const char kNameObjectCount[]; // To represent number of objects.
+
+ // Standard attribute |unit|s for the AddScalar and AddString() methods.
+ static const char kUnitsBytes[]; // Unit name to represent bytes.
+ static const char kUnitsObjects[]; // Unit name to represent #objects.
+
+ // Constants used only internally and by tests.
+ static const char kTypeScalar[]; // Type name for scalar attributes.
+ static const char kTypeString[]; // Type name for string attributes.
+
+ // Setters for scalar attributes. Some examples:
+ // - "size" column (all dumps are expected to have at least this one):
+ // AddScalar(kNameSize, kUnitsBytes, 1234);
+ // - Some extra-column reporting internal details of the subsystem:
+ // AddScalar("number_of_freelist_entires", kUnitsObjects, 42)
+ // - Other informational column (will not be auto-added in the UI)
+ // AddScalarF("kittens_ratio", "ratio", 42.0f)
+ void AddScalar(const char* name, const char* units, uint64_t value);
+ void AddScalarF(const char* name, const char* units, double value);
+ void AddString(const char* name, const char* units, const std::string& value);
+
+ // Absolute name, unique within the scope of an entire ProcessMemoryDump.
+ const std::string& absolute_name() const { return absolute_name_; }
+
+ // Called at trace generation time to populate the TracedValue.
+ void AsValueInto(TracedValue* value) const;
+
+ // Get the ProcessMemoryDump instance that owns this.
+ ProcessMemoryDump* process_memory_dump() const {
+ return process_memory_dump_;
+ }
+
+ // Use enum Flags to set values.
+ void set_flags(int flags) { flags_ |= flags; }
+ void clear_flags(int flags) { flags_ &= ~flags; }
+ int flags() { return flags_; }
+
+ // |guid| is an optional global dump identifier, unique across all processes
+ // within the scope of a global dump. It is only required when using the
+ // graph APIs (see TODO_method_name) to express retention / suballocation or
+ // cross process sharing. See crbug.com/492102 for design docs.
+ // Subsequent MemoryAllocatorDump(s) with the same |absolute_name| are
+ // expected to have the same guid.
+ const MemoryAllocatorDumpGuid& guid() const { return guid_; }
+
+ TracedValue* attributes_for_testing() const { return attributes_.get(); }
+
+ private:
+ const std::string absolute_name_;
+ ProcessMemoryDump* const process_memory_dump_; // Not owned (PMD owns this).
+ std::unique_ptr<TracedValue> attributes_;
+ MemoryAllocatorDumpGuid guid_;
+ int flags_; // See enum Flags.
+
+ // A local buffer for Sprintf conversion on fastpath. Avoids allocating
+ // temporary strings on each AddScalar() call.
+ std::string string_conversion_buffer_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryAllocatorDump);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_H_
diff --git a/libchrome/base/trace_event/memory_allocator_dump_guid.cc b/libchrome/base/trace_event/memory_allocator_dump_guid.cc
new file mode 100644
index 0000000..bf4389a
--- /dev/null
+++ b/libchrome/base/trace_event/memory_allocator_dump_guid.cc
@@ -0,0 +1,38 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_allocator_dump_guid.h"
+
+#include "base/format_macros.h"
+#include "base/sha1.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+uint64_t HashString(const std::string& str) {
+ uint64_t hash[(kSHA1Length + sizeof(uint64_t) - 1) / sizeof(uint64_t)] = {0};
+ SHA1HashBytes(reinterpret_cast<const unsigned char*>(str.data()), str.size(),
+ reinterpret_cast<unsigned char*>(hash));
+ return hash[0];
+}
+} // namespace
+
+MemoryAllocatorDumpGuid::MemoryAllocatorDumpGuid(uint64_t guid) : guid_(guid) {}
+
+MemoryAllocatorDumpGuid::MemoryAllocatorDumpGuid()
+ : MemoryAllocatorDumpGuid(0u) {
+}
+
+MemoryAllocatorDumpGuid::MemoryAllocatorDumpGuid(const std::string& guid_str)
+ : MemoryAllocatorDumpGuid(HashString(guid_str)) {
+}
+
+std::string MemoryAllocatorDumpGuid::ToString() const {
+ return StringPrintf("%" PRIx64, guid_);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/memory_allocator_dump_guid.h b/libchrome/base/trace_event/memory_allocator_dump_guid.h
new file mode 100644
index 0000000..b6472c6
--- /dev/null
+++ b/libchrome/base/trace_event/memory_allocator_dump_guid.h
@@ -0,0 +1,51 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_GUID_H_
+#define BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_GUID_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace trace_event {
+
+class BASE_EXPORT MemoryAllocatorDumpGuid {
+ public:
+ MemoryAllocatorDumpGuid();
+ explicit MemoryAllocatorDumpGuid(uint64_t guid);
+
+ // Utility ctor to hash a GUID if the caller prefers a string. The caller
+ // still has to ensure that |guid_str| is unique, per snapshot, within the
+ // global scope of all the traced processes.
+ explicit MemoryAllocatorDumpGuid(const std::string& guid_str);
+
+ uint64_t ToUint64() const { return guid_; }
+
+ // Returns a (hex-encoded) string representation of the guid.
+ std::string ToString() const;
+
+ bool empty() const { return guid_ == 0u; }
+
+ bool operator==(const MemoryAllocatorDumpGuid& other) const {
+ return guid_ == other.guid_;
+ }
+
+ bool operator!=(const MemoryAllocatorDumpGuid& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ uint64_t guid_;
+
+ // Deliberately copy-able.
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_GUID_H_
diff --git a/libchrome/base/trace_event/memory_allocator_dump_unittest.cc b/libchrome/base/trace_event/memory_allocator_dump_unittest.cc
new file mode 100644
index 0000000..1bf9715
--- /dev/null
+++ b/libchrome/base/trace_event/memory_allocator_dump_unittest.cc
@@ -0,0 +1,190 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_allocator_dump.h"
+
+#include <stdint.h>
+
+#include "base/format_macros.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_dump_session_state.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "base/values.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+class FakeMemoryAllocatorDumpProvider : public MemoryDumpProvider {
+ public:
+ bool OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) override {
+ MemoryAllocatorDump* root_heap =
+ pmd->CreateAllocatorDump("foobar_allocator");
+
+ root_heap->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes, 4096);
+ root_heap->AddScalar(MemoryAllocatorDump::kNameObjectCount,
+ MemoryAllocatorDump::kUnitsObjects, 42);
+ root_heap->AddScalar("attr1", "units1", 1234);
+ root_heap->AddString("attr2", "units2", "string_value");
+ root_heap->AddScalarF("attr3", "units3", 42.5f);
+
+ MemoryAllocatorDump* sub_heap =
+ pmd->CreateAllocatorDump("foobar_allocator/sub_heap");
+ sub_heap->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes, 1);
+ sub_heap->AddScalar(MemoryAllocatorDump::kNameObjectCount,
+ MemoryAllocatorDump::kUnitsObjects, 3);
+
+ pmd->CreateAllocatorDump("foobar_allocator/sub_heap/empty");
+ // Leave the rest of sub heap deliberately uninitialized, to check that
+ // CreateAllocatorDump returns a properly zero-initialized object.
+
+ return true;
+ }
+};
+
+std::unique_ptr<Value> CheckAttribute(const MemoryAllocatorDump* dump,
+ const std::string& name,
+ const char* expected_type,
+ const char* expected_units) {
+ std::unique_ptr<Value> raw_attrs =
+ dump->attributes_for_testing()->ToBaseValue();
+ DictionaryValue* args = nullptr;
+ DictionaryValue* arg = nullptr;
+ std::string arg_value;
+ const Value* out_value = nullptr;
+ EXPECT_TRUE(raw_attrs->GetAsDictionary(&args));
+ EXPECT_TRUE(args->GetDictionary(name, &arg));
+ EXPECT_TRUE(arg->GetString("type", &arg_value));
+ EXPECT_EQ(expected_type, arg_value);
+ EXPECT_TRUE(arg->GetString("units", &arg_value));
+ EXPECT_EQ(expected_units, arg_value);
+ EXPECT_TRUE(arg->Get("value", &out_value));
+ return out_value ? out_value->CreateDeepCopy() : std::unique_ptr<Value>();
+}
+
+void CheckString(const MemoryAllocatorDump* dump,
+ const std::string& name,
+ const char* expected_type,
+ const char* expected_units,
+ const std::string& expected_value) {
+ std::string attr_str_value;
+ auto attr_value = CheckAttribute(dump, name, expected_type, expected_units);
+ EXPECT_TRUE(attr_value->GetAsString(&attr_str_value));
+ EXPECT_EQ(expected_value, attr_str_value);
+}
+
+void CheckScalar(const MemoryAllocatorDump* dump,
+ const std::string& name,
+ const char* expected_units,
+ uint64_t expected_value) {
+ CheckString(dump, name, MemoryAllocatorDump::kTypeScalar, expected_units,
+ StringPrintf("%" PRIx64, expected_value));
+}
+
+void CheckScalarF(const MemoryAllocatorDump* dump,
+ const std::string& name,
+ const char* expected_units,
+ double expected_value) {
+ auto attr_value = CheckAttribute(dump, name, MemoryAllocatorDump::kTypeScalar,
+ expected_units);
+ double attr_double_value;
+ EXPECT_TRUE(attr_value->GetAsDouble(&attr_double_value));
+ EXPECT_EQ(expected_value, attr_double_value);
+}
+
+} // namespace
+
+TEST(MemoryAllocatorDumpTest, GuidGeneration) {
+ std::unique_ptr<MemoryAllocatorDump> mad(
+ new MemoryAllocatorDump("foo", nullptr, MemoryAllocatorDumpGuid(0x42u)));
+ ASSERT_EQ("42", mad->guid().ToString());
+
+  // If the dumper does not provide a Guid, the MAD will make one up on the
+  // fly. Furthermore that Guid will stay stable across multiple
+  // snapshots if the |absolute_name| of the dump doesn't change.
+ mad.reset(new MemoryAllocatorDump("bar", nullptr));
+ const MemoryAllocatorDumpGuid guid_bar = mad->guid();
+ ASSERT_FALSE(guid_bar.empty());
+ ASSERT_FALSE(guid_bar.ToString().empty());
+ ASSERT_EQ(guid_bar, mad->guid());
+
+ mad.reset(new MemoryAllocatorDump("bar", nullptr));
+ const MemoryAllocatorDumpGuid guid_bar_2 = mad->guid();
+ ASSERT_EQ(guid_bar, guid_bar_2);
+
+ mad.reset(new MemoryAllocatorDump("baz", nullptr));
+ const MemoryAllocatorDumpGuid guid_baz = mad->guid();
+ ASSERT_NE(guid_bar, guid_baz);
+}
+
+TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
+ FakeMemoryAllocatorDumpProvider fmadp;
+ MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+ ProcessMemoryDump pmd(new MemoryDumpSessionState, dump_args);
+
+ fmadp.OnMemoryDump(dump_args, &pmd);
+
+ ASSERT_EQ(3u, pmd.allocator_dumps().size());
+
+ const MemoryAllocatorDump* root_heap =
+ pmd.GetAllocatorDump("foobar_allocator");
+ ASSERT_NE(nullptr, root_heap);
+ EXPECT_EQ("foobar_allocator", root_heap->absolute_name());
+ CheckScalar(root_heap, MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes, 4096);
+ CheckScalar(root_heap, MemoryAllocatorDump::kNameObjectCount,
+ MemoryAllocatorDump::kUnitsObjects, 42);
+ CheckScalar(root_heap, "attr1", "units1", 1234);
+ CheckString(root_heap, "attr2", MemoryAllocatorDump::kTypeString, "units2",
+ "string_value");
+ CheckScalarF(root_heap, "attr3", "units3", 42.5f);
+
+ const MemoryAllocatorDump* sub_heap =
+ pmd.GetAllocatorDump("foobar_allocator/sub_heap");
+ ASSERT_NE(nullptr, sub_heap);
+ EXPECT_EQ("foobar_allocator/sub_heap", sub_heap->absolute_name());
+ CheckScalar(sub_heap, MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes, 1);
+ CheckScalar(sub_heap, MemoryAllocatorDump::kNameObjectCount,
+ MemoryAllocatorDump::kUnitsObjects, 3);
+ const MemoryAllocatorDump* empty_sub_heap =
+ pmd.GetAllocatorDump("foobar_allocator/sub_heap/empty");
+ ASSERT_NE(nullptr, empty_sub_heap);
+ EXPECT_EQ("foobar_allocator/sub_heap/empty", empty_sub_heap->absolute_name());
+ auto raw_attrs = empty_sub_heap->attributes_for_testing()->ToBaseValue();
+ DictionaryValue* attrs = nullptr;
+ ASSERT_TRUE(raw_attrs->GetAsDictionary(&attrs));
+ ASSERT_FALSE(attrs->HasKey(MemoryAllocatorDump::kNameSize));
+ ASSERT_FALSE(attrs->HasKey(MemoryAllocatorDump::kNameObjectCount));
+
+ // Check that the AsValueInfo doesn't hit any DCHECK.
+ std::unique_ptr<TracedValue> traced_value(new TracedValue);
+ pmd.AsValueInto(traced_value.get());
+}
+
+// DEATH tests are not supported in Android / iOS.
+#if !defined(NDEBUG) && !defined(OS_ANDROID) && !defined(OS_IOS)
+TEST(MemoryAllocatorDumpTest, ForbidDuplicatesDeathTest) {
+ FakeMemoryAllocatorDumpProvider fmadp;
+ MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+ ProcessMemoryDump pmd(new MemoryDumpSessionState, dump_args);
+ pmd.CreateAllocatorDump("foo_allocator");
+ pmd.CreateAllocatorDump("bar_allocator/heap");
+ ASSERT_DEATH(pmd.CreateAllocatorDump("foo_allocator"), "");
+ ASSERT_DEATH(pmd.CreateAllocatorDump("bar_allocator/heap"), "");
+ ASSERT_DEATH(pmd.CreateAllocatorDump(""), "");
+}
+#endif
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/memory_dump_manager.cc b/libchrome/base/trace_event/memory_dump_manager.cc
new file mode 100644
index 0000000..eed070a
--- /dev/null
+++ b/libchrome/base/trace_event/memory_dump_manager.cc
@@ -0,0 +1,883 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_manager.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "base/atomic_sequence_num.h"
+#include "base/base_switches.h"
+#include "base/command_line.h"
+#include "base/compiler_specific.h"
+#include "base/debug/debugging_flags.h"
+#include "base/debug/stack_trace.h"
+#include "base/memory/ptr_util.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/heap_profiler.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
+#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
+#include "base/trace_event/malloc_dump_provider.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_dump_session_state.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include "base/trace_event/java_heap_dump_provider_android.h"
+#endif
+
+#if defined(OS_WIN)
+#include "base/trace_event/winheap_dump_provider_win.h"
+#endif
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+const int kTraceEventNumArgs = 1;
+const char* kTraceEventArgNames[] = {"dumps"};
+const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
+
+StaticAtomicSequenceNumber g_next_guid;
+MemoryDumpManager* g_instance_for_testing = nullptr;
+
+// Callback wrapper to hook upon the completion of RequestGlobalDump() and
+// inject trace markers.
+void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
+ uint64_t dump_guid,
+ bool success) {
+ TRACE_EVENT_NESTABLE_ASYNC_END1(
+ MemoryDumpManager::kTraceCategory, "GlobalMemoryDump",
+ TRACE_ID_MANGLE(dump_guid), "success", success);
+
+ if (!wrapped_callback.is_null()) {
+ wrapped_callback.Run(dump_guid, success);
+ wrapped_callback.Reset();
+ }
+}
+
+// Proxy class which wraps a ConvertableToTraceFormat owned by the
+// |session_state| into a proxy object that can be added to the trace event log.
+// This is to solve the problem that the MemoryDumpSessionState is refcounted
+// but the tracing subsystem wants a std::unique_ptr<ConvertableToTraceFormat>.
+template <typename T>
+struct SessionStateConvertableProxy : public ConvertableToTraceFormat {
+ using GetterFunctPtr = T* (MemoryDumpSessionState::*)() const;
+
+ SessionStateConvertableProxy(
+ scoped_refptr<MemoryDumpSessionState> session_state,
+ GetterFunctPtr getter_function)
+ : session_state(session_state), getter_function(getter_function) {}
+
+ void AppendAsTraceFormat(std::string* out) const override {
+ return (session_state.get()->*getter_function)()->AppendAsTraceFormat(out);
+ }
+
+ void EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) override {
+ return (session_state.get()->*getter_function)()
+ ->EstimateTraceMemoryOverhead(overhead);
+ }
+
+ scoped_refptr<MemoryDumpSessionState> session_state;
+ GetterFunctPtr const getter_function;
+};
+
+} // namespace
+
+// static
+const char* const MemoryDumpManager::kTraceCategory =
+ TRACE_DISABLED_BY_DEFAULT("memory-infra");
+
+// static
+const char* const MemoryDumpManager::kLogPrefix = "Memory-infra dump";
+
+// static
+const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;
+
+// static
+const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0;
+
+// static
+const char* const MemoryDumpManager::kSystemAllocatorPoolName =
+#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
+ MallocDumpProvider::kAllocatedObjects;
+#elif defined(OS_WIN)
+ WinHeapDumpProvider::kAllocatedObjects;
+#else
+ nullptr;
+#endif
+
+// static
+MemoryDumpManager* MemoryDumpManager::GetInstance() {
+ if (g_instance_for_testing)
+ return g_instance_for_testing;
+
+ return Singleton<MemoryDumpManager,
+ LeakySingletonTraits<MemoryDumpManager>>::get();
+}
+
+// static
+void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
+ g_instance_for_testing = instance;
+}
+
+MemoryDumpManager::MemoryDumpManager()
+ : delegate_(nullptr),
+ is_coordinator_(false),
+ memory_tracing_enabled_(0),
+ tracing_process_id_(kInvalidTracingProcessId),
+ dumper_registrations_ignored_for_testing_(false),
+ heap_profiling_enabled_(false) {
+ g_next_guid.GetNext(); // Make sure that first guid is not zero.
+
+ // At this point the command line may not be initialized but we try to
+ // enable the heap profiler to capture allocations as soon as possible.
+ EnableHeapProfilingIfNeeded();
+}
+
+MemoryDumpManager::~MemoryDumpManager() {
+ TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
+}
+
+void MemoryDumpManager::EnableHeapProfilingIfNeeded() {
+ if (heap_profiling_enabled_)
+ return;
+
+ if (!CommandLine::InitializedForCurrentProcess() ||
+ !CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kEnableHeapProfiling))
+ return;
+
+ std::string profiling_mode = CommandLine::ForCurrentProcess()
+ ->GetSwitchValueASCII(switches::kEnableHeapProfiling);
+ if (profiling_mode == "") {
+ AllocationContextTracker::SetCaptureMode(
+ AllocationContextTracker::CaptureMode::PSEUDO_STACK);
+ }
+ else if (profiling_mode == switches::kEnableHeapProfilingModeNative) {
+#if HAVE_TRACE_STACK_FRAME_POINTERS && \
+ (BUILDFLAG(ENABLE_PROFILING) || !defined(NDEBUG))
+ // We need frame pointers for native tracing to work, and they are
+ // enabled in profiling and debug builds.
+ AllocationContextTracker::SetCaptureMode(
+ AllocationContextTracker::CaptureMode::NATIVE_STACK);
+#else
+ CHECK(false) << "'" << profiling_mode << "' mode for "
+ << switches::kEnableHeapProfiling << " flag is not supported "
+ << "for this platform / build type.";
+#endif
+ } else {
+ CHECK(false) << "Invalid mode '" << profiling_mode << "' for "
+ << switches::kEnableHeapProfiling << " flag.";
+ }
+
+ for (auto mdp : dump_providers_)
+ mdp->dump_provider->OnHeapProfilingEnabled(true);
+ heap_profiling_enabled_ = true;
+}
+
+void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
+ bool is_coordinator) {
+ {
+ AutoLock lock(lock_);
+ DCHECK(delegate);
+ DCHECK(!delegate_);
+ delegate_ = delegate;
+ is_coordinator_ = is_coordinator;
+ EnableHeapProfilingIfNeeded();
+ }
+
+// Enable the core dump providers.
+#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
+ RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr);
+#endif
+
+#if defined(OS_ANDROID)
+ RegisterDumpProvider(JavaHeapDumpProvider::GetInstance(), "JavaHeap",
+ nullptr);
+#endif
+
+#if defined(OS_WIN)
+ RegisterDumpProvider(WinHeapDumpProvider::GetInstance(), "WinHeap", nullptr);
+#endif
+
+ // If tracing was enabled before initializing MemoryDumpManager, we missed the
+  // OnTraceLogEnabled() event. Synthesize it so we can late-join the party.
+ bool is_tracing_already_enabled = TraceLog::GetInstance()->IsEnabled();
+ TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list.
+ TraceLog::GetInstance()->AddEnabledStateObserver(this);
+ if (is_tracing_already_enabled)
+ OnTraceLogEnabled();
+}
+
+void MemoryDumpManager::RegisterDumpProvider(
+ MemoryDumpProvider* mdp,
+ const char* name,
+ scoped_refptr<SingleThreadTaskRunner> task_runner,
+ MemoryDumpProvider::Options options) {
+ options.dumps_on_single_thread_task_runner = true;
+ RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
+}
+
+void MemoryDumpManager::RegisterDumpProvider(
+ MemoryDumpProvider* mdp,
+ const char* name,
+ scoped_refptr<SingleThreadTaskRunner> task_runner) {
+ // Set |dumps_on_single_thread_task_runner| to true because all providers
+ // without task runner are run on dump thread.
+ MemoryDumpProvider::Options options;
+ options.dumps_on_single_thread_task_runner = true;
+ RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
+}
+
+void MemoryDumpManager::RegisterDumpProviderWithSequencedTaskRunner(
+ MemoryDumpProvider* mdp,
+ const char* name,
+ scoped_refptr<SequencedTaskRunner> task_runner,
+ MemoryDumpProvider::Options options) {
+ DCHECK(task_runner);
+ options.dumps_on_single_thread_task_runner = false;
+ RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
+}
+
+void MemoryDumpManager::RegisterDumpProviderInternal(
+ MemoryDumpProvider* mdp,
+ const char* name,
+ scoped_refptr<SequencedTaskRunner> task_runner,
+ const MemoryDumpProvider::Options& options) {
+ if (dumper_registrations_ignored_for_testing_)
+ return;
+
+ bool whitelisted_for_background_mode = IsMemoryDumpProviderWhitelisted(name);
+ scoped_refptr<MemoryDumpProviderInfo> mdpinfo =
+ new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options,
+ whitelisted_for_background_mode);
+
+ {
+ AutoLock lock(lock_);
+ bool already_registered = !dump_providers_.insert(mdpinfo).second;
+ // This actually happens in some tests which don't have a clean tear-down
+ // path for RenderThreadImpl::Init().
+ if (already_registered)
+ return;
+ }
+
+ if (heap_profiling_enabled_)
+ mdp->OnHeapProfilingEnabled(true);
+}
+
+void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
+ UnregisterDumpProviderInternal(mdp, false /* delete_async */);
+}
+
+void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon(
+ std::unique_ptr<MemoryDumpProvider> mdp) {
+ UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */);
+}
+
+void MemoryDumpManager::UnregisterDumpProviderInternal(
+ MemoryDumpProvider* mdp,
+ bool take_mdp_ownership_and_delete_async) {
+ std::unique_ptr<MemoryDumpProvider> owned_mdp;
+ if (take_mdp_ownership_and_delete_async)
+ owned_mdp.reset(mdp);
+
+ AutoLock lock(lock_);
+
+ auto mdp_iter = dump_providers_.begin();
+ for (; mdp_iter != dump_providers_.end(); ++mdp_iter) {
+ if ((*mdp_iter)->dump_provider == mdp)
+ break;
+ }
+
+ if (mdp_iter == dump_providers_.end())
+ return; // Not registered / already unregistered.
+
+ if (take_mdp_ownership_and_delete_async) {
+    // The MDP will be deleted whenever its MDPInfo struct is, that is either:
+ // - At the end of this function, if no dump is in progress.
+ // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is
+ // removed from |pending_dump_providers|.
+ DCHECK(!(*mdp_iter)->owned_dump_provider);
+ (*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
+ } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
+ // If you hit this DCHECK, your dump provider has a bug.
+ // Unregistration of a MemoryDumpProvider is safe only if:
+ // - The MDP has specified a sequenced task runner affinity AND the
+ // unregistration happens on the same task runner. So that the MDP cannot
+ // unregister and be in the middle of a OnMemoryDump() at the same time.
+ // - The MDP has NOT specified a task runner affinity and its ownership is
+ // transferred via UnregisterAndDeleteDumpProviderSoon().
+ // In all the other cases, it is not possible to guarantee that the
+ // unregistration will not race with OnMemoryDump() calls.
+ DCHECK((*mdp_iter)->task_runner &&
+ (*mdp_iter)->task_runner->RunsTasksOnCurrentThread())
+ << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
+ << "unregister itself in a racy way. Please file a crbug.";
+ }
+
+ // The MDPInfo instance can still be referenced by the
+ // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
+ // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
+ // to just skip it, without actually invoking the |mdp|, which might be
+ // destroyed by the caller soon after this method returns.
+ (*mdp_iter)->disabled = true;
+ dump_providers_.erase(mdp_iter);
+}
+
+void MemoryDumpManager::RequestGlobalDump(
+ MemoryDumpType dump_type,
+ MemoryDumpLevelOfDetail level_of_detail,
+ const MemoryDumpCallback& callback) {
+ // Bail out immediately if tracing is not enabled at all or if the dump mode
+ // is not allowed.
+ if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) ||
+ !IsDumpModeAllowed(level_of_detail)) {
+ VLOG(1) << kLogPrefix << " failed because " << kTraceCategory
+ << " tracing category is not enabled or the requested dump mode is "
+ "not allowed by trace config.";
+ if (!callback.is_null())
+ callback.Run(0u /* guid */, false /* success */);
+ return;
+ }
+
+ const uint64_t guid =
+ TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext());
+
+ // Creates an async event to keep track of the global dump evolution.
+ // The |wrapped_callback| will generate the ASYNC_END event and then invoke
+ // the real |callback| provided by the caller.
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "GlobalMemoryDump",
+ TRACE_ID_MANGLE(guid));
+ MemoryDumpCallback wrapped_callback = Bind(&OnGlobalDumpDone, callback);
+
+ // Technically there is no need to grab the |lock_| here as the delegate is
+ // long-lived and can only be set by Initialize(), which is locked and
+ // necessarily happens before memory_tracing_enabled_ == true.
+  // Not taking the |lock_|, though, is likely to make TSan barf and, at this point
+ // (memory-infra is enabled) we're not in the fast-path anymore.
+ MemoryDumpManagerDelegate* delegate;
+ {
+ AutoLock lock(lock_);
+ delegate = delegate_;
+ }
+
+ // The delegate will coordinate the IPC broadcast and at some point invoke
+ // CreateProcessDump() to get a dump for the current process.
+ MemoryDumpRequestArgs args = {guid, dump_type, level_of_detail};
+ delegate->RequestGlobalMemoryDump(args, wrapped_callback);
+}
+
+void MemoryDumpManager::RequestGlobalDump(
+ MemoryDumpType dump_type,
+ MemoryDumpLevelOfDetail level_of_detail) {
+ RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback());
+}
+
+void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
+ const MemoryDumpCallback& callback) {
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump",
+ TRACE_ID_MANGLE(args.dump_guid));
+
+ // If argument filter is enabled then only background mode dumps should be
+ // allowed. In case the trace config passed for background tracing session
+ // missed the allowed modes argument, it crashes here instead of creating
+ // unexpected dumps.
+ if (TraceLog::GetInstance()
+ ->GetCurrentTraceConfig()
+ .IsArgumentFilterEnabled()) {
+ CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail);
+ }
+
+ std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
+ {
+ AutoLock lock(lock_);
+
+    // |dump_thread_| can be nullptr if tracing was disabled before reaching
+ // here. SetupNextMemoryDump() is robust enough to tolerate it and will
+ // NACK the dump.
+ pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
+ args, dump_providers_, session_state_, callback,
+ dump_thread_ ? dump_thread_->task_runner() : nullptr));
+
+ // Safety check to prevent reaching here without calling RequestGlobalDump,
+ // with disallowed modes. If |session_state_| is null then tracing is
+ // disabled.
+ CHECK(!session_state_ ||
+ session_state_->memory_dump_config().allowed_dump_modes.count(
+ args.level_of_detail));
+ }
+
+ TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
+ TRACE_ID_MANGLE(args.dump_guid),
+ TRACE_EVENT_FLAG_FLOW_OUT);
+
+ // Start the process dump. This involves task runner hops as specified by the
+ // MemoryDumpProvider(s) in RegisterDumpProvider()).
+ SetupNextMemoryDump(std::move(pmd_async_state));
+}
+
+// PostTask InvokeOnMemoryDump() to the dump provider's sequenced task runner. A
+// PostTask is always required for a generic SequencedTaskRunner to ensure that
+// no other task is running on it concurrently. SetupNextMemoryDump() and
+// InvokeOnMemoryDump() are called alternatively which linearizes the dump
+// provider's OnMemoryDump invocations.
+// At most one of either SetupNextMemoryDump() or InvokeOnMemoryDump() can be
+// active at any time for a given PMD, regardless of status of the |lock_|.
+// |lock_| is used in these functions purely to ensure consistency w.r.t.
+// (un)registrations of |dump_providers_|.
+void MemoryDumpManager::SetupNextMemoryDump(
+ std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
+ HEAP_PROFILER_SCOPED_IGNORE;
+  // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
+ // in the PostTask below don't end up registering their own dump providers
+ // (for discounting trace memory overhead) while holding the |lock_|.
+ TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
+
+ // |dump_thread_| might be destroyed before getting this point.
+ // It means that tracing was disabled right before starting this dump.
+ // Anyway either tracing is stopped or this was the last hop, create a trace
+ // event, add it to the trace and finalize process dump invoking the callback.
+ if (!pmd_async_state->dump_thread_task_runner.get()) {
+ if (pmd_async_state->pending_dump_providers.empty()) {
+ VLOG(1) << kLogPrefix << " failed because dump thread was destroyed"
+ << " before finalizing the dump";
+ } else {
+ VLOG(1) << kLogPrefix << " failed because dump thread was destroyed"
+ << " before dumping "
+ << pmd_async_state->pending_dump_providers.back().get()->name;
+ }
+ pmd_async_state->dump_successful = false;
+ pmd_async_state->pending_dump_providers.clear();
+ }
+ if (pmd_async_state->pending_dump_providers.empty())
+ return FinalizeDumpAndAddToTrace(std::move(pmd_async_state));
+
+ // Read MemoryDumpProviderInfo thread safety considerations in
+ // memory_dump_manager.h when accessing |mdpinfo| fields.
+ MemoryDumpProviderInfo* mdpinfo =
+ pmd_async_state->pending_dump_providers.back().get();
+
+ // If we are in background tracing, we should invoke only the whitelisted
+ // providers. Ignore other providers and continue.
+ if (pmd_async_state->req_args.level_of_detail ==
+ MemoryDumpLevelOfDetail::BACKGROUND &&
+ !mdpinfo->whitelisted_for_background_mode) {
+ pmd_async_state->pending_dump_providers.pop_back();
+ return SetupNextMemoryDump(std::move(pmd_async_state));
+ }
+
+ // If the dump provider did not specify a task runner affinity, dump on
+ // |dump_thread_| which is already checked above for presence.
+ SequencedTaskRunner* task_runner = mdpinfo->task_runner.get();
+ if (!task_runner) {
+ DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
+ task_runner = pmd_async_state->dump_thread_task_runner.get();
+ DCHECK(task_runner);
+ }
+
+ if (mdpinfo->options.dumps_on_single_thread_task_runner &&
+ task_runner->RunsTasksOnCurrentThread()) {
+ // If |dumps_on_single_thread_task_runner| is true then no PostTask is
+ // required if we are on the right thread.
+ return InvokeOnMemoryDump(pmd_async_state.release());
+ }
+
+ bool did_post_task = task_runner->PostTask(
+ FROM_HERE, Bind(&MemoryDumpManager::InvokeOnMemoryDump, Unretained(this),
+ Unretained(pmd_async_state.get())));
+
+ if (did_post_task) {
+    // Ownership is transferred to InvokeOnMemoryDump().
+ ignore_result(pmd_async_state.release());
+ return;
+ }
+
+ // PostTask usually fails only if the process or thread is shut down. So, the
+ // dump provider is disabled here. But, don't disable unbound dump providers.
+ // The utility thread is normally shutdown when disabling the trace and
+ // getting here in this case is expected.
+ if (mdpinfo->task_runner) {
+ LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
+ << "\". Failed to post task on the task runner provided.";
+
+ // A locked access is required to R/W |disabled| (for the
+ // UnregisterAndDeleteDumpProviderSoon() case).
+ AutoLock lock(lock_);
+ mdpinfo->disabled = true;
+ }
+
+ // PostTask failed. Ignore the dump provider and continue.
+ pmd_async_state->pending_dump_providers.pop_back();
+ SetupNextMemoryDump(std::move(pmd_async_state));
+}
+
+// This function is called on the right task runner for current MDP. It is
+// either the task runner specified by MDP or |dump_thread_task_runner| if the
+// MDP did not specify task runner. Invokes the dump provider's OnMemoryDump()
+// (unless disabled).
+void MemoryDumpManager::InvokeOnMemoryDump(
+ ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
+ HEAP_PROFILER_SCOPED_IGNORE;
+ // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason
+ // why it isn't is because of the corner case logic of |did_post_task|
+ // above, which needs to take back the ownership of the |pmd_async_state| when
+ // the PostTask() fails.
+ // Unfortunately, PostTask() destroys the scoped_ptr arguments upon failure
+  // to prevent accidental leaks. Using a scoped_ptr would prevent us from
+  // skipping the hop and moving on. Hence the manual naked -> scoped ptr juggling.
+ auto pmd_async_state = WrapUnique(owned_pmd_async_state);
+ owned_pmd_async_state = nullptr;
+
+ // Read MemoryDumpProviderInfo thread safety considerations in
+ // memory_dump_manager.h when accessing |mdpinfo| fields.
+ MemoryDumpProviderInfo* mdpinfo =
+ pmd_async_state->pending_dump_providers.back().get();
+
+ DCHECK(!mdpinfo->task_runner ||
+ mdpinfo->task_runner->RunsTasksOnCurrentThread());
+
+ bool should_dump;
+ {
+ // A locked access is required to R/W |disabled| (for the
+ // UnregisterAndDeleteDumpProviderSoon() case).
+ AutoLock lock(lock_);
+
+ // Unregister the dump provider if it failed too many times consecutively.
+ if (!mdpinfo->disabled &&
+ mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
+ mdpinfo->disabled = true;
+ LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
+ << "\". Dump failed multiple times consecutively.";
+ }
+ should_dump = !mdpinfo->disabled;
+ } // AutoLock lock(lock_);
+
+ if (should_dump) {
+ // Invoke the dump provider.
+ TRACE_EVENT_WITH_FLOW1(kTraceCategory,
+ "MemoryDumpManager::InvokeOnMemoryDump",
+ TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid),
+ TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
+ "dump_provider.name", mdpinfo->name);
+
+ // Pid of the target process being dumped. Often kNullProcessId (= current
+ // process), non-zero when the coordinator process creates dumps on behalf
+ // of child processes (see crbug.com/461788).
+ ProcessId target_pid = mdpinfo->options.target_pid;
+ MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail};
+ ProcessMemoryDump* pmd =
+ pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid,
+ args);
+ bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
+ mdpinfo->consecutive_failures =
+ dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
+ }
+
+ pmd_async_state->pending_dump_providers.pop_back();
+ SetupNextMemoryDump(std::move(pmd_async_state));
+}
+
+// static
+void MemoryDumpManager::FinalizeDumpAndAddToTrace(
+ std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
+ HEAP_PROFILER_SCOPED_IGNORE;
+ DCHECK(pmd_async_state->pending_dump_providers.empty());
+ const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
+ if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
+ scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
+ pmd_async_state->callback_task_runner;
+ callback_task_runner->PostTask(
+ FROM_HERE, Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace,
+ Passed(&pmd_async_state)));
+ return;
+ }
+
+ TRACE_EVENT_WITH_FLOW0(kTraceCategory,
+ "MemoryDumpManager::FinalizeDumpAndAddToTrace",
+ TRACE_ID_MANGLE(dump_guid), TRACE_EVENT_FLAG_FLOW_IN);
+
+ for (const auto& kv : pmd_async_state->process_dumps) {
+ ProcessId pid = kv.first; // kNullProcessId for the current process.
+ ProcessMemoryDump* process_memory_dump = kv.second.get();
+ std::unique_ptr<TracedValue> traced_value(new TracedValue);
+ process_memory_dump->AsValueInto(traced_value.get());
+ traced_value->SetString("level_of_detail",
+ MemoryDumpLevelOfDetailToString(
+ pmd_async_state->req_args.level_of_detail));
+ const char* const event_name =
+ MemoryDumpTypeToString(pmd_async_state->req_args.dump_type);
+
+ std::unique_ptr<ConvertableToTraceFormat> event_value(
+ std::move(traced_value));
+ TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID(
+ TRACE_EVENT_PHASE_MEMORY_DUMP,
+ TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name,
+ trace_event_internal::kGlobalScope, dump_guid, pid,
+ kTraceEventNumArgs, kTraceEventArgNames,
+ kTraceEventArgTypes, nullptr /* arg_values */, &event_value,
+ TRACE_EVENT_FLAG_HAS_ID);
+ }
+
+ bool tracing_still_enabled;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &tracing_still_enabled);
+ if (!tracing_still_enabled) {
+ pmd_async_state->dump_successful = false;
+ VLOG(1) << kLogPrefix << " failed because tracing was disabled before"
+ << " the dump was completed";
+ }
+
+ if (!pmd_async_state->callback.is_null()) {
+ pmd_async_state->callback.Run(dump_guid, pmd_async_state->dump_successful);
+ pmd_async_state->callback.Reset();
+ }
+
+ TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
+ TRACE_ID_MANGLE(dump_guid));
+}
+
+void MemoryDumpManager::OnTraceLogEnabled() {
+ bool enabled;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);
+ if (!enabled)
+ return;
+
+ // Initialize the TraceLog for the current thread. This avoids the TraceLog
+ // memory dump provider being registered lazily in the PostTask() below
+ // while the |lock_| is taken.
+ TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
+
+ // Spin-up the thread used to invoke unbound dump providers.
+ std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra"));
+ if (!dump_thread->Start()) {
+ LOG(ERROR) << "Failed to start the memory-infra thread for tracing";
+ return;
+ }
+
+ const TraceConfig trace_config =
+ TraceLog::GetInstance()->GetCurrentTraceConfig();
+ scoped_refptr<MemoryDumpSessionState> session_state =
+ new MemoryDumpSessionState;
+ session_state->SetMemoryDumpConfig(trace_config.memory_dump_config());
+ if (heap_profiling_enabled_) {
+ // If heap profiling is enabled, the stack frame deduplicator and type name
+ // deduplicator will be in use. Add metadata events to write the frames
+ // and type IDs.
+ session_state->SetStackFrameDeduplicator(
+ WrapUnique(new StackFrameDeduplicator));
+
+ session_state->SetTypeNameDeduplicator(
+ WrapUnique(new TypeNameDeduplicator));
+
+ TRACE_EVENT_API_ADD_METADATA_EVENT(
+ TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
+ "stackFrames",
+ WrapUnique(new SessionStateConvertableProxy<StackFrameDeduplicator>(
+ session_state, &MemoryDumpSessionState::stack_frame_deduplicator)));
+
+ TRACE_EVENT_API_ADD_METADATA_EVENT(
+ TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
+ "typeNames",
+ WrapUnique(new SessionStateConvertableProxy<TypeNameDeduplicator>(
+ session_state, &MemoryDumpSessionState::type_name_deduplicator)));
+ }
+
+ {
+ AutoLock lock(lock_);
+
+ DCHECK(delegate_); // At this point we must have a delegate.
+ session_state_ = session_state;
+
+ DCHECK(!dump_thread_);
+ dump_thread_ = std::move(dump_thread);
+
+ subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
+
+ // TODO(primiano): This is a temporary hack to disable periodic memory dumps
+ // when running memory benchmarks until telemetry uses TraceConfig to
+ // enable/disable periodic dumps. See crbug.com/529184 .
+ if (!is_coordinator_ ||
+ CommandLine::ForCurrentProcess()->HasSwitch(
+ "enable-memory-benchmarking")) {
+ return;
+ }
+ }
+
+ // Enable periodic dumps if necessary.
+ periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers);
+}
+
+void MemoryDumpManager::OnTraceLogDisabled() {
+ // There might be a memory dump in progress while this happens. Therefore,
+ // ensure that the MDM state which depends on the tracing enabled / disabled
+ // state is always accessed by the dumping methods holding the |lock_|.
+ subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
+ std::unique_ptr<Thread> dump_thread;
+ {
+ AutoLock lock(lock_);
+ dump_thread = std::move(dump_thread_);
+ session_state_ = nullptr;
+ }
+
+ // Thread stops are blocking and must be performed outside of the |lock_|
+ // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
+ periodic_dump_timer_.Stop();
+ if (dump_thread)
+ dump_thread->Stop();
+}
+
+bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
+ AutoLock lock(lock_);
+ if (!session_state_)
+ return false;
+ return session_state_->memory_dump_config().allowed_dump_modes.count(
+ dump_mode) != 0;
+}
+
+uint64_t MemoryDumpManager::GetTracingProcessId() const {
+ return delegate_->GetTracingProcessId();
+}
+
+MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
+ MemoryDumpProvider* dump_provider,
+ const char* name,
+ scoped_refptr<SequencedTaskRunner> task_runner,
+ const MemoryDumpProvider::Options& options,
+ bool whitelisted_for_background_mode)
+ : dump_provider(dump_provider),
+ name(name),
+ task_runner(std::move(task_runner)),
+ options(options),
+ consecutive_failures(0),
+ disabled(false),
+ whitelisted_for_background_mode(whitelisted_for_background_mode) {}
+
+MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
+
+bool MemoryDumpManager::MemoryDumpProviderInfo::Comparator::operator()(
+ const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& a,
+ const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& b) const {
+ if (!a || !b)
+ return a.get() < b.get();
+ // Ensure that unbound providers (task_runner == nullptr) always run last.
+ // Rationale: some unbound dump providers are known to be slow, keep them last
+ // to avoid skewing timings of the other dump providers.
+ return std::tie(a->task_runner, a->dump_provider) >
+ std::tie(b->task_runner, b->dump_provider);
+}
+
+MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
+ MemoryDumpRequestArgs req_args,
+ const MemoryDumpProviderInfo::OrderedSet& dump_providers,
+ scoped_refptr<MemoryDumpSessionState> session_state,
+ MemoryDumpCallback callback,
+ scoped_refptr<SingleThreadTaskRunner> dump_thread_task_runner)
+ : req_args(req_args),
+ session_state(std::move(session_state)),
+ callback(callback),
+ dump_successful(true),
+ callback_task_runner(ThreadTaskRunnerHandle::Get()),
+ dump_thread_task_runner(std::move(dump_thread_task_runner)) {
+ pending_dump_providers.reserve(dump_providers.size());
+ pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
+}
+
+MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() {
+}
+
+ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState::
+ GetOrCreateMemoryDumpContainerForProcess(ProcessId pid,
+ const MemoryDumpArgs& dump_args) {
+ auto iter = process_dumps.find(pid);
+ if (iter == process_dumps.end()) {
+ std::unique_ptr<ProcessMemoryDump> new_pmd(
+ new ProcessMemoryDump(session_state, dump_args));
+ iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
+ }
+ return iter->second.get();
+}
+
+MemoryDumpManager::PeriodicGlobalDumpTimer::PeriodicGlobalDumpTimer() {}
+
+MemoryDumpManager::PeriodicGlobalDumpTimer::~PeriodicGlobalDumpTimer() {
+ Stop();
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::Start(
+ const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list) {
+ if (triggers_list.empty())
+ return;
+
+ // At the moment the periodic support is limited to at most one periodic
+ // trigger per dump mode. All intervals should be an integer multiple of the
+ // smallest interval specified.
+ periodic_dumps_count_ = 0;
+ uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
+ uint32_t light_dump_period_ms = 0;
+ uint32_t heavy_dump_period_ms = 0;
+ DCHECK_LE(triggers_list.size(), 3u);
+ auto* mdm = MemoryDumpManager::GetInstance();
+ for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
+ DCHECK_NE(0u, config.periodic_interval_ms);
+ switch (config.level_of_detail) {
+ case MemoryDumpLevelOfDetail::BACKGROUND:
+ DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND));
+ break;
+ case MemoryDumpLevelOfDetail::LIGHT:
+ DCHECK_EQ(0u, light_dump_period_ms);
+ DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT));
+ light_dump_period_ms = config.periodic_interval_ms;
+ break;
+ case MemoryDumpLevelOfDetail::DETAILED:
+ DCHECK_EQ(0u, heavy_dump_period_ms);
+ DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED));
+ heavy_dump_period_ms = config.periodic_interval_ms;
+ break;
+ }
+ min_timer_period_ms =
+ std::min(min_timer_period_ms, config.periodic_interval_ms);
+ }
+
+ DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms);
+ light_dump_rate_ = light_dump_period_ms / min_timer_period_ms;
+ DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
+ heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms;
+
+ timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms),
+ base::Bind(&PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump,
+ base::Unretained(this)));
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::Stop() {
+ if (IsRunning()) {
+ timer_.Stop();
+ }
+}
+
+bool MemoryDumpManager::PeriodicGlobalDumpTimer::IsRunning() {
+ return timer_.IsRunning();
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump() {
+ MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
+ if (light_dump_rate_ > 0 && periodic_dumps_count_ % light_dump_rate_ == 0)
+ level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
+ if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0)
+ level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
+ ++periodic_dumps_count_;
+
+ MemoryDumpManager::GetInstance()->RequestGlobalDump(
+ MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/memory_dump_manager.h b/libchrome/base/trace_event/memory_dump_manager.h
new file mode 100644
index 0000000..06b772c
--- /dev/null
+++ b/libchrome/base/trace_event/memory_dump_manager.h
@@ -0,0 +1,410 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_MANAGER_H_
+#define BASE_TRACE_EVENT_MEMORY_DUMP_MANAGER_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <set>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/singleton.h"
+#include "base/synchronization/lock.h"
+#include "base/timer/timer.h"
+#include "base/trace_event/memory_dump_request_args.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+
+class SingleThreadTaskRunner;
+class Thread;
+
+namespace trace_event {
+
+class MemoryDumpManagerDelegate;
+class MemoryDumpProvider;
+class MemoryDumpSessionState;
+
+// This is the interface exposed to the rest of the codebase to deal with
+// memory tracing. The main entry point for clients is represented by
+ // RequestGlobalDump(). Providers are added/removed via (Un)RegisterDumpProvider().
+class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
+ public:
+ static const char* const kTraceCategory;
+ static const char* const kLogPrefix;
+
+ // This value is returned as the tracing id of the child processes by
+ // GetTracingProcessId() when tracing is not enabled.
+ static const uint64_t kInvalidTracingProcessId;
+
+ static MemoryDumpManager* GetInstance();
+
+ // Invoked once per process to listen to trace begin / end events.
+ // Initialization can happen after (Un)RegisterMemoryDumpProvider() calls
+ // and the MemoryDumpManager guarantees to support this.
+ // On the other side, the MemoryDumpManager will not be fully operational
+ // (i.e. will NACK any RequestGlobalMemoryDump()) until initialized.
+ // Arguments:
+ // is_coordinator: if true this MemoryDumpManager instance will act as a
+ // coordinator and schedule periodic dumps (if enabled via TraceConfig);
+ // false when the MemoryDumpManager is initialized in a slave process.
+ // delegate: inversion-of-control interface for embedder-specific behaviors
+ // (multiprocess handshaking). See the lifetime and thread-safety
+ // requirements in the |MemoryDumpManagerDelegate| docstring.
+ void Initialize(MemoryDumpManagerDelegate* delegate, bool is_coordinator);
+
+ // (Un)Registers a MemoryDumpProvider instance.
+ // Args:
+ // - mdp: the MemoryDumpProvider instance to be registered. MemoryDumpManager
+ // does NOT take memory ownership of |mdp|, which is expected to either
+ // be a singleton or unregister itself.
+ // - name: a friendly name (duplicates allowed). Used for debugging and
+ // run-time profiling of memory-infra internals. Must be a long-lived
+ // C string.
+ // - task_runner: either a SingleThreadTaskRunner or SequencedTaskRunner. All
+ // the calls to |mdp| will be run on the given |task_runner|. If passed
+ // null |mdp| should be able to handle calls on arbitrary threads.
+ // - options: extra optional arguments. See memory_dump_provider.h.
+ void RegisterDumpProvider(MemoryDumpProvider* mdp,
+ const char* name,
+ scoped_refptr<SingleThreadTaskRunner> task_runner);
+ void RegisterDumpProvider(MemoryDumpProvider* mdp,
+ const char* name,
+ scoped_refptr<SingleThreadTaskRunner> task_runner,
+ MemoryDumpProvider::Options options);
+ void RegisterDumpProviderWithSequencedTaskRunner(
+ MemoryDumpProvider* mdp,
+ const char* name,
+ scoped_refptr<SequencedTaskRunner> task_runner,
+ MemoryDumpProvider::Options options);
+ void UnregisterDumpProvider(MemoryDumpProvider* mdp);
+
+ // Unregisters an unbound dump provider and takes care about its deletion
+ // asynchronously. Can be used only for dump providers with no
+ // task-runner affinity.
+ // This method takes ownership of the dump provider and guarantees that:
+ // - The |mdp| will be deleted at some point in the near future.
+ // - Its deletion will not happen concurrently with the OnMemoryDump() call.
+ // Note that OnMemoryDump() calls can still happen after this method returns.
+ void UnregisterAndDeleteDumpProviderSoon(
+ std::unique_ptr<MemoryDumpProvider> mdp);
+
+ // Requests a memory dump. The dump might happen or not depending on the
+ // filters and categories specified when enabling tracing.
+ // The optional |callback| is executed asynchronously, on an arbitrary thread,
+ // to notify about the completion of the global dump (i.e. after all the
+ // processes have dumped) and its success (true iff all the dumps were
+ // successful).
+ void RequestGlobalDump(MemoryDumpType dump_type,
+ MemoryDumpLevelOfDetail level_of_detail,
+ const MemoryDumpCallback& callback);
+
+ // Same as above (still asynchronous), but without callback.
+ void RequestGlobalDump(MemoryDumpType dump_type,
+ MemoryDumpLevelOfDetail level_of_detail);
+
+ // TraceLog::EnabledStateObserver implementation.
+ void OnTraceLogEnabled() override;
+ void OnTraceLogDisabled() override;
+
+ // Returns true if the dump mode is allowed for current tracing session.
+ bool IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode);
+
+ // Returns the MemoryDumpSessionState object, which is shared by all the
+ // ProcessMemoryDump and MemoryAllocatorDump instances through all the tracing
+ // session lifetime.
+ const scoped_refptr<MemoryDumpSessionState>& session_state_for_testing()
+ const {
+ return session_state_;
+ }
+
+ // Returns a unique id for identifying the processes. The id can be
+ // retrieved by child processes only when tracing is enabled. This is
+ // intended to express cross-process sharing of memory dumps on the
+ // child-process side, without having to know its own child process id.
+ uint64_t GetTracingProcessId() const;
+
+ // Returns the name for the allocated_objects dump. Use this to declare
+ // suballocator dumps from other dump providers.
+ // It will return nullptr if there is no dump provider for the system
+ // allocator registered (which is currently the case for Mac OS).
+ const char* system_allocator_pool_name() const {
+ return kSystemAllocatorPoolName;
+ };
+
+ // When set to true, calling |RegisterMemoryDumpProvider| is a no-op.
+ void set_dumper_registrations_ignored_for_testing(bool ignored) {
+ dumper_registrations_ignored_for_testing_ = ignored;
+ }
+
+ private:
+ friend std::default_delete<MemoryDumpManager>; // For the testing instance.
+ friend struct DefaultSingletonTraits<MemoryDumpManager>;
+ friend class MemoryDumpManagerDelegate;
+ friend class MemoryDumpManagerTest;
+
+ // Descriptor used to hold information about registered MDPs.
+ // Some important considerations about lifetime of this object:
+ // - In nominal conditions, all the MemoryDumpProviderInfo instances live in
+ // the |dump_providers_| collection (% unregistration while dumping).
+ // - Upon each dump they (actually their scoped_refptr-s) are copied into
+ // the ProcessMemoryDumpAsyncState. This is to allow removal (see below).
+ // - When the MDP.OnMemoryDump() is invoked, the corresponding MDPInfo copy
+ // inside ProcessMemoryDumpAsyncState is removed.
+ // - In most cases, the MDPInfo is destroyed within UnregisterDumpProvider().
+ // - If UnregisterDumpProvider() is called while a dump is in progress, the
+ // MDPInfo is destroyed in SetupNextMemoryDump() or InvokeOnMemoryDump(),
+ // when the copy inside ProcessMemoryDumpAsyncState is erase()-d.
+ // - The non-const fields of MemoryDumpProviderInfo are safe to access only
+ // on tasks running in the |task_runner|, unless the thread has been
+ // destroyed.
+ struct MemoryDumpProviderInfo
+ : public RefCountedThreadSafe<MemoryDumpProviderInfo> {
+ // Define a total order based on the |task_runner| affinity, so that MDPs
+ // belonging to the same SequencedTaskRunner are adjacent in the set.
+ struct Comparator {
+ bool operator()(const scoped_refptr<MemoryDumpProviderInfo>& a,
+ const scoped_refptr<MemoryDumpProviderInfo>& b) const;
+ };
+ using OrderedSet =
+ std::set<scoped_refptr<MemoryDumpProviderInfo>, Comparator>;
+
+ MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
+ const char* name,
+ scoped_refptr<SequencedTaskRunner> task_runner,
+ const MemoryDumpProvider::Options& options,
+ bool whitelisted_for_background_mode);
+
+ MemoryDumpProvider* const dump_provider;
+
+ // Used to transfer ownership for UnregisterAndDeleteDumpProviderSoon().
+ // nullptr in all other cases.
+ std::unique_ptr<MemoryDumpProvider> owned_dump_provider;
+
+ // Human readable name, for debugging and testing. Not necessarily unique.
+ const char* const name;
+
+ // The task runner affinity. Can be nullptr, in which case the dump provider
+ // will be invoked on |dump_thread_|.
+ const scoped_refptr<SequencedTaskRunner> task_runner;
+
+ // The |options| arg passed to RegisterDumpProvider().
+ const MemoryDumpProvider::Options options;
+
+ // For fail-safe logic (auto-disable failing MDPs).
+ int consecutive_failures;
+
+ // Flagged either by the auto-disable logic or during unregistration.
+ bool disabled;
+
+ // True if the dump provider is whitelisted for background mode.
+ const bool whitelisted_for_background_mode;
+
+ private:
+ friend class base::RefCountedThreadSafe<MemoryDumpProviderInfo>;
+ ~MemoryDumpProviderInfo();
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryDumpProviderInfo);
+ };
+
+ // Holds the state of a process memory dump that needs to be carried over
+ // across task runners in order to fulfil an asynchronous CreateProcessDump()
+ // request. At any time exactly one task runner owns a
+ // ProcessMemoryDumpAsyncState.
+ struct ProcessMemoryDumpAsyncState {
+ ProcessMemoryDumpAsyncState(
+ MemoryDumpRequestArgs req_args,
+ const MemoryDumpProviderInfo::OrderedSet& dump_providers,
+ scoped_refptr<MemoryDumpSessionState> session_state,
+ MemoryDumpCallback callback,
+ scoped_refptr<SingleThreadTaskRunner> dump_thread_task_runner);
+ ~ProcessMemoryDumpAsyncState();
+
+ // Gets or creates the memory dump container for the given target process.
+ ProcessMemoryDump* GetOrCreateMemoryDumpContainerForProcess(
+ ProcessId pid,
+ const MemoryDumpArgs& dump_args);
+
+ // A map of ProcessId -> ProcessMemoryDump, one for each target process
+ // being dumped from the current process. Typically each process dumps only
+ // for itself, unless dump providers specify a different |target_process| in
+ // MemoryDumpProvider::Options.
+ std::map<ProcessId, std::unique_ptr<ProcessMemoryDump>> process_dumps;
+
+ // The arguments passed to the initial CreateProcessDump() request.
+ const MemoryDumpRequestArgs req_args;
+
+ // An ordered sequence of dump providers that have to be invoked to complete
+ // the dump. This is a copy of |dump_providers_| at the beginning of a dump
+ // and becomes empty at the end, when all dump providers have been invoked.
+ std::vector<scoped_refptr<MemoryDumpProviderInfo>> pending_dump_providers;
+
+ // The trace-global session state.
+ scoped_refptr<MemoryDumpSessionState> session_state;
+
+ // Callback passed to the initial call to CreateProcessDump().
+ MemoryDumpCallback callback;
+
+ // The |success| field that will be passed as argument to the |callback|.
+ bool dump_successful;
+
+ // The thread on which FinalizeDumpAndAddToTrace() (and hence |callback|)
+ // should be invoked. This is the thread on which the initial
+ // CreateProcessDump() request was called.
+ const scoped_refptr<SingleThreadTaskRunner> callback_task_runner;
+
+ // The thread on which unbound dump providers should be invoked.
+ // This is essentially |dump_thread_|.task_runner() but needs to be kept
+ // as a separate variable as it needs to be accessed by arbitrary dumpers'
+ // threads outside of the lock_ to avoid races when disabling tracing.
+ // It is immutable for all the duration of a tracing session.
+ const scoped_refptr<SingleThreadTaskRunner> dump_thread_task_runner;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDumpAsyncState);
+ };
+
+ // Sets up periodic memory dump timers to start global dump requests based on
+ // the dump triggers from trace config.
+ class BASE_EXPORT PeriodicGlobalDumpTimer {
+ public:
+ PeriodicGlobalDumpTimer();
+ ~PeriodicGlobalDumpTimer();
+
+ void Start(const std::vector<TraceConfig::MemoryDumpConfig::Trigger>&
+ triggers_list);
+ void Stop();
+
+ bool IsRunning();
+
+ private:
+ // Periodically called by the timer.
+ void RequestPeriodicGlobalDump();
+
+ RepeatingTimer timer_;
+ uint32_t periodic_dumps_count_;
+ uint32_t light_dump_rate_;
+ uint32_t heavy_dump_rate_;
+
+ DISALLOW_COPY_AND_ASSIGN(PeriodicGlobalDumpTimer);
+ };
+
+ static const int kMaxConsecutiveFailuresCount;
+ static const char* const kSystemAllocatorPoolName;
+
+ MemoryDumpManager();
+ ~MemoryDumpManager() override;
+
+ static void SetInstanceForTesting(MemoryDumpManager* instance);
+ static void FinalizeDumpAndAddToTrace(
+ std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
+
+ // Enable heap profiling if kEnableHeapProfiling is specified.
+ void EnableHeapProfilingIfNeeded();
+
+ // Internal, used only by MemoryDumpManagerDelegate.
+ // Creates a memory dump for the current process and appends it to the trace.
+ // |callback| will be invoked asynchronously upon completion on the same
+ // thread on which CreateProcessDump() was called.
+ void CreateProcessDump(const MemoryDumpRequestArgs& args,
+ const MemoryDumpCallback& callback);
+
+ // Calls InvokeOnMemoryDump() for the next MDP on the task runner specified by
+ // the MDP during registration. On failure to do so, skips and continues to
+ // next MDP.
+ void SetupNextMemoryDump(
+ std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
+
+ // Invokes OnMemoryDump() of the next MDP and calls SetupNextMemoryDump() at
+ // the end to continue the ProcessMemoryDump. Should be called on the MDP task
+ // runner.
+ void InvokeOnMemoryDump(ProcessMemoryDumpAsyncState* owned_pmd_async_state);
+
+ // Helper for the RegisterDumpProvider* functions.
+ void RegisterDumpProviderInternal(
+ MemoryDumpProvider* mdp,
+ const char* name,
+ scoped_refptr<SequencedTaskRunner> task_runner,
+ const MemoryDumpProvider::Options& options);
+
+ // Helper for the public UnregisterDumpProvider* functions.
+ void UnregisterDumpProviderInternal(MemoryDumpProvider* mdp,
+ bool take_mdp_ownership_and_delete_async);
+
+ // An ordered set of registered MemoryDumpProviderInfo(s), sorted by task
+ // runner affinity (MDPs belonging to the same task runners are adjacent).
+ MemoryDumpProviderInfo::OrderedSet dump_providers_;
+
+ // Shared among all the PMDs to keep state scoped to the tracing session.
+ scoped_refptr<MemoryDumpSessionState> session_state_;
+
+ MemoryDumpManagerDelegate* delegate_; // Not owned.
+
+ // When true, this instance is in charge of coordinating periodic dumps.
+ bool is_coordinator_;
+
+ // Protects from concurrent accesses to the |dump_providers_*| and |delegate_|
+ // to guard against disabling logging while dumping on another thread.
+ Lock lock_;
+
+ // Optimization to avoid attempting any memory dump (i.e. to not walk an empty
+ // dump_providers_enabled_ list) when tracing is not enabled.
+ subtle::AtomicWord memory_tracing_enabled_;
+
+ // For time-triggered periodic dumps.
+ PeriodicGlobalDumpTimer periodic_dump_timer_;
+
+ // Thread used for MemoryDumpProviders which don't specify a task runner
+ // affinity.
+ std::unique_ptr<Thread> dump_thread_;
+
+ // The unique id of the child process. This is created only for tracing and is
+ // expected to be valid only when tracing is enabled.
+ uint64_t tracing_process_id_;
+
+ // When true, calling |RegisterMemoryDumpProvider| is a no-op.
+ bool dumper_registrations_ignored_for_testing_;
+
+ // Whether new memory dump providers should be told to enable heap profiling.
+ bool heap_profiling_enabled_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryDumpManager);
+};
+
+// The delegate is supposed to be long lived (read: a Singleton) and thread
+// safe (i.e. should expect calls from any thread and handle thread hopping).
+class BASE_EXPORT MemoryDumpManagerDelegate {
+ public:
+ virtual void RequestGlobalMemoryDump(const MemoryDumpRequestArgs& args,
+ const MemoryDumpCallback& callback) = 0;
+
+ // Returns tracing process id of the current process. This is used by
+ // MemoryDumpManager::GetTracingProcessId.
+ virtual uint64_t GetTracingProcessId() const = 0;
+
+ protected:
+ MemoryDumpManagerDelegate() {}
+ virtual ~MemoryDumpManagerDelegate() {}
+
+ void CreateProcessDump(const MemoryDumpRequestArgs& args,
+ const MemoryDumpCallback& callback) {
+ MemoryDumpManager::GetInstance()->CreateProcessDump(args, callback);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MemoryDumpManagerDelegate);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_DUMP_MANAGER_H_
diff --git a/libchrome/base/trace_event/memory_dump_manager_unittest.cc b/libchrome/base/trace_event/memory_dump_manager_unittest.cc
new file mode 100644
index 0000000..d14093c
--- /dev/null
+++ b/libchrome/base/trace_event/memory_dump_manager_unittest.cc
@@ -0,0 +1,1171 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_manager.h"
+
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/bind_helpers.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/test_io_thread.h"
+#include "base/test/trace_event_analyzer.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/sequenced_worker_pool.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_buffer.h"
+#include "base/trace_event/trace_config_memory_test_util.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::_;
+using testing::AnyNumber;
+using testing::AtMost;
+using testing::Between;
+using testing::Invoke;
+using testing::Return;
+
+namespace base {
+namespace trace_event {
+
+// GTest matchers for MemoryDumpRequestArgs arguments.
+MATCHER(IsDetailedDump, "") {
+ return arg.level_of_detail == MemoryDumpLevelOfDetail::DETAILED;
+}
+
+MATCHER(IsLightDump, "") {
+ return arg.level_of_detail == MemoryDumpLevelOfDetail::LIGHT;
+}
+
+MATCHER(IsBackgroundDump, "") {
+ return arg.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND;
+}
+
+namespace {
+
+const char* kMDPName = "TestDumpProvider";
+const char* kWhitelistedMDPName = "WhitelistedTestDumpProvider";
+const char* const kTestMDPWhitelist[] = {kWhitelistedMDPName, nullptr};
+
+void RegisterDumpProvider(
+ MemoryDumpProvider* mdp,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ const MemoryDumpProvider::Options& options,
+ const char* name = kMDPName) {
+ MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
+ mdm->set_dumper_registrations_ignored_for_testing(false);
+ mdm->RegisterDumpProvider(mdp, name, std::move(task_runner), options);
+ mdm->set_dumper_registrations_ignored_for_testing(true);
+}
+
+void RegisterDumpProvider(MemoryDumpProvider* mdp) {
+ RegisterDumpProvider(mdp, nullptr, MemoryDumpProvider::Options());
+}
+
+void RegisterDumpProviderWithSequencedTaskRunner(
+ MemoryDumpProvider* mdp,
+ scoped_refptr<base::SequencedTaskRunner> task_runner,
+ const MemoryDumpProvider::Options& options) {
+ MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
+ mdm->set_dumper_registrations_ignored_for_testing(false);
+ mdm->RegisterDumpProviderWithSequencedTaskRunner(mdp, kMDPName, task_runner,
+ options);
+ mdm->set_dumper_registrations_ignored_for_testing(true);
+}
+
+void OnTraceDataCollected(Closure quit_closure,
+ trace_event::TraceResultBuffer* buffer,
+ const scoped_refptr<RefCountedString>& json,
+ bool has_more_events) {
+ buffer->AddFragment(json->data());
+ if (!has_more_events)
+ quit_closure.Run();
+}
+
+} // namespace
+
+// Testing MemoryDumpManagerDelegate which, by default, short-circuits dump
+// requests locally to the MemoryDumpManager instead of performing IPC dances.
+class MemoryDumpManagerDelegateForTesting : public MemoryDumpManagerDelegate {
+ public:
+ MemoryDumpManagerDelegateForTesting() {
+ ON_CALL(*this, RequestGlobalMemoryDump(_, _))
+ .WillByDefault(Invoke(
+ this, &MemoryDumpManagerDelegateForTesting::CreateProcessDump));
+ }
+
+ MOCK_METHOD2(RequestGlobalMemoryDump,
+ void(const MemoryDumpRequestArgs& args,
+ const MemoryDumpCallback& callback));
+
+ uint64_t GetTracingProcessId() const override {
+ NOTREACHED();
+ return MemoryDumpManager::kInvalidTracingProcessId;
+ }
+
+ // Promote the CreateProcessDump to public so it can be used by test fixtures.
+ using MemoryDumpManagerDelegate::CreateProcessDump;
+};
+
+class MockMemoryDumpProvider : public MemoryDumpProvider {
+ public:
+ MOCK_METHOD0(Destructor, void());
+ MOCK_METHOD2(OnMemoryDump,
+ bool(const MemoryDumpArgs& args, ProcessMemoryDump* pmd));
+
+ MockMemoryDumpProvider() : enable_mock_destructor(false) {
+ ON_CALL(*this, OnMemoryDump(_, _))
+ .WillByDefault(Invoke([](const MemoryDumpArgs&,
+ ProcessMemoryDump* pmd) -> bool {
+ // |session_state| should not be null under any circumstances when
+ // invoking a memory dump. The problem might arise in race conditions
+ // like crbug.com/600570 .
+ EXPECT_TRUE(pmd->session_state().get() != nullptr);
+ return true;
+ }));
+ }
+ ~MockMemoryDumpProvider() override {
+ if (enable_mock_destructor)
+ Destructor();
+ }
+
+ bool enable_mock_destructor;
+};
+
+class TestSequencedTaskRunner : public SequencedTaskRunner {
+ public:
+ TestSequencedTaskRunner()
+ : worker_pool_(
+ new SequencedWorkerPool(2 /* max_threads */, "Test Task Runner")),
+ enabled_(true),
+ num_of_post_tasks_(0) {}
+
+ void set_enabled(bool value) { enabled_ = value; }
+ unsigned no_of_post_tasks() const { return num_of_post_tasks_; }
+
+ bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay) override {
+ NOTREACHED();
+ return false;
+ }
+
+ bool PostDelayedTask(const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay) override {
+ num_of_post_tasks_++;
+ if (enabled_)
+ return worker_pool_->PostSequencedWorkerTask(token_, from_here, task);
+ return false;
+ }
+
+ bool RunsTasksOnCurrentThread() const override {
+ return worker_pool_->IsRunningSequenceOnCurrentThread(token_);
+ }
+
+ private:
+ ~TestSequencedTaskRunner() override {}
+
+ scoped_refptr<SequencedWorkerPool> worker_pool_;
+ const SequencedWorkerPool::SequenceToken token_;
+ bool enabled_;
+ unsigned num_of_post_tasks_;
+};
+
+class MemoryDumpManagerTest : public testing::Test {
+ public:
+ MemoryDumpManagerTest() : testing::Test(), kDefaultOptions() {}
+
+ void SetUp() override {
+ last_callback_success_ = false;
+ message_loop_.reset(new MessageLoop());
+ mdm_.reset(new MemoryDumpManager());
+ MemoryDumpManager::SetInstanceForTesting(mdm_.get());
+ ASSERT_EQ(mdm_.get(), MemoryDumpManager::GetInstance());
+ delegate_.reset(new MemoryDumpManagerDelegateForTesting);
+ }
+
+ void TearDown() override {
+ MemoryDumpManager::SetInstanceForTesting(nullptr);
+ mdm_.reset();
+ delegate_.reset();
+ message_loop_.reset();
+ TraceLog::DeleteForTesting();
+ }
+
+ // Turns a Closure into a MemoryDumpCallback, keeping track of the callback
+ // result and taking care of posting the closure on the correct task runner.
+ void DumpCallbackAdapter(scoped_refptr<SingleThreadTaskRunner> task_runner,
+ Closure closure,
+ uint64_t dump_guid,
+ bool success) {
+ last_callback_success_ = success;
+ task_runner->PostTask(FROM_HERE, closure);
+ }
+
+ protected:
+ void InitializeMemoryDumpManager(bool is_coordinator) {
+ mdm_->set_dumper_registrations_ignored_for_testing(true);
+ mdm_->Initialize(delegate_.get(), is_coordinator);
+ }
+
+ void RequestGlobalDumpAndWait(MemoryDumpType dump_type,
+ MemoryDumpLevelOfDetail level_of_detail) {
+ RunLoop run_loop;
+ MemoryDumpCallback callback =
+ Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
+ ThreadTaskRunnerHandle::Get(), run_loop.QuitClosure());
+ mdm_->RequestGlobalDump(dump_type, level_of_detail, callback);
+ run_loop.Run();
+ }
+
+ void EnableTracingWithLegacyCategories(const char* category) {
+ TraceLog::GetInstance()->SetEnabled(TraceConfig(category, ""),
+ TraceLog::RECORDING_MODE);
+ }
+
+ void EnableTracingWithTraceConfig(const std::string& trace_config) {
+ TraceLog::GetInstance()->SetEnabled(TraceConfig(trace_config),
+ TraceLog::RECORDING_MODE);
+ }
+
+ void DisableTracing() { TraceLog::GetInstance()->SetDisabled(); }
+
+ bool IsPeriodicDumpingEnabled() const {
+ return mdm_->periodic_dump_timer_.IsRunning();
+ }
+
+ int GetMaxConsecutiveFailuresCount() const {
+ return MemoryDumpManager::kMaxConsecutiveFailuresCount;
+ }
+
+ const MemoryDumpProvider::Options kDefaultOptions;
+ std::unique_ptr<MemoryDumpManager> mdm_;
+ std::unique_ptr<MemoryDumpManagerDelegateForTesting> delegate_;
+ bool last_callback_success_;
+
+ private:
+ std::unique_ptr<MessageLoop> message_loop_;
+
+ // We want our singleton torn down after each test.
+ ShadowingAtExitManager at_exit_manager_;
+};
+
+// Basic sanity checks. Registers a memory dump provider and checks that it is
+// called, but only when memory-infra is enabled.
+TEST_F(MemoryDumpManagerTest, SingleDumper) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp;
+ RegisterDumpProvider(&mdp);
+
+ // Check that the dumper is not called if the memory category is not enabled.
+ EnableTracingWithLegacyCategories("foobar-but-not-memory");
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ DisableTracing();
+
+ // Now repeat enabling the memory category and check that the dumper is
+ // invoked this time.
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(3);
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(3).WillRepeatedly(Return(true));
+ for (int i = 0; i < 3; ++i)
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ DisableTracing();
+
+ mdm_->UnregisterDumpProvider(&mdp);
+
+ // Finally check the unregister logic: the delegate will be invoked but not
+ // the dump provider, as it has been unregistered.
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(3);
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
+
+ for (int i = 0; i < 3; ++i) {
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ }
+ DisableTracing();
+}
+
+// Checks that requesting dumps with high level of detail actually propagates
+// the level of the detail properly to OnMemoryDump() call on dump providers.
+TEST_F(MemoryDumpManagerTest, CheckMemoryDumpArgs) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp;
+
+ RegisterDumpProvider(&mdp);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp, OnMemoryDump(IsDetailedDump(), _)).WillOnce(Return(true));
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ DisableTracing();
+ mdm_->UnregisterDumpProvider(&mdp);
+
+ // Check that requesting dumps with low level of detail actually propagates to
+ // OnMemoryDump() call on dump providers.
+ RegisterDumpProvider(&mdp);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp, OnMemoryDump(IsLightDump(), _)).WillOnce(Return(true));
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::LIGHT);
+ DisableTracing();
+ mdm_->UnregisterDumpProvider(&mdp);
+}
+
+// Checks that the SharedSessionState object is actually shared over time.
+TEST_F(MemoryDumpManagerTest, SharedSessionState) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ MockMemoryDumpProvider mdp2;
+ RegisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp2);
+
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ const MemoryDumpSessionState* session_state =
+ mdm_->session_state_for_testing().get();
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _))
+ .Times(2)
+ .WillRepeatedly(Invoke([session_state](const MemoryDumpArgs&,
+ ProcessMemoryDump* pmd) -> bool {
+ EXPECT_EQ(session_state, pmd->session_state().get());
+ return true;
+ }));
+ EXPECT_CALL(mdp2, OnMemoryDump(_, _))
+ .Times(2)
+ .WillRepeatedly(Invoke([session_state](const MemoryDumpArgs&,
+ ProcessMemoryDump* pmd) -> bool {
+ EXPECT_EQ(session_state, pmd->session_state().get());
+ return true;
+ }));
+
+ for (int i = 0; i < 2; ++i) {
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ }
+
+ DisableTracing();
+}
+
+// Checks that the (Un)RegisterDumpProvider logic behaves sanely.
+TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ MockMemoryDumpProvider mdp2;
+
+ // Enable only mdp1.
+ RegisterDumpProvider(&mdp1);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
+ EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(0);
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ DisableTracing();
+
+  // Invert: disable mdp1 and enable mdp2.
+ mdm_->UnregisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp2);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
+ EXPECT_CALL(mdp2, OnMemoryDump(_, _)).WillOnce(Return(true));
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ DisableTracing();
+
+ // Enable both mdp1 and mdp2.
+ RegisterDumpProvider(&mdp1);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
+ EXPECT_CALL(mdp2, OnMemoryDump(_, _)).WillOnce(Return(true));
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ DisableTracing();
+}
+
+// Checks that the dump provider invocations depend only on the current
+// registration state and not on previous registrations and dumps.
+TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp;
+
+ RegisterDumpProvider(&mdp);
+
+ {
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).WillOnce(Return(true));
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ DisableTracing();
+ }
+
+ mdm_->UnregisterDumpProvider(&mdp);
+
+ {
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ DisableTracing();
+ }
+
+ RegisterDumpProvider(&mdp);
+ mdm_->UnregisterDumpProvider(&mdp);
+
+ {
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ DisableTracing();
+ }
+
+ RegisterDumpProvider(&mdp);
+ mdm_->UnregisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp);
+
+ {
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).WillOnce(Return(true));
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ DisableTracing();
+ }
+}
+
+// Checks that the MemoryDumpManager respects the thread affinity when a
+// MemoryDumpProvider specifies a task_runner(). The test starts creating 8
+// threads and registering a MemoryDumpProvider on each of them. At each
+// iteration, one thread is removed, to check the live unregistration logic.
+TEST_F(MemoryDumpManagerTest, RespectTaskRunnerAffinity) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ const uint32_t kNumInitialThreads = 8;
+
+ std::vector<std::unique_ptr<Thread>> threads;
+ std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;
+
+ // Create the threads and setup the expectations. Given that at each iteration
+ // we will pop out one thread/MemoryDumpProvider, each MDP is supposed to be
+ // invoked a number of times equal to its index.
+ for (uint32_t i = kNumInitialThreads; i > 0; --i) {
+ threads.push_back(WrapUnique(new Thread("test thread")));
+ auto* thread = threads.back().get();
+ thread->Start();
+ scoped_refptr<SingleThreadTaskRunner> task_runner = thread->task_runner();
+ mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
+ auto* mdp = mdps.back().get();
+ RegisterDumpProvider(mdp, task_runner, kDefaultOptions);
+ EXPECT_CALL(*mdp, OnMemoryDump(_, _))
+ .Times(i)
+ .WillRepeatedly(Invoke(
+ [task_runner](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
+ EXPECT_TRUE(task_runner->RunsTasksOnCurrentThread());
+ return true;
+ }));
+ }
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+ while (!threads.empty()) {
+ last_callback_success_ = false;
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ EXPECT_TRUE(last_callback_success_);
+
+ // Unregister a MDP and destroy one thread at each iteration to check the
+ // live unregistration logic. The unregistration needs to happen on the same
+ // thread the MDP belongs to.
+ {
+ RunLoop run_loop;
+ Closure unregistration =
+ Bind(&MemoryDumpManager::UnregisterDumpProvider,
+ Unretained(mdm_.get()), Unretained(mdps.back().get()));
+ threads.back()->task_runner()->PostTaskAndReply(FROM_HERE, unregistration,
+ run_loop.QuitClosure());
+ run_loop.Run();
+ }
+ mdps.pop_back();
+ threads.back()->Stop();
+ threads.pop_back();
+ }
+
+ DisableTracing();
+}
+
+// Check that the memory dump calls are always posted on task runner for
+// SequencedTaskRunner case and that the dump provider gets disabled when
+// PostTask fails, but the dump still succeeds.
+TEST_F(MemoryDumpManagerTest, PostTaskForSequencedTaskRunner) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ std::vector<MockMemoryDumpProvider> mdps(3);
+ scoped_refptr<TestSequencedTaskRunner> task_runner1(
+ make_scoped_refptr(new TestSequencedTaskRunner()));
+ scoped_refptr<TestSequencedTaskRunner> task_runner2(
+ make_scoped_refptr(new TestSequencedTaskRunner()));
+ RegisterDumpProviderWithSequencedTaskRunner(&mdps[0], task_runner1,
+ kDefaultOptions);
+ RegisterDumpProviderWithSequencedTaskRunner(&mdps[1], task_runner2,
+ kDefaultOptions);
+ RegisterDumpProviderWithSequencedTaskRunner(&mdps[2], task_runner2,
+ kDefaultOptions);
+ // |mdps[0]| should be disabled permanently after first dump.
+ EXPECT_CALL(mdps[0], OnMemoryDump(_, _)).Times(0);
+ EXPECT_CALL(mdps[1], OnMemoryDump(_, _)).Times(2);
+ EXPECT_CALL(mdps[2], OnMemoryDump(_, _)).Times(2);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
+
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+ task_runner1->set_enabled(false);
+ last_callback_success_ = false;
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+  // Tasks should be individually posted even if |mdps[1]| and |mdps[2]| belong
+  // to the same task runner.
+ EXPECT_EQ(1u, task_runner1->no_of_post_tasks());
+ EXPECT_EQ(2u, task_runner2->no_of_post_tasks());
+ EXPECT_TRUE(last_callback_success_);
+
+ task_runner1->set_enabled(true);
+ last_callback_success_ = false;
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ EXPECT_EQ(2u, task_runner1->no_of_post_tasks());
+ EXPECT_EQ(4u, task_runner2->no_of_post_tasks());
+ EXPECT_TRUE(last_callback_success_);
+ DisableTracing();
+}
+
+// Checks that providers get disabled after 3 consecutive failures, but not
+// otherwise (e.g., if interleaved).
+TEST_F(MemoryDumpManagerTest, DisableFailingDumpers) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ MockMemoryDumpProvider mdp2;
+
+ RegisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp2);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+ const int kNumDumps = 2 * GetMaxConsecutiveFailuresCount();
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(kNumDumps);
+
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _))
+ .Times(GetMaxConsecutiveFailuresCount())
+ .WillRepeatedly(Return(false));
+
+ EXPECT_CALL(mdp2, OnMemoryDump(_, _))
+ .WillOnce(Return(false))
+ .WillOnce(Return(true))
+ .WillOnce(Return(false))
+ .WillOnce(Return(false))
+ .WillOnce(Return(true))
+ .WillOnce(Return(false));
+
+ for (int i = 0; i < kNumDumps; i++) {
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ }
+
+ DisableTracing();
+}
+
+// Sneakily registers an extra memory dump provider while an existing one is
+// dumping and expect it to take part in the already active tracing session.
+TEST_F(MemoryDumpManagerTest, RegisterDumperWhileDumping) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ MockMemoryDumpProvider mdp2;
+
+ RegisterDumpProvider(&mdp1);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(4);
+
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _))
+ .Times(4)
+ .WillOnce(Return(true))
+ .WillOnce(
+ Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
+ RegisterDumpProvider(&mdp2);
+ return true;
+ }))
+ .WillRepeatedly(Return(true));
+
+  // Depending on the insertion order (before or after mdp1), mdp2 might
+  // also be called immediately after it gets registered.
+ EXPECT_CALL(mdp2, OnMemoryDump(_, _))
+ .Times(Between(2, 3))
+ .WillRepeatedly(Return(true));
+
+ for (int i = 0; i < 4; i++) {
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ }
+
+ DisableTracing();
+}
+
+// Like RegisterDumperWhileDumping, but unregister the dump provider instead.
+TEST_F(MemoryDumpManagerTest, UnregisterDumperWhileDumping) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ MockMemoryDumpProvider mdp2;
+
+ RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get(), kDefaultOptions);
+ RegisterDumpProvider(&mdp2, ThreadTaskRunnerHandle::Get(), kDefaultOptions);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(4);
+
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _))
+ .Times(4)
+ .WillOnce(Return(true))
+ .WillOnce(
+ Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
+ MemoryDumpManager::GetInstance()->UnregisterDumpProvider(&mdp2);
+ return true;
+ }))
+ .WillRepeatedly(Return(true));
+
+ // Depending on the insertion order (before or after mdp1), mdp2 might have
+ // been already called when UnregisterDumpProvider happens.
+ EXPECT_CALL(mdp2, OnMemoryDump(_, _))
+ .Times(Between(1, 2))
+ .WillRepeatedly(Return(true));
+
+ for (int i = 0; i < 4; i++) {
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ }
+
+ DisableTracing();
+}
+
+// Checks that the dump does not abort when unregistering a provider while
+// dumping from a different thread than the dumping thread.
+TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ std::vector<std::unique_ptr<TestIOThread>> threads;
+ std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;
+
+ for (int i = 0; i < 2; i++) {
+ threads.push_back(
+ WrapUnique(new TestIOThread(TestIOThread::kAutoStart)));
+ mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
+ RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(),
+ kDefaultOptions);
+ }
+
+ int on_memory_dump_call_count = 0;
+
+ // When OnMemoryDump is called on either of the dump providers, it will
+ // unregister the other one.
+ for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
+ int other_idx = (mdps.front() == mdp);
+ TestIOThread* other_thread = threads[other_idx].get();
+ MockMemoryDumpProvider* other_mdp = mdps[other_idx].get();
+ auto on_dump = [this, other_thread, other_mdp, &on_memory_dump_call_count](
+ const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
+ other_thread->PostTaskAndWait(
+ FROM_HERE, base::Bind(&MemoryDumpManager::UnregisterDumpProvider,
+ base::Unretained(&*mdm_), other_mdp));
+ on_memory_dump_call_count++;
+ return true;
+ };
+
+ // OnMemoryDump is called once for the provider that dumps first, and zero
+ // times for the other provider.
+ EXPECT_CALL(*mdp, OnMemoryDump(_, _))
+ .Times(AtMost(1))
+ .WillOnce(Invoke(on_dump));
+ }
+
+ last_callback_success_ = false;
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ ASSERT_EQ(1, on_memory_dump_call_count);
+ ASSERT_TRUE(last_callback_success_);
+
+ DisableTracing();
+}
+
+// If a thread (with a dump provider living on it) is torn down during a dump
+// its dump provider should be skipped but the dump itself should succeed.
+TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ std::vector<std::unique_ptr<TestIOThread>> threads;
+ std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;
+
+ for (int i = 0; i < 2; i++) {
+ threads.push_back(
+ WrapUnique(new TestIOThread(TestIOThread::kAutoStart)));
+ mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
+ RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(),
+ kDefaultOptions);
+ }
+
+ int on_memory_dump_call_count = 0;
+
+ // When OnMemoryDump is called on either of the dump providers, it will
+ // tear down the thread of the other one.
+ for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
+ int other_idx = (mdps.front() == mdp);
+ TestIOThread* other_thread = threads[other_idx].get();
+ auto on_dump = [other_thread, &on_memory_dump_call_count](
+ const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
+ other_thread->Stop();
+ on_memory_dump_call_count++;
+ return true;
+ };
+
+ // OnMemoryDump is called once for the provider that dumps first, and zero
+ // times for the other provider.
+ EXPECT_CALL(*mdp, OnMemoryDump(_, _))
+ .Times(AtMost(1))
+ .WillOnce(Invoke(on_dump));
+ }
+
+ last_callback_success_ = false;
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ ASSERT_EQ(1, on_memory_dump_call_count);
+ ASSERT_TRUE(last_callback_success_);
+
+ DisableTracing();
+}
+
+// Checks that a NACK callback is invoked if RequestGlobalDump() is called when
+// tracing is not enabled.
+TEST_F(MemoryDumpManagerTest, CallbackCalledOnFailure) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ RegisterDumpProvider(&mdp1);
+
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
+
+ last_callback_success_ = true;
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ EXPECT_FALSE(last_callback_success_);
+}
+
+// Checks that if the MemoryDumpManager is initialized after tracing already
+// began, it will still late-join the party (real use case: startup tracing).
+TEST_F(MemoryDumpManagerTest, InitializedAfterStartOfTracing) {
+ MockMemoryDumpProvider mdp;
+ RegisterDumpProvider(&mdp);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+ // First check that a RequestGlobalDump() issued before the MemoryDumpManager
+ // initialization gets NACK-ed cleanly.
+ {
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ EXPECT_FALSE(last_callback_success_);
+ }
+
+ // Now late-initialize the MemoryDumpManager and check that the
+ // RequestGlobalDump completes successfully.
+ {
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ EXPECT_TRUE(last_callback_success_);
+ }
+ DisableTracing();
+}
+
+// This test (and the MemoryDumpManagerTestCoordinator below) crystallizes the
+// expectations of the chrome://tracing UI and chrome telemetry w.r.t. periodic
+// dumps in memory-infra, handling gracefully the transition between the legacy
+// and the new-style (JSON-based) TraceConfig.
+TEST_F(MemoryDumpManagerTest, TraceConfigExpectations) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MemoryDumpManagerDelegateForTesting& delegate = *delegate_;
+
+ // Don't trigger the default behavior of the mock delegate in this test,
+ // which would short-circuit the dump request to the actual
+ // CreateProcessDump().
+ // We don't want to create any dump in this test, only check whether the dumps
+ // are requested or not.
+ ON_CALL(delegate, RequestGlobalMemoryDump(_, _)).WillByDefault(Return());
+
+ // Enabling memory-infra in a non-coordinator process should not trigger any
+ // periodic dumps.
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_FALSE(IsPeriodicDumpingEnabled());
+ DisableTracing();
+
+ // Enabling memory-infra with the new (JSON) TraceConfig in a non-coordinator
+ // process with a fully defined trigger config should NOT enable any periodic
+ // dumps.
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(1, 5));
+ EXPECT_FALSE(IsPeriodicDumpingEnabled());
+ DisableTracing();
+}
+
+TEST_F(MemoryDumpManagerTest, TraceConfigExpectationsWhenIsCoordinator) {
+ InitializeMemoryDumpManager(true /* is_coordinator */);
+ MemoryDumpManagerDelegateForTesting& delegate = *delegate_;
+ ON_CALL(delegate, RequestGlobalMemoryDump(_, _)).WillByDefault(Return());
+
+ // Enabling memory-infra with the legacy TraceConfig (category filter) in
+ // a coordinator process should enable periodic dumps.
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_TRUE(IsPeriodicDumpingEnabled());
+ DisableTracing();
+
+ // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
+ // process without specifying any "memory_dump_config" section should enable
+// periodic dumps. This is to preserve the behavior of the chrome://tracing UI,
+ // is: ticking memory-infra should dump periodically with the default config.
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_NoTriggers());
+ EXPECT_TRUE(IsPeriodicDumpingEnabled());
+ DisableTracing();
+
+ // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
+ // process with an empty "memory_dump_config" should NOT enable periodic
+ // dumps. This is the way telemetry is supposed to use memory-infra with
+ // only explicitly triggered dumps.
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers());
+ EXPECT_FALSE(IsPeriodicDumpingEnabled());
+ DisableTracing();
+
+ // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
+ // process with a fully defined trigger config should cause periodic dumps to
+ // be performed in the correct order.
+ RunLoop run_loop;
+ auto quit_closure = run_loop.QuitClosure();
+
+ const int kHeavyDumpRate = 5;
+ const int kLightDumpPeriodMs = 1;
+ const int kHeavyDumpPeriodMs = kHeavyDumpRate * kLightDumpPeriodMs;
+ // The expected sequence with light=1ms, heavy=5ms is H,L,L,L,L,H,...
+ testing::InSequence sequence;
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsDetailedDump(), _));
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
+ .Times(kHeavyDumpRate - 1);
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsDetailedDump(), _));
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
+ .Times(kHeavyDumpRate - 2);
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
+ .WillOnce(Invoke([quit_closure](const MemoryDumpRequestArgs& args,
+ const MemoryDumpCallback& callback) {
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, quit_closure);
+ }));
+
+ // Swallow all the final spurious calls until tracing gets disabled.
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(_, _)).Times(AnyNumber());
+
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(
+ kLightDumpPeriodMs, kHeavyDumpPeriodMs));
+ run_loop.Run();
+ DisableTracing();
+}
+
+// Tests against race conditions that might arise when disabling tracing in the
+// middle of a global memory dump.
+TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
+  base::WaitableEvent tracing_disabled_event(
+      WaitableEvent::ResetPolicy::AUTOMATIC,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  InitializeMemoryDumpManager(false /* is_coordinator */);
+
+  // Register a bound dump provider.
+  std::unique_ptr<Thread> mdp_thread(new Thread("test thread"));
+  mdp_thread->Start();
+  MockMemoryDumpProvider mdp_with_affinity;
+  RegisterDumpProvider(&mdp_with_affinity, mdp_thread->task_runner(),
+                       kDefaultOptions);
+
+  // Register also an unbound dump provider. Unbound dump providers are always
+  // invoked after bound ones.
+  MockMemoryDumpProvider unbound_mdp;
+  RegisterDumpProvider(&unbound_mdp, nullptr, kDefaultOptions);
+
+  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+  // The bound provider blocks inside OnMemoryDump() until the main thread
+  // has disabled tracing and signaled |tracing_disabled_event|.
+  EXPECT_CALL(mdp_with_affinity, OnMemoryDump(_, _))
+      .Times(1)
+      .WillOnce(
+          Invoke([&tracing_disabled_event](const MemoryDumpArgs&,
+                                           ProcessMemoryDump* pmd) -> bool {
+            tracing_disabled_event.Wait();
+
+            // At this point tracing has been disabled and the
+            // MemoryDumpManager.dump_thread_ has been shut down.
+            return true;
+          }));
+
+  // |unbound_mdp| should never be invoked because the thread for unbound dump
+  // providers has been shutdown in the meanwhile.
+  EXPECT_CALL(unbound_mdp, OnMemoryDump(_, _)).Times(0);
+
+  last_callback_success_ = true;
+  RunLoop run_loop;
+  MemoryDumpCallback callback =
+      Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
+           ThreadTaskRunnerHandle::Get(), run_loop.QuitClosure());
+  mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                          MemoryDumpLevelOfDetail::DETAILED, callback);
+  DisableTracing();
+  tracing_disabled_event.Signal();
+  run_loop.Run();
+
+  // The dump was aborted mid-flight, so the overall callback must report
+  // failure.
+  EXPECT_FALSE(last_callback_success_);
+}
+
+// Tests against race conditions that can happen if tracing is disabled before
+// the CreateProcessDump() call. Real-world regression: crbug.com/580295 .
+TEST_F(MemoryDumpManagerTest, DisableTracingRightBeforeStartOfDump) {
+  base::WaitableEvent tracing_disabled_event(
+      WaitableEvent::ResetPolicy::AUTOMATIC,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  InitializeMemoryDumpManager(false /* is_coordinator */);
+
+  std::unique_ptr<Thread> mdp_thread(new Thread("test thread"));
+  mdp_thread->Start();
+
+  // Create both same-thread MDP and another MDP with dedicated thread.
+  MockMemoryDumpProvider mdp1;
+  RegisterDumpProvider(&mdp1);
+  MockMemoryDumpProvider mdp2;
+  RegisterDumpProvider(&mdp2, mdp_thread->task_runner(), kDefaultOptions);
+  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+  // Disable tracing right before the delegate starts the process dump,
+  // reproducing the race described in the test comment above.
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _))
+      .WillOnce(Invoke([this](const MemoryDumpRequestArgs& args,
+                              const MemoryDumpCallback& callback) {
+        DisableTracing();
+        delegate_->CreateProcessDump(args, callback);
+      }));
+
+  // If tracing is disabled for current session CreateProcessDump() should NOT
+  // request dumps from providers. Real-world regression: crbug.com/600570 .
+  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
+  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(0);
+
+  last_callback_success_ = true;
+  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                           MemoryDumpLevelOfDetail::DETAILED);
+  // The request must complete, but with a failure result.
+  EXPECT_FALSE(last_callback_success_);
+}
+
+// Registers providers that dump on behalf of other processes (via
+// Options::target_pid) and verifies the resulting trace events carry the
+// expected pids and share one dump guid.
+TEST_F(MemoryDumpManagerTest, DumpOnBehalfOfOtherProcess) {
+  using trace_analyzer::Query;
+
+  InitializeMemoryDumpManager(false /* is_coordinator */);
+
+  // Standard provider with default options (create dump for current process).
+  MemoryDumpProvider::Options options;
+  MockMemoryDumpProvider mdp1;
+  RegisterDumpProvider(&mdp1, nullptr, options);
+
+  // Provider with out-of-process dumping.
+  MockMemoryDumpProvider mdp2;
+  options.target_pid = 123;
+  RegisterDumpProvider(&mdp2, nullptr, options);
+
+  // Another provider with out-of-process dumping.
+  MockMemoryDumpProvider mdp3;
+  options.target_pid = 456;
+  RegisterDumpProvider(&mdp3, nullptr, options);
+
+  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(1).WillRepeatedly(Return(true));
+  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(1).WillRepeatedly(Return(true));
+  EXPECT_CALL(mdp3, OnMemoryDump(_, _)).Times(1).WillRepeatedly(Return(true));
+  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                           MemoryDumpLevelOfDetail::DETAILED);
+  DisableTracing();
+
+  // Flush the trace into JSON.
+  trace_event::TraceResultBuffer buffer;
+  TraceResultBuffer::SimpleOutput trace_output;
+  buffer.SetOutputCallback(trace_output.GetCallback());
+  RunLoop run_loop;
+  buffer.Start();
+  trace_event::TraceLog::GetInstance()->Flush(
+      Bind(&OnTraceDataCollected, run_loop.QuitClosure(), Unretained(&buffer)));
+  run_loop.Run();
+  buffer.Finish();
+
+  // Analyze the JSON.
+  std::unique_ptr<trace_analyzer::TraceAnalyzer> analyzer = WrapUnique(
+      trace_analyzer::TraceAnalyzer::Create(trace_output.json_output));
+  trace_analyzer::TraceEventVector events;
+  analyzer->FindEvents(Query::EventPhaseIs(TRACE_EVENT_PHASE_MEMORY_DUMP),
+                       &events);
+
+  // One event per provider, attributed to the pid configured in its Options,
+  // all sharing the same dump guid.
+  ASSERT_EQ(3u, events.size());
+  ASSERT_EQ(1u, trace_analyzer::CountMatches(events, Query::EventPidIs(123)));
+  ASSERT_EQ(1u, trace_analyzer::CountMatches(events, Query::EventPidIs(456)));
+  ASSERT_EQ(1u, trace_analyzer::CountMatches(
+                    events, Query::EventPidIs(GetCurrentProcId())));
+  ASSERT_EQ(events[0]->id, events[1]->id);
+  ASSERT_EQ(events[0]->id, events[2]->id);
+}
+
+// Tests the basics of the UnregisterAndDeleteDumpProviderSoon(): the
+// unregistration should actually delete the providers and not leak them.
+TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoon) {
+  InitializeMemoryDumpManager(false /* is_coordinator */);
+  static const int kNumProviders = 3;
+  int dtor_count = 0;
+  std::vector<std::unique_ptr<MemoryDumpProvider>> mdps;
+  // Each mock provider reports its own destruction through Destructor().
+  for (int i = 0; i < kNumProviders; ++i) {
+    std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider);
+    mdp->enable_mock_destructor = true;
+    EXPECT_CALL(*mdp, Destructor())
+        .WillOnce(Invoke([&dtor_count]() { dtor_count++; }));
+    RegisterDumpProvider(mdp.get(), nullptr, kDefaultOptions);
+    mdps.push_back(std::move(mdp));
+  }
+
+  // Hand ownership of every provider back to the MemoryDumpManager.
+  while (!mdps.empty()) {
+    mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdps.back()));
+    mdps.pop_back();
+  }
+
+  // Every provider's destructor must have run, i.e. none was leaked.
+  ASSERT_EQ(kNumProviders, dtor_count);
+}
+
+// This test checks against races when unregistering an unbound dump provider
+// from another thread while dumping. It registers one MDP and, when
+// OnMemoryDump() is called, it invokes UnregisterAndDeleteDumpProviderSoon()
+// from another thread. The OnMemoryDump() and the dtor call are expected to
+// happen on the same thread (the MemoryDumpManager utility thread).
+TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoonDuringDump) {
+  InitializeMemoryDumpManager(false /* is_coordinator */);
+  std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider);
+  mdp->enable_mock_destructor = true;
+  RegisterDumpProvider(mdp.get(), nullptr, kDefaultOptions);
+
+  // Records the thread OnMemoryDump() ran on, so the destructor expectation
+  // below can assert it runs on that same thread.
+  base::PlatformThreadRef thread_ref;
+  auto self_unregister_from_another_thread = [&mdp, &thread_ref](
+      const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
+    thread_ref = PlatformThread::CurrentRef();
+    TestIOThread thread_for_unregistration(TestIOThread::kAutoStart);
+    thread_for_unregistration.PostTaskAndWait(
+        FROM_HERE,
+        base::Bind(
+            &MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon,
+            base::Unretained(MemoryDumpManager::GetInstance()),
+            base::Passed(std::unique_ptr<MemoryDumpProvider>(std::move(mdp)))));
+    thread_for_unregistration.Stop();
+    return true;
+  };
+  EXPECT_CALL(*mdp, OnMemoryDump(_, _))
+      .Times(1)
+      .WillOnce(Invoke(self_unregister_from_another_thread));
+  EXPECT_CALL(*mdp, Destructor())
+      .Times(1)
+      .WillOnce(Invoke([&thread_ref]() {
+        EXPECT_EQ(thread_ref, PlatformThread::CurrentRef());
+      }));
+
+  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
+  // Two dumps are requested but OnMemoryDump() may run only once (Times(1)
+  // above): the provider must not be invoked again after its deletion.
+  for (int i = 0; i < 2; ++i) {
+    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                             MemoryDumpLevelOfDetail::DETAILED);
+  }
+  DisableTracing();
+}
+
+// Only providers registered under a whitelisted name may contribute to
+// BACKGROUND level dumps.
+TEST_F(MemoryDumpManagerTest, TestWhitelistingMDP) {
+  InitializeMemoryDumpManager(false /* is_coordinator */);
+  SetDumpProviderWhitelistForTesting(kTestMDPWhitelist);
+  // |mdp1| is registered without a whitelisted name, |mdp2| with one.
+  std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider);
+  RegisterDumpProvider(mdp1.get());
+  std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider);
+  RegisterDumpProvider(mdp2.get(), nullptr, kDefaultOptions,
+                       kWhitelistedMDPName);
+
+  EXPECT_CALL(*mdp1, OnMemoryDump(_, _)).Times(0);
+  EXPECT_CALL(*mdp2, OnMemoryDump(_, _)).Times(1).WillOnce(Return(true));
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+
+  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+  EXPECT_FALSE(IsPeriodicDumpingEnabled());
+  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                           MemoryDumpLevelOfDetail::BACKGROUND);
+  DisableTracing();
+}
+
+// With a background-trigger trace config, periodic BACKGROUND dumps run while
+// explicit LIGHT/DETAILED requests must fail.
+TEST_F(MemoryDumpManagerTest, TestBackgroundTracingSetup) {
+  InitializeMemoryDumpManager(true /* is_coordinator */);
+
+  RunLoop run_loop;
+  auto quit_closure = run_loop.QuitClosure();
+
+  // Expect a sequence of background dumps; the 6th one quits the RunLoop from
+  // the main task runner.
+  testing::InSequence sequence;
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(IsBackgroundDump(), _))
+      .Times(5);
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(IsBackgroundDump(), _))
+      .WillOnce(Invoke([quit_closure](const MemoryDumpRequestArgs& args,
+                                      const MemoryDumpCallback& callback) {
+        ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, quit_closure);
+      }));
+  // Swallow any further periodic requests until tracing gets disabled.
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(AnyNumber());
+
+  EnableTracingWithTraceConfig(
+      TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
+          1 /* period_ms */));
+
+  // Only background mode dumps should be allowed with the trace config.
+  last_callback_success_ = false;
+  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                           MemoryDumpLevelOfDetail::LIGHT);
+  EXPECT_FALSE(last_callback_success_);
+  last_callback_success_ = false;
+  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                           MemoryDumpLevelOfDetail::DETAILED);
+  EXPECT_FALSE(last_callback_success_);
+
+  ASSERT_TRUE(IsPeriodicDumpingEnabled());
+  run_loop.Run();
+  DisableTracing();
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/memory_dump_provider.h b/libchrome/base/trace_event/memory_dump_provider.h
new file mode 100644
index 0000000..2c50286
--- /dev/null
+++ b/libchrome/base/trace_event/memory_dump_provider.h
@@ -0,0 +1,64 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_H_
+#define BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#include "base/trace_event/memory_dump_request_args.h"
+
+namespace base {
+namespace trace_event {
+
+class ProcessMemoryDump;
+
+// The contract interface that memory dump providers must implement.
+class BASE_EXPORT MemoryDumpProvider {
+ public:
+  // Optional arguments for MemoryDumpManager::RegisterDumpProvider().
+  struct Options {
+    Options()
+        : target_pid(kNullProcessId),
+          dumps_on_single_thread_task_runner(false) {}
+
+    // If the dump provider generates dumps on behalf of another process,
+    // |target_pid| contains the pid of that process.
+    // The default value is kNullProcessId, which means that the dump provider
+    // generates dumps for the current process.
+    ProcessId target_pid;
+
+    // |dumps_on_single_thread_task_runner| is true if the dump provider runs on
+    // a SingleThreadTaskRunner, which is usually the case. It is faster to run
+    // all providers that run on the same thread together without thread hops.
+    bool dumps_on_single_thread_task_runner;
+  };
+
+  virtual ~MemoryDumpProvider() {}
+
+  // Called by the MemoryDumpManager when generating memory dumps.
+  // The |args| specify if the embedder should generate light/heavy dumps on
+  // dump requests. The embedder should return true if the |pmd| was
+  // successfully populated, false if something went wrong and the dump should
+  // be considered invalid.
+  // (Note, the MemoryDumpManager has a fail-safe logic which will disable the
+  // MemoryDumpProvider for the entire trace session if it fails consistently).
+  virtual bool OnMemoryDump(const MemoryDumpArgs& args,
+                            ProcessMemoryDump* pmd) = 0;
+
+  // Called by the MemoryDumpManager when an allocator should start or stop
+  // collecting extensive allocation data, if supported. The bool argument
+  // indicates whether profiling is being enabled (true) or disabled (false).
+  // The default implementation is a no-op.
+  virtual void OnHeapProfilingEnabled(bool) {}
+
+ protected:
+  MemoryDumpProvider() {}
+
+  // Providers are identity objects registered with the MemoryDumpManager;
+  // copying them is disallowed.
+  DISALLOW_COPY_AND_ASSIGN(MemoryDumpProvider);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_H_
diff --git a/libchrome/base/trace_event/memory_dump_request_args.cc b/libchrome/base/trace_event/memory_dump_request_args.cc
new file mode 100644
index 0000000..e6c5b87
--- /dev/null
+++ b/libchrome/base/trace_event/memory_dump_request_args.cc
@@ -0,0 +1,55 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_request_args.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace trace_event {
+
+// static
+// NOTE(review): the "// static" marker above looks stale — this is a free
+// function, not a static class member.
+// Maps each MemoryDumpType enumerator to its serialized string name.
+const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) {
+  switch (dump_type) {
+    case MemoryDumpType::TASK_BEGIN:
+      return "task_begin";
+    case MemoryDumpType::TASK_END:
+      return "task_end";
+    case MemoryDumpType::PERIODIC_INTERVAL:
+      return "periodic_interval";
+    case MemoryDumpType::EXPLICITLY_TRIGGERED:
+      return "explicitly_triggered";
+  }
+  // The switch above is exhaustive; reaching here indicates a corrupted value.
+  NOTREACHED();
+  return "unknown";
+}
+
+// Maps each MemoryDumpLevelOfDetail enumerator to its serialized string name.
+const char* MemoryDumpLevelOfDetailToString(
+    const MemoryDumpLevelOfDetail& level_of_detail) {
+  switch (level_of_detail) {
+    case MemoryDumpLevelOfDetail::BACKGROUND:
+      return "background";
+    case MemoryDumpLevelOfDetail::LIGHT:
+      return "light";
+    case MemoryDumpLevelOfDetail::DETAILED:
+      return "detailed";
+  }
+  NOTREACHED();
+  return "unknown";
+}
+
+// Inverse of MemoryDumpLevelOfDetailToString(). An unrecognized |str| hits
+// NOTREACHED() and then returns LAST.
+MemoryDumpLevelOfDetail StringToMemoryDumpLevelOfDetail(
+    const std::string& str) {
+  if (str == "background")
+    return MemoryDumpLevelOfDetail::BACKGROUND;
+  if (str == "light")
+    return MemoryDumpLevelOfDetail::LIGHT;
+  if (str == "detailed")
+    return MemoryDumpLevelOfDetail::DETAILED;
+  NOTREACHED();
+  return MemoryDumpLevelOfDetail::LAST;
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/memory_dump_request_args.h b/libchrome/base/trace_event/memory_dump_request_args.h
new file mode 100644
index 0000000..f3ff9d8
--- /dev/null
+++ b/libchrome/base/trace_event/memory_dump_request_args.h
@@ -0,0 +1,84 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_REQUEST_ARGS_H_
+#define BASE_TRACE_EVENT_MEMORY_DUMP_REQUEST_ARGS_H_
+
+// This file defines the types and structs used to issue memory dump requests.
+// These are also used in the IPCs for coordinating inter-process memory dumps.
+
+#include <stdint.h>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+
+namespace base {
+namespace trace_event {
+
+// Captures the reason why a memory dump is being requested. This is to allow
+// selective enabling of dumps, filtering and post-processing.
+enum class MemoryDumpType {
+  TASK_BEGIN,            // Dumping memory at the beginning of a message-loop task.
+  TASK_END,              // Dumping memory at the ending of a message-loop task.
+  PERIODIC_INTERVAL,     // Dumping memory at periodic intervals.
+  EXPLICITLY_TRIGGERED,  // Non-maskable dump request.
+  LAST = EXPLICITLY_TRIGGERED  // For IPC macros.
+};
+
+// Tells the MemoryDumpProvider(s) how detailed their dumps should be.
+enum class MemoryDumpLevelOfDetail : uint32_t {
+  FIRST,  // Sentinel alias for the first (least detailed) level.
+
+  // For background tracing mode. The dump time is quick, and typically just the
+  // totals are expected. Suballocations need not be specified. Dump name must
+  // contain only pre-defined strings and string arguments cannot be added.
+  BACKGROUND = FIRST,
+
+  // For the levels below, MemoryDumpProvider instances must guarantee that the
+  // total size reported in the root node is consistent. Only the granularity of
+  // the child MemoryAllocatorDump(s) differs with the levels.
+
+  // Few entries, typically a fixed number, per dump.
+  LIGHT,
+
+  // Unrestricted amount of entries per dump.
+  DETAILED,
+
+  LAST = DETAILED  // Sentinel alias for the last (most detailed) level.
+};
+
+// Initial request arguments for a global memory dump. (see
+// MemoryDumpManager::RequestGlobalMemoryDump()).
+struct BASE_EXPORT MemoryDumpRequestArgs {
+  // Globally unique identifier. In multi-process dumps, all processes issue a
+  // local dump with the same guid. This allows the trace importers to
+  // reconstruct the global dump.
+  uint64_t dump_guid;
+
+  MemoryDumpType dump_type;
+  MemoryDumpLevelOfDetail level_of_detail;
+};
+
+// Args for ProcessMemoryDump and passed to OnMemoryDump calls for memory dump
+// providers. Dump providers are expected to read the args for creating dumps.
+struct MemoryDumpArgs {
+  // Specifies how detailed the dumps should be.
+  MemoryDumpLevelOfDetail level_of_detail;
+};
+
+// Callback invoked when a dump completes, carrying the request's guid and
+// whether the dump succeeded.
+using MemoryDumpCallback = Callback<void(uint64_t dump_guid, bool success)>;
+
+BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
+
+BASE_EXPORT const char* MemoryDumpLevelOfDetailToString(
+ const MemoryDumpLevelOfDetail& level_of_detail);
+
+BASE_EXPORT MemoryDumpLevelOfDetail
+StringToMemoryDumpLevelOfDetail(const std::string& str);
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_DUMP_REQUEST_ARGS_H_
diff --git a/libchrome/base/trace_event/memory_dump_session_state.cc b/libchrome/base/trace_event/memory_dump_session_state.cc
new file mode 100644
index 0000000..b3d9a8c
--- /dev/null
+++ b/libchrome/base/trace_event/memory_dump_session_state.cc
@@ -0,0 +1,32 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_dump_session_state.h"
+
+namespace base {
+namespace trace_event {
+
+MemoryDumpSessionState::MemoryDumpSessionState() {}
+
+MemoryDumpSessionState::~MemoryDumpSessionState() {}
+
+// Installs the heap-dump stack frame deduplicator. May be set at most once
+// per session (enforced by the DCHECK).
+void MemoryDumpSessionState::SetStackFrameDeduplicator(
+    std::unique_ptr<StackFrameDeduplicator> stack_frame_deduplicator) {
+  DCHECK(!stack_frame_deduplicator_);
+  stack_frame_deduplicator_ = std::move(stack_frame_deduplicator);
+}
+
+// Installs the heap-dump type name deduplicator. May be set at most once per
+// session (enforced by the DCHECK).
+void MemoryDumpSessionState::SetTypeNameDeduplicator(
+    std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator) {
+  DCHECK(!type_name_deduplicator_);
+  type_name_deduplicator_ = std::move(type_name_deduplicator);
+}
+
+// Copies the memory dump configuration to share across this session's dumps.
+void MemoryDumpSessionState::SetMemoryDumpConfig(
+    const TraceConfig::MemoryDumpConfig& config) {
+  memory_dump_config_ = config;
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/memory_dump_session_state.h b/libchrome/base/trace_event/memory_dump_session_state.h
new file mode 100644
index 0000000..f199ec1
--- /dev/null
+++ b/libchrome/base/trace_event/memory_dump_session_state.h
@@ -0,0 +1,69 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_SESSION_STATE_H_
+#define BASE_TRACE_EVENT_MEMORY_DUMP_SESSION_STATE_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
+#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
+#include "base/trace_event/trace_config.h"
+
+namespace base {
+namespace trace_event {
+
+// Container for state variables that should be shared across all the memory
+// dumps in a tracing session.
+class BASE_EXPORT MemoryDumpSessionState
+    : public RefCountedThreadSafe<MemoryDumpSessionState> {
+ public:
+  MemoryDumpSessionState();
+
+  // Returns the stack frame deduplicator that should be used by memory dump
+  // providers when doing a heap dump. May be null if it was never set.
+  StackFrameDeduplicator* stack_frame_deduplicator() const {
+    return stack_frame_deduplicator_.get();
+  }
+
+  // Takes ownership of |stack_frame_deduplicator|.
+  void SetStackFrameDeduplicator(
+      std::unique_ptr<StackFrameDeduplicator> stack_frame_deduplicator);
+
+  // Returns the type name deduplicator that should be used by memory dump
+  // providers when doing a heap dump. May be null if it was never set.
+  TypeNameDeduplicator* type_name_deduplicator() const {
+    return type_name_deduplicator_.get();
+  }
+
+  // Takes ownership of |type_name_deduplicator|.
+  void SetTypeNameDeduplicator(
+      std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator);
+
+  // The memory dump config captured for this tracing session.
+  const TraceConfig::MemoryDumpConfig& memory_dump_config() const {
+    return memory_dump_config_;
+  }
+
+  void SetMemoryDumpConfig(const TraceConfig::MemoryDumpConfig& config);
+
+ private:
+  friend class RefCountedThreadSafe<MemoryDumpSessionState>;
+  // Private: lifetime is managed exclusively via ref-counting.
+  ~MemoryDumpSessionState();
+
+  // Deduplicates backtraces in heap dumps so they can be written once when the
+  // trace is finalized.
+  std::unique_ptr<StackFrameDeduplicator> stack_frame_deduplicator_;
+
+  // Deduplicates type names in heap dumps so they can be written once when the
+  // trace is finalized.
+  std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator_;
+
+  // The memory dump config, copied at the time when the tracing session was
+  // started.
+  TraceConfig::MemoryDumpConfig memory_dump_config_;
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_DUMP_SESSION_STATE_H_
diff --git a/libchrome/base/trace_event/memory_infra_background_whitelist.cc b/libchrome/base/trace_event/memory_infra_background_whitelist.cc
new file mode 100644
index 0000000..aed187f
--- /dev/null
+++ b/libchrome/base/trace_event/memory_infra_background_whitelist.cc
@@ -0,0 +1,131 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_infra_background_whitelist.h"
+
+#include <ctype.h>
+#include <string.h>
+
+#include <string>
+
+namespace base {
+namespace trace_event {
+namespace {
+
+// The names of dump providers whitelisted for background tracing. Dump
+// providers can be added here only if the background mode dump has very
+// low performance and memory overhead.
+const char* const kDumpProviderWhitelist[] = {
+    "BlinkGC",
+    "ChildDiscardableSharedMemoryManager",
+    "DOMStorage",
+    "HostDiscardableSharedMemoryManager",
+    "IndexedDBBackingStore",
+    "JavaHeap",
+    "LeveldbValueStore",
+    "Malloc",
+    "PartitionAlloc",
+    "ProcessMemoryMetrics",
+    "Skia",
+    "Sql",
+    "V8Isolate",
+    "WinHeap",
+    nullptr  // End of list marker.
+};
+
+// A list of string names that are allowed for the memory allocator dumps in
+// background mode. The "0x?" token stands for any stripped hex address (see
+// IsMemoryAllocatorDumpNameWhitelisted()).
+const char* const kAllocatorDumpNameWhitelist[] = {
+    "blink_gc",
+    "blink_gc/allocated_objects",
+    "discardable",
+    "discardable/child_0x?",
+    "dom_storage/0x?/cache_size",
+    "dom_storage/session_storage_0x?",
+    "java_heap",
+    "java_heap/allocated_objects",
+    "leveldb/index_db/0x?",
+    "leveldb/value_store/Extensions.Database.Open.Settings/0x?",
+    "leveldb/value_store/Extensions.Database.Open.Rules/0x?",
+    "leveldb/value_store/Extensions.Database.Open.State/0x?",
+    "leveldb/value_store/Extensions.Database.Open/0x?",
+    "leveldb/value_store/Extensions.Database.Restore/0x?",
+    "leveldb/value_store/Extensions.Database.Value.Restore/0x?",
+    "malloc",
+    "malloc/allocated_objects",
+    "malloc/metadata_fragmentation_caches",
+    "partition_alloc/allocated_objects",
+    "partition_alloc/partitions",
+    "partition_alloc/partitions/buffer",
+    "partition_alloc/partitions/fast_malloc",
+    "partition_alloc/partitions/layout",
+    "skia/sk_glyph_cache",
+    "skia/sk_resource_cache",
+    "sqlite",
+    "v8/isolate_0x?/heap_spaces",
+    "v8/isolate_0x?/heap_spaces/code_space",
+    "v8/isolate_0x?/heap_spaces/large_object_space",
+    "v8/isolate_0x?/heap_spaces/map_space",
+    "v8/isolate_0x?/heap_spaces/new_space",
+    "v8/isolate_0x?/heap_spaces/old_space",
+    "v8/isolate_0x?/heap_spaces/other_spaces",
+    "v8/isolate_0x?/malloc",
+    "v8/isolate_0x?/zapped_for_debug",
+    "winheap",
+    "winheap/allocated_objects",
+    nullptr  // End of list marker.
+};
+
+// Mutable indirection so tests can swap in their own lists through the
+// *ForTesting() setters below.
+const char* const* g_dump_provider_whitelist = kDumpProviderWhitelist;
+const char* const* g_allocator_dump_name_whitelist =
+    kAllocatorDumpNameWhitelist;
+
+}  // namespace
+
+// Returns true iff |mdp_name| exactly matches an entry of the (possibly
+// test-overridden) dump provider whitelist.
+bool IsMemoryDumpProviderWhitelisted(const char* mdp_name) {
+  for (size_t i = 0; g_dump_provider_whitelist[i] != nullptr; ++i) {
+    if (strcmp(mdp_name, g_dump_provider_whitelist[i]) == 0)
+      return true;
+  }
+  return false;
+}
+
+// Returns true iff |name|, after replacing each "0x"-prefixed run of hex
+// digits with the literal token "0x?", exactly matches a whitelist entry.
+bool IsMemoryAllocatorDumpNameWhitelisted(const std::string& name) {
+  // Remove special characters, numbers (including hexadecimal which are marked
+  // by '0x') from the given string.
+  const size_t length = name.size();
+  std::string stripped_str;
+  stripped_str.reserve(length);
+  bool parsing_hex = false;
+  for (size_t i = 0; i < length; ++i) {
+    // NOTE(review): isxdigit() on a plain (possibly signed) char is undefined
+    // behavior for negative values; consider
+    // isxdigit(static_cast<unsigned char>(name[i])).
+    if (parsing_hex && isxdigit(name[i]))
+      continue;
+    parsing_hex = false;
+    if (i + 1 < length && name[i] == '0' && name[i + 1] == 'x') {
+      parsing_hex = true;
+      stripped_str.append("0x?");
+      ++i;
+    } else {
+      stripped_str.push_back(name[i]);
+    }
+  }
+
+  // Look up the stripped name in the (possibly test-overridden) whitelist.
+  for (size_t i = 0; g_allocator_dump_name_whitelist[i] != nullptr; ++i) {
+    if (stripped_str == g_allocator_dump_name_whitelist[i]) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Test hooks: replace the global whitelists. |list| must be terminated by a
+// nullptr entry and must outlive subsequent whitelist queries.
+void SetDumpProviderWhitelistForTesting(const char* const* list) {
+  g_dump_provider_whitelist = list;
+}
+
+void SetAllocatorDumpNameWhitelistForTesting(const char* const* list) {
+  g_allocator_dump_name_whitelist = list;
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/memory_infra_background_whitelist.h b/libchrome/base/trace_event/memory_infra_background_whitelist.h
new file mode 100644
index 0000000..b8d704a
--- /dev/null
+++ b/libchrome/base/trace_event/memory_infra_background_whitelist.h
@@ -0,0 +1,33 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
+#define BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
+
+// This file contains the whitelists for background mode to limit the tracing
+// overhead and remove sensitive information from traces.
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace trace_event {
+
+// Checks if the given |mdp_name| is in the whitelist.
+bool BASE_EXPORT IsMemoryDumpProviderWhitelisted(const char* mdp_name);
+
+// Checks if the given |name| matches any of the whitelisted patterns.
+bool BASE_EXPORT IsMemoryAllocatorDumpNameWhitelisted(const std::string& name);
+
+// The whitelist is replaced with the given list for tests. The last element of
+// the list must be nullptr.
+void BASE_EXPORT SetDumpProviderWhitelistForTesting(const char* const* list);
+void BASE_EXPORT
+SetAllocatorDumpNameWhitelistForTesting(const char* const* list);
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
diff --git a/libchrome/base/trace_event/process_memory_dump.cc b/libchrome/base/trace_event/process_memory_dump.cc
new file mode 100644
index 0000000..8269892
--- /dev/null
+++ b/libchrome/base/trace_event/process_memory_dump.cc
@@ -0,0 +1,373 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/process_memory_dump.h"
+
+#include <errno.h>
+
+#include <vector>
+
+#include "base/memory/ptr_util.h"
+#include "base/process/process_metrics.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/heap_profiler_heap_dump_writer.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
+#include "base/trace_event/process_memory_totals.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "build/build_config.h"
+
+#if defined(OS_IOS)
+#include <sys/sysctl.h>
+#endif
+
+#if defined(OS_POSIX)
+#include <sys/mman.h>
+#endif
+
+#if defined(OS_WIN)
+#include <Psapi.h>
+#endif
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+const char kEdgeTypeOwnership[] = "ownership";
+
+// Returns the name under which the process-local counterpart of the
+// cross-process (global) dump |guid| is registered: "global/<guid>".
+std::string GetSharedGlobalAllocatorDumpName(
+    const MemoryAllocatorDumpGuid& guid) {
+  return "global/" + guid.ToString();
+}
+
+#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+// Returns the number of pages, rounded up, needed to cover |mapped_size|
+// bytes given |page_size| bytes per page.
+size_t GetSystemPageCount(size_t mapped_size, size_t page_size) {
+  return (mapped_size + page_size - 1) / page_size;
+}
+#endif
+
+} // namespace
+
+// static
+bool ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = false;
+
+#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+// static
+size_t ProcessMemoryDump::GetSystemPageSize() {
+#if defined(OS_IOS)
+  // On iOS, getpagesize() returns the user page size, but kernel page sizes
+  // are needed for sizing the arrays passed to mincore(). sysctlbyname()
+  // should be used for this. Refer to crbug.com/542671 and Apple
+  // rdar://23651782.
+  int pagesize;
+  size_t pagesize_len;
+  // First query only the result size, to verify it matches sizeof(int).
+  int status = sysctlbyname("vm.pagesize", NULL, &pagesize_len, nullptr, 0);
+  if (!status && pagesize_len == sizeof(pagesize)) {
+    if (!sysctlbyname("vm.pagesize", &pagesize, &pagesize_len, nullptr, 0))
+      return pagesize;
+  }
+  LOG(ERROR) << "sysctlbyname(\"vm.pagesize\") failed.";
+  // Falls back to getpagesize() although it may be wrong in certain cases.
+#endif  // defined(OS_IOS)
+  return base::GetPageSize();
+}
+
+// static
+// Counts the bytes of |mapped_size| starting at page-aligned |start_address|
+// that are currently resident, by querying residency one |kMaxChunkSize|
+// chunk at a time. Returns 0 (and logs) if any residency query fails.
+size_t ProcessMemoryDump::CountResidentBytes(void* start_address,
+                                             size_t mapped_size) {
+  const size_t page_size = GetSystemPageSize();
+  const uintptr_t start_pointer = reinterpret_cast<uintptr_t>(start_address);
+  DCHECK_EQ(0u, start_pointer % page_size);
+
+  size_t offset = 0;
+  size_t total_resident_size = 0;
+  bool failure = false;
+
+  // An array as large as number of pages in memory segment needs to be passed
+  // to the query function. To avoid allocating a large array, the given block
+  // of memory is split into chunks of size |kMaxChunkSize|.
+  const size_t kMaxChunkSize = 8 * 1024 * 1024;
+  size_t max_vec_size =
+      GetSystemPageCount(std::min(mapped_size, kMaxChunkSize), page_size);
+#if defined(OS_MACOSX) || defined(OS_IOS)
+  std::unique_ptr<char[]> vec(new char[max_vec_size]);
+#elif defined(OS_WIN)
+  std::unique_ptr<PSAPI_WORKING_SET_EX_INFORMATION[]> vec(
+      new PSAPI_WORKING_SET_EX_INFORMATION[max_vec_size]);
+#elif defined(OS_POSIX)
+  std::unique_ptr<unsigned char[]> vec(new unsigned char[max_vec_size]);
+#endif
+
+  while (offset < mapped_size) {
+    uintptr_t chunk_start = (start_pointer + offset);
+    const size_t chunk_size = std::min(mapped_size - offset, kMaxChunkSize);
+    const size_t page_count = GetSystemPageCount(chunk_size, page_size);
+    size_t resident_page_count = 0;
+
+#if defined(OS_MACOSX) || defined(OS_IOS)
+    // mincore on macOS does not fail with EAGAIN, so no retry loop is needed.
+    failure =
+        !!mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
+#elif defined(OS_WIN)
+    for (size_t i = 0; i < page_count; i++) {
+      vec[i].VirtualAddress =
+          reinterpret_cast<void*>(chunk_start + i * page_size);
+    }
+    DWORD vec_size = static_cast<DWORD>(
+        page_count * sizeof(PSAPI_WORKING_SET_EX_INFORMATION));
+    failure = !QueryWorkingSetEx(GetCurrentProcess(), vec.get(), vec_size);
+#elif defined(OS_POSIX)
+    int error_counter = 0;
+    int result = 0;
+    // HANDLE_EINTR tries for 100 times. So following the same pattern.
+    do {
+      result =
+          mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
+    } while (result == -1 && errno == EAGAIN && error_counter++ < 100);
+    failure = !!result;
+#endif
+
+    // Bail out before reading |vec|: if the query failed, the contents of the
+    // output buffer are unspecified (previously this read uninitialized
+    // memory before checking |failure|).
+    if (failure)
+      break;
+
+#if defined(OS_MACOSX) || defined(OS_IOS)
+    for (size_t i = 0; i < page_count; i++)
+      resident_page_count += vec[i] & MINCORE_INCORE ? 1 : 0;
+#elif defined(OS_WIN)
+    for (size_t i = 0; i < page_count; i++)
+      resident_page_count += vec[i].VirtualAttributes.Valid;
+#elif defined(OS_POSIX)
+    for (size_t i = 0; i < page_count; i++)
+      resident_page_count += vec[i] & 1;
+#endif
+
+    total_resident_size += resident_page_count * page_size;
+    offset += kMaxChunkSize;
+  }
+
+  DCHECK(!failure);
+  if (failure) {
+    total_resident_size = 0;
+    LOG(ERROR) << "CountResidentBytes failed. The resident size is invalid";
+  }
+  return total_resident_size;
+}
+#endif // defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+
+// |session_state| may be null (tests pass nullptr); |dump_args| fixes the
+// level of detail (e.g. BACKGROUND) used to filter dump creation for the
+// lifetime of this instance.
+ProcessMemoryDump::ProcessMemoryDump(
+    scoped_refptr<MemoryDumpSessionState> session_state,
+    const MemoryDumpArgs& dump_args)
+    : has_process_totals_(false),
+      has_process_mmaps_(false),
+      session_state_(std::move(session_state)),
+      dump_args_(dump_args) {}
+
+ProcessMemoryDump::~ProcessMemoryDump() {}
+
+// Creates a MemoryAllocatorDump named |absolute_name| with an auto-generated
+// guid and registers it with this ProcessMemoryDump, which owns it.
+MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
+    const std::string& absolute_name) {
+  return AddAllocatorDumpInternal(
+      WrapUnique(new MemoryAllocatorDump(absolute_name, this)));
+}
+
+// Creates a MemoryAllocatorDump named |absolute_name| with the explicit
+// |guid| and registers it with this ProcessMemoryDump, which owns it.
+MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
+    const std::string& absolute_name,
+    const MemoryAllocatorDumpGuid& guid) {
+  return AddAllocatorDumpInternal(
+      WrapUnique(new MemoryAllocatorDump(absolute_name, this, guid)));
+}
+
+// Takes ownership of |mad| and indexes it by its absolute name.
+MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
+    std::unique_ptr<MemoryAllocatorDump> mad) {
+  // In background mode return the black hole dump, if invalid dump name is
+  // given.
+  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND &&
+      !IsMemoryAllocatorDumpNameWhitelisted(mad->absolute_name())) {
+    return GetBlackHoleMad();
+  }
+
+  // On a duplicate name the DCHECK fires in debug builds; in release builds
+  // the insert fails, |mad| is destroyed, and the previously registered dump
+  // with that name is returned.
+  auto insertion_result = allocator_dumps_.insert(
+      std::make_pair(mad->absolute_name(), std::move(mad)));
+  MemoryAllocatorDump* inserted_mad = insertion_result.first->second.get();
+  DCHECK(insertion_result.second) << "Duplicate name: "
+                                  << inserted_mad->absolute_name();
+  return inserted_mad;
+}
+
+// Looks up the dump registered under |absolute_name|, or nullptr.
+MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
+    const std::string& absolute_name) const {
+  auto it = allocator_dumps_.find(absolute_name);
+  if (it != allocator_dumps_.end())
+    return it->second.get();
+  // Once the black hole dump exists (a non-whitelisted dump was created in
+  // background mode), lookups of unregistered names return it instead of
+  // nullptr, keeping lookup consistent with creation.
+  if (black_hole_mad_)
+    return black_hole_mad_.get();
+  return nullptr;
+}
+
+// Returns the existing dump registered under |absolute_name|, creating it
+// first if absent.
+MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
+    const std::string& absolute_name) {
+  MemoryAllocatorDump* mad = GetAllocatorDump(absolute_name);
+  return mad ? mad : CreateAllocatorDump(absolute_name);
+}
+
+// Creates (or promotes an existing weak dump to non-weak) the process-local
+// dump corresponding to the cross-process dump identified by |guid|.
+MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
+    const MemoryAllocatorDumpGuid& guid) {
+  // Global dumps are disabled in background mode.
+  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
+    return GetBlackHoleMad();
+
+  // A shared allocator dump can be shared within a process and the guid could
+  // have been created already.
+  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
+  if (mad) {
+    // The weak flag is cleared because this method should create a non-weak
+    // dump.
+    mad->clear_flags(MemoryAllocatorDump::Flags::WEAK);
+    return mad;
+  }
+  return CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
+}
+
+// Creates the process-local dump for the cross-process dump |guid| with the
+// WEAK flag set. If a dump for |guid| already exists (weak or not) it is
+// returned unchanged.
+MemoryAllocatorDump* ProcessMemoryDump::CreateWeakSharedGlobalAllocatorDump(
+    const MemoryAllocatorDumpGuid& guid) {
+  // Global dumps are disabled in background mode.
+  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
+    return GetBlackHoleMad();
+
+  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
+  if (mad)
+    return mad;
+  mad = CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
+  mad->set_flags(MemoryAllocatorDump::Flags::WEAK);
+  return mad;
+}
+
+// Looks up the process-local dump created for the cross-process dump |guid|
+// (returns nullptr, or the black hole dump in background mode, if absent).
+MemoryAllocatorDump* ProcessMemoryDump::GetSharedGlobalAllocatorDump(
+    const MemoryAllocatorDumpGuid& guid) const {
+  return GetAllocatorDump(GetSharedGlobalAllocatorDumpName(guid));
+}
+
+// Exports the heap profiler metrics for |allocator_name| into a heap dump and
+// accounts the profiler's own overhead under
+// "tracing/heap_profiler_<allocator_name>". At most one heap dump may be
+// recorded per allocator name (enforced by the DCHECK below).
+void ProcessMemoryDump::DumpHeapUsage(
+    const base::hash_map<base::trace_event::AllocationContext,
+                         base::trace_event::AllocationMetrics>&
+        metrics_by_context,
+    base::trace_event::TraceEventMemoryOverhead& overhead,
+    const char* allocator_name) {
+  if (!metrics_by_context.empty()) {
+    DCHECK_EQ(0ul, heap_dumps_.count(allocator_name));
+    std::unique_ptr<TracedValue> heap_dump = ExportHeapDump(
+        metrics_by_context, *session_state());
+    heap_dumps_[allocator_name] = std::move(heap_dump);
+  }
+
+  std::string base_name = base::StringPrintf("tracing/heap_profiler_%s",
+                                             allocator_name);
+  overhead.DumpInto(base_name.c_str(), this);
+}
+
+// Resets this instance for reuse: drops totals, mmaps, allocator dumps,
+// ownership edges and heap dumps.
+void ProcessMemoryDump::Clear() {
+  if (has_process_totals_) {
+    process_totals_.Clear();
+    has_process_totals_ = false;
+  }
+
+  if (has_process_mmaps_) {
+    process_mmaps_.Clear();
+    has_process_mmaps_ = false;
+  }
+
+  allocator_dumps_.clear();
+  allocator_dumps_edges_.clear();
+  heap_dumps_.clear();
+}
+
+// Moves all allocator dumps, edges and heap dumps out of |other| into this
+// instance, leaving |other| empty but still usable. |other| must not carry
+// process totals or mmaps.
+void ProcessMemoryDump::TakeAllDumpsFrom(ProcessMemoryDump* other) {
+  DCHECK(!other->has_process_totals() && !other->has_process_mmaps());
+
+  // Moves the ownership of all MemoryAllocatorDump(s) contained in |other|
+  // into this ProcessMemoryDump, checking for duplicates.
+  for (auto& it : other->allocator_dumps_)
+    AddAllocatorDumpInternal(std::move(it.second));
+  other->allocator_dumps_.clear();
+
+  // Move all the edges.
+  allocator_dumps_edges_.insert(allocator_dumps_edges_.end(),
+                                other->allocator_dumps_edges_.begin(),
+                                other->allocator_dumps_edges_.end());
+  other->allocator_dumps_edges_.clear();
+
+  // Heap dump names must not collide between the two instances.
+  for (auto& it : other->heap_dumps_) {
+    DCHECK_EQ(0ul, heap_dumps_.count(it.first));
+    heap_dumps_.insert(std::make_pair(it.first, std::move(it.second)));
+  }
+  other->heap_dumps_.clear();
+}
+
+// Serializes this dump into |value| for trace export. Empty sections
+// (totals, mmaps, allocators, heaps) are omitted; "allocators_graph" is
+// always emitted, possibly as an empty array.
+void ProcessMemoryDump::AsValueInto(TracedValue* value) const {
+  if (has_process_totals_) {
+    value->BeginDictionary("process_totals");
+    process_totals_.AsValueInto(value);
+    value->EndDictionary();
+  }
+
+  if (has_process_mmaps_) {
+    value->BeginDictionary("process_mmaps");
+    process_mmaps_.AsValueInto(value);
+    value->EndDictionary();
+  }
+
+  if (allocator_dumps_.size() > 0) {
+    value->BeginDictionary("allocators");
+    for (const auto& allocator_dump_it : allocator_dumps_)
+      allocator_dump_it.second->AsValueInto(value);
+    value->EndDictionary();
+  }
+
+  if (heap_dumps_.size() > 0) {
+    value->BeginDictionary("heaps");
+    for (const auto& name_and_dump : heap_dumps_)
+      value->SetValueWithCopiedName(name_and_dump.first, *name_and_dump.second);
+    value->EndDictionary();  // "heaps"
+  }
+
+  value->BeginArray("allocators_graph");
+  for (const MemoryAllocatorDumpEdge& edge : allocator_dumps_edges_) {
+    value->BeginDictionary();
+    value->SetString("source", edge.source.ToString());
+    value->SetString("target", edge.target.ToString());
+    value->SetInteger("importance", edge.importance);
+    value->SetString("type", edge.type);
+    value->EndDictionary();
+  }
+  value->EndArray();
+}
+
+// Records the ownership edge |source| -> |target|. |importance| acts as a
+// z-index among multiple owners of the same target.
+void ProcessMemoryDump::AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
+                                         const MemoryAllocatorDumpGuid& target,
+                                         int importance) {
+  allocator_dumps_edges_.push_back(
+      {source, target, importance, kEdgeTypeOwnership});
+}
+
+// Convenience overload using the default importance of 0.
+void ProcessMemoryDump::AddOwnershipEdge(
+    const MemoryAllocatorDumpGuid& source,
+    const MemoryAllocatorDumpGuid& target) {
+  AddOwnershipEdge(source, target, 0 /* importance */);
+}
+
+// Expresses that |source| is suballocated from |target_node_name|: creates an
+// anonymous child dump named "<target_node_name>/__<source guid>" and an
+// ownership edge from |source| to that child.
+void ProcessMemoryDump::AddSuballocation(const MemoryAllocatorDumpGuid& source,
+                                         const std::string& target_node_name) {
+  // Do not create new dumps for suballocations in background mode.
+  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
+    return;
+
+  std::string child_mad_name = target_node_name + "/__" + source.ToString();
+  MemoryAllocatorDump* target_child_mad = CreateAllocatorDump(child_mad_name);
+  AddOwnershipEdge(source, target_child_mad->guid());
+}
+
+// Lazily creates the placeholder "discarded" dump returned for disallowed
+// creations in background mode. The DCHECK makes reaching this path fatal in
+// debug builds unless the testing flag is set.
+MemoryAllocatorDump* ProcessMemoryDump::GetBlackHoleMad() {
+  DCHECK(is_black_hole_non_fatal_for_testing_);
+  if (!black_hole_mad_)
+    black_hole_mad_.reset(new MemoryAllocatorDump("discarded", this));
+  return black_hole_mad_.get();
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/process_memory_dump.h b/libchrome/base/trace_event/process_memory_dump.h
new file mode 100644
index 0000000..d020c7d
--- /dev/null
+++ b/libchrome/base/trace_event/process_memory_dump.h
@@ -0,0 +1,223 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_PROCESS_MEMORY_DUMP_H_
+#define BASE_TRACE_EVENT_PROCESS_MEMORY_DUMP_H_
+
+#include <stddef.h>
+
+#include <unordered_map>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_vector.h"
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_dump_request_args.h"
+#include "base/trace_event/memory_dump_session_state.h"
+#include "base/trace_event/process_memory_maps.h"
+#include "base/trace_event/process_memory_totals.h"
+#include "build/build_config.h"
+
+// Define COUNT_RESIDENT_BYTES_SUPPORTED if platform supports counting of the
+// resident memory.
+#if (defined(OS_POSIX) && !defined(OS_NACL)) || defined(OS_WIN)
+#define COUNT_RESIDENT_BYTES_SUPPORTED
+#endif
+
+namespace base {
+namespace trace_event {
+
+class MemoryDumpManager;
+class MemoryDumpSessionState;
+class TracedValue;
+
+// ProcessMemoryDump is as a strongly typed container which holds the dumps
+// produced by the MemoryDumpProvider(s) for a specific process.
+class BASE_EXPORT ProcessMemoryDump {
+ public:
+ struct MemoryAllocatorDumpEdge {
+ MemoryAllocatorDumpGuid source;
+ MemoryAllocatorDumpGuid target;
+ int importance;
+ const char* type;
+ };
+
+ // Maps allocator dumps absolute names (allocator_name/heap/subheap) to
+ // MemoryAllocatorDump instances.
+ using AllocatorDumpsMap =
+ std::unordered_map<std::string, std::unique_ptr<MemoryAllocatorDump>>;
+
+ using HeapDumpsMap =
+ std::unordered_map<std::string, std::unique_ptr<TracedValue>>;
+
+#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+ // Returns the number of bytes in a kernel memory page. Some platforms may
+ // have a different value for kernel page sizes from user page sizes. It is
+ // important to use kernel memory page sizes for resident bytes calculation.
+ // In most cases, the two are the same.
+ static size_t GetSystemPageSize();
+
+ // Returns the total bytes resident for a virtual address range, with given
+ // |start_address| and |mapped_size|. |mapped_size| is specified in bytes. The
+ // value returned is valid only if the given range is currently mmapped by the
+ // process. The |start_address| must be page-aligned.
+ static size_t CountResidentBytes(void* start_address, size_t mapped_size);
+#endif
+
+ ProcessMemoryDump(scoped_refptr<MemoryDumpSessionState> session_state,
+ const MemoryDumpArgs& dump_args);
+ ~ProcessMemoryDump();
+
+ // Creates a new MemoryAllocatorDump with the given name and returns the
+ // empty object back to the caller.
+ // Arguments:
+ // absolute_name: a name that uniquely identifies allocator dumps produced
+ // by this provider. It is possible to specify nesting by using a
+ // path-like string (e.g., v8/isolate1/heap1, v8/isolate1/heap2).
+ // Leading or trailing slashes are not allowed.
+ // guid: an optional identifier, unique among all processes within the
+ // scope of a global dump. This is only relevant when using
+ // AddOwnershipEdge() to express memory sharing. If omitted,
+ // it will be automatically generated.
+ // ProcessMemoryDump handles the memory ownership of its MemoryAllocatorDumps.
+ MemoryAllocatorDump* CreateAllocatorDump(const std::string& absolute_name);
+ MemoryAllocatorDump* CreateAllocatorDump(const std::string& absolute_name,
+ const MemoryAllocatorDumpGuid& guid);
+
+ // Looks up a MemoryAllocatorDump given its allocator and heap names, or
+ // nullptr if not found.
+ MemoryAllocatorDump* GetAllocatorDump(const std::string& absolute_name) const;
+
+ MemoryAllocatorDump* GetOrCreateAllocatorDump(
+ const std::string& absolute_name);
+
+ // Creates a shared MemoryAllocatorDump, to express cross-process sharing.
+ // Shared allocator dumps are allowed to have duplicate guids within the
+ // global scope, in order to reference the same dump from multiple processes.
+ // See the design doc goo.gl/keU6Bf for reference usage patterns.
+ MemoryAllocatorDump* CreateSharedGlobalAllocatorDump(
+ const MemoryAllocatorDumpGuid& guid);
+
+ // Creates a shared MemoryAllocatorDump as CreateSharedGlobalAllocatorDump,
+ // but with a WEAK flag. A weak dump will be discarded unless a non-weak dump
+ // is created using CreateSharedGlobalAllocatorDump by at least one process.
+ // The WEAK flag does not apply if a non-weak dump with the same GUID already
+ // exists or is created later. All owners and children of the discarded dump
+ // will also be discarded transitively.
+ MemoryAllocatorDump* CreateWeakSharedGlobalAllocatorDump(
+ const MemoryAllocatorDumpGuid& guid);
+
+ // Looks up a shared MemoryAllocatorDump given its guid.
+ MemoryAllocatorDump* GetSharedGlobalAllocatorDump(
+ const MemoryAllocatorDumpGuid& guid) const;
+
+ // Returns the map of the MemoryAllocatorDumps added to this dump.
+ const AllocatorDumpsMap& allocator_dumps() const { return allocator_dumps_; }
+
+ // Dumps heap usage with |allocator_name|.
+ void DumpHeapUsage(const base::hash_map<base::trace_event::AllocationContext,
+ base::trace_event::AllocationMetrics>&
+ metrics_by_context,
+ base::trace_event::TraceEventMemoryOverhead& overhead,
+ const char* allocator_name);
+
+ // Adds an ownership relationship between two MemoryAllocatorDump(s) with the
+ // semantics: |source| owns |target|, and has the effect of attributing
+ // the memory usage of |target| to |source|. |importance| is optional and
+ // relevant only for the cases of co-ownership, where it acts as a z-index:
+ // the owner with the highest importance will be attributed |target|'s memory.
+ void AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
+ const MemoryAllocatorDumpGuid& target,
+ int importance);
+ void AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
+ const MemoryAllocatorDumpGuid& target);
+
+ const std::vector<MemoryAllocatorDumpEdge>& allocator_dumps_edges() const {
+ return allocator_dumps_edges_;
+ }
+
+ // Utility method to add a suballocation relationship with the following
+ // semantics: |source| is suballocated from |target_node_name|.
+ // This creates a child node of |target_node_name| and adds an ownership edge
+ // between |source| and the new child node. As a result, the UI will not
+ // account the memory of |source| in the target node.
+ void AddSuballocation(const MemoryAllocatorDumpGuid& source,
+ const std::string& target_node_name);
+
+ const scoped_refptr<MemoryDumpSessionState>& session_state() const {
+ return session_state_;
+ }
+
+ // Removes all the MemoryAllocatorDump(s) contained in this instance. This
+ // ProcessMemoryDump can be safely reused as if it was new once this returns.
+ void Clear();
+
+ // Merges all MemoryAllocatorDump(s) contained in |other| inside this
+ // ProcessMemoryDump, transferring their ownership to this instance.
+ // |other| will be an empty ProcessMemoryDump after this method returns.
+ // This is to allow dump providers to pre-populate ProcessMemoryDump instances
+ // and later move their contents into the ProcessMemoryDump passed as argument
+ // of the MemoryDumpProvider::OnMemoryDump(ProcessMemoryDump*) callback.
+ void TakeAllDumpsFrom(ProcessMemoryDump* other);
+
+ // Called at trace generation time to populate the TracedValue.
+ void AsValueInto(TracedValue* value) const;
+
+ ProcessMemoryTotals* process_totals() { return &process_totals_; }
+ bool has_process_totals() const { return has_process_totals_; }
+ void set_has_process_totals() { has_process_totals_ = true; }
+
+ ProcessMemoryMaps* process_mmaps() { return &process_mmaps_; }
+ bool has_process_mmaps() const { return has_process_mmaps_; }
+ void set_has_process_mmaps() { has_process_mmaps_ = true; }
+
+ const HeapDumpsMap& heap_dumps() const { return heap_dumps_; }
+
+ const MemoryDumpArgs& dump_args() const { return dump_args_; }
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(ProcessMemoryDumpTest, BackgroundModeTest);
+
+ MemoryAllocatorDump* AddAllocatorDumpInternal(
+ std::unique_ptr<MemoryAllocatorDump> mad);
+
+ MemoryAllocatorDump* GetBlackHoleMad();
+
+ ProcessMemoryTotals process_totals_;
+ bool has_process_totals_;
+
+ ProcessMemoryMaps process_mmaps_;
+ bool has_process_mmaps_;
+
+ AllocatorDumpsMap allocator_dumps_;
+ HeapDumpsMap heap_dumps_;
+
+ // State shared among all PMDs instances created in a given trace session.
+ scoped_refptr<MemoryDumpSessionState> session_state_;
+
+ // Keeps track of relationships between MemoryAllocatorDump(s).
+ std::vector<MemoryAllocatorDumpEdge> allocator_dumps_edges_;
+
+ // Level of detail of the current dump.
+ const MemoryDumpArgs dump_args_;
+
+ // This allocator dump is returned when an invalid dump is created in
+ // background mode. The attributes of the dump are ignored and not added to
+ // the trace.
+ std::unique_ptr<MemoryAllocatorDump> black_hole_mad_;
+
+ // When set to true, the DCHECK(s) for invalid dump creations on the
+ // background mode are disabled for testing.
+ static bool is_black_hole_non_fatal_for_testing_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDump);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_PROCESS_MEMORY_DUMP_H_
diff --git a/libchrome/base/trace_event/process_memory_dump_unittest.cc b/libchrome/base/trace_event/process_memory_dump_unittest.cc
new file mode 100644
index 0000000..571774a
--- /dev/null
+++ b/libchrome/base/trace_event/process_memory_dump_unittest.cc
@@ -0,0 +1,306 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/process_memory_dump.h"
+
+#include <stddef.h>
+
+#include "base/memory/aligned_memory.h"
+#include "base/memory/ptr_util.h"
+#include "base/process/process_metrics.h"
+#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+const MemoryDumpArgs kDetailedDumpArgs = {MemoryDumpLevelOfDetail::DETAILED};
+const char* const kTestDumpNameWhitelist[] = {
+ "Whitelisted/TestName", "Whitelisted/TestName_0x?",
+ "Whitelisted/0x?/TestName", nullptr};
+
+// Returns the heap dump stored under |name| in |pmd|, or nullptr if absent.
+TracedValue* GetHeapDump(const ProcessMemoryDump& pmd, const char* name) {
+  auto it = pmd.heap_dumps().find(name);
+  return it == pmd.heap_dumps().end() ? nullptr : it->second.get();
+}
+
+} // namespace
+
+// Populates a dump with every kind of content (allocator dumps, totals,
+// mmaps, edges, shared global dumps), clears it, and verifies the instance is
+// empty yet fully reusable afterwards.
+TEST(ProcessMemoryDumpTest, Clear) {
+  std::unique_ptr<ProcessMemoryDump> pmd1(
+      new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
+  pmd1->CreateAllocatorDump("mad1");
+  pmd1->CreateAllocatorDump("mad2");
+  ASSERT_FALSE(pmd1->allocator_dumps().empty());
+
+  pmd1->process_totals()->set_resident_set_bytes(42);
+  pmd1->set_has_process_totals();
+
+  pmd1->process_mmaps()->AddVMRegion(ProcessMemoryMaps::VMRegion());
+  pmd1->set_has_process_mmaps();
+
+  pmd1->AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
+                         MemoryAllocatorDumpGuid(4242));
+
+  MemoryAllocatorDumpGuid shared_mad_guid1(1);
+  MemoryAllocatorDumpGuid shared_mad_guid2(2);
+  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid2);
+
+  pmd1->Clear();
+  ASSERT_TRUE(pmd1->allocator_dumps().empty());
+  ASSERT_TRUE(pmd1->allocator_dumps_edges().empty());
+  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad1"));
+  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad2"));
+  ASSERT_FALSE(pmd1->has_process_totals());
+  ASSERT_FALSE(pmd1->has_process_mmaps());
+  ASSERT_TRUE(pmd1->process_mmaps()->vm_regions().empty());
+  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
+  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
+
+  // Check that calling AsValueInto() doesn't cause a crash.
+  std::unique_ptr<TracedValue> traced_value(new TracedValue);
+  pmd1->AsValueInto(traced_value.get());
+
+  // Check that the pmd can be reused and behaves as expected.
+  auto* mad1 = pmd1->CreateAllocatorDump("mad1");
+  auto* mad3 = pmd1->CreateAllocatorDump("mad3");
+  auto* shared_mad1 = pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+  auto* shared_mad2 =
+      pmd1->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
+  ASSERT_EQ(4u, pmd1->allocator_dumps().size());
+  ASSERT_EQ(mad1, pmd1->GetAllocatorDump("mad1"));
+  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad2"));
+  ASSERT_EQ(mad3, pmd1->GetAllocatorDump("mad3"));
+  ASSERT_EQ(shared_mad1, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
+  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
+  ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
+  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad2->flags());
+
+  traced_value.reset(new TracedValue);
+  pmd1->AsValueInto(traced_value.get());
+
+  pmd1.reset();
+}
+
+// Verifies that TakeAllDumpsFrom() moves all dumps, edges and heap dumps from
+// pmd2 into pmd1, leaving pmd2 empty but reusable, and that ownership of the
+// moved MemoryAllocatorDump(s) is transferred.
+TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
+  std::unique_ptr<TracedValue> traced_value(new TracedValue);
+  hash_map<AllocationContext, AllocationMetrics> metrics_by_context;
+  metrics_by_context[AllocationContext()] = { 1, 1 };
+  TraceEventMemoryOverhead overhead;
+
+  scoped_refptr<MemoryDumpSessionState> session_state =
+      new MemoryDumpSessionState;
+  session_state->SetStackFrameDeduplicator(
+      WrapUnique(new StackFrameDeduplicator));
+  session_state->SetTypeNameDeduplicator(
+      WrapUnique(new TypeNameDeduplicator));
+  std::unique_ptr<ProcessMemoryDump> pmd1(
+      new ProcessMemoryDump(session_state.get(), kDetailedDumpArgs));
+  auto* mad1_1 = pmd1->CreateAllocatorDump("pmd1/mad1");
+  auto* mad1_2 = pmd1->CreateAllocatorDump("pmd1/mad2");
+  pmd1->AddOwnershipEdge(mad1_1->guid(), mad1_2->guid());
+  pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump1");
+  pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump2");
+
+  std::unique_ptr<ProcessMemoryDump> pmd2(
+      new ProcessMemoryDump(session_state.get(), kDetailedDumpArgs));
+  auto* mad2_1 = pmd2->CreateAllocatorDump("pmd2/mad1");
+  auto* mad2_2 = pmd2->CreateAllocatorDump("pmd2/mad2");
+  pmd2->AddOwnershipEdge(mad2_1->guid(), mad2_2->guid());
+  pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump1");
+  pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump2");
+
+  MemoryAllocatorDumpGuid shared_mad_guid1(1);
+  MemoryAllocatorDumpGuid shared_mad_guid2(2);
+  auto* shared_mad1 = pmd2->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+  auto* shared_mad2 =
+      pmd2->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
+
+  pmd1->TakeAllDumpsFrom(pmd2.get());
+
+  // Make sure that pmd2 is empty but still usable after it has been emptied.
+  ASSERT_TRUE(pmd2->allocator_dumps().empty());
+  ASSERT_TRUE(pmd2->allocator_dumps_edges().empty());
+  ASSERT_TRUE(pmd2->heap_dumps().empty());
+  pmd2->CreateAllocatorDump("pmd2/this_mad_stays_with_pmd2");
+  ASSERT_EQ(1u, pmd2->allocator_dumps().size());
+  ASSERT_EQ(1u, pmd2->allocator_dumps().count("pmd2/this_mad_stays_with_pmd2"));
+  pmd2->AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
+                         MemoryAllocatorDumpGuid(4242));
+
+  // Check that calling AsValueInto() doesn't cause a crash.
+  pmd2->AsValueInto(traced_value.get());
+
+  // Free the |pmd2| to check that the memory ownership of the two MAD(s)
+  // has been transferred to |pmd1|.
+  pmd2.reset();
+
+  // Now check that |pmd1| has been effectively merged.
+  ASSERT_EQ(6u, pmd1->allocator_dumps().size());
+  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad1"));
+  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad2"));
+  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd2/mad1"));
+  // Fixed copy-paste bug: this previously re-checked "pmd1/mad2" instead of
+  // verifying that pmd2's second dump was actually moved.
+  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd2/mad2"));
+  ASSERT_EQ(2u, pmd1->allocator_dumps_edges().size());
+  ASSERT_EQ(shared_mad1, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
+  ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
+  ASSERT_TRUE(MemoryAllocatorDump::Flags::WEAK & shared_mad2->flags());
+  ASSERT_EQ(4u, pmd1->heap_dumps().size());
+  ASSERT_TRUE(GetHeapDump(*pmd1, "pmd1/heap_dump1") != nullptr);
+  ASSERT_TRUE(GetHeapDump(*pmd1, "pmd1/heap_dump2") != nullptr);
+  ASSERT_TRUE(GetHeapDump(*pmd1, "pmd2/heap_dump1") != nullptr);
+  ASSERT_TRUE(GetHeapDump(*pmd1, "pmd2/heap_dump2") != nullptr);
+
+  // Check that calling AsValueInto() doesn't cause a crash.
+  traced_value.reset(new TracedValue);
+  pmd1->AsValueInto(traced_value.get());
+
+  pmd1.reset();
+}
+
+// Verifies that AddSuballocation() creates the anonymous
+// "<target>/__<guid>" child dumps and the corresponding ownership edges.
+TEST(ProcessMemoryDumpTest, Suballocations) {
+  std::unique_ptr<ProcessMemoryDump> pmd(
+      new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
+  const std::string allocator_dump_name = "fakealloc/allocated_objects";
+  pmd->CreateAllocatorDump(allocator_dump_name);
+
+  // Create one allocation with an auto-assigned guid and mark it as a
+  // suballocation of "fakealloc/allocated_objects".
+  auto* pic1_dump = pmd->CreateAllocatorDump("picturemanager/picture1");
+  pmd->AddSuballocation(pic1_dump->guid(), allocator_dump_name);
+
+  // Same here, but this time create an allocation with an explicit guid.
+  auto* pic2_dump = pmd->CreateAllocatorDump("picturemanager/picture2",
+                                             MemoryAllocatorDumpGuid(0x42));
+  pmd->AddSuballocation(pic2_dump->guid(), allocator_dump_name);
+
+  // Now check that AddSuballocation() has created anonymous child dumps under
+  // "fakealloc/allocated_objects".
+  auto anon_node_1_it = pmd->allocator_dumps().find(
+      allocator_dump_name + "/__" + pic1_dump->guid().ToString());
+  ASSERT_NE(pmd->allocator_dumps().end(), anon_node_1_it);
+
+  auto anon_node_2_it =
+      pmd->allocator_dumps().find(allocator_dump_name + "/__42");
+  ASSERT_NE(pmd->allocator_dumps().end(), anon_node_2_it);
+
+  // Finally check that AddSuballocation() has created also the
+  // edges between the pictures and the anonymous allocator child dumps.
+  bool found_edge[2]{false, false};
+  for (const auto& e : pmd->allocator_dumps_edges()) {
+    found_edge[0] |= (e.source == pic1_dump->guid() &&
+                      e.target == anon_node_1_it->second->guid());
+    found_edge[1] |= (e.source == pic2_dump->guid() &&
+                      e.target == anon_node_2_it->second->guid());
+  }
+  ASSERT_TRUE(found_edge[0]);
+  ASSERT_TRUE(found_edge[1]);
+
+  // Check that calling AsValueInto() doesn't cause a crash.
+  std::unique_ptr<TracedValue> traced_value(new TracedValue);
+  pmd->AsValueInto(traced_value.get());
+
+  pmd.reset();
+}
+
+// Verifies weak/non-weak promotion semantics of shared global dumps: a weak
+// dump stays weak until a non-weak creation clears the flag, after which it
+// never becomes weak again.
+TEST(ProcessMemoryDumpTest, GlobalAllocatorDumpTest) {
+  std::unique_ptr<ProcessMemoryDump> pmd(
+      new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
+  MemoryAllocatorDumpGuid shared_mad_guid(1);
+  auto* shared_mad1 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad_guid, shared_mad1->guid());
+  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
+
+  auto* shared_mad2 = pmd->GetSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad1, shared_mad2);
+  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
+
+  auto* shared_mad3 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad1, shared_mad3);
+  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
+
+  auto* shared_mad4 = pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad1, shared_mad4);
+  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
+
+  auto* shared_mad5 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad1, shared_mad5);
+  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
+}
+
+// Verifies that in BACKGROUND mode every non-whitelisted or global dump
+// creation returns the shared black hole dump, while whitelisted names
+// (including 0x?-pattern matches) create real dumps.
+TEST(ProcessMemoryDumpTest, BackgroundModeTest) {
+  MemoryDumpArgs background_args = {MemoryDumpLevelOfDetail::BACKGROUND};
+  std::unique_ptr<ProcessMemoryDump> pmd(
+      new ProcessMemoryDump(nullptr, background_args));
+  pmd->is_black_hole_non_fatal_for_testing_ = true;
+  SetAllocatorDumpNameWhitelistForTesting(kTestDumpNameWhitelist);
+  MemoryAllocatorDump* black_hole_mad = pmd->GetBlackHoleMad();
+
+  // Invalid dump names.
+  EXPECT_EQ(black_hole_mad,
+            pmd->CreateAllocatorDump("NotWhitelisted/TestName"));
+  EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("TestName"));
+  EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("Whitelisted/Test"));
+  EXPECT_EQ(black_hole_mad,
+            pmd->CreateAllocatorDump("Not/Whitelisted/TestName"));
+  EXPECT_EQ(black_hole_mad,
+            pmd->CreateAllocatorDump("Whitelisted/TestName/Google"));
+  EXPECT_EQ(black_hole_mad,
+            pmd->CreateAllocatorDump("Whitelisted/TestName/0x1a2Google"));
+  EXPECT_EQ(black_hole_mad,
+            pmd->CreateAllocatorDump("Whitelisted/TestName/__12/Google"));
+
+  // Global dumps.
+  MemoryAllocatorDumpGuid guid(1);
+  EXPECT_EQ(black_hole_mad, pmd->CreateSharedGlobalAllocatorDump(guid));
+  EXPECT_EQ(black_hole_mad, pmd->CreateWeakSharedGlobalAllocatorDump(guid));
+  EXPECT_EQ(black_hole_mad, pmd->GetSharedGlobalAllocatorDump(guid));
+
+  // Suballocations.
+  pmd->AddSuballocation(guid, "malloc/allocated_objects");
+  EXPECT_EQ(0u, pmd->allocator_dumps_edges_.size());
+  EXPECT_EQ(0u, pmd->allocator_dumps_.size());
+
+  // Valid dump names.
+  EXPECT_NE(black_hole_mad, pmd->CreateAllocatorDump("Whitelisted/TestName"));
+  EXPECT_NE(black_hole_mad,
+            pmd->CreateAllocatorDump("Whitelisted/TestName_0xA1b2"));
+  EXPECT_NE(black_hole_mad,
+            pmd->CreateAllocatorDump("Whitelisted/0xaB/TestName"));
+
+  // GetAllocatorDump is consistent.
+  EXPECT_EQ(black_hole_mad, pmd->GetAllocatorDump("NotWhitelisted/TestName"));
+  EXPECT_NE(black_hole_mad, pmd->GetAllocatorDump("Whitelisted/TestName"));
+}
+
+#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+TEST(ProcessMemoryDumpTest, CountResidentBytes) {
+  const size_t page_size = ProcessMemoryDump::GetSystemPageSize();
+
+  // Allocate a few pages of dirty memory and check that they are resident.
+  const size_t size1 = 5 * page_size;
+  std::unique_ptr<char, base::AlignedFreeDeleter> memory1(
+      static_cast<char*>(base::AlignedAlloc(size1, page_size)));
+  memset(memory1.get(), 0, size1);
+  size_t res1 = ProcessMemoryDump::CountResidentBytes(memory1.get(), size1);
+  ASSERT_EQ(res1, size1);
+
+  // Allocate a large memory segment (> 8Mib) to exercise the chunked
+  // (multi-iteration) path of CountResidentBytes.
+  const size_t kVeryLargeMemorySize = 15 * 1024 * 1024;
+  std::unique_ptr<char, base::AlignedFreeDeleter> memory2(
+      static_cast<char*>(base::AlignedAlloc(kVeryLargeMemorySize, page_size)));
+  memset(memory2.get(), 0, kVeryLargeMemorySize);
+  size_t res2 = ProcessMemoryDump::CountResidentBytes(memory2.get(),
+                                                      kVeryLargeMemorySize);
+  ASSERT_EQ(res2, kVeryLargeMemorySize);
+}
+#endif // defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/process_memory_maps.cc b/libchrome/base/trace_event/process_memory_maps.cc
new file mode 100644
index 0000000..a121239
--- /dev/null
+++ b/libchrome/base/trace_event/process_memory_maps.cc
@@ -0,0 +1,77 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/process_memory_maps.h"
+
+#include "base/format_macros.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/trace_event_argument.h"
+
+namespace base {
+namespace trace_event {
+
+// static
+// Protection-flag bit values for VMRegion::protection_flags. The r/w/x bits
+// mirror the conventional rwx = 4/2/1 encoding; kProtectionFlagsMayshare
+// marks regions that may be shared between processes.
+const uint32_t ProcessMemoryMaps::VMRegion::kProtectionFlagsRead = 4;
+const uint32_t ProcessMemoryMaps::VMRegion::kProtectionFlagsWrite = 2;
+const uint32_t ProcessMemoryMaps::VMRegion::kProtectionFlagsExec = 1;
+const uint32_t ProcessMemoryMaps::VMRegion::kProtectionFlagsMayshare = 128;
+
+// Zero-initializes all numeric fields; |mapped_file| starts out empty.
+ProcessMemoryMaps::VMRegion::VMRegion()
+    : start_address(0),
+      size_in_bytes(0),
+      protection_flags(0),
+      byte_stats_private_dirty_resident(0),
+      byte_stats_private_clean_resident(0),
+      byte_stats_shared_dirty_resident(0),
+      byte_stats_shared_clean_resident(0),
+      byte_stats_swapped(0),
+      byte_stats_proportional_resident(0) {
+}
+
+// Member-wise copy is sufficient: VMRegion holds only scalar values and a
+// std::string.
+ProcessMemoryMaps::VMRegion::VMRegion(const VMRegion& other) = default;
+
+ProcessMemoryMaps::ProcessMemoryMaps() {
+}
+
+ProcessMemoryMaps::~ProcessMemoryMaps() {
+}
+
+// Serializes all recorded VM regions into |value| as the "vm_regions" array.
+// All byte counts and addresses are emitted as hex strings (no "0x" prefix);
+// key names are abbreviated to keep the trace JSON compact:
+//   sa = start address, sz = size, pf = protection flags, mf = mapped file,
+//   bs = byte stats {pss, private dirty/clean, shared dirty/clean, swapped}.
+void ProcessMemoryMaps::AsValueInto(TracedValue* value) const {
+  static const char kHexFmt[] = "%" PRIx64;
+
+  // Refer to the design doc goo.gl/sxfFY8 for the semantic of these fields.
+  value->BeginArray("vm_regions");
+  for (const auto& region : vm_regions_) {
+    value->BeginDictionary();
+
+    value->SetString("sa", StringPrintf(kHexFmt, region.start_address));
+    value->SetString("sz", StringPrintf(kHexFmt, region.size_in_bytes));
+    value->SetInteger("pf", region.protection_flags);
+    value->SetString("mf", region.mapped_file);
+
+    value->BeginDictionary("bs");  // byte stats
+    value->SetString(
+        "pss", StringPrintf(kHexFmt, region.byte_stats_proportional_resident));
+    value->SetString(
+        "pd", StringPrintf(kHexFmt, region.byte_stats_private_dirty_resident));
+    value->SetString(
+        "pc", StringPrintf(kHexFmt, region.byte_stats_private_clean_resident));
+    value->SetString(
+        "sd", StringPrintf(kHexFmt, region.byte_stats_shared_dirty_resident));
+    value->SetString(
+        "sc", StringPrintf(kHexFmt, region.byte_stats_shared_clean_resident));
+    value->SetString("sw", StringPrintf(kHexFmt, region.byte_stats_swapped));
+    value->EndDictionary();
+
+    value->EndDictionary();
+  }
+  value->EndArray();
+}
+
+// Discards all recorded regions, allowing the object to be reused for the
+// next dump.
+void ProcessMemoryMaps::Clear() {
+  vm_regions_.clear();
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/process_memory_maps.h b/libchrome/base/trace_event/process_memory_maps.h
new file mode 100644
index 0000000..6a73674
--- /dev/null
+++ b/libchrome/base/trace_event/process_memory_maps.h
@@ -0,0 +1,72 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_PROCESS_MEMORY_MAPS_H_
+#define BASE_TRACE_EVENT_PROCESS_MEMORY_MAPS_H_
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace trace_event {
+
+class TracedValue;
+
+// Data model for process-wide memory stats: a flat list of virtual-memory
+// regions (one VMRegion per OS mapping) that is serialized into the trace
+// via AsValueInto().
+class BASE_EXPORT ProcessMemoryMaps {
+ public:
+  // Describes one virtual-memory mapping of the process.
+  struct BASE_EXPORT VMRegion {
+    // Bitmask values for |protection_flags| (defined in the .cc file).
+    static const uint32_t kProtectionFlagsRead;
+    static const uint32_t kProtectionFlagsWrite;
+    static const uint32_t kProtectionFlagsExec;
+    static const uint32_t kProtectionFlagsMayshare;
+
+    VMRegion();
+    VMRegion(const VMRegion& other);
+
+    uint64_t start_address;
+    uint64_t size_in_bytes;
+    uint32_t protection_flags;
+    // Path of the backing file, if any (empty for anonymous mappings).
+    std::string mapped_file;
+
+    // private_dirty_resident + private_clean_resident + shared_dirty_resident +
+    // shared_clean_resident = resident set size.
+    uint64_t byte_stats_private_dirty_resident;
+    uint64_t byte_stats_private_clean_resident;
+    uint64_t byte_stats_shared_dirty_resident;
+    uint64_t byte_stats_shared_clean_resident;
+
+    uint64_t byte_stats_swapped;
+
+    // For multiprocess accounting.
+    uint64_t byte_stats_proportional_resident;
+  };
+
+  ProcessMemoryMaps();
+  ~ProcessMemoryMaps();
+
+  void AddVMRegion(const VMRegion& region) { vm_regions_.push_back(region); }
+  const std::vector<VMRegion>& vm_regions() const { return vm_regions_; }
+
+  // Called at trace generation time to populate the TracedValue.
+  void AsValueInto(TracedValue* value) const;
+
+  // Clears up all the VMRegion(s) stored.
+  void Clear();
+
+ private:
+  std::vector<VMRegion> vm_regions_;
+
+  DISALLOW_COPY_AND_ASSIGN(ProcessMemoryMaps);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_PROCESS_MEMORY_MAPS_H_
diff --git a/libchrome/base/trace_event/process_memory_totals.cc b/libchrome/base/trace_event/process_memory_totals.cc
new file mode 100644
index 0000000..de27ab3
--- /dev/null
+++ b/libchrome/base/trace_event/process_memory_totals.cc
@@ -0,0 +1,47 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/process_memory_totals.h"
+
+#include "base/format_macros.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/trace_event_argument.h"
+
+namespace base {
+namespace trace_event {
+
+// All counters start at zero; peak-RSS reporting is off until a peak value
+// is explicitly set.
+ProcessMemoryTotals::ProcessMemoryTotals()
+    : resident_set_bytes_(0),
+      peak_resident_set_bytes_(0),
+      is_peak_rss_resetable_(false) {
+}
+
+ProcessMemoryTotals::~ProcessMemoryTotals() {}
+
+// Serializes the totals into |value|. Byte counts are emitted as hex strings
+// (no "0x" prefix). The peak-RSS fields are emitted only when a non-zero
+// peak has been recorded.
+void ProcessMemoryTotals::AsValueInto(TracedValue* value) const {
+  value->SetString("resident_set_bytes",
+                   StringPrintf("%" PRIx64, resident_set_bytes_));
+  if (peak_resident_set_bytes_ > 0) {
+    value->SetString("peak_resident_set_bytes",
+                     StringPrintf("%" PRIx64, peak_resident_set_bytes_));
+    value->SetBoolean("is_peak_rss_resetable", is_peak_rss_resetable_);
+  }
+
+  // Iterate by const reference: iterating by value would copy each
+  // std::pair map entry per iteration (performance-for-range-copy).
+  for (const auto& it : extra_fields_) {
+    value->SetString(it.first, StringPrintf("%" PRIx64, it.second));
+  }
+}
+
+// Resets the resident set size between dumps.
+// NOTE(review): peak_resident_set_bytes_ and extra_fields_ are left
+// untouched here — presumably intentional (the peak persists across dumps),
+// but worth confirming against upstream.
+void ProcessMemoryTotals::Clear() {
+  resident_set_bytes_ = 0;
+}
+
+// Records an OS-specific metric. Each |name| may be set at most once
+// (enforced by the DCHECK); |name| must outlive this object since the map
+// stores the raw pointer.
+void ProcessMemoryTotals::SetExtraFieldInBytes(const char* name,
+                                               uint64_t value) {
+  DCHECK_EQ(0u, extra_fields_.count(name));
+  extra_fields_[name] = value;
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/process_memory_totals.h b/libchrome/base/trace_event/process_memory_totals.h
new file mode 100644
index 0000000..329967a
--- /dev/null
+++ b/libchrome/base/trace_event/process_memory_totals.h
@@ -0,0 +1,63 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_PROCESS_MEMORY_TOTALS_H_
+#define BASE_TRACE_EVENT_PROCESS_MEMORY_TOTALS_H_
+
+#include <stdint.h>
+
+#include <map>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace trace_event {
+
+class TracedValue;
+
+// Data model for process-wide memory stats: resident set size, optional peak
+// RSS, and a bag of OS-specific extra counters, serialized via AsValueInto().
+class BASE_EXPORT ProcessMemoryTotals {
+ public:
+  ProcessMemoryTotals();
+  ~ProcessMemoryTotals();
+
+  // Called at trace generation time to populate the TracedValue.
+  void AsValueInto(TracedValue* value) const;
+
+  // Clears up all the data collected.
+  void Clear();
+
+  uint64_t resident_set_bytes() const { return resident_set_bytes_; }
+  void set_resident_set_bytes(uint64_t value) { resident_set_bytes_ = value; }
+
+  uint64_t peak_resident_set_bytes() const { return peak_resident_set_bytes_; }
+  void set_peak_resident_set_bytes(uint64_t value) {
+    peak_resident_set_bytes_ = value;
+  }
+
+  // On some platforms (recent linux kernels, see goo.gl/sMvAVz) the peak rss
+  // can be reset. When is_peak_rss_resettable == true, the peak refers to
+  // peak from the previous measurement. When false, it is the absolute peak
+  // since the start of the process.
+  bool is_peak_rss_resetable() const { return is_peak_rss_resetable_; }
+  void set_is_peak_rss_resetable(bool value) { is_peak_rss_resetable_ = value; }
+
+  // Registers an extra metric under |name|. |name| must be a long-lived
+  // string (typically a literal); see the map comment below.
+  void SetExtraFieldInBytes(const char* name, uint64_t value);
+
+ private:
+  uint64_t resident_set_bytes_;
+  uint64_t peak_resident_set_bytes_;
+  bool is_peak_rss_resetable_;
+
+  // Extra metrics for OS-specific statistics.
+  // NOTE: keyed by const char*, so the default std::less compares pointer
+  // values, not string contents — callers must pass stable, canonical
+  // pointers (string literals).
+  std::map<const char*, uint64_t> extra_fields_;
+
+  DISALLOW_COPY_AND_ASSIGN(ProcessMemoryTotals);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_PROCESS_MEMORY_TOTALS_H_
diff --git a/libchrome/base/trace_event/trace_buffer.cc b/libchrome/base/trace_event/trace_buffer.cc
new file mode 100644
index 0000000..d40f430
--- /dev/null
+++ b/libchrome/base/trace_event/trace_buffer.cc
@@ -0,0 +1,344 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_buffer.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/trace_event/heap_profiler.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+// Fixed-capacity TraceBuffer that recycles the oldest chunks once all
+// |max_chunks| have been handed out. The recyclable_chunks_queue_ holds chunk
+// indices in recycling order; it is sized max_chunks_ + 1 so that one empty
+// slot distinguishes the full state from the empty state.
+class TraceBufferRingBuffer : public TraceBuffer {
+ public:
+  // Single-argument constructor made explicit to prevent accidental implicit
+  // conversions from size_t (per the style guide).
+  explicit TraceBufferRingBuffer(size_t max_chunks)
+      : max_chunks_(max_chunks),
+        recyclable_chunks_queue_(new size_t[queue_capacity()]),
+        queue_head_(0),
+        queue_tail_(max_chunks),
+        current_iteration_index_(0),
+        current_chunk_seq_(1) {
+    chunks_.reserve(max_chunks);
+    for (size_t i = 0; i < max_chunks; ++i)
+      recyclable_chunks_queue_[i] = i;
+  }
+
+  // Hands out the next recyclable chunk, reusing its storage if it was
+  // returned before. The slot in |chunks_| is nulled while the chunk is
+  // in flight (owned by the caller).
+  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+    HEAP_PROFILER_SCOPED_IGNORE;
+
+    // Because the number of threads is much less than the number of chunks,
+    // the queue should never be empty.
+    DCHECK(!QueueIsEmpty());
+
+    *index = recyclable_chunks_queue_[queue_head_];
+    queue_head_ = NextQueueIndex(queue_head_);
+    current_iteration_index_ = queue_head_;
+
+    if (*index >= chunks_.size())
+      chunks_.resize(*index + 1);
+
+    TraceBufferChunk* chunk = chunks_[*index].release();
+    chunks_[*index] = NULL; // Put NULL in the slot of an in-flight chunk.
+    if (chunk)
+      chunk->Reset(current_chunk_seq_++);
+    else
+      chunk = new TraceBufferChunk(current_chunk_seq_++);
+
+    return std::unique_ptr<TraceBufferChunk>(chunk);
+  }
+
+  void ReturnChunk(size_t index,
+                   std::unique_ptr<TraceBufferChunk> chunk) override {
+    // When this method is called, the queue should not be full because it
+    // can contain all chunks including the one to be returned.
+    DCHECK(!QueueIsFull());
+    DCHECK(chunk);
+    DCHECK_LT(index, chunks_.size());
+    DCHECK(!chunks_[index]);
+    chunks_[index] = std::move(chunk);
+    recyclable_chunks_queue_[queue_tail_] = index;
+    queue_tail_ = NextQueueIndex(queue_tail_);
+  }
+
+  // A ring buffer never reports full: old chunks are recycled instead.
+  bool IsFull() const override { return false; }
+
+  size_t Size() const override {
+    // This is approximate because not all of the chunks are full.
+    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
+  }
+
+  size_t Capacity() const override {
+    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
+  }
+
+  // Resolves |handle| to an event, returning NULL if the referenced chunk
+  // has been recycled since (detected via the sequence-number mismatch).
+  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
+    if (handle.chunk_index >= chunks_.size())
+      return NULL;
+    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
+    if (!chunk || chunk->seq() != handle.chunk_seq)
+      return NULL;
+    return chunk->GetEventAt(handle.event_index);
+  }
+
+  const TraceBufferChunk* NextChunk() override {
+    if (chunks_.empty())
+      return NULL;
+
+    while (current_iteration_index_ != queue_tail_) {
+      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
+      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
+      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
+        continue;
+      DCHECK(chunks_[chunk_index]);
+      return chunks_[chunk_index].get();
+    }
+    return NULL;
+  }
+
+  void EstimateTraceMemoryOverhead(
+      TraceEventMemoryOverhead* overhead) override {
+    overhead->Add("TraceBufferRingBuffer", sizeof(*this));
+    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
+         queue_index = NextQueueIndex(queue_index)) {
+      size_t chunk_index = recyclable_chunks_queue_[queue_index];
+      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
+        continue;
+      chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
+    }
+  }
+
+ private:
+  bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }
+
+  size_t QueueSize() const {
+    return queue_tail_ > queue_head_
+               ? queue_tail_ - queue_head_
+               : queue_tail_ + queue_capacity() - queue_head_;
+  }
+
+  bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; }
+
+  size_t queue_capacity() const {
+    // One extra space to help distinguish full state and empty state.
+    return max_chunks_ + 1;
+  }
+
+  size_t NextQueueIndex(size_t index) const {
+    index++;
+    if (index >= queue_capacity())
+      index = 0;
+    return index;
+  }
+
+  size_t max_chunks_;
+  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;
+
+  std::unique_ptr<size_t[]> recyclable_chunks_queue_;
+  size_t queue_head_;
+  size_t queue_tail_;
+
+  size_t current_iteration_index_;
+  uint32_t current_chunk_seq_;
+
+  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
+};
+
+// Grow-only TraceBuffer backed by a vector of chunks. Unlike the ring
+// buffer, it reports full once max_chunks_ have been created and never
+// recycles chunks.
+class TraceBufferVector : public TraceBuffer {
+ public:
+  // Single-argument constructor made explicit to prevent accidental implicit
+  // conversions from size_t (per the style guide).
+  explicit TraceBufferVector(size_t max_chunks)
+      : in_flight_chunk_count_(0),
+        current_iteration_index_(0),
+        max_chunks_(max_chunks) {
+    chunks_.reserve(max_chunks_);
+  }
+
+  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+    HEAP_PROFILER_SCOPED_IGNORE;
+
+    // This function may be called when adding normal events or indirectly from
+    // AddMetadataEventsWhileLocked(). We can not DCHECK(!IsFull()) because we
+    // have to add the metadata events and flush thread-local buffers even if
+    // the buffer is full.
+    *index = chunks_.size();
+    chunks_.push_back(NULL);  // Put NULL in the slot of a in-flight chunk.
+    ++in_flight_chunk_count_;
+    // + 1 because zero chunk_seq is not allowed.
+    return std::unique_ptr<TraceBufferChunk>(
+        new TraceBufferChunk(static_cast<uint32_t>(*index) + 1));
+  }
+
+  void ReturnChunk(size_t index,
+                   std::unique_ptr<TraceBufferChunk> chunk) override {
+    DCHECK_GT(in_flight_chunk_count_, 0u);
+    DCHECK_LT(index, chunks_.size());
+    DCHECK(!chunks_[index]);
+    --in_flight_chunk_count_;
+    // Ownership transfers to the ScopedVector.
+    chunks_[index] = chunk.release();
+  }
+
+  bool IsFull() const override { return chunks_.size() >= max_chunks_; }
+
+  size_t Size() const override {
+    // This is approximate because not all of the chunks are full.
+    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
+  }
+
+  size_t Capacity() const override {
+    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
+  }
+
+  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
+    if (handle.chunk_index >= chunks_.size())
+      return NULL;
+    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
+    if (!chunk || chunk->seq() != handle.chunk_seq)
+      return NULL;
+    return chunk->GetEventAt(handle.event_index);
+  }
+
+  const TraceBufferChunk* NextChunk() override {
+    while (current_iteration_index_ < chunks_.size()) {
+      // Skip in-flight chunks.
+      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
+      if (chunk)
+        return chunk;
+    }
+    return NULL;
+  }
+
+  void EstimateTraceMemoryOverhead(
+      TraceEventMemoryOverhead* overhead) override {
+    const size_t chunks_ptr_vector_allocated_size =
+        sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
+    const size_t chunks_ptr_vector_resident_size =
+        sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
+    overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size,
+                  chunks_ptr_vector_resident_size);
+    for (size_t i = 0; i < chunks_.size(); ++i) {
+      TraceBufferChunk* chunk = chunks_[i];
+      // Skip the in-flight (nullptr) chunks. They will be accounted by the
+      // per-thread-local dumpers, see ThreadLocalEventBuffer::OnMemoryDump.
+      if (chunk)
+        chunk->EstimateTraceMemoryOverhead(overhead);
+    }
+  }
+
+ private:
+  size_t in_flight_chunk_count_;
+  size_t current_iteration_index_;
+  size_t max_chunks_;
+  ScopedVector<TraceBufferChunk> chunks_;
+
+  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
+};
+
+} // namespace
+
+TraceBufferChunk::TraceBufferChunk(uint32_t seq) : next_free_(0), seq_(seq) {}
+
+TraceBufferChunk::~TraceBufferChunk() {}
+
+// Prepares the chunk for reuse under a new sequence number: resets the
+// previously used events and invalidates the cached overhead estimate.
+void TraceBufferChunk::Reset(uint32_t new_seq) {
+  for (size_t i = 0; i < next_free_; ++i)
+    chunk_[i].Reset();
+  next_free_ = 0;
+  seq_ = new_seq;
+  cached_overhead_estimate_.reset();
+}
+
+// Hands out the next free event slot; the caller must check IsFull() first.
+TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
+  DCHECK(!IsFull());
+  *event_index = next_free_++;
+  return &chunk_[*event_index];
+}
+
+// Accumulates this chunk's memory overhead into |overhead|. Per-event
+// estimates are cached incrementally in |cached_overhead_estimate_| so each
+// TraceEvent is measured only once; the cache is invalidated by Reset().
+void TraceBufferChunk::EstimateTraceMemoryOverhead(
+    TraceEventMemoryOverhead* overhead) {
+  if (!cached_overhead_estimate_) {
+    cached_overhead_estimate_.reset(new TraceEventMemoryOverhead);
+
+    // When estimating the size of TraceBufferChunk, exclude the array of trace
+    // events, as they are computed individually below.
+    cached_overhead_estimate_->Add("TraceBufferChunk",
+                                   sizeof(*this) - sizeof(chunk_));
+  }
+
+  const size_t num_cached_estimated_events =
+      cached_overhead_estimate_->GetCount("TraceEvent");
+  DCHECK_LE(num_cached_estimated_events, size());
+
+  // Fast path: a full chunk whose events are all cached can be reported
+  // without re-measuring anything.
+  if (IsFull() && num_cached_estimated_events == size()) {
+    overhead->Update(*cached_overhead_estimate_);
+    return;
+  }
+
+  // Measure only the events added since the last call.
+  for (size_t i = num_cached_estimated_events; i < size(); ++i)
+    chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());
+
+  if (IsFull()) {
+    cached_overhead_estimate_->AddSelf();
+  } else {
+    // The unused TraceEvents in |chunks_| are not cached. They will keep
+    // changing as new TraceEvents are added to this chunk, so they are
+    // computed on the fly.
+    const size_t num_unused_trace_events = capacity() - size();
+    overhead->Add("TraceEvent (unused)",
+                  num_unused_trace_events * sizeof(TraceEvent));
+  }
+
+  overhead->Update(*cached_overhead_estimate_);
+}
+
+// Returns a callback that appends each JSON fragment to |json_output|.
+// Unretained is safe as long as the SimpleOutput outlives the callback, as
+// documented in trace_buffer.h.
+TraceResultBuffer::OutputCallback
+TraceResultBuffer::SimpleOutput::GetCallback() {
+  return Bind(&SimpleOutput::Append, Unretained(this));
+}
+
+void TraceResultBuffer::SimpleOutput::Append(
+    const std::string& json_trace_output) {
+  json_output += json_trace_output;
+}
+
+TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {}
+
+TraceResultBuffer::~TraceResultBuffer() {}
+
+void TraceResultBuffer::SetOutputCallback(
+    const OutputCallback& json_chunk_callback) {
+  output_callback_ = json_chunk_callback;
+}
+
+// Emits the opening bracket of the JSON array and resets the comma state so
+// the buffer can be reused.
+void TraceResultBuffer::Start() {
+  append_comma_ = false;
+  output_callback_.Run("[");
+}
+
+// Emits |trace_fragment|, preceded by a comma for every fragment after the
+// first, so the concatenated output forms a valid JSON array body.
+void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
+  if (append_comma_)
+    output_callback_.Run(",");
+  append_comma_ = true;
+  output_callback_.Run(trace_fragment);
+}
+
+void TraceResultBuffer::Finish() {
+  output_callback_.Run("]");
+}
+
+// Factory functions for the two concrete TraceBuffer implementations above.
+TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) {
+  return new TraceBufferRingBuffer(max_chunks);
+}
+
+TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) {
+  return new TraceBufferVector(max_chunks);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/trace_buffer.h b/libchrome/base/trace_event/trace_buffer.h
new file mode 100644
index 0000000..4885a3c
--- /dev/null
+++ b/libchrome/base/trace_event/trace_buffer.h
@@ -0,0 +1,130 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_BUFFER_H_
+#define BASE_TRACE_EVENT_TRACE_BUFFER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+
+namespace trace_event {
+
+// TraceBufferChunk is the basic unit of TraceBuffer: a fixed-size array of
+// TraceEvents plus a sequence number used to detect stale TraceEventHandles
+// after a chunk is recycled.
+class BASE_EXPORT TraceBufferChunk {
+ public:
+  explicit TraceBufferChunk(uint32_t seq);
+  ~TraceBufferChunk();
+
+  // Resets all used events and adopts |new_seq|, invalidating outstanding
+  // handles into this chunk.
+  void Reset(uint32_t new_seq);
+  // Returns the next free event slot; callers must check IsFull() first.
+  TraceEvent* AddTraceEvent(size_t* event_index);
+  bool IsFull() const { return next_free_ == kTraceBufferChunkSize; }
+
+  uint32_t seq() const { return seq_; }
+  size_t capacity() const { return kTraceBufferChunkSize; }
+  size_t size() const { return next_free_; }
+
+  TraceEvent* GetEventAt(size_t index) {
+    DCHECK(index < size());
+    return &chunk_[index];
+  }
+  const TraceEvent* GetEventAt(size_t index) const {
+    DCHECK(index < size());
+    return &chunk_[index];
+  }
+
+  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
+
+  // These values must be kept consistent with the numbers of bits of
+  // chunk_index and event_index fields in TraceEventHandle
+  // (in trace_event_impl.h).
+  static const size_t kMaxChunkIndex = (1u << 26) - 1;
+  static const size_t kTraceBufferChunkSize = 64;
+
+ private:
+  // Index of the first unused slot in |chunk_| (== number of used events).
+  size_t next_free_;
+  // Lazily-built per-event overhead cache; see the .cc for the caching rules.
+  std::unique_ptr<TraceEventMemoryOverhead> cached_overhead_estimate_;
+  TraceEvent chunk_[kTraceBufferChunkSize];
+  uint32_t seq_;
+};
+
+// TraceBuffer holds the events as they are collected. Abstract interface with
+// two concrete strategies, created via the static factories below: a ring
+// buffer that recycles old chunks, and a grow-until-full vector.
+class BASE_EXPORT TraceBuffer {
+ public:
+  virtual ~TraceBuffer() {}
+
+  // Hands out ownership of a chunk (its slot index is returned in |*index|);
+  // ReturnChunk() gives it back once the writer is done with it.
+  virtual std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) = 0;
+  virtual void ReturnChunk(size_t index,
+                           std::unique_ptr<TraceBufferChunk> chunk) = 0;
+
+  virtual bool IsFull() const = 0;
+  virtual size_t Size() const = 0;
+  virtual size_t Capacity() const = 0;
+  // Resolves a handle to an event, or NULL if the handle is stale.
+  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) = 0;
+
+  // For iteration. Each TraceBuffer can only be iterated once.
+  virtual const TraceBufferChunk* NextChunk() = 0;
+
+
+  // Computes an estimate of the size of the buffer, including all the retained
+  // objects.
+  virtual void EstimateTraceMemoryOverhead(
+      TraceEventMemoryOverhead* overhead) = 0;
+
+  static TraceBuffer* CreateTraceBufferRingBuffer(size_t max_chunks);
+  static TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks);
+};
+
+// TraceResultBuffer collects and converts trace fragments returned by TraceLog
+// to JSON output: Start() emits "[", each AddFragment() emits a
+// comma-separated fragment, and Finish() emits "]".
+class BASE_EXPORT TraceResultBuffer {
+ public:
+  typedef base::Callback<void(const std::string&)> OutputCallback;
+
+  // If you don't need to stream JSON chunks out efficiently, and just want to
+  // get a complete JSON string after calling Finish, use this struct to collect
+  // JSON trace output.
+  struct BASE_EXPORT SimpleOutput {
+    OutputCallback GetCallback();
+    void Append(const std::string& json_string);
+
+    // Do what you want with the json_output_ string after calling
+    // TraceResultBuffer::Finish.
+    std::string json_output;
+  };
+
+  TraceResultBuffer();
+  ~TraceResultBuffer();
+
+  // Set callback. The callback will be called during Start with the initial
+  // JSON output and during AddFragment and Finish with following JSON output
+  // chunks. The callback target must live past the last calls to
+  // TraceResultBuffer::Start/AddFragment/Finish.
+  void SetOutputCallback(const OutputCallback& json_chunk_callback);
+
+  // Start JSON output. This resets all internal state, so you can reuse
+  // the TraceResultBuffer by calling Start.
+  void Start();
+
+  // Call AddFragment 0 or more times to add trace fragments from TraceLog.
+  void AddFragment(const std::string& trace_fragment);
+
+  // When all fragments have been added, call Finish to complete the JSON
+  // formatted output.
+  void Finish();
+
+ private:
+  OutputCallback output_callback_;
+  // True once the first fragment has been emitted since Start().
+  bool append_comma_;
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_BUFFER_H_
diff --git a/libchrome/base/trace_event/trace_config.cc b/libchrome/base/trace_event/trace_config.cc
new file mode 100644
index 0000000..b343ea0
--- /dev/null
+++ b/libchrome/base/trace_event/trace_config.cc
@@ -0,0 +1,706 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_config.h"
+
+#include <stddef.h>
+
+#include <utility>
+
+#include "base/json/json_reader.h"
+#include "base/json/json_writer.h"
+#include "base/memory/ptr_util.h"
+#include "base/strings/pattern.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/memory_dump_request_args.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+// String options that can be used to initialize TraceOptions.
+const char kRecordUntilFull[] = "record-until-full";
+const char kRecordContinuously[] = "record-continuously";
+const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
+const char kTraceToConsole[] = "trace-to-console";
+const char kEnableSampling[] = "enable-sampling";
+const char kEnableSystrace[] = "enable-systrace";
+const char kEnableArgumentFilter[] = "enable-argument-filter";
+
+// String parameters that can be used to parse the trace config string.
+const char kRecordModeParam[] = "record_mode";
+const char kEnableSamplingParam[] = "enable_sampling";
+const char kEnableSystraceParam[] = "enable_systrace";
+const char kEnableArgumentFilterParam[] = "enable_argument_filter";
+const char kIncludedCategoriesParam[] = "included_categories";
+const char kExcludedCategoriesParam[] = "excluded_categories";
+const char kSyntheticDelaysParam[] = "synthetic_delays";
+
+const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
+
+// String parameters that is used to parse memory dump config in trace config
+// string.
+const char kMemoryDumpConfigParam[] = "memory_dump_config";
+const char kAllowedDumpModesParam[] = "allowed_dump_modes";
+const char kTriggersParam[] = "triggers";
+const char kPeriodicIntervalParam[] = "periodic_interval_ms";
+const char kModeParam[] = "mode";
+const char kHeapProfilerOptions[] = "heap_profiler_options";
+const char kBreakdownThresholdBytes[] = "breakdown_threshold_bytes";
+
+// Default configuration of memory dumps.
+const TraceConfig::MemoryDumpConfig::Trigger kDefaultHeavyMemoryDumpTrigger = {
+ 2000, // periodic_interval_ms
+ MemoryDumpLevelOfDetail::DETAILED};
+const TraceConfig::MemoryDumpConfig::Trigger kDefaultLightMemoryDumpTrigger = {
+ 250, // periodic_interval_ms
+ MemoryDumpLevelOfDetail::LIGHT};
+
+// Wraps a copy of a TraceConfig so it can be emitted into the trace itself;
+// serialization is deferred to AppendAsTraceFormat via ToString().
+class ConvertableTraceConfigToTraceFormat
+    : public base::trace_event::ConvertableToTraceFormat {
+ public:
+  explicit ConvertableTraceConfigToTraceFormat(const TraceConfig& trace_config)
+      : trace_config_(trace_config) {}
+  ~ConvertableTraceConfigToTraceFormat() override {}
+
+  void AppendAsTraceFormat(std::string* out) const override {
+    out->append(trace_config_.ToString());
+  }
+
+ private:
+  const TraceConfig trace_config_;
+};
+
+// Returns the set of every MemoryDumpLevelOfDetail value, i.e. all dump
+// modes are allowed by default.
+std::set<MemoryDumpLevelOfDetail> GetDefaultAllowedMemoryDumpModes() {
+  std::set<MemoryDumpLevelOfDetail> all_modes;
+  for (uint32_t mode = static_cast<uint32_t>(MemoryDumpLevelOfDetail::FIRST);
+       mode <= static_cast<uint32_t>(MemoryDumpLevelOfDetail::LAST); mode++) {
+    all_modes.insert(static_cast<MemoryDumpLevelOfDetail>(mode));
+  }
+  return all_modes;
+}
+
+} // namespace
+
+TraceConfig::MemoryDumpConfig::HeapProfiler::HeapProfiler()
+    : breakdown_threshold_bytes(kDefaultBreakdownThresholdBytes) {}
+
+// Restores the heap profiler options to their default values.
+void TraceConfig::MemoryDumpConfig::HeapProfiler::Clear() {
+  breakdown_threshold_bytes = kDefaultBreakdownThresholdBytes;
+}
+
+// Replaces the current memory dump configuration wholesale.
+void TraceConfig::ResetMemoryDumpConfig(
+    const TraceConfig::MemoryDumpConfig& memory_dump_config) {
+  memory_dump_config_.Clear();
+  memory_dump_config_ = memory_dump_config;
+}
+
+TraceConfig::MemoryDumpConfig::MemoryDumpConfig() {}
+
+TraceConfig::MemoryDumpConfig::MemoryDumpConfig(
+    const MemoryDumpConfig& other) = default;
+
+TraceConfig::MemoryDumpConfig::~MemoryDumpConfig() {}
+
+// Empties the allowed dump modes, the triggers and the heap profiler options.
+void TraceConfig::MemoryDumpConfig::Clear() {
+  allowed_dump_modes.clear();
+  triggers.clear();
+  heap_profiler_options.Clear();
+}
+
+// Default config: see InitializeDefault() for the resulting category filter
+// and options.
+TraceConfig::TraceConfig() {
+  InitializeDefault();
+}
+
+// Builds a config from a category filter string plus a comma-separated
+// options string (e.g. "record-until-full,enable-sampling").
+TraceConfig::TraceConfig(StringPiece category_filter_string,
+                         StringPiece trace_options_string) {
+  InitializeFromStrings(category_filter_string, trace_options_string);
+}
+
+// Convenience overload: maps |record_mode| onto the equivalent options
+// string and delegates to InitializeFromStrings().
+TraceConfig::TraceConfig(StringPiece category_filter_string,
+                         TraceRecordMode record_mode) {
+  std::string trace_options_string;
+  switch (record_mode) {
+    case RECORD_UNTIL_FULL:
+      trace_options_string = kRecordUntilFull;
+      break;
+    case RECORD_CONTINUOUSLY:
+      trace_options_string = kRecordContinuously;
+      break;
+    case RECORD_AS_MUCH_AS_POSSIBLE:
+      trace_options_string = kRecordAsMuchAsPossible;
+      break;
+    case ECHO_TO_CONSOLE:
+      trace_options_string = kTraceToConsole;
+      break;
+    default:
+      NOTREACHED();
+  }
+  InitializeFromStrings(category_filter_string, trace_options_string);
+}
+
+// Builds a config from an already-parsed JSON dictionary.
+TraceConfig::TraceConfig(const DictionaryValue& config) {
+  InitializeFromConfigDict(config);
+}
+
+// Builds a config from a JSON config string; an empty string yields the
+// default configuration.
+TraceConfig::TraceConfig(StringPiece config_string) {
+  if (!config_string.empty())
+    InitializeFromConfigString(config_string);
+  else
+    InitializeDefault();
+}
+
+TraceConfig::TraceConfig(const TraceConfig& tc)
+    : record_mode_(tc.record_mode_),
+      enable_sampling_(tc.enable_sampling_),
+      enable_systrace_(tc.enable_systrace_),
+      enable_argument_filter_(tc.enable_argument_filter_),
+      memory_dump_config_(tc.memory_dump_config_),
+      included_categories_(tc.included_categories_),
+      disabled_categories_(tc.disabled_categories_),
+      excluded_categories_(tc.excluded_categories_),
+      synthetic_delays_(tc.synthetic_delays_) {}
+
+TraceConfig::~TraceConfig() {
+}
+
+TraceConfig& TraceConfig::operator=(const TraceConfig& rhs) {
+  if (this == &rhs)
+    return *this;
+
+  record_mode_ = rhs.record_mode_;
+  enable_sampling_ = rhs.enable_sampling_;
+  enable_systrace_ = rhs.enable_systrace_;
+  enable_argument_filter_ = rhs.enable_argument_filter_;
+  memory_dump_config_ = rhs.memory_dump_config_;
+  included_categories_ = rhs.included_categories_;
+  disabled_categories_ = rhs.disabled_categories_;
+  excluded_categories_ = rhs.excluded_categories_;
+  synthetic_delays_ = rhs.synthetic_delays_;
+  return *this;
+}
+
+const TraceConfig::StringList& TraceConfig::GetSyntheticDelayValues() const {
+  return synthetic_delays_;
+}
+
+// Serializes the whole config to its JSON string representation.
+std::string TraceConfig::ToString() const {
+  std::unique_ptr<DictionaryValue> dict = ToDict();
+  std::string json;
+  JSONWriter::Write(*dict, &json);
+  return json;
+}
+
+// Wraps this config so it can be emitted into the trace itself (see
+// ConvertableTraceConfigToTraceFormat above).
+std::unique_ptr<ConvertableToTraceFormat>
+TraceConfig::AsConvertableToTraceFormat() const {
+  return WrapUnique(new ConvertableTraceConfigToTraceFormat(*this));
+}
+
+// Rebuilds the category filter string: included and disabled-by-default
+// categories first, then excluded ones ("-" prefixed), then DELAY(...)
+// entries.
+std::string TraceConfig::ToCategoryFilterString() const {
+  std::string filter_string;
+  WriteCategoryFilterString(included_categories_, &filter_string, true);
+  WriteCategoryFilterString(disabled_categories_, &filter_string, true);
+  WriteCategoryFilterString(excluded_categories_, &filter_string, false);
+  WriteCategoryFilterString(synthetic_delays_, &filter_string);
+  return filter_string;
+}
+
+// Decides whether a comma-separated category group should be enabled.
+// Pass 1: any token explicitly enabled => enabled. Pass 2: check the
+// excluded list; if no token is excluded and there is no explicit include
+// list, the group is enabled provided it contained at least one
+// non-disabled-by-default category.
+bool TraceConfig::IsCategoryGroupEnabled(
+    const char* category_group_name) const {
+  // TraceLog should call this method only as part of enabling/disabling
+  // categories.
+
+  bool had_enabled_by_default = false;
+  DCHECK(category_group_name);
+  std::string category_group_name_str = category_group_name;
+  StringTokenizer category_group_tokens(category_group_name_str, ",");
+  while (category_group_tokens.GetNext()) {
+    std::string category_group_token = category_group_tokens.token();
+    // Don't allow empty tokens, nor tokens with leading or trailing space.
+    DCHECK(!TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+               category_group_token))
+        << "Disallowed category string";
+    if (IsCategoryEnabled(category_group_token.c_str()))
+      return true;
+
+    // Track whether the group contains any regular (not disabled-by-default)
+    // category; this feeds the fallback decision at the end.
+    if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
+      had_enabled_by_default = true;
+  }
+  // Do a second pass to check for explicitly disabled categories
+  // (those explicitly enabled have priority due to first pass).
+  category_group_tokens.Reset();
+  bool category_group_disabled = false;
+  while (category_group_tokens.GetNext()) {
+    std::string category_group_token = category_group_tokens.token();
+    for (const std::string& category : excluded_categories_) {
+      if (MatchPattern(category_group_token, category)) {
+        // Current token of category_group_name is present in excluded_list.
+        // Flag the exclusion and proceed further to check if any of the
+        // remaining categories of category_group_name is not present in the
+        // excluded_ list.
+        category_group_disabled = true;
+        break;
+      }
+      // One of the category of category_group_name is not present in
+      // excluded_ list. So, if it's not a disabled-by-default category,
+      // it has to be included_ list. Enable the category_group_name
+      // for recording.
+      if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*"))) {
+        category_group_disabled = false;
+      }
+    }
+    // One of the categories present in category_group_name is not present in
+    // excluded_ list. Implies this category_group_name group can be enabled
+    // for recording, since one of its groups is enabled for recording.
+    if (!category_group_disabled)
+      break;
+  }
+  // If the category group is not excluded, and there are no included patterns
+  // we consider this category group enabled, as long as it had categories
+  // other than disabled-by-default.
+  return !category_group_disabled && had_enabled_by_default &&
+         included_categories_.empty();
+}
+
+// Combines |config| into this config.  Record mode and the enable-* flags
+// are expected to match; a mismatch is only logged and this config's
+// options are kept.
+void TraceConfig::Merge(const TraceConfig& config) {
+  if (record_mode_ != config.record_mode_
+      || enable_sampling_ != config.enable_sampling_
+      || enable_systrace_ != config.enable_systrace_
+      || enable_argument_filter_ != config.enable_argument_filter_) {
+    DLOG(ERROR) << "Attempting to merge trace config with a different "
+                << "set of options.";
+  }
+
+  // Keep included patterns only if both filters have an included entry.
+  // Otherwise, one of the filter was specifying "*" and we want to honor the
+  // broadest filter.
+  if (HasIncludedPatterns() && config.HasIncludedPatterns()) {
+    included_categories_.insert(included_categories_.end(),
+                                config.included_categories_.begin(),
+                                config.included_categories_.end());
+  } else {
+    included_categories_.clear();
+  }
+
+  // The remaining lists are simply concatenated; no de-duplication is done.
+  memory_dump_config_.triggers.insert(memory_dump_config_.triggers.end(),
+                             config.memory_dump_config_.triggers.begin(),
+                             config.memory_dump_config_.triggers.end());
+
+  disabled_categories_.insert(disabled_categories_.end(),
+                              config.disabled_categories_.begin(),
+                              config.disabled_categories_.end());
+  excluded_categories_.insert(excluded_categories_.end(),
+                              config.excluded_categories_.begin(),
+                              config.excluded_categories_.end());
+  synthetic_delays_.insert(synthetic_delays_.end(),
+                           config.synthetic_delays_.begin(),
+                           config.synthetic_delays_.end());
+}
+
+// Resets every field: RECORD_UNTIL_FULL, all enable-* flags off, all
+// category/delay lists empty, and the memory dump config cleared.
+void TraceConfig::Clear() {
+  record_mode_ = RECORD_UNTIL_FULL;
+  enable_sampling_ = false;
+  enable_systrace_ = false;
+  enable_argument_filter_ = false;
+  included_categories_.clear();
+  disabled_categories_.clear();
+  excluded_categories_.clear();
+  synthetic_delays_.clear();
+  memory_dump_config_.Clear();
+}
+
+// Resets only the recording options to their defaults.  The category lists
+// are not touched here, so a freshly default-initialized config has empty
+// lists and therefore enables all non-disabled-by-default categories (see
+// IsCategoryGroupEnabled()).
+void TraceConfig::InitializeDefault() {
+  record_mode_ = RECORD_UNTIL_FULL;
+  enable_sampling_ = false;
+  enable_systrace_ = false;
+  enable_argument_filter_ = false;
+}
+
+// Populates this config from a parsed JSON dictionary.  Missing or
+// unrecognized keys fall back to defaults (RECORD_UNTIL_FULL, flags off).
+void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
+  record_mode_ = RECORD_UNTIL_FULL;
+  std::string record_mode;
+  if (dict.GetString(kRecordModeParam, &record_mode)) {
+    // Unrecognized record-mode strings keep RECORD_UNTIL_FULL.
+    if (record_mode == kRecordUntilFull) {
+      record_mode_ = RECORD_UNTIL_FULL;
+    } else if (record_mode == kRecordContinuously) {
+      record_mode_ = RECORD_CONTINUOUSLY;
+    } else if (record_mode == kTraceToConsole) {
+      record_mode_ = ECHO_TO_CONSOLE;
+    } else if (record_mode == kRecordAsMuchAsPossible) {
+      record_mode_ = RECORD_AS_MUCH_AS_POSSIBLE;
+    }
+  }
+
+  // Absent boolean keys default to false.
+  bool val;
+  enable_sampling_ = dict.GetBoolean(kEnableSamplingParam, &val) ? val : false;
+  enable_systrace_ = dict.GetBoolean(kEnableSystraceParam, &val) ? val : false;
+  enable_argument_filter_ =
+      dict.GetBoolean(kEnableArgumentFilterParam, &val) ? val : false;
+
+  const ListValue* category_list = nullptr;
+  if (dict.GetList(kIncludedCategoriesParam, &category_list))
+    SetCategoriesFromIncludedList(*category_list);
+  if (dict.GetList(kExcludedCategoriesParam, &category_list))
+    SetCategoriesFromExcludedList(*category_list);
+  if (dict.GetList(kSyntheticDelaysParam, &category_list))
+    SetSyntheticDelaysFromList(*category_list);
+
+  if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+    // If dump triggers not set, the client is using the legacy with just
+    // category enabled. So, use the default periodic dump config.
+    const DictionaryValue* memory_dump_config = nullptr;
+    if (dict.GetDictionary(kMemoryDumpConfigParam, &memory_dump_config))
+      SetMemoryDumpConfigFromConfigDict(*memory_dump_config);
+    else
+      SetDefaultMemoryDumpConfig();
+  }
+}
+
+// Parses |config_string| as a JSON dictionary; anything that does not parse
+// to a dictionary falls back to the default configuration.
+void TraceConfig::InitializeFromConfigString(StringPiece config_string) {
+  auto parsed = DictionaryValue::From(JSONReader::Read(config_string));
+  if (!parsed) {
+    InitializeDefault();
+    return;
+  }
+  InitializeFromConfigDict(*parsed);
+}
+
+// Populates this config from the legacy string pair: a comma-separated
+// category filter and a comma-separated options string (see the two-string
+// constructor comment in the header for the accepted syntax).
+void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
+                                        StringPiece trace_options_string) {
+  if (!category_filter_string.empty()) {
+    std::vector<std::string> split = SplitString(
+        category_filter_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+    for (const std::string& category : split) {
+      // Ignore empty categories.
+      if (category.empty())
+        continue;
+      // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
+      if (StartsWith(category, kSyntheticDelayCategoryFilterPrefix,
+                     CompareCase::SENSITIVE) &&
+          category.back() == ')') {
+        // Strip the "DELAY(" prefix and the trailing ')'.
+        std::string synthetic_category = category.substr(
+            strlen(kSyntheticDelayCategoryFilterPrefix),
+            category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
+        // Keep only well-formed "name;options" entries: a ';' that is
+        // neither the first nor the last character.
+        size_t name_length = synthetic_category.find(';');
+        if (name_length != std::string::npos && name_length > 0 &&
+            name_length != synthetic_category.size() - 1) {
+          synthetic_delays_.push_back(synthetic_category);
+        }
+      } else if (category.front() == '-') {
+        // Excluded categories start with '-'.
+        // Remove '-' from category string.
+        excluded_categories_.push_back(category.substr(1));
+      } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
+                                  TRACE_DISABLED_BY_DEFAULT("")) == 0) {
+        disabled_categories_.push_back(category);
+      } else {
+        included_categories_.push_back(category);
+      }
+    }
+  }
+
+  // Reset options before parsing; later record-mode tokens override earlier
+  // ones, and unknown tokens are ignored.
+  record_mode_ = RECORD_UNTIL_FULL;
+  enable_sampling_ = false;
+  enable_systrace_ = false;
+  enable_argument_filter_ = false;
+  if (!trace_options_string.empty()) {
+    std::vector<std::string> split =
+        SplitString(trace_options_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+    for (const std::string& token : split) {
+      if (token == kRecordUntilFull) {
+        record_mode_ = RECORD_UNTIL_FULL;
+      } else if (token == kRecordContinuously) {
+        record_mode_ = RECORD_CONTINUOUSLY;
+      } else if (token == kTraceToConsole) {
+        record_mode_ = ECHO_TO_CONSOLE;
+      } else if (token == kRecordAsMuchAsPossible) {
+        record_mode_ = RECORD_AS_MUCH_AS_POSSIBLE;
+      } else if (token == kEnableSampling) {
+        enable_sampling_ = true;
+      } else if (token == kEnableSystrace) {
+        enable_systrace_ = true;
+      } else if (token == kEnableArgumentFilter) {
+        enable_argument_filter_ = true;
+      }
+    }
+  }
+
+  // Legacy strings cannot specify a memory dump config, so enabling the
+  // memory-infra category implies the default periodic dump config.
+  if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+    SetDefaultMemoryDumpConfig();
+  }
+}
+
+// Rebuilds included_categories_ from |included_list|, routing entries with
+// the disabled-by-default prefix into disabled_categories_ instead.  Note
+// that disabled_categories_ is appended to, not cleared.  Non-string
+// entries are skipped.
+void TraceConfig::SetCategoriesFromIncludedList(
+    const ListValue& included_list) {
+  included_categories_.clear();
+  for (size_t i = 0; i < included_list.GetSize(); ++i) {
+    std::string category;
+    if (!included_list.GetString(i, &category))
+      continue;
+    if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
+                         TRACE_DISABLED_BY_DEFAULT("")) == 0) {
+      disabled_categories_.push_back(category);
+    } else {
+      included_categories_.push_back(category);
+    }
+  }
+}
+
+// Rebuilds excluded_categories_ from the string entries of |excluded_list|.
+// Non-string entries are skipped.
+void TraceConfig::SetCategoriesFromExcludedList(
+    const ListValue& excluded_list) {
+  excluded_categories_.clear();
+  for (size_t i = 0; i < excluded_list.GetSize(); ++i) {
+    std::string category;
+    if (!excluded_list.GetString(i, &category))
+      continue;
+    excluded_categories_.push_back(category);
+  }
+}
+
+// Rebuilds synthetic_delays_ from |list|.  An entry is kept only if it is a
+// string of the form "name;options" — i.e. it contains a ';' that is neither
+// the first nor the last character.
+void TraceConfig::SetSyntheticDelaysFromList(const ListValue& list) {
+  synthetic_delays_.clear();
+  for (size_t i = 0; i < list.GetSize(); ++i) {
+    std::string delay;
+    if (!list.GetString(i, &delay))
+      continue;
+    // Synthetic delays are of the form "delay;option;option;...".
+    size_t name_length = delay.find(';');
+    if (name_length != std::string::npos && name_length > 0 &&
+        name_length != delay.size() - 1) {
+      synthetic_delays_.push_back(delay);
+    }
+  }
+}
+
+// Writes |categories| into |dict| under key |param| as a JSON list of
+// strings.  No key is emitted when the list is empty.
+void TraceConfig::AddCategoryToDict(DictionaryValue* dict,
+                                    const char* param,
+                                    const StringList& categories) const {
+  if (categories.empty())
+    return;
+
+  auto category_list = MakeUnique<ListValue>();
+  for (const std::string& category : categories)
+    category_list->AppendString(category);
+  dict->Set(param, std::move(category_list));
+}
+
+// Populates memory_dump_config_ from its JSON sub-dictionary: allowed dump
+// modes, periodic dump triggers, and heap profiler options.
+void TraceConfig::SetMemoryDumpConfigFromConfigDict(
+    const DictionaryValue& memory_dump_config) {
+  // Set allowed dump modes.
+  memory_dump_config_.allowed_dump_modes.clear();
+  const ListValue* allowed_modes_list;
+  if (memory_dump_config.GetList(kAllowedDumpModesParam, &allowed_modes_list)) {
+    for (size_t i = 0; i < allowed_modes_list->GetSize(); ++i) {
+      std::string level_of_detail_str;
+      allowed_modes_list->GetString(i, &level_of_detail_str);
+      memory_dump_config_.allowed_dump_modes.insert(
+          StringToMemoryDumpLevelOfDetail(level_of_detail_str));
+    }
+  } else {
+    // If allowed modes param is not given then allow all modes by default.
+    memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
+  }
+
+  // Set triggers: each trigger needs at least a periodic interval; entries
+  // that are not dictionaries or lack the interval are skipped.
+  memory_dump_config_.triggers.clear();
+  const ListValue* trigger_list = nullptr;
+  if (memory_dump_config.GetList(kTriggersParam, &trigger_list) &&
+      trigger_list->GetSize() > 0) {
+    for (size_t i = 0; i < trigger_list->GetSize(); ++i) {
+      const DictionaryValue* trigger = nullptr;
+      if (!trigger_list->GetDictionary(i, &trigger))
+        continue;
+
+      int interval = 0;
+      if (!trigger->GetInteger(kPeriodicIntervalParam, &interval))
+        continue;
+
+      DCHECK_GT(interval, 0);
+      MemoryDumpConfig::Trigger dump_config;
+      dump_config.periodic_interval_ms = static_cast<uint32_t>(interval);
+      std::string level_of_detail_str;
+      trigger->GetString(kModeParam, &level_of_detail_str);
+      dump_config.level_of_detail =
+          StringToMemoryDumpLevelOfDetail(level_of_detail_str);
+      memory_dump_config_.triggers.push_back(dump_config);
+    }
+  }
+
+  // Set heap profiler options: a negative or missing threshold falls back to
+  // the default.
+  const DictionaryValue* heap_profiler_options = nullptr;
+  if (memory_dump_config.GetDictionary(kHeapProfilerOptions,
+                                       &heap_profiler_options)) {
+    int min_size_bytes = 0;
+    if (heap_profiler_options->GetInteger(kBreakdownThresholdBytes,
+                                          &min_size_bytes)
+        && min_size_bytes >= 0) {
+      memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes =
+          static_cast<size_t>(min_size_bytes);
+    } else {
+      memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes =
+          MemoryDumpConfig::HeapProfiler::kDefaultBreakdownThresholdBytes;
+    }
+  }
+}
+
+// Installs the default memory dump config: the heavy and light periodic
+// triggers, with every dump mode allowed.
+void TraceConfig::SetDefaultMemoryDumpConfig() {
+  memory_dump_config_.Clear();
+  memory_dump_config_.triggers.push_back(kDefaultHeavyMemoryDumpTrigger);
+  memory_dump_config_.triggers.push_back(kDefaultLightMemoryDumpTrigger);
+  memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
+}
+
+// Builds the JSON dictionary representation used by ToString().  Included
+// and disabled-by-default categories are serialized under the same
+// "included_categories" key; the memory dump config is emitted only when
+// the memory-infra category is enabled.
+std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
+  auto dict = MakeUnique<DictionaryValue>();
+  switch (record_mode_) {
+    case RECORD_UNTIL_FULL:
+      dict->SetString(kRecordModeParam, kRecordUntilFull);
+      break;
+    case RECORD_CONTINUOUSLY:
+      dict->SetString(kRecordModeParam, kRecordContinuously);
+      break;
+    case RECORD_AS_MUCH_AS_POSSIBLE:
+      dict->SetString(kRecordModeParam, kRecordAsMuchAsPossible);
+      break;
+    case ECHO_TO_CONSOLE:
+      dict->SetString(kRecordModeParam, kTraceToConsole);
+      break;
+    default:
+      NOTREACHED();
+  }
+
+  dict->SetBoolean(kEnableSamplingParam, enable_sampling_);
+  dict->SetBoolean(kEnableSystraceParam, enable_systrace_);
+  dict->SetBoolean(kEnableArgumentFilterParam, enable_argument_filter_);
+
+  // Disabled-by-default categories are folded back into the included list,
+  // mirroring how SetCategoriesFromIncludedList() splits them on input.
+  StringList categories(included_categories_);
+  categories.insert(categories.end(),
+                    disabled_categories_.begin(),
+                    disabled_categories_.end());
+  AddCategoryToDict(dict.get(), kIncludedCategoriesParam, categories);
+  AddCategoryToDict(dict.get(), kExcludedCategoriesParam, excluded_categories_);
+  AddCategoryToDict(dict.get(), kSyntheticDelaysParam, synthetic_delays_);
+
+  if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+    auto allowed_modes = MakeUnique<ListValue>();
+    for (auto dump_mode : memory_dump_config_.allowed_dump_modes)
+      allowed_modes->AppendString(MemoryDumpLevelOfDetailToString(dump_mode));
+
+    auto memory_dump_config = MakeUnique<DictionaryValue>();
+    memory_dump_config->Set(kAllowedDumpModesParam, std::move(allowed_modes));
+
+    auto triggers_list = MakeUnique<ListValue>();
+    for (const auto& config : memory_dump_config_.triggers) {
+      auto trigger_dict = MakeUnique<DictionaryValue>();
+      trigger_dict->SetInteger(kPeriodicIntervalParam,
+                               static_cast<int>(config.periodic_interval_ms));
+      trigger_dict->SetString(
+          kModeParam, MemoryDumpLevelOfDetailToString(config.level_of_detail));
+      triggers_list->Append(std::move(trigger_dict));
+    }
+
+    // Empty triggers will still be specified explicitly since it means that
+    // the periodic dumps are not enabled.
+    memory_dump_config->Set(kTriggersParam, std::move(triggers_list));
+
+    // Heap profiler options are serialized only when non-default.
+    if (memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes !=
+        MemoryDumpConfig::HeapProfiler::kDefaultBreakdownThresholdBytes) {
+      auto options = MakeUnique<DictionaryValue>();
+      options->SetInteger(
+          kBreakdownThresholdBytes,
+          memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
+      memory_dump_config->Set(kHeapProfilerOptions, std::move(options));
+    }
+    dict->Set(kMemoryDumpConfigParam, std::move(memory_dump_config));
+  }
+  return dict;
+}
+
+// Serializes the record mode and the enable-* flags into the legacy
+// comma-separated trace options string.
+std::string TraceConfig::ToTraceOptionsString() const {
+  std::string options;
+  switch (record_mode_) {
+    case RECORD_UNTIL_FULL:
+      options = kRecordUntilFull;
+      break;
+    case RECORD_CONTINUOUSLY:
+      options = kRecordContinuously;
+      break;
+    case RECORD_AS_MUCH_AS_POSSIBLE:
+      options = kRecordAsMuchAsPossible;
+      break;
+    case ECHO_TO_CONSOLE:
+      options = kTraceToConsole;
+      break;
+    default:
+      NOTREACHED();
+  }
+  if (enable_sampling_) {
+    options += ",";
+    options += kEnableSampling;
+  }
+  if (enable_systrace_) {
+    options += ",";
+    options += kEnableSystrace;
+  }
+  if (enable_argument_filter_) {
+    options += ",";
+    options += kEnableArgumentFilter;
+  }
+  return options;
+}
+
+// Appends |values| to |out| as a comma-separated list; each entry is
+// prefixed with '-' when |included| is false.
+void TraceConfig::WriteCategoryFilterString(const StringList& values,
+                                            std::string* out,
+                                            bool included) const {
+  // A comma is needed before every entry except the first one written into
+  // an initially empty |out|.
+  bool needs_comma = !out->empty();
+  for (const std::string& category : values) {
+    if (needs_comma)
+      StringAppendF(out, ",");
+    StringAppendF(out, "%s%s", (included ? "" : "-"), category.c_str());
+    needs_comma = true;
+  }
+}
+
+// Appends |delays| to |out| as a comma-separated list of
+// DELAY(name;options) entries.
+void TraceConfig::WriteCategoryFilterString(const StringList& delays,
+                                            std::string* out) const {
+  // A comma is needed before every entry except the first one written into
+  // an initially empty |out|.
+  bool needs_comma = !out->empty();
+  for (const std::string& category : delays) {
+    if (needs_comma)
+      StringAppendF(out, ",");
+    StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
+                  category.c_str());
+    needs_comma = true;
+  }
+}
+
+// Returns true if |category_name| matches a disabled- pattern or an included
+// pattern.  Disabled- patterns are checked first so that a "*" entry in the
+// included list does not pull in disabled-by-default categories.
+bool TraceConfig::IsCategoryEnabled(const char* category_name) const {
+  // Check the disabled- filters and the disabled-* wildcard first so that a
+  // "*" filter does not include the disabled.
+  for (const std::string& category : disabled_categories_) {
+    if (MatchPattern(category_name, category))
+      return true;
+  }
+
+  if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
+    return false;
+
+  for (const std::string& category : included_categories_) {
+    if (MatchPattern(category_name, category))
+      return true;
+  }
+
+  return false;
+}
+
+// True when |str| is empty or begins/ends with an ASCII space character.
+bool TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+    StringPiece str) {
+  if (str.empty())
+    return true;
+  return str.front() == ' ' || str.back() == ' ';
+}
+
+// True when the config names at least one explicitly included category.
+bool TraceConfig::HasIncludedPatterns() const {
+  return !included_categories_.empty();
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/trace_config.h b/libchrome/base/trace_event/trace_config.h
new file mode 100644
index 0000000..91d6f1f
--- /dev/null
+++ b/libchrome/base/trace_event/trace_config.h
@@ -0,0 +1,289 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_CONFIG_H_
+#define BASE_TRACE_EVENT_TRACE_CONFIG_H_
+
+#include <stdint.h>
+
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/strings/string_piece.h"
+#include "base/trace_event/memory_dump_request_args.h"
+#include "base/values.h"
+
+namespace base {
+namespace trace_event {
+
+class ConvertableToTraceFormat;
+
+// Options determines how the trace buffer stores data.
+// Each mode corresponds to one of the legacy option strings accepted by
+// TraceConfig ("record-until-full", "record-continuously", ...).
+enum TraceRecordMode {
+  // Record until the trace buffer is full.
+  RECORD_UNTIL_FULL,
+
+  // Record until the user ends the trace. The trace buffer is a fixed size
+  // and we use it as a ring buffer during recording.
+  RECORD_CONTINUOUSLY,
+
+  // Record until the trace buffer is full, but with a huge buffer size.
+  RECORD_AS_MUCH_AS_POSSIBLE,
+
+  // Echo to console. Events are discarded.
+  ECHO_TO_CONSOLE,
+};
+
+class BASE_EXPORT TraceConfig {
+ public:
+  using StringList = std::vector<std::string>;
+
+  // Specifies the memory dump config for tracing.
+  // Used only when "memory-infra" category is enabled.
+  struct BASE_EXPORT MemoryDumpConfig {
+    MemoryDumpConfig();
+    MemoryDumpConfig(const MemoryDumpConfig& other);
+    ~MemoryDumpConfig();
+
+    // Specifies the triggers in the memory dump config.
+    struct Trigger {
+      uint32_t periodic_interval_ms;
+      MemoryDumpLevelOfDetail level_of_detail;
+    };
+
+    // Specifies the configuration options for the heap profiler.
+    struct HeapProfiler {
+      // Default value for |breakdown_threshold_bytes|.
+      enum { kDefaultBreakdownThresholdBytes = 1024 };
+
+      HeapProfiler();
+
+      // Reset the options to default.
+      void Clear();
+
+      uint32_t breakdown_threshold_bytes;
+    };
+
+    // Reset the values in the config.
+    void Clear();
+
+    // Set of memory dump modes allowed for the tracing session. The explicitly
+    // triggered dumps will be successful only if the dump mode is allowed in
+    // the config.
+    std::set<MemoryDumpLevelOfDetail> allowed_dump_modes;
+
+    std::vector<Trigger> triggers;
+    HeapProfiler heap_profiler_options;
+  };
+
+  TraceConfig();
+
+  // Create TraceConfig object from category filter and trace options strings.
+  //
+  // |category_filter_string| is a comma-delimited list of category wildcards.
+  // A category can have an optional '-' prefix to make it an excluded category.
+  // All the same rules apply above, so for example, having both included and
+  // excluded categories in the same list would not be supported.
+  //
+  // Category filters can also be used to configure synthetic delays.
+  //
+  // |trace_options_string| is a comma-delimited list of trace options.
+  // Possible options are: "record-until-full", "record-continuously",
+  // "record-as-much-as-possible", "trace-to-console", "enable-sampling",
+  // "enable-systrace" and "enable-argument-filter".
+  // The first 4 options are trace recoding modes and hence
+  // mutually exclusive. If more than one trace recording modes appear in the
+  // options_string, the last one takes precedence. If none of the trace
+  // recording mode is specified, recording mode is RECORD_UNTIL_FULL.
+  //
+  // The trace option will first be reset to the default option
+  // (record_mode set to RECORD_UNTIL_FULL, enable_sampling, enable_systrace,
+  // and enable_argument_filter set to false) before options parsed from
+  // |trace_options_string| are applied on it. If |trace_options_string| is
+  // invalid, the final state of trace options is undefined.
+  //
+  // Example: TraceConfig("test_MyTest*", "record-until-full");
+  // Example: TraceConfig("test_MyTest*,test_OtherStuff",
+  //                      "record-continuously, enable-sampling");
+  // Example: TraceConfig("-excluded_category1,-excluded_category2",
+  //                      "record-until-full, trace-to-console");
+  //          would set ECHO_TO_CONSOLE as the recording mode.
+  // Example: TraceConfig("-*,webkit", "");
+  //          would disable everything but webkit; and use default options.
+  // Example: TraceConfig("-webkit", "");
+  //          would enable everything but webkit; and use default options.
+  // Example: TraceConfig("DELAY(gpu.PresentingFrame;16)", "");
+  //          would make swap buffers always take at least 16 ms; and use
+  //          default options.
+  // Example: TraceConfig("DELAY(gpu.PresentingFrame;16;oneshot)", "");
+  //          would make swap buffers take at least 16 ms the first time it is
+  //          called; and use default options.
+  // Example: TraceConfig("DELAY(gpu.PresentingFrame;16;alternating)", "");
+  //          would make swap buffers take at least 16 ms every other time it
+  //          is called; and use default options.
+  TraceConfig(StringPiece category_filter_string,
+              StringPiece trace_options_string);
+
+  // Like the two-string constructor above, but takes the record mode
+  // directly instead of parsing it from an options string.
+  TraceConfig(StringPiece category_filter_string, TraceRecordMode record_mode);
+
+  // Create TraceConfig object from the trace config string.
+  //
+  // |config_string| is a dictionary formatted as a JSON string, containing both
+  // category filters and trace options.
+  //
+  // Example:
+  //   {
+  //     "record_mode": "record-continuously",
+  //     "enable_sampling": true,
+  //     "enable_systrace": true,
+  //     "enable_argument_filter": true,
+  //     "included_categories": ["included",
+  //                             "inc_pattern*",
+  //                             "disabled-by-default-memory-infra"],
+  //     "excluded_categories": ["excluded", "exc_pattern*"],
+  //     "synthetic_delays": ["test.Delay1;16", "test.Delay2;32"],
+  //     "memory_dump_config": {
+  //       "triggers": [
+  //         {
+  //           "mode": "detailed",
+  //           "periodic_interval_ms": 2000
+  //         }
+  //       ]
+  //     }
+  //   }
+  //
+  // Note: memory_dump_config can be specified only if
+  // disabled-by-default-memory-infra category is enabled.
+  explicit TraceConfig(StringPiece config_string);
+
+  // Functionally identical to the above, but takes a parsed dictionary as input
+  // instead of its JSON serialization.
+  explicit TraceConfig(const DictionaryValue& config);
+
+  TraceConfig(const TraceConfig& tc);
+
+  ~TraceConfig();
+
+  TraceConfig& operator=(const TraceConfig& rhs);
+
+  // Return a list of the synthetic delays specified in this category filter.
+  const StringList& GetSyntheticDelayValues() const;
+
+  TraceRecordMode GetTraceRecordMode() const { return record_mode_; }
+  bool IsSamplingEnabled() const { return enable_sampling_; }
+  bool IsSystraceEnabled() const { return enable_systrace_; }
+  bool IsArgumentFilterEnabled() const { return enable_argument_filter_; }
+
+  void SetTraceRecordMode(TraceRecordMode mode) { record_mode_ = mode; }
+  void EnableSampling() { enable_sampling_ = true; }
+  void EnableSystrace() { enable_systrace_ = true; }
+  void EnableArgumentFilter() { enable_argument_filter_ = true; }
+
+  // Writes the string representation of the TraceConfig. The string is JSON
+  // formatted.
+  std::string ToString() const;
+
+  // Returns a copy of the TraceConfig wrapped in a ConvertableToTraceFormat
+  std::unique_ptr<ConvertableToTraceFormat> AsConvertableToTraceFormat() const;
+
+  // Write the string representation of the CategoryFilter part.
+  std::string ToCategoryFilterString() const;
+
+  // Returns true if at least one category in the list is enabled by this
+  // trace config. This is used to determine if the category filters are
+  // enabled in the TRACE_* macros.
+  bool IsCategoryGroupEnabled(const char* category_group) const;
+
+  // Merges config with the current TraceConfig
+  void Merge(const TraceConfig& config);
+
+  void Clear();
+
+  // Clears and resets the memory dump config.
+  void ResetMemoryDumpConfig(const MemoryDumpConfig& memory_dump_config);
+
+  const MemoryDumpConfig& memory_dump_config() const {
+    return memory_dump_config_;
+  }
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidLegacyFormat);
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
+                           TraceConfigFromInvalidLegacyStrings);
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidString);
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromInvalidString);
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
+                           IsEmptyOrContainsLeadingOrTrailingWhitespace);
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromMemoryConfigString);
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, LegacyStringToMemoryDumpConfig);
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, EmptyMemoryDumpConfigTest);
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
+                           EmptyAndAsteriskCategoryFilterString);
+
+  // The default trace config, used when none is provided.
+  // Allows all non-disabled-by-default categories through, except if they end
+  // in the suffix 'Debug' or 'Test'.
+  void InitializeDefault();
+
+  // Initialize from a config dictionary.
+  void InitializeFromConfigDict(const DictionaryValue& dict);
+
+  // Initialize from a config string.
+  void InitializeFromConfigString(StringPiece config_string);
+
+  // Initialize from category filter and trace options strings
+  void InitializeFromStrings(StringPiece category_filter_string,
+                             StringPiece trace_options_string);
+
+  void SetCategoriesFromIncludedList(const ListValue& included_list);
+  void SetCategoriesFromExcludedList(const ListValue& excluded_list);
+  void SetSyntheticDelaysFromList(const ListValue& list);
+  void AddCategoryToDict(DictionaryValue* dict,
+                         const char* param,
+                         const StringList& categories) const;
+
+  void SetMemoryDumpConfigFromConfigDict(
+      const DictionaryValue& memory_dump_config);
+  void SetDefaultMemoryDumpConfig();
+
+  std::unique_ptr<DictionaryValue> ToDict() const;
+
+  std::string ToTraceOptionsString() const;
+
+  void WriteCategoryFilterString(const StringList& values,
+                                 std::string* out,
+                                 bool included) const;
+  void WriteCategoryFilterString(const StringList& delays,
+                                 std::string* out) const;
+
+  // Returns true if the category is enabled according to this trace config.
+  // This tells whether a category is enabled from the TraceConfig's
+  // perspective. Please refer to IsCategoryGroupEnabled() to determine if a
+  // category is enabled from the tracing runtime's perspective.
+  bool IsCategoryEnabled(const char* category_name) const;
+
+  static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(StringPiece str);
+
+  bool HasIncludedPatterns() const;
+
+  TraceRecordMode record_mode_;
+  // Trace option flags (see the Enable* setters above), packed as bitfields.
+  bool enable_sampling_ : 1;
+  bool enable_systrace_ : 1;
+  bool enable_argument_filter_ : 1;
+
+  MemoryDumpConfig memory_dump_config_;
+
+  // Category lists parsed from the filter string / config dictionary.
+  StringList included_categories_;
+  StringList disabled_categories_;
+  StringList excluded_categories_;
+  StringList synthetic_delays_;
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_CONFIG_H_
diff --git a/libchrome/base/trace_event/trace_config_memory_test_util.h b/libchrome/base/trace_event/trace_config_memory_test_util.h
new file mode 100644
index 0000000..6b47f8d
--- /dev/null
+++ b/libchrome/base/trace_event/trace_config_memory_test_util.h
@@ -0,0 +1,103 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_CONFIG_MEMORY_TEST_UTIL_H_
+#define BASE_TRACE_EVENT_TRACE_CONFIG_MEMORY_TEST_UTIL_H_
+
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_dump_manager.h"
+
+namespace base {
+namespace trace_event {
+
+// Helpers that build serialized (JSON) trace-config strings enabling the
+// memory-infra trace category, for use in memory tracing tests.
+class TraceConfigMemoryTestUtil {
+ public:
+  // Config with a "light" periodic dump trigger every |light_period| ms and
+  // a "detailed" one every |heavy_period| ms, plus heap profiler options.
+  static std::string GetTraceConfig_PeriodicTriggers(int light_period,
+                                                     int heavy_period) {
+    return StringPrintf(
+        "{"
+          "\"enable_argument_filter\":false,"
+          "\"enable_sampling\":false,"
+          "\"enable_systrace\":false,"
+          "\"included_categories\":["
+            "\"%s\""
+          "],"
+          "\"memory_dump_config\":{"
+            "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+            "\"heap_profiler_options\":{"
+              "\"breakdown_threshold_bytes\":2048"
+            "},"
+            "\"triggers\":["
+              "{"
+                "\"mode\":\"light\","
+                "\"periodic_interval_ms\":%d"
+              "},"
+              "{"
+                "\"mode\":\"detailed\","
+                "\"periodic_interval_ms\":%d"
+              "}"
+            "]"
+          "},"
+          "\"record_mode\":\"record-until-full\""
+        "}", MemoryDumpManager::kTraceCategory, light_period, heavy_period);
+  }
+
+  // Config with a memory_dump_config whose trigger list is explicitly empty
+  // (i.e. periodic dumps disabled).
+  static std::string GetTraceConfig_EmptyTriggers() {
+    return StringPrintf(
+        "{"
+          "\"enable_argument_filter\":false,"
+          "\"enable_sampling\":false,"
+          "\"enable_systrace\":false,"
+          "\"included_categories\":["
+            "\"%s\""
+          "],"
+          "\"memory_dump_config\":{"
+            "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+            "\"triggers\":["
+            "]"
+          "},"
+          "\"record_mode\":\"record-until-full\""
+        "}", MemoryDumpManager::kTraceCategory);
+  }
+
+  // Legacy-style config: memory-infra category enabled but no
+  // memory_dump_config key at all.
+  static std::string GetTraceConfig_NoTriggers() {
+    return StringPrintf(
+        "{"
+          "\"enable_argument_filter\":false,"
+          "\"enable_sampling\":false,"
+          "\"enable_systrace\":false,"
+          "\"included_categories\":["
+            "\"%s\""
+          "],"
+          "\"record_mode\":\"record-until-full\""
+        "}", MemoryDumpManager::kTraceCategory);
+  }
+
+  // Config allowing only "background" dumps, triggered every |period_ms| ms.
+  static std::string GetTraceConfig_BackgroundTrigger(int period_ms) {
+    return StringPrintf(
+        "{"
+          "\"enable_argument_filter\":false,"
+          "\"enable_sampling\":false,"
+          "\"enable_systrace\":false,"
+          "\"included_categories\":["
+            "\"%s\""
+          "],"
+          "\"memory_dump_config\":{"
+            "\"allowed_dump_modes\":[\"background\"],"
+            "\"triggers\":["
+              "{"
+                "\"mode\":\"background\","
+                "\"periodic_interval_ms\":%d"
+              "}"
+            "]"
+          "},"
+          "\"record_mode\":\"record-until-full\""
+        "}", MemoryDumpManager::kTraceCategory, period_ms);
+  }
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_CONFIG_MEMORY_TEST_UTIL_H_
diff --git a/libchrome/base/trace_event/trace_config_unittest.cc b/libchrome/base/trace_event/trace_config_unittest.cc
new file mode 100644
index 0000000..4b46b2f
--- /dev/null
+++ b/libchrome/base/trace_event/trace_config_unittest.cc
@@ -0,0 +1,685 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include "base/json/json_reader.h"
+#include "base/macros.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/trace_config.h"
+#include "base/trace_event/trace_config_memory_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+const char kDefaultTraceConfigString[] =
+ "{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_sampling\":false,"
+ "\"enable_systrace\":false,"
+ "\"record_mode\":\"record-until-full\""
+ "}";
+
+const char kCustomTraceConfigString[] =
+ "{"
+ "\"enable_argument_filter\":true,"
+ "\"enable_sampling\":true,"
+ "\"enable_systrace\":true,"
+ "\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
+ "\"included_categories\":[\"included\","
+ "\"inc_pattern*\","
+ "\"disabled-by-default-cc\","
+ "\"disabled-by-default-memory-infra\"],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+ "\"heap_profiler_options\":{"
+ "\"breakdown_threshold_bytes\":10240"
+ "},"
+ "\"triggers\":["
+ "{\"mode\":\"light\",\"periodic_interval_ms\":50},"
+ "{\"mode\":\"detailed\",\"periodic_interval_ms\":1000}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-continuously\","
+ "\"synthetic_delays\":[\"test.Delay1;16\",\"test.Delay2;32\"]"
+ "}";
+
+void CheckDefaultTraceConfigBehavior(const TraceConfig& tc) {
+ EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+ EXPECT_FALSE(tc.IsSamplingEnabled());
+ EXPECT_FALSE(tc.IsSystraceEnabled());
+ EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+
+ // Default trace config enables every category filter except the
+ // disabled-by-default-* ones.
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("not-excluded-category"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-cc"));
+
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1,not-excluded-category"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1,disabled-by-default-cc"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled(
+ "disabled-by-default-cc,disabled-by-default-cc2"));
+}
+
+} // namespace
+
+TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
+ // From trace options strings
+ TraceConfig config("", "record-until-full");
+ EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
+ EXPECT_FALSE(config.IsSamplingEnabled());
+ EXPECT_FALSE(config.IsSystraceEnabled());
+ EXPECT_FALSE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
+
+ config = TraceConfig("", "record-continuously");
+ EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
+ EXPECT_FALSE(config.IsSamplingEnabled());
+ EXPECT_FALSE(config.IsSystraceEnabled());
+ EXPECT_FALSE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("record-continuously", config.ToTraceOptionsString().c_str());
+
+ config = TraceConfig("", "trace-to-console");
+ EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+ EXPECT_FALSE(config.IsSamplingEnabled());
+ EXPECT_FALSE(config.IsSystraceEnabled());
+ EXPECT_FALSE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
+
+ config = TraceConfig("", "record-as-much-as-possible");
+ EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
+ EXPECT_FALSE(config.IsSamplingEnabled());
+ EXPECT_FALSE(config.IsSystraceEnabled());
+ EXPECT_FALSE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("record-as-much-as-possible",
+ config.ToTraceOptionsString().c_str());
+
+ config = TraceConfig("", "record-until-full, enable-sampling");
+ EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
+ EXPECT_TRUE(config.IsSamplingEnabled());
+ EXPECT_FALSE(config.IsSystraceEnabled());
+ EXPECT_FALSE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("record-until-full,enable-sampling",
+ config.ToTraceOptionsString().c_str());
+
+ config = TraceConfig("", "enable-systrace, record-continuously");
+ EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
+ EXPECT_FALSE(config.IsSamplingEnabled());
+ EXPECT_TRUE(config.IsSystraceEnabled());
+ EXPECT_FALSE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("record-continuously,enable-systrace",
+ config.ToTraceOptionsString().c_str());
+
+ config = TraceConfig("", "enable-argument-filter,record-as-much-as-possible");
+ EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
+ EXPECT_FALSE(config.IsSamplingEnabled());
+ EXPECT_FALSE(config.IsSystraceEnabled());
+ EXPECT_TRUE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("record-as-much-as-possible,enable-argument-filter",
+ config.ToTraceOptionsString().c_str());
+
+ config = TraceConfig(
+ "",
+ "enable-systrace,trace-to-console,enable-sampling,enable-argument-filter");
+ EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+ EXPECT_TRUE(config.IsSamplingEnabled());
+ EXPECT_TRUE(config.IsSystraceEnabled());
+ EXPECT_TRUE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ(
+ "trace-to-console,enable-sampling,enable-systrace,enable-argument-filter",
+ config.ToTraceOptionsString().c_str());
+
+ config = TraceConfig(
+ "", "record-continuously, record-until-full, trace-to-console");
+ EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+ EXPECT_FALSE(config.IsSamplingEnabled());
+ EXPECT_FALSE(config.IsSystraceEnabled());
+ EXPECT_FALSE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
+
+ // From TraceRecordMode
+ config = TraceConfig("", RECORD_UNTIL_FULL);
+ EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
+ EXPECT_FALSE(config.IsSamplingEnabled());
+ EXPECT_FALSE(config.IsSystraceEnabled());
+ EXPECT_FALSE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
+
+ config = TraceConfig("", RECORD_CONTINUOUSLY);
+ EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
+ EXPECT_FALSE(config.IsSamplingEnabled());
+ EXPECT_FALSE(config.IsSystraceEnabled());
+ EXPECT_FALSE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("record-continuously", config.ToTraceOptionsString().c_str());
+
+ config = TraceConfig("", ECHO_TO_CONSOLE);
+ EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+ EXPECT_FALSE(config.IsSamplingEnabled());
+ EXPECT_FALSE(config.IsSystraceEnabled());
+ EXPECT_FALSE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
+
+ config = TraceConfig("", RECORD_AS_MUCH_AS_POSSIBLE);
+ EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
+ EXPECT_FALSE(config.IsSamplingEnabled());
+ EXPECT_FALSE(config.IsSystraceEnabled());
+ EXPECT_FALSE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("record-as-much-as-possible",
+ config.ToTraceOptionsString().c_str());
+
+ // From category filter strings
+ config = TraceConfig("included,-excluded,inc_pattern*,-exc_pattern*", "");
+ EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
+ config.ToCategoryFilterString().c_str());
+
+ config = TraceConfig("only_inc_cat", "");
+ EXPECT_STREQ("only_inc_cat", config.ToCategoryFilterString().c_str());
+
+ config = TraceConfig("-only_exc_cat", "");
+ EXPECT_STREQ("-only_exc_cat", config.ToCategoryFilterString().c_str());
+
+ config = TraceConfig("disabled-by-default-cc,-excluded", "");
+ EXPECT_STREQ("disabled-by-default-cc,-excluded",
+ config.ToCategoryFilterString().c_str());
+
+ config = TraceConfig("disabled-by-default-cc,included", "");
+ EXPECT_STREQ("included,disabled-by-default-cc",
+ config.ToCategoryFilterString().c_str());
+
+ config = TraceConfig("DELAY(test.Delay1;16),included", "");
+ EXPECT_STREQ("included,DELAY(test.Delay1;16)",
+ config.ToCategoryFilterString().c_str());
+
+ // From both trace options and category filter strings
+ config = TraceConfig("", "");
+ EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
+ EXPECT_FALSE(config.IsSamplingEnabled());
+ EXPECT_FALSE(config.IsSystraceEnabled());
+ EXPECT_FALSE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("", config.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
+
+ config = TraceConfig("included,-excluded,inc_pattern*,-exc_pattern*",
+ "enable-systrace, trace-to-console, enable-sampling");
+ EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+ EXPECT_TRUE(config.IsSamplingEnabled());
+ EXPECT_TRUE(config.IsSystraceEnabled());
+ EXPECT_FALSE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
+ config.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("trace-to-console,enable-sampling,enable-systrace",
+ config.ToTraceOptionsString().c_str());
+
+ // From both trace options and category filter strings with spaces.
+ config = TraceConfig(" included , -excluded, inc_pattern*, ,-exc_pattern* ",
+ "enable-systrace, ,trace-to-console, enable-sampling ");
+ EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+ EXPECT_TRUE(config.IsSamplingEnabled());
+ EXPECT_TRUE(config.IsSystraceEnabled());
+ EXPECT_FALSE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
+ config.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("trace-to-console,enable-sampling,enable-systrace",
+ config.ToTraceOptionsString().c_str());
+
+ // From category filter string and TraceRecordMode
+ config = TraceConfig("included,-excluded,inc_pattern*,-exc_pattern*",
+ RECORD_CONTINUOUSLY);
+ EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
+ EXPECT_FALSE(config.IsSystraceEnabled());
+ EXPECT_FALSE(config.IsSamplingEnabled());
+ EXPECT_FALSE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
+ config.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("record-continuously", config.ToTraceOptionsString().c_str());
+}
+
+TEST(TraceConfigTest, TraceConfigFromInvalidLegacyStrings) {
+ TraceConfig config("", "foo-bar-baz");
+ EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
+ EXPECT_FALSE(config.IsSamplingEnabled());
+ EXPECT_FALSE(config.IsSystraceEnabled());
+ EXPECT_FALSE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("", config.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
+
+ config = TraceConfig("arbitrary-category", "foo-bar-baz, enable-systrace");
+ EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
+ EXPECT_FALSE(config.IsSamplingEnabled());
+ EXPECT_TRUE(config.IsSystraceEnabled());
+ EXPECT_FALSE(config.IsArgumentFilterEnabled());
+ EXPECT_STREQ("arbitrary-category", config.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("record-until-full,enable-systrace",
+ config.ToTraceOptionsString().c_str());
+
+ const char* const configs[] = {
+ "",
+ "DELAY(",
+ "DELAY(;",
+ "DELAY(;)",
+ "DELAY(test.Delay)",
+ "DELAY(test.Delay;)"
+ };
+ for (size_t i = 0; i < arraysize(configs); i++) {
+ TraceConfig tc(configs[i], "");
+ EXPECT_EQ(0u, tc.GetSyntheticDelayValues().size());
+ }
+}
+
+TEST(TraceConfigTest, ConstructDefaultTraceConfig) {
+ TraceConfig tc;
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
+
+ // Constructors from category filter string and trace option string.
+ TraceConfig tc_asterisk("*", "");
+ EXPECT_STREQ("*", tc_asterisk.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc_asterisk);
+
+ TraceConfig tc_empty_category_filter("", "");
+ EXPECT_STREQ("", tc_empty_category_filter.ToCategoryFilterString().c_str());
+ EXPECT_STREQ(kDefaultTraceConfigString,
+ tc_empty_category_filter.ToString().c_str());
+ CheckDefaultTraceConfigBehavior(tc_empty_category_filter);
+
+  // Constructor from JSON formatted config string.
+ TraceConfig tc_empty_json_string("");
+ EXPECT_STREQ("", tc_empty_json_string.ToCategoryFilterString().c_str());
+ EXPECT_STREQ(kDefaultTraceConfigString,
+ tc_empty_json_string.ToString().c_str());
+ CheckDefaultTraceConfigBehavior(tc_empty_json_string);
+
+ // Constructor from dictionary value.
+ DictionaryValue dict;
+ TraceConfig tc_dict(dict);
+ EXPECT_STREQ("", tc_dict.ToCategoryFilterString().c_str());
+ EXPECT_STREQ(kDefaultTraceConfigString, tc_dict.ToString().c_str());
+ CheckDefaultTraceConfigBehavior(tc_dict);
+}
+
+TEST(TraceConfigTest, EmptyAndAsteriskCategoryFilterString) {
+ TraceConfig tc_empty("", "");
+ TraceConfig tc_asterisk("*", "");
+
+ EXPECT_STREQ("", tc_empty.ToCategoryFilterString().c_str());
+ EXPECT_STREQ("*", tc_asterisk.ToCategoryFilterString().c_str());
+
+ // Both fall back to default config.
+ CheckDefaultTraceConfigBehavior(tc_empty);
+ CheckDefaultTraceConfigBehavior(tc_asterisk);
+
+ // They differ only for internal checking.
+ EXPECT_FALSE(tc_empty.IsCategoryEnabled("Category1"));
+ EXPECT_FALSE(tc_empty.IsCategoryEnabled("not-excluded-category"));
+ EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("Category1"));
+ EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("not-excluded-category"));
+}
+
+TEST(TraceConfigTest, DisabledByDefaultCategoryFilterString) {
+ TraceConfig tc("foo,disabled-by-default-foo", "");
+ EXPECT_STREQ("foo,disabled-by-default-foo",
+ tc.ToCategoryFilterString().c_str());
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-foo"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-bar"));
+
+ // Enabling only the disabled-by-default-* category means the default ones
+ // are also enabled.
+ tc = TraceConfig("disabled-by-default-foo", "");
+ EXPECT_STREQ("disabled-by-default-foo", tc.ToCategoryFilterString().c_str());
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-foo"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-bar"));
+}
+
+TEST(TraceConfigTest, TraceConfigFromDict) {
+ // Passing in empty dictionary will result in default trace config.
+ DictionaryValue dict;
+ TraceConfig tc(dict);
+ EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
+ EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+ EXPECT_FALSE(tc.IsSamplingEnabled());
+ EXPECT_FALSE(tc.IsSystraceEnabled());
+ EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+
+ std::unique_ptr<Value> default_value(
+ JSONReader::Read(kDefaultTraceConfigString));
+ DCHECK(default_value);
+ const DictionaryValue* default_dict = nullptr;
+ bool is_dict = default_value->GetAsDictionary(&default_dict);
+ DCHECK(is_dict);
+ TraceConfig default_tc(*default_dict);
+ EXPECT_STREQ(kDefaultTraceConfigString, default_tc.ToString().c_str());
+ EXPECT_EQ(RECORD_UNTIL_FULL, default_tc.GetTraceRecordMode());
+ EXPECT_FALSE(default_tc.IsSamplingEnabled());
+ EXPECT_FALSE(default_tc.IsSystraceEnabled());
+ EXPECT_FALSE(default_tc.IsArgumentFilterEnabled());
+ EXPECT_STREQ("", default_tc.ToCategoryFilterString().c_str());
+
+ std::unique_ptr<Value> custom_value(
+ JSONReader::Read(kCustomTraceConfigString));
+ DCHECK(custom_value);
+ const DictionaryValue* custom_dict = nullptr;
+ is_dict = custom_value->GetAsDictionary(&custom_dict);
+ DCHECK(is_dict);
+ TraceConfig custom_tc(*custom_dict);
+ EXPECT_STREQ(kCustomTraceConfigString, custom_tc.ToString().c_str());
+ EXPECT_EQ(RECORD_CONTINUOUSLY, custom_tc.GetTraceRecordMode());
+ EXPECT_TRUE(custom_tc.IsSamplingEnabled());
+ EXPECT_TRUE(custom_tc.IsSystraceEnabled());
+ EXPECT_TRUE(custom_tc.IsArgumentFilterEnabled());
+ EXPECT_STREQ("included,inc_pattern*,"
+ "disabled-by-default-cc,disabled-by-default-memory-infra,"
+ "-excluded,-exc_pattern*,"
+ "DELAY(test.Delay1;16),DELAY(test.Delay2;32)",
+ custom_tc.ToCategoryFilterString().c_str());
+}
+
+TEST(TraceConfigTest, TraceConfigFromValidString) {
+ // Using some non-empty config string.
+ const char config_string[] =
+ "{"
+ "\"enable_argument_filter\":true,"
+ "\"enable_sampling\":true,"
+ "\"enable_systrace\":true,"
+ "\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
+ "\"included_categories\":[\"included\","
+ "\"inc_pattern*\","
+ "\"disabled-by-default-cc\"],"
+ "\"record_mode\":\"record-continuously\","
+ "\"synthetic_delays\":[\"test.Delay1;16\",\"test.Delay2;32\"]"
+ "}";
+ TraceConfig tc(config_string);
+
+ EXPECT_STREQ(config_string, tc.ToString().c_str());
+ EXPECT_EQ(RECORD_CONTINUOUSLY, tc.GetTraceRecordMode());
+ EXPECT_TRUE(tc.IsSamplingEnabled());
+ EXPECT_TRUE(tc.IsSystraceEnabled());
+ EXPECT_TRUE(tc.IsArgumentFilterEnabled());
+ EXPECT_STREQ("included,inc_pattern*,disabled-by-default-cc,-excluded,"
+ "-exc_pattern*,DELAY(test.Delay1;16),DELAY(test.Delay2;32)",
+ tc.ToCategoryFilterString().c_str());
+
+ EXPECT_TRUE(tc.IsCategoryEnabled("included"));
+ EXPECT_TRUE(tc.IsCategoryEnabled("inc_pattern_category"));
+ EXPECT_TRUE(tc.IsCategoryEnabled("disabled-by-default-cc"));
+ EXPECT_FALSE(tc.IsCategoryEnabled("excluded"));
+ EXPECT_FALSE(tc.IsCategoryEnabled("exc_pattern_category"));
+ EXPECT_FALSE(tc.IsCategoryEnabled("disabled-by-default-others"));
+ EXPECT_FALSE(tc.IsCategoryEnabled("not-excluded-nor-included"));
+
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("included"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("inc_pattern_category"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-cc"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("excluded"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("exc_pattern_category"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-others"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("not-excluded-nor-included"));
+
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("included,excluded"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("excluded,exc_pattern_category"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("included,DELAY(test.Delay1;16)"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("DELAY(test.Delay1;16)"));
+
+ EXPECT_EQ(2u, tc.GetSyntheticDelayValues().size());
+ EXPECT_STREQ("test.Delay1;16", tc.GetSyntheticDelayValues()[0].c_str());
+ EXPECT_STREQ("test.Delay2;32", tc.GetSyntheticDelayValues()[1].c_str());
+
+ const char config_string_2[] = "{\"included_categories\":[\"*\"]}";
+ TraceConfig tc2(config_string_2);
+ EXPECT_TRUE(tc2.IsCategoryEnabled("non-disabled-by-default-pattern"));
+ EXPECT_FALSE(tc2.IsCategoryEnabled("disabled-by-default-pattern"));
+ EXPECT_TRUE(tc2.IsCategoryGroupEnabled("non-disabled-by-default-pattern"));
+ EXPECT_FALSE(tc2.IsCategoryGroupEnabled("disabled-by-default-pattern"));
+
+ // Clear
+ tc.Clear();
+ EXPECT_STREQ(tc.ToString().c_str(),
+ "{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_sampling\":false,"
+ "\"enable_systrace\":false,"
+ "\"record_mode\":\"record-until-full\""
+ "}");
+}
+
+TEST(TraceConfigTest, TraceConfigFromInvalidString) {
+ // The config string needs to be a dictionary correctly formatted as a JSON
+ // string. Otherwise, it will fall back to the default initialization.
+ TraceConfig tc("");
+ EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
+ EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+ EXPECT_FALSE(tc.IsSamplingEnabled());
+ EXPECT_FALSE(tc.IsSystraceEnabled());
+ EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
+
+ tc = TraceConfig("This is an invalid config string.");
+ EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
+ EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+ EXPECT_FALSE(tc.IsSamplingEnabled());
+ EXPECT_FALSE(tc.IsSystraceEnabled());
+ EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
+
+ tc = TraceConfig("[\"This\", \"is\", \"not\", \"a\", \"dictionary\"]");
+ EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
+ EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+ EXPECT_FALSE(tc.IsSamplingEnabled());
+ EXPECT_FALSE(tc.IsSystraceEnabled());
+ EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
+
+ tc = TraceConfig("{\"record_mode\": invalid-value-needs-double-quote}");
+ EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
+ EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+ EXPECT_FALSE(tc.IsSamplingEnabled());
+ EXPECT_FALSE(tc.IsSystraceEnabled());
+ EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
+
+  // If the config string is a dictionary formatted as a JSON string, it will
+  // initialize TraceConfig with best effort.
+ tc = TraceConfig("{}");
+ EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+ EXPECT_FALSE(tc.IsSamplingEnabled());
+ EXPECT_FALSE(tc.IsSystraceEnabled());
+ EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
+
+ tc = TraceConfig("{\"arbitrary-key\":\"arbitrary-value\"}");
+ EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+ EXPECT_FALSE(tc.IsSamplingEnabled());
+ EXPECT_FALSE(tc.IsSystraceEnabled());
+ EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+ EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+ CheckDefaultTraceConfigBehavior(tc);
+
+ const char invalid_config_string[] =
+ "{"
+ "\"enable_sampling\":\"true\","
+ "\"enable_systrace\":1,"
+ "\"excluded_categories\":[\"excluded\"],"
+ "\"included_categories\":\"not a list\","
+ "\"record_mode\":\"arbitrary-mode\","
+ "\"synthetic_delays\":[\"test.Delay1;16\","
+ "\"invalid-delay\","
+ "\"test.Delay2;32\"]"
+ "}";
+ tc = TraceConfig(invalid_config_string);
+ EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+ EXPECT_FALSE(tc.IsSamplingEnabled());
+ EXPECT_FALSE(tc.IsSystraceEnabled());
+ EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+ EXPECT_STREQ("-excluded,DELAY(test.Delay1;16),DELAY(test.Delay2;32)",
+ tc.ToCategoryFilterString().c_str());
+
+ const char invalid_config_string_2[] =
+ "{"
+ "\"included_categories\":[\"category\",\"disabled-by-default-pattern\"],"
+ "\"excluded_categories\":[\"category\",\"disabled-by-default-pattern\"]"
+ "}";
+ tc = TraceConfig(invalid_config_string_2);
+ EXPECT_TRUE(tc.IsCategoryEnabled("category"));
+ EXPECT_TRUE(tc.IsCategoryEnabled("disabled-by-default-pattern"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("category"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-pattern"));
+}
+
+TEST(TraceConfigTest, MergingTraceConfigs) {
+ // Merge
+ TraceConfig tc;
+ TraceConfig tc2("included,-excluded,inc_pattern*,-exc_pattern*", "");
+ tc.Merge(tc2);
+ EXPECT_STREQ("{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_sampling\":false,"
+ "\"enable_systrace\":false,"
+ "\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ tc.ToString().c_str());
+
+ tc = TraceConfig("DELAY(test.Delay1;16)", "");
+ tc2 = TraceConfig("DELAY(test.Delay2;32)", "");
+ tc.Merge(tc2);
+ EXPECT_EQ(2u, tc.GetSyntheticDelayValues().size());
+ EXPECT_STREQ("test.Delay1;16", tc.GetSyntheticDelayValues()[0].c_str());
+ EXPECT_STREQ("test.Delay2;32", tc.GetSyntheticDelayValues()[1].c_str());
+}
+
+TEST(TraceConfigTest, IsCategoryGroupEnabled) {
+  // Enabling a disabled-by-default category alone does not require other
+  // categories to be explicitly included in order to be traced.
+ TraceConfig tc("disabled-by-default-cc,-excluded", "");
+ EXPECT_STREQ("disabled-by-default-cc,-excluded",
+ tc.ToCategoryFilterString().c_str());
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-cc"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("some_other_group"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("excluded"));
+
+  // Enabling a disabled-by-default category together with an included category
+  // means all other categories must be explicitly included to be traced.
+ tc = TraceConfig("disabled-by-default-cc,included", "");
+ EXPECT_STREQ("included,disabled-by-default-cc",
+ tc.ToCategoryFilterString().c_str());
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-cc"));
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled("included"));
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("other_included"));
+
+  // Excluding categories won't enable disabled-by-default ones when the
+  // excluded category is also present in the group.
+ tc = TraceConfig("-excluded", "");
+ EXPECT_STREQ("-excluded", tc.ToCategoryFilterString().c_str());
+ EXPECT_FALSE(tc.IsCategoryGroupEnabled("excluded,disabled-by-default-cc"));
+}
+
+TEST(TraceConfigTest, IsEmptyOrContainsLeadingOrTrailingWhitespace) {
+ // Test that IsEmptyOrContainsLeadingOrTrailingWhitespace actually catches
+ // categories that are explicitly forbidden.
+ // This method is called in a DCHECK to assert that we don't have these types
+ // of strings as categories.
+ EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ " bad_category "));
+ EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ " bad_category"));
+ EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ "bad_category "));
+ EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ " bad_category"));
+ EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ "bad_category "));
+ EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ " bad_category "));
+ EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ ""));
+ EXPECT_FALSE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ "good_category"));
+}
+
+TEST(TraceConfigTest, SetTraceOptionValues) {
+ TraceConfig tc;
+ EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+ EXPECT_FALSE(tc.IsSamplingEnabled());
+ EXPECT_FALSE(tc.IsSystraceEnabled());
+
+ tc.SetTraceRecordMode(RECORD_AS_MUCH_AS_POSSIBLE);
+ EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, tc.GetTraceRecordMode());
+
+ tc.EnableSampling();
+ EXPECT_TRUE(tc.IsSamplingEnabled());
+
+ tc.EnableSystrace();
+ EXPECT_TRUE(tc.IsSystraceEnabled());
+}
+
+TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
+ std::string tc_str1 =
+ TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(200, 2000);
+ TraceConfig tc1(tc_str1);
+ EXPECT_EQ(tc_str1, tc1.ToString());
+ EXPECT_TRUE(tc1.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+ ASSERT_EQ(2u, tc1.memory_dump_config_.triggers.size());
+
+ EXPECT_EQ(200u, tc1.memory_dump_config_.triggers[0].periodic_interval_ms);
+ EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
+ tc1.memory_dump_config_.triggers[0].level_of_detail);
+
+ EXPECT_EQ(2000u, tc1.memory_dump_config_.triggers[1].periodic_interval_ms);
+ EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
+ tc1.memory_dump_config_.triggers[1].level_of_detail);
+ EXPECT_EQ(
+ 2048u,
+ tc1.memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
+
+ std::string tc_str2 =
+ TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
+ 1 /* period_ms */);
+ TraceConfig tc2(tc_str2);
+ EXPECT_EQ(tc_str2, tc2.ToString());
+ EXPECT_TRUE(tc2.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+ ASSERT_EQ(1u, tc2.memory_dump_config_.triggers.size());
+ EXPECT_EQ(1u, tc2.memory_dump_config_.triggers[0].periodic_interval_ms);
+ EXPECT_EQ(MemoryDumpLevelOfDetail::BACKGROUND,
+ tc2.memory_dump_config_.triggers[0].level_of_detail);
+}
+
+TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
+ // Empty trigger list should also be specified when converting back to string.
+ TraceConfig tc(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers());
+ EXPECT_EQ(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers(),
+ tc.ToString());
+ EXPECT_EQ(0u, tc.memory_dump_config_.triggers.size());
+ EXPECT_EQ(TraceConfig::MemoryDumpConfig::HeapProfiler
+ ::kDefaultBreakdownThresholdBytes,
+ tc.memory_dump_config_.heap_profiler_options
+ .breakdown_threshold_bytes);
+}
+
+TEST(TraceConfigTest, LegacyStringToMemoryDumpConfig) {
+ TraceConfig tc(MemoryDumpManager::kTraceCategory, "");
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+ EXPECT_NE(std::string::npos, tc.ToString().find("memory_dump_config"));
+ EXPECT_EQ(2u, tc.memory_dump_config_.triggers.size());
+ EXPECT_EQ(TraceConfig::MemoryDumpConfig::HeapProfiler
+ ::kDefaultBreakdownThresholdBytes,
+ tc.memory_dump_config_.heap_profiler_options
+ .breakdown_threshold_bytes);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/trace_event.gypi b/libchrome/base/trace_event/trace_event.gypi
new file mode 100644
index 0000000..f915780
--- /dev/null
+++ b/libchrome/base/trace_event/trace_event.gypi
@@ -0,0 +1,107 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'trace_event_sources' : [
+ 'trace_event/blame_context.cc',
+ 'trace_event/blame_context.h',
+ 'trace_event/common/trace_event_common.h',
+ 'trace_event/heap_profiler.h',
+ 'trace_event/heap_profiler_allocation_context.cc',
+ 'trace_event/heap_profiler_allocation_context.h',
+ 'trace_event/heap_profiler_allocation_context_tracker.cc',
+ 'trace_event/heap_profiler_allocation_context_tracker.h',
+ 'trace_event/heap_profiler_allocation_register.cc',
+ 'trace_event/heap_profiler_allocation_register_posix.cc',
+ 'trace_event/heap_profiler_allocation_register_win.cc',
+ 'trace_event/heap_profiler_allocation_register.h',
+ 'trace_event/heap_profiler_heap_dump_writer.cc',
+ 'trace_event/heap_profiler_heap_dump_writer.h',
+ 'trace_event/heap_profiler_stack_frame_deduplicator.cc',
+ 'trace_event/heap_profiler_stack_frame_deduplicator.h',
+ 'trace_event/heap_profiler_type_name_deduplicator.cc',
+ 'trace_event/heap_profiler_type_name_deduplicator.h',
+ 'trace_event/java_heap_dump_provider_android.cc',
+ 'trace_event/java_heap_dump_provider_android.h',
+ 'trace_event/memory_allocator_dump.cc',
+ 'trace_event/memory_allocator_dump.h',
+ 'trace_event/memory_allocator_dump_guid.cc',
+ 'trace_event/memory_allocator_dump_guid.h',
+ 'trace_event/memory_dump_manager.cc',
+ 'trace_event/memory_dump_manager.h',
+ 'trace_event/memory_dump_provider.h',
+ 'trace_event/memory_dump_request_args.cc',
+ 'trace_event/memory_dump_request_args.h',
+ 'trace_event/memory_dump_session_state.cc',
+ 'trace_event/memory_dump_session_state.h',
+ 'trace_event/memory_infra_background_whitelist.cc',
+ 'trace_event/memory_infra_background_whitelist.h',
+ 'trace_event/process_memory_dump.cc',
+ 'trace_event/process_memory_dump.h',
+ 'trace_event/process_memory_maps.cc',
+ 'trace_event/process_memory_maps.h',
+ 'trace_event/process_memory_totals.cc',
+ 'trace_event/process_memory_totals.h',
+ 'trace_event/trace_buffer.cc',
+ 'trace_event/trace_buffer.h',
+ 'trace_event/trace_config.cc',
+ 'trace_event/trace_config.h',
+ 'trace_event/trace_event.h',
+ 'trace_event/trace_event_android.cc',
+ 'trace_event/trace_event_argument.cc',
+ 'trace_event/trace_event_argument.h',
+ 'trace_event/trace_event_etw_export_win.cc',
+ 'trace_event/trace_event_etw_export_win.h',
+ 'trace_event/trace_event_impl.cc',
+ 'trace_event/trace_event_impl.h',
+ 'trace_event/trace_event_memory_overhead.cc',
+ 'trace_event/trace_event_memory_overhead.h',
+ 'trace_event/trace_event_synthetic_delay.cc',
+ 'trace_event/trace_event_synthetic_delay.h',
+ 'trace_event/trace_event_system_stats_monitor.cc',
+ 'trace_event/trace_event_system_stats_monitor.h',
+ 'trace_event/trace_log.cc',
+ 'trace_event/trace_log.h',
+ 'trace_event/trace_log_constants.cc',
+ 'trace_event/trace_sampling_thread.cc',
+ 'trace_event/trace_sampling_thread.h',
+ 'trace_event/tracing_agent.cc',
+ 'trace_event/tracing_agent.h',
+ 'trace_event/winheap_dump_provider_win.cc',
+ 'trace_event/winheap_dump_provider_win.h',
+ ],
+ 'trace_event_test_sources' : [
+ 'trace_event/blame_context_unittest.cc',
+ 'trace_event/heap_profiler_allocation_context_tracker_unittest.cc',
+ 'trace_event/heap_profiler_allocation_register_unittest.cc',
+ 'trace_event/heap_profiler_heap_dump_writer_unittest.cc',
+ 'trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc',
+ 'trace_event/heap_profiler_type_name_deduplicator_unittest.cc',
+ 'trace_event/java_heap_dump_provider_android_unittest.cc',
+ 'trace_event/memory_allocator_dump_unittest.cc',
+ 'trace_event/memory_dump_manager_unittest.cc',
+ 'trace_event/process_memory_dump_unittest.cc',
+ 'trace_event/trace_config_memory_test_util.h',
+ 'trace_event/trace_config_unittest.cc',
+ 'trace_event/trace_event_argument_unittest.cc',
+ 'trace_event/trace_event_synthetic_delay_unittest.cc',
+ 'trace_event/trace_event_system_stats_monitor_unittest.cc',
+ 'trace_event/trace_event_unittest.cc',
+ 'trace_event/winheap_dump_provider_win_unittest.cc',
+ ],
+ 'conditions': [
+ ['OS == "linux" or OS=="android" or OS=="mac" or OS=="ios"', {
+ 'trace_event_sources': [
+ 'trace_event/malloc_dump_provider.cc',
+ 'trace_event/malloc_dump_provider.h',
+ ],
+ }],
+ ['OS == "android"', {
+ 'trace_event_test_sources' : [
+ 'trace_event/trace_event_android_unittest.cc',
+ ],
+ }],
+ ],
+ },
+}
diff --git a/libchrome/base/trace_event/trace_event.h b/libchrome/base/trace_event/trace_event.h
new file mode 100644
index 0000000..a075898
--- /dev/null
+++ b/libchrome/base/trace_event/trace_event.h
@@ -0,0 +1,1091 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_H_
+
+// This header file defines implementation details of how the trace macros in
+// trace_event_common.h collect and store trace events. Anything not
+// implementation-specific should go in trace_event_common.h instead of here.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+
+#include "base/atomicops.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "base/trace_event/common/trace_event_common.h"
+#include "base/trace_event/heap_profiler.h"
+#include "base/trace_event/trace_event_system_stats_monitor.h"
+#include "base/trace_event/trace_log.h"
+#include "build/build_config.h"
+
+// By default, const char* argument values are assumed to have long-lived scope
+// and will not be copied. Use this macro to force a const char* to be copied.
+#define TRACE_STR_COPY(str) \
+ trace_event_internal::TraceStringWithCopy(str)
+
+// By default, uint64_t ID argument values are not mangled with the Process ID
+// in TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling.
+#define TRACE_ID_MANGLE(id) \
+ trace_event_internal::TraceID::ForceMangle(id)
+
+// By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC
+// macros. Use this macro to prevent Process ID mangling.
+#define TRACE_ID_DONT_MANGLE(id) \
+ trace_event_internal::TraceID::DontMangle(id)
+
+// By default, trace IDs are eventually converted to a single 64-bit number. Use
+// this macro to add a scope string.
+#define TRACE_ID_WITH_SCOPE(scope, id) \
+ trace_event_internal::TraceID::WithScope(scope, id)
+
+// Sets the current sample state to the given category and name (both must be
+// constant strings). These states are intended for a sampling profiler.
+// Implementation note: we store category and name together because we don't
+// want the inconsistency/expense of storing two pointers.
+// |thread_bucket| is [0..2] and is used to statically isolate samples in one
+// thread from others.
+#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET( \
+ bucket_number, category, name) \
+ trace_event_internal:: \
+ TraceEventSamplingStateScope<bucket_number>::Set(category "\0" name)
+
+// Returns a current sampling state of the given bucket.
+#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
+ trace_event_internal::TraceEventSamplingStateScope<bucket_number>::Current()
+
+// Creates a scope of a sampling state of the given bucket.
+//
+// { // The sampling state is set within this scope.
+// TRACE_EVENT_SAMPLING_STATE_SCOPE_FOR_BUCKET(0, "category", "name");
+// ...;
+// }
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET( \
+ bucket_number, category, name) \
+ trace_event_internal::TraceEventSamplingStateScope<bucket_number> \
+ traceEventSamplingScope(category "\0" name);
+
+#define TRACE_EVENT_API_CURRENT_THREAD_ID \
+ static_cast<int>(base::PlatformThread::CurrentId())
+
+#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
+ UNLIKELY(*INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
+ (base::trace_event::TraceLog::ENABLED_FOR_RECORDING | \
+ base::trace_event::TraceLog::ENABLED_FOR_EVENT_CALLBACK | \
+ base::trace_event::TraceLog::ENABLED_FOR_ETW_EXPORT))
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementation specific tracing API definitions.
+
+// Get a pointer to the enabled state of the given trace category. Only
+// long-lived literal strings should be given as the category group. The
+// returned pointer can be held permanently in a local static for example. If
+// the unsigned char is non-zero, tracing is enabled. If tracing is enabled,
+// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
+// between the load of the tracing state and the call to
+// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
+// for best performance when tracing is disabled.
+// const unsigned char*
+// TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group)
+#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \
+ base::trace_event::TraceLog::GetCategoryGroupEnabled
+
+// Get the number of times traces have been recorded. This is used to implement
+// the TRACE_EVENT_IS_NEW_TRACE facility.
+// unsigned int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED()
+#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED \
+ base::trace_event::TraceLog::GetInstance()->GetNumTracesRecorded
+
+// Add a trace event to the platform tracing system.
+// base::trace_event::TraceEventHandle TRACE_EVENT_API_ADD_TRACE_EVENT(
+// char phase,
+// const unsigned char* category_group_enabled,
+// const char* name,
+// const char* scope,
+// unsigned long long id,
+// int num_args,
+// const char** arg_names,
+// const unsigned char* arg_types,
+// const unsigned long long* arg_values,
+// std::unique_ptr<ConvertableToTraceFormat>*
+// convertable_values,
+// unsigned int flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT \
+ base::trace_event::TraceLog::GetInstance()->AddTraceEvent
+
+// Add a trace event to the platform tracing system.
+// base::trace_event::TraceEventHandle
+// TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_BIND_ID(
+// char phase,
+// const unsigned char* category_group_enabled,
+// const char* name,
+// const char* scope,
+// unsigned long long id,
+// unsigned long long bind_id,
+// int num_args,
+// const char** arg_names,
+// const unsigned char* arg_types,
+// const unsigned long long* arg_values,
+// std::unique_ptr<ConvertableToTraceFormat>*
+// convertable_values,
+// unsigned int flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_BIND_ID \
+ base::trace_event::TraceLog::GetInstance()->AddTraceEventWithBindId
+
+// Add a trace event to the platform tracing system overriding the pid.
+// The resulting event will have tid = pid == (process_id passed here).
+// base::trace_event::TraceEventHandle
+// TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID(
+// char phase,
+// const unsigned char* category_group_enabled,
+// const char* name,
+// const char* scope,
+// unsigned long long id,
+// int process_id,
+// int num_args,
+// const char** arg_names,
+// const unsigned char* arg_types,
+// const unsigned long long* arg_values,
+// std::unique_ptr<ConvertableToTraceFormat>*
+// convertable_values,
+// unsigned int flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID \
+ base::trace_event::TraceLog::GetInstance()->AddTraceEventWithProcessId
+
+// Add a trace event to the platform tracing system.
+// base::trace_event::TraceEventHandle
+// TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_TIMESTAMP(
+// char phase,
+// const unsigned char* category_group_enabled,
+// const char* name,
+// const char* scope,
+// unsigned long long id,
+// int thread_id,
+// const TimeTicks& timestamp,
+// int num_args,
+// const char** arg_names,
+// const unsigned char* arg_types,
+// const unsigned long long* arg_values,
+// std::unique_ptr<ConvertableToTraceFormat>*
+// convertable_values,
+// unsigned int flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP \
+ base::trace_event::TraceLog::GetInstance() \
+ ->AddTraceEventWithThreadIdAndTimestamp
+
+// Set the duration field of a COMPLETE trace event.
+// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+// const unsigned char* category_group_enabled,
+// const char* name,
+// base::trace_event::TraceEventHandle id)
+#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
+ base::trace_event::TraceLog::GetInstance()->UpdateTraceEventDuration
+
+// Adds a metadata event to the trace log. The |AppendValueAsTraceFormat| method
+// on the convertable value will be called at flush time.
+// TRACE_EVENT_API_ADD_METADATA_EVENT(
+// const unsigned char* category_group_enabled,
+// const char* event_name,
+// const char* arg_name,
+// std::unique_ptr<ConvertableToTraceFormat> arg_value)
+#define TRACE_EVENT_API_ADD_METADATA_EVENT \
+ trace_event_internal::AddMetadataEvent
+
+// Defines atomic operations used internally by the tracing system.
+#define TRACE_EVENT_API_ATOMIC_WORD base::subtle::AtomicWord
+#define TRACE_EVENT_API_ATOMIC_LOAD(var) base::subtle::NoBarrier_Load(&(var))
+#define TRACE_EVENT_API_ATOMIC_STORE(var, value) \
+ base::subtle::NoBarrier_Store(&(var), (value))
+
+// Defines visibility for classes in trace_event.h
+#define TRACE_EVENT_API_CLASS_EXPORT BASE_EXPORT
+
+// The thread buckets for the sampling profiler.
+TRACE_EVENT_API_CLASS_EXPORT extern \
+ TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
+
+#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \
+ g_trace_state[thread_bucket]
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Implementation detail: trace event macros create temporary variables
+// to keep instrumentation overhead low. These macros give each temporary
+// variable a unique name based on the line number to prevent name collisions.
+#define INTERNAL_TRACE_EVENT_UID3(a,b) \
+ trace_event_unique_##a##b
+#define INTERNAL_TRACE_EVENT_UID2(a,b) \
+ INTERNAL_TRACE_EVENT_UID3(a,b)
+#define INTERNAL_TRACE_EVENT_UID(name_prefix) \
+ INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
+
+// Implementation detail: internal macro to create static category.
+// No barriers are needed, because this code is designed to operate safely
+// even when the unsigned char* points to garbage data (which may be the case
+// on processors without cache coherency).
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
+ category_group, atomic, category_group_enabled) \
+ category_group_enabled = \
+ reinterpret_cast<const unsigned char*>(TRACE_EVENT_API_ATOMIC_LOAD( \
+ atomic)); \
+ if (UNLIKELY(!category_group_enabled)) { \
+ category_group_enabled = \
+ TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \
+ TRACE_EVENT_API_ATOMIC_STORE(atomic, \
+ reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( \
+ category_group_enabled)); \
+ }
+
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \
+ static TRACE_EVENT_API_ATOMIC_WORD INTERNAL_TRACE_EVENT_UID(atomic) = 0; \
+ const unsigned char* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(category_group, \
+ INTERNAL_TRACE_EVENT_UID(atomic), \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled));
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ trace_event_internal::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+ flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add begin
+// event if the category is enabled. Also adds the end event when the scope
+// ends.
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ base::trace_event::TraceEventHandle h = \
+ trace_event_internal::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+ TRACE_EVENT_FLAG_NONE, trace_event_internal::kNoId, \
+ ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
+ }
+
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW( \
+ category_group, name, bind_id, flow_flags, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned int trace_event_flags = flow_flags; \
+ trace_event_internal::TraceID trace_event_bind_id(bind_id, \
+ &trace_event_flags); \
+ base::trace_event::TraceEventHandle h = \
+ trace_event_internal::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+ trace_event_flags, trace_event_bind_id.raw_id(), ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
+ }
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
+ flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned int trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+ trace_event_internal::TraceID trace_event_trace_id( \
+ id, &trace_event_flags); \
+ trace_event_internal::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
+ name, trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
+ trace_event_flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(phase, category_group, name, \
+ timestamp, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, \
+ base::TimeTicks::FromInternalValue(timestamp), \
+ flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
+ trace_event_internal::kNoId, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ phase, category_group, name, id, thread_id, timestamp, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned int trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+ trace_event_internal::TraceID trace_event_trace_id(id, \
+ &trace_event_flags); \
+ trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
+ thread_id, base::TimeTicks::FromInternalValue(timestamp), \
+ trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
+ trace_event_internal::kNoId, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add
+// metadata event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ TRACE_EVENT_API_ADD_METADATA_EVENT( \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to enter and leave a
+// context based on the current scope.
+#define INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
+ struct INTERNAL_TRACE_EVENT_UID(ScopedContext) { \
+ public: \
+ INTERNAL_TRACE_EVENT_UID(ScopedContext)(uint64_t cid) : cid_(cid) { \
+ TRACE_EVENT_ENTER_CONTEXT(category_group, name, cid_); \
+ } \
+ ~INTERNAL_TRACE_EVENT_UID(ScopedContext)() { \
+ TRACE_EVENT_LEAVE_CONTEXT(category_group, name, cid_); \
+ } \
+ \
+ private: \
+ uint64_t cid_; \
+ /* Local class friendly DISALLOW_COPY_AND_ASSIGN */ \
+ INTERNAL_TRACE_EVENT_UID(ScopedContext) \
+ (const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {}; \
+ void operator=(const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {}; \
+ }; \
+ INTERNAL_TRACE_EVENT_UID(ScopedContext) \
+ INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id());
+
+// Implementation detail: internal macro to trace a task execution with the
+// location where it was posted from.
+#define INTERNAL_TRACE_TASK_EXECUTION(run_function, task) \
+ TRACE_EVENT2("toplevel", run_function, "src_file", \
+ (task).posted_from.file_name(), "src_func", \
+ (task).posted_from.function_name()); \
+ TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION INTERNAL_TRACE_EVENT_UID( \
+ task_event)((task).posted_from.file_name());
+
+namespace trace_event_internal {
+
+// Specify these values when the corresponding argument of AddTraceEvent is not
+// used.
+const int kZeroNumArgs = 0;
+const std::nullptr_t kGlobalScope = nullptr;  // No scope string for the ID.
+const unsigned long long kNoId = 0;
+
+// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
+// are by default mangled with the Process ID so that they are unlikely to
+// collide when the same pointer is used on different processes.
+class TraceID {
+ public:
+  // An ID paired with an optional scope string (produced by the
+  // TRACE_ID_WITH_SCOPE macro).
+  class WithScope {
+   public:
+    WithScope(const char* scope, unsigned long long raw_id)
+        : scope_(scope), raw_id_(raw_id) {}
+    unsigned long long raw_id() const { return raw_id_; }
+    const char* scope() const { return scope_; }
+   private:
+    const char* scope_ = nullptr;
+    unsigned long long raw_id_;
+  };
+
+  // Wrapper that suppresses process-ID mangling for an ID (including
+  // pointer-derived IDs); produced by the TRACE_ID_DONT_MANGLE macro.
+  class DontMangle {
+   public:
+    explicit DontMangle(const void* raw_id)
+        : raw_id_(static_cast<unsigned long long>(
+              reinterpret_cast<uintptr_t>(raw_id))) {}
+    explicit DontMangle(unsigned long long raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned long raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned short raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(long long raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(long raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(int raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(short raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(signed char raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(WithScope scoped_id)
+        : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
+    const char* scope() const { return scope_; }
+    unsigned long long raw_id() const { return raw_id_; }
+   private:
+    const char* scope_ = nullptr;
+    unsigned long long raw_id_;
+  };
+
+  // Wrapper that forces process-ID mangling for an integer ID; produced by
+  // the TRACE_ID_MANGLE macro.
+  class ForceMangle {
+   public:
+    explicit ForceMangle(unsigned long long raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned long raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned short raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(long long raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit ForceMangle(long raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit ForceMangle(int raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit ForceMangle(short raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit ForceMangle(signed char raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    unsigned long long raw_id() const { return raw_id_; }
+   private:
+    unsigned long long raw_id_;
+  };
+  // Pointer IDs are mangled by default: the pointer value is stored and
+  // TRACE_EVENT_FLAG_MANGLE_ID is OR-ed into |*flags|.
+  TraceID(const void* raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(
+            reinterpret_cast<uintptr_t>(raw_id))) {
+    *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+  }
+  TraceID(ForceMangle raw_id, unsigned int* flags) : raw_id_(raw_id.raw_id()) {
+    *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+  }
+  TraceID(DontMangle maybe_scoped_id, unsigned int* /*flags*/)
+      : scope_(maybe_scoped_id.scope()), raw_id_(maybe_scoped_id.raw_id()) {}
+  // Plain integer IDs are never mangled; |flags| is intentionally untouched
+  // ((void)flags silences unused-parameter warnings).
+  TraceID(unsigned long long raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(unsigned long raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(unsigned int raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(unsigned short raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(unsigned char raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(long long raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(long raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(int raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(short raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(signed char raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(WithScope scoped_id, unsigned int* /*flags*/)
+      : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
+
+  unsigned long long raw_id() const { return raw_id_; }
+  const char* scope() const { return scope_; }
+
+ private:
+  const char* scope_ = nullptr;
+  unsigned long long raw_id_;
+};
+
+// Simple union to store various types as unsigned long long. SetTraceValue
+// below writes one member and reads back |as_uint| to obtain the raw bits
+// passed through the tracing API.
+union TraceValueUnion {
+  bool as_bool;
+  unsigned long long as_uint;
+  long long as_int;
+  double as_double;
+  const void* as_pointer;
+  const char* as_string;
+};
+
+// Simple container for const char* that should be copied instead of retained.
+// Produced by the TRACE_STR_COPY macro; its SetTraceValue overload tags the
+// value as TRACE_VALUE_TYPE_COPY_STRING.
+class TraceStringWithCopy {
+ public:
+  explicit TraceStringWithCopy(const char* str) : str_(str) {}
+  const char* str() const { return str_; }
+ private:
+  const char* str_;  // Not owned; only valid for the caller's lifetime.
+};
+
+// Define SetTraceValue for each allowed type. It stores the type and
+// value in the return arguments. This allows this API to avoid declaring any
+// structures so that it is portable to third_party libraries.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, \
+ arg_expression, \
+ union_member, \
+ value_type_id) \
+ static inline void SetTraceValue( \
+ actual_type arg, \
+ unsigned char* type, \
+ unsigned long long* value) { \
+ TraceValueUnion type_value; \
+ type_value.union_member = arg_expression; \
+ *type = value_type_id; \
+ *value = type_value.as_uint; \
+ }
+// Simpler form for int types that can be safely casted.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, \
+ value_type_id) \
+ static inline void SetTraceValue( \
+ actual_type arg, \
+ unsigned char* type, \
+ unsigned long long* value) { \
+ *type = value_type_id; \
+ *value = static_cast<unsigned long long>(arg); \
+ }
+
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned long long, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned long, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned short, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long long, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(short, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE(bool, arg, as_bool, TRACE_VALUE_TYPE_BOOL)
+INTERNAL_DECLARE_SET_TRACE_VALUE(double, arg, as_double,
+ TRACE_VALUE_TYPE_DOUBLE)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, arg, as_pointer,
+ TRACE_VALUE_TYPE_POINTER)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, arg, as_string,
+ TRACE_VALUE_TYPE_STRING)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, arg.str(),
+ as_string, TRACE_VALUE_TYPE_COPY_STRING)
+
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
+
+// std::string version of SetTraceValue so that trace arguments can be strings.
+// Tagged TRACE_VALUE_TYPE_COPY_STRING because |arg| (and thus its c_str())
+// may be a temporary that does not outlive the call.
+static inline void SetTraceValue(const std::string& arg,
+                                 unsigned char* type,
+                                 unsigned long long* value) {
+  TraceValueUnion type_value;
+  type_value.as_string = arg.c_str();
+  *type = TRACE_VALUE_TYPE_COPY_STRING;
+  *value = type_value.as_uint;
+}
+
+// base::Time, base::TimeTicks, etc. versions of SetTraceValue to make it easier
+// to trace these types. Each is stored as TRACE_VALUE_TYPE_INT holding the
+// time's internal (tick-count) value.
+static inline void SetTraceValue(const base::Time arg,
+                                 unsigned char* type,
+                                 unsigned long long* value) {
+  *type = TRACE_VALUE_TYPE_INT;
+  *value = arg.ToInternalValue();
+}
+
+// TimeTicks variant: stored as TRACE_VALUE_TYPE_INT via ToInternalValue().
+static inline void SetTraceValue(const base::TimeTicks arg,
+                                 unsigned char* type,
+                                 unsigned long long* value) {
+  *type = TRACE_VALUE_TYPE_INT;
+  *value = arg.ToInternalValue();
+}
+
+// ThreadTicks variant: stored as TRACE_VALUE_TYPE_INT via ToInternalValue().
+static inline void SetTraceValue(const base::ThreadTicks arg,
+                                 unsigned char* type,
+                                 unsigned long long* value) {
+  *type = TRACE_VALUE_TYPE_INT;
+  *value = arg.ToInternalValue();
+}
+
+// These AddTraceEvent and AddTraceEventWithThreadIdAndTimestamp template
+// functions are defined here instead of in the macro, because the arg_values
+// could be temporary objects, such as std::string. In order to store
+// pointers to the internal c_str and pass through to the tracing API,
+// the arg_values must live throughout these procedures.
+
+// Overload for a single "convertable" argument: ownership of |arg1_val| is
+// moved into the trace log. No scalar arg_values are needed, hence NULL.
+template <class ARG1_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int thread_id,
+    const base::TimeTicks& timestamp,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val) {
+  const int num_args = 1;
+  unsigned char arg_types[1] = { TRACE_VALUE_TYPE_CONVERTABLE };
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[1] = {std::move(arg1_val)};
+  return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, &arg1_name, arg_types, NULL, convertable_values,
+      flags);
+}
+
+// Overload for one plain value argument (encoded via SetTraceValue) followed
+// by one convertable argument (ownership moved into the trace log).
+template <class ARG1_TYPE, class ARG2_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int thread_id,
+    const base::TimeTicks& timestamp,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    const ARG1_TYPE& arg1_val,
+    const char* arg2_name,
+    std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
+  const int num_args = 2;
+  const char* arg_names[2] = { arg1_name, arg2_name };
+
+  unsigned char arg_types[2];
+  unsigned long long arg_values[2];
+  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+  arg_types[1] = TRACE_VALUE_TYPE_CONVERTABLE;
+  // Slot 0 of convertable_values is unused (the value lives in arg_values).
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[2] = {nullptr, std::move(arg2_val)};
+  return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, arg_names, arg_types, arg_values, convertable_values,
+      flags);
+}
+
+// Mirror of the overload above: convertable first argument (ownership moved
+// into the trace log) followed by one plain value argument.
+template <class ARG1_CONVERTABLE_TYPE, class ARG2_TYPE>
+static inline base::trace_event::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int thread_id,
+    const base::TimeTicks& timestamp,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
+    const char* arg2_name,
+    const ARG2_TYPE& arg2_val) {
+  const int num_args = 2;
+  const char* arg_names[2] = { arg1_name, arg2_name };
+
+  unsigned char arg_types[2];
+  unsigned long long arg_values[2];
+  arg_types[0] = TRACE_VALUE_TYPE_CONVERTABLE;
+  arg_values[0] = 0;  // Placeholder; the real payload is in convertable_values.
+  SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[2] = {std::move(arg1_val), nullptr};
+  return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, arg_names, arg_types, arg_values, convertable_values,
+      flags);
+}
+
+// Overload for two convertable arguments; ownership of both is moved into the
+// trace log and no scalar arg_values are passed (NULL).
+template <class ARG1_CONVERTABLE_TYPE, class ARG2_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int thread_id,
+    const base::TimeTicks& timestamp,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
+    const char* arg2_name,
+    std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
+  const int num_args = 2;
+  const char* arg_names[2] = { arg1_name, arg2_name };
+  unsigned char arg_types[2] =
+      { TRACE_VALUE_TYPE_CONVERTABLE, TRACE_VALUE_TYPE_CONVERTABLE };
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[2] = {std::move(arg1_val), std::move(arg2_val)};
+  return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, arg_names, arg_types, NULL, convertable_values,
+      flags);
+}
+
+// Zero-argument overload: forwards straight to the tracing API with
+// kZeroNumArgs and NULL argument arrays.
+static inline base::trace_event::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int thread_id,
+    const base::TimeTicks& timestamp,
+    unsigned int flags,
+    unsigned long long bind_id) {
+  return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, kZeroNumArgs, NULL, NULL, NULL, NULL, flags);
+}
+
+// Convenience wrapper that stamps the event with the current thread id and
+// the current TimeTicks, then delegates to the thread-id/timestamp overload.
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id) {
+  const int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  const base::TimeTicks now = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id);
+}
+
+// Overload for a single plain value argument, encoded via SetTraceValue into
+// a one-element type/value array.
+template<class ARG1_TYPE>
+static inline base::trace_event::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int thread_id,
+    const base::TimeTicks& timestamp,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    const ARG1_TYPE& arg1_val) {
+  const int num_args = 1;
+  unsigned char arg_types[1];
+  unsigned long long arg_values[1];
+  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+  return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, &arg1_name, arg_types, arg_values, NULL, flags);
+}
+
+// One-argument overload: stamps the event with the calling thread's id and
+// the current TimeTicks, then forwards to the explicit-timestamp overload.
+template<class ARG1_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    const ARG1_TYPE& arg1_val) {
+  const int tid = static_cast<int>(base::PlatformThread::CurrentId());
+  const base::TimeTicks ts = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, tid, ts, flags,
+      bind_id, arg1_name, arg1_val);
+}
+
+// One-argument overload taking ownership of a ConvertableToTraceFormat
+// argument; moves it through to the explicit-timestamp overload.
+template <class ARG1_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, std::move(arg1_val));
+}
+
+// Two-argument overload: serializes both argument values via SetTraceValue
+// before forwarding to the TRACE_EVENT_API entry point.
+template<class ARG1_TYPE, class ARG2_TYPE>
+static inline base::trace_event::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int thread_id,
+    const base::TimeTicks& timestamp,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    const ARG1_TYPE& arg1_val,
+    const char* arg2_name,
+    const ARG2_TYPE& arg2_val) {
+  const int num_args = 2;
+  const char* arg_names[2] = { arg1_name, arg2_name };
+  unsigned char arg_types[2];
+  unsigned long long arg_values[2];
+  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+  SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
+  return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, arg_names, arg_types, arg_values, NULL, flags);
+}
+
+// Two-argument overload: convertable first argument (ownership transferred),
+// plain second argument.
+template <class ARG1_CONVERTABLE_TYPE, class ARG2_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
+    const char* arg2_name,
+    const ARG2_TYPE& arg2_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, std::move(arg1_val), arg2_name, arg2_val);
+}
+
+// Two-argument overload: plain first argument, convertable second argument
+// (ownership transferred).
+template <class ARG1_TYPE, class ARG2_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    const ARG1_TYPE& arg1_val,
+    const char* arg2_name,
+    std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, arg1_val, arg2_name, std::move(arg2_val));
+}
+
+// Two-argument overload: both arguments are convertable (ownership of both
+// transferred).
+template <class ARG1_CONVERTABLE_TYPE, class ARG2_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
+    const char* arg2_name,
+    std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, std::move(arg1_val), arg2_name, std::move(arg2_val));
+}
+
+// Two-argument overload with plain arguments: stamps the event with the
+// calling thread's id and the current TimeTicks, then forwards.
+template<class ARG1_TYPE, class ARG2_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    const ARG1_TYPE& arg1_val,
+    const char* arg2_name,
+    const ARG2_TYPE& arg2_val) {
+  const int tid = static_cast<int>(base::PlatformThread::CurrentId());
+  const base::TimeTicks ts = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, tid, ts, flags,
+      bind_id, arg1_name, arg1_val, arg2_name, arg2_val);
+}
+
+// Records a metadata event whose single argument is a
+// ConvertableToTraceFormat instance (ownership transferred to TraceLog).
+template <class ARG1_CONVERTABLE_TYPE>
+static inline void AddMetadataEvent(
+    const unsigned char* category_group_enabled,
+    const char* event_name,
+    const char* arg_name,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg_value) {
+  const char* arg_names[1] = {arg_name};
+  unsigned char arg_types[1] = {TRACE_VALUE_TYPE_CONVERTABLE};
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[1] = {std::move(arg_value)};
+  base::trace_event::TraceLog::GetInstance()->AddMetadataEvent(
+      category_group_enabled, event_name,
+      1,  // num_args
+      arg_names, arg_types,
+      nullptr,  // arg_values
+      convertable_values, TRACE_EVENT_FLAG_NONE);
+}
+
+// Records a metadata event whose single argument is serialized through
+// SetTraceValue (POD / string style values).
+template <class ARG1_TYPE>
+static void AddMetadataEvent(const unsigned char* category_group_enabled,
+                             const char* event_name,
+                             const char* arg_name,
+                             const ARG1_TYPE& arg_val) {
+  const int num_args = 1;
+  const char* arg_names[1] = {arg_name};
+  unsigned char arg_types[1];
+  unsigned long long arg_values[1];
+  SetTraceValue(arg_val, &arg_types[0], &arg_values[0]);
+
+  base::trace_event::TraceLog::GetInstance()->AddMetadataEvent(
+      category_group_enabled, event_name, num_args, arg_names, arg_types,
+      arg_values, nullptr, TRACE_EVENT_FLAG_NONE);
+}
+
+// Used by TRACE_EVENTx macros. Do not use directly.
+class TRACE_EVENT_API_CLASS_EXPORT ScopedTracer {
+ public:
+  // Note: members of data_ intentionally left uninitialized. See Initialize.
+  ScopedTracer() : p_data_(NULL) {}
+
+  // Closes the event (records its duration) only if Initialize() was called
+  // and the category is still enabled at destruction time.
+  ~ScopedTracer() {
+    if (p_data_ && *data_.category_group_enabled)
+      TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+          data_.category_group_enabled, data_.name, data_.event_handle);
+  }
+
+  // Called by the macros after the BEGIN event was actually emitted; setting
+  // p_data_ arms the destructor above.
+  void Initialize(const unsigned char* category_group_enabled,
+                  const char* name,
+                  base::trace_event::TraceEventHandle event_handle) {
+    data_.category_group_enabled = category_group_enabled;
+    data_.name = name;
+    data_.event_handle = event_handle;
+    p_data_ = &data_;
+  }
+
+ private:
+  // This Data struct workaround is to avoid initializing all the members
+  // in Data during construction of this object, since this object is always
+  // constructed, even when tracing is disabled. If the members of Data were
+  // members of this class instead, compiler warnings occur about potential
+  // uninitialized accesses.
+  struct Data {
+    const unsigned char* category_group_enabled;
+    const char* name;
+    base::trace_event::TraceEventHandle event_handle;
+  };
+  Data* p_data_;
+  Data data_;
+};
+
+// Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly.
+// RAII helper: the constructor/destructor pair (defined out of line) brackets
+// the traced scope.
+class TRACE_EVENT_API_CLASS_EXPORT ScopedTraceBinaryEfficient {
+ public:
+  ScopedTraceBinaryEfficient(const char* category_group, const char* name);
+  ~ScopedTraceBinaryEfficient();
+
+ private:
+  const unsigned char* category_group_enabled_;
+  const char* name_;
+  base::trace_event::TraceEventHandle event_handle_;
+};
+
+// This macro generates less code then TRACE_EVENT0 but is also
+// slower to execute when tracing is off. It should generally only be
+// used with code that is seldom executed or conditionally executed
+// when debugging.
+// For now the category_group must be "gpu".
+#define TRACE_EVENT_BINARY_EFFICIENT0(category_group, name) \
+ trace_event_internal::ScopedTraceBinaryEfficient \
+ INTERNAL_TRACE_EVENT_UID(scoped_trace)(category_group, name);
+
+// TraceEventSamplingStateScope records the current sampling state
+// and sets a new sampling state. When the scope exits, it restores
+// the previously recorded sampling state.
+template<size_t BucketNumber>
+class TraceEventSamplingStateScope {
+ public:
+  // Saves the bucket's current state, then installs |category_and_name|.
+  TraceEventSamplingStateScope(const char* category_and_name) {
+    previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
+    TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
+  }
+
+  // Restores the state captured in the constructor.
+  ~TraceEventSamplingStateScope() {
+    TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
+  }
+
+  // Atomically loads this bucket's current sampling-state string.
+  static inline const char* Current() {
+    return reinterpret_cast<const char*>(TRACE_EVENT_API_ATOMIC_LOAD(
+        g_trace_state[BucketNumber]));
+  }
+
+  // Atomically publishes |category_and_name| as this bucket's state.
+  static inline void Set(const char* category_and_name) {
+    TRACE_EVENT_API_ATOMIC_STORE(
+        g_trace_state[BucketNumber],
+        reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
+            const_cast<char*>(category_and_name)));
+  }
+
+ private:
+  const char* previous_state_;
+};
+
+} // namespace trace_event_internal
+
+namespace base {
+namespace trace_event {
+
+// RAII wrapper tying a trackable object's lifetime to trace events: emits
+// OBJECT_CREATED on construction, OBJECT_DELETED on destruction, and
+// OBJECT_SNAPSHOT on demand via snapshot().
+template<typename IDType> class TraceScopedTrackableObject {
+ public:
+  TraceScopedTrackableObject(const char* category_group, const char* name,
+                             IDType id)
+      : category_group_(category_group),
+        name_(name),
+        id_(id) {
+    TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group_, name_, id_);
+  }
+
+  // Emits a snapshot event carrying |snapshot| as the payload.
+  template <typename ArgType> void snapshot(ArgType snapshot) {
+    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group_, name_, id_, snapshot);
+  }
+
+  ~TraceScopedTrackableObject() {
+    TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group_, name_, id_);
+  }
+
+ private:
+  const char* category_group_;
+  const char* name_;
+  IDType id_;
+
+  DISALLOW_COPY_AND_ASSIGN(TraceScopedTrackableObject);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_H_
diff --git a/libchrome/base/trace_event/trace_event_argument.cc b/libchrome/base/trace_event/trace_event_argument.cc
new file mode 100644
index 0000000..336d964
--- /dev/null
+++ b/libchrome/base/trace_event/trace_event_argument.cc
@@ -0,0 +1,473 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_argument.h"
+
+#include <stdint.h>
+
+#include <utility>
+
+#include "base/bits.h"
+#include "base/json/json_writer.h"
+#include "base/memory/ptr_util.h"
+#include "base/trace_event/trace_event_memory_overhead.h"
+#include "base/values.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+const char kTypeStartDict = '{';
+const char kTypeEndDict = '}';
+const char kTypeStartArray = '[';
+const char kTypeEndArray = ']';
+const char kTypeBool = 'b';
+const char kTypeInt = 'i';
+const char kTypeDouble = 'd';
+const char kTypeString = 's';
+const char kTypeCStr = '*';
+
+#ifndef NDEBUG
+const bool kStackTypeDict = false;
+const bool kStackTypeArray = true;
+#define DCHECK_CURRENT_CONTAINER_IS(x) DCHECK_EQ(x, nesting_stack_.back())
+#define DCHECK_CONTAINER_STACK_DEPTH_EQ(x) DCHECK_EQ(x, nesting_stack_.size())
+#define DEBUG_PUSH_CONTAINER(x) nesting_stack_.push_back(x)
+#define DEBUG_POP_CONTAINER() nesting_stack_.pop_back()
+#else
+#define DCHECK_CURRENT_CONTAINER_IS(x) do {} while (0)
+#define DCHECK_CONTAINER_STACK_DEPTH_EQ(x) do {} while (0)
+#define DEBUG_PUSH_CONTAINER(x) do {} while (0)
+#define DEBUG_POP_CONTAINER() do {} while (0)
+#endif
+
+// Writes a dictionary key as a kTypeCStr tag byte followed by the pointer
+// value itself (no copy). Only valid for long-lived (literal) strings, per
+// the TracedValue header contract.
+inline void WriteKeyNameAsRawPtr(Pickle& pickle, const char* ptr) {
+  pickle.WriteBytes(&kTypeCStr, 1);
+  pickle.WriteUInt64(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(ptr)));
+}
+
+// Writes a dictionary key as a kTypeString tag byte followed by an inline
+// copy of the string, so temporaries are safe.
+inline void WriteKeyNameWithCopy(Pickle& pickle, base::StringPiece str) {
+  pickle.WriteBytes(&kTypeString, 1);
+  pickle.WriteString(str);
+}
+
+// Reads back a key written by one of the WriteKeyName* helpers: a one-byte
+// tag followed by either a raw pointer (kTypeCStr) or an inline copy
+// (kTypeString). DCHECKs that every read succeeded.
+std::string ReadKeyName(PickleIterator& pickle_iterator) {
+  const char* type = nullptr;
+  bool res = pickle_iterator.ReadBytes(&type, 1);
+  std::string key_name;
+  if (res && *type == kTypeCStr) {
+    uint64_t ptr_value = 0;
+    res = pickle_iterator.ReadUInt64(&ptr_value);
+    // Rehydrate the pointer written by WriteKeyNameAsRawPtr and copy the
+    // string it still points to.
+    key_name = reinterpret_cast<const char*>(static_cast<uintptr_t>(ptr_value));
+  } else if (res && *type == kTypeString) {
+    res = pickle_iterator.ReadString(&key_name);
+  }
+  DCHECK(res);
+  return key_name;
+}
+} // namespace
+
+// Default constructor: delegates to the sized constructor with no reserved
+// pickle capacity.
+TracedValue::TracedValue() : TracedValue(0) {
+}
+
+// |capacity| (bytes) pre-reserves pickle storage; 0 means no reservation.
+// In debug builds the implicit root dictionary is pushed on the nesting
+// stack.
+TracedValue::TracedValue(size_t capacity) {
+  DEBUG_PUSH_CONTAINER(kStackTypeDict);
+  if (capacity)
+    pickle_.Reserve(capacity);
+}
+
+// Debug-checks that every Begin* was matched by an End* and only the root
+// dictionary remains open.
+TracedValue::~TracedValue() {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DEBUG_POP_CONTAINER();
+  DCHECK_CONTAINER_STACK_DEPTH_EQ(0u);
+}
+
+// Adds an int entry to the current dictionary. |name| must be long lived
+// (stored as a raw pointer).
+void TracedValue::SetInteger(const char* name, int value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeInt, 1);
+  pickle_.WriteInt(value);
+  WriteKeyNameAsRawPtr(pickle_, name);
+}
+
+// Adds an int entry to the current dictionary, copying |name| so temporaries
+// are safe.
+void TracedValue::SetIntegerWithCopiedName(base::StringPiece name, int value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeInt, 1);
+  pickle_.WriteInt(value);
+  WriteKeyNameWithCopy(pickle_, name);
+}
+
+// Adds a double entry to the current dictionary. |name| must be long lived.
+void TracedValue::SetDouble(const char* name, double value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeDouble, 1);
+  pickle_.WriteDouble(value);
+  WriteKeyNameAsRawPtr(pickle_, name);
+}
+
+// Adds a double entry to the current dictionary, copying |name|.
+void TracedValue::SetDoubleWithCopiedName(base::StringPiece name,
+                                          double value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeDouble, 1);
+  pickle_.WriteDouble(value);
+  WriteKeyNameWithCopy(pickle_, name);
+}
+
+// Adds a bool entry to the current dictionary. |name| must be long lived.
+void TracedValue::SetBoolean(const char* name, bool value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeBool, 1);
+  pickle_.WriteBool(value);
+  WriteKeyNameAsRawPtr(pickle_, name);
+}
+
+// Adds a bool entry to the current dictionary, copying |name|.
+void TracedValue::SetBooleanWithCopiedName(base::StringPiece name,
+                                           bool value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeBool, 1);
+  pickle_.WriteBool(value);
+  WriteKeyNameWithCopy(pickle_, name);
+}
+
+// Adds a string entry to the current dictionary. |name| must be long lived;
+// |value| is always copied into the pickle.
+void TracedValue::SetString(const char* name, base::StringPiece value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeString, 1);
+  pickle_.WriteString(value);
+  WriteKeyNameAsRawPtr(pickle_, name);
+}
+
+// Adds a string entry to the current dictionary, copying both |name| and
+// |value|.
+void TracedValue::SetStringWithCopiedName(base::StringPiece name,
+                                          base::StringPiece value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeString, 1);
+  pickle_.WriteString(value);
+  WriteKeyNameWithCopy(pickle_, name);
+}
+
+// Embeds another TracedValue as a nested dictionary under |name| by splicing
+// its raw pickle payload between begin/end dictionary markers. |value| is
+// left untouched.
+void TracedValue::SetValue(const char* name, const TracedValue& value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  BeginDictionary(name);
+  pickle_.WriteBytes(value.pickle_.payload(),
+                     static_cast<int>(value.pickle_.payload_size()));
+  EndDictionary();
+}
+
+// As SetValue(), but copies |name| so temporaries are safe.
+void TracedValue::SetValueWithCopiedName(base::StringPiece name,
+                                         const TracedValue& value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  BeginDictionaryWithCopiedName(name);
+  pickle_.WriteBytes(value.pickle_.payload(),
+                     static_cast<int>(value.pickle_.payload_size()));
+  EndDictionary();
+}
+
+// Opens a nested dictionary under |name|; must be balanced by
+// EndDictionary(). |name| must be long lived.
+void TracedValue::BeginDictionary(const char* name) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DEBUG_PUSH_CONTAINER(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeStartDict, 1);
+  WriteKeyNameAsRawPtr(pickle_, name);
+}
+
+// As BeginDictionary(), but copies |name|.
+void TracedValue::BeginDictionaryWithCopiedName(base::StringPiece name) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DEBUG_PUSH_CONTAINER(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeStartDict, 1);
+  WriteKeyNameWithCopy(pickle_, name);
+}
+
+// Opens a nested array under |name|; must be balanced by EndArray(). |name|
+// must be long lived.
+void TracedValue::BeginArray(const char* name) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DEBUG_PUSH_CONTAINER(kStackTypeArray);
+  pickle_.WriteBytes(&kTypeStartArray, 1);
+  WriteKeyNameAsRawPtr(pickle_, name);
+}
+
+// As BeginArray(), but copies |name|.
+void TracedValue::BeginArrayWithCopiedName(base::StringPiece name) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DEBUG_PUSH_CONTAINER(kStackTypeArray);
+  pickle_.WriteBytes(&kTypeStartArray, 1);
+  WriteKeyNameWithCopy(pickle_, name);
+}
+
+// Closes the innermost open dictionary.
+void TracedValue::EndDictionary() {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DEBUG_POP_CONTAINER();
+  pickle_.WriteBytes(&kTypeEndDict, 1);
+}
+
+// Appends an int to the innermost open array.
+void TracedValue::AppendInteger(int value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  pickle_.WriteBytes(&kTypeInt, 1);
+  pickle_.WriteInt(value);
+}
+
+// Appends a double to the innermost open array.
+void TracedValue::AppendDouble(double value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  pickle_.WriteBytes(&kTypeDouble, 1);
+  pickle_.WriteDouble(value);
+}
+
+// Appends a bool to the innermost open array.
+void TracedValue::AppendBoolean(bool value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  pickle_.WriteBytes(&kTypeBool, 1);
+  pickle_.WriteBool(value);
+}
+
+// Appends a string (always copied) to the innermost open array.
+void TracedValue::AppendString(base::StringPiece value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  pickle_.WriteBytes(&kTypeString, 1);
+  pickle_.WriteString(value);
+}
+
+// Opens an unnamed array as the next element of the innermost open array.
+void TracedValue::BeginArray() {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  DEBUG_PUSH_CONTAINER(kStackTypeArray);
+  pickle_.WriteBytes(&kTypeStartArray, 1);
+}
+
+// Opens an unnamed dictionary as the next element of the innermost open
+// array.
+void TracedValue::BeginDictionary() {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  DEBUG_PUSH_CONTAINER(kStackTypeDict);
+  pickle_.WriteBytes(&kTypeStartDict, 1);
+}
+
+// Closes the innermost open array.
+void TracedValue::EndArray() {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  DEBUG_POP_CONTAINER();
+  pickle_.WriteBytes(&kTypeEndArray, 1);
+}
+
+// DEPRECATED (see header): copy-translates a base::Value tree into this
+// TracedValue. Note that |name| is copied here, despite the const char* type.
+void TracedValue::SetValue(const char* name,
+                           std::unique_ptr<base::Value> value) {
+  SetBaseValueWithCopiedName(name, *value);
+}
+
+// Recursively translates |value| into pickle entries under |name| in the
+// current dictionary. NULL and BINARY values are unsupported (NOTREACHED).
+void TracedValue::SetBaseValueWithCopiedName(base::StringPiece name,
+                                             const base::Value& value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  switch (value.GetType()) {
+    case base::Value::TYPE_NULL:
+    case base::Value::TYPE_BINARY:
+      NOTREACHED();
+      break;
+
+    case base::Value::TYPE_BOOLEAN: {
+      bool bool_value;
+      value.GetAsBoolean(&bool_value);
+      SetBooleanWithCopiedName(name, bool_value);
+    } break;
+
+    case base::Value::TYPE_INTEGER: {
+      int int_value;
+      value.GetAsInteger(&int_value);
+      SetIntegerWithCopiedName(name, int_value);
+    } break;
+
+    case base::Value::TYPE_DOUBLE: {
+      double double_value;
+      value.GetAsDouble(&double_value);
+      SetDoubleWithCopiedName(name, double_value);
+    } break;
+
+    case base::Value::TYPE_STRING: {
+      const StringValue* string_value;
+      value.GetAsString(&string_value);
+      SetStringWithCopiedName(name, string_value->GetString());
+    } break;
+
+    case base::Value::TYPE_DICTIONARY: {
+      const DictionaryValue* dict_value;
+      value.GetAsDictionary(&dict_value);
+      BeginDictionaryWithCopiedName(name);
+      for (DictionaryValue::Iterator it(*dict_value); !it.IsAtEnd();
+           it.Advance()) {
+        SetBaseValueWithCopiedName(it.key(), it.value());
+      }
+      EndDictionary();
+    } break;
+
+    case base::Value::TYPE_LIST: {
+      const ListValue* list_value;
+      value.GetAsList(&list_value);
+      BeginArrayWithCopiedName(name);
+      for (const auto& base_value : *list_value)
+        AppendBaseValue(*base_value);
+      EndArray();
+    } break;
+  }
+}
+
+// Array counterpart of SetBaseValueWithCopiedName(): recursively appends
+// |value| to the innermost open array. NULL and BINARY are unsupported.
+void TracedValue::AppendBaseValue(const base::Value& value) {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+  switch (value.GetType()) {
+    case base::Value::TYPE_NULL:
+    case base::Value::TYPE_BINARY:
+      NOTREACHED();
+      break;
+
+    case base::Value::TYPE_BOOLEAN: {
+      bool bool_value;
+      value.GetAsBoolean(&bool_value);
+      AppendBoolean(bool_value);
+    } break;
+
+    case base::Value::TYPE_INTEGER: {
+      int int_value;
+      value.GetAsInteger(&int_value);
+      AppendInteger(int_value);
+    } break;
+
+    case base::Value::TYPE_DOUBLE: {
+      double double_value;
+      value.GetAsDouble(&double_value);
+      AppendDouble(double_value);
+    } break;
+
+    case base::Value::TYPE_STRING: {
+      const StringValue* string_value;
+      value.GetAsString(&string_value);
+      AppendString(string_value->GetString());
+    } break;
+
+    case base::Value::TYPE_DICTIONARY: {
+      const DictionaryValue* dict_value;
+      value.GetAsDictionary(&dict_value);
+      BeginDictionary();
+      for (DictionaryValue::Iterator it(*dict_value); !it.IsAtEnd();
+           it.Advance()) {
+        SetBaseValueWithCopiedName(it.key(), it.value());
+      }
+      EndDictionary();
+    } break;
+
+    case base::Value::TYPE_LIST: {
+      const ListValue* list_value;
+      value.GetAsList(&list_value);
+      BeginArray();
+      for (const auto& base_value : *list_value)
+        AppendBaseValue(*base_value);
+      EndArray();
+    } break;
+  }
+}
+
+// Replays the pickled stream into an equivalent base::Value tree. Exactly one
+// of |cur_dict| / |cur_list| is non-null at any point, identifying the
+// container currently being filled; |stack| holds the chain of enclosing
+// containers so End markers can pop back out.
+std::unique_ptr<base::Value> TracedValue::ToBaseValue() const {
+  std::unique_ptr<DictionaryValue> root(new DictionaryValue);
+  DictionaryValue* cur_dict = root.get();
+  ListValue* cur_list = nullptr;
+  std::vector<Value*> stack;
+  PickleIterator it(pickle_);
+  const char* type;
+
+  while (it.ReadBytes(&type, 1)) {
+    DCHECK((cur_dict && !cur_list) || (cur_list && !cur_dict));
+    switch (*type) {
+      case kTypeStartDict: {
+        auto* new_dict = new DictionaryValue();
+        if (cur_dict) {
+          // A dict inside a dict carries a key (written after the marker).
+          cur_dict->SetWithoutPathExpansion(ReadKeyName(it),
+                                            WrapUnique(new_dict));
+          stack.push_back(cur_dict);
+          cur_dict = new_dict;
+        } else {
+          // A dict inside an array is keyless.
+          cur_list->Append(WrapUnique(new_dict));
+          stack.push_back(cur_list);
+          cur_list = nullptr;
+          cur_dict = new_dict;
+        }
+      } break;
+
+      case kTypeEndArray:
+      case kTypeEndDict: {
+        // Restore whichever container kind the parent was.
+        if (stack.back()->GetAsDictionary(&cur_dict)) {
+          cur_list = nullptr;
+        } else if (stack.back()->GetAsList(&cur_list)) {
+          cur_dict = nullptr;
+        }
+        stack.pop_back();
+      } break;
+
+      case kTypeStartArray: {
+        auto* new_list = new ListValue();
+        if (cur_dict) {
+          cur_dict->SetWithoutPathExpansion(ReadKeyName(it),
+                                            WrapUnique(new_list));
+          stack.push_back(cur_dict);
+          cur_dict = nullptr;
+          cur_list = new_list;
+        } else {
+          cur_list->Append(WrapUnique(new_list));
+          stack.push_back(cur_list);
+          cur_list = new_list;
+        }
+      } break;
+
+      case kTypeBool: {
+        bool value;
+        CHECK(it.ReadBool(&value));
+        if (cur_dict) {
+          cur_dict->SetBooleanWithoutPathExpansion(ReadKeyName(it), value);
+        } else {
+          cur_list->AppendBoolean(value);
+        }
+      } break;
+
+      case kTypeInt: {
+        int value;
+        CHECK(it.ReadInt(&value));
+        if (cur_dict) {
+          cur_dict->SetIntegerWithoutPathExpansion(ReadKeyName(it), value);
+        } else {
+          cur_list->AppendInteger(value);
+        }
+      } break;
+
+      case kTypeDouble: {
+        double value;
+        CHECK(it.ReadDouble(&value));
+        if (cur_dict) {
+          cur_dict->SetDoubleWithoutPathExpansion(ReadKeyName(it), value);
+        } else {
+          cur_list->AppendDouble(value);
+        }
+      } break;
+
+      case kTypeString: {
+        std::string value;
+        CHECK(it.ReadString(&value));
+        if (cur_dict) {
+          cur_dict->SetStringWithoutPathExpansion(ReadKeyName(it), value);
+        } else {
+          cur_list->AppendString(value);
+        }
+      } break;
+
+      default:
+        NOTREACHED();
+    }
+  }
+  DCHECK(stack.empty());
+  return std::move(root);
+}
+
+// ConvertableToTraceFormat implementation: appends this value, serialized as
+// JSON, to |out|. Requires a fully balanced container stack (root dict only).
+void TracedValue::AppendAsTraceFormat(std::string* out) const {
+  DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+  DCHECK_CONTAINER_STACK_DEPTH_EQ(1u);
+
+  // TODO(primiano): this could be smarter, skip the ToBaseValue encoding and
+  // produce the JSON on its own. This will require refactoring JSONWriter
+  // to decouple the base::Value traversal from the JSON writing bits
+  std::string tmp;
+  JSONWriter::Write(*ToBaseValue(), &tmp);
+  *out += tmp;
+}
+
+// Reports this object's pickle storage to the memory-overhead accounting.
+void TracedValue::EstimateTraceMemoryOverhead(
+    TraceEventMemoryOverhead* overhead) {
+  overhead->Add("TracedValue",
+                /* allocated size */
+                pickle_.GetTotalAllocatedSize(),
+                /* resident size */
+                pickle_.size());
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/trace_event_argument.h b/libchrome/base/trace_event/trace_event_argument.h
new file mode 100644
index 0000000..81d8c01
--- /dev/null
+++ b/libchrome/base/trace_event/trace_event_argument.h
@@ -0,0 +1,92 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_ARGUMENT_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_ARGUMENT_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/pickle.h"
+#include "base/strings/string_piece.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+
+class Value;
+
+namespace trace_event {
+
+// Pickle-backed implementation of ConvertableToTraceFormat that lets trace
+// macros attach structured (dictionary/array) arguments without building a
+// base::Value tree eagerly; conversion to JSON happens lazily in
+// AppendAsTraceFormat().
+class BASE_EXPORT TracedValue : public ConvertableToTraceFormat {
+ public:
+  TracedValue();
+  explicit TracedValue(size_t capacity);
+  ~TracedValue() override;
+
+  void EndDictionary();
+  void EndArray();
+
+  // These methods assume that |name| is a long lived "quoted" string.
+  void SetInteger(const char* name, int value);
+  void SetDouble(const char* name, double value);
+  void SetBoolean(const char* name, bool value);
+  void SetString(const char* name, base::StringPiece value);
+  void SetValue(const char* name, const TracedValue& value);
+  void BeginDictionary(const char* name);
+  void BeginArray(const char* name);
+
+  // These, instead, can be safely passed a temporary string.
+  void SetIntegerWithCopiedName(base::StringPiece name, int value);
+  void SetDoubleWithCopiedName(base::StringPiece name, double value);
+  void SetBooleanWithCopiedName(base::StringPiece name, bool value);
+  void SetStringWithCopiedName(base::StringPiece name,
+                               base::StringPiece value);
+  void SetValueWithCopiedName(base::StringPiece name,
+                              const TracedValue& value);
+  void BeginDictionaryWithCopiedName(base::StringPiece name);
+  void BeginArrayWithCopiedName(base::StringPiece name);
+
+  // Array element appenders; valid only inside an open array.
+  void AppendInteger(int);
+  void AppendDouble(double);
+  void AppendBoolean(bool);
+  void AppendString(base::StringPiece);
+  void BeginArray();
+  void BeginDictionary();
+
+  // ConvertableToTraceFormat implementation.
+  void AppendAsTraceFormat(std::string* out) const override;
+
+  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) override;
+
+  // DEPRECATED: do not use, here only for legacy reasons. These methods causes
+  // a copy-and-translation of the base::Value into the equivalent TracedValue.
+  // TODO(primiano): migrate the (three) existing clients to the cheaper
+  // SetValue(TracedValue) API. crbug.com/495628.
+  void SetValue(const char* name, std::unique_ptr<base::Value> value);
+  void SetBaseValueWithCopiedName(base::StringPiece name,
+                                  const base::Value& value);
+  void AppendBaseValue(const base::Value& value);
+
+  // Public for tests only.
+  std::unique_ptr<base::Value> ToBaseValue() const;
+
+ private:
+  // Flat byte stream holding the (type, value[, key]) records.
+  Pickle pickle_;
+
+#ifndef NDEBUG
+  // In debug builds checks the pairings of {Start,End}{Dictionary,Array}
+  std::vector<bool> nesting_stack_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(TracedValue);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_ARGUMENT_H_
diff --git a/libchrome/base/trace_event/trace_event_argument_unittest.cc b/libchrome/base/trace_event/trace_event_argument_unittest.cc
new file mode 100644
index 0000000..61395f4
--- /dev/null
+++ b/libchrome/base/trace_event/trace_event_argument_unittest.cc
@@ -0,0 +1,165 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_argument.h"
+
+#include <stddef.h>
+
+#include <utility>
+
+#include "base/memory/ptr_util.h"
+#include "base/values.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+// Top-level Set* entries serialize to a single JSON object, appended after
+// any pre-existing content of the output string.
+TEST(TraceEventArgumentTest, FlatDictionary) {
+  std::unique_ptr<TracedValue> value(new TracedValue());
+  value->SetInteger("int", 2014);
+  value->SetDouble("double", 0.0);
+  value->SetBoolean("bool", true);
+  value->SetString("string", "string");
+  std::string json = "PREFIX";
+  value->AppendAsTraceFormat(&json);
+  EXPECT_EQ(
+      "PREFIX{\"bool\":true,\"double\":0.0,\"int\":2014,\"string\":\"string\"}",
+      json);
+}
+
+// Keys containing '.' must stay literal keys, not expand into nested paths.
+TEST(TraceEventArgumentTest, NoDotPathExpansion) {
+  std::unique_ptr<TracedValue> value(new TracedValue());
+  value->SetInteger("in.t", 2014);
+  value->SetDouble("doub.le", 0.0);
+  value->SetBoolean("bo.ol", true);
+  value->SetString("str.ing", "str.ing");
+  std::string json;
+  value->AppendAsTraceFormat(&json);
+  EXPECT_EQ(
+      "{\"bo.ol\":true,\"doub.le\":0.0,\"in.t\":2014,\"str.ing\":\"str.ing\"}",
+      json);
+}
+
+// Nested dictionaries and arrays round-trip with correct structure and
+// alphabetically ordered keys in the JSON output.
+TEST(TraceEventArgumentTest, Hierarchy) {
+  std::unique_ptr<TracedValue> value(new TracedValue());
+  value->SetInteger("i0", 2014);
+  value->BeginDictionary("dict1");
+  value->SetInteger("i1", 2014);
+  value->BeginDictionary("dict2");
+  value->SetBoolean("b2", false);
+  value->EndDictionary();
+  value->SetString("s1", "foo");
+  value->EndDictionary();
+  value->SetDouble("d0", 0.0);
+  value->SetBoolean("b0", true);
+  value->BeginArray("a1");
+  value->AppendInteger(1);
+  value->AppendBoolean(true);
+  value->BeginDictionary();
+  value->SetInteger("i2", 3);
+  value->EndDictionary();
+  value->EndArray();
+  value->SetString("s0", "foo");
+  std::string json;
+  value->AppendAsTraceFormat(&json);
+  EXPECT_EQ(
+      "{\"a1\":[1,true,{\"i2\":3}],\"b0\":true,\"d0\":0.0,\"dict1\":{\"dict2\":"
+      "{\"b2\":false},\"i1\":2014,\"s1\":\"foo\"},\"i0\":2014,\"s0\":"
+      "\"foo\"}",
+      json);
+}
+
+// Strings long enough to exceed small-buffer thresholds (up to 4 KiB) are
+// stored and serialized intact, including the empty string.
+TEST(TraceEventArgumentTest, LongStrings) {
+  std::string kLongString = "supercalifragilisticexpialidocious";
+  std::string kLongString2 = "0123456789012345678901234567890123456789";
+  char kLongString3[4096];
+  for (size_t i = 0; i < sizeof(kLongString3); ++i)
+    kLongString3[i] = 'a' + (i % 25);
+  kLongString3[sizeof(kLongString3) - 1] = '\0';
+
+  std::unique_ptr<TracedValue> value(new TracedValue());
+  value->SetString("a", "short");
+  value->SetString("b", kLongString);
+  value->BeginArray("c");
+  value->AppendString(kLongString2);
+  value->AppendString("");
+  value->BeginDictionary();
+  value->SetString("a", kLongString3);
+  value->EndDictionary();
+  value->EndArray();
+
+  std::string json;
+  value->AppendAsTraceFormat(&json);
+  EXPECT_EQ("{\"a\":\"short\",\"b\":\"" + kLongString + "\",\"c\":[\"" +
+                kLongString2 + "\",\"\",{\"a\":\"" + kLongString3 + "\"}]}",
+            json);
+}
+
+// A base::Value tree passed through the deprecated SetValue(unique_ptr)
+// overload is copy-translated into the TracedValue.
+TEST(TraceEventArgumentTest, PassBaseValue) {
+  FundamentalValue int_value(42);
+  FundamentalValue bool_value(true);
+  FundamentalValue double_value(42.0f);
+
+  auto dict_value = WrapUnique(new DictionaryValue);
+  dict_value->SetBoolean("bool", true);
+  dict_value->SetInteger("int", 42);
+  dict_value->SetDouble("double", 42.0f);
+  dict_value->SetString("string", std::string("a") + "b");
+  // NOTE(review): the line below duplicates the one above — presumably an
+  // accidental copy; harmless since it overwrites the key with the same
+  // value. Confirm and remove upstream.
+  dict_value->SetString("string", std::string("a") + "b");
+
+  auto list_value = WrapUnique(new ListValue);
+  list_value->AppendBoolean(false);
+  list_value->AppendInteger(1);
+  list_value->AppendString("in_list");
+  list_value->Append(std::move(dict_value));
+
+  std::unique_ptr<TracedValue> value(new TracedValue());
+  value->BeginDictionary("outer_dict");
+  value->SetValue("inner_list", std::move(list_value));
+  value->EndDictionary();
+
+  dict_value.reset();
+  list_value.reset();
+
+  std::string json;
+  value->AppendAsTraceFormat(&json);
+  EXPECT_EQ(
+      "{\"outer_dict\":{\"inner_list\":[false,1,\"in_list\",{\"bool\":true,"
+      "\"double\":42.0,\"int\":42,\"string\":\"ab\"}]}}",
+      json);
+}
+
+// SetValue(const TracedValue&) splices a nested TracedValue by payload copy:
+// the source must be unmodified and still usable afterwards.
+TEST(TraceEventArgumentTest, PassTracedValue) {
+  auto dict_value = WrapUnique(new TracedValue());
+  dict_value->SetInteger("a", 1);
+
+  auto nested_dict_value = WrapUnique(new TracedValue());
+  nested_dict_value->SetInteger("b", 2);
+  nested_dict_value->BeginArray("c");
+  nested_dict_value->AppendString("foo");
+  nested_dict_value->EndArray();
+
+  dict_value->SetValue("e", *nested_dict_value);
+
+  // Check the merged result.
+  std::string json;
+  dict_value->AppendAsTraceFormat(&json);
+  EXPECT_EQ("{\"a\":1,\"e\":{\"b\":2,\"c\":[\"foo\"]}}", json);
+
+  // Check that the passed nested dict was left untouched.
+  json = "";
+  nested_dict_value->AppendAsTraceFormat(&json);
+  EXPECT_EQ("{\"b\":2,\"c\":[\"foo\"]}", json);
+
+  // And that it is still usable.
+  nested_dict_value->SetInteger("f", 3);
+  nested_dict_value->BeginDictionary("g");
+  nested_dict_value->EndDictionary();
+  json = "";
+  nested_dict_value->AppendAsTraceFormat(&json);
+  EXPECT_EQ("{\"b\":2,\"c\":[\"foo\"],\"f\":3,\"g\":{}}", json);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/trace_event_impl.cc b/libchrome/base/trace_event/trace_event_impl.cc
new file mode 100644
index 0000000..f469f2f
--- /dev/null
+++ b/libchrome/base/trace_event/trace_event_impl.cc
@@ -0,0 +1,426 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_impl.h"
+
+#include <stddef.h>
+
+#include "base/format_macros.h"
+#include "base/json/string_escape.h"
+#include "base/process/process_handle.h"
+#include "base/stl_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_log.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }
+
+// Copies |*member| into |*buffer|, sets |*member| to point to this new
+// location, and then advances |*buffer| by the amount written.
+void CopyTraceEventParameter(char** buffer,
+ const char** member,
+ const char* end) {
+ if (*member) {
+ size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
+ DCHECK_LE(static_cast<int>(written), end - *buffer);
+ *member = *buffer;
+ *buffer += written;
+ }
+}
+
+} // namespace
+
+TraceEvent::TraceEvent()
+ : duration_(TimeDelta::FromInternalValue(-1)),
+ scope_(trace_event_internal::kGlobalScope),
+ id_(0u),
+ category_group_enabled_(NULL),
+ name_(NULL),
+ thread_id_(0),
+ flags_(0),
+ phase_(TRACE_EVENT_PHASE_BEGIN) {
+ for (int i = 0; i < kTraceMaxNumArgs; ++i)
+ arg_names_[i] = NULL;
+ memset(arg_values_, 0, sizeof(arg_values_));
+}
+
+TraceEvent::~TraceEvent() {
+}
+
+void TraceEvent::MoveFrom(std::unique_ptr<TraceEvent> other) {
+ timestamp_ = other->timestamp_;
+ thread_timestamp_ = other->thread_timestamp_;
+ duration_ = other->duration_;
+ scope_ = other->scope_;
+ id_ = other->id_;
+ category_group_enabled_ = other->category_group_enabled_;
+ name_ = other->name_;
+ if (other->flags_ & TRACE_EVENT_FLAG_HAS_PROCESS_ID)
+ process_id_ = other->process_id_;
+ else
+ thread_id_ = other->thread_id_;
+ phase_ = other->phase_;
+ flags_ = other->flags_;
+ parameter_copy_storage_ = std::move(other->parameter_copy_storage_);
+
+ for (int i = 0; i < kTraceMaxNumArgs; ++i) {
+ arg_names_[i] = other->arg_names_[i];
+ arg_types_[i] = other->arg_types_[i];
+ arg_values_[i] = other->arg_values_[i];
+ convertable_values_[i] = std::move(other->convertable_values_[i]);
+ }
+}
+
+void TraceEvent::Initialize(
+ int thread_id,
+ TimeTicks timestamp,
+ ThreadTicks thread_timestamp,
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ const char* scope,
+ unsigned long long id,
+ unsigned long long bind_id,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned int flags) {
+ timestamp_ = timestamp;
+ thread_timestamp_ = thread_timestamp;
+ duration_ = TimeDelta::FromInternalValue(-1);
+ scope_ = scope;
+ id_ = id;
+ category_group_enabled_ = category_group_enabled;
+ name_ = name;
+ thread_id_ = thread_id;
+ phase_ = phase;
+ flags_ = flags;
+ bind_id_ = bind_id;
+
+ // Clamp num_args since it may have been set by a third_party library.
+ num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
+ int i = 0;
+ for (; i < num_args; ++i) {
+ arg_names_[i] = arg_names[i];
+ arg_types_[i] = arg_types[i];
+
+ if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE) {
+ convertable_values_[i] = std::move(convertable_values[i]);
+ } else {
+ arg_values_[i].as_uint = arg_values[i];
+ convertable_values_[i].reset();
+ }
+ }
+ for (; i < kTraceMaxNumArgs; ++i) {
+ arg_names_[i] = NULL;
+ arg_values_[i].as_uint = 0u;
+ convertable_values_[i].reset();
+ arg_types_[i] = TRACE_VALUE_TYPE_UINT;
+ }
+
+ bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
+ size_t alloc_size = 0;
+ if (copy) {
+ alloc_size += GetAllocLength(name) + GetAllocLength(scope);
+ for (i = 0; i < num_args; ++i) {
+ alloc_size += GetAllocLength(arg_names_[i]);
+ if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
+ arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
+ }
+ }
+
+ bool arg_is_copy[kTraceMaxNumArgs];
+ for (i = 0; i < num_args; ++i) {
+ // No copying of convertable types, we retain ownership.
+ if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+ continue;
+
+ // We only take a copy of arg_vals if they are of type COPY_STRING.
+ arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
+ if (arg_is_copy[i])
+ alloc_size += GetAllocLength(arg_values_[i].as_string);
+ }
+
+ if (alloc_size) {
+ parameter_copy_storage_.reset(new std::string);
+ parameter_copy_storage_->resize(alloc_size);
+ char* ptr = string_as_array(parameter_copy_storage_.get());
+ const char* end = ptr + alloc_size;
+ if (copy) {
+ CopyTraceEventParameter(&ptr, &name_, end);
+ CopyTraceEventParameter(&ptr, &scope_, end);
+ for (i = 0; i < num_args; ++i) {
+ CopyTraceEventParameter(&ptr, &arg_names_[i], end);
+ }
+ }
+ for (i = 0; i < num_args; ++i) {
+ if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+ continue;
+ if (arg_is_copy[i])
+ CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
+ }
+ DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
+ }
+}
+
+void TraceEvent::Reset() {
+ // Only reset fields that won't be initialized in Initialize(), or that may
+ // hold references to other objects.
+ duration_ = TimeDelta::FromInternalValue(-1);
+ parameter_copy_storage_.reset();
+ for (int i = 0; i < kTraceMaxNumArgs; ++i)
+ convertable_values_[i].reset();
+}
+
+void TraceEvent::UpdateDuration(const TimeTicks& now,
+ const ThreadTicks& thread_now) {
+ DCHECK_EQ(duration_.ToInternalValue(), -1);
+ duration_ = now - timestamp_;
+
+ // |thread_timestamp_| can be empty if the thread ticks clock wasn't
+ // initialized when it was recorded.
+ if (thread_timestamp_ != ThreadTicks())
+ thread_duration_ = thread_now - thread_timestamp_;
+}
+
+void TraceEvent::EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) {
+ overhead->Add("TraceEvent", sizeof(*this));
+
+ if (parameter_copy_storage_)
+ overhead->AddString(*parameter_copy_storage_);
+
+ for (size_t i = 0; i < kTraceMaxNumArgs; ++i) {
+ if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+ convertable_values_[i]->EstimateTraceMemoryOverhead(overhead);
+ }
+}
+
+// static
+void TraceEvent::AppendValueAsJSON(unsigned char type,
+ TraceEvent::TraceValue value,
+ std::string* out) {
+ switch (type) {
+ case TRACE_VALUE_TYPE_BOOL:
+ *out += value.as_bool ? "true" : "false";
+ break;
+ case TRACE_VALUE_TYPE_UINT:
+ StringAppendF(out, "%" PRIu64, static_cast<uint64_t>(value.as_uint));
+ break;
+ case TRACE_VALUE_TYPE_INT:
+ StringAppendF(out, "%" PRId64, static_cast<int64_t>(value.as_int));
+ break;
+ case TRACE_VALUE_TYPE_DOUBLE: {
+ // FIXME: base/json/json_writer.cc is using the same code,
+ // should be made into a common method.
+ std::string real;
+ double val = value.as_double;
+ if (std::isfinite(val)) {
+ real = DoubleToString(val);
+ // Ensure that the number has a .0 if there's no decimal or 'e'. This
+ // makes sure that when we read the JSON back, it's interpreted as a
+ // real rather than an int.
+ if (real.find('.') == std::string::npos &&
+ real.find('e') == std::string::npos &&
+ real.find('E') == std::string::npos) {
+ real.append(".0");
+ }
+ // The JSON spec requires that non-integer values in the range (-1,1)
+ // have a zero before the decimal point - ".52" is not valid, "0.52" is.
+ if (real[0] == '.') {
+ real.insert(0, "0");
+ } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
+ // "-.1" bad "-0.1" good
+ real.insert(1, "0");
+ }
+ } else if (std::isnan(val)){
+ // The JSON spec doesn't allow NaN and Infinity (since these are
+ // objects in EcmaScript). Use strings instead.
+ real = "\"NaN\"";
+ } else if (val < 0) {
+ real = "\"-Infinity\"";
+ } else {
+ real = "\"Infinity\"";
+ }
+ StringAppendF(out, "%s", real.c_str());
+ break;
+ }
+ case TRACE_VALUE_TYPE_POINTER:
+ // JSON only supports double and int numbers.
+ // So as not to lose bits from a 64-bit pointer, output as a hex string.
+ StringAppendF(
+ out, "\"0x%" PRIx64 "\"",
+ static_cast<uint64_t>(reinterpret_cast<uintptr_t>(value.as_pointer)));
+ break;
+ case TRACE_VALUE_TYPE_STRING:
+ case TRACE_VALUE_TYPE_COPY_STRING:
+ EscapeJSONString(value.as_string ? value.as_string : "NULL", true, out);
+ break;
+ default:
+ NOTREACHED() << "Don't know how to print this value";
+ break;
+ }
+}
+
+void TraceEvent::AppendAsJSON(
+ std::string* out,
+ const ArgumentFilterPredicate& argument_filter_predicate) const {
+ int64_t time_int64 = timestamp_.ToInternalValue();
+ int process_id;
+ int thread_id;
+ if ((flags_ & TRACE_EVENT_FLAG_HAS_PROCESS_ID) &&
+ process_id_ != kNullProcessId) {
+ process_id = process_id_;
+ thread_id = -1;
+ } else {
+ process_id = TraceLog::GetInstance()->process_id();
+ thread_id = thread_id_;
+ }
+ const char* category_group_name =
+ TraceLog::GetCategoryGroupName(category_group_enabled_);
+
+ // Category group checked at category creation time.
+ DCHECK(!strchr(name_, '"'));
+ StringAppendF(out, "{\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64
+ ",\"ph\":\"%c\",\"cat\":\"%s\",\"name\":",
+ process_id, thread_id, time_int64, phase_, category_group_name);
+ EscapeJSONString(name_, true, out);
+ *out += ",\"args\":";
+
+ // Output argument names and values, stop at first NULL argument name.
+ // TODO(oysteine): The dual predicates here are a bit ugly; if the filtering
+ // capabilities need to grow even more precise we should rethink this
+ // approach
+ ArgumentNameFilterPredicate argument_name_filter_predicate;
+ bool strip_args =
+ arg_names_[0] && !argument_filter_predicate.is_null() &&
+ !argument_filter_predicate.Run(category_group_name, name_,
+ &argument_name_filter_predicate);
+
+ if (strip_args) {
+ *out += "\"__stripped__\"";
+ } else {
+ *out += "{";
+
+ for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
+ if (i > 0)
+ *out += ",";
+ *out += "\"";
+ *out += arg_names_[i];
+ *out += "\":";
+
+ if (argument_name_filter_predicate.is_null() ||
+ argument_name_filter_predicate.Run(arg_names_[i])) {
+ if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+ convertable_values_[i]->AppendAsTraceFormat(out);
+ else
+ AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
+ } else {
+ *out += "\"__stripped__\"";
+ }
+ }
+
+ *out += "}";
+ }
+
+ if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
+ int64_t duration = duration_.ToInternalValue();
+ if (duration != -1)
+ StringAppendF(out, ",\"dur\":%" PRId64, duration);
+ if (!thread_timestamp_.is_null()) {
+ int64_t thread_duration = thread_duration_.ToInternalValue();
+ if (thread_duration != -1)
+ StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
+ }
+ }
+
+ // Output tts if thread_timestamp is valid.
+ if (!thread_timestamp_.is_null()) {
+ int64_t thread_time_int64 = thread_timestamp_.ToInternalValue();
+ StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
+ }
+
+ // Output async tts marker field if flag is set.
+ if (flags_ & TRACE_EVENT_FLAG_ASYNC_TTS) {
+ StringAppendF(out, ", \"use_async_tts\":1");
+ }
+
+ // If id_ is set, print it out as a hex string so we don't lose any
+ // bits (it might be a 64-bit pointer).
+ if (flags_ & TRACE_EVENT_FLAG_HAS_ID) {
+ if (scope_ != trace_event_internal::kGlobalScope)
+ StringAppendF(out, ",\"scope\":\"%s\"", scope_);
+ StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64_t>(id_));
+ }
+
+ if (flags_ & TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
+ StringAppendF(out, ",\"bp\":\"e\"");
+
+ if ((flags_ & TRACE_EVENT_FLAG_FLOW_OUT) ||
+ (flags_ & TRACE_EVENT_FLAG_FLOW_IN)) {
+ StringAppendF(out, ",\"bind_id\":\"0x%" PRIx64 "\"",
+ static_cast<uint64_t>(bind_id_));
+ }
+ if (flags_ & TRACE_EVENT_FLAG_FLOW_IN)
+ StringAppendF(out, ",\"flow_in\":true");
+ if (flags_ & TRACE_EVENT_FLAG_FLOW_OUT)
+ StringAppendF(out, ",\"flow_out\":true");
+
+ // Instant events also output their scope.
+ if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
+ char scope = '?';
+ switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
+ case TRACE_EVENT_SCOPE_GLOBAL:
+ scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
+ break;
+
+ case TRACE_EVENT_SCOPE_PROCESS:
+ scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
+ break;
+
+ case TRACE_EVENT_SCOPE_THREAD:
+ scope = TRACE_EVENT_SCOPE_NAME_THREAD;
+ break;
+ }
+ StringAppendF(out, ",\"s\":\"%c\"", scope);
+ }
+
+ *out += "}";
+}
+
+void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
+ *out << name_ << "[";
+ *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
+ *out << "]";
+ if (arg_names_[0]) {
+ *out << ", {";
+ for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
+ if (i > 0)
+ *out << ", ";
+ *out << arg_names_[i] << ":";
+ std::string value_as_text;
+
+ if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+ convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
+ else
+ AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);
+
+ *out << value_as_text;
+ }
+ *out << "}";
+ }
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/trace_event_impl.h b/libchrome/base/trace_event/trace_event_impl.h
new file mode 100644
index 0000000..4382217
--- /dev/null
+++ b/libchrome/base/trace_event/trace_event_impl.h
@@ -0,0 +1,189 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <stack>
+#include <string>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "base/observer_list.h"
+#include "base/single_thread_task_runner.h"
+#include "base/strings/string_util.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_local.h"
+#include "base/trace_event/trace_event_memory_overhead.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class WaitableEvent;
+class MessageLoop;
+
+namespace trace_event {
+
+typedef base::Callback<bool(const char* arg_name)> ArgumentNameFilterPredicate;
+
+typedef base::Callback<bool(const char* category_group_name,
+ const char* event_name,
+ ArgumentNameFilterPredicate*)>
+ ArgumentFilterPredicate;
+
+// For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided
+// class must implement this interface.
+class BASE_EXPORT ConvertableToTraceFormat {
+ public:
+ ConvertableToTraceFormat() {}
+ virtual ~ConvertableToTraceFormat() {}
+
+ // Append the class info to the provided |out| string. The appended
+ // data must be a valid JSON object. Strings must be properly quoted, and
+ // escaped. There is no processing applied to the content after it is
+ // appended.
+ virtual void AppendAsTraceFormat(std::string* out) const = 0;
+
+ virtual void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
+
+ std::string ToString() const {
+ std::string result;
+ AppendAsTraceFormat(&result);
+ return result;
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ConvertableToTraceFormat);
+};
+
+const int kTraceMaxNumArgs = 2;
+
+struct TraceEventHandle {
+ uint32_t chunk_seq;
+ // These numbers of bits must be kept consistent with
+ // TraceBufferChunk::kMaxTrunkIndex and
+ // TraceBufferChunk::kTraceBufferChunkSize (in trace_buffer.h).
+ unsigned chunk_index : 26;
+ unsigned event_index : 6;
+};
+
+class BASE_EXPORT TraceEvent {
+ public:
+ union TraceValue {
+ bool as_bool;
+ unsigned long long as_uint;
+ long long as_int;
+ double as_double;
+ const void* as_pointer;
+ const char* as_string;
+ };
+
+ TraceEvent();
+ ~TraceEvent();
+
+ void MoveFrom(std::unique_ptr<TraceEvent> other);
+
+ void Initialize(int thread_id,
+ TimeTicks timestamp,
+ ThreadTicks thread_timestamp,
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ const char* scope,
+ unsigned long long id,
+ unsigned long long bind_id,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned int flags);
+
+ void Reset();
+
+ void UpdateDuration(const TimeTicks& now, const ThreadTicks& thread_now);
+
+ void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
+
+ // Serialize event data to JSON
+ void AppendAsJSON(
+ std::string* out,
+ const ArgumentFilterPredicate& argument_filter_predicate) const;
+ void AppendPrettyPrinted(std::ostringstream* out) const;
+
+ static void AppendValueAsJSON(unsigned char type,
+ TraceValue value,
+ std::string* out);
+
+ TimeTicks timestamp() const { return timestamp_; }
+ ThreadTicks thread_timestamp() const { return thread_timestamp_; }
+ char phase() const { return phase_; }
+ int thread_id() const { return thread_id_; }
+ TimeDelta duration() const { return duration_; }
+ TimeDelta thread_duration() const { return thread_duration_; }
+ const char* scope() const { return scope_; }
+ unsigned long long id() const { return id_; }
+ unsigned int flags() const { return flags_; }
+
+ // Exposed for unittesting:
+
+ const std::string* parameter_copy_storage() const {
+ return parameter_copy_storage_.get();
+ }
+
+ const unsigned char* category_group_enabled() const {
+ return category_group_enabled_;
+ }
+
+ const char* name() const { return name_; }
+
+#if defined(OS_ANDROID)
+ void SendToATrace();
+#endif
+
+ private:
+ // Note: these are ordered by size (largest first) for optimal packing.
+ TimeTicks timestamp_;
+ ThreadTicks thread_timestamp_;
+ TimeDelta duration_;
+ TimeDelta thread_duration_;
+ // scope_ and id_ can be used to store phase-specific data.
+ const char* scope_;
+ unsigned long long id_;
+ TraceValue arg_values_[kTraceMaxNumArgs];
+ const char* arg_names_[kTraceMaxNumArgs];
+ std::unique_ptr<ConvertableToTraceFormat>
+ convertable_values_[kTraceMaxNumArgs];
+ const unsigned char* category_group_enabled_;
+ const char* name_;
+ std::unique_ptr<std::string> parameter_copy_storage_;
+ // Depending on TRACE_EVENT_FLAG_HAS_PROCESS_ID the event will have either:
+ // tid: thread_id_, pid: current_process_id (default case).
+ // tid: -1, pid: process_id_ (when flags_ & TRACE_EVENT_FLAG_HAS_PROCESS_ID).
+ union {
+ int thread_id_;
+ int process_id_;
+ };
+ unsigned int flags_;
+ unsigned long long bind_id_;
+ unsigned char arg_types_[kTraceMaxNumArgs];
+ char phase_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceEvent);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_
diff --git a/libchrome/base/trace_event/trace_event_memory_overhead.cc b/libchrome/base/trace_event/trace_event_memory_overhead.cc
new file mode 100644
index 0000000..23579cb
--- /dev/null
+++ b/libchrome/base/trace_event/trace_event_memory_overhead.cc
@@ -0,0 +1,156 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_memory_overhead.h"
+
+#include <algorithm>
+
+#include "base/bits.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/values.h"
+
+namespace base {
+namespace trace_event {
+
+TraceEventMemoryOverhead::TraceEventMemoryOverhead() {
+}
+
+TraceEventMemoryOverhead::~TraceEventMemoryOverhead() {
+}
+
+void TraceEventMemoryOverhead::AddOrCreateInternal(
+ const char* object_type,
+ size_t count,
+ size_t allocated_size_in_bytes,
+ size_t resident_size_in_bytes) {
+ auto it = allocated_objects_.find(object_type);
+ if (it == allocated_objects_.end()) {
+ allocated_objects_.insert(std::make_pair(
+ object_type,
+ ObjectCountAndSize(
+ {count, allocated_size_in_bytes, resident_size_in_bytes})));
+ return;
+ }
+ it->second.count += count;
+ it->second.allocated_size_in_bytes += allocated_size_in_bytes;
+ it->second.resident_size_in_bytes += resident_size_in_bytes;
+}
+
+void TraceEventMemoryOverhead::Add(const char* object_type,
+ size_t allocated_size_in_bytes) {
+ Add(object_type, allocated_size_in_bytes, allocated_size_in_bytes);
+}
+
+void TraceEventMemoryOverhead::Add(const char* object_type,
+ size_t allocated_size_in_bytes,
+ size_t resident_size_in_bytes) {
+ AddOrCreateInternal(object_type, 1, allocated_size_in_bytes,
+ resident_size_in_bytes);
+}
+
+void TraceEventMemoryOverhead::AddString(const std::string& str) {
+ // The numbers below are empirical and mainly based on profiling of real-world
+ // std::string implementations:
+ // - even short strings end up malloc()-ing at least 32 bytes.
+ // - longer strings seem to malloc() multiples of 16 bytes.
+ const size_t capacity = bits::Align(str.capacity(), 16);
+ Add("std::string", sizeof(std::string) + std::max<size_t>(capacity, 32u));
+}
+
+void TraceEventMemoryOverhead::AddRefCountedString(
+ const RefCountedString& str) {
+ Add("RefCountedString", sizeof(RefCountedString));
+ AddString(str.data());
+}
+
+void TraceEventMemoryOverhead::AddValue(const Value& value) {
+ switch (value.GetType()) {
+ case Value::TYPE_NULL:
+ case Value::TYPE_BOOLEAN:
+ case Value::TYPE_INTEGER:
+ case Value::TYPE_DOUBLE:
+ Add("FundamentalValue", sizeof(Value));
+ break;
+
+ case Value::TYPE_STRING: {
+ const StringValue* string_value = nullptr;
+ value.GetAsString(&string_value);
+ Add("StringValue", sizeof(StringValue));
+ AddString(string_value->GetString());
+ } break;
+
+ case Value::TYPE_BINARY: {
+ const BinaryValue* binary_value = nullptr;
+ value.GetAsBinary(&binary_value);
+ Add("BinaryValue", sizeof(BinaryValue) + binary_value->GetSize());
+ } break;
+
+ case Value::TYPE_DICTIONARY: {
+ const DictionaryValue* dictionary_value = nullptr;
+ value.GetAsDictionary(&dictionary_value);
+ Add("DictionaryValue", sizeof(DictionaryValue));
+ for (DictionaryValue::Iterator it(*dictionary_value); !it.IsAtEnd();
+ it.Advance()) {
+ AddString(it.key());
+ AddValue(it.value());
+ }
+ } break;
+
+ case Value::TYPE_LIST: {
+ const ListValue* list_value = nullptr;
+ value.GetAsList(&list_value);
+ Add("ListValue", sizeof(ListValue));
+ for (const auto& v : *list_value)
+ AddValue(*v);
+ } break;
+
+ default:
+ NOTREACHED();
+ }
+}
+
+void TraceEventMemoryOverhead::AddSelf() {
+ size_t estimated_size = sizeof(*this);
+ // If the SmallMap did overflow its static capacity, its elements will be
+ // allocated on the heap and have to be accounted separately.
+ if (allocated_objects_.UsingFullMap())
+ estimated_size += sizeof(map_type::value_type) * allocated_objects_.size();
+ Add("TraceEventMemoryOverhead", estimated_size);
+}
+
+size_t TraceEventMemoryOverhead::GetCount(const char* object_type) const {
+ const auto& it = allocated_objects_.find(object_type);
+ if (it == allocated_objects_.end())
+ return 0u;
+ return it->second.count;
+}
+
+void TraceEventMemoryOverhead::Update(const TraceEventMemoryOverhead& other) {
+ for (const auto& it : other.allocated_objects_) {
+ AddOrCreateInternal(it.first, it.second.count,
+ it.second.allocated_size_in_bytes,
+ it.second.resident_size_in_bytes);
+ }
+}
+
+void TraceEventMemoryOverhead::DumpInto(const char* base_name,
+ ProcessMemoryDump* pmd) const {
+ for (const auto& it : allocated_objects_) {
+ std::string dump_name = StringPrintf("%s/%s", base_name, it.first);
+ MemoryAllocatorDump* mad = pmd->CreateAllocatorDump(dump_name);
+ mad->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes,
+ it.second.allocated_size_in_bytes);
+ mad->AddScalar("resident_size", MemoryAllocatorDump::kUnitsBytes,
+ it.second.resident_size_in_bytes);
+ mad->AddScalar(MemoryAllocatorDump::kNameObjectCount,
+ MemoryAllocatorDump::kUnitsObjects, it.second.count);
+ }
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/trace_event_memory_overhead.h b/libchrome/base/trace_event/trace_event_memory_overhead.h
new file mode 100644
index 0000000..a69c93f
--- /dev/null
+++ b/libchrome/base/trace_event/trace_event_memory_overhead.h
@@ -0,0 +1,77 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_MEMORY_OVERHEAD_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_MEMORY_OVERHEAD_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+#include "base/containers/hash_tables.h"
+#include "base/containers/small_map.h"
+#include "base/macros.h"
+
+namespace base {
+
+class RefCountedString;
+class Value;
+
+namespace trace_event {
+
+class ProcessMemoryDump;
+
+// Used to estimate the memory overhead of the tracing infrastructure.
+class BASE_EXPORT TraceEventMemoryOverhead {
+ public:
+ TraceEventMemoryOverhead();
+ ~TraceEventMemoryOverhead();
+
+ // Use this method to account the overhead of an object for which an estimate
+ // is known for both the allocated and resident memory.
+ void Add(const char* object_type,
+ size_t allocated_size_in_bytes,
+ size_t resident_size_in_bytes);
+
+ // Similar to Add() above, but assumes that
+ // |resident_size_in_bytes| == |allocated_size_in_bytes|.
+ void Add(const char* object_type, size_t allocated_size_in_bytes);
+
+ // Specialized profiling functions for commonly used object types.
+ void AddString(const std::string& str);
+ void AddValue(const Value& value);
+ void AddRefCountedString(const RefCountedString& str);
+
+ // Call this after all the Add* methods above to account the memory used by
+ // this TraceEventMemoryOverhead instance itself.
+ void AddSelf();
+
+ // Retrieves the count, that is, the count of Add*(|object_type|, ...) calls.
+ size_t GetCount(const char* object_type) const;
+
+ // Adds up and merges all the values from |other| to this instance.
+ void Update(const TraceEventMemoryOverhead& other);
+
+ void DumpInto(const char* base_name, ProcessMemoryDump* pmd) const;
+
+ private:
+ struct ObjectCountAndSize {
+ size_t count;
+ size_t allocated_size_in_bytes;
+ size_t resident_size_in_bytes;
+ };
+ using map_type = SmallMap<hash_map<const char*, ObjectCountAndSize>, 16>;
+ map_type allocated_objects_;
+
+ void AddOrCreateInternal(const char* object_type,
+ size_t count,
+ size_t allocated_size_in_bytes,
+ size_t resident_size_in_bytes);
+
+ DISALLOW_COPY_AND_ASSIGN(TraceEventMemoryOverhead);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_MEMORY_OVERHEAD_H_
diff --git a/libchrome/base/trace_event/trace_event_synthetic_delay.cc b/libchrome/base/trace_event/trace_event_synthetic_delay.cc
new file mode 100644
index 0000000..b6ce284
--- /dev/null
+++ b/libchrome/base/trace_event/trace_event_synthetic_delay.cc
@@ -0,0 +1,231 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/macros.h"
+#include "base/memory/singleton.h"
+#include "base/trace_event/trace_event_synthetic_delay.h"
+
+namespace {
+const int kMaxSyntheticDelays = 32;
+} // namespace
+
+namespace base {
+namespace trace_event {
+
+TraceEventSyntheticDelayClock::TraceEventSyntheticDelayClock() {}
+TraceEventSyntheticDelayClock::~TraceEventSyntheticDelayClock() {}
+
+class TraceEventSyntheticDelayRegistry : public TraceEventSyntheticDelayClock {
+ public:
+ static TraceEventSyntheticDelayRegistry* GetInstance();
+
+ TraceEventSyntheticDelay* GetOrCreateDelay(const char* name);
+ void ResetAllDelays();
+
+ // TraceEventSyntheticDelayClock implementation.
+ TimeTicks Now() override;
+
+ private:
+ TraceEventSyntheticDelayRegistry();
+
+ friend struct DefaultSingletonTraits<TraceEventSyntheticDelayRegistry>;
+
+ Lock lock_;
+ TraceEventSyntheticDelay delays_[kMaxSyntheticDelays];
+ TraceEventSyntheticDelay dummy_delay_;
+ subtle::Atomic32 delay_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceEventSyntheticDelayRegistry);
+};
+
+TraceEventSyntheticDelay::TraceEventSyntheticDelay()
+ : mode_(STATIC), begin_count_(0), trigger_count_(0), clock_(NULL) {}
+
+TraceEventSyntheticDelay::~TraceEventSyntheticDelay() {}
+
+TraceEventSyntheticDelay* TraceEventSyntheticDelay::Lookup(
+ const std::string& name) {
+ return TraceEventSyntheticDelayRegistry::GetInstance()->GetOrCreateDelay(
+ name.c_str());
+}
+
+void TraceEventSyntheticDelay::Initialize(
+ const std::string& name,
+ TraceEventSyntheticDelayClock* clock) {
+ name_ = name;
+ clock_ = clock;
+}
+
+void TraceEventSyntheticDelay::SetTargetDuration(TimeDelta target_duration) {
+ AutoLock lock(lock_);
+ target_duration_ = target_duration;
+ trigger_count_ = 0;
+ begin_count_ = 0;
+}
+
+void TraceEventSyntheticDelay::SetMode(Mode mode) {
+ AutoLock lock(lock_);
+ mode_ = mode;
+}
+
+void TraceEventSyntheticDelay::SetClock(TraceEventSyntheticDelayClock* clock) {
+ AutoLock lock(lock_);
+ clock_ = clock;
+}
+
+void TraceEventSyntheticDelay::Begin() {
+ // Note that we check for a non-zero target duration without locking to keep
+ // things quick for the common case when delays are disabled. Since the delay
+ // calculation is done with a lock held, it will always be correct. The only
+ // downside of this is that we may fail to apply some delays when the target
+ // duration changes.
+ if (!target_duration_.ToInternalValue())
+ return;
+
+ TimeTicks start_time = clock_->Now();
+ {
+ AutoLock lock(lock_);
+ if (++begin_count_ != 1)
+ return;
+ end_time_ = CalculateEndTimeLocked(start_time);
+ }
+}
+
+void TraceEventSyntheticDelay::BeginParallel(TimeTicks* out_end_time) {
+ // See note in Begin().
+ if (!target_duration_.ToInternalValue()) {
+ *out_end_time = TimeTicks();
+ return;
+ }
+
+ TimeTicks start_time = clock_->Now();
+ {
+ AutoLock lock(lock_);
+ *out_end_time = CalculateEndTimeLocked(start_time);
+ }
+}
+
+void TraceEventSyntheticDelay::End() {
+ // See note in Begin().
+ if (!target_duration_.ToInternalValue())
+ return;
+
+ TimeTicks end_time;
+ {
+ AutoLock lock(lock_);
+ if (!begin_count_ || --begin_count_ != 0)
+ return;
+ end_time = end_time_;
+ }
+ if (!end_time.is_null())
+ ApplyDelay(end_time);
+}
+
+void TraceEventSyntheticDelay::EndParallel(TimeTicks end_time) {
+ if (!end_time.is_null())
+ ApplyDelay(end_time);
+}
+
+TimeTicks TraceEventSyntheticDelay::CalculateEndTimeLocked(
+ TimeTicks start_time) {
+ if (mode_ == ONE_SHOT && trigger_count_++)
+ return TimeTicks();
+ else if (mode_ == ALTERNATING && trigger_count_++ % 2)
+ return TimeTicks();
+ return start_time + target_duration_;
+}
+
+void TraceEventSyntheticDelay::ApplyDelay(TimeTicks end_time) {
+ TRACE_EVENT0("synthetic_delay", name_.c_str());
+ while (clock_->Now() < end_time) {
+ // Busy loop.
+ }
+}
+
+TraceEventSyntheticDelayRegistry*
+TraceEventSyntheticDelayRegistry::GetInstance() {
+ return Singleton<
+ TraceEventSyntheticDelayRegistry,
+ LeakySingletonTraits<TraceEventSyntheticDelayRegistry> >::get();
+}
+
+TraceEventSyntheticDelayRegistry::TraceEventSyntheticDelayRegistry()
+ : delay_count_(0) {}
+
+TraceEventSyntheticDelay* TraceEventSyntheticDelayRegistry::GetOrCreateDelay(
+ const char* name) {
+ // Try to find an existing delay first without locking to make the common case
+ // fast.
+ int delay_count = subtle::Acquire_Load(&delay_count_);
+ for (int i = 0; i < delay_count; ++i) {
+ if (!strcmp(name, delays_[i].name_.c_str()))
+ return &delays_[i];
+ }
+
+ AutoLock lock(lock_);
+ delay_count = subtle::Acquire_Load(&delay_count_);
+ for (int i = 0; i < delay_count; ++i) {
+ if (!strcmp(name, delays_[i].name_.c_str()))
+ return &delays_[i];
+ }
+
+ DCHECK(delay_count < kMaxSyntheticDelays)
+ << "must increase kMaxSyntheticDelays";
+ if (delay_count >= kMaxSyntheticDelays)
+ return &dummy_delay_;
+
+ delays_[delay_count].Initialize(std::string(name), this);
+ subtle::Release_Store(&delay_count_, delay_count + 1);
+ return &delays_[delay_count];
+}
+
+TimeTicks TraceEventSyntheticDelayRegistry::Now() {
+ return TimeTicks::Now();
+}
+
+void TraceEventSyntheticDelayRegistry::ResetAllDelays() {
+ AutoLock lock(lock_);
+ int delay_count = subtle::Acquire_Load(&delay_count_);
+ for (int i = 0; i < delay_count; ++i) {
+ delays_[i].SetTargetDuration(TimeDelta());
+ delays_[i].SetClock(this);
+ }
+}
+
+void ResetTraceEventSyntheticDelays() {
+ TraceEventSyntheticDelayRegistry::GetInstance()->ResetAllDelays();
+}
+
+} // namespace trace_event
+} // namespace base
+
+namespace trace_event_internal {
+
+ScopedSyntheticDelay::ScopedSyntheticDelay(const char* name,
+ base::subtle::AtomicWord* impl_ptr)
+ : delay_impl_(GetOrCreateDelay(name, impl_ptr)) {
+ delay_impl_->BeginParallel(&end_time_);
+}
+
+ScopedSyntheticDelay::~ScopedSyntheticDelay() {
+ delay_impl_->EndParallel(end_time_);
+}
+
+base::trace_event::TraceEventSyntheticDelay* GetOrCreateDelay(
+ const char* name,
+ base::subtle::AtomicWord* impl_ptr) {
+ base::trace_event::TraceEventSyntheticDelay* delay_impl =
+ reinterpret_cast<base::trace_event::TraceEventSyntheticDelay*>(
+ base::subtle::Acquire_Load(impl_ptr));
+ if (!delay_impl) {
+ delay_impl =
+ base::trace_event::TraceEventSyntheticDelayRegistry::GetInstance()
+ ->GetOrCreateDelay(name);
+ base::subtle::Release_Store(
+ impl_ptr, reinterpret_cast<base::subtle::AtomicWord>(delay_impl));
+ }
+ return delay_impl;
+}
+
+} // namespace trace_event_internal
diff --git a/libchrome/base/trace_event/trace_event_synthetic_delay.h b/libchrome/base/trace_event/trace_event_synthetic_delay.h
new file mode 100644
index 0000000..59e2842
--- /dev/null
+++ b/libchrome/base/trace_event/trace_event_synthetic_delay.h
@@ -0,0 +1,167 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The synthetic delay framework makes it possible to dynamically inject
+// arbitrary delays into different parts of the codebase. This can be used,
+// for instance, for testing various task scheduling algorithms.
+//
+// The delays are specified in terms of a target duration for a given block of
+// code. If the code executes faster than the duration, the thread is made to
+// sleep until the deadline is met.
+//
+// Code can be instrumented for delays with two sets of macros. First, for
+// delays that should apply within a scope, use the following macro:
+//
+// TRACE_EVENT_SYNTHETIC_DELAY("cc.LayerTreeHost.DrawAndSwap");
+//
+// For delaying operations that span multiple scopes, use:
+//
+// TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("cc.Scheduler.BeginMainFrame");
+// ...
+// TRACE_EVENT_SYNTHETIC_DELAY_END("cc.Scheduler.BeginMainFrame");
+//
+// Here BEGIN establishes the start time for the delay and END executes the
+// delay based on the remaining time. If BEGIN is called multiple times in a
+// row, END should be called a corresponding number of times. Only the last
+// call to END will have an effect.
+//
+// Note that a single delay may begin on one thread and end on another. This
+// implies that a single delay cannot be applied in several threads at once.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_SYNTHETIC_DELAY_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_SYNTHETIC_DELAY_H_
+
+#include "base/atomicops.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+
+// Apply a named delay in the current scope.
+#define TRACE_EVENT_SYNTHETIC_DELAY(name) \
+ static base::subtle::AtomicWord INTERNAL_TRACE_EVENT_UID(impl_ptr) = 0; \
+ trace_event_internal::ScopedSyntheticDelay INTERNAL_TRACE_EVENT_UID(delay)( \
+ name, &INTERNAL_TRACE_EVENT_UID(impl_ptr));
+
+// Begin a named delay, establishing its timing start point. May be called
+// multiple times as long as the calls to TRACE_EVENT_SYNTHETIC_DELAY_END are
+// balanced. Only the first call records the timing start point.
+#define TRACE_EVENT_SYNTHETIC_DELAY_BEGIN(name) \
+ do { \
+ static base::subtle::AtomicWord impl_ptr = 0; \
+ trace_event_internal::GetOrCreateDelay(name, &impl_ptr)->Begin(); \
+ } while (false)
+
+// End a named delay. The delay is applied only if this call matches the
+// first corresponding call to TRACE_EVENT_SYNTHETIC_DELAY_BEGIN with the
+// same delay.
+#define TRACE_EVENT_SYNTHETIC_DELAY_END(name) \
+ do { \
+ static base::subtle::AtomicWord impl_ptr = 0; \
+ trace_event_internal::GetOrCreateDelay(name, &impl_ptr)->End(); \
+ } while (false)
+
+template <typename Type>
+struct DefaultSingletonTraits;
+
+namespace base {
+namespace trace_event {
+
+// Time source for computing delay durations. Used for testing.
+class TRACE_EVENT_API_CLASS_EXPORT TraceEventSyntheticDelayClock {
+ public:
+ TraceEventSyntheticDelayClock();
+ virtual ~TraceEventSyntheticDelayClock();
+ virtual base::TimeTicks Now() = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TraceEventSyntheticDelayClock);
+};
+
+// Single delay point instance.
+class TRACE_EVENT_API_CLASS_EXPORT TraceEventSyntheticDelay {
+ public:
+ enum Mode {
+ STATIC, // Apply the configured delay every time.
+ ONE_SHOT, // Apply the configured delay just once.
+ ALTERNATING // Apply the configured delay every other time.
+ };
+
+ // Returns an existing named delay instance or creates a new one with |name|.
+ static TraceEventSyntheticDelay* Lookup(const std::string& name);
+
+ void SetTargetDuration(TimeDelta target_duration);
+ void SetMode(Mode mode);
+ void SetClock(TraceEventSyntheticDelayClock* clock);
+
+ // Begin the delay, establishing its timing start point. May be called
+ // multiple times as long as the calls to End() are balanced. Only the first
+ // call records the timing start point.
+ void Begin();
+
+ // End the delay. The delay is applied only if this call matches the first
+ // corresponding call to Begin() with the same delay.
+ void End();
+
+ // Begin a parallel instance of the delay. Several parallel instances may be
+ // active simultaneously and will complete independently. The computed end
+ // time for the delay is stored in |out_end_time|, which should later be
+ // passed to EndParallel().
+ void BeginParallel(base::TimeTicks* out_end_time);
+
+ // End a previously started parallel delay. |end_time| is the delay end point
+ // computed by BeginParallel().
+ void EndParallel(base::TimeTicks end_time);
+
+ private:
+ TraceEventSyntheticDelay();
+ ~TraceEventSyntheticDelay();
+ friend class TraceEventSyntheticDelayRegistry;
+
+ void Initialize(const std::string& name,
+ TraceEventSyntheticDelayClock* clock);
+ base::TimeTicks CalculateEndTimeLocked(base::TimeTicks start_time);
+ void ApplyDelay(base::TimeTicks end_time);
+
+ Lock lock_;
+ Mode mode_;
+ std::string name_;
+ int begin_count_;
+ int trigger_count_;
+ base::TimeTicks end_time_;
+ base::TimeDelta target_duration_;
+ TraceEventSyntheticDelayClock* clock_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceEventSyntheticDelay);
+};
+
+// Set the target durations of all registered synthetic delay points to zero.
+TRACE_EVENT_API_CLASS_EXPORT void ResetTraceEventSyntheticDelays();
+
+} // namespace trace_event
+} // namespace base
+
+namespace trace_event_internal {
+
+// Helper class for scoped delays. Do not use directly.
+class TRACE_EVENT_API_CLASS_EXPORT ScopedSyntheticDelay {
+ public:
+ explicit ScopedSyntheticDelay(const char* name,
+ base::subtle::AtomicWord* impl_ptr);
+ ~ScopedSyntheticDelay();
+
+ private:
+ base::trace_event::TraceEventSyntheticDelay* delay_impl_;
+ base::TimeTicks end_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedSyntheticDelay);
+};
+
+// Helper for registering delays. Do not use directly.
+TRACE_EVENT_API_CLASS_EXPORT base::trace_event::TraceEventSyntheticDelay*
+ GetOrCreateDelay(const char* name, base::subtle::AtomicWord* impl_ptr);
+
+} // namespace trace_event_internal
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_SYNTHETIC_DELAY_H_
diff --git a/libchrome/base/trace_event/trace_event_synthetic_delay_unittest.cc b/libchrome/base/trace_event/trace_event_synthetic_delay_unittest.cc
new file mode 100644
index 0000000..97a4580
--- /dev/null
+++ b/libchrome/base/trace_event/trace_event_synthetic_delay_unittest.cc
@@ -0,0 +1,157 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_synthetic_delay.h"
+
+#include <stdint.h>
+
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+namespace {
+
+const int kTargetDurationMs = 100;
+// Allow some leeway in timings to make it possible to run these tests with a
+// wall clock time source too.
+const int kShortDurationMs = 10;
+
+} // namespace
+
+class TraceEventSyntheticDelayTest : public testing::Test,
+ public TraceEventSyntheticDelayClock {
+ public:
+ TraceEventSyntheticDelayTest() {}
+ ~TraceEventSyntheticDelayTest() override { ResetTraceEventSyntheticDelays(); }
+
+ // TraceEventSyntheticDelayClock implementation.
+ base::TimeTicks Now() override {
+ AdvanceTime(base::TimeDelta::FromMilliseconds(kShortDurationMs / 10));
+ return now_;
+ }
+
+ TraceEventSyntheticDelay* ConfigureDelay(const char* name) {
+ TraceEventSyntheticDelay* delay = TraceEventSyntheticDelay::Lookup(name);
+ delay->SetClock(this);
+ delay->SetTargetDuration(
+ base::TimeDelta::FromMilliseconds(kTargetDurationMs));
+ return delay;
+ }
+
+ void AdvanceTime(base::TimeDelta delta) { now_ += delta; }
+
+ int64_t TestFunction() {
+ base::TimeTicks start = Now();
+ { TRACE_EVENT_SYNTHETIC_DELAY("test.Delay"); }
+ return (Now() - start).InMilliseconds();
+ }
+
+ int64_t AsyncTestFunctionBegin() {
+ base::TimeTicks start = Now();
+ { TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("test.AsyncDelay"); }
+ return (Now() - start).InMilliseconds();
+ }
+
+ int64_t AsyncTestFunctionEnd() {
+ base::TimeTicks start = Now();
+ { TRACE_EVENT_SYNTHETIC_DELAY_END("test.AsyncDelay"); }
+ return (Now() - start).InMilliseconds();
+ }
+
+ private:
+ base::TimeTicks now_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceEventSyntheticDelayTest);
+};
+
+TEST_F(TraceEventSyntheticDelayTest, StaticDelay) {
+ TraceEventSyntheticDelay* delay = ConfigureDelay("test.Delay");
+ delay->SetMode(TraceEventSyntheticDelay::STATIC);
+ EXPECT_GE(TestFunction(), kTargetDurationMs);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, OneShotDelay) {
+ TraceEventSyntheticDelay* delay = ConfigureDelay("test.Delay");
+ delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
+ EXPECT_GE(TestFunction(), kTargetDurationMs);
+ EXPECT_LT(TestFunction(), kShortDurationMs);
+
+ delay->SetTargetDuration(
+ base::TimeDelta::FromMilliseconds(kTargetDurationMs));
+ EXPECT_GE(TestFunction(), kTargetDurationMs);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, AlternatingDelay) {
+ TraceEventSyntheticDelay* delay = ConfigureDelay("test.Delay");
+ delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
+ EXPECT_GE(TestFunction(), kTargetDurationMs);
+ EXPECT_LT(TestFunction(), kShortDurationMs);
+ EXPECT_GE(TestFunction(), kTargetDurationMs);
+ EXPECT_LT(TestFunction(), kShortDurationMs);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, AsyncDelay) {
+ ConfigureDelay("test.AsyncDelay");
+ EXPECT_LT(AsyncTestFunctionBegin(), kShortDurationMs);
+ EXPECT_GE(AsyncTestFunctionEnd(), kTargetDurationMs / 2);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, AsyncDelayExceeded) {
+ ConfigureDelay("test.AsyncDelay");
+ EXPECT_LT(AsyncTestFunctionBegin(), kShortDurationMs);
+ AdvanceTime(base::TimeDelta::FromMilliseconds(kTargetDurationMs));
+ EXPECT_LT(AsyncTestFunctionEnd(), kShortDurationMs);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, AsyncDelayNoActivation) {
+ ConfigureDelay("test.AsyncDelay");
+ EXPECT_LT(AsyncTestFunctionEnd(), kShortDurationMs);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, AsyncDelayNested) {
+ ConfigureDelay("test.AsyncDelay");
+ EXPECT_LT(AsyncTestFunctionBegin(), kShortDurationMs);
+ EXPECT_LT(AsyncTestFunctionBegin(), kShortDurationMs);
+ EXPECT_LT(AsyncTestFunctionEnd(), kShortDurationMs);
+ EXPECT_GE(AsyncTestFunctionEnd(), kTargetDurationMs / 2);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, AsyncDelayUnbalanced) {
+ ConfigureDelay("test.AsyncDelay");
+ EXPECT_LT(AsyncTestFunctionBegin(), kShortDurationMs);
+ EXPECT_GE(AsyncTestFunctionEnd(), kTargetDurationMs / 2);
+ EXPECT_LT(AsyncTestFunctionEnd(), kShortDurationMs);
+
+ EXPECT_LT(AsyncTestFunctionBegin(), kShortDurationMs);
+ EXPECT_GE(AsyncTestFunctionEnd(), kTargetDurationMs / 2);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, ResetDelays) {
+ ConfigureDelay("test.Delay");
+ ResetTraceEventSyntheticDelays();
+ EXPECT_LT(TestFunction(), kShortDurationMs);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, BeginParallel) {
+ TraceEventSyntheticDelay* delay = ConfigureDelay("test.AsyncDelay");
+ base::TimeTicks end_times[2];
+ base::TimeTicks start_time = Now();
+
+ delay->BeginParallel(&end_times[0]);
+ EXPECT_FALSE(end_times[0].is_null());
+
+ delay->BeginParallel(&end_times[1]);
+ EXPECT_FALSE(end_times[1].is_null());
+
+ delay->EndParallel(end_times[0]);
+ EXPECT_GE((Now() - start_time).InMilliseconds(), kTargetDurationMs);
+
+ start_time = Now();
+ delay->EndParallel(end_times[1]);
+ EXPECT_LT((Now() - start_time).InMilliseconds(), kShortDurationMs);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/trace_event_system_stats_monitor.h b/libchrome/base/trace_event/trace_event_system_stats_monitor.h
new file mode 100644
index 0000000..14aa568
--- /dev/null
+++ b/libchrome/base/trace_event/trace_event_system_stats_monitor.h
@@ -0,0 +1,76 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_SYSTEM_STATS_MONITOR_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_SYSTEM_STATS_MONITOR_H_
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/process/process_metrics.h"
+#include "base/timer/timer.h"
+#include "base/trace_event/trace_log.h"
+
+namespace base {
+
+class SingleThreadTaskRunner;
+
+namespace trace_event {
+
+// Watches for chrome://tracing to be enabled or disabled. When tracing is
+// enabled, also enables system events profiling. This class is the preferred
+// way to turn system tracing on and off.
+class BASE_EXPORT TraceEventSystemStatsMonitor
+ : public TraceLog::EnabledStateObserver {
+ public:
+ // Length of time interval between stat profiles.
+ static const int kSamplingIntervalMilliseconds = 2000;
+
+ // |task_runner| must be the primary thread for the client
+ // process, e.g. the UI thread in a browser.
+ explicit TraceEventSystemStatsMonitor(
+ scoped_refptr<SingleThreadTaskRunner> task_runner);
+
+ ~TraceEventSystemStatsMonitor() override;
+
+  // base::trace_event::TraceLog::EnabledStateObserver overrides:
+ void OnTraceLogEnabled() override;
+ void OnTraceLogDisabled() override;
+
+ // Retrieves system profiling at the current time.
+ void DumpSystemStats();
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(TraceSystemStatsMonitorTest,
+ TraceEventSystemStatsMonitor);
+
+ bool IsTimerRunningForTest() const;
+
+ void StartProfiling();
+
+ void StopProfiling();
+
+ // Ensures the observer starts and stops tracing on the primary thread.
+ scoped_refptr<SingleThreadTaskRunner> task_runner_;
+
+ // Timer to schedule system profile dumps.
+ RepeatingTimer dump_timer_;
+
+ WeakPtrFactory<TraceEventSystemStatsMonitor> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceEventSystemStatsMonitor);
+};
+
+// Converts system memory profiling stats in |input| to
+// trace event compatible JSON and appends to |output|. Visible for testing.
+BASE_EXPORT void AppendSystemProfileAsTraceFormat(const SystemMetrics&
+ system_stats,
+ std::string* output);
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_SYSTEM_STATS_MONITOR_H_
diff --git a/libchrome/base/trace_event/trace_event_unittest.cc b/libchrome/base/trace_event/trace_event_unittest.cc
new file mode 100644
index 0000000..ff8ec2d
--- /dev/null
+++ b/libchrome/base/trace_event/trace_event_unittest.cc
@@ -0,0 +1,3184 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event.h"
+
+#include <math.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <cstdlib>
+#include <memory>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/json/json_reader.h"
+#include "base/json/json_writer.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/memory/singleton.h"
+#include "base/process/process_handle.h"
+#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
+#include "base/strings/pattern.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_buffer.h"
+#include "base/trace_event/trace_event_synthetic_delay.h"
+#include "base/values.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+enum CompareOp {
+ IS_EQUAL,
+ IS_NOT_EQUAL,
+};
+
+struct JsonKeyValue {
+ const char* key;
+ const char* value;
+ CompareOp op;
+};
+
+const int kThreadId = 42;
+const int kAsyncId = 5;
+const char kAsyncIdStr[] = "0x5";
+const int kAsyncId2 = 6;
+const char kAsyncId2Str[] = "0x6";
+const int kFlowId = 7;
+const char kFlowIdStr[] = "0x7";
+
+const char kRecordAllCategoryFilter[] = "*";
+
+class TraceEventTestFixture : public testing::Test {
+ public:
+ void OnTraceDataCollected(
+ WaitableEvent* flush_complete_event,
+ const scoped_refptr<base::RefCountedString>& events_str,
+ bool has_more_events);
+ void OnWatchEventMatched() {
+ ++event_watch_notification_;
+ }
+ DictionaryValue* FindMatchingTraceEntry(const JsonKeyValue* key_values);
+ DictionaryValue* FindNamePhase(const char* name, const char* phase);
+ DictionaryValue* FindNamePhaseKeyValue(const char* name,
+ const char* phase,
+ const char* key,
+ const char* value);
+ void DropTracedMetadataRecords();
+ bool FindMatchingValue(const char* key,
+ const char* value);
+ bool FindNonMatchingValue(const char* key,
+ const char* value);
+ void Clear() {
+ trace_parsed_.Clear();
+ json_output_.json_output.clear();
+ }
+
+ void BeginTrace() {
+ BeginSpecificTrace("*");
+ }
+
+ void BeginSpecificTrace(const std::string& filter) {
+ event_watch_notification_ = 0;
+ TraceLog::GetInstance()->SetEnabled(TraceConfig(filter, ""),
+ TraceLog::RECORDING_MODE);
+ }
+
+ void CancelTrace() {
+ WaitableEvent flush_complete_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ CancelTraceAsync(&flush_complete_event);
+ flush_complete_event.Wait();
+ }
+
+ void EndTraceAndFlush() {
+ num_flush_callbacks_ = 0;
+ WaitableEvent flush_complete_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ EndTraceAndFlushAsync(&flush_complete_event);
+ flush_complete_event.Wait();
+ }
+
+ // Used when testing thread-local buffers which requires the thread initiating
+ // flush to have a message loop.
+ void EndTraceAndFlushInThreadWithMessageLoop() {
+ WaitableEvent flush_complete_event(
+ WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ Thread flush_thread("flush");
+ flush_thread.Start();
+ flush_thread.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&TraceEventTestFixture::EndTraceAndFlushAsync,
+ base::Unretained(this), &flush_complete_event));
+ flush_complete_event.Wait();
+ }
+
+ void CancelTraceAsync(WaitableEvent* flush_complete_event) {
+ TraceLog::GetInstance()->CancelTracing(
+ base::Bind(&TraceEventTestFixture::OnTraceDataCollected,
+ base::Unretained(static_cast<TraceEventTestFixture*>(this)),
+ base::Unretained(flush_complete_event)));
+ }
+
+ void EndTraceAndFlushAsync(WaitableEvent* flush_complete_event) {
+ TraceLog::GetInstance()->SetDisabled();
+ TraceLog::GetInstance()->Flush(
+ base::Bind(&TraceEventTestFixture::OnTraceDataCollected,
+ base::Unretained(static_cast<TraceEventTestFixture*>(this)),
+ base::Unretained(flush_complete_event)));
+ }
+
+ void SetUp() override {
+ const char* name = PlatformThread::GetName();
+ old_thread_name_ = name ? strdup(name) : NULL;
+
+ TraceLog::DeleteForTesting();
+ TraceLog* tracelog = TraceLog::GetInstance();
+ ASSERT_TRUE(tracelog);
+ ASSERT_FALSE(tracelog->IsEnabled());
+ trace_buffer_.SetOutputCallback(json_output_.GetCallback());
+ event_watch_notification_ = 0;
+ num_flush_callbacks_ = 0;
+ }
+ void TearDown() override {
+ if (TraceLog::GetInstance())
+ EXPECT_FALSE(TraceLog::GetInstance()->IsEnabled());
+ PlatformThread::SetName(old_thread_name_ ? old_thread_name_ : "");
+ free(old_thread_name_);
+ old_thread_name_ = NULL;
+ // We want our singleton torn down after each test.
+ TraceLog::DeleteForTesting();
+ }
+
+ char* old_thread_name_;
+ ListValue trace_parsed_;
+ TraceResultBuffer trace_buffer_;
+ TraceResultBuffer::SimpleOutput json_output_;
+ int event_watch_notification_;
+ size_t num_flush_callbacks_;
+
+ private:
+ // We want our singleton torn down after each test.
+ ShadowingAtExitManager at_exit_manager_;
+ Lock lock_;
+};
+
+void TraceEventTestFixture::OnTraceDataCollected(
+ WaitableEvent* flush_complete_event,
+ const scoped_refptr<base::RefCountedString>& events_str,
+ bool has_more_events) {
+ num_flush_callbacks_++;
+ if (num_flush_callbacks_ > 1) {
+ EXPECT_FALSE(events_str->data().empty());
+ }
+ AutoLock lock(lock_);
+ json_output_.json_output.clear();
+ trace_buffer_.Start();
+ trace_buffer_.AddFragment(events_str->data());
+ trace_buffer_.Finish();
+
+ std::unique_ptr<Value> root = base::JSONReader::Read(
+ json_output_.json_output, JSON_PARSE_RFC | JSON_DETACHABLE_CHILDREN);
+
+ if (!root.get()) {
+ LOG(ERROR) << json_output_.json_output;
+ }
+
+ ListValue* root_list = NULL;
+ ASSERT_TRUE(root.get());
+ ASSERT_TRUE(root->GetAsList(&root_list));
+
+ // Move items into our aggregate collection
+ while (root_list->GetSize()) {
+ std::unique_ptr<Value> item;
+ root_list->Remove(0, &item);
+ trace_parsed_.Append(std::move(item));
+ }
+
+ if (!has_more_events)
+ flush_complete_event->Signal();
+}
+
+static bool CompareJsonValues(const std::string& lhs,
+ const std::string& rhs,
+ CompareOp op) {
+ switch (op) {
+ case IS_EQUAL:
+ return lhs == rhs;
+ case IS_NOT_EQUAL:
+ return lhs != rhs;
+ default:
+ CHECK(0);
+ }
+ return false;
+}
+
+static bool IsKeyValueInDict(const JsonKeyValue* key_value,
+ DictionaryValue* dict) {
+ Value* value = NULL;
+ std::string value_str;
+ if (dict->Get(key_value->key, &value) &&
+ value->GetAsString(&value_str) &&
+ CompareJsonValues(value_str, key_value->value, key_value->op))
+ return true;
+
+ // Recurse to test arguments
+ DictionaryValue* args_dict = NULL;
+ dict->GetDictionary("args", &args_dict);
+ if (args_dict)
+ return IsKeyValueInDict(key_value, args_dict);
+
+ return false;
+}
+
+static bool IsAllKeyValueInDict(const JsonKeyValue* key_values,
+ DictionaryValue* dict) {
+ // Scan all key_values, they must all be present and equal.
+ while (key_values && key_values->key) {
+ if (!IsKeyValueInDict(key_values, dict))
+ return false;
+ ++key_values;
+ }
+ return true;
+}
+
+DictionaryValue* TraceEventTestFixture::FindMatchingTraceEntry(
+ const JsonKeyValue* key_values) {
+ // Scan all items
+ size_t trace_parsed_count = trace_parsed_.GetSize();
+ for (size_t i = 0; i < trace_parsed_count; i++) {
+ Value* value = NULL;
+ trace_parsed_.Get(i, &value);
+ if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ continue;
+ DictionaryValue* dict = static_cast<DictionaryValue*>(value);
+
+ if (IsAllKeyValueInDict(key_values, dict))
+ return dict;
+ }
+ return NULL;
+}
+
+void TraceEventTestFixture::DropTracedMetadataRecords() {
+ std::unique_ptr<ListValue> old_trace_parsed(trace_parsed_.CreateDeepCopy());
+ size_t old_trace_parsed_size = old_trace_parsed->GetSize();
+ trace_parsed_.Clear();
+
+ for (size_t i = 0; i < old_trace_parsed_size; i++) {
+ Value* value = nullptr;
+ old_trace_parsed->Get(i, &value);
+ if (!value || value->GetType() != Value::TYPE_DICTIONARY) {
+ trace_parsed_.Append(value->CreateDeepCopy());
+ continue;
+ }
+ DictionaryValue* dict = static_cast<DictionaryValue*>(value);
+ std::string tmp;
+ if (dict->GetString("ph", &tmp) && tmp == "M")
+ continue;
+
+ trace_parsed_.Append(value->CreateDeepCopy());
+ }
+}
+
+DictionaryValue* TraceEventTestFixture::FindNamePhase(const char* name,
+ const char* phase) {
+ JsonKeyValue key_values[] = {
+ {"name", name, IS_EQUAL},
+ {"ph", phase, IS_EQUAL},
+ {0, 0, IS_EQUAL}
+ };
+ return FindMatchingTraceEntry(key_values);
+}
+
+DictionaryValue* TraceEventTestFixture::FindNamePhaseKeyValue(
+ const char* name,
+ const char* phase,
+ const char* key,
+ const char* value) {
+ JsonKeyValue key_values[] = {
+ {"name", name, IS_EQUAL},
+ {"ph", phase, IS_EQUAL},
+ {key, value, IS_EQUAL},
+ {0, 0, IS_EQUAL}
+ };
+ return FindMatchingTraceEntry(key_values);
+}
+
+bool TraceEventTestFixture::FindMatchingValue(const char* key,
+ const char* value) {
+ JsonKeyValue key_values[] = {
+ {key, value, IS_EQUAL},
+ {0, 0, IS_EQUAL}
+ };
+ return FindMatchingTraceEntry(key_values);
+}
+
+bool TraceEventTestFixture::FindNonMatchingValue(const char* key,
+ const char* value) {
+ JsonKeyValue key_values[] = {
+ {key, value, IS_NOT_EQUAL},
+ {0, 0, IS_EQUAL}
+ };
+ return FindMatchingTraceEntry(key_values);
+}
+
+bool IsStringInDict(const char* string_to_match, const DictionaryValue* dict) {
+ for (DictionaryValue::Iterator it(*dict); !it.IsAtEnd(); it.Advance()) {
+ if (it.key().find(string_to_match) != std::string::npos)
+ return true;
+
+ std::string value_str;
+ it.value().GetAsString(&value_str);
+ if (value_str.find(string_to_match) != std::string::npos)
+ return true;
+ }
+
+ // Recurse to test arguments
+ const DictionaryValue* args_dict = NULL;
+ dict->GetDictionary("args", &args_dict);
+ if (args_dict)
+ return IsStringInDict(string_to_match, args_dict);
+
+ return false;
+}
+
+const DictionaryValue* FindTraceEntry(
+ const ListValue& trace_parsed,
+ const char* string_to_match,
+ const DictionaryValue* match_after_this_item = NULL) {
+ // Scan all items
+ size_t trace_parsed_count = trace_parsed.GetSize();
+ for (size_t i = 0; i < trace_parsed_count; i++) {
+ const Value* value = NULL;
+ trace_parsed.Get(i, &value);
+ if (match_after_this_item) {
+ if (value == match_after_this_item)
+ match_after_this_item = NULL;
+ continue;
+ }
+ if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ continue;
+ const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
+
+ if (IsStringInDict(string_to_match, dict))
+ return dict;
+ }
+ return NULL;
+}
+
+std::vector<const DictionaryValue*> FindTraceEntries(
+ const ListValue& trace_parsed,
+ const char* string_to_match) {
+ std::vector<const DictionaryValue*> hits;
+ size_t trace_parsed_count = trace_parsed.GetSize();
+ for (size_t i = 0; i < trace_parsed_count; i++) {
+ const Value* value = NULL;
+ trace_parsed.Get(i, &value);
+ if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ continue;
+ const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
+
+ if (IsStringInDict(string_to_match, dict))
+ hits.push_back(dict);
+ }
+ return hits;
+}
+
+const char kControlCharacters[] = "\001\002\003\n\r";
+
+void TraceWithAllMacroVariants(WaitableEvent* task_complete_event) {
+ {
+ TRACE_EVENT0("all", "TRACE_EVENT0 call");
+ TRACE_EVENT1("all", "TRACE_EVENT1 call", "name1", "value1");
+ TRACE_EVENT2("all", "TRACE_EVENT2 call",
+ "name1", "\"value1\"",
+ "name2", "value\\2");
+
+ TRACE_EVENT_INSTANT0("all", "TRACE_EVENT_INSTANT0 call",
+ TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT1("all", "TRACE_EVENT_INSTANT1 call",
+ TRACE_EVENT_SCOPE_PROCESS, "name1", "value1");
+ TRACE_EVENT_INSTANT2("all", "TRACE_EVENT_INSTANT2 call",
+ TRACE_EVENT_SCOPE_THREAD,
+ "name1", "value1",
+ "name2", "value2");
+
+ TRACE_EVENT_BEGIN0("all", "TRACE_EVENT_BEGIN0 call");
+ TRACE_EVENT_BEGIN1("all", "TRACE_EVENT_BEGIN1 call", "name1", "value1");
+ TRACE_EVENT_BEGIN2("all", "TRACE_EVENT_BEGIN2 call",
+ "name1", "value1",
+ "name2", "value2");
+
+ TRACE_EVENT_END0("all", "TRACE_EVENT_END0 call");
+ TRACE_EVENT_END1("all", "TRACE_EVENT_END1 call", "name1", "value1");
+ TRACE_EVENT_END2("all", "TRACE_EVENT_END2 call",
+ "name1", "value1",
+ "name2", "value2");
+
+ TRACE_EVENT_ASYNC_BEGIN0("all", "TRACE_EVENT_ASYNC_BEGIN0 call", kAsyncId);
+ TRACE_EVENT_ASYNC_BEGIN1("all", "TRACE_EVENT_ASYNC_BEGIN1 call", kAsyncId,
+ "name1", "value1");
+ TRACE_EVENT_ASYNC_BEGIN2("all", "TRACE_EVENT_ASYNC_BEGIN2 call", kAsyncId,
+ "name1", "value1",
+ "name2", "value2");
+
+ TRACE_EVENT_ASYNC_STEP_INTO0("all", "TRACE_EVENT_ASYNC_STEP_INTO0 call",
+ kAsyncId, "step_begin1");
+ TRACE_EVENT_ASYNC_STEP_INTO1("all", "TRACE_EVENT_ASYNC_STEP_INTO1 call",
+ kAsyncId, "step_begin2", "name1", "value1");
+
+ TRACE_EVENT_ASYNC_END0("all", "TRACE_EVENT_ASYNC_END0 call", kAsyncId);
+ TRACE_EVENT_ASYNC_END1("all", "TRACE_EVENT_ASYNC_END1 call", kAsyncId,
+ "name1", "value1");
+ TRACE_EVENT_ASYNC_END2("all", "TRACE_EVENT_ASYNC_END2 call", kAsyncId,
+ "name1", "value1",
+ "name2", "value2");
+
+ TRACE_EVENT_FLOW_BEGIN0("all", "TRACE_EVENT_FLOW_BEGIN0 call", kFlowId);
+ TRACE_EVENT_FLOW_STEP0("all", "TRACE_EVENT_FLOW_STEP0 call",
+ kFlowId, "step1");
+ TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0("all",
+ "TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0 call", kFlowId);
+
+ TRACE_COUNTER1("all", "TRACE_COUNTER1 call", 31415);
+ TRACE_COUNTER2("all", "TRACE_COUNTER2 call",
+ "a", 30000,
+ "b", 1415);
+
+ TRACE_COUNTER_WITH_TIMESTAMP1("all", "TRACE_COUNTER_WITH_TIMESTAMP1 call",
+ 42, 31415);
+ TRACE_COUNTER_WITH_TIMESTAMP2("all", "TRACE_COUNTER_WITH_TIMESTAMP2 call",
+ 42, "a", 30000, "b", 1415);
+
+ TRACE_COUNTER_ID1("all", "TRACE_COUNTER_ID1 call", 0x319009, 31415);
+ TRACE_COUNTER_ID2("all", "TRACE_COUNTER_ID2 call", 0x319009,
+ "a", 30000, "b", 1415);
+
+ TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0("all",
+ "TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call",
+ kAsyncId, kThreadId, 12345);
+ TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0("all",
+ "TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0 call",
+ kAsyncId, kThreadId, 23456);
+
+ TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0("all",
+ "TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call",
+ kAsyncId2, kThreadId, 34567);
+ TRACE_EVENT_ASYNC_STEP_PAST0("all", "TRACE_EVENT_ASYNC_STEP_PAST0 call",
+ kAsyncId2, "step_end1");
+ TRACE_EVENT_ASYNC_STEP_PAST1("all", "TRACE_EVENT_ASYNC_STEP_PAST1 call",
+ kAsyncId2, "step_end2", "name1", "value1");
+
+ TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0("all",
+ "TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0 call",
+ kAsyncId2, kThreadId, 45678);
+
+ TRACE_EVENT_OBJECT_CREATED_WITH_ID("all", "tracked object 1", 0x42);
+ TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+ "all", "tracked object 1", 0x42, "hello");
+ TRACE_EVENT_OBJECT_DELETED_WITH_ID("all", "tracked object 1", 0x42);
+
+ TraceScopedTrackableObject<int> trackable("all", "tracked object 2",
+ 0x2128506);
+ trackable.snapshot("world");
+
+ TRACE_EVENT_OBJECT_CREATED_WITH_ID(
+ "all", "tracked object 3", TRACE_ID_WITH_SCOPE("scope", 0x42));
+ TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+ "all", "tracked object 3", TRACE_ID_WITH_SCOPE("scope", 0x42), "hello");
+ TRACE_EVENT_OBJECT_DELETED_WITH_ID(
+ "all", "tracked object 3", TRACE_ID_WITH_SCOPE("scope", 0x42));
+
+ TRACE_EVENT1(kControlCharacters, kControlCharacters,
+ kControlCharacters, kControlCharacters);
+
+ uint64_t context_id = 0x20151021;
+
+ TRACE_EVENT_ENTER_CONTEXT("all", "TRACE_EVENT_ENTER_CONTEXT call",
+ TRACE_ID_WITH_SCOPE("scope", context_id));
+ TRACE_EVENT_LEAVE_CONTEXT("all", "TRACE_EVENT_LEAVE_CONTEXT call",
+ TRACE_ID_WITH_SCOPE("scope", context_id));
+ TRACE_EVENT_SCOPED_CONTEXT("disabled-by-default-cat",
+ "TRACE_EVENT_SCOPED_CONTEXT disabled call",
+ context_id);
+ TRACE_EVENT_SCOPED_CONTEXT("all", "TRACE_EVENT_SCOPED_CONTEXT call",
+ context_id);
+ } // Scope close causes TRACE_EVENT0 etc to send their END events.
+
+ if (task_complete_event)
+ task_complete_event->Signal();
+}
+
+// Walks |trace_parsed| (the JSON-parsed trace output) and verifies that an
+// entry exists for every event emitted by TraceWithAllMacroVariants(), and
+// that each entry carries the expected phase ("ph"), id, scope, timestamp
+// and argument payloads.
+void ValidateAllTraceMacrosCreatedData(const ListValue& trace_parsed) {
+  const DictionaryValue* item = NULL;
+
+// Convenience macros: EXPECT_FIND_ locates a trace entry by name and leaves
+// it in the function-scope |item| for the EXPECT_SUB_FIND_ checks that
+// follow; EXPECT_NOT_FIND_ asserts the entry is absent.
+#define EXPECT_FIND_(string) \
+    item = FindTraceEntry(trace_parsed, string); \
+    EXPECT_TRUE(item);
+#define EXPECT_NOT_FIND_(string) \
+    item = FindTraceEntry(trace_parsed, string); \
+    EXPECT_FALSE(item);
+#define EXPECT_SUB_FIND_(string) \
+    if (item) \
+      EXPECT_TRUE(IsStringInDict(string, item));
+
+  EXPECT_FIND_("TRACE_EVENT0 call");
+  {
+    std::string ph;
+    std::string ph_end;
+    EXPECT_TRUE((item = FindTraceEntry(trace_parsed, "TRACE_EVENT0 call")));
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    // "X" = complete event; there must be exactly one entry (no separate
+    // begin/end pair), hence the second lookup must fail.
+    EXPECT_EQ("X", ph);
+    item = FindTraceEntry(trace_parsed, "TRACE_EVENT0 call", item);
+    EXPECT_FALSE(item);
+  }
+  EXPECT_FIND_("TRACE_EVENT1 call");
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_FIND_("TRACE_EVENT2 call");
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("\"value1\"");
+  EXPECT_SUB_FIND_("name2");
+  EXPECT_SUB_FIND_("value\\2");
+
+  // Instant events record their scope in "s": "g"lobal, "p"rocess,
+  // "t"hread.
+  EXPECT_FIND_("TRACE_EVENT_INSTANT0 call");
+  {
+    std::string scope;
+    EXPECT_TRUE((item && item->GetString("s", &scope)));
+    EXPECT_EQ("g", scope);
+  }
+  EXPECT_FIND_("TRACE_EVENT_INSTANT1 call");
+  {
+    std::string scope;
+    EXPECT_TRUE((item && item->GetString("s", &scope)));
+    EXPECT_EQ("p", scope);
+  }
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_FIND_("TRACE_EVENT_INSTANT2 call");
+  {
+    std::string scope;
+    EXPECT_TRUE((item && item->GetString("s", &scope)));
+    EXPECT_EQ("t", scope);
+  }
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_SUB_FIND_("name2");
+  EXPECT_SUB_FIND_("value2");
+
+  EXPECT_FIND_("TRACE_EVENT_BEGIN0 call");
+  EXPECT_FIND_("TRACE_EVENT_BEGIN1 call");
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_FIND_("TRACE_EVENT_BEGIN2 call");
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_SUB_FIND_("name2");
+  EXPECT_SUB_FIND_("value2");
+
+  EXPECT_FIND_("TRACE_EVENT_END0 call");
+  EXPECT_FIND_("TRACE_EVENT_END1 call");
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_FIND_("TRACE_EVENT_END2 call");
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_SUB_FIND_("name2");
+  EXPECT_SUB_FIND_("value2");
+
+  // Async events must carry the id they were emitted with (kAsyncIdStr).
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_BEGIN0 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kAsyncIdStr);
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_BEGIN1 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kAsyncIdStr);
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_BEGIN2 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kAsyncIdStr);
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_SUB_FIND_("name2");
+  EXPECT_SUB_FIND_("value2");
+
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_STEP_INTO0 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kAsyncIdStr);
+  EXPECT_SUB_FIND_("step_begin1");
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_STEP_INTO1 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kAsyncIdStr);
+  EXPECT_SUB_FIND_("step_begin2");
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_END0 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kAsyncIdStr);
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_END1 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kAsyncIdStr);
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_END2 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kAsyncIdStr);
+  EXPECT_SUB_FIND_("name1");
+  EXPECT_SUB_FIND_("value1");
+  EXPECT_SUB_FIND_("name2");
+  EXPECT_SUB_FIND_("value2");
+
+  EXPECT_FIND_("TRACE_EVENT_FLOW_BEGIN0 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kFlowIdStr);
+  EXPECT_FIND_("TRACE_EVENT_FLOW_STEP0 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kFlowIdStr);
+  EXPECT_SUB_FIND_("step1");
+  EXPECT_FIND_("TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0 call");
+  EXPECT_SUB_FIND_("id");
+  EXPECT_SUB_FIND_(kFlowIdStr);
+
+  // Counter events use phase "C" and carry their values under "args.*".
+  EXPECT_FIND_("TRACE_COUNTER1 call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("C", ph);
+
+    int value;
+    EXPECT_TRUE((item && item->GetInteger("args.value", &value)));
+    EXPECT_EQ(31415, value);
+  }
+
+  EXPECT_FIND_("TRACE_COUNTER2 call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("C", ph);
+
+    int value;
+    EXPECT_TRUE((item && item->GetInteger("args.a", &value)));
+    EXPECT_EQ(30000, value);
+
+    EXPECT_TRUE((item && item->GetInteger("args.b", &value)));
+    EXPECT_EQ(1415, value);
+  }
+
+  EXPECT_FIND_("TRACE_COUNTER_WITH_TIMESTAMP1 call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("C", ph);
+
+    int value;
+    EXPECT_TRUE((item && item->GetInteger("args.value", &value)));
+    EXPECT_EQ(31415, value);
+
+    // Explicit timestamp supplied by the macro must survive into "ts".
+    int ts;
+    EXPECT_TRUE((item && item->GetInteger("ts", &ts)));
+    EXPECT_EQ(42, ts);
+  }
+
+  EXPECT_FIND_("TRACE_COUNTER_WITH_TIMESTAMP2 call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("C", ph);
+
+    int value;
+    EXPECT_TRUE((item && item->GetInteger("args.a", &value)));
+    EXPECT_EQ(30000, value);
+
+    EXPECT_TRUE((item && item->GetInteger("args.b", &value)));
+    EXPECT_EQ(1415, value);
+
+    int ts;
+    EXPECT_TRUE((item && item->GetInteger("ts", &ts)));
+    EXPECT_EQ(42, ts);
+  }
+
+  EXPECT_FIND_("TRACE_COUNTER_ID1 call");
+  {
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x319009", id);
+
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("C", ph);
+
+    int value;
+    EXPECT_TRUE((item && item->GetInteger("args.value", &value)));
+    EXPECT_EQ(31415, value);
+  }
+
+  EXPECT_FIND_("TRACE_COUNTER_ID2 call");
+  {
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x319009", id);
+
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("C", ph);
+
+    int value;
+    EXPECT_TRUE((item && item->GetInteger("args.a", &value)));
+    EXPECT_EQ(30000, value);
+
+    EXPECT_TRUE((item && item->GetInteger("args.b", &value)));
+    EXPECT_EQ(1415, value);
+  }
+
+  // The *_WITH_ID_TID_AND_TIMESTAMP variants must preserve the explicit
+  // timestamp ("ts"), thread id ("tid") and event id verbatim.
+  EXPECT_FIND_("TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call");
+  {
+    int val;
+    EXPECT_TRUE((item && item->GetInteger("ts", &val)));
+    EXPECT_EQ(12345, val);
+    EXPECT_TRUE((item && item->GetInteger("tid", &val)));
+    EXPECT_EQ(kThreadId, val);
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ(kAsyncIdStr, id);
+  }
+
+  EXPECT_FIND_("TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0 call");
+  {
+    int val;
+    EXPECT_TRUE((item && item->GetInteger("ts", &val)));
+    EXPECT_EQ(23456, val);
+    EXPECT_TRUE((item && item->GetInteger("tid", &val)));
+    EXPECT_EQ(kThreadId, val);
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ(kAsyncIdStr, id);
+  }
+
+  EXPECT_FIND_("TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call");
+  {
+    int val;
+    EXPECT_TRUE((item && item->GetInteger("ts", &val)));
+    EXPECT_EQ(34567, val);
+    EXPECT_TRUE((item && item->GetInteger("tid", &val)));
+    EXPECT_EQ(kThreadId, val);
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ(kAsyncId2Str, id);
+  }
+
+  EXPECT_FIND_("TRACE_EVENT_ASYNC_STEP_PAST0 call");
+  {
+    // NOTE(review): this brace scope also encloses the STEP_PAST1 lookup
+    // below; harmless, since the macros only assign the function-scope
+    // |item| and declare no locals.
+    EXPECT_SUB_FIND_("id");
+    EXPECT_SUB_FIND_(kAsyncId2Str);
+    EXPECT_SUB_FIND_("step_end1");
+    EXPECT_FIND_("TRACE_EVENT_ASYNC_STEP_PAST1 call");
+    EXPECT_SUB_FIND_("id");
+    EXPECT_SUB_FIND_(kAsyncId2Str);
+    EXPECT_SUB_FIND_("step_end2");
+    EXPECT_SUB_FIND_("name1");
+    EXPECT_SUB_FIND_("value1");
+  }
+
+  EXPECT_FIND_("TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0 call");
+  {
+    int val;
+    EXPECT_TRUE((item && item->GetInteger("ts", &val)));
+    EXPECT_EQ(45678, val);
+    EXPECT_TRUE((item && item->GetInteger("tid", &val)));
+    EXPECT_EQ(kThreadId, val);
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ(kAsyncId2Str, id);
+  }
+
+  // Object lifetime events: "N" = created, "O" = snapshot, "D" = deleted.
+  // "tracked object 1" uses a plain id and therefore must have no "scope"
+  // key.
+  EXPECT_FIND_("tracked object 1");
+  {
+    std::string phase;
+    std::string id;
+    std::string snapshot;
+
+    EXPECT_TRUE((item && item->GetString("ph", &phase)));
+    EXPECT_EQ("N", phase);
+    EXPECT_FALSE((item && item->HasKey("scope")));
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x42", id);
+
+    item = FindTraceEntry(trace_parsed, "tracked object 1", item);
+    EXPECT_TRUE(item);
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("O", phase);
+    EXPECT_FALSE((item && item->HasKey("scope")));
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x42", id);
+    EXPECT_TRUE(item && item->GetString("args.snapshot", &snapshot));
+    EXPECT_EQ("hello", snapshot);
+
+    item = FindTraceEntry(trace_parsed, "tracked object 1", item);
+    EXPECT_TRUE(item);
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("D", phase);
+    EXPECT_FALSE((item && item->HasKey("scope")));
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x42", id);
+  }
+
+  EXPECT_FIND_("tracked object 2");
+  {
+    std::string phase;
+    std::string id;
+    std::string snapshot;
+
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("N", phase);
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x2128506", id);
+
+    item = FindTraceEntry(trace_parsed, "tracked object 2", item);
+    EXPECT_TRUE(item);
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("O", phase);
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x2128506", id);
+    EXPECT_TRUE(item && item->GetString("args.snapshot", &snapshot));
+    EXPECT_EQ("world", snapshot);
+
+    item = FindTraceEntry(trace_parsed, "tracked object 2", item);
+    EXPECT_TRUE(item);
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("D", phase);
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x2128506", id);
+  }
+
+  // "tracked object 3" was emitted with TRACE_ID_WITH_SCOPE, so every
+  // lifetime event must additionally carry "scope".
+  EXPECT_FIND_("tracked object 3");
+  {
+    std::string phase;
+    std::string scope;
+    std::string id;
+    std::string snapshot;
+
+    EXPECT_TRUE((item && item->GetString("ph", &phase)));
+    EXPECT_EQ("N", phase);
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x42", id);
+
+    item = FindTraceEntry(trace_parsed, "tracked object 3", item);
+    EXPECT_TRUE(item);
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("O", phase);
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x42", id);
+    EXPECT_TRUE(item && item->GetString("args.snapshot", &snapshot));
+    EXPECT_EQ("hello", snapshot);
+
+    item = FindTraceEntry(trace_parsed, "tracked object 3", item);
+    EXPECT_TRUE(item);
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("D", phase);
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x42", id);
+  }
+
+  EXPECT_FIND_(kControlCharacters);
+  EXPECT_SUB_FIND_(kControlCharacters);
+
+  // Context events: "(" = enter, ")" = leave.
+  EXPECT_FIND_("TRACE_EVENT_ENTER_CONTEXT call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("(", ph);
+
+    std::string scope;
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x20151021", id);
+  }
+
+  EXPECT_FIND_("TRACE_EVENT_LEAVE_CONTEXT call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ(")", ph);
+
+    std::string scope;
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x20151021", id);
+  }
+
+  // TRACE_EVENT_SCOPED_CONTEXT emits an enter/leave pair on scope exit.
+  // The disabled-by-default variant produced nothing, so exactly two
+  // entries (enter then leave) are expected.
+  std::vector<const DictionaryValue*> scoped_context_calls =
+      FindTraceEntries(trace_parsed, "TRACE_EVENT_SCOPED_CONTEXT call");
+  EXPECT_EQ(2u, scoped_context_calls.size());
+  {
+    item = scoped_context_calls[0];
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("(", ph);
+
+    std::string id;
+    EXPECT_FALSE((item && item->HasKey("scope")));
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x20151021", id);
+  }
+
+  {
+    item = scoped_context_calls[1];
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ(")", ph);
+
+    std::string id;
+    EXPECT_FALSE((item && item->HasKey("scope")));
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x20151021", id);
+  }
+}
+
+// Emits |num_events| instant events, each tagged with |thread_id| and its
+// index, then signals |task_complete_event| (if non-null) so the spawning
+// test can wait for this task to finish.
+void TraceManyInstantEvents(int thread_id, int num_events,
+                            WaitableEvent* task_complete_event) {
+  for (int i = 0; i < num_events; i++) {
+    TRACE_EVENT_INSTANT2("all", "multi thread event",
+                         TRACE_EVENT_SCOPE_THREAD,
+                         "thread", thread_id,
+                         "event", i);
+  }
+
+  if (task_complete_event)
+    task_complete_event->Signal();
+}
+
+// Verifies that every (thread, event-index) pair emitted by
+// TraceManyInstantEvents() on |num_threads| threads appears exactly in the
+// parsed trace: builds a thread -> event -> seen map from the "multi thread
+// event" entries and checks full coverage.
+void ValidateInstantEventPresentOnEveryThread(const ListValue& trace_parsed,
+                                              int num_threads,
+                                              int num_events) {
+  std::map<int, std::map<int, bool> > results;
+
+  size_t trace_parsed_count = trace_parsed.GetSize();
+  for (size_t i = 0; i < trace_parsed_count; i++) {
+    const Value* value = NULL;
+    trace_parsed.Get(i, &value);
+    if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+      continue;
+    const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
+    std::string name;
+    dict->GetString("name", &name);
+    if (name != "multi thread event")
+      continue;
+
+    int thread = 0;
+    int event = 0;
+    EXPECT_TRUE(dict->GetInteger("args.thread", &thread));
+    EXPECT_TRUE(dict->GetInteger("args.event", &event));
+    results[thread][event] = true;
+  }
+
+  // Sanity check: a key that was never emitted default-inserts to false.
+  EXPECT_FALSE(results[-1][-1]);
+  for (int thread = 0; thread < num_threads; thread++) {
+    for (int event = 0; event < num_events; event++) {
+      EXPECT_TRUE(results[thread][event]);
+    }
+  }
+}
+
+// Asserts the default category filter state of |trace_log|.
+void CheckTraceDefaultCategoryFilters(const TraceLog& trace_log) {
+  // Default enables all category filters except the disabled-by-default-* ones.
+  EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("foo"));
+  EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("bar"));
+  EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("foo,bar"));
+  // A group is enabled if any category in it is enabled...
+  EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled(
+      "foo,disabled-by-default-foo"));
+  // ...and disabled only when every category in the group is disabled.
+  EXPECT_FALSE(*trace_log.GetCategoryGroupEnabled(
+      "disabled-by-default-foo,disabled-by-default-bar"));
+}
+
+} // namespace
+
+// Simple test for emitting data and validating it was received.
+TEST_F(TraceEventTestFixture, DataCaptured) {
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+
+  // NULL: run synchronously on this thread; no completion event to signal.
+  TraceWithAllMacroVariants(NULL);
+
+  EndTraceAndFlush();
+
+  ValidateAllTraceMacrosCreatedData(trace_parsed_);
+}
+
+// Emit some events and validate that only empty strings are received
+// if we tell Flush() to discard events.
+TEST_F(TraceEventTestFixture, DataDiscarded) {
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+
+  TraceWithAllMacroVariants(NULL);
+
+  // CancelTrace() (instead of EndTraceAndFlush()) should drop all buffered
+  // events, leaving the parsed output empty.
+  CancelTrace();
+
+  EXPECT_TRUE(trace_parsed_.empty());
+}
+
+// gmock observer used to assert exactly when TraceLog fires its
+// enabled/disabled state-change notifications.
+class MockEnabledStateChangedObserver :
+      public TraceLog::EnabledStateObserver {
+ public:
+  MOCK_METHOD0(OnTraceLogEnabled, void());
+  MOCK_METHOD0(OnTraceLogDisabled, void());
+};
+
+// Enabling tracing must notify a registered observer exactly once.
+TEST_F(TraceEventTestFixture, EnabledObserverFiresOnEnable) {
+  MockEnabledStateChangedObserver observer;
+  TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+
+  EXPECT_CALL(observer, OnTraceLogEnabled())
+      .Times(1);
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+  testing::Mock::VerifyAndClear(&observer);
+  EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+  // Cleanup.
+  TraceLog::GetInstance()->RemoveEnabledStateObserver(&observer);
+  TraceLog::GetInstance()->SetDisabled();
+}
+
+// A second (nested) SetEnabled() while already enabled must not re-notify
+// observers; StrictMock fails the test on any unexpected call.
+TEST_F(TraceEventTestFixture, EnabledObserverDoesntFireOnSecondEnable) {
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+
+  testing::StrictMock<MockEnabledStateChangedObserver> observer;
+  TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+
+  EXPECT_CALL(observer, OnTraceLogEnabled())
+      .Times(0);
+  EXPECT_CALL(observer, OnTraceLogDisabled())
+      .Times(0);
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+  testing::Mock::VerifyAndClear(&observer);
+  EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+  // Cleanup: two SetDisabled() calls to balance the two SetEnabled() calls.
+  TraceLog::GetInstance()->RemoveEnabledStateObserver(&observer);
+  TraceLog::GetInstance()->SetDisabled();
+  TraceLog::GetInstance()->SetDisabled();
+}
+
+// With tracing enabled twice (nested), the first SetDisabled() must fire
+// OnTraceLogDisabled() exactly once and OnTraceLogEnabled() not at all.
+TEST_F(TraceEventTestFixture, EnabledObserverFiresOnFirstDisable) {
+  TraceConfig tc_inc_all("*", "");
+  TraceLog::GetInstance()->SetEnabled(tc_inc_all, TraceLog::RECORDING_MODE);
+  TraceLog::GetInstance()->SetEnabled(tc_inc_all, TraceLog::RECORDING_MODE);
+
+  testing::StrictMock<MockEnabledStateChangedObserver> observer;
+  TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+
+  EXPECT_CALL(observer, OnTraceLogEnabled())
+      .Times(0);
+  EXPECT_CALL(observer, OnTraceLogDisabled())
+      .Times(1);
+  TraceLog::GetInstance()->SetDisabled();
+  testing::Mock::VerifyAndClear(&observer);
+
+  // Cleanup.
+  TraceLog::GetInstance()->RemoveEnabledStateObserver(&observer);
+  TraceLog::GetInstance()->SetDisabled();
+}
+
+// Disabling tracing must notify a registered observer exactly once.
+TEST_F(TraceEventTestFixture, EnabledObserverFiresOnDisable) {
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+
+  MockEnabledStateChangedObserver observer;
+  TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+
+  EXPECT_CALL(observer, OnTraceLogDisabled())
+      .Times(1);
+  TraceLog::GetInstance()->SetDisabled();
+  testing::Mock::VerifyAndClear(&observer);
+
+  // Cleanup.
+  TraceLog::GetInstance()->RemoveEnabledStateObserver(&observer);
+}
+
+// Tests the IsEnabled() state of TraceLog changes before callbacks.
+class AfterStateChangeEnabledStateObserver
+    : public TraceLog::EnabledStateObserver {
+ public:
+  AfterStateChangeEnabledStateObserver() {}
+  ~AfterStateChangeEnabledStateObserver() override {}
+
+  // TraceLog::EnabledStateObserver overrides:
+  // Asserts that IsEnabled() already reflects the new state by the time the
+  // corresponding callback runs.
+  void OnTraceLogEnabled() override {
+    EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+  }
+
+  void OnTraceLogDisabled() override {
+    EXPECT_FALSE(TraceLog::GetInstance()->IsEnabled());
+  }
+};
+
+// Drives an enable/disable cycle; the assertions on the new state live in
+// AfterStateChangeEnabledStateObserver's callbacks.
+TEST_F(TraceEventTestFixture, ObserversFireAfterStateChange) {
+  AfterStateChangeEnabledStateObserver observer;
+  TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+  EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+  TraceLog::GetInstance()->SetDisabled();
+  EXPECT_FALSE(TraceLog::GetInstance()->IsEnabled());
+
+  TraceLog::GetInstance()->RemoveEnabledStateObserver(&observer);
+}
+
+// Tests that a state observer can remove itself during a callback.
+class SelfRemovingEnabledStateObserver
+    : public TraceLog::EnabledStateObserver {
+ public:
+  SelfRemovingEnabledStateObserver() {}
+  ~SelfRemovingEnabledStateObserver() override {}
+
+  // TraceLog::EnabledStateObserver overrides:
+  void OnTraceLogEnabled() override {}
+
+  // Unregisters itself from inside the callback — TraceLog must tolerate
+  // observer-list mutation during notification.
+  void OnTraceLogDisabled() override {
+    TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
+  }
+};
+
+// Verifies observer counts across registration and self-removal during the
+// OnTraceLogDisabled() callback.
+TEST_F(TraceEventTestFixture, SelfRemovingObserver) {
+  ASSERT_EQ(0u, TraceLog::GetInstance()->GetObserverCountForTest());
+
+  SelfRemovingEnabledStateObserver observer;
+  TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+  EXPECT_EQ(1u, TraceLog::GetInstance()->GetObserverCountForTest());
+
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+  TraceLog::GetInstance()->SetDisabled();
+  // The observer removed itself on disable.
+  EXPECT_EQ(0u, TraceLog::GetInstance()->GetObserverCountForTest());
+}
+
+// Wraps TRACE_EVENT_IS_NEW_TRACE; returns the flag the macro writes out.
+bool IsNewTrace() {
+  bool is_new_trace;
+  TRACE_EVENT_IS_NEW_TRACE(&is_new_trace);
+  return is_new_trace;
+}
+
+// TRACE_EVENT_IS_NEW_TRACE should report true exactly once per newly
+// started trace, and false while tracing is off.
+TEST_F(TraceEventTestFixture, NewTraceRecording) {
+  ASSERT_FALSE(IsNewTrace());
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+  // First call to IsNewTrace() should succeed. But, the second shouldn't.
+  ASSERT_TRUE(IsNewTrace());
+  ASSERT_FALSE(IsNewTrace());
+  EndTraceAndFlush();
+
+  // IsNewTrace() should definitely be false now.
+  ASSERT_FALSE(IsNewTrace());
+
+  // Start another trace. IsNewTrace() should become true again, briefly, as
+  // before.
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+  ASSERT_TRUE(IsNewTrace());
+  ASSERT_FALSE(IsNewTrace());
+
+  // Cleanup.
+  EndTraceAndFlush();
+}
+
+// Exercises flushing around the point where a flush needs more than one
+// callback: first doubles the event count until num_flush_callbacks_
+// reaches 2, then narrows [min_traces, max_traces] around that threshold,
+// and finally flushes at every count in the remaining window.
+TEST_F(TraceEventTestFixture, TestTraceFlush) {
+  size_t min_traces = 1;
+  size_t max_traces = 1;
+  // Phase 1: exponential growth until a flush takes >= 2 callbacks.
+  do {
+    max_traces *= 2;
+    TraceLog::GetInstance()->SetEnabled(TraceConfig(),
+                                        TraceLog::RECORDING_MODE);
+    for (size_t i = 0; i < max_traces; i++) {
+      TRACE_EVENT_INSTANT0("x", "y", TRACE_EVENT_SCOPE_THREAD);
+    }
+    EndTraceAndFlush();
+  } while (num_flush_callbacks_ < 2);
+
+  // Phase 2: bisect toward the threshold, keeping a 10-event margin on
+  // each side, until the window is at most 50 wide.
+  while (min_traces + 50 <  max_traces) {
+    size_t traces = (min_traces + max_traces) / 2;
+    TraceLog::GetInstance()->SetEnabled(TraceConfig(),
+                                        TraceLog::RECORDING_MODE);
+    for (size_t i = 0; i < traces; i++) {
+      TRACE_EVENT_INSTANT0("x", "y", TRACE_EVENT_SCOPE_THREAD);
+    }
+    EndTraceAndFlush();
+    if (num_flush_callbacks_ < 2) {
+      min_traces = traces - 10;
+    } else {
+      max_traces = traces + 10;
+    }
+  }
+
+  // Phase 3: flush at every count across the threshold window.
+  for (size_t traces = min_traces; traces < max_traces; traces++) {
+    TraceLog::GetInstance()->SetEnabled(TraceConfig(),
+                                        TraceLog::RECORDING_MODE);
+    for (size_t i = 0; i < traces; i++) {
+      TRACE_EVENT_INSTANT0("x", "y", TRACE_EVENT_SCOPE_THREAD);
+    }
+    EndTraceAndFlush();
+  }
+}
+
+// Metadata events added via TRACE_EVENT_API_ADD_METADATA_EVENT must be
+// serialized lazily (only at flush) and must not leak into later traces.
+TEST_F(TraceEventTestFixture, AddMetadataEvent) {
+  int num_calls = 0;
+
+  // Counts serializations so the test can observe when (and how often)
+  // AppendAsTraceFormat is invoked.
+  class Convertable : public ConvertableToTraceFormat {
+   public:
+    explicit Convertable(int* num_calls) : num_calls_(num_calls) {}
+    ~Convertable() override {}
+    void AppendAsTraceFormat(std::string* out) const override {
+      (*num_calls_)++;
+      out->append("\"metadata_value\"");
+    }
+
+   private:
+    int* num_calls_;
+  };
+
+  std::unique_ptr<ConvertableToTraceFormat> conv1(new Convertable(&num_calls));
+  std::unique_ptr<Convertable> conv2(new Convertable(&num_calls));
+
+  BeginTrace();
+  TRACE_EVENT_API_ADD_METADATA_EVENT(
+      TraceLog::GetCategoryGroupEnabled("__metadata"), "metadata_event_1",
+      "metadata_arg_name", std::move(conv1));
+  TRACE_EVENT_API_ADD_METADATA_EVENT(
+      TraceLog::GetCategoryGroupEnabled("__metadata"), "metadata_event_2",
+      "metadata_arg_name", std::move(conv2));
+  // |AppendAsTraceFormat| should only be called on flush, not when the event
+  // is added.
+  ASSERT_EQ(0, num_calls);
+  EndTraceAndFlush();
+  ASSERT_EQ(2, num_calls);
+  EXPECT_TRUE(FindNamePhaseKeyValue("metadata_event_1", "M",
+                                    "metadata_arg_name", "metadata_value"));
+  EXPECT_TRUE(FindNamePhaseKeyValue("metadata_event_2", "M",
+                                    "metadata_arg_name", "metadata_value"));
+
+  // The metadata event should only be added to the current trace. In this new
+  // trace, the event should not appear.
+  BeginTrace();
+  EndTraceAndFlush();
+  ASSERT_EQ(2, num_calls);
+}
+
+// Test that categories work.
+// Test that categories work.
+TEST_F(TraceEventTestFixture, Categories) {
+  // Test that categories that are used can be retrieved whether trace was
+  // enabled or disabled when the trace event was encountered.
+  TRACE_EVENT_INSTANT0("c1", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("c2", "name", TRACE_EVENT_SCOPE_THREAD);
+  BeginTrace();
+  TRACE_EVENT_INSTANT0("c3", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("c4", "name", TRACE_EVENT_SCOPE_THREAD);
+  // Category groups containing more than one category.
+  TRACE_EVENT_INSTANT0("c5,c6", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("c7,c8", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("c9"), "name",
+                       TRACE_EVENT_SCOPE_THREAD);
+
+  EndTraceAndFlush();
+  std::vector<std::string> cat_groups;
+  TraceLog::GetInstance()->GetKnownCategoryGroups(&cat_groups);
+  EXPECT_TRUE(ContainsValue(cat_groups, "c1"));
+  EXPECT_TRUE(ContainsValue(cat_groups, "c2"));
+  EXPECT_TRUE(ContainsValue(cat_groups, "c3"));
+  EXPECT_TRUE(ContainsValue(cat_groups, "c4"));
+  EXPECT_TRUE(ContainsValue(cat_groups, "c5,c6"));
+  EXPECT_TRUE(ContainsValue(cat_groups, "c7,c8"));
+  EXPECT_TRUE(ContainsValue(cat_groups, "disabled-by-default-c9"));
+  // Make sure metadata isn't returned.
+  EXPECT_FALSE(ContainsValue(cat_groups, "__metadata"));
+
+  const std::vector<std::string> empty_categories;
+  std::vector<std::string> included_categories;
+  std::vector<std::string> excluded_categories;
+
+  // Test that category filtering works.
+
+  // Include nonexistent category -> no events
+  Clear();
+  included_categories.clear();
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("not_found823564786", ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("cat1", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("cat2", "name", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  DropTracedMetadataRecords();
+  EXPECT_TRUE(trace_parsed_.empty());
+
+  // Include existent category -> only events of that category
+  Clear();
+  included_categories.clear();
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("inc", ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("inc", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc2", "name", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  DropTracedMetadataRecords();
+  EXPECT_TRUE(FindMatchingValue("cat", "inc"));
+  EXPECT_FALSE(FindNonMatchingValue("cat", "inc"));
+
+  // Include existent wildcard -> all categories matching wildcard
+  // ("*" matches any substring, "?" a single character). A multi-category
+  // group is included if any of its categories matches.
+  Clear();
+  included_categories.clear();
+  TraceLog::GetInstance()->SetEnabled(
+      TraceConfig("inc_wildcard_*,inc_wildchar_?_end", ""),
+      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("inc_wildcard_abc", "included",
+      TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc_wildcard_", "included", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc_wildchar_x_end", "included",
+      TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc_wildchar_bla_end", "not_inc",
+      TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("cat1", "not_inc", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("cat2", "not_inc", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc_wildcard_category,other_category", "included",
+      TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0(
+      "non_included_category,inc_wildcard_category", "included",
+      TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_TRUE(FindMatchingValue("cat", "inc_wildcard_abc"));
+  EXPECT_TRUE(FindMatchingValue("cat", "inc_wildcard_"));
+  EXPECT_TRUE(FindMatchingValue("cat", "inc_wildchar_x_end"));
+  EXPECT_FALSE(FindMatchingValue("name", "not_inc"));
+  EXPECT_TRUE(FindMatchingValue("cat", "inc_wildcard_category,other_category"));
+  EXPECT_TRUE(FindMatchingValue("cat",
+                                "non_included_category,inc_wildcard_category"));
+
+  included_categories.clear();
+
+  // Exclude nonexistent category -> all events
+  Clear();
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("-not_found823564786", ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("cat1", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("cat2", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("category1,category2", "name", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_TRUE(FindMatchingValue("cat", "cat1"));
+  EXPECT_TRUE(FindMatchingValue("cat", "cat2"));
+  EXPECT_TRUE(FindMatchingValue("cat", "category1,category2"));
+
+  // Exclude existent category -> only events of other categories
+  // (a group survives if it contains any non-excluded category).
+  Clear();
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("-inc", ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("inc", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc2", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc2,inc", "name", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc,inc2", "name", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_TRUE(FindMatchingValue("cat", "inc2"));
+  EXPECT_FALSE(FindMatchingValue("cat", "inc"));
+  EXPECT_TRUE(FindMatchingValue("cat", "inc2,inc"));
+  EXPECT_TRUE(FindMatchingValue("cat", "inc,inc2"));
+
+  // Exclude existent wildcard -> all categories not matching wildcard
+  Clear();
+  TraceLog::GetInstance()->SetEnabled(
+      TraceConfig("-inc_wildcard_*,-inc_wildchar_?_end", ""),
+      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("inc_wildcard_abc", "not_inc",
+      TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc_wildcard_", "not_inc",
+      TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc_wildchar_x_end", "not_inc",
+      TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("inc_wildchar_bla_end", "included",
+      TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("cat1", "included", TRACE_EVENT_SCOPE_THREAD);
+  TRACE_EVENT_INSTANT0("cat2", "included", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_TRUE(FindMatchingValue("cat", "inc_wildchar_bla_end"));
+  EXPECT_TRUE(FindMatchingValue("cat", "cat1"));
+  EXPECT_TRUE(FindMatchingValue("cat", "cat2"));
+  EXPECT_FALSE(FindMatchingValue("name", "not_inc"));
+}
+
+
+// Test EVENT_WATCH_NOTIFICATION
+TEST_F(TraceEventTestFixture, EventWatchNotification) {
+  // Basic one occurrence.
+  BeginTrace();
+  TraceLog::WatchEventCallback callback =
+      base::Bind(&TraceEventTestFixture::OnWatchEventMatched,
+                 base::Unretained(this));
+  TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+  TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_EQ(event_watch_notification_, 1);
+
+  // Auto-reset after end trace: the watch set in the first trace must not
+  // carry over into the next one.
+  BeginTrace();
+  TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+  EndTraceAndFlush();
+  BeginTrace();
+  TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_EQ(event_watch_notification_, 0);
+
+  // Multiple occurrence: the callback fires once per matching event.
+  BeginTrace();
+  int num_occurrences = 5;
+  TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+  for (int i = 0; i < num_occurrences; ++i)
+    TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_EQ(event_watch_notification_, num_occurrences);
+
+  // Wrong category.
+  BeginTrace();
+  TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+  TRACE_EVENT_INSTANT0("wrong_cat", "event", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_EQ(event_watch_notification_, 0);
+
+  // Wrong name.
+  BeginTrace();
+  TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+  TRACE_EVENT_INSTANT0("cat", "wrong_event", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_EQ(event_watch_notification_, 0);
+
+  // Canceled.
+  BeginTrace();
+  TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+  TraceLog::GetInstance()->CancelWatchEvent();
+  TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_EQ(event_watch_notification_, 0);
+}
+
+// Test ASYNC_BEGIN/END events
+TEST_F(TraceEventTestFixture, AsyncBeginEndEvents) {
+  BeginTrace();
+
+  unsigned long long id = 0xfeedbeeffeedbeefull;
+  TRACE_EVENT_ASYNC_BEGIN0("cat", "name1", id);
+  TRACE_EVENT_ASYNC_STEP_INTO0("cat", "name1", id, "step1");
+  TRACE_EVENT_ASYNC_END0("cat", "name1", id);
+  TRACE_EVENT_BEGIN0("cat", "name2");
+  TRACE_EVENT_ASYNC_BEGIN0("cat", "name3", 0);
+  TRACE_EVENT_ASYNC_STEP_PAST0("cat", "name3", 0, "step2");
+
+  EndTraceAndFlush();
+
+  // Async phases: "S" = begin, "T" = step-into, "p" = step-past,
+  // "F" = end.
+  EXPECT_TRUE(FindNamePhase("name1", "S"));
+  EXPECT_TRUE(FindNamePhase("name1", "T"));
+  EXPECT_TRUE(FindNamePhase("name1", "F"));
+
+  // The id must be serialized as a hex string, e.g. "0xfeedbeeffeedbeef".
+  std::string id_str;
+  StringAppendF(&id_str, "0x%llx", id);
+
+  EXPECT_TRUE(FindNamePhaseKeyValue("name1", "S", "id", id_str.c_str()));
+  EXPECT_TRUE(FindNamePhaseKeyValue("name1", "T", "id", id_str.c_str()));
+  EXPECT_TRUE(FindNamePhaseKeyValue("name1", "F", "id", id_str.c_str()));
+  EXPECT_TRUE(FindNamePhaseKeyValue("name3", "S", "id", "0x0"));
+  EXPECT_TRUE(FindNamePhaseKeyValue("name3", "p", "id", "0x0"));
+
+  // BEGIN events should not have id
+  EXPECT_FALSE(FindNamePhaseKeyValue("name2", "B", "id", "0"));
+}
+
+// Test that pointer-based async event ids are mangled with the process ID.
+TEST_F(TraceEventTestFixture, AsyncBeginEndPointerMangling) {
+  void* ptr = this;
+
+  // Trace the same pointer id under two different process ids: events
+  // recorded under the same process id must agree, while events recorded
+  // under different process ids must differ.
+  TraceLog::GetInstance()->SetProcessID(100);
+  BeginTrace();
+  TRACE_EVENT_ASYNC_BEGIN0("cat", "name1", ptr);
+  TRACE_EVENT_ASYNC_BEGIN0("cat", "name2", ptr);
+  EndTraceAndFlush();
+
+  TraceLog::GetInstance()->SetProcessID(200);
+  BeginTrace();
+  TRACE_EVENT_ASYNC_END0("cat", "name1", ptr);
+  EndTraceAndFlush();
+
+  DictionaryValue* async_begin = FindNamePhase("name1", "S");
+  DictionaryValue* async_begin2 = FindNamePhase("name2", "S");
+  DictionaryValue* async_end = FindNamePhase("name1", "F");
+  EXPECT_TRUE(async_begin);
+  EXPECT_TRUE(async_begin2);
+  EXPECT_TRUE(async_end);
+
+  Value* value = NULL;
+  std::string async_begin_id_str;
+  std::string async_begin2_id_str;
+  std::string async_end_id_str;
+  ASSERT_TRUE(async_begin->Get("id", &value));
+  ASSERT_TRUE(value->GetAsString(&async_begin_id_str));
+  ASSERT_TRUE(async_begin2->Get("id", &value));
+  ASSERT_TRUE(value->GetAsString(&async_begin2_id_str));
+  ASSERT_TRUE(async_end->Get("id", &value));
+  ASSERT_TRUE(value->GetAsString(&async_end_id_str));
+
+  // Same pointer + same process id -> same mangled id; different process
+  // id -> different mangled id.
+  EXPECT_STREQ(async_begin_id_str.c_str(), async_begin2_id_str.c_str());
+  EXPECT_STRNE(async_begin_id_str.c_str(), async_end_id_str.c_str());
+}
+
+// Test that static strings are not copied.
+// String-typed and TRACE_STR_COPY arguments must allocate parameter copy
+// storage on the event; plain string literals and NULL TRACE_STR_COPY
+// arguments must not.
+TEST_F(TraceEventTestFixture, StaticStringVsString) {
+ TraceLog* tracer = TraceLog::GetInstance();
+ // Make sure old events are flushed:
+ EXPECT_EQ(0u, tracer->GetStatus().event_count);
+ const unsigned char* category_group_enabled =
+ TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("cat");
+
+ {
+ BeginTrace();
+ // Test that string arguments are copied.
+ TraceEventHandle handle1 =
+ trace_event_internal::AddTraceEvent(
+ TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name1",
+ trace_event_internal::kGlobalScope, trace_event_internal::kNoId,
+ 0, trace_event_internal::kNoId,
+ "arg1", std::string("argval"), "arg2", std::string("argval"));
+ // Test that static TRACE_STR_COPY string arguments are copied.
+ TraceEventHandle handle2 =
+ trace_event_internal::AddTraceEvent(
+ TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2",
+ trace_event_internal::kGlobalScope, trace_event_internal::kNoId,
+ 0, trace_event_internal::kNoId,
+ "arg1", TRACE_STR_COPY("argval"),
+ "arg2", TRACE_STR_COPY("argval"));
+ EXPECT_GT(tracer->GetStatus().event_count, 1u);
+ const TraceEvent* event1 = tracer->GetEventByHandle(handle1);
+ const TraceEvent* event2 = tracer->GetEventByHandle(handle2);
+ ASSERT_TRUE(event1);
+ ASSERT_TRUE(event2);
+ EXPECT_STREQ("name1", event1->name());
+ EXPECT_STREQ("name2", event2->name());
+ // Copied arguments are kept alive in the event's copy storage.
+ EXPECT_TRUE(event1->parameter_copy_storage() != NULL);
+ EXPECT_TRUE(event2->parameter_copy_storage() != NULL);
+ EXPECT_GT(event1->parameter_copy_storage()->size(), 0u);
+ EXPECT_GT(event2->parameter_copy_storage()->size(), 0u);
+ EndTraceAndFlush();
+ }
+
+ {
+ BeginTrace();
+ // Test that static literal string arguments are not copied.
+ TraceEventHandle handle1 =
+ trace_event_internal::AddTraceEvent(
+ TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name1",
+ trace_event_internal::kGlobalScope, trace_event_internal::kNoId,
+ 0, trace_event_internal::kNoId,
+ "arg1", "argval", "arg2", "argval");
+ // Test that static TRACE_STR_COPY NULL string arguments are not copied.
+ const char* str1 = NULL;
+ const char* str2 = NULL;
+ TraceEventHandle handle2 =
+ trace_event_internal::AddTraceEvent(
+ TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2",
+ trace_event_internal::kGlobalScope, trace_event_internal::kNoId,
+ 0, trace_event_internal::kNoId,
+ "arg1", TRACE_STR_COPY(str1),
+ "arg2", TRACE_STR_COPY(str2));
+ EXPECT_GT(tracer->GetStatus().event_count, 1u);
+ const TraceEvent* event1 = tracer->GetEventByHandle(handle1);
+ const TraceEvent* event2 = tracer->GetEventByHandle(handle2);
+ ASSERT_TRUE(event1);
+ ASSERT_TRUE(event2);
+ EXPECT_STREQ("name1", event1->name());
+ EXPECT_STREQ("name2", event2->name());
+ // No copy storage: the events reference the static/NULL strings directly.
+ EXPECT_TRUE(event1->parameter_copy_storage() == NULL);
+ EXPECT_TRUE(event2->parameter_copy_storage() == NULL);
+ EndTraceAndFlush();
+ }
+}
+
+// Test that data sent from other threads is gathered.
+// Runs the full macro-variant suite on a secondary thread and verifies the
+// flushed trace contains every event kind that suite emits.
+TEST_F(TraceEventTestFixture, DataCapturedOnThread) {
+ BeginTrace();
+
+ Thread thread("1");
+ WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ thread.Start();
+
+ thread.task_runner()->PostTask(
+ FROM_HERE, base::Bind(&TraceWithAllMacroVariants, &task_complete_event));
+ // Block until the posted task has emitted all its events.
+ task_complete_event.Wait();
+ thread.Stop();
+
+ EndTraceAndFlush();
+ ValidateAllTraceMacrosCreatedData(trace_parsed_);
+}
+
+// Test that data sent from multiple threads is gathered.
+// Deliberately flushes while half of the producer threads are already dead
+// and the other half are still alive, so both code paths for collecting
+// per-thread event buffers are exercised.
+TEST_F(TraceEventTestFixture, DataCapturedManyThreads) {
+ BeginTrace();
+
+ const int num_threads = 4;
+ const int num_events = 4000;
+ Thread* threads[num_threads];
+ WaitableEvent* task_complete_events[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ threads[i] = new Thread(StringPrintf("Thread %d", i));
+ task_complete_events[i] =
+ new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ threads[i]->Start();
+ threads[i]->task_runner()->PostTask(
+ FROM_HERE, base::Bind(&TraceManyInstantEvents, i, num_events,
+ task_complete_events[i]));
+ }
+
+ // Wait until every thread has finished emitting its events.
+ for (int i = 0; i < num_threads; i++) {
+ task_complete_events[i]->Wait();
+ }
+
+ // Let half of the threads end before flush.
+ for (int i = 0; i < num_threads / 2; i++) {
+ threads[i]->Stop();
+ delete threads[i];
+ delete task_complete_events[i];
+ }
+
+ EndTraceAndFlushInThreadWithMessageLoop();
+ ValidateInstantEventPresentOnEveryThread(trace_parsed_,
+ num_threads, num_events);
+
+ // Let the other half of the threads end after flush.
+ for (int i = num_threads / 2; i < num_threads; i++) {
+ threads[i]->Stop();
+ delete threads[i];
+ delete task_complete_events[i];
+ }
+}
+
+// Test that thread and process names show up in the trace as "thread_name"
+// metadata ("ph" == "M") entries carrying the correct tid/pid and name.
+TEST_F(TraceEventTestFixture, ThreadNames) {
+ // Create threads before we enable tracing to make sure
+ // that tracelog still captures them.
+ const int kNumThreads = 4;
+ const int kNumEvents = 10;
+ Thread* threads[kNumThreads];
+ PlatformThreadId thread_ids[kNumThreads];
+ for (int i = 0; i < kNumThreads; i++)
+ threads[i] = new Thread(StringPrintf("Thread %d", i));
+
+ // Enable tracing.
+ BeginTrace();
+
+ // Now run some trace code on these threads.
+ WaitableEvent* task_complete_events[kNumThreads];
+ for (int i = 0; i < kNumThreads; i++) {
+ task_complete_events[i] =
+ new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ threads[i]->Start();
+ thread_ids[i] = threads[i]->GetThreadId();
+ threads[i]->task_runner()->PostTask(
+ FROM_HERE, base::Bind(&TraceManyInstantEvents, i, kNumEvents,
+ task_complete_events[i]));
+ }
+ for (int i = 0; i < kNumThreads; i++) {
+ task_complete_events[i]->Wait();
+ }
+
+ // Shut things down.
+ for (int i = 0; i < kNumThreads; i++) {
+ threads[i]->Stop();
+ delete threads[i];
+ delete task_complete_events[i];
+ }
+
+ EndTraceAndFlush();
+
+ std::string tmp;
+ int tmp_int;
+ const DictionaryValue* item;
+
+ // Make sure we get thread name metadata.
+ // Note, the test suite may have created a ton of threads.
+ // So, we'll have thread names for threads we didn't create.
+ std::vector<const DictionaryValue*> items =
+ FindTraceEntries(trace_parsed_, "thread_name");
+ for (int i = 0; i < static_cast<int>(items.size()); i++) {
+ item = items[i];
+ ASSERT_TRUE(item);
+ EXPECT_TRUE(item->GetInteger("tid", &tmp_int));
+
+ // See if this thread name is one of the threads we just created
+ for (int j = 0; j < kNumThreads; j++) {
+ if (static_cast<int>(thread_ids[j]) != tmp_int)
+ continue;
+
+ // Metadata entries use phase "M" and this process's pid.
+ std::string expected_name = StringPrintf("Thread %d", j);
+ EXPECT_TRUE(item->GetString("ph", &tmp) && tmp == "M");
+ EXPECT_TRUE(item->GetInteger("pid", &tmp_int) &&
+ tmp_int == static_cast<int>(base::GetCurrentProcId()));
+ // If the thread name changes or the tid gets reused, the name will be
+ // a comma-separated list of thread names, so look for a substring.
+ EXPECT_TRUE(item->GetString("args.name", &tmp) &&
+ tmp.find(expected_name) != std::string::npos);
+ }
+ }
+}
+
+// Renaming the current thread several times must produce a single
+// "thread_name" metadata entry whose name is the comma-joined list of names
+// that were active when at least one event was emitted. Empty names and
+// names with no events ("shop") are skipped; a name with multiple events
+// ("pub") appears only once.
+TEST_F(TraceEventTestFixture, ThreadNameChanges) {
+ BeginTrace();
+
+ PlatformThread::SetName("");
+ TRACE_EVENT_INSTANT0("drink", "water", TRACE_EVENT_SCOPE_THREAD);
+
+ PlatformThread::SetName("cafe");
+ TRACE_EVENT_INSTANT0("drink", "coffee", TRACE_EVENT_SCOPE_THREAD);
+
+ PlatformThread::SetName("shop");
+ // No event here, so won't appear in combined name.
+
+ PlatformThread::SetName("pub");
+ TRACE_EVENT_INSTANT0("drink", "beer", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("drink", "wine", TRACE_EVENT_SCOPE_THREAD);
+
+ PlatformThread::SetName(" bar");
+ TRACE_EVENT_INSTANT0("drink", "whisky", TRACE_EVENT_SCOPE_THREAD);
+
+ EndTraceAndFlush();
+
+ std::vector<const DictionaryValue*> items =
+ FindTraceEntries(trace_parsed_, "thread_name");
+ EXPECT_EQ(1u, items.size());
+ ASSERT_GT(items.size(), 0u);
+ const DictionaryValue* item = items[0];
+ ASSERT_TRUE(item);
+ int tid;
+ EXPECT_TRUE(item->GetInteger("tid", &tid));
+ EXPECT_EQ(PlatformThread::CurrentId(), static_cast<PlatformThreadId>(tid));
+
+ // Note the leading space in " bar" is preserved verbatim.
+ std::string expected_name = "cafe,pub, bar";
+ std::string tmp;
+ EXPECT_TRUE(item->GetString("args.name", &tmp));
+ EXPECT_EQ(expected_name, tmp);
+}
+
+// Test that the disabled trace categories are included/excluded from the
+// trace output correctly.
+TEST_F(TraceEventTestFixture, DisabledCategories) {
+ // Default trace: disabled-by-default-* categories must be dropped.
+ BeginTrace();
+ TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("cc"), "first",
+ TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("included", "first", TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+ {
+ // |item| and |trace_parsed| are referenced by the EXPECT_*FIND_ macros.
+ const DictionaryValue* item = NULL;
+ ListValue& trace_parsed = trace_parsed_;
+ EXPECT_NOT_FIND_("disabled-by-default-cc");
+ EXPECT_FIND_("included");
+ }
+ Clear();
+
+ // Explicitly enabling the disabled-by-default category includes it.
+ BeginSpecificTrace("disabled-by-default-cc");
+ TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("cc"), "second",
+ TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("other_included", "second", TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+
+ {
+ const DictionaryValue* item = NULL;
+ ListValue& trace_parsed = trace_parsed_;
+ EXPECT_FIND_("disabled-by-default-cc");
+ EXPECT_FIND_("other_included");
+ }
+
+ Clear();
+
+ // A multi-category group is recorded if any member category is enabled,
+ // regardless of the order of the disabled-by-default member.
+ BeginSpecificTrace("other_included");
+ TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("cc") ",other_included",
+ "first", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("other_included," TRACE_DISABLED_BY_DEFAULT("cc"),
+ "second", TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+
+ {
+ const DictionaryValue* item = NULL;
+ ListValue& trace_parsed = trace_parsed_;
+ EXPECT_FIND_("disabled-by-default-cc,other_included");
+ EXPECT_FIND_("other_included,disabled-by-default-cc");
+ }
+}
+
+TEST_F(TraceEventTestFixture, NormallyNoDeepCopy) {
+ // Test that the TRACE_EVENT macros do not deep-copy their string. If they
+ // do so it may indicate a performance regression, but more-over it would
+ // make the DEEP_COPY overloads redundant.
+ std::string name_string("event name");
+
+ BeginTrace();
+ TRACE_EVENT_INSTANT0("category", name_string.c_str(),
+ TRACE_EVENT_SCOPE_THREAD);
+
+ // Modify the string in place (a wholesale reassignment may leave the old
+ // string intact on the heap).
+ name_string[0] = '@';
+
+ EndTraceAndFlush();
+
+ // Since only the pointer was stored, the trace must reflect the mutated
+ // name, not the one that was current when the event was emitted.
+ EXPECT_FALSE(FindTraceEntry(trace_parsed_, "event name"));
+ EXPECT_TRUE(FindTraceEntry(trace_parsed_, name_string.c_str()));
+}
+
+// Test that the TRACE_EVENT_COPY_* macros deep-copy names, argument names
+// and string argument values, so mutating the source strings after the
+// event is recorded does not affect the trace output.
+TEST_F(TraceEventTestFixture, DeepCopy) {
+ static const char kOriginalName1[] = "name1";
+ static const char kOriginalName2[] = "name2";
+ static const char kOriginalName3[] = "name3";
+ std::string name1(kOriginalName1);
+ std::string name2(kOriginalName2);
+ std::string name3(kOriginalName3);
+ std::string arg1("arg1");
+ std::string arg2("arg2");
+ std::string val1("val1");
+ std::string val2("val2");
+
+ BeginTrace();
+ TRACE_EVENT_COPY_INSTANT0("category", name1.c_str(),
+ TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_COPY_BEGIN1("category", name2.c_str(),
+ arg1.c_str(), 5);
+ TRACE_EVENT_COPY_END2("category", name3.c_str(),
+ arg1.c_str(), val1,
+ arg2.c_str(), val2);
+
+ // As per NormallyNoDeepCopy, modify the strings in place.
+ name1[0] = name2[0] = name3[0] = arg1[0] = arg2[0] = val1[0] = val2[0] = '@';
+
+ EndTraceAndFlush();
+
+ // The mutated ("@...") names must not appear; the originals must.
+ EXPECT_FALSE(FindTraceEntry(trace_parsed_, name1.c_str()));
+ EXPECT_FALSE(FindTraceEntry(trace_parsed_, name2.c_str()));
+ EXPECT_FALSE(FindTraceEntry(trace_parsed_, name3.c_str()));
+
+ const DictionaryValue* entry1 = FindTraceEntry(trace_parsed_, kOriginalName1);
+ const DictionaryValue* entry2 = FindTraceEntry(trace_parsed_, kOriginalName2);
+ const DictionaryValue* entry3 = FindTraceEntry(trace_parsed_, kOriginalName3);
+ ASSERT_TRUE(entry1);
+ ASSERT_TRUE(entry2);
+ ASSERT_TRUE(entry3);
+
+ // Argument names were deep-copied too: the original key exists, the
+ // mutated key does not.
+ int i;
+ EXPECT_FALSE(entry2->GetInteger("args.@rg1", &i));
+ EXPECT_TRUE(entry2->GetInteger("args.arg1", &i));
+ EXPECT_EQ(5, i);
+
+ std::string s;
+ EXPECT_TRUE(entry3->GetString("args.arg1", &s));
+ EXPECT_EQ("val1", s);
+ EXPECT_TRUE(entry3->GetString("args.arg2", &s));
+ EXPECT_EQ("val2", s);
+}
+
+// Test that TraceResultBuffer outputs the correct result whether it is added
+// in chunks or added all at once.
+TEST_F(TraceEventTestFixture, TraceResultBuffer) {
+ Clear();
+
+ // Fragments added one at a time should be joined with commas and wrapped
+ // in a single JSON array.
+ trace_buffer_.Start();
+ trace_buffer_.AddFragment("bla1");
+ trace_buffer_.AddFragment("bla2");
+ trace_buffer_.AddFragment("bla3,bla4");
+ trace_buffer_.Finish();
+ EXPECT_STREQ(json_output_.json_output.c_str(), "[bla1,bla2,bla3,bla4]");
+
+ Clear();
+
+ // A single pre-joined fragment must yield the identical output.
+ trace_buffer_.Start();
+ trace_buffer_.AddFragment("bla1,bla2,bla3,bla4");
+ trace_buffer_.Finish();
+ EXPECT_STREQ(json_output_.json_output.c_str(), "[bla1,bla2,bla3,bla4]");
+}
+
+// Test that trace_event parameters are not evaluated if the tracing
+// system is disabled.
+TEST_F(TraceEventTestFixture, TracingIsLazy) {
+ BeginTrace();
+
+ // While enabled, the argument expression (a++) is evaluated exactly once.
+ int a = 0;
+ TRACE_EVENT_INSTANT1("category", "test", TRACE_EVENT_SCOPE_THREAD, "a", a++);
+ EXPECT_EQ(1, a);
+
+ TraceLog::GetInstance()->SetDisabled();
+
+ // While disabled, the macro must not evaluate its arguments at all, so
+ // |a| stays unchanged.
+ TRACE_EVENT_INSTANT1("category", "test", TRACE_EVENT_SCOPE_THREAD, "a", a++);
+ EXPECT_EQ(1, a);
+
+ EndTraceAndFlush();
+}
+
+// Verifies TraceLog::SetEnabled()/SetDisabled() toggle IsEnabled() as
+// expected: a second (nested) SetEnabled() keeps the log enabled, a single
+// SetDisabled() after that leaves it disabled, and redundant SetDisabled()
+// calls on an already-disabled log are harmless no-ops.
+TEST_F(TraceEventTestFixture, TraceEnableDisable) {
+ TraceLog* trace_log = TraceLog::GetInstance();
+ TraceConfig tc_inc_all("*", "");
+ trace_log->SetEnabled(tc_inc_all, TraceLog::RECORDING_MODE);
+ EXPECT_TRUE(trace_log->IsEnabled());
+ trace_log->SetDisabled();
+ EXPECT_FALSE(trace_log->IsEnabled());
+
+ trace_log->SetEnabled(tc_inc_all, TraceLog::RECORDING_MODE);
+ EXPECT_TRUE(trace_log->IsEnabled());
+ // Nested enable with a different config: still enabled.
+ trace_log->SetEnabled(TraceConfig(), TraceLog::RECORDING_MODE);
+ EXPECT_TRUE(trace_log->IsEnabled());
+ trace_log->SetDisabled();
+ EXPECT_FALSE(trace_log->IsEnabled());
+ // Disabling an already-disabled log must be a no-op.
+ trace_log->SetDisabled();
+ EXPECT_FALSE(trace_log->IsEnabled());
+}
+
+// Verifies how category filters combine across nested SetEnabled() calls:
+// each nested enable adds to the effective category set, exclusion lists
+// widen to "everything not excluded", and disabled-by-default categories
+// survive a nested enable that doesn't mention them.
+TEST_F(TraceEventTestFixture, TraceCategoriesAfterNestedEnable) {
+ TraceLog* trace_log = TraceLog::GetInstance();
+ trace_log->SetEnabled(TraceConfig("foo,bar", ""), TraceLog::RECORDING_MODE);
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("baz"));
+ trace_log->SetEnabled(TraceConfig("foo2", ""), TraceLog::RECORDING_MODE);
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo2"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("baz"));
+ // The "" becomes the default category set when applied.
+ trace_log->SetEnabled(TraceConfig(), TraceLog::RECORDING_MODE);
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("baz"));
+ EXPECT_STREQ(
+ "",
+ trace_log->GetCurrentTraceConfig().ToCategoryFilterString().c_str());
+ // One SetDisabled() per prior SetEnabled() to fully unwind the nesting.
+ trace_log->SetDisabled();
+ trace_log->SetDisabled();
+ trace_log->SetDisabled();
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("baz"));
+
+ // Exclusion lists: everything except the excluded categories is enabled,
+ // and a nested inclusive enable does not resurrect the excluded ones.
+ trace_log->SetEnabled(TraceConfig("-foo,-bar", ""), TraceLog::RECORDING_MODE);
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("baz"));
+ trace_log->SetEnabled(TraceConfig("moo", ""), TraceLog::RECORDING_MODE);
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("baz"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("moo"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_STREQ(
+ "-foo,-bar",
+ trace_log->GetCurrentTraceConfig().ToCategoryFilterString().c_str());
+ trace_log->SetDisabled();
+ trace_log->SetDisabled();
+
+ // Make sure disabled categories aren't cleared if we set in the second.
+ trace_log->SetEnabled(TraceConfig("disabled-by-default-cc,foo", ""),
+ TraceLog::RECORDING_MODE);
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("bar"));
+ trace_log->SetEnabled(TraceConfig("disabled-by-default-gpu", ""),
+ TraceLog::RECORDING_MODE);
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-cc"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-gpu"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("bar"));
+ EXPECT_STREQ(
+ "disabled-by-default-cc,disabled-by-default-gpu",
+ trace_log->GetCurrentTraceConfig().ToCategoryFilterString().c_str());
+ trace_log->SetDisabled();
+ trace_log->SetDisabled();
+}
+
+// Every spelling of "default categories" — empty config, "", "*", and the
+// single-string constructor — must yield the same default filtering
+// behavior (checked by the shared CheckTraceDefaultCategoryFilters helper).
+TEST_F(TraceEventTestFixture, TraceWithDefaultCategoryFilters) {
+ TraceLog* trace_log = TraceLog::GetInstance();
+
+ trace_log->SetEnabled(TraceConfig(), TraceLog::RECORDING_MODE);
+ CheckTraceDefaultCategoryFilters(*trace_log);
+ trace_log->SetDisabled();
+
+ trace_log->SetEnabled(TraceConfig("", ""), TraceLog::RECORDING_MODE);
+ CheckTraceDefaultCategoryFilters(*trace_log);
+ trace_log->SetDisabled();
+
+ trace_log->SetEnabled(TraceConfig("*", ""), TraceLog::RECORDING_MODE);
+ CheckTraceDefaultCategoryFilters(*trace_log);
+ trace_log->SetDisabled();
+
+ trace_log->SetEnabled(TraceConfig(""), TraceLog::RECORDING_MODE);
+ CheckTraceDefaultCategoryFilters(*trace_log);
+ trace_log->SetDisabled();
+}
+
+// Verifies filter behavior for disabled-by-default-* categories: naming one
+// explicitly enables it without enabling other disabled-by-default ones,
+// and naming ONLY disabled-by-default categories still enables the normal
+// default categories.
+TEST_F(TraceEventTestFixture, TraceWithDisabledByDefaultCategoryFilters) {
+ TraceLog* trace_log = TraceLog::GetInstance();
+
+ trace_log->SetEnabled(TraceConfig("foo,disabled-by-default-foo", ""),
+ TraceLog::RECORDING_MODE);
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-foo"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-bar"));
+ trace_log->SetDisabled();
+
+ // Enabling only the disabled-by-default-* category means the default ones
+ // are also enabled.
+ trace_log->SetEnabled(TraceConfig("disabled-by-default-foo", ""),
+ TraceLog::RECORDING_MODE);
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-bar"));
+ trace_log->SetDisabled();
+}
+
+// With "enable-sampling", the sampling thread periodically records the
+// current sampling-state string as a "P" (sample) phase event. Waiting for
+// a sampling event after each state change guarantees each state is
+// captured at least once.
+TEST_F(TraceEventTestFixture, TraceSampling) {
+ TraceLog::GetInstance()->SetEnabled(
+ TraceConfig(kRecordAllCategoryFilter, "record-until-full,enable-sampling"),
+ TraceLog::RECORDING_MODE);
+
+ TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "cc", "Stuff");
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "cc", "Things");
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+
+ EndTraceAndFlush();
+
+ // Make sure we hit at least once.
+ EXPECT_TRUE(FindNamePhase("Stuff", "P"));
+ EXPECT_TRUE(FindNamePhase("Things", "P"));
+}
+
+// TRACE_EVENT_SCOPED_SAMPLING_STATE must restore the previous sampling
+// state ("AAA") when each inner scope exits, while the non-scoped
+// TRACE_EVENT_SET_SAMPLING_STATE ("DDD") persists past the scope's end.
+TEST_F(TraceEventTestFixture, TraceSamplingScope) {
+ TraceLog::GetInstance()->SetEnabled(
+ TraceConfig(kRecordAllCategoryFilter, "record-until-full,enable-sampling"),
+ TraceLog::RECORDING_MODE);
+
+ TRACE_EVENT_SCOPED_SAMPLING_STATE("AAA", "name");
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ {
+ EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
+ TRACE_EVENT_SCOPED_SAMPLING_STATE("BBB", "name");
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "BBB");
+ }
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ {
+ // Back to "AAA" after the "BBB" scope ended.
+ EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
+ TRACE_EVENT_SCOPED_SAMPLING_STATE("CCC", "name");
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "CCC");
+ }
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ {
+ EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
+ // Non-scoped SET: this state change is not undone at scope exit.
+ TRACE_EVENT_SET_SAMPLING_STATE("DDD", "name");
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "DDD");
+ }
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "DDD");
+
+ EndTraceAndFlush();
+}
+
+// Minimal ConvertableToTraceFormat implementation used by the tests below:
+// it always serializes as the fixed JSON object {"foo":1}, so tests can
+// assert args.<name>.foo == 1 after parsing the trace.
+class MyData : public ConvertableToTraceFormat {
+ public:
+ MyData() {}
+ ~MyData() override {}
+
+ // Appends this object's fixed JSON representation to |out|.
+ void AppendAsTraceFormat(std::string* out) const override {
+ *out += "{\"foo\":1}";
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MyData);
+};
+
+// Verifies that ConvertableToTraceFormat argument values are serialized via
+// AppendAsTraceFormat() and appear as parsed JSON dictionaries in the trace,
+// alone and mixed with plain string arguments.
+TEST_F(TraceEventTestFixture, ConvertableTypes) {
+ TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+ TraceLog::RECORDING_MODE);
+
+ std::unique_ptr<ConvertableToTraceFormat> data(new MyData());
+ std::unique_ptr<ConvertableToTraceFormat> data1(new MyData());
+ std::unique_ptr<ConvertableToTraceFormat> data2(new MyData());
+ TRACE_EVENT1("foo", "bar", "data", std::move(data));
+ TRACE_EVENT2("foo", "baz", "data1", std::move(data1), "data2",
+ std::move(data2));
+
+ // Check that std::unique_ptr<DerivedClassOfConvertable> are properly
+ // treated as convertable and not accidentally cast to bool.
+ std::unique_ptr<MyData> convertData1(new MyData());
+ std::unique_ptr<MyData> convertData2(new MyData());
+ std::unique_ptr<MyData> convertData3(new MyData());
+ std::unique_ptr<MyData> convertData4(new MyData());
+ TRACE_EVENT2("foo", "string_first", "str", "string value 1", "convert",
+ std::move(convertData1));
+ TRACE_EVENT2("foo", "string_second", "convert", std::move(convertData2),
+ "str", "string value 2");
+ TRACE_EVENT2("foo", "both_conv", "convert1", std::move(convertData3),
+ "convert2", std::move(convertData4));
+ EndTraceAndFlush();
+
+ // One arg version.
+ DictionaryValue* dict = FindNamePhase("bar", "X");
+ ASSERT_TRUE(dict);
+
+ const DictionaryValue* args_dict = NULL;
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+
+ const Value* value = NULL;
+ const DictionaryValue* convertable_dict = NULL;
+ EXPECT_TRUE(args_dict->Get("data", &value));
+ ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+
+ // MyData serializes as {"foo":1}, so args.data.foo must be 1.
+ int foo_val;
+ EXPECT_TRUE(convertable_dict->GetInteger("foo", &foo_val));
+ EXPECT_EQ(1, foo_val);
+
+ // Two arg version.
+ dict = FindNamePhase("baz", "X");
+ ASSERT_TRUE(dict);
+
+ args_dict = NULL;
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+
+ value = NULL;
+ convertable_dict = NULL;
+ EXPECT_TRUE(args_dict->Get("data1", &value));
+ ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+
+ value = NULL;
+ convertable_dict = NULL;
+ EXPECT_TRUE(args_dict->Get("data2", &value));
+ ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+
+ // Convertable with other types.
+ dict = FindNamePhase("string_first", "X");
+ ASSERT_TRUE(dict);
+
+ args_dict = NULL;
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+
+ std::string str_value;
+ EXPECT_TRUE(args_dict->GetString("str", &str_value));
+ EXPECT_STREQ("string value 1", str_value.c_str());
+
+ value = NULL;
+ convertable_dict = NULL;
+ foo_val = 0;
+ EXPECT_TRUE(args_dict->Get("convert", &value));
+ ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+ EXPECT_TRUE(convertable_dict->GetInteger("foo", &foo_val));
+ EXPECT_EQ(1, foo_val);
+
+ dict = FindNamePhase("string_second", "X");
+ ASSERT_TRUE(dict);
+
+ args_dict = NULL;
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+
+ EXPECT_TRUE(args_dict->GetString("str", &str_value));
+ EXPECT_STREQ("string value 2", str_value.c_str());
+
+ value = NULL;
+ convertable_dict = NULL;
+ foo_val = 0;
+ EXPECT_TRUE(args_dict->Get("convert", &value));
+ ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+ EXPECT_TRUE(convertable_dict->GetInteger("foo", &foo_val));
+ EXPECT_EQ(1, foo_val);
+
+ dict = FindNamePhase("both_conv", "X");
+ ASSERT_TRUE(dict);
+
+ args_dict = NULL;
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+
+ value = NULL;
+ convertable_dict = NULL;
+ foo_val = 0;
+ EXPECT_TRUE(args_dict->Get("convert1", &value));
+ ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+ EXPECT_TRUE(args_dict->Get("convert2", &value));
+ ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+}
+
+// Verifies JSON serialization of every primitive trace-argument type:
+// ints, floats (including infinities and NaN, which become strings),
+// pointers (hex strings), bools, and Time/TimeTicks (internal int values).
+TEST_F(TraceEventTestFixture, PrimitiveArgs) {
+ TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+ TraceLog::RECORDING_MODE);
+
+ TRACE_EVENT1("foo", "event1", "int_one", 1);
+ TRACE_EVENT1("foo", "event2", "int_neg_ten", -10);
+ TRACE_EVENT1("foo", "event3", "float_one", 1.0f);
+ TRACE_EVENT1("foo", "event4", "float_half", .5f);
+ TRACE_EVENT1("foo", "event5", "float_neghalf", -.5f);
+ TRACE_EVENT1("foo", "event6", "float_infinity",
+ std::numeric_limits<float>::infinity());
+ TRACE_EVENT1("foo", "event6b", "float_neg_infinity",
+ -std::numeric_limits<float>::infinity());
+ TRACE_EVENT1("foo", "event7", "double_nan",
+ std::numeric_limits<double>::quiet_NaN());
+ void* p = 0;
+ TRACE_EVENT1("foo", "event8", "pointer_null", p);
+ p = reinterpret_cast<void*>(0xbadf00d);
+ TRACE_EVENT1("foo", "event9", "pointer_badf00d", p);
+ TRACE_EVENT1("foo", "event10", "bool_true", true);
+ TRACE_EVENT1("foo", "event11", "bool_false", false);
+ TRACE_EVENT1("foo", "event12", "time_null",
+ base::Time());
+ TRACE_EVENT1("foo", "event13", "time_one",
+ base::Time::FromInternalValue(1));
+ TRACE_EVENT1("foo", "event14", "timeticks_null",
+ base::TimeTicks());
+ TRACE_EVENT1("foo", "event15", "timeticks_one",
+ base::TimeTicks::FromInternalValue(1));
+ EndTraceAndFlush();
+
+ const DictionaryValue* args_dict = NULL;
+ DictionaryValue* dict = NULL;
+ const Value* value = NULL;
+ std::string str_value;
+ int int_value;
+ double double_value;
+ bool bool_value;
+
+ dict = FindNamePhase("event1", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetInteger("int_one", &int_value));
+ EXPECT_EQ(1, int_value);
+
+ dict = FindNamePhase("event2", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetInteger("int_neg_ten", &int_value));
+ EXPECT_EQ(-10, int_value);
+
+ // 1f must be serialized to JSON as "1.0" in order to be a double, not an
+ // int.
+ dict = FindNamePhase("event3", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->Get("float_one", &value));
+ EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(value->GetAsDouble(&double_value));
+ EXPECT_EQ(1, double_value);
+
+ // .5f must be serialized to JSON as "0.5".
+ dict = FindNamePhase("event4", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->Get("float_half", &value));
+ EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(value->GetAsDouble(&double_value));
+ EXPECT_EQ(0.5, double_value);
+
+ // -.5f must be serialized to JSON as "-0.5".
+ dict = FindNamePhase("event5", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->Get("float_neghalf", &value));
+ EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(value->GetAsDouble(&double_value));
+ EXPECT_EQ(-0.5, double_value);
+
+ // Infinity is serialized to JSON as a string.
+ dict = FindNamePhase("event6", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetString("float_infinity", &str_value));
+ EXPECT_STREQ("Infinity", str_value.c_str());
+ dict = FindNamePhase("event6b", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetString("float_neg_infinity", &str_value));
+ EXPECT_STREQ("-Infinity", str_value.c_str());
+
+ // NaN is serialized to JSON as a string.
+ dict = FindNamePhase("event7", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetString("double_nan", &str_value));
+ EXPECT_STREQ("NaN", str_value.c_str());
+
+ // NULL pointers should be serialized as "0x0".
+ dict = FindNamePhase("event8", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetString("pointer_null", &str_value));
+ EXPECT_STREQ("0x0", str_value.c_str());
+
+ // Other pointers should be serialized as a hex string.
+ dict = FindNamePhase("event9", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetString("pointer_badf00d", &str_value));
+ EXPECT_STREQ("0xbadf00d", str_value.c_str());
+
+ dict = FindNamePhase("event10", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetBoolean("bool_true", &bool_value));
+ EXPECT_TRUE(bool_value);
+
+ dict = FindNamePhase("event11", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetBoolean("bool_false", &bool_value));
+ EXPECT_FALSE(bool_value);
+
+ // Time/TimeTicks serialize as their internal integer value.
+ dict = FindNamePhase("event12", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetInteger("time_null", &int_value));
+ EXPECT_EQ(0, int_value);
+
+ dict = FindNamePhase("event13", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetInteger("time_one", &int_value));
+ EXPECT_EQ(1, int_value);
+
+ dict = FindNamePhase("event14", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetInteger("timeticks_null", &int_value));
+ EXPECT_EQ(0, int_value);
+
+ dict = FindNamePhase("event15", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetInteger("timeticks_one", &int_value));
+ EXPECT_EQ(1, int_value);
+}
+
+// An event name containing backslashes must survive the JSON
+// serialize/parse round trip unchanged (i.e. be properly escaped in the
+// JSON output).
+TEST_F(TraceEventTestFixture, NameIsEscaped) {
+ TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+ TraceLog::RECORDING_MODE);
+ TRACE_EVENT0("category", "name\\with\\backspaces");
+ EndTraceAndFlush();
+
+ EXPECT_TRUE(FindMatchingValue("cat", "category"));
+ EXPECT_TRUE(FindMatchingValue("name", "name\\with\\backspaces"));
+}
+
+namespace {
+
+// Per-argument filter used below: only the argument named
+// "granular_arg_whitelisted" passes.
+bool IsArgNameWhitelisted(const char* arg_name) {
+ return base::MatchPattern(arg_name, "granular_arg_whitelisted");
+}
+
+// Argument filter predicate for the ArgsWhitelisting test:
+// - all events in the "toplevel" category keep all their arguments;
+// - "benchmark"/"granularly_whitelisted" events keep only arguments
+//   accepted by IsArgNameWhitelisted (installed via |arg_filter|);
+// - everything else has its arguments stripped.
+bool IsTraceEventArgsWhitelisted(const char* category_group_name,
+ const char* event_name,
+ ArgumentNameFilterPredicate* arg_filter) {
+ if (base::MatchPattern(category_group_name, "toplevel") &&
+ base::MatchPattern(event_name, "*")) {
+ return true;
+ }
+
+ if (base::MatchPattern(category_group_name, "benchmark") &&
+ base::MatchPattern(event_name, "granularly_whitelisted")) {
+ *arg_filter = base::Bind(&IsArgNameWhitelisted);
+ return true;
+ }
+
+ return false;
+}
+
+} // namespace
+
+// With "enable-argument-filter", arguments of non-whitelisted events are
+// replaced by the literal string "__stripped__", while whitelisted events
+// (and individually whitelisted argument names) keep their values.
+TEST_F(TraceEventTestFixture, ArgsWhitelisting) {
+ TraceLog::GetInstance()->SetArgumentFilterPredicate(
+ base::Bind(&IsTraceEventArgsWhitelisted));
+
+ TraceLog::GetInstance()->SetEnabled(
+ TraceConfig(kRecordAllCategoryFilter, "enable-argument-filter"),
+ TraceLog::RECORDING_MODE);
+
+ TRACE_EVENT1("toplevel", "event1", "int_one", 1);
+ TRACE_EVENT1("whitewashed", "event2", "int_two", 1);
+
+ TRACE_EVENT2("benchmark", "granularly_whitelisted",
+ "granular_arg_whitelisted", "whitelisted_value",
+ "granular_arg_blacklisted", "blacklisted_value");
+
+ EndTraceAndFlush();
+
+ const DictionaryValue* args_dict = NULL;
+ DictionaryValue* dict = NULL;
+ int int_value;
+
+ // "toplevel" is whitelisted: its argument survives intact.
+ dict = FindNamePhase("event1", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetInteger("int_one", &int_value));
+ EXPECT_EQ(1, int_value);
+
+ // "whitewashed" is not whitelisted: args collapse to "__stripped__".
+ dict = FindNamePhase("event2", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_FALSE(args_dict->GetInteger("int_two", &int_value));
+
+ std::string args_string;
+ EXPECT_TRUE(dict->GetString("args", &args_string));
+ EXPECT_EQ(args_string, "__stripped__");
+
+ // Granular filter: only the whitelisted argument name keeps its value.
+ dict = FindNamePhase("granularly_whitelisted", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+
+ EXPECT_TRUE(args_dict->GetString("granular_arg_whitelisted", &args_string));
+ EXPECT_EQ(args_string, "whitelisted_value");
+
+ EXPECT_TRUE(args_dict->GetString("granular_arg_blacklisted", &args_string));
+ EXPECT_EQ(args_string, "__stripped__");
+}
+
+// Fixture for event-callback tests. TraceLog's callback is a plain static
+// function, so a single static instance pointer (s_instance) routes each
+// Callback() invocation to the currently running test, which records the
+// phase, category, name and timestamp of every callback event.
+class TraceEventCallbackTest : public TraceEventTestFixture {
+ public:
+  void SetUp() override {
+    TraceEventTestFixture::SetUp();
+    // Only one instance may be live at a time; Callback() dereferences it.
+    ASSERT_EQ(NULL, s_instance);
+    s_instance = this;
+  }
+  void TearDown() override {
+    TraceLog::GetInstance()->SetDisabled();
+    ASSERT_TRUE(s_instance);
+    s_instance = NULL;
+    TraceEventTestFixture::TearDown();
+  }
+
+ protected:
+  // For TraceEventCallbackAndRecordingX tests.
+  // Checks that callback events are exactly ("callback", "yes") pairs and
+  // that the recorded trace contains "recording"/"yes" but never the
+  // callback-only category or any "no" event.
+  void VerifyCallbackAndRecordedEvents(size_t expected_callback_count,
+                                       size_t expected_recorded_count) {
+    // Callback events.
+    EXPECT_EQ(expected_callback_count, collected_events_names_.size());
+    for (size_t i = 0; i < collected_events_names_.size(); ++i) {
+      EXPECT_EQ("callback", collected_events_categories_[i]);
+      EXPECT_EQ("yes", collected_events_names_[i]);
+    }
+
+    // Recorded events.
+    EXPECT_EQ(expected_recorded_count, trace_parsed_.GetSize());
+    EXPECT_TRUE(FindTraceEntry(trace_parsed_, "recording"));
+    EXPECT_FALSE(FindTraceEntry(trace_parsed_, "callback"));
+    EXPECT_TRUE(FindTraceEntry(trace_parsed_, "yes"));
+    EXPECT_FALSE(FindTraceEntry(trace_parsed_, "no"));
+  }
+
+  // Asserts that collected event |i| has the given phase/category/name.
+  void VerifyCollectedEvent(size_t i,
+                            unsigned phase,
+                            const std::string& category,
+                            const std::string& name) {
+    EXPECT_EQ(phase, collected_events_phases_[i]);
+    EXPECT_EQ(category, collected_events_categories_[i]);
+    EXPECT_EQ(name, collected_events_names_[i]);
+  }
+
+  // Parallel arrays: entry i of each vector describes the i-th callback.
+  std::vector<std::string> collected_events_categories_;
+  std::vector<std::string> collected_events_names_;
+  std::vector<unsigned char> collected_events_phases_;
+  std::vector<TimeTicks> collected_events_timestamps_;
+
+  static TraceEventCallbackTest* s_instance;
+  // Signature dictated by TraceLog::SetEventCallbackEnabled. Only phase,
+  // category, name and timestamp are recorded; args and flags are ignored.
+  static void Callback(TimeTicks timestamp,
+                       char phase,
+                       const unsigned char* category_group_enabled,
+                       const char* name,
+                       const char* scope,
+                       unsigned long long id,
+                       int num_args,
+                       const char* const arg_names[],
+                       const unsigned char arg_types[],
+                       const unsigned long long arg_values[],
+                       unsigned int flags) {
+    s_instance->collected_events_phases_.push_back(phase);
+    s_instance->collected_events_categories_.push_back(
+        TraceLog::GetCategoryGroupName(category_group_enabled));
+    s_instance->collected_events_names_.push_back(name);
+    s_instance->collected_events_timestamps_.push_back(timestamp);
+  }
+};
+
+// Storage for the per-test instance pointer (zero-initialized).
+TraceEventCallbackTest* TraceEventCallbackTest::s_instance;
+
+// The callback fires only between SetEventCallbackEnabled and
+// SetEventCallbackDisabled, and a scoped TRACE_EVENT0 yields a BEGIN at
+// entry and an END at scope exit, with monotonically ordered timestamps.
+TEST_F(TraceEventCallbackTest, TraceEventCallback) {
+  TRACE_EVENT_INSTANT0("all", "before enable", TRACE_EVENT_SCOPE_THREAD);
+  TraceLog::GetInstance()->SetEventCallbackEnabled(
+      TraceConfig(kRecordAllCategoryFilter, ""), Callback);
+  TRACE_EVENT_INSTANT0("all", "event1", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("all", "event2", TRACE_EVENT_SCOPE_GLOBAL);
+  {
+    TRACE_EVENT0("all", "duration");
+    TRACE_EVENT_INSTANT0("all", "event3", TRACE_EVENT_SCOPE_GLOBAL);
+  }
+  TraceLog::GetInstance()->SetEventCallbackDisabled();
+  TRACE_EVENT_INSTANT0("all", "after callback removed",
+                       TRACE_EVENT_SCOPE_GLOBAL);
+  // 5 events: event1, event2, duration-BEGIN, event3, duration-END. The
+  // events before enable and after disable must not appear.
+  ASSERT_EQ(5u, collected_events_names_.size());
+  EXPECT_EQ("event1", collected_events_names_[0]);
+  EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[0]);
+  EXPECT_EQ("event2", collected_events_names_[1]);
+  EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[1]);
+  EXPECT_EQ("duration", collected_events_names_[2]);
+  EXPECT_EQ(TRACE_EVENT_PHASE_BEGIN, collected_events_phases_[2]);
+  EXPECT_EQ("event3", collected_events_names_[3]);
+  EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[3]);
+  EXPECT_EQ("duration", collected_events_names_[4]);
+  EXPECT_EQ(TRACE_EVENT_PHASE_END, collected_events_phases_[4]);
+  // Timestamps must be non-decreasing in callback order.
+  for (size_t i = 1; i < collected_events_timestamps_.size(); i++) {
+    EXPECT_LE(collected_events_timestamps_[i - 1],
+              collected_events_timestamps_[i]);
+  }
+}
+
+// The event callback must still fire even when the recording buffer is
+// already full (callback delivery is independent of buffer capacity).
+TEST_F(TraceEventCallbackTest, TraceEventCallbackWhileFull) {
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+  // Fill the recording buffer to capacity.
+  do {
+    TRACE_EVENT_INSTANT0("all", "badger badger", TRACE_EVENT_SCOPE_GLOBAL);
+  } while (!TraceLog::GetInstance()->BufferIsFull());
+  TraceLog::GetInstance()->SetEventCallbackEnabled(
+      TraceConfig(kRecordAllCategoryFilter, ""), Callback);
+  TRACE_EVENT_INSTANT0("all", "a snake", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackDisabled();
+  ASSERT_EQ(1u, collected_events_names_.size());
+  EXPECT_EQ("a snake", collected_events_names_[0]);
+}
+
+// 1: Enable callback, enable recording, disable callback, disable recording.
+// "callback"/"yes" is emitted twice while the callback is enabled, and
+// "recording"/"yes" twice while recording is enabled -> expect (2, 2).
+TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording1) {
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
+                                                   Callback);
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackDisabled();
+  TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  EndTraceAndFlush();
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+
+  DropTracedMetadataRecords();
+  VerifyCallbackAndRecordedEvents(2, 2);
+}
+
+// 2: Enable callback, enable recording, disable recording, disable callback.
+// Three "callback"/"yes" events fire (one even after the flush, since the
+// callback outlives recording); only one "recording"/"yes" lands before
+// the flush -> expect (3, 1).
+TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording2) {
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
+                                                   Callback);
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  EndTraceAndFlush();
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackDisabled();
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+
+  DropTracedMetadataRecords();
+  VerifyCallbackAndRecordedEvents(3, 1);
+}
+
+// 3: Enable recording, enable callback, disable callback, disable recording.
+// Only one "callback"/"yes" fires in the short callback-enabled window,
+// while three "recording"/"yes" events land before the flush -> (1, 3).
+TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording3) {
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
+                                                   Callback);
+  TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackDisabled();
+  TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  EndTraceAndFlush();
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+
+  DropTracedMetadataRecords();
+  VerifyCallbackAndRecordedEvents(1, 3);
+}
+
+// 4: Enable recording, enable callback, disable recording, disable callback.
+// Two "callback"/"yes" events (one after the flush, while the callback is
+// still enabled) and two "recording"/"yes" events -> expect (2, 2).
+TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording4) {
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
+                                                   Callback);
+  TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  EndTraceAndFlush();
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackDisabled();
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+
+  DropTracedMetadataRecords();
+  VerifyCallbackAndRecordedEvents(2, 2);
+}
+
+// Nested TRACE_EVENT0 scopes deliver BEGIN callbacks in opening order and
+// END callbacks in reverse (LIFO) order at scope exit, regardless of
+// recording being toggled in between.
+TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecordingDuration) {
+  TraceLog::GetInstance()->SetEventCallbackEnabled(
+      TraceConfig(kRecordAllCategoryFilter, ""), Callback);
+  {
+    TRACE_EVENT0("callback", "duration1");
+    TraceLog::GetInstance()->SetEnabled(
+        TraceConfig(kRecordAllCategoryFilter, ""), TraceLog::RECORDING_MODE);
+    TRACE_EVENT0("callback", "duration2");
+    EndTraceAndFlush();
+    TRACE_EVENT0("callback", "duration3");
+  }
+  TraceLog::GetInstance()->SetEventCallbackDisabled();
+
+  // Three BEGINs in order, then three ENDs in reverse order.
+  ASSERT_EQ(6u, collected_events_names_.size());
+  VerifyCollectedEvent(0, TRACE_EVENT_PHASE_BEGIN, "callback", "duration1");
+  VerifyCollectedEvent(1, TRACE_EVENT_PHASE_BEGIN, "callback", "duration2");
+  VerifyCollectedEvent(2, TRACE_EVENT_PHASE_BEGIN, "callback", "duration3");
+  VerifyCollectedEvent(3, TRACE_EVENT_PHASE_END, "callback", "duration3");
+  VerifyCollectedEvent(4, TRACE_EVENT_PHASE_END, "callback", "duration2");
+  VerifyCollectedEvent(5, TRACE_EVENT_PHASE_END, "callback", "duration1");
+}
+
+// Filling a vector-backed trace buffer must emit an "overflowed_at_ts"
+// metadata record whose timestamp falls between the last recorded event
+// and "now". Swaps in a small (100-chunk) buffer so the fill is fast.
+TEST_F(TraceEventTestFixture, TraceBufferVectorReportFull) {
+  TraceLog* trace_log = TraceLog::GetInstance();
+  trace_log->SetEnabled(
+      TraceConfig(kRecordAllCategoryFilter, ""), TraceLog::RECORDING_MODE);
+  // Reaches into TraceLog internals (this test is a friend) to install a
+  // small fixed-size vector buffer.
+  trace_log->logged_events_.reset(
+      TraceBuffer::CreateTraceBufferVectorOfSize(100));
+  do {
+    TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
+        "all", "with_timestamp", 0, 0, TimeTicks::Now().ToInternalValue());
+    TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(
+        "all", "with_timestamp", 0, 0, TimeTicks::Now().ToInternalValue());
+  } while (!trace_log->BufferIsFull());
+
+  EndTraceAndFlush();
+
+  const DictionaryValue* trace_full_metadata = NULL;
+
+  trace_full_metadata = FindTraceEntry(trace_parsed_,
+                                       "overflowed_at_ts");
+  std::string phase;
+  double buffer_limit_reached_timestamp = 0;
+
+  // The overflow record is a metadata ("M") event carrying the timestamp
+  // at which the buffer limit was hit.
+  EXPECT_TRUE(trace_full_metadata);
+  EXPECT_TRUE(trace_full_metadata->GetString("ph", &phase));
+  EXPECT_EQ("M", phase);
+  EXPECT_TRUE(trace_full_metadata->GetDouble(
+      "args.overflowed_at_ts", &buffer_limit_reached_timestamp));
+  EXPECT_DOUBLE_EQ(
+      static_cast<double>(
+          trace_log->buffer_limit_reached_timestamp_.ToInternalValue()),
+      buffer_limit_reached_timestamp);
+
+  // Test that buffer_limit_reached_timestamp's value is between the timestamp
+  // of the last trace event and current time.
+  DropTracedMetadataRecords();
+  const DictionaryValue* last_trace_event = NULL;
+  double last_trace_event_timestamp = 0;
+  // Note: GetSize() - 1 assumes at least one non-metadata event remains,
+  // which the fill loop above guarantees.
+  EXPECT_TRUE(trace_parsed_.GetDictionary(trace_parsed_.GetSize() - 1,
+                                          &last_trace_event));
+  EXPECT_TRUE(last_trace_event->GetDouble("ts", &last_trace_event_timestamp));
+  EXPECT_LE(last_trace_event_timestamp, buffer_limit_reached_timestamp);
+  EXPECT_LE(buffer_limit_reached_timestamp,
+            trace_log->OffsetNow().ToInternalValue());
+}
+
+// Exercises chunk checkout/return on the continuous (ring) trace buffer:
+// chunk indices are handed out sequentially, sequence numbers strictly
+// increase, the ring never reports full, and returned chunks are recycled
+// in the order they were returned.
+TEST_F(TraceEventTestFixture, TraceBufferRingBufferGetReturnChunk) {
+  TraceLog::GetInstance()->SetEnabled(
+      TraceConfig(kRecordAllCategoryFilter, RECORD_CONTINUOUSLY),
+      TraceLog::RECORDING_MODE);
+  TraceBuffer* buffer = TraceLog::GetInstance()->trace_buffer();
+  size_t capacity = buffer->Capacity();
+  size_t num_chunks = capacity / TraceBufferChunk::kTraceBufferChunkSize;
+  uint32_t last_seq = 0;
+  size_t chunk_index;
+  EXPECT_EQ(0u, buffer->Size());
+
+  // Raw-pointer array to hold released chunks; ownership is handed back
+  // to the buffer via ReturnChunk below.
+  std::unique_ptr<TraceBufferChunk* []> chunks(
+      new TraceBufferChunk*[num_chunks]);
+  for (size_t i = 0; i < num_chunks; ++i) {
+    chunks[i] = buffer->GetChunk(&chunk_index).release();
+    EXPECT_TRUE(chunks[i]);
+    EXPECT_EQ(i, chunk_index);
+    EXPECT_GT(chunks[i]->seq(), last_seq);
+    EXPECT_EQ((i + 1) * TraceBufferChunk::kTraceBufferChunkSize,
+              buffer->Size());
+    last_seq = chunks[i]->seq();
+  }
+
+  // Ring buffer is never full.
+  EXPECT_FALSE(buffer->IsFull());
+
+  // Return all chunks in original order.
+  for (size_t i = 0; i < num_chunks; ++i)
+    buffer->ReturnChunk(i, std::unique_ptr<TraceBufferChunk>(chunks[i]));
+
+  // Should recycle the chunks in the returned order.
+  for (size_t i = 0; i < num_chunks; ++i) {
+    chunks[i] = buffer->GetChunk(&chunk_index).release();
+    EXPECT_TRUE(chunks[i]);
+    EXPECT_EQ(i, chunk_index);
+    EXPECT_GT(chunks[i]->seq(), last_seq);
+    last_seq = chunks[i]->seq();
+  }
+
+  // Return all chunks in reverse order.
+  for (size_t i = 0; i < num_chunks; ++i) {
+    buffer->ReturnChunk(num_chunks - i - 1, std::unique_ptr<TraceBufferChunk>(
+                                                chunks[num_chunks - i - 1]));
+  }
+
+  // Should recycle the chunks in the returned order.
+  for (size_t i = 0; i < num_chunks; ++i) {
+    chunks[i] = buffer->GetChunk(&chunk_index).release();
+    EXPECT_TRUE(chunks[i]);
+    EXPECT_EQ(num_chunks - i - 1, chunk_index);
+    EXPECT_GT(chunks[i]->seq(), last_seq);
+    last_seq = chunks[i]->seq();
+  }
+
+  // Give all chunks back before disabling tracing.
+  for (size_t i = 0; i < num_chunks; ++i)
+    buffer->ReturnChunk(i, std::unique_ptr<TraceBufferChunk>(chunks[i]));
+
+  TraceLog::GetInstance()->SetDisabled();
+}
+
+// Iterating a half-filled ring buffer with NextChunk() must visit exactly
+// the returned chunks, in order, then report exhaustion.
+TEST_F(TraceEventTestFixture, TraceBufferRingBufferHalfIteration) {
+  TraceLog::GetInstance()->SetEnabled(
+      TraceConfig(kRecordAllCategoryFilter, RECORD_CONTINUOUSLY),
+      TraceLog::RECORDING_MODE);
+  TraceBuffer* buffer = TraceLog::GetInstance()->trace_buffer();
+  size_t capacity = buffer->Capacity();
+  size_t num_chunks = capacity / TraceBufferChunk::kTraceBufferChunkSize;
+  size_t chunk_index;
+  EXPECT_EQ(0u, buffer->Size());
+  // Empty buffer: nothing to iterate.
+  EXPECT_FALSE(buffer->NextChunk());
+
+  size_t half_chunks = num_chunks / 2;
+  std::unique_ptr<TraceBufferChunk* []> chunks(
+      new TraceBufferChunk*[half_chunks]);
+
+  // Check out and immediately return half of the chunks.
+  for (size_t i = 0; i < half_chunks; ++i) {
+    chunks[i] = buffer->GetChunk(&chunk_index).release();
+    EXPECT_TRUE(chunks[i]);
+    EXPECT_EQ(i, chunk_index);
+  }
+  for (size_t i = 0; i < half_chunks; ++i)
+    buffer->ReturnChunk(i, std::unique_ptr<TraceBufferChunk>(chunks[i]));
+
+  for (size_t i = 0; i < half_chunks; ++i)
+    EXPECT_EQ(chunks[i], buffer->NextChunk());
+  EXPECT_FALSE(buffer->NextChunk());
+  TraceLog::GetInstance()->SetDisabled();
+}
+
+// Same as HalfIteration but with every chunk returned: NextChunk() must
+// visit all chunks in order and then report exhaustion.
+TEST_F(TraceEventTestFixture, TraceBufferRingBufferFullIteration) {
+  TraceLog::GetInstance()->SetEnabled(
+      TraceConfig(kRecordAllCategoryFilter, RECORD_CONTINUOUSLY),
+      TraceLog::RECORDING_MODE);
+  TraceBuffer* buffer = TraceLog::GetInstance()->trace_buffer();
+  size_t capacity = buffer->Capacity();
+  size_t num_chunks = capacity / TraceBufferChunk::kTraceBufferChunkSize;
+  size_t chunk_index;
+  EXPECT_EQ(0u, buffer->Size());
+  EXPECT_FALSE(buffer->NextChunk());
+
+  std::unique_ptr<TraceBufferChunk* []> chunks(
+      new TraceBufferChunk*[num_chunks]);
+
+  for (size_t i = 0; i < num_chunks; ++i) {
+    chunks[i] = buffer->GetChunk(&chunk_index).release();
+    EXPECT_TRUE(chunks[i]);
+    EXPECT_EQ(i, chunk_index);
+  }
+  for (size_t i = 0; i < num_chunks; ++i)
+    buffer->ReturnChunk(i, std::unique_ptr<TraceBufferChunk>(chunks[i]));
+
+  for (size_t i = 0; i < num_chunks; ++i)
+    EXPECT_TRUE(chunks[i] == buffer->NextChunk());
+  EXPECT_FALSE(buffer->NextChunk());
+  TraceLog::GetInstance()->SetDisabled();
+}
+
+// RECORD_AS_MUCH_AS_POSSIBLE must allocate the large-capacity buffer.
+// The 512000000 literal mirrors the constant used by the TraceBuffer
+// implementation; it must be kept in sync with trace_buffer.cc.
+TEST_F(TraceEventTestFixture, TraceRecordAsMuchAsPossibleMode) {
+  TraceLog::GetInstance()->SetEnabled(
+      TraceConfig(kRecordAllCategoryFilter, RECORD_AS_MUCH_AS_POSSIBLE),
+      TraceLog::RECORDING_MODE);
+  TraceBuffer* buffer = TraceLog::GetInstance()->trace_buffer();
+  EXPECT_EQ(512000000UL, buffer->Capacity());
+  TraceLog::GetInstance()->SetDisabled();
+}
+
+// Task body used to park a worker thread: signals |start_signal| so the
+// posting thread knows the task is running, then blocks until the posting
+// thread signals |stop_signal|.
+void BlockUntilStopped(WaitableEvent* start_signal,
+                       WaitableEvent* stop_signal) {
+  start_signal->Signal();
+  stop_signal->Wait();
+}
+
+// A thread marked with SetCurrentThreadBlocksMessageLoop *before* events
+// are traced must not stall EndTraceAndFlush even while it is blocked,
+// and its events must still be flushed and validated.
+TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopBeforeTracing) {
+  BeginTrace();
+
+  Thread thread("1");
+  WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                    WaitableEvent::InitialState::NOT_SIGNALED);
+  thread.Start();
+  // Mark the worker thread as blocking before it traces anything.
+  thread.task_runner()->PostTask(
+      FROM_HERE, Bind(&TraceLog::SetCurrentThreadBlocksMessageLoop,
+                      Unretained(TraceLog::GetInstance())));
+
+  thread.task_runner()->PostTask(
+      FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
+  task_complete_event.Wait();
+
+  // Park the worker thread so it cannot service flush tasks.
+  WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                 WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
+  thread.task_runner()->PostTask(
+      FROM_HERE, Bind(&BlockUntilStopped, &task_start_event, &task_stop_event));
+  task_start_event.Wait();
+
+  EndTraceAndFlush();
+  ValidateAllTraceMacrosCreatedData(trace_parsed_);
+
+  task_stop_event.Signal();
+  thread.Stop();
+}
+
+// Maps TraceConfig record-mode/option strings to TraceLog's internal
+// option bit flags, including combinations with enable-sampling (and
+// verifies enable-systrace does not affect the internal options).
+TEST_F(TraceEventTestFixture, ConvertTraceConfigToInternalOptions) {
+  TraceLog* trace_log = TraceLog::GetInstance();
+  EXPECT_EQ(TraceLog::kInternalRecordUntilFull,
+            trace_log->GetInternalOptionsFromTraceConfig(
+                TraceConfig(kRecordAllCategoryFilter, RECORD_UNTIL_FULL)));
+
+  EXPECT_EQ(TraceLog::kInternalRecordContinuously,
+            trace_log->GetInternalOptionsFromTraceConfig(
+                TraceConfig(kRecordAllCategoryFilter, RECORD_CONTINUOUSLY)));
+
+  EXPECT_EQ(TraceLog::kInternalEchoToConsole,
+            trace_log->GetInternalOptionsFromTraceConfig(
+                TraceConfig(kRecordAllCategoryFilter, ECHO_TO_CONSOLE)));
+
+  EXPECT_EQ(
+      TraceLog::kInternalRecordUntilFull | TraceLog::kInternalEnableSampling,
+      trace_log->GetInternalOptionsFromTraceConfig(
+          TraceConfig(kRecordAllCategoryFilter,
+                      "record-until-full,enable-sampling")));
+
+  EXPECT_EQ(
+      TraceLog::kInternalRecordContinuously | TraceLog::kInternalEnableSampling,
+      trace_log->GetInternalOptionsFromTraceConfig(
+          TraceConfig(kRecordAllCategoryFilter,
+                      "record-continuously,enable-sampling")));
+
+  EXPECT_EQ(
+      TraceLog::kInternalEchoToConsole | TraceLog::kInternalEnableSampling,
+      trace_log->GetInternalOptionsFromTraceConfig(
+          TraceConfig(kRecordAllCategoryFilter,
+                      "trace-to-console,enable-sampling")));
+
+  EXPECT_EQ(
+      TraceLog::kInternalEchoToConsole | TraceLog::kInternalEnableSampling,
+      trace_log->GetInternalOptionsFromTraceConfig(
+          TraceConfig("*",
+                      "trace-to-console,enable-sampling,enable-systrace")));
+}
+
+// Like BlockUntilStopped(), but first marks the *current* thread as one
+// that blocks its message loop, so TraceLog will not wait on it during a
+// flush. Must run on the worker thread being tested.
+void SetBlockingFlagAndBlockUntilStopped(WaitableEvent* start_signal,
+                                         WaitableEvent* stop_signal) {
+  TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop();
+  BlockUntilStopped(start_signal, stop_signal);
+}
+
+// Same as the BeforeTracing variant, but the blocking flag is set *after*
+// the worker thread has already traced events, from within the blocking
+// task itself; the flush must still complete with all events present.
+TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopAfterTracing) {
+  BeginTrace();
+
+  Thread thread("1");
+  WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                    WaitableEvent::InitialState::NOT_SIGNALED);
+  thread.Start();
+
+  thread.task_runner()->PostTask(
+      FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
+  task_complete_event.Wait();
+
+  // The task sets the blocking flag on the worker thread and then parks it.
+  WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                 WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
+  thread.task_runner()->PostTask(
+      FROM_HERE, Bind(&SetBlockingFlagAndBlockUntilStopped, &task_start_event,
+                      &task_stop_event));
+  task_start_event.Wait();
+
+  EndTraceAndFlush();
+  ValidateAllTraceMacrosCreatedData(trace_parsed_);
+
+  task_stop_event.Signal();
+  thread.Stop();
+}
+
+// A worker thread that misses one flush (it is parked and times out) must
+// be recoverable: after the timed-out flush, TraceLog detects the stale
+// generation and the thread can trace and flush normally again.
+TEST_F(TraceEventTestFixture, ThreadOnceBlocking) {
+  BeginTrace();
+
+  Thread thread("1");
+  WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                    WaitableEvent::InitialState::NOT_SIGNALED);
+  thread.Start();
+
+  thread.task_runner()->PostTask(
+      FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
+  task_complete_event.Wait();
+  task_complete_event.Reset();
+
+  // Park the worker thread so it cannot answer the flush below.
+  WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                 WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
+  thread.task_runner()->PostTask(
+      FROM_HERE, Bind(&BlockUntilStopped, &task_start_event, &task_stop_event));
+  task_start_event.Wait();
+
+  // The thread will timeout in this flush.
+  EndTraceAndFlushInThreadWithMessageLoop();
+  Clear();
+
+  // Let the thread's message loop continue to spin.
+  task_stop_event.Signal();
+
+  // The following sequence ensures that the FlushCurrentThread task has been
+  // executed in the thread before continuing.
+  task_start_event.Reset();
+  task_stop_event.Reset();
+  thread.task_runner()->PostTask(
+      FROM_HERE, Bind(&BlockUntilStopped, &task_start_event, &task_stop_event));
+  task_start_event.Wait();
+  task_stop_event.Signal();
+  Clear();
+
+  // TraceLog should discover the generation mismatch and recover the thread
+  // local buffer for the thread without any error.
+  BeginTrace();
+  thread.task_runner()->PostTask(
+      FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
+  task_complete_event.Wait();
+  task_complete_event.Reset();
+  EndTraceAndFlushInThreadWithMessageLoop();
+  ValidateAllTraceMacrosCreatedData(trace_parsed_);
+}
+
+// Buffer shared with the EchoToConsole test; allocated lazily by the
+// handler and freed by the test.
+std::string* g_log_buffer = NULL;
+
+// Log-message hook that accumulates every formatted message into
+// |g_log_buffer|; returning false lets default log handling proceed.
+bool MockLogMessageHandler(int, const char*, int, size_t,
+                           const std::string& str) {
+  if (g_log_buffer == NULL)
+    g_log_buffer = new std::string();
+  *g_log_buffer += str;
+  return false;
+}
+
+// ECHO_TO_CONSOLE mode logs each event as an indented console line. The
+// "\x1b" escapes in the expectations are the ANSI escape byte emitted after
+// the name/category, and "|" prefixes encode nesting depth; "(" lines are
+// the end-of-duration reports.
+TEST_F(TraceEventTestFixture, EchoToConsole) {
+  // Capture log output via MockLogMessageHandler; restore the old handler
+  // before returning.
+  logging::LogMessageHandlerFunction old_log_message_handler =
+      logging::GetLogMessageHandler();
+  logging::SetLogMessageHandler(MockLogMessageHandler);
+
+  TraceLog::GetInstance()->SetEnabled(
+      TraceConfig(kRecordAllCategoryFilter, ECHO_TO_CONSOLE),
+      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_BEGIN0("a", "begin_end");
+  {
+    TRACE_EVENT0("b", "duration");
+    TRACE_EVENT0("b1", "duration1");
+  }
+  TRACE_EVENT_INSTANT0("c", "instant", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_END0("a", "begin_end");
+
+  EXPECT_NE(std::string::npos, g_log_buffer->find("begin_end[a]\x1b"));
+  EXPECT_NE(std::string::npos, g_log_buffer->find("| duration[b]\x1b"));
+  EXPECT_NE(std::string::npos, g_log_buffer->find("| | duration1[b1]\x1b"));
+  EXPECT_NE(std::string::npos, g_log_buffer->find("| | duration1[b1] ("));
+  EXPECT_NE(std::string::npos, g_log_buffer->find("| duration[b] ("));
+  EXPECT_NE(std::string::npos, g_log_buffer->find("| instant[c]\x1b"));
+  EXPECT_NE(std::string::npos, g_log_buffer->find("begin_end[a] ("));
+
+  EndTraceAndFlush();
+  delete g_log_buffer;
+  logging::SetLogMessageHandler(old_log_message_handler);
+  g_log_buffer = NULL;
+}
+
+// Log handler that itself emits a trace event. Used to prove that
+// ECHO_TO_CONSOLE's logging path cannot recurse or deadlock when the log
+// handler traces.
+bool LogMessageHandlerWithTraceEvent(int, const char*, int, size_t,
+                                     const std::string&) {
+  TRACE_EVENT0("log", "trace_event");
+  return false;
+}
+
+// With ECHO_TO_CONSOLE active and a log handler that traces, emitting a
+// trace event logs, which traces, which logs... TraceLog must break this
+// cycle; the test passes by simply not deadlocking or overflowing.
+TEST_F(TraceEventTestFixture, EchoToConsoleTraceEventRecursion) {
+  logging::LogMessageHandlerFunction old_log_message_handler =
+      logging::GetLogMessageHandler();
+  logging::SetLogMessageHandler(LogMessageHandlerWithTraceEvent);
+
+  TraceLog::GetInstance()->SetEnabled(
+      TraceConfig(kRecordAllCategoryFilter, ECHO_TO_CONSOLE),
+      TraceLog::RECORDING_MODE);
+  {
+    // This should not cause deadlock or infinite recursion.
+    TRACE_EVENT0("b", "duration");
+  }
+
+  EndTraceAndFlush();
+  logging::SetLogMessageHandler(old_log_message_handler);
+}
+
+// With a time offset installed, every recorded "ts" must be offset-adjusted:
+// non-decreasing across the trace and no later than (now - offset).
+TEST_F(TraceEventTestFixture, TimeOffset) {
+  BeginTrace();
+  // Let TraceLog timer start from 0.
+  TimeDelta time_offset = TimeTicks::Now() - TimeTicks();
+  TraceLog::GetInstance()->SetTimeOffset(time_offset);
+
+  {
+    TRACE_EVENT0("all", "duration1");
+    TRACE_EVENT0("all", "duration2");
+  }
+  TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
+      "all", "with_timestamp", 0, 0, TimeTicks::Now().ToInternalValue());
+  TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(
+      "all", "with_timestamp", 0, 0, TimeTicks::Now().ToInternalValue());
+
+  EndTraceAndFlush();
+  DropTracedMetadataRecords();
+
+  double end_time = static_cast<double>(
+      (TimeTicks::Now() - time_offset).ToInternalValue());
+  double last_timestamp = 0;
+  // Every event's adjusted timestamp is ordered and bounded by end_time.
+  for (size_t i = 0; i < trace_parsed_.GetSize(); ++i) {
+    const DictionaryValue* item;
+    EXPECT_TRUE(trace_parsed_.GetDictionary(i, &item));
+    double timestamp;
+    EXPECT_TRUE(item->GetDouble("ts", &timestamp));
+    EXPECT_GE(timestamp, last_timestamp);
+    EXPECT_LE(timestamp, end_time);
+    last_timestamp = timestamp;
+  }
+}
+
+// A "DELAY(test.Delay;0.05)" filter configures a 0.05s synthetic delay, so
+// the TRACE_EVENT_SYNTHETIC_DELAY scope must take at least 50ms.
+TEST_F(TraceEventTestFixture, ConfigureSyntheticDelays) {
+  BeginSpecificTrace("DELAY(test.Delay;0.05)");
+
+  base::TimeTicks start = base::TimeTicks::Now();
+  {
+    TRACE_EVENT_SYNTHETIC_DELAY("test.Delay");
+  }
+  base::TimeDelta duration = base::TimeTicks::Now() - start;
+  EXPECT_GE(duration.InMilliseconds(), 50);
+
+  EndTraceAndFlush();
+}
+
+// Malformed DELAY(...) filter strings (truncated, missing name, missing
+// duration) must be rejected: the resulting config has no delay values.
+TEST_F(TraceEventTestFixture, BadSyntheticDelayConfigurations) {
+  const char* const filters[] = {
+    "",
+    "DELAY(",
+    "DELAY(;",
+    "DELAY(;)",
+    "DELAY(test.Delay)",   // duration missing
+    "DELAY(test.Delay;)"   // duration empty
+  };
+  for (size_t i = 0; i < arraysize(filters); i++) {
+    BeginSpecificTrace(filters[i]);
+    EndTraceAndFlush();
+    TraceConfig trace_config = TraceLog::GetInstance()->GetCurrentTraceConfig();
+    EXPECT_EQ(0u, trace_config.GetSyntheticDelayValues().size());
+  }
+}
+
+// Merging two configs with distinct DELAY entries must keep both delays.
+TEST_F(TraceEventTestFixture, SyntheticDelayConfigurationMerging) {
+  TraceConfig config1("DELAY(test.Delay1;16)", "");
+  TraceConfig config2("DELAY(test.Delay2;32)", "");
+  config1.Merge(config2);
+  EXPECT_EQ(2u, config1.GetSyntheticDelayValues().size());
+}
+
+// A DELAY filter (including its "oneshot" mode) must round-trip through
+// TraceConfig and back to the identical category-filter string.
+TEST_F(TraceEventTestFixture, SyntheticDelayConfigurationToString) {
+  const char filter[] = "DELAY(test.Delay;16;oneshot)";
+  TraceConfig config(filter, "");
+  EXPECT_EQ(filter, config.ToCategoryFilterString());
+}
+
+// Clock-sync ("c"-phase) events must be recorded even when the category
+// filter ("-*") excludes everything else.
+TEST_F(TraceEventTestFixture, ClockSyncEventsAreAlwaysAddedToTrace) {
+  BeginSpecificTrace("-*");
+  TRACE_EVENT_CLOCK_SYNC_RECEIVER(1);
+  EndTraceAndFlush();
+  EXPECT_TRUE(FindNamePhase("clock_sync", "c"));
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/trace_log.cc b/libchrome/base/trace_event/trace_log.cc
new file mode 100644
index 0000000..12cebc6
--- /dev/null
+++ b/libchrome/base/trace_event/trace_log.cc
@@ -0,0 +1,1801 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_log.h"
+
+#include <algorithm>
+#include <cmath>
+#include <memory>
+#include <utility>
+
+#include "base/base_switches.h"
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/debug/leak_annotations.h"
+#include "base/lazy_instance.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/memory/singleton.h"
+#include "base/process/process_metrics.h"
+#include "base/stl_util.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/threading/worker_pool.h"
+#include "base/time/time.h"
+#include "base/trace_event/heap_profiler.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_buffer.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_synthetic_delay.h"
+#include "base/trace_event/trace_sampling_thread.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/trace_event/trace_event_etw_export_win.h"
+#endif
+
+// The thread buckets for the sampling profiler.
+// NOTE(review): exported (BASE_EXPORT) presumably so the sampling macros can
+// write the buckets directly without a TraceLog call — confirm against
+// trace_event.h.
+BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
+
+namespace base {
+namespace internal {
+
+// Test-only hook: tears down the leaky TraceLog singleton by invoking the
+// singleton's OnExit handler directly.
+class DeleteTraceLogForTesting {
+ public:
+  static void Delete() {
+    Singleton<trace_event::TraceLog,
+              LeakySingletonTraits<trace_event::TraceLog>>::OnExit(0);
+  }
+};
+
+} // namespace internal
+
+namespace trace_event {
+
+namespace {
+
+// Controls the number of trace events we will buffer in-memory
+// before throwing them away.
+const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
+
+// Capacity (in chunks) of the extra-large vector buffer.
+const size_t kTraceEventVectorBigBufferChunks =
+    512000000 / kTraceBufferChunkSize;
+static_assert(
+    kTraceEventVectorBigBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
+    "Too many big buffer chunks");
+// Capacity (in chunks) of the default vector buffer.
+const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
+static_assert(
+    kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
+    "Too many vector buffer chunks");
+// The ring buffer keeps a quarter of the default capacity.
+const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
+
+// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
+const size_t kEchoToConsoleTraceEventBufferChunks = 256;
+
+// Flush emits serialized output in pieces of roughly this many bytes.
+const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
+// How long Flush() waits for all threads to drain their local buffers.
+const int kThreadFlushTimeoutMs = 3000;
+
+#define MAX_CATEGORY_GROUPS 200
+
+// Parallel arrays g_category_groups and g_category_group_enabled are separate
+// so that a pointer to a member of g_category_group_enabled can be easily
+// converted to an index into g_category_groups. This allows macros to deal
+// only with char enabled pointers from g_category_group_enabled, and we can
+// convert internally to determine the category name from the char enabled
+// pointer.
+const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
+    "toplevel",
+    "tracing already shutdown",
+    "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
+    "__metadata"};
+
+// The enabled flag is char instead of bool so that the API can be used from C.
+unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0};
+// Indexes here have to match the g_category_groups array indexes above.
+const int g_category_already_shutdown = 1;
+const int g_category_categories_exhausted = 2;
+const int g_category_metadata = 3;
+const int g_num_builtin_categories = 4;
+// Skip default categories.
+base::subtle::AtomicWord g_category_index = g_num_builtin_categories;
+
+// The name of the current thread. This is used to decide if the current
+// thread name has changed. We combine all the seen thread names into the
+// output name for the thread.
+LazyInstance<ThreadLocalPointer<const char>>::Leaky g_current_thread_name =
+    LAZY_INSTANCE_INITIALIZER;
+
+// Returns the per-thread CPU clock, or a null ThreadTicks on platforms
+// without thread clock support.
+ThreadTicks ThreadNow() {
+  if (ThreadTicks::IsSupported())
+    return ThreadTicks::Now();
+  return ThreadTicks();
+}
+
+// Fills |trace_event| in as a TRACE_EVENT_PHASE_METADATA event carrying a
+// single argument |arg_name| = |value|, attributed to |thread_id|.
+// No-op when |trace_event| is null (e.g. the buffer was full).
+template <typename T>
+void InitializeMetadataEvent(TraceEvent* trace_event,
+                             int thread_id,
+                             const char* metadata_name,
+                             const char* arg_name,
+                             const T& value) {
+  if (!trace_event)
+    return;
+
+  int num_args = 1;
+  unsigned char arg_type;
+  unsigned long long arg_value;
+  // Encode |value| into the trace event's generic (type, 64-bit) argument
+  // representation.
+  ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
+  trace_event->Initialize(
+      thread_id,
+      TimeTicks(),
+      ThreadTicks(),
+      TRACE_EVENT_PHASE_METADATA,
+      &g_category_group_enabled[g_category_metadata],
+      metadata_name,
+      trace_event_internal::kGlobalScope,  // scope
+      trace_event_internal::kNoId,         // id
+      trace_event_internal::kNoId,         // bind_id
+      num_args,
+      &arg_name,
+      &arg_type,
+      &arg_value,
+      nullptr,
+      TRACE_EVENT_FLAG_NONE);
+}
+
+// RAII guard that sets a ThreadLocalBoolean for the duration of a scope.
+// DCHECKs that the flag was not already set, which makes it usable as a
+// re-entrancy guard.
+class AutoThreadLocalBoolean {
+ public:
+  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
+      : thread_local_boolean_(thread_local_boolean) {
+    DCHECK(!thread_local_boolean_->Get());
+    thread_local_boolean_->Set(true);
+  }
+  ~AutoThreadLocalBoolean() { thread_local_boolean_->Set(false); }
+
+ private:
+  ThreadLocalBoolean* thread_local_boolean_;  // Not owned.
+  DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
+};
+
+// Use this function instead of TraceEventHandle constructor to keep the
+// overhead of ScopedTracer (trace_event.h) constructor minimum.
+// Packs (chunk_seq, chunk_index, event_index) into |handle| so the event can
+// be located again later (e.g. by UpdateTraceEventDuration).
+void MakeHandle(uint32_t chunk_seq,
+                size_t chunk_index,
+                size_t event_index,
+                TraceEventHandle* handle) {
+  DCHECK(chunk_seq);
+  // Indices must fit the handle's 16-bit fields; the static limits guarantee
+  // the casts below are lossless.
+  DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex);
+  DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize);
+  handle->chunk_seq = chunk_seq;
+  handle->chunk_index = static_cast<uint16_t>(chunk_index);
+  handle->event_index = static_cast<uint16_t>(event_index);
+}
+
+} // namespace
+
+// A helper class that allows the lock to be acquired in the middle of the
+// scope and unlocks at the end of scope if locked. Acquisition is idempotent:
+// repeated EnsureAcquired() calls lock at most once.
+class TraceLog::OptionalAutoLock {
+ public:
+  explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {}
+
+  ~OptionalAutoLock() {
+    if (locked_)
+      lock_->Release();
+  }
+
+  // Takes the lock if this scope does not already hold it.
+  void EnsureAcquired() {
+    if (!locked_) {
+      lock_->Acquire();
+      locked_ = true;
+    }
+  }
+
+ private:
+  Lock* lock_;   // Not owned.
+  bool locked_;  // Whether the destructor must release |lock_|.
+  DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
+};
+
+// Per-thread staging buffer for trace events. Each thread with a message
+// loop writes events into its own chunk without taking the global lock;
+// full chunks are returned to the shared TraceBuffer under the lock.
+// Lifetime is tied to the thread's MessageLoop via DestructionObserver.
+class TraceLog::ThreadLocalEventBuffer
+    : public MessageLoop::DestructionObserver,
+      public MemoryDumpProvider {
+ public:
+  explicit ThreadLocalEventBuffer(TraceLog* trace_log);
+  ~ThreadLocalEventBuffer() override;
+
+  // Returns a slot in the current chunk (acquiring a new chunk if needed)
+  // and fills |handle| so the event can be found again.
+  TraceEvent* AddTraceEvent(TraceEventHandle* handle);
+
+  // Resolves |handle| back to an event, or nullptr if the handle refers to a
+  // different chunk/sequence than the one currently held.
+  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
+    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
+        handle.chunk_index != chunk_index_) {
+      return nullptr;
+    }
+
+    return chunk_->GetEventAt(handle.event_index);
+  }
+
+  int generation() const { return generation_; }
+
+ private:
+  // MessageLoop::DestructionObserver
+  void WillDestroyCurrentMessageLoop() override;
+
+  // MemoryDumpProvider implementation.
+  bool OnMemoryDump(const MemoryDumpArgs& args,
+                    ProcessMemoryDump* pmd) override;
+
+  // Returns the held chunk to the global buffer; requires trace_log_->lock_.
+  void FlushWhileLocked();
+
+  void CheckThisIsCurrentBuffer() const {
+    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
+  }
+
+  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
+  // as long as the thread exists.
+  TraceLog* trace_log_;
+  std::unique_ptr<TraceBufferChunk> chunk_;  // Currently-owned chunk, if any.
+  size_t chunk_index_;  // Index of |chunk_| within the global buffer.
+  int generation_;      // TraceLog generation at construction time.
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
+};
+
+// Registers this buffer with the current thread's message loop (for
+// destruction), with memory-infra (for accounting), and with the TraceLog
+// (so Flush can reach this thread).
+TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
+    : trace_log_(trace_log),
+      chunk_index_(0),
+      generation_(trace_log->generation()) {
+  // ThreadLocalEventBuffer is created only if the thread has a message loop,
+  // so the following message_loop won't be NULL.
+  MessageLoop* message_loop = MessageLoop::current();
+  message_loop->AddDestructionObserver(this);
+
+  // This is to report the local memory usage when memory-infra is enabled.
+  MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+      this, "ThreadLocalEventBuffer", ThreadTaskRunnerHandle::Get());
+
+  AutoLock lock(trace_log->lock_);
+  trace_log->thread_message_loops_.insert(message_loop);
+}
+
+// Unregisters all observers, flushes the remaining chunk into the global
+// buffer and clears the thread-local slot so a fresh buffer can be created.
+TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
+  CheckThisIsCurrentBuffer();
+  MessageLoop::current()->RemoveDestructionObserver(this);
+  MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);
+
+  {
+    AutoLock lock(trace_log_->lock_);
+    FlushWhileLocked();
+    trace_log_->thread_message_loops_.erase(MessageLoop::current());
+  }
+  trace_log_->thread_local_event_buffer_.Set(NULL);
+}
+
+// Returns a free TraceEvent slot in this thread's chunk, or NULL when the
+// global buffer has no chunks left. Only takes the global lock when the
+// local chunk is exhausted.
+TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
+    TraceEventHandle* handle) {
+  CheckThisIsCurrentBuffer();
+
+  if (chunk_ && chunk_->IsFull()) {
+    // Return the full chunk and fetch a new one below.
+    AutoLock lock(trace_log_->lock_);
+    FlushWhileLocked();
+    chunk_.reset();
+  }
+  if (!chunk_) {
+    AutoLock lock(trace_log_->lock_);
+    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
+    trace_log_->CheckIfBufferIsFullWhileLocked();
+  }
+  if (!chunk_)
+    return NULL;
+
+  size_t event_index;
+  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
+  if (trace_event && handle)
+    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);
+
+  return trace_event;
+}
+
+// Self-deletes when the owning thread's message loop goes away; the
+// destructor flushes any buffered events.
+void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
+  delete this;
+}
+
+// memory-infra callback: reports the estimated size of the held chunk under
+// "tracing/thread_<tid>". Returns true (success) even with nothing to dump.
+bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(const MemoryDumpArgs&,
+                                                    ProcessMemoryDump* pmd) {
+  if (!chunk_)
+    return true;
+  std::string dump_base_name = StringPrintf(
+      "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId()));
+  TraceEventMemoryOverhead overhead;
+  chunk_->EstimateTraceMemoryOverhead(&overhead);
+  overhead.DumpInto(dump_base_name.c_str(), pmd);
+  return true;
+}
+
+// Hands the current chunk back to the global buffer. Caller must hold
+// trace_log_->lock_.
+void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
+  if (!chunk_)
+    return;
+
+  trace_log_->lock_.AssertAcquired();
+  if (trace_log_->CheckGeneration(generation_)) {
+    // Return the chunk to the buffer only if the generation matches.
+    trace_log_->logged_events_->ReturnChunk(chunk_index_, std::move(chunk_));
+  }
+  // Otherwise this method may be called from the destructor, or TraceLog will
+  // find the generation mismatch and delete this buffer soon.
+}
+
+// Pairs an async observer with the task runner it registered from, so
+// enable/disable notifications are posted back to the observer's own thread.
+struct TraceLog::RegisteredAsyncObserver {
+  explicit RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer)
+      : observer(observer), task_runner(ThreadTaskRunnerHandle::Get()) {}
+  ~RegisteredAsyncObserver() {}
+
+  WeakPtr<AsyncEnabledStateObserver> observer;
+  scoped_refptr<SequencedTaskRunner> task_runner;
+};
+
+// Zero-initialized snapshot of buffer capacity/usage (see GetStatus()).
+TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}
+
+TraceLogStatus::~TraceLogStatus() {}
+
+// static
+// Returns the process-wide TraceLog. Leaky singleton: never destroyed
+// outside of tests (see DeleteTraceLogForTesting).
+TraceLog* TraceLog::GetInstance() {
+  return Singleton<TraceLog, LeakySingletonTraits<TraceLog>>::get();
+}
+
+// Constructs the disabled TraceLog: annotates benign races on the category
+// flags, records the process id, allocates the initial buffer and registers
+// itself as a memory-infra dump provider.
+TraceLog::TraceLog()
+    : mode_(DISABLED),
+      num_traces_recorded_(0),
+      event_callback_(0),
+      dispatching_to_observer_list_(false),
+      process_sort_index_(0),
+      process_id_hash_(0),
+      process_id_(0),
+      watch_category_(0),
+      trace_options_(kInternalRecordUntilFull),
+      sampling_thread_handle_(0),
+      trace_config_(TraceConfig()),
+      event_callback_trace_config_(TraceConfig()),
+      thread_shared_chunk_index_(0),
+      generation_(0),
+      use_worker_thread_(false) {
+  // Trace is enabled or disabled on one thread while other threads are
+  // accessing the enabled flag. We don't care whether edge-case events are
+  // traced or not, so we allow races on the enabled flag to keep the trace
+  // macros fast.
+  // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
+  // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
+  //                            sizeof(g_category_group_enabled),
+  //                            "trace_event category enabled");
+  for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
+    ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
+                         "trace_event category enabled");
+  }
+#if defined(OS_NACL)  // NaCl shouldn't expose the process id.
+  SetProcessID(0);
+#else
+  SetProcessID(static_cast<int>(GetCurrentProcId()));
+#endif
+
+  logged_events_.reset(CreateTraceBuffer());
+
+  // Null task runner: TraceLog dumps are generated on whichever thread the
+  // dump manager invokes OnMemoryDump from.
+  MemoryDumpManager::GetInstance()->RegisterDumpProvider(this, "TraceLog",
+                                                         nullptr);
+}
+
+// Leaky singleton: in practice only runs via DeleteTraceLogForTesting.
+TraceLog::~TraceLog() {}
+
+// Lazily creates (or replaces a stale-generation) per-thread event buffer
+// for the calling thread. Threads without a message loop, or that declared
+// themselves as blocking it, keep writing to the shared buffer instead.
+void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
+  // A ThreadLocalEventBuffer needs the message loop
+  // - to know when the thread exits;
+  // - to handle the final flush.
+  // For a thread without a message loop or the message loop may be blocked,
+  // the trace events will be added into the main buffer directly.
+  if (thread_blocks_message_loop_.Get() || !MessageLoop::current())
+    return;
+  HEAP_PROFILER_SCOPED_IGNORE;
+  auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
+  if (thread_local_event_buffer &&
+      !CheckGeneration(thread_local_event_buffer->generation())) {
+    // The buffer belongs to a previous tracing session; discard it.
+    delete thread_local_event_buffer;
+    thread_local_event_buffer = NULL;
+  }
+  if (!thread_local_event_buffer) {
+    thread_local_event_buffer = new ThreadLocalEventBuffer(this);
+    thread_local_event_buffer_.Set(thread_local_event_buffer);
+  }
+}
+
+// memory-infra callback: accounts for the TraceLog itself, the main event
+// buffer and any pending metadata events under "tracing/main_trace_log".
+bool TraceLog::OnMemoryDump(const MemoryDumpArgs&, ProcessMemoryDump* pmd) {
+  // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
+  // (crbug.com/499731).
+  TraceEventMemoryOverhead overhead;
+  overhead.Add("TraceLog", sizeof(*this));
+  {
+    AutoLock lock(lock_);
+    if (logged_events_)
+      logged_events_->EstimateTraceMemoryOverhead(&overhead);
+
+    for (auto& metadata_event : metadata_events_)
+      metadata_event->EstimateTraceMemoryOverhead(&overhead);
+  }
+  overhead.AddSelf();
+  overhead.DumpInto("tracing/main_trace_log", pmd);
+  return true;
+}
+
+// Returns the enabled-flag slot for |category_group|, creating the category
+// on first use. After shutdown, returns the always-disabled sentinel slot so
+// trace macros stay safe to call.
+const unsigned char* TraceLog::GetCategoryGroupEnabled(
+    const char* category_group) {
+  TraceLog* tracelog = GetInstance();
+  if (!tracelog) {
+    DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
+    return &g_category_group_enabled[g_category_already_shutdown];
+  }
+  return tracelog->GetCategoryGroupEnabledInternal(category_group);
+}
+
+// Maps an enabled-flag pointer back to its category name by computing its
+// index within the parallel g_category_group_enabled array.
+const char* TraceLog::GetCategoryGroupName(
+    const unsigned char* category_group_enabled) {
+  // Calculate the index of the category group by finding
+  // category_group_enabled in g_category_group_enabled array.
+  uintptr_t category_begin =
+      reinterpret_cast<uintptr_t>(g_category_group_enabled);
+  uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
+  DCHECK(category_ptr >= category_begin &&
+         category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled +
+                                                    MAX_CATEGORY_GROUPS))
+      << "out of bounds category pointer";
+  uintptr_t category_index =
+      (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
+  return g_category_groups[category_index];
+}
+
+// Recomputes the enabled bitmask for one category from the current mode,
+// trace config, event callback config and (on Windows) ETW export state.
+void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
+  unsigned char enabled_flag = 0;
+  const char* category_group = g_category_groups[category_index];
+  if (mode_ == RECORDING_MODE &&
+      trace_config_.IsCategoryGroupEnabled(category_group)) {
+    enabled_flag |= ENABLED_FOR_RECORDING;
+  }
+
+  if (event_callback_ &&
+      event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) {
+    enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
+  }
+
+#if defined(OS_WIN)
+  if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
+          category_group)) {
+    enabled_flag |= ENABLED_FOR_ETW_EXPORT;
+  }
+#endif
+
+  // TODO(primiano): this is a temporary workaround for catapult:#2341,
+  // to guarantee that metadata events are always added even if the category
+  // filter is "-*". See crbug.com/618054 for more details and long-term fix.
+  if (mode_ == RECORDING_MODE && !strcmp(category_group, "__metadata"))
+    enabled_flag |= ENABLED_FOR_RECORDING;
+
+  g_category_group_enabled[category_index] = enabled_flag;
+}
+
+// Recomputes the enabled bitmask for every registered category.
+void TraceLog::UpdateCategoryGroupEnabledFlags() {
+  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
+  for (size_t i = 0; i < category_index; i++)
+    UpdateCategoryGroupEnabledFlag(i);
+}
+
+// Rebuilds the synthetic-delay registry from the current trace config.
+// Each config entry has the form "<name>;<token>;<token>..." where a numeric
+// token is the target duration in seconds and "static"/"oneshot"/
+// "alternating" select the delay mode (e.g. "test.Delay;0.05;oneshot").
+// Unrecognized tokens are ignored.
+void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
+  ResetTraceEventSyntheticDelays();
+  const TraceConfig::StringList& delays =
+      trace_config_.GetSyntheticDelayValues();
+  for (const std::string& delay_config : delays) {
+    StringTokenizer tokens(delay_config, ";");
+    // The first token is the delay name; skip entries without one.
+    if (!tokens.GetNext())
+      continue;
+    TraceEventSyntheticDelay* delay =
+        TraceEventSyntheticDelay::Lookup(tokens.token());
+    while (tokens.GetNext()) {
+      std::string token = tokens.token();
+      char* duration_end;
+      double target_duration = strtod(token.c_str(), &duration_end);
+      if (duration_end != token.c_str()) {
+        // Numeric token: duration in seconds, stored as microseconds.
+        delay->SetTargetDuration(TimeDelta::FromMicroseconds(
+            static_cast<int64_t>(target_duration * 1e6)));
+      } else if (token == "static") {
+        delay->SetMode(TraceEventSyntheticDelay::STATIC);
+      } else if (token == "oneshot") {
+        delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
+      } else if (token == "alternating") {
+        delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
+      }
+    }
+  }
+}
+
+// Looks up (or registers) |category_group| and returns its enabled-flag
+// slot. Lock-free fast path for existing categories; the lock is taken only
+// when a new category must be appended. When the table is full, returns the
+// shared "categories exhausted" sentinel slot.
+const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
+    const char* category_group) {
+  DCHECK(!strchr(category_group, '"'))
+      << "Category groups may not contain double quote";
+  // The g_category_groups is append only, avoid using a lock for the fast
+  // path.
+  size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);
+
+  // Search for pre-existing category group.
+  for (size_t i = 0; i < current_category_index; ++i) {
+    if (strcmp(g_category_groups[i], category_group) == 0) {
+      return &g_category_group_enabled[i];
+    }
+  }
+
+  unsigned char* category_group_enabled = NULL;
+  // This is the slow path: the lock is not held in the case above, so more
+  // than one thread could have reached here trying to add the same category.
+  // Only hold to lock when actually appending a new category, and
+  // check the categories groups again.
+  AutoLock lock(lock_);
+  size_t category_index = base::subtle::Acquire_Load(&g_category_index);
+  for (size_t i = 0; i < category_index; ++i) {
+    if (strcmp(g_category_groups[i], category_group) == 0) {
+      return &g_category_group_enabled[i];
+    }
+  }
+
+  // Create a new category group.
+  DCHECK(category_index < MAX_CATEGORY_GROUPS)
+      << "must increase MAX_CATEGORY_GROUPS";
+  if (category_index < MAX_CATEGORY_GROUPS) {
+    // Don't hold on to the category_group pointer, so that we can create
+    // category groups with strings not known at compile time (this is
+    // required by SetWatchEvent).
+    const char* new_group = strdup(category_group);
+    ANNOTATE_LEAKING_OBJECT_PTR(new_group);
+    g_category_groups[category_index] = new_group;
+    DCHECK(!g_category_group_enabled[category_index]);
+    // Note that if both included and excluded patterns in the
+    // TraceConfig are empty, we exclude nothing,
+    // thereby enabling this category group.
+    UpdateCategoryGroupEnabledFlag(category_index);
+    category_group_enabled = &g_category_group_enabled[category_index];
+    // Update the max index now. Release_Store publishes the new entry to
+    // lock-free readers only after the slot above is fully initialized.
+    base::subtle::Release_Store(&g_category_index, category_index + 1);
+  } else {
+    category_group_enabled =
+        &g_category_group_enabled[g_category_categories_exhausted];
+  }
+  return category_group_enabled;
+}
+
+// Appends every user-registered category name to |category_groups|,
+// skipping the built-in bookkeeping categories.
+void TraceLog::GetKnownCategoryGroups(
+    std::vector<std::string>* category_groups) {
+  AutoLock lock(lock_);
+  // Snapshot the append-only category count.
+  size_t category_count = base::subtle::NoBarrier_Load(&g_category_index);
+  for (size_t index = g_num_builtin_categories; index < category_count;
+       ++index) {
+    category_groups->push_back(g_category_groups[index]);
+  }
+}
+
+// Enables tracing with |trace_config| in |mode|, then notifies observers.
+// If tracing is already enabled, the new config is merged into the existing
+// one; changing options or mode while enabled is unsupported and only logged.
+void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
+  std::vector<EnabledStateObserver*> observer_list;
+  std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map;
+  {
+    AutoLock lock(lock_);
+
+    // Can't enable tracing when Flush() is in progress.
+    DCHECK(!flush_task_runner_);
+
+    InternalTraceOptions new_options =
+        GetInternalOptionsFromTraceConfig(trace_config);
+
+    InternalTraceOptions old_options = trace_options();
+
+    if (IsEnabled()) {
+      // Already tracing: merge category filters, keep existing options/mode.
+      if (new_options != old_options) {
+        DLOG(ERROR) << "Attempting to re-enable tracing with a different "
+                    << "set of options.";
+      }
+
+      if (mode != mode_) {
+        DLOG(ERROR) << "Attempting to re-enable tracing with a different "
+                       "mode.";
+      }
+
+      trace_config_.Merge(trace_config);
+      UpdateCategoryGroupEnabledFlags();
+      return;
+    }
+
+    if (dispatching_to_observer_list_) {
+      DLOG(ERROR)
+          << "Cannot manipulate TraceLog::Enabled state from an observer.";
+      return;
+    }
+
+    mode_ = mode;
+
+    if (new_options != old_options) {
+      // Different buffering options require a fresh buffer and generation.
+      subtle::NoBarrier_Store(&trace_options_, new_options);
+      UseNextTraceBuffer();
+    }
+
+    num_traces_recorded_++;
+
+    trace_config_ = TraceConfig(trace_config);
+    UpdateCategoryGroupEnabledFlags();
+    UpdateSyntheticDelaysFromTraceConfig();
+
+    if (new_options & kInternalEnableSampling) {
+      // Start the sampling profiler thread with the three default buckets.
+      sampling_thread_.reset(new TraceSamplingThread);
+      sampling_thread_->RegisterSampleBucket(
+          &g_trace_state[0], "bucket0",
+          Bind(&TraceSamplingThread::DefaultSamplingCallback));
+      sampling_thread_->RegisterSampleBucket(
+          &g_trace_state[1], "bucket1",
+          Bind(&TraceSamplingThread::DefaultSamplingCallback));
+      sampling_thread_->RegisterSampleBucket(
+          &g_trace_state[2], "bucket2",
+          Bind(&TraceSamplingThread::DefaultSamplingCallback));
+      if (!PlatformThread::Create(0, sampling_thread_.get(),
+                                  &sampling_thread_handle_)) {
+        DCHECK(false) << "failed to create thread";
+      }
+    }
+
+    dispatching_to_observer_list_ = true;
+    observer_list = enabled_state_observer_list_;
+    observer_map = async_observers_;
+  }
+  // Notify observers outside the lock in case they trigger trace events.
+  for (size_t i = 0; i < observer_list.size(); ++i)
+    observer_list[i]->OnTraceLogEnabled();
+  for (const auto& it : observer_map) {
+    it.second.task_runner->PostTask(
+        FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogEnabled,
+                        it.second.observer));
+  }
+
+  {
+    AutoLock lock(lock_);
+    dispatching_to_observer_list_ = false;
+  }
+}
+
+// Installs the predicate used to scrub event arguments when argument
+// filtering is enabled. May only be set once, and must be non-null.
+void TraceLog::SetArgumentFilterPredicate(
+    const ArgumentFilterPredicate& argument_filter_predicate) {
+  AutoLock lock(lock_);
+  DCHECK(!argument_filter_predicate.is_null());
+  DCHECK(argument_filter_predicate_.is_null());
+  argument_filter_predicate_ = argument_filter_predicate;
+}
+
+// Translates a public TraceConfig into the internal option bitmask:
+// sampling and argument-filter flags plus exactly one record-mode flag.
+TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
+    const TraceConfig& config) {
+  InternalTraceOptions ret =
+      config.IsSamplingEnabled() ? kInternalEnableSampling : kInternalNone;
+  if (config.IsArgumentFilterEnabled())
+    ret |= kInternalEnableArgumentFilter;
+  switch (config.GetTraceRecordMode()) {
+    case RECORD_UNTIL_FULL:
+      return ret | kInternalRecordUntilFull;
+    case RECORD_CONTINUOUSLY:
+      return ret | kInternalRecordContinuously;
+    case ECHO_TO_CONSOLE:
+      return ret | kInternalEchoToConsole;
+    case RECORD_AS_MUCH_AS_POSSIBLE:
+      return ret | kInternalRecordAsMuchAsPossible;
+  }
+  NOTREACHED();
+  return kInternalNone;
+}
+
+// Returns a copy of the active trace config (empty when disabled).
+TraceConfig TraceLog::GetCurrentTraceConfig() const {
+  AutoLock lock(lock_);
+  return trace_config_;
+}
+
+// Public entry point for disabling tracing; takes the lock and delegates.
+void TraceLog::SetDisabled() {
+  AutoLock lock(lock_);
+  SetDisabledWhileLocked();
+}
+
+// Disables tracing: stops the sampling thread, clears config/watch state,
+// appends the session's metadata events, and notifies observers. Caller
+// must hold lock_; the lock is temporarily dropped to join the sampling
+// thread and to dispatch observer callbacks.
+void TraceLog::SetDisabledWhileLocked() {
+  lock_.AssertAcquired();
+
+  if (!IsEnabled())
+    return;
+
+  if (dispatching_to_observer_list_) {
+    DLOG(ERROR)
+        << "Cannot manipulate TraceLog::Enabled state from an observer.";
+    return;
+  }
+
+  mode_ = DISABLED;
+
+  if (sampling_thread_.get()) {
+    // Stop the sampling thread. Joining must happen without the lock held.
+    sampling_thread_->Stop();
+    lock_.Release();
+    PlatformThread::Join(sampling_thread_handle_);
+    lock_.Acquire();
+    sampling_thread_handle_ = PlatformThreadHandle();
+    sampling_thread_.reset();
+  }
+
+  trace_config_.Clear();
+  subtle::NoBarrier_Store(&watch_category_, 0);
+  watch_event_name_ = "";
+  UpdateCategoryGroupEnabledFlags();
+  AddMetadataEventsWhileLocked();
+
+  // Remove metadata events so they will not get added to a subsequent trace.
+  metadata_events_.clear();
+
+  dispatching_to_observer_list_ = true;
+  std::vector<EnabledStateObserver*> observer_list =
+      enabled_state_observer_list_;
+  std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map =
+      async_observers_;
+
+  {
+    // Dispatch to observers outside the lock in case the observer triggers a
+    // trace event.
+    AutoUnlock unlock(lock_);
+    for (size_t i = 0; i < observer_list.size(); ++i)
+      observer_list[i]->OnTraceLogDisabled();
+    for (const auto& it : observer_map) {
+      it.second.task_runner->PostTask(
+          FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogDisabled,
+                          it.second.observer));
+    }
+  }
+  dispatching_to_observer_list_ = false;
+}
+
+// Returns how many tracing sessions have been started, or -1 when tracing
+// is currently disabled.
+int TraceLog::GetNumTracesRecorded() {
+  AutoLock lock(lock_);
+  return IsEnabled() ? num_traces_recorded_ : -1;
+}
+
+// Registers a synchronous observer; |listener| is not owned and must
+// outlive its registration.
+void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
+  AutoLock lock(lock_);
+  enabled_state_observer_list_.push_back(listener);
+}
+
+// Unregisters a synchronous observer; removing a listener that was never
+// added is a no-op.
+void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
+  AutoLock lock(lock_);
+  auto it = std::find(enabled_state_observer_list_.begin(),
+                      enabled_state_observer_list_.end(), listener);
+  if (it != enabled_state_observer_list_.end())
+    enabled_state_observer_list_.erase(it);
+}
+
+// Returns true if |listener| is currently registered as a sync observer.
+bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
+  AutoLock lock(lock_);
+  return ContainsValue(enabled_state_observer_list_, listener);
+}
+
+// Returns a snapshot of the buffer's total capacity and current event count.
+TraceLogStatus TraceLog::GetStatus() const {
+  AutoLock lock(lock_);
+  TraceLogStatus result;
+  result.event_capacity = static_cast<uint32_t>(logged_events_->Capacity());
+  result.event_count = static_cast<uint32_t>(logged_events_->Size());
+  return result;
+}
+
+// Returns whether the main trace buffer cannot accept more chunks.
+bool TraceLog::BufferIsFull() const {
+  AutoLock lock(lock_);
+  return logged_events_->IsFull();
+}
+
+// Allocates an event slot from the shared (non-thread-local) chunk, used by
+// threads that bypass ThreadLocalEventBuffer. Returns NULL when the buffer
+// is exhausted. Caller must hold lock_.
+TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
+    TraceEventHandle* handle,
+    bool check_buffer_is_full) {
+  lock_.AssertAcquired();
+
+  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
+    logged_events_->ReturnChunk(thread_shared_chunk_index_,
+                                std::move(thread_shared_chunk_));
+  }
+
+  if (!thread_shared_chunk_) {
+    thread_shared_chunk_ =
+        logged_events_->GetChunk(&thread_shared_chunk_index_);
+    // |check_buffer_is_full| is false when the caller must not re-enter
+    // SetDisabledWhileLocked (e.g. while already disabling).
+    if (check_buffer_is_full)
+      CheckIfBufferIsFullWhileLocked();
+  }
+  if (!thread_shared_chunk_)
+    return NULL;
+
+  size_t event_index;
+  TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
+  if (trace_event && handle) {
+    MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
+               event_index, handle);
+  }
+  return trace_event;
+}
+
+// Stops tracing when the buffer fills up, recording the first time the
+// limit was hit. Caller must hold lock_.
+void TraceLog::CheckIfBufferIsFullWhileLocked() {
+  lock_.AssertAcquired();
+  if (logged_events_->IsFull()) {
+    if (buffer_limit_reached_timestamp_.is_null()) {
+      buffer_limit_reached_timestamp_ = OffsetNow();
+    }
+    SetDisabledWhileLocked();
+  }
+}
+
+// Installs a per-event callback filtered by |trace_config| and refreshes
+// category flags so matching categories get ENABLED_FOR_EVENT_CALLBACK.
+void TraceLog::SetEventCallbackEnabled(const TraceConfig& trace_config,
+                                       EventCallback cb) {
+  AutoLock lock(lock_);
+  // Stored as an atomic word so trace macros can read it without the lock.
+  subtle::NoBarrier_Store(&event_callback_,
+                          reinterpret_cast<subtle::AtomicWord>(cb));
+  event_callback_trace_config_ = trace_config;
+  UpdateCategoryGroupEnabledFlags();
+}
+
+// Clears the per-event callback and refreshes the category flags.
+void TraceLog::SetEventCallbackDisabled() {
+  AutoLock lock(lock_);
+  subtle::NoBarrier_Store(&event_callback_, 0);
+  UpdateCategoryGroupEnabledFlags();
+}
+
+// Flush() works as the following:
+// 1. Flush() is called in thread A whose task runner is saved in
+//    flush_task_runner_;
+// 2. If thread_message_loops_ is not empty, thread A posts task to each
+//    message loop to flush the thread local buffers; otherwise finish the
+//    flush;
+// 3. FlushCurrentThread() deletes the thread local event buffer:
+//    - The last batch of events of the thread are flushed into the main
+//      buffer;
+//    - The message loop will be removed from thread_message_loops_;
+//    If this is the last message loop, finish the flush;
+// 4. If any thread hasn't finished its flush in time, finish the flush.
+void TraceLog::Flush(const TraceLog::OutputCallback& cb,
+                     bool use_worker_thread) {
+  FlushInternal(cb, use_worker_thread, false);
+}
+
+// Stops tracing and discards all buffered events; |cb| is still invoked
+// (with an empty result) to signal completion.
+void TraceLog::CancelTracing(const OutputCallback& cb) {
+  SetDisabled();
+  FlushInternal(cb, false, true);
+}
+
+// Shared implementation of Flush()/CancelTracing(). Collects per-thread
+// buffers by posting FlushCurrentThread to every registered message loop,
+// with OnFlushTimeout as a deadline; finishes immediately when no threads
+// hold local buffers. Must be called with tracing already disabled.
+void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
+                             bool use_worker_thread,
+                             bool discard_events) {
+  use_worker_thread_ = use_worker_thread;
+  if (IsEnabled()) {
+    // Can't flush when tracing is enabled because otherwise PostTask would
+    // - generate more trace events;
+    // - deschedule the calling thread on some platforms causing inaccurate
+    //   timing of the trace events.
+    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
+    if (!cb.is_null())
+      cb.Run(empty_result, false);
+    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
+    return;
+  }
+
+  int generation = this->generation();
+  // Copy of thread_message_loops_ to be used without locking.
+  std::vector<scoped_refptr<SingleThreadTaskRunner>>
+      thread_message_loop_task_runners;
+  {
+    AutoLock lock(lock_);
+    DCHECK(!flush_task_runner_);
+    flush_task_runner_ = ThreadTaskRunnerHandle::IsSet()
+                             ? ThreadTaskRunnerHandle::Get()
+                             : nullptr;
+    DCHECK(thread_message_loops_.empty() || flush_task_runner_);
+    flush_output_callback_ = cb;
+
+    // Return the shared chunk so its events are part of the flush.
+    if (thread_shared_chunk_) {
+      logged_events_->ReturnChunk(thread_shared_chunk_index_,
+                                  std::move(thread_shared_chunk_));
+    }
+
+    if (thread_message_loops_.size()) {
+      for (hash_set<MessageLoop*>::const_iterator it =
+               thread_message_loops_.begin();
+           it != thread_message_loops_.end(); ++it) {
+        thread_message_loop_task_runners.push_back((*it)->task_runner());
+      }
+    }
+  }
+
+  if (thread_message_loop_task_runners.size()) {
+    // Ask each thread to drain its local buffer, then arm the timeout.
+    for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) {
+      thread_message_loop_task_runners[i]->PostTask(
+          FROM_HERE, Bind(&TraceLog::FlushCurrentThread, Unretained(this),
+                          generation, discard_events));
+    }
+    flush_task_runner_->PostDelayedTask(
+        FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this),
+                        generation, discard_events),
+        TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
+    return;
+  }
+
+  FinishFlush(generation, discard_events);
+}
+
+// Usually it runs on a different thread.
+// Serializes every event in |logged_events| to JSON and streams the result
+// to |flush_output_callback| in pieces of ~kTraceEventBufferSizeInBytes;
+// the final piece is flagged with has_more_events == false.
+void TraceLog::ConvertTraceEventsToTraceFormat(
+    std::unique_ptr<TraceBuffer> logged_events,
+    const OutputCallback& flush_output_callback,
+    const ArgumentFilterPredicate& argument_filter_predicate) {
+  if (flush_output_callback.is_null())
+    return;
+
+  HEAP_PROFILER_SCOPED_IGNORE;
+  // The callback needs to be called at least once even if there are no
+  // events to let the caller know the completion of flush.
+  scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
+  while (const TraceBufferChunk* chunk = logged_events->NextChunk()) {
+    for (size_t j = 0; j < chunk->size(); ++j) {
+      size_t size = json_events_str_ptr->size();
+      if (size > kTraceEventBufferSizeInBytes) {
+        // Emit the accumulated piece (has_more_events == true) and restart.
+        flush_output_callback.Run(json_events_str_ptr, true);
+        json_events_str_ptr = new RefCountedString();
+      } else if (size) {
+        json_events_str_ptr->data().append(",\n");
+      }
+      chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
+                                         argument_filter_predicate);
+    }
+  }
+  flush_output_callback.Run(json_events_str_ptr, false);
+}
+
+// Final stage of a flush: swaps in a fresh buffer, then either discards the
+// collected events or serializes them (on a worker thread when requested and
+// available, otherwise inline). Stale generations are ignored.
+void TraceLog::FinishFlush(int generation, bool discard_events) {
+  std::unique_ptr<TraceBuffer> previous_logged_events;
+  OutputCallback flush_output_callback;
+  ArgumentFilterPredicate argument_filter_predicate;
+
+  if (!CheckGeneration(generation))
+    return;
+
+  {
+    AutoLock lock(lock_);
+
+    // Take ownership of the filled buffer and start a new generation.
+    previous_logged_events.swap(logged_events_);
+    UseNextTraceBuffer();
+    thread_message_loops_.clear();
+
+    flush_task_runner_ = NULL;
+    flush_output_callback = flush_output_callback_;
+    flush_output_callback_.Reset();
+
+    if (trace_options() & kInternalEnableArgumentFilter) {
+      CHECK(!argument_filter_predicate_.is_null());
+      argument_filter_predicate = argument_filter_predicate_;
+    }
+  }
+
+  if (discard_events) {
+    // CancelTracing path: drop events but still signal completion.
+    if (!flush_output_callback.is_null()) {
+      scoped_refptr<RefCountedString> empty_result = new RefCountedString;
+      flush_output_callback.Run(empty_result, false);
+    }
+    return;
+  }
+
+  if (use_worker_thread_ &&
+      WorkerPool::PostTask(
+          FROM_HERE, Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
+                          Passed(&previous_logged_events),
+                          flush_output_callback, argument_filter_predicate),
+          true)) {
+    return;
+  }
+
+  // Worker thread unavailable (or not requested): serialize inline.
+  ConvertTraceEventsToTraceFormat(std::move(previous_logged_events),
+                                  flush_output_callback,
+                                  argument_filter_predicate);
+}
+
+// Run in each thread holding a local event buffer.
+void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
+  {
+    AutoLock lock(lock_);
+    if (!CheckGeneration(generation) || !flush_task_runner_) {
+      // This is late. The corresponding flush has finished.
+      return;
+    }
+  }
+
+  // This will flush the thread local buffer.
+  // NOTE(review): deleted outside |lock_| — the buffer's destructor appears
+  // to re-enter TraceLog to hand its events back; confirm lock is retaken
+  // there.
+  delete thread_local_event_buffer_.Get();
+
+  AutoLock lock(lock_);
+  // Only the last thread to check in (|thread_message_loops_| now empty)
+  // posts the final FinishFlush back to the flushing task runner.
+  if (!CheckGeneration(generation) || !flush_task_runner_ ||
+      thread_message_loops_.size())
+    return;
+
+  flush_task_runner_->PostTask(
+      FROM_HERE, Bind(&TraceLog::FinishFlush, Unretained(this), generation,
+                      discard_events));
+}
+
+// Delayed task scheduled when a flush starts. If some threads have still not
+// flushed their local buffers by now, warn which ones and finish the flush
+// without them.
+void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
+  {
+    AutoLock lock(lock_);
+    if (!CheckGeneration(generation) || !flush_task_runner_) {
+      // Flush has finished before timeout.
+      return;
+    }
+
+    LOG(WARNING)
+        << "The following threads haven't finished flush in time. "
+           "If this happens stably for some thread, please call "
+           "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
+           "the thread to avoid its trace events from being lost.";
+    for (hash_set<MessageLoop*>::const_iterator it =
+             thread_message_loops_.begin();
+         it != thread_message_loops_.end(); ++it) {
+      LOG(WARNING) << "Thread: " << (*it)->GetThreadName();
+    }
+  }
+  FinishFlush(generation, discard_events);
+}
+
+// Installs a fresh trace buffer and bumps |generation_| so that in-flight
+// flush tasks checked via CheckGeneration() become no-ops.
+void TraceLog::UseNextTraceBuffer() {
+  logged_events_.reset(CreateTraceBuffer());
+  subtle::NoBarrier_AtomicIncrement(&generation_, 1);
+  // Drop the shared chunk; it referred to the old buffer.
+  thread_shared_chunk_.reset();
+  thread_shared_chunk_index_ = 0;
+}
+
+// Records an event stamped with the current thread and current time.
+TraceEventHandle TraceLog::AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int num_args,
+    const char** arg_names,
+    const unsigned char* arg_types,
+    const unsigned long long* arg_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+    unsigned int flags) {
+  // Forward to the fully-specified overload with no bind id.
+  const int current_thread_id =
+      static_cast<int>(base::PlatformThread::CurrentId());
+  const base::TimeTicks current_time = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id,
+      trace_event_internal::kNoId,  // bind_id
+      current_thread_id, current_time, num_args, arg_names, arg_types,
+      arg_values, convertable_values, flags);
+}
+
+// Like AddTraceEvent() but carries an explicit |bind_id|; the event is
+// flagged as having a context id so the bind id is kept.
+TraceEventHandle TraceLog::AddTraceEventWithBindId(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned long long bind_id,
+    int num_args,
+    const char** arg_names,
+    const unsigned char* arg_types,
+    const unsigned long long* arg_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+    unsigned int flags) {
+  const int current_thread_id =
+      static_cast<int>(base::PlatformThread::CurrentId());
+  const base::TimeTicks current_time = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, bind_id,
+      current_thread_id, current_time, num_args, arg_names, arg_types,
+      arg_values, convertable_values,
+      flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID);
+}
+
+// Records an event on behalf of another process: |process_id| takes the slot
+// of the thread id and TRACE_EVENT_FLAG_HAS_PROCESS_ID marks the difference.
+TraceEventHandle TraceLog::AddTraceEventWithProcessId(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int process_id,
+    int num_args,
+    const char** arg_names,
+    const unsigned char* arg_types,
+    const unsigned long long* arg_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+    unsigned int flags) {
+  const base::TimeTicks current_time = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id,
+      trace_event_internal::kNoId,  // bind_id
+      process_id, current_time, num_args, arg_names, arg_types, arg_values,
+      convertable_values, flags | TRACE_EVENT_FLAG_HAS_PROCESS_ID);
+}
+
+// Handle legacy calls to AddTraceEventWithThreadIdAndTimestamp
+// with kNoId as bind_id
+TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    int thread_id,
+    const TimeTicks& timestamp,
+    int num_args,
+    const char** arg_names,
+    const unsigned char* arg_types,
+    const unsigned long long* arg_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+    unsigned int flags) {
+  // Pure forwarder: inject kNoId as the bind id.
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id,
+      trace_event_internal::kNoId,  // bind_id
+      thread_id, timestamp, num_args, arg_names, arg_types, arg_values,
+      convertable_values, flags);
+}
+
+// Core recording path: every AddTraceEvent* overload funnels here. Stores the
+// event into the thread-local or shared buffer when the category is enabled
+// for recording, and also services ETW export, console echo, watch events,
+// the event callback, and heap-profiler pseudo stack maintenance.
+TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned long long bind_id,
+    int thread_id,
+    const TimeTicks& timestamp,
+    int num_args,
+    const char** arg_names,
+    const unsigned char* arg_types,
+    const unsigned long long* arg_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+    unsigned int flags) {
+  // All-zero handle means "event was not recorded".
+  TraceEventHandle handle = {0, 0, 0};
+  if (!*category_group_enabled)
+    return handle;
+
+  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
+  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
+  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
+  if (thread_is_in_trace_event_.Get())
+    return handle;
+
+  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
+
+  DCHECK(name);
+  DCHECK(!timestamp.is_null());
+
+  // XOR ids with a per-process hash (see MangleEventId/SetProcessID) so ids
+  // from different processes are unlikely to collide.
+  if (flags & TRACE_EVENT_FLAG_MANGLE_ID) {
+    if ((flags & TRACE_EVENT_FLAG_FLOW_IN) ||
+        (flags & TRACE_EVENT_FLAG_FLOW_OUT))
+      bind_id = MangleEventId(bind_id);
+    id = MangleEventId(id);
+  }
+
+  TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);
+  ThreadTicks thread_now = ThreadNow();
+
+  // |thread_local_event_buffer_| can be null if the current thread doesn't have
+  // a message loop or the message loop is blocked.
+  InitializeThreadLocalEventBufferIfSupported();
+  auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
+
+  // Check and update the current thread name only if the event is for the
+  // current thread to avoid locks in most cases.
+  if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
+    const char* new_name =
+        ThreadIdNameManager::GetInstance()->GetName(thread_id);
+    // Check if the thread name has been set or changed since the previous
+    // call (if any), but don't bother if the new name is empty. Note this will
+    // not detect a thread name change within the same char* buffer address: we
+    // favor common case performance over corner case correctness.
+    if (new_name != g_current_thread_name.Get().Get() && new_name &&
+        *new_name) {
+      g_current_thread_name.Get().Set(new_name);
+
+      AutoLock thread_info_lock(thread_info_lock_);
+
+      hash_map<int, std::string>::iterator existing_name =
+          thread_names_.find(thread_id);
+      if (existing_name == thread_names_.end()) {
+        // This is a new thread id, and a new name.
+        thread_names_[thread_id] = new_name;
+      } else {
+        // This is a thread id that we've seen before, but potentially with a
+        // new name. A thread id can carry several names over its lifetime;
+        // keep them all as a comma-separated list, without duplicates.
+        std::vector<StringPiece> existing_names = base::SplitStringPiece(
+            existing_name->second, ",", base::KEEP_WHITESPACE,
+            base::SPLIT_WANT_NONEMPTY);
+        bool found = std::find(existing_names.begin(), existing_names.end(),
+                               new_name) != existing_names.end();
+        if (!found) {
+          if (existing_names.size())
+            existing_name->second.push_back(',');
+          existing_name->second.append(new_name);
+        }
+      }
+    }
+  }
+
+#if defined(OS_WIN)
+  // This is done sooner rather than later, to avoid creating the event and
+  // acquiring the lock, which is not needed for ETW as it's already threadsafe.
+  if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT)
+    TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
+                                  num_args, arg_names, arg_types, arg_values,
+                                  convertable_values);
+#endif // OS_WIN
+
+  std::string console_message;
+  if (*category_group_enabled & ENABLED_FOR_RECORDING) {
+    // Prefer the lock-free thread local buffer; fall back to the shared
+    // chunk, which requires |lock_|, when no local buffer exists.
+    OptionalAutoLock lock(&lock_);
+
+    TraceEvent* trace_event = NULL;
+    if (thread_local_event_buffer) {
+      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
+    } else {
+      lock.EnsureAcquired();
+      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
+    }
+
+    if (trace_event) {
+      trace_event->Initialize(thread_id,
+                              offset_event_timestamp,
+                              thread_now,
+                              phase,
+                              category_group_enabled,
+                              name,
+                              scope,
+                              id,
+                              bind_id,
+                              num_args,
+                              arg_names,
+                              arg_types,
+                              arg_values,
+                              convertable_values,
+                              flags);
+
+#if defined(OS_ANDROID)
+      trace_event->SendToATrace();
+#endif
+    }
+
+    if (trace_options() & kInternalEchoToConsole) {
+      // COMPLETE events are echoed as BEGIN here; the matching END is echoed
+      // from UpdateTraceEventDuration().
+      console_message = EventToConsoleMessage(
+          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
+          timestamp, trace_event);
+    }
+  }
+
+  // Logged outside the block above so LOG does not run while holding |lock_|.
+  if (console_message.size())
+    LOG(ERROR) << console_message;
+
+  // Watch-event notification (see SetWatchEvent): the watched category
+  // pointer is published atomically, so it is checked without the lock.
+  if (reinterpret_cast<const unsigned char*>(
+          subtle::NoBarrier_Load(&watch_category_)) == category_group_enabled) {
+    bool event_name_matches;
+    WatchEventCallback watch_event_callback_copy;
+    {
+      AutoLock lock(lock_);
+      event_name_matches = watch_event_name_ == name;
+      watch_event_callback_copy = watch_event_callback_;
+    }
+    if (event_name_matches) {
+      // Run the copied callback outside the lock.
+      if (!watch_event_callback_copy.is_null())
+        watch_event_callback_copy.Run();
+    }
+  }
+
+  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
+    EventCallback event_callback = reinterpret_cast<EventCallback>(
+        subtle::NoBarrier_Load(&event_callback_));
+    if (event_callback) {
+      event_callback(
+          offset_event_timestamp,
+          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
+          category_group_enabled, name, scope, id, num_args, arg_names,
+          arg_types, arg_values, flags);
+    }
+  }
+
+  // TODO(primiano): Add support for events with copied name crbug.com/581078
+  if (!(flags & TRACE_EVENT_FLAG_COPY)) {
+    if (AllocationContextTracker::capture_mode() ==
+        AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
+      if (phase == TRACE_EVENT_PHASE_BEGIN ||
+          phase == TRACE_EVENT_PHASE_COMPLETE) {
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->PushPseudoStackFrame(name);
+      } else if (phase == TRACE_EVENT_PHASE_END) {
+        // The pop for |TRACE_EVENT_PHASE_COMPLETE| events
+        // is in |TraceLog::UpdateTraceEventDuration|.
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->PopPseudoStackFrame(name);
+      }
+    }
+  }
+
+  return handle;
+}
+
+// Queues a METADATA-phase event; AddMetadataEventsWhileLocked() later moves
+// queued entries into the trace buffer.
+void TraceLog::AddMetadataEvent(
+    const unsigned char* category_group_enabled,
+    const char* name,
+    int num_args,
+    const char** arg_names,
+    const unsigned char* arg_types,
+    const unsigned long long* arg_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+    unsigned int flags) {
+  HEAP_PROFILER_SCOPED_IGNORE;
+  std::unique_ptr<TraceEvent> event(new TraceEvent);
+  // Capture thread and time information before taking the lock.
+  const int current_thread_id =
+      static_cast<int>(base::PlatformThread::CurrentId());
+  const ThreadTicks thread_timestamp = ThreadNow();
+  const TimeTicks timestamp = OffsetNow();
+  AutoLock lock(lock_);
+  event->Initialize(current_thread_id, timestamp, thread_timestamp,
+                    TRACE_EVENT_PHASE_METADATA, category_group_enabled, name,
+                    trace_event_internal::kGlobalScope,  // scope
+                    trace_event_internal::kNoId,         // id
+                    trace_event_internal::kNoId,         // bind_id
+                    num_args, arg_names, arg_types, arg_values,
+                    convertable_values, flags);
+  metadata_events_.push_back(std::move(event));
+}
+
+// Formats a colorized, nesting-indented one-line description of an event for
+// ECHO_TO_CONSOLE mode, tracking per-thread begin/end times.
+// May be called when a COMPLETE event ends and the unfinished event has been
+// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
+std::string TraceLog::EventToConsoleMessage(unsigned char phase,
+                                            const TimeTicks& timestamp,
+                                            TraceEvent* trace_event) {
+  HEAP_PROFILER_SCOPED_IGNORE;
+  AutoLock thread_info_lock(thread_info_lock_);
+
+  // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
+  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
+  DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);
+
+  TimeDelta duration;
+  int thread_id =
+      trace_event ? trace_event->thread_id() : PlatformThread::CurrentId();
+  if (phase == TRACE_EVENT_PHASE_END) {
+    // Pop the matching BEGIN time for this thread to compute the duration.
+    duration = timestamp - thread_event_start_times_[thread_id].top();
+    thread_event_start_times_[thread_id].pop();
+  }
+
+  std::string thread_name = thread_names_[thread_id];
+  // Assign each thread name one of six ANSI colors, round-robin.
+  if (thread_colors_.find(thread_name) == thread_colors_.end())
+    thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;
+
+  std::ostringstream log;
+  log << base::StringPrintf("%s: \x1b[0;3%dm", thread_name.c_str(),
+                            thread_colors_[thread_name]);
+
+  // Indent by the number of unfinished BEGINs on this thread.
+  size_t depth = 0;
+  if (thread_event_start_times_.find(thread_id) !=
+      thread_event_start_times_.end())
+    depth = thread_event_start_times_[thread_id].size();
+
+  for (size_t i = 0; i < depth; ++i)
+    log << "| ";
+
+  if (trace_event)
+    trace_event->AppendPrettyPrinted(&log);
+  if (phase == TRACE_EVENT_PHASE_END)
+    log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
+
+  log << "\x1b[0;m";  // Reset terminal color.
+
+  if (phase == TRACE_EVENT_PHASE_BEGIN)
+    thread_event_start_times_[thread_id].push(timestamp);
+
+  return log.str();
+}
+
+// Fills in the duration of the COMPLETE event identified by |handle| when its
+// scope ends, and performs the END-side bookkeeping: ETW end event, console
+// echo, pseudo stack pop and the event callback.
+void TraceLog::UpdateTraceEventDuration(
+    const unsigned char* category_group_enabled,
+    const char* name,
+    TraceEventHandle handle) {
+  // Snapshot the enabled byte once; it can change concurrently.
+  char category_group_enabled_local = *category_group_enabled;
+  if (!category_group_enabled_local)
+    return;
+
+  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
+  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
+  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
+  if (thread_is_in_trace_event_.Get())
+    return;
+
+  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
+
+  ThreadTicks thread_now = ThreadNow();
+  TimeTicks now = OffsetNow();
+
+#if defined(OS_WIN)
+  // Generate an ETW event that marks the end of a complete event.
+  if (category_group_enabled_local & ENABLED_FOR_ETW_EXPORT)
+    TraceEventETWExport::AddCompleteEndEvent(name);
+#endif // OS_WIN
+
+  std::string console_message;
+  if (category_group_enabled_local & ENABLED_FOR_RECORDING) {
+    OptionalAutoLock lock(&lock_);
+
+    // |trace_event| is NULL if the unfinished event was already recycled out
+    // of the buffer.
+    TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
+    if (trace_event) {
+      DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
+      trace_event->UpdateDuration(now, thread_now);
+#if defined(OS_ANDROID)
+      trace_event->SendToATrace();
+#endif
+    }
+
+    if (trace_options() & kInternalEchoToConsole) {
+      console_message =
+          EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
+    }
+
+    if (AllocationContextTracker::capture_mode() ==
+        AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
+      // The corresponding push is in |AddTraceEventWithThreadIdAndTimestamp|.
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->PopPseudoStackFrame(name);
+    }
+  }
+
+  // Logged outside the block above so LOG does not run while holding |lock_|.
+  if (console_message.size())
+    LOG(ERROR) << console_message;
+
+  if (category_group_enabled_local & ENABLED_FOR_EVENT_CALLBACK) {
+    EventCallback event_callback = reinterpret_cast<EventCallback>(
+        subtle::NoBarrier_Load(&event_callback_));
+    if (event_callback) {
+      event_callback(
+          now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
+          trace_event_internal::kGlobalScope, trace_event_internal::kNoId, 0,
+          nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
+    }
+  }
+}
+
+// Arranges for |callback| to run whenever an event named |event_name| in
+// |category_name| is added (checked in AddTraceEventWithThreadIdAndTimestamp).
+void TraceLog::SetWatchEvent(const std::string& category_name,
+                             const std::string& event_name,
+                             const WatchEventCallback& callback) {
+  const unsigned char* category =
+      GetCategoryGroupEnabled(category_name.c_str());
+  AutoLock lock(lock_);
+  // The category pointer is stored atomically so the recording path can test
+  // it without taking |lock_|; the name and callback are read under the lock.
+  subtle::NoBarrier_Store(&watch_category_,
+                          reinterpret_cast<subtle::AtomicWord>(category));
+  watch_event_name_ = event_name;
+  watch_event_callback_ = callback;
+}
+
+// Clears any watch event registered through SetWatchEvent().
+void TraceLog::CancelWatchEvent() {
+  AutoLock lock(lock_);
+  // Zero the atomically-published category first so the recording path stops
+  // matching, then drop the name and callback under the lock.
+  subtle::NoBarrier_Store(&watch_category_, 0);
+  watch_event_name_.clear();
+  watch_event_callback_.Reset();
+}
+
+// XORs |id| with the per-process FNV hash (see SetProcessID) so that ids
+// produced by different processes rarely collide.
+uint64_t TraceLog::MangleEventId(uint64_t id) {
+  return process_id_hash_ ^ id;
+}
+
+// Appends all pending and implicit metadata events (process/thread names,
+// sort indices, cpu count, buffer-overflow marker) to the shared chunk.
+// |lock_| must already be held by the caller.
+void TraceLog::AddMetadataEventsWhileLocked() {
+  lock_.AssertAcquired();
+
+  // Move metadata added by |AddMetadataEvent| into the trace log.
+  while (!metadata_events_.empty()) {
+    TraceEvent* event = AddEventToThreadSharedChunkWhileLocked(nullptr, false);
+    event->MoveFrom(std::move(metadata_events_.back()));
+    metadata_events_.pop_back();
+  }
+
+#if !defined(OS_NACL)  // NaCl shouldn't expose the process id.
+  InitializeMetadataEvent(
+      AddEventToThreadSharedChunkWhileLocked(nullptr, false), 0, "num_cpus",
+      "number", base::SysInfo::NumberOfProcessors());
+#endif
+
+  const int current_thread_id =
+      static_cast<int>(base::PlatformThread::CurrentId());
+  if (process_sort_index_ != 0) {
+    InitializeMetadataEvent(
+        AddEventToThreadSharedChunkWhileLocked(nullptr, false),
+        current_thread_id, "process_sort_index", "sort_index",
+        process_sort_index_);
+  }
+
+  if (!process_name_.empty()) {
+    InitializeMetadataEvent(
+        AddEventToThreadSharedChunkWhileLocked(nullptr, false),
+        current_thread_id, "process_name", "name", process_name_);
+  }
+
+  if (!process_labels_.empty()) {
+    std::vector<std::string> labels;
+    for (const auto& label : process_labels_)
+      labels.push_back(label.second);
+    InitializeMetadataEvent(
+        AddEventToThreadSharedChunkWhileLocked(nullptr, false),
+        current_thread_id, "process_labels", "labels",
+        base::JoinString(labels, ","));
+  }
+
+  // Thread sort indices.
+  for (const auto& entry : thread_sort_indices_) {
+    if (entry.second == 0)
+      continue;
+    InitializeMetadataEvent(
+        AddEventToThreadSharedChunkWhileLocked(nullptr, false), entry.first,
+        "thread_sort_index", "sort_index", entry.second);
+  }
+
+  // Thread names.
+  AutoLock thread_info_lock(thread_info_lock_);
+  for (const auto& entry : thread_names_) {
+    if (entry.second.empty())
+      continue;
+    InitializeMetadataEvent(
+        AddEventToThreadSharedChunkWhileLocked(nullptr, false), entry.first,
+        "thread_name", "name", entry.second);
+  }
+
+  // If buffer is full, add a metadata record to report this.
+  if (!buffer_limit_reached_timestamp_.is_null()) {
+    InitializeMetadataEvent(
+        AddEventToThreadSharedChunkWhileLocked(nullptr, false),
+        current_thread_id, "trace_buffer_overflowed", "overflowed_at_ts",
+        buffer_limit_reached_timestamp_);
+  }
+}
+
+// Test-only: blocks on the sampling thread's event; no-op when sampling is
+// not active.
+void TraceLog::WaitSamplingEventForTesting() {
+  if (sampling_thread_)
+    sampling_thread_->WaitSamplingEventForTesting();
+}
+
+// Test-only: destroys the singleton instance.
+void TraceLog::DeleteForTesting() {
+  internal::DeleteTraceLogForTesting::Delete();
+}
+
+// Lock-free lookup; callers that need |lock_| use the internal variant with
+// an OptionalAutoLock.
+TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
+  return GetEventByHandleInternal(handle, nullptr);
+}
+
+// Resolves |handle| to its TraceEvent, checking in order the thread local
+// buffer, the thread shared chunk and the main buffer. The last two require
+// |lock_|, acquired through |lock| when one is provided.
+TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
+                                               OptionalAutoLock* lock) {
+  // A zero chunk_seq denotes an invalid handle.
+  if (!handle.chunk_seq)
+    return NULL;
+
+  if (thread_local_event_buffer_.Get()) {
+    TraceEvent* trace_event =
+        thread_local_event_buffer_.Get()->GetEventByHandle(handle);
+    if (trace_event)
+      return trace_event;
+  }
+
+  // The event is no longer under the control of the thread local buffer.
+  // Try to get the event from the main buffer with a lock.
+  if (lock)
+    lock->EnsureAcquired();
+
+  if (thread_shared_chunk_ &&
+      handle.chunk_index == thread_shared_chunk_index_) {
+    // The sequence number guards against the chunk having been recycled.
+    return handle.chunk_seq == thread_shared_chunk_->seq()
+               ? thread_shared_chunk_->GetEventAt(handle.event_index)
+               : NULL;
+  }
+
+  return logged_events_->GetEventByHandle(handle);
+}
+
+// Records the current process id and derives the hash used by
+// MangleEventId() for cross-process id disambiguation.
+void TraceLog::SetProcessID(int process_id) {
+  process_id_ = process_id;
+  // Create a FNV hash from the process ID for XORing.
+  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
+  const unsigned long long kOffsetBasis = 14695981039346656037ull;
+  const unsigned long long kFnvPrime = 1099511628211ull;
+  const unsigned long long pid_bits =
+      static_cast<unsigned long long>(process_id_);
+  process_id_hash_ = (kOffsetBasis ^ pid_bits) * kFnvPrime;
+}
+
+// Sets this process's sort position, emitted as "process_sort_index"
+// metadata (see AddMetadataEventsWhileLocked).
+void TraceLog::SetProcessSortIndex(int sort_index) {
+  AutoLock lock(lock_);
+  process_sort_index_ = sort_index;
+}
+
+// Sets the human-readable name emitted as "process_name" metadata.
+void TraceLog::SetProcessName(const std::string& process_name) {
+  AutoLock lock(lock_);
+  process_name_ = process_name;
+}
+
+// Adds or replaces the process label stored under |label_id|; an empty label
+// removes the entry instead.
+void TraceLog::UpdateProcessLabel(int label_id,
+                                  const std::string& current_label) {
+  if (current_label.empty()) {
+    RemoveProcessLabel(label_id);
+    return;
+  }
+
+  AutoLock lock(lock_);
+  process_labels_[label_id] = current_label;
+}
+
+// Removes the process label stored under |label_id|, if any.
+void TraceLog::RemoveProcessLabel(int label_id) {
+  AutoLock lock(lock_);
+  // erase-by-key is a no-op when the label is not present.
+  process_labels_.erase(label_id);
+}
+
+// Sets the sort position of |thread_id|, emitted as "thread_sort_index"
+// metadata (see AddMetadataEventsWhileLocked).
+void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
+  AutoLock lock(lock_);
+  thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
+}
+
+// Sets the delta used when offsetting recorded timestamps (see
+// OffsetTimestamp()/OffsetNow() on the recording path). Not synchronized;
+// NOTE(review): presumably meant to be set before tracing starts — confirm.
+void TraceLog::SetTimeOffset(TimeDelta offset) {
+  time_offset_ = offset;
+}
+
+// Test-only: number of registered synchronous enabled-state observers.
+size_t TraceLog::GetObserverCountForTest() const {
+  return enabled_state_observer_list_.size();
+}
+
+// Marks the calling thread as one whose message loop may block, and flushes
+// any thread local buffer it already has.
+void TraceLog::SetCurrentThreadBlocksMessageLoop() {
+  thread_blocks_message_loop_.Set(true);
+  auto* local_buffer = thread_local_event_buffer_.Get();
+  if (local_buffer) {
+    // This will flush the thread local buffer.
+    delete local_buffer;
+  }
+}
+
+// Builds the buffer implementation matching the active trace options:
+// ring buffers for continuous/echo modes, vectors otherwise.
+TraceBuffer* TraceLog::CreateTraceBuffer() {
+  HEAP_PROFILER_SCOPED_IGNORE;
+  InternalTraceOptions options = trace_options();
+  if (options & kInternalRecordContinuously) {
+    return TraceBuffer::CreateTraceBufferRingBuffer(
+        kTraceEventRingBufferChunks);
+  }
+  if (options & kInternalEchoToConsole) {
+    return TraceBuffer::CreateTraceBufferRingBuffer(
+        kEchoToConsoleTraceEventBufferChunks);
+  }
+  if (options & kInternalRecordAsMuchAsPossible) {
+    return TraceBuffer::CreateTraceBufferVectorOfSize(
+        kTraceEventVectorBigBufferChunks);
+  }
+  return TraceBuffer::CreateTraceBufferVectorOfSize(
+      kTraceEventVectorBufferChunks);
+}
+
+#if defined(OS_WIN)
+// Synchronizes the ENABLED_FOR_ETW_EXPORT bit of every known category group
+// with the current ETW export state.
+void TraceLog::UpdateETWCategoryGroupEnabledFlags() {
+  AutoLock lock(lock_);
+  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
+  // Go through each category and set/clear the ETW bit depending on whether the
+  // category is enabled.
+  for (size_t i = 0; i < category_index; i++) {
+    const char* category_group = g_category_groups[i];
+    DCHECK(category_group);
+    if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
+            category_group)) {
+      g_category_group_enabled[i] |= ENABLED_FOR_ETW_EXPORT;
+    } else {
+      g_category_group_enabled[i] &= ~ENABLED_FOR_ETW_EXPORT;
+    }
+  }
+}
+#endif // defined(OS_WIN)
+
+// Default estimate for subclasses that do not override this: attributes just
+// the object's own size to the overhead report.
+void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
+    TraceEventMemoryOverhead* overhead) {
+  overhead->Add("ConvertableToTraceFormat(Unknown)", sizeof(*this));
+}
+
+// Registers |listener|, keyed by its raw pointer for later removal; the
+// WeakPtr is retained alongside the entry.
+void TraceLog::AddAsyncEnabledStateObserver(
+    WeakPtr<AsyncEnabledStateObserver> listener) {
+  AutoLock lock(lock_);
+  async_observers_.insert(
+      std::make_pair(listener.get(), RegisteredAsyncObserver(listener)));
+}
+
+// Unregisters |listener|; no-op if it was never added.
+void TraceLog::RemoveAsyncEnabledStateObserver(
+    AsyncEnabledStateObserver* listener) {
+  AutoLock lock(lock_);
+  async_observers_.erase(listener);
+}
+
+// Returns true if |listener| is currently registered.
+bool TraceLog::HasAsyncEnabledStateObserver(
+    AsyncEnabledStateObserver* listener) const {
+  AutoLock lock(lock_);
+  return ContainsKey(async_observers_, listener);
+}
+
+} // namespace trace_event
+} // namespace base
+
+namespace trace_event_internal {
+
+// Opens a COMPLETE trace event at construction time; the matching duration
+// update happens in the destructor.
+ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
+    const char* category_group,
+    const char* name) {
+  // The single atom works because for now the category_group can only be "gpu".
+  DCHECK_EQ(strcmp(category_group, "gpu"), 0);
+  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
+  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
+      category_group, atomic, category_group_enabled_);
+  name_ = name;
+  if (*category_group_enabled_) {
+    event_handle_ =
+        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+            TRACE_EVENT_PHASE_COMPLETE,
+            category_group_enabled_,
+            name,
+            trace_event_internal::kGlobalScope,  // scope
+            trace_event_internal::kNoId,  // id
+            static_cast<int>(base::PlatformThread::CurrentId()),  // thread_id
+            base::TimeTicks::Now(),
+            trace_event_internal::kZeroNumArgs,
+            nullptr,
+            nullptr,
+            nullptr,
+            nullptr,
+            TRACE_EVENT_FLAG_NONE);
+  }
+}
+
+// Closes the COMPLETE event opened in the constructor, if the category was
+// enabled when the scope began.
+ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
+  if (*category_group_enabled_) {
+    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
+                                                event_handle_);
+  }
+}
+
+} // namespace trace_event_internal
diff --git a/libchrome/base/trace_event/trace_log.h b/libchrome/base/trace_event/trace_log.h
new file mode 100644
index 0000000..e4407e8
--- /dev/null
+++ b/libchrome/base/trace_event/trace_log.h
@@ -0,0 +1,531 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_LOG_H_
+#define BASE_TRACE_EVENT_TRACE_LOG_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/containers/hash_tables.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/memory/scoped_vector.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/trace_config.h"
+#include "base/trace_event/trace_event_impl.h"
+#include "build/build_config.h"
+
+namespace base {
+
+template <typename Type>
+struct DefaultSingletonTraits;
+class RefCountedString;
+
+namespace trace_event {
+
+class TraceBuffer;
+class TraceBufferChunk;
+class TraceEvent;
+class TraceEventMemoryOverhead;
+class TraceSamplingThread;
+
// Lightweight snapshot of trace buffer utilization, returned by
// TraceLog::GetStatus().
struct BASE_EXPORT TraceLogStatus {
  TraceLogStatus();
  ~TraceLogStatus();
  uint32_t event_capacity;  // Total number of events the buffer can hold.
  uint32_t event_count;     // Number of events currently stored.
};
+
// Process-wide singleton that records trace events into a buffer and
// coordinates global tracing state: enabled categories, enabled-state
// observers, event callbacks, and flushing of collected events.
class BASE_EXPORT TraceLog : public MemoryDumpProvider {
 public:
  // Overall tracing state; see SetEnabled()/SetDisabled().
  enum Mode {
    DISABLED = 0,
    RECORDING_MODE
  };

  // The pointer returned from GetCategoryGroupEnabledInternal() points to a
  // value with zero or more of the following bits. Used in this class only.
  // The TRACE_EVENT macros should only use the value as a bool.
  // These values must be in sync with macro values in TraceEvent.h in Blink.
  enum CategoryGroupEnabledFlags {
    // Category group enabled for the recording mode.
    ENABLED_FOR_RECORDING = 1 << 0,
    // Category group enabled by SetEventCallbackEnabled().
    ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
    // Category group enabled to export events to ETW.
    ENABLED_FOR_ETW_EXPORT = 1 << 3
  };

  // Returns the process-wide singleton, creating it on first use.
  static TraceLog* GetInstance();

  // Get set of known category groups. This can change as new code paths are
  // reached. The known category groups are inserted into |category_groups|.
  void GetKnownCategoryGroups(std::vector<std::string>* category_groups);

  // Retrieves a copy (for thread-safety) of the current TraceConfig.
  TraceConfig GetCurrentTraceConfig() const;

  // Initializes the thread-local event buffer, if not already initialized and
  // if the current thread supports that (has a message loop).
  void InitializeThreadLocalEventBufferIfSupported();

  // Enables normal tracing (recording trace events in the trace buffer).
  // See TraceConfig comments for details on how to control what categories
  // will be traced. If tracing has already been enabled, |category_filter| will
  // be merged into the current category filter.
  void SetEnabled(const TraceConfig& trace_config, Mode mode);

  // Disables normal tracing for all categories.
  void SetDisabled();

  bool IsEnabled() { return mode_ != DISABLED; }

  // The number of times we have begun recording traces. If tracing is off,
  // returns -1. If tracing is on, then it returns the number of times we have
  // recorded a trace. By watching for this number to increment, you can
  // passively discover when a new trace has begun. This is then used to
  // implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
  int GetNumTracesRecorded();

#if defined(OS_ANDROID)
  void StartATrace();
  void StopATrace();
  void AddClockSyncMetadataEvent();
#endif

  // Enabled state listeners give a callback when tracing is enabled or
  // disabled. This can be used to tie into other library's tracing systems
  // on-demand.
  class BASE_EXPORT EnabledStateObserver {
   public:
    virtual ~EnabledStateObserver() = default;

    // Called just after the tracing system becomes enabled, outside of the
    // |lock_|. TraceLog::IsEnabled() is true at this point.
    virtual void OnTraceLogEnabled() = 0;

    // Called just after the tracing system disables, outside of the |lock_|.
    // TraceLog::IsEnabled() is false at this point.
    virtual void OnTraceLogDisabled() = 0;
  };
  void AddEnabledStateObserver(EnabledStateObserver* listener);
  void RemoveEnabledStateObserver(EnabledStateObserver* listener);
  bool HasEnabledStateObserver(EnabledStateObserver* listener) const;

  // Asynchronous enabled state listeners. When tracing is enabled or disabled,
  // for each observer, a task for invoking its appropriate callback is posted
  // to the thread from which AddAsyncEnabledStateObserver() was called. This
  // allows the observer to be safely destroyed, provided that it happens on the
  // same thread that invoked AddAsyncEnabledStateObserver().
  class BASE_EXPORT AsyncEnabledStateObserver {
   public:
    virtual ~AsyncEnabledStateObserver() = default;

    // Posted just after the tracing system becomes enabled, outside |lock_|.
    // TraceLog::IsEnabled() is true at this point.
    virtual void OnTraceLogEnabled() = 0;

    // Posted just after the tracing system becomes disabled, outside |lock_|.
    // TraceLog::IsEnabled() is false at this point.
    virtual void OnTraceLogDisabled() = 0;
  };
  void AddAsyncEnabledStateObserver(
      WeakPtr<AsyncEnabledStateObserver> listener);
  void RemoveAsyncEnabledStateObserver(AsyncEnabledStateObserver* listener);
  bool HasAsyncEnabledStateObserver(AsyncEnabledStateObserver* listener) const;

  TraceLogStatus GetStatus() const;
  bool BufferIsFull() const;

  // Computes an estimate of the size of the TraceLog including all the retained
  // objects.
  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);

  // Not using base::Callback because it is limited to 7 parameters.
  // Also, using a primitive type allows directly passing the callback from
  // WebCore.
  // WARNING: It is possible for the previously set callback to be called
  // after a call to SetEventCallbackEnabled() that replaces it, or after a
  // call to SetEventCallbackDisabled() that disables the callback.
  // This callback may be invoked on any thread.
  // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
  // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
  // interface simple.
  typedef void (*EventCallback)(TimeTicks timestamp,
                                char phase,
                                const unsigned char* category_group_enabled,
                                const char* name,
                                const char* scope,
                                unsigned long long id,
                                int num_args,
                                const char* const arg_names[],
                                const unsigned char arg_types[],
                                const unsigned long long arg_values[],
                                unsigned int flags);

  // Enable tracing for EventCallback.
  void SetEventCallbackEnabled(const TraceConfig& trace_config,
                               EventCallback cb);
  void SetEventCallbackDisabled();
  void SetArgumentFilterPredicate(
      const ArgumentFilterPredicate& argument_filter_predicate);

  // Flush all collected events to the given output callback. The callback will
  // be called one or more times either synchronously or asynchronously from
  // the current thread with IPC-bite-size chunks. The string format is
  // undefined. Use TraceResultBuffer to convert one or more trace strings to
  // JSON. The callback can be null if the caller doesn't want any data.
  // Due to the implementation of thread-local buffers, flush can't be
  // done when tracing is enabled. If called when tracing is enabled, the
  // callback will be called directly with (empty_string, false) to indicate
  // the end of this unsuccessful flush. Flush does the serialization
  // on the same thread if the caller doesn't set use_worker_thread explicitly.
  typedef base::Callback<void(const scoped_refptr<base::RefCountedString>&,
                              bool has_more_events)> OutputCallback;
  void Flush(const OutputCallback& cb, bool use_worker_thread = false);

  // Cancels tracing and discards collected data.
  void CancelTracing(const OutputCallback& cb);

  // Called by TRACE_EVENT* macros, don't call this directly.
  // The name parameter is a category group for example:
  // TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
  static const unsigned char* GetCategoryGroupEnabled(const char* name);
  static const char* GetCategoryGroupName(
      const unsigned char* category_group_enabled);

  // Called by TRACE_EVENT* macros, don't call this directly.
  // If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
  // into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
  TraceEventHandle AddTraceEvent(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      const char* scope,
      unsigned long long id,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
      unsigned int flags);
  TraceEventHandle AddTraceEventWithBindId(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      const char* scope,
      unsigned long long id,
      unsigned long long bind_id,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
      unsigned int flags);
  TraceEventHandle AddTraceEventWithProcessId(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      const char* scope,
      unsigned long long id,
      int process_id,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
      unsigned int flags);
  TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      const char* scope,
      unsigned long long id,
      int thread_id,
      const TimeTicks& timestamp,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
      unsigned int flags);
  TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      const char* scope,
      unsigned long long id,
      unsigned long long bind_id,
      int thread_id,
      const TimeTicks& timestamp,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
      unsigned int flags);

  // Adds a metadata event that will be written when the trace log is flushed.
  void AddMetadataEvent(
      const unsigned char* category_group_enabled,
      const char* name,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
      unsigned int flags);

  void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
                                const char* name,
                                TraceEventHandle handle);

  // For every matching event, the callback will be called.
  typedef base::Callback<void()> WatchEventCallback;
  void SetWatchEvent(const std::string& category_name,
                     const std::string& event_name,
                     const WatchEventCallback& callback);
  // Cancel the watch event. If tracing is enabled, this may race with the
  // watch event notification firing.
  void CancelWatchEvent();

  int process_id() const { return process_id_; }

  uint64_t MangleEventId(uint64_t id);

  // Exposed for unittesting:

  void WaitSamplingEventForTesting();

  // Allows deleting our singleton instance.
  static void DeleteForTesting();

  // Allow tests to inspect TraceEvents.
  TraceEvent* GetEventByHandle(TraceEventHandle handle);

  void SetProcessID(int process_id);

  // Process sort indices, if set, override the order in which a process will
  // appear relative to other processes in the trace viewer. Processes are
  // sorted first on their sort index, ascending, then by their name, and then
  // tid.
  void SetProcessSortIndex(int sort_index);

  // Sets the name of the process.
  void SetProcessName(const std::string& process_name);

  // Processes can have labels in addition to their names. Use labels, for
  // instance, to list out the web page titles that a process is handling.
  void UpdateProcessLabel(int label_id, const std::string& current_label);
  void RemoveProcessLabel(int label_id);

  // Thread sort indices, if set, override the order in which a thread will
  // appear within its process in the trace viewer. Threads are sorted first
  // on their sort index, ascending, then by their name, and then tid.
  void SetThreadSortIndex(PlatformThreadId thread_id, int sort_index);

  // Allow setting an offset between the current TimeTicks time and the time
  // that should be reported.
  void SetTimeOffset(TimeDelta offset);

  size_t GetObserverCountForTest() const;

  // Call this method if the current thread may block the message loop to
  // prevent the thread from using the thread-local buffer because the thread
  // may not handle the flush request in time causing loss of unflushed events.
  void SetCurrentThreadBlocksMessageLoop();

#if defined(OS_WIN)
  // This function is called by the ETW exporting module whenever the ETW
  // keyword (flags) changes. This keyword indicates which categories should be
  // exported, so whenever it changes, we adjust accordingly.
  void UpdateETWCategoryGroupEnabledFlags();
#endif

 private:
  typedef unsigned int InternalTraceOptions;

  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferGetReturnChunk);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferHalfIteration);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferFullIteration);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, TraceBufferVectorReportFull);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           ConvertTraceConfigToInternalOptions);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceRecordAsMuchAsPossibleMode);

  // This allows constructor and destructor to be private and usable only
  // by the Singleton class.
  friend struct DefaultSingletonTraits<TraceLog>;

  // MemoryDumpProvider implementation.
  bool OnMemoryDump(const MemoryDumpArgs& args,
                    ProcessMemoryDump* pmd) override;

  // Enable/disable each category group based on the current mode_,
  // category_filter_, event_callback_ and event_callback_category_filter_.
  // Enable the category group in the enabled mode if category_filter_ matches
  // the category group, or event_callback_ is not null and
  // event_callback_category_filter_ matches the category group.
  void UpdateCategoryGroupEnabledFlags();
  void UpdateCategoryGroupEnabledFlag(size_t category_index);

  // Configure synthetic delays based on the values set in the current
  // trace config.
  void UpdateSyntheticDelaysFromTraceConfig();

  InternalTraceOptions GetInternalOptionsFromTraceConfig(
      const TraceConfig& config);

  class ThreadLocalEventBuffer;
  class OptionalAutoLock;
  struct RegisteredAsyncObserver;

  TraceLog();
  ~TraceLog() override;
  const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
  void AddMetadataEventsWhileLocked();

  InternalTraceOptions trace_options() const {
    return static_cast<InternalTraceOptions>(
        subtle::NoBarrier_Load(&trace_options_));
  }

  TraceBuffer* trace_buffer() const { return logged_events_.get(); }
  TraceBuffer* CreateTraceBuffer();

  std::string EventToConsoleMessage(unsigned char phase,
                                    const TimeTicks& timestamp,
                                    TraceEvent* trace_event);

  TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
                                                     bool check_buffer_is_full);
  void CheckIfBufferIsFullWhileLocked();
  void SetDisabledWhileLocked();

  TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
                                       OptionalAutoLock* lock);

  void FlushInternal(const OutputCallback& cb,
                     bool use_worker_thread,
                     bool discard_events);

  // |generation| is used in the following callbacks to check if the callback
  // is called for the flush of the current |logged_events_|.
  void FlushCurrentThread(int generation, bool discard_events);
  // Usually it runs on a different thread.
  static void ConvertTraceEventsToTraceFormat(
      std::unique_ptr<TraceBuffer> logged_events,
      const TraceLog::OutputCallback& flush_output_callback,
      const ArgumentFilterPredicate& argument_filter_predicate);
  void FinishFlush(int generation, bool discard_events);
  void OnFlushTimeout(int generation, bool discard_events);

  int generation() const {
    return static_cast<int>(subtle::NoBarrier_Load(&generation_));
  }
  bool CheckGeneration(int generation) const {
    return generation == this->generation();
  }
  void UseNextTraceBuffer();

  TimeTicks OffsetNow() const { return OffsetTimestamp(TimeTicks::Now()); }
  TimeTicks OffsetTimestamp(const TimeTicks& timestamp) const {
    return timestamp - time_offset_;
  }

  // Internal representation of trace options since we store the currently used
  // trace option as an AtomicWord.
  static const InternalTraceOptions kInternalNone;
  static const InternalTraceOptions kInternalRecordUntilFull;
  static const InternalTraceOptions kInternalRecordContinuously;
  static const InternalTraceOptions kInternalEchoToConsole;
  static const InternalTraceOptions kInternalEnableSampling;
  static const InternalTraceOptions kInternalRecordAsMuchAsPossible;
  static const InternalTraceOptions kInternalEnableArgumentFilter;

  // NOTE(review): std::map and std::stack are used by members below, but
  // <map> and <stack> are not included by this header — it relies on
  // transitive includes; confirm and add them if this breaks a build.

  // This lock protects TraceLog member accesses (except for members protected
  // by thread_info_lock_) from arbitrary threads.
  mutable Lock lock_;
  // This lock protects accesses to thread_names_, thread_event_start_times_
  // and thread_colors_.
  Lock thread_info_lock_;
  // Current tracing mode; IsEnabled() returns mode_ != DISABLED.
  Mode mode_;
  // Number of times tracing has begun; see GetNumTracesRecorded().
  int num_traces_recorded_;
  // The main trace buffer; see trace_buffer().
  std::unique_ptr<TraceBuffer> logged_events_;
  // Metadata events written out at flush time; see AddMetadataEvent().
  std::vector<std::unique_ptr<TraceEvent>> metadata_events_;
  subtle::AtomicWord /* EventCallback */ event_callback_;
  bool dispatching_to_observer_list_;
  std::vector<EnabledStateObserver*> enabled_state_observer_list_;
  std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver>
      async_observers_;

  std::string process_name_;
  base::hash_map<int, std::string> process_labels_;
  int process_sort_index_;
  base::hash_map<int, int> thread_sort_indices_;
  base::hash_map<int, std::string> thread_names_;

  // The following two maps are used only when ECHO_TO_CONSOLE.
  base::hash_map<int, std::stack<TimeTicks>> thread_event_start_times_;
  base::hash_map<std::string, int> thread_colors_;

  TimeTicks buffer_limit_reached_timestamp_;

  // XORed with TraceID to make it unlikely to collide with other processes.
  unsigned long long process_id_hash_;

  int process_id_;

  // Subtracted from TimeTicks::Now() by OffsetTimestamp(); see SetTimeOffset().
  TimeDelta time_offset_;

  // Allow tests to wake up when certain events occur.
  WatchEventCallback watch_event_callback_;
  subtle::AtomicWord /* const unsigned char* */ watch_category_;
  std::string watch_event_name_;

  subtle::AtomicWord /* Options */ trace_options_;

  // Sampling thread handles.
  std::unique_ptr<TraceSamplingThread> sampling_thread_;
  PlatformThreadHandle sampling_thread_handle_;

  TraceConfig trace_config_;
  TraceConfig event_callback_trace_config_;

  ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
  ThreadLocalBoolean thread_blocks_message_loop_;
  ThreadLocalBoolean thread_is_in_trace_event_;

  // Contains the message loops of threads that have had at least one event
  // added into the local event buffer. Not using SingleThreadTaskRunner
  // because we need to know the life time of the message loops.
  hash_set<MessageLoop*> thread_message_loops_;

  // For events which can't be added into the thread local buffer, e.g. events
  // from threads without a message loop.
  std::unique_ptr<TraceBufferChunk> thread_shared_chunk_;
  size_t thread_shared_chunk_index_;

  // Set when asynchronous Flush is in progress.
  OutputCallback flush_output_callback_;
  scoped_refptr<SingleThreadTaskRunner> flush_task_runner_;
  ArgumentFilterPredicate argument_filter_predicate_;
  subtle::AtomicWord generation_;
  bool use_worker_thread_;

  DISALLOW_COPY_AND_ASSIGN(TraceLog);
};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_LOG_H_
diff --git a/libchrome/base/trace_event/trace_log_constants.cc b/libchrome/base/trace_event/trace_log_constants.cc
new file mode 100644
index 0000000..cd2ff0d
--- /dev/null
+++ b/libchrome/base/trace_event/trace_log_constants.cc
@@ -0,0 +1,27 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_log.h"
+
+namespace base {
+namespace trace_event {
+
// Constants used by TraceLog's internal representation of trace options.
// Each is a distinct bit so options can be OR-ed together into a single
// InternalTraceOptions word (stored as an AtomicWord in TraceLog).
const TraceLog::InternalTraceOptions
    TraceLog::kInternalNone = 0;
const TraceLog::InternalTraceOptions
    TraceLog::kInternalRecordUntilFull = 1 << 0;
const TraceLog::InternalTraceOptions
    TraceLog::kInternalRecordContinuously = 1 << 1;
const TraceLog::InternalTraceOptions
    TraceLog::kInternalEnableSampling = 1 << 2;
const TraceLog::InternalTraceOptions
    TraceLog::kInternalEchoToConsole = 1 << 3;
const TraceLog::InternalTraceOptions
    TraceLog::kInternalRecordAsMuchAsPossible = 1 << 4;
const TraceLog::InternalTraceOptions
    TraceLog::kInternalEnableArgumentFilter = 1 << 5;
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/trace_sampling_thread.cc b/libchrome/base/trace_event/trace_sampling_thread.cc
new file mode 100644
index 0000000..5a0d2f8
--- /dev/null
+++ b/libchrome/base/trace_event/trace_sampling_thread.cc
@@ -0,0 +1,107 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_impl.h"
+#include "base/trace_event/trace_log.h"
+#include "base/trace_event/trace_sampling_thread.h"
+
+namespace base {
+namespace trace_event {
+
// Pairs a sampling bucket (an atomic slot written by the instrumented code)
// with the callback used to sample it. Copyable value type stored in
// TraceSamplingThread::sample_buckets_.
class TraceBucketData {
 public:
  // Note: the constructor takes base::subtle::AtomicWord*, which is the same
  // type as the TRACE_EVENT_API_ATOMIC_WORD used for |bucket| below.
  TraceBucketData(base::subtle::AtomicWord* bucket,
                  const char* name,
                  TraceSampleCallback callback);
  ~TraceBucketData();

  // Atomic slot holding a "category\0name" pointer, or 0 when empty.
  TRACE_EVENT_API_ATOMIC_WORD* bucket;
  const char* bucket_name;
  TraceSampleCallback callback;
};
+
// Constructs the sampling thread delegate in the not-yet-running state.
// The waitable event is auto-reset, so each WaitSamplingEventForTesting()
// consumes exactly one Signal() from a completed sampling pass.
TraceSamplingThread::TraceSamplingThread()
    : thread_running_(false),
      waitable_event_for_testing_(WaitableEvent::ResetPolicy::AUTOMATIC,
                                  WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+TraceSamplingThread::~TraceSamplingThread() {}
+
+void TraceSamplingThread::ThreadMain() {
+ PlatformThread::SetName("Sampling Thread");
+ thread_running_ = true;
+ const int kSamplingFrequencyMicroseconds = 1000;
+ while (!cancellation_flag_.IsSet()) {
+ PlatformThread::Sleep(
+ TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
+ GetSamples();
+ waitable_event_for_testing_.Signal();
+ }
+}
+
// static
// Default per-bucket sampling callback: atomically reads the bucket and, if a
// sample is pending, emits a TRACE_EVENT_PHASE_SAMPLE event for it.
void TraceSamplingThread::DefaultSamplingCallback(
    TraceBucketData* bucket_data) {
  // The bucket holds a pointer to a combined "category\0name" string; a zero
  // value means no sample is pending.
  TRACE_EVENT_API_ATOMIC_WORD category_and_name =
      TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
  if (!category_and_name)
    return;
  const char* const combined =
      reinterpret_cast<const char* const>(category_and_name);
  const char* category_group;
  const char* name;
  ExtractCategoryAndName(combined, &category_group, &name);
  // Record a sample event with no arguments for the extracted pair.
  TRACE_EVENT_API_ADD_TRACE_EVENT(
      TRACE_EVENT_PHASE_SAMPLE,
      TraceLog::GetCategoryGroupEnabled(category_group), name,
      trace_event_internal::kGlobalScope, trace_event_internal::kNoId, 0,
      NULL, NULL, NULL, NULL, 0);
}
+
+void TraceSamplingThread::GetSamples() {
+ for (size_t i = 0; i < sample_buckets_.size(); ++i) {
+ TraceBucketData* bucket_data = &sample_buckets_[i];
+ bucket_data->callback.Run(bucket_data);
+ }
+}
+
+void TraceSamplingThread::RegisterSampleBucket(
+ TRACE_EVENT_API_ATOMIC_WORD* bucket,
+ const char* const name,
+ TraceSampleCallback callback) {
+ // Access to sample_buckets_ doesn't cause races with the sampling thread
+ // that uses the sample_buckets_, because it is guaranteed that
+ // RegisterSampleBucket is called before the sampling thread is created.
+ DCHECK(!thread_running_);
+ sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
+}
+
+// static
+void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
+ const char** category,
+ const char** name) {
+ *category = combined;
+ *name = &combined[strlen(combined) + 1];
+}
+
// Requests that ThreadMain() exit its loop. The thread may still finish the
// sampling pass it is in (the flag is only checked at the top of the loop).
void TraceSamplingThread::Stop() {
  cancellation_flag_.Set();
}
+
// Blocks until the sampling thread signals completion of a sampling pass.
// Test-only synchronization point.
void TraceSamplingThread::WaitSamplingEventForTesting() {
  waitable_event_for_testing_.Wait();
}
+
// Plain member-wise initialization; TraceBucketData does not own |bucket| or
// |name| — both must outlive the sampling thread.
TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
                                 const char* name,
                                 TraceSampleCallback callback)
    : bucket(bucket), bucket_name(name), callback(callback) {}
+
+TraceBucketData::~TraceBucketData() {}
+
+} // namespace trace_event
+} // namespace base
diff --git a/libchrome/base/trace_event/trace_sampling_thread.h b/libchrome/base/trace_event/trace_sampling_thread.h
new file mode 100644
index 0000000..f976a80
--- /dev/null
+++ b/libchrome/base/trace_event/trace_sampling_thread.h
@@ -0,0 +1,54 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
+#define BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
+
+#include "base/synchronization/cancellation_flag.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceBucketData;
+typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
+
+// This object must be created on the IO thread.
// This object must be created on the IO thread.
// Background thread that periodically samples registered buckets and emits
// TRACE_EVENT_PHASE_SAMPLE events via each bucket's callback.
class TraceSamplingThread : public PlatformThread::Delegate {
 public:
  TraceSamplingThread();
  ~TraceSamplingThread() override;

  // Implementation of PlatformThread::Delegate:
  // Loops until Stop() is called, sampling all buckets once per period.
  void ThreadMain() override;

  // Default callback: reads the bucket atomically and records a sample event.
  static void DefaultSamplingCallback(TraceBucketData* bucket_data);

  // Asks ThreadMain() to exit; the thread may finish its current pass first.
  void Stop();
  // Blocks until one sampling pass has completed (test-only).
  void WaitSamplingEventForTesting();

 private:
  friend class TraceLog;

  // Runs the callback of every registered bucket once.
  void GetSamples();
  // Not thread-safe. Once the ThreadMain has been called, this can no longer
  // be called.
  void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
                            const char* const name,
                            TraceSampleCallback callback);
  // Splits a combined "category\0name" into the two component parts.
  static void ExtractCategoryAndName(const char* combined,
                                     const char** category,
                                     const char** name);
  // Buckets to sample each pass; mutated only before the thread starts.
  std::vector<TraceBucketData> sample_buckets_;
  // Set true by ThreadMain(); guards RegisterSampleBucket() via DCHECK.
  bool thread_running_;
  // Set by Stop(); checked at the top of ThreadMain()'s loop.
  CancellationFlag cancellation_flag_;
  // Auto-reset event signaled after every sampling pass.
  WaitableEvent waitable_event_for_testing_;
};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
diff --git a/libchrome/base/tracked_objects.cc b/libchrome/base/tracked_objects.cc
new file mode 100644
index 0000000..487fd19
--- /dev/null
+++ b/libchrome/base/tracked_objects.cc
@@ -0,0 +1,980 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/tracked_objects.h"
+
+#include <limits.h>
+#include <stdlib.h>
+
+#include "base/atomicops.h"
+#include "base/base_switches.h"
+#include "base/command_line.h"
+#include "base/compiler_specific.h"
+#include "base/debug/leak_annotations.h"
+#include "base/logging.h"
+#include "base/process/process_handle.h"
+#include "base/strings/stringprintf.h"
+#include "base/third_party/valgrind/memcheck.h"
+#include "base/threading/worker_pool.h"
+#include "base/tracking_info.h"
+#include "build/build_config.h"
+
+using base::TimeDelta;
+
+namespace base {
+class TimeDelta;
+}
+
+namespace tracked_objects {
+
+namespace {
+// When ThreadData is first initialized, should we start in an ACTIVE state to
+// record all of the startup-time tasks, or should we start up DEACTIVATED, so
+// that we only record after parsing the command line flag --enable-tracking.
+// Note that the flag may force either state, so this really controls only the
+// period of time up until that flag is parsed.  If there is no flag seen, then
+// this state may prevail for much or all of the process lifetime.
+const ThreadData::Status kInitialStartupState = ThreadData::PROFILING_ACTIVE;
+
+// Possible states of the profiler timing enabledness.
+enum {
+  UNDEFINED_TIMING,
+  ENABLED_TIMING,
+  DISABLED_TIMING,
+};
+
+// State of the profiler timing enabledness.
+base::subtle::Atomic32 g_profiler_timing_enabled = UNDEFINED_TIMING;
+
+// Returns whether profiler timing is enabled.  The default is true, but this
+// may be overridden by a command-line flag.  Some platforms may
+// programmatically set this command-line flag to the "off" value if it's not
+// specified.
+// This in turn can be overridden by explicitly calling
+// ThreadData::EnableProfilerTiming, say, based on a field trial.
+inline bool IsProfilerTimingEnabled() {
+  // Reading |g_profiler_timing_enabled| is done without barrier because
+  // multiple initialization is not an issue while the barrier can be relatively
+  // costly given that this method is sometimes called in a tight loop.
+  base::subtle::Atomic32 current_timing_enabled =
+      base::subtle::NoBarrier_Load(&g_profiler_timing_enabled);
+  if (current_timing_enabled == UNDEFINED_TIMING) {
+    // Until the command line is available, err on the side of "enabled".
+    if (!base::CommandLine::InitializedForCurrentProcess())
+      return true;
+    // Parse the switch once and cache the result for later calls.
+    current_timing_enabled =
+        (base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+             switches::kProfilerTiming) ==
+         switches::kProfilerTimingDisabledValue)
+            ? DISABLED_TIMING
+            : ENABLED_TIMING;
+    base::subtle::NoBarrier_Store(&g_profiler_timing_enabled,
+                                  current_timing_enabled);
+  }
+  return current_timing_enabled == ENABLED_TIMING;
+}
+
+}  // namespace
+
+//------------------------------------------------------------------------------
+// DeathData tallies durations when a death takes place.
+
+// Zero-initializes every tally; last_phase_snapshot_ stays null until the
+// first profiling phase completes.
+DeathData::DeathData()
+    : count_(0),
+      sample_probability_count_(0),
+      run_duration_sum_(0),
+      queue_duration_sum_(0),
+      run_duration_max_(0),
+      queue_duration_max_(0),
+      run_duration_sample_(0),
+      queue_duration_sample_(0),
+      last_phase_snapshot_(nullptr) {
+}
+
+DeathData::DeathData(const DeathData& other)
+    : count_(other.count_),
+      sample_probability_count_(other.sample_probability_count_),
+      run_duration_sum_(other.run_duration_sum_),
+      queue_duration_sum_(other.queue_duration_sum_),
+      run_duration_max_(other.run_duration_max_),
+      queue_duration_max_(other.queue_duration_max_),
+      run_duration_sample_(other.run_duration_sample_),
+      queue_duration_sample_(other.queue_duration_sample_),
+      last_phase_snapshot_(nullptr) {
+  // This constructor will be used by std::map when adding new DeathData values
+  // to the map.  At that point, last_phase_snapshot_ is still NULL, so we don't
+  // need to worry about ownership transfer.
+  DCHECK(other.last_phase_snapshot_ == nullptr);
+}
+
+// Frees the singly-linked list of per-phase snapshots owned by this object.
+DeathData::~DeathData() {
+  while (last_phase_snapshot_) {
+    const DeathDataPhaseSnapshot* snapshot = last_phase_snapshot_;
+    last_phase_snapshot_ = snapshot->prev;
+    delete snapshot;
+  }
+}
+
+// TODO(jar): I need to see if this macro to optimize branching is worth using.
+//
+// This macro has no branching, so it is surely fast, and is equivalent to:
+//   if (assign_it)
+//     target = source;
+// We use a macro rather than a template to force this to inline.
+// Related code for calculating max is discussed on the web.
+// NOTE(review): this macro appears to be unused in this translation unit.
+#define CONDITIONAL_ASSIGN(assign_it, target, source) \
+  ((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it))
+
+// Tallies one task death: bumps count (clamped at INT_MAX), accumulates the
+// queue/run duration sums, raises the maxima when exceeded, and with
+// probability 1/sample_probability_count_ replaces the stored duration sample.
+// All accesses use NoBarrier atomics: races can skew the statistics slightly,
+// but cannot corrupt memory (see comments in ThreadData::Snapshot).
+void DeathData::RecordDeath(const int32_t queue_duration,
+                            const int32_t run_duration,
+                            const uint32_t random_number) {
+  // We'll just clamp at INT_MAX, but we should note this in the UI as such.
+  if (count_ < INT_MAX)
+    base::subtle::NoBarrier_Store(&count_, count_ + 1);
+
+  int sample_probability_count =
+      base::subtle::NoBarrier_Load(&sample_probability_count_);
+  if (sample_probability_count < INT_MAX)
+    ++sample_probability_count;
+  base::subtle::NoBarrier_Store(&sample_probability_count_,
+                                sample_probability_count);
+
+  base::subtle::NoBarrier_Store(&queue_duration_sum_,
+                                queue_duration_sum_ + queue_duration);
+  base::subtle::NoBarrier_Store(&run_duration_sum_,
+                                run_duration_sum_ + run_duration);
+
+  if (queue_duration_max() < queue_duration)
+    base::subtle::NoBarrier_Store(&queue_duration_max_, queue_duration);
+  if (run_duration_max() < run_duration)
+    base::subtle::NoBarrier_Store(&run_duration_max_, run_duration);
+
+  // Take a uniformly distributed sample over all durations ever supplied during
+  // the current profiling phase.
+  // The probability that we (instead) use this new sample is
+  // 1/sample_probability_count_.  This results in a completely uniform
+  // selection of the sample (at least when we don't clamp
+  // sample_probability_count_... but that should be inconsequentially likely).
+  // We ignore the fact that we correlated our selection of a sample to the run
+  // and queue times (i.e., we used them to generate random_number).
+  CHECK_GT(sample_probability_count, 0);
+  if (0 == (random_number % sample_probability_count)) {
+    base::subtle::NoBarrier_Store(&queue_duration_sample_, queue_duration);
+    base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration);
+  }
+}
+
+// Pushes a snapshot of the current tallies onto the per-phase list, then
+// resets only the fields that cannot later be recovered via deltas.
+void DeathData::OnProfilingPhaseCompleted(int profiling_phase) {
+  // Snapshotting and storing current state.
+  last_phase_snapshot_ = new DeathDataPhaseSnapshot(
+      profiling_phase, count(), run_duration_sum(), run_duration_max(),
+      run_duration_sample(), queue_duration_sum(), queue_duration_max(),
+      queue_duration_sample(), last_phase_snapshot_);
+
+  // Not touching fields for which a delta can be computed by comparing with a
+  // snapshot from the previous phase.  Resetting other fields.  Sample values
+  // will be reset upon next death recording because sample_probability_count_
+  // is set to 0.
+  // We avoid resetting to 0 in favor of deltas whenever possible.  The reason
+  // is that for incrementable fields, resetting to 0 from the snapshot thread
+  // potentially in parallel with incrementing in the death thread may result in
+  // significant data corruption that has a potential to grow with time.  Not
+  // resetting incrementable fields and using deltas will cause any
+  // off-by-little corruptions to be likely fixed at the next snapshot.
+  // The max values are not incrementable, and cannot be deduced using deltas
+  // for a given phase.  Hence, we have to reset them to 0.  But the potential
+  // damage is limited to getting the previous phase's max to apply for the next
+  // phase, and the error doesn't have a potential to keep growing with new
+  // resets.
+  // sample_probability_count_ is incrementable, but must be reset to 0 at the
+  // phase end, so that we start a new uniformly randomized sample selection
+  // after the reset.  These fields are updated using atomics.  However, race
+  // conditions are possible since these are updated individually and not
+  // together atomically, resulting in the values being mutually inconsistent.
+  // The damage is limited to selecting a wrong sample, which is not something
+  // that can cause accumulating or cascading effects.
+  // If there were no inconsistencies caused by race conditions, we never send a
+  // sample for the previous phase in the next phase's snapshot because
+  // ThreadData::SnapshotExecutedTasks doesn't send deltas with 0 count.
+  base::subtle::NoBarrier_Store(&sample_probability_count_, 0);
+  base::subtle::NoBarrier_Store(&run_duration_max_, 0);
+  base::subtle::NoBarrier_Store(&queue_duration_max_, 0);
+}
+
+//------------------------------------------------------------------------------
+// Default instance uses -1 in all fields to mark "uninitialized" data.
+DeathDataSnapshot::DeathDataSnapshot()
+    : count(-1),
+      run_duration_sum(-1),
+      run_duration_max(-1),
+      run_duration_sample(-1),
+      queue_duration_sum(-1),
+      queue_duration_max(-1),
+      queue_duration_sample(-1) {
+}
+
+DeathDataSnapshot::DeathDataSnapshot(int count,
+                                     int32_t run_duration_sum,
+                                     int32_t run_duration_max,
+                                     int32_t run_duration_sample,
+                                     int32_t queue_duration_sum,
+                                     int32_t queue_duration_max,
+                                     int32_t queue_duration_sample)
+    : count(count),
+      run_duration_sum(run_duration_sum),
+      run_duration_max(run_duration_max),
+      run_duration_sample(run_duration_sample),
+      queue_duration_sum(queue_duration_sum),
+      queue_duration_max(queue_duration_max),
+      queue_duration_sample(queue_duration_sample) {}
+
+DeathDataSnapshot::~DeathDataSnapshot() {
+}
+
+// Returns this snapshot minus |older| for the summable fields (count and
+// sums).  Max and sample values are not summable across phases, so this
+// phase's values are passed through unchanged.
+DeathDataSnapshot DeathDataSnapshot::Delta(
+    const DeathDataSnapshot& older) const {
+  return DeathDataSnapshot(count - older.count,
+                           run_duration_sum - older.run_duration_sum,
+                           run_duration_max, run_duration_sample,
+                           queue_duration_sum - older.queue_duration_sum,
+                           queue_duration_max, queue_duration_sample);
+}
+
+//------------------------------------------------------------------------------
+// Records the code location of a birth together with the ThreadData of the
+// thread on which it occurred.  Storing a raw pointer is safe because
+// ThreadData instances are never deleted during normal operation (see
+// ThreadData::ShutdownSingleThreadedCleanup).
+// Fix: the initializer previously read "birth_thread_(¤t)" — mojibake
+// for "&current" ("&curren" is the HTML entity for '¤') — which does not
+// compile.
+BirthOnThread::BirthOnThread(const Location& location,
+                             const ThreadData& current)
+    : location_(location),
+      birth_thread_(&current) {
+}
+
+//------------------------------------------------------------------------------
+BirthOnThreadSnapshot::BirthOnThreadSnapshot() {
+}
+
+// Copies the birth's location and the name of its birth thread into plain
+// value fields, detaching the snapshot from the live tracking structures.
+BirthOnThreadSnapshot::BirthOnThreadSnapshot(const BirthOnThread& birth)
+    : location(birth.location()),
+      thread_name(birth.birth_thread()->thread_name()) {
+}
+
+BirthOnThreadSnapshot::~BirthOnThreadSnapshot() {
+}
+
+//------------------------------------------------------------------------------
+// The count starts at 1, since constructing a Births is itself the first
+// birth at this location on this thread.
+Births::Births(const Location& location, const ThreadData& current)
+    : BirthOnThread(location, current),
+      birth_count_(1) { }
+
+int Births::birth_count() const { return birth_count_; }
+
+void Births::RecordBirth() { ++birth_count_; }
+
+//------------------------------------------------------------------------------
+// ThreadData maintains the central data for all births and deaths on a single
+// thread.
+
+// TODO(jar): We should pull all these static vars together, into a struct, and
+// optimize layout so that we benefit from locality of reference during accesses
+// to them.
+
+// static
+ThreadData::NowFunction* ThreadData::now_function_for_testing_ = NULL;
+
+// A TLS slot which points to the ThreadData instance for the current thread.
+// We do a fake initialization here (zeroing out data), and then the real
+// in-place construction happens when we call tls_index_.Initialize().
+// static
+base::ThreadLocalStorage::StaticSlot ThreadData::tls_index_ = TLS_INITIALIZER;
+
+// Number of worker-thread ThreadData instances ever created; incremented
+// under list_lock_ (see Get()).
+// static
+int ThreadData::worker_thread_data_creation_count_ = 0;
+
+// Number of thread-termination cleanups observed; incremented under
+// list_lock_ (see OnThreadTerminationCleanup()).
+// static
+int ThreadData::cleanup_count_ = 0;
+
+// Bumped on each (re)initialization so tests can detect instances left over
+// from an earlier incarnation.
+// static
+int ThreadData::incarnation_counter_ = 0;
+
+// Head of the linked list of all ThreadData instances; guarded by list_lock_.
+// static
+ThreadData* ThreadData::all_thread_data_list_head_ = NULL;
+
+// Head of the list of retired worker ThreadData available for reuse; guarded
+// by list_lock_.
+// static
+ThreadData* ThreadData::first_retired_worker_ = NULL;
+
+// static
+base::LazyInstance<base::Lock>::Leaky
+    ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER;
+
+// Current tracking status; holds a ThreadData::Status value.
+// static
+base::subtle::Atomic32 ThreadData::status_ = ThreadData::UNINITIALIZED;
+
+// Constructor for named (non-worker) threads.
+ThreadData::ThreadData(const std::string& suggested_name)
+    : next_(NULL),
+      next_retired_worker_(NULL),
+      worker_thread_number_(0),
+      incarnation_count_for_pool_(-1),
+      current_stopwatch_(NULL) {
+  // NOTE(review): size() is unsigned, so this DCHECK can never fail.
+  DCHECK_GE(suggested_name.size(), 0u);
+  thread_name_ = suggested_name;
+  PushToHeadOfList();  // Which sets real incarnation_count_for_pool_.
+}
+
+// Constructor for worker-pool threads, which get a generated name.
+ThreadData::ThreadData(int thread_number)
+    : next_(NULL),
+      next_retired_worker_(NULL),
+      worker_thread_number_(thread_number),
+      incarnation_count_for_pool_(-1),
+      current_stopwatch_(NULL) {
+  CHECK_GT(thread_number, 0);
+  base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number);
+  PushToHeadOfList();  // Which sets real incarnation_count_for_pool_.
+}
+
+// Instances are intentionally leaked in production; deletion only happens in
+// ShutdownSingleThreadedCleanup (test-only).
+ThreadData::~ThreadData() {
+}
+
+// Links this instance onto the global list and seeds random_number_ with
+// entropy derived from this object's address and the current time.
+void ThreadData::PushToHeadOfList() {
+  // Toss in a hint of randomness (atop the uninitialized value).  The
+  // annotations below tell Valgrind/MSan that deliberately reading the
+  // uninitialized random_number_ here is not a bug.
+  (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_,
+                                                 sizeof(random_number_));
+  MSAN_UNPOISON(&random_number_, sizeof(random_number_));
+  random_number_ += static_cast<uint32_t>(this - static_cast<ThreadData*>(0));
+  random_number_ ^= (Now() - TrackedTime()).InMilliseconds();
+
+  DCHECK(!next_);
+  base::AutoLock lock(*list_lock_.Pointer());
+  incarnation_count_for_pool_ = incarnation_counter_;
+  next_ = all_thread_data_list_head_;
+  all_thread_data_list_head_ = this;
+}
+
+// static
+// Returns the head of the (push-only) global list of ThreadData instances.
+ThreadData* ThreadData::first() {
+  base::AutoLock lock(*list_lock_.Pointer());
+  return all_thread_data_list_head_;
+}
+
+ThreadData* ThreadData::next() const { return next_; }
+
+// static
+// Creates and registers the calling thread's ThreadData under
+// |suggested_name|.  Worker-pool threads are excluded here; they receive
+// numbered instances lazily via Get().
+void ThreadData::InitializeThreadContext(const std::string& suggested_name) {
+  if (base::WorkerPool::RunsTasksOnCurrentThread())
+    return;
+  EnsureTlsInitialization();
+  ThreadData* current_thread_data =
+      reinterpret_cast<ThreadData*>(tls_index_.Get());
+  if (current_thread_data)
+    return;  // Browser tests instigate this.
+  current_thread_data = new ThreadData(suggested_name);
+  tls_index_.Set(current_thread_data);
+}
+
+// static
+// Returns the calling thread's ThreadData, creating (or recycling a retired)
+// worker-thread instance on first use.
+ThreadData* ThreadData::Get() {
+  if (!tls_index_.initialized())
+    return NULL;  // For unittests only.
+  ThreadData* registered = reinterpret_cast<ThreadData*>(tls_index_.Get());
+  if (registered)
+    return registered;
+
+  // We must be a worker thread, since we didn't pre-register.
+  ThreadData* worker_thread_data = NULL;
+  int worker_thread_number = 0;
+  {
+    base::AutoLock lock(*list_lock_.Pointer());
+    if (first_retired_worker_) {
+      // Recycle a ThreadData left behind by a terminated worker thread.
+      worker_thread_data = first_retired_worker_;
+      first_retired_worker_ = first_retired_worker_->next_retired_worker_;
+      worker_thread_data->next_retired_worker_ = NULL;
+    } else {
+      worker_thread_number = ++worker_thread_data_creation_count_;
+    }
+  }
+
+  // If we can't find a previously used instance, then we have to create one.
+  if (!worker_thread_data) {
+    DCHECK_GT(worker_thread_number, 0);
+    worker_thread_data = new ThreadData(worker_thread_number);
+  }
+  DCHECK_GT(worker_thread_data->worker_thread_number_, 0);
+
+  tls_index_.Set(worker_thread_data);
+  return worker_thread_data;
+}
+
+// static
+// TLS destructor callback, invoked as each registered thread exits.
+void ThreadData::OnThreadTermination(void* thread_data) {
+  DCHECK(thread_data);  // TLS should *never* call us with a NULL.
+  // We must NOT do any allocations during this callback.  There is a chance
+  // that the allocator is no longer active on this thread.
+  reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup();
+}
+
+// Bookkeeping at thread exit: counts the cleanup and, for worker threads,
+// pushes this instance onto the retired list for reuse by a future worker.
+void ThreadData::OnThreadTerminationCleanup() {
+  // The list_lock_ was created when we registered the callback, so it won't be
+  // allocated here despite the lazy reference.
+  base::AutoLock lock(*list_lock_.Pointer());
+  if (incarnation_counter_ != incarnation_count_for_pool_)
+    return;  // ThreadData was constructed in an earlier unit test.
+  ++cleanup_count_;
+  // Only worker threads need to be retired and reused.
+  if (!worker_thread_number_) {
+    return;
+  }
+  // We must NOT do any allocations during this callback.
+  // Using the simple linked lists avoids all allocations.
+  DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL));
+  this->next_retired_worker_ = first_retired_worker_;
+  first_retired_worker_ = this;
+}
+
+// static
+// Collects a process-wide snapshot of all births/deaths into
+// |process_data_snapshot|, adding a synthetic "Still_Alive" entry for tasks
+// born but not yet dead.
+void ThreadData::Snapshot(int current_profiling_phase,
+                          ProcessDataSnapshot* process_data_snapshot) {
+  // Get an unchanging copy of a ThreadData list.
+  ThreadData* my_list = ThreadData::first();
+
+  // Gather data serially.
+  // This hackish approach *can* get some slightly corrupt tallies, as we are
+  // grabbing values without the protection of a lock, but it has the advantage
+  // of working even with threads that don't have message loops.  If a user
+  // sees any strangeness, they can always just run their stats gathering a
+  // second time.
+  BirthCountMap birth_counts;
+  for (ThreadData* thread_data = my_list; thread_data;
+       thread_data = thread_data->next()) {
+    thread_data->SnapshotExecutedTasks(current_profiling_phase,
+                                       &process_data_snapshot->phased_snapshots,
+                                       &birth_counts);
+  }
+
+  // Add births that are still active -- i.e. objects that have tallied a birth,
+  // but have not yet tallied a matching death, and hence must be either
+  // running, queued up, or being held in limbo for future posting.
+  auto* current_phase_tasks =
+      &process_data_snapshot->phased_snapshots[current_profiling_phase].tasks;
+  for (const auto& birth_count : birth_counts) {
+    if (birth_count.second > 0) {
+      current_phase_tasks->push_back(
+          TaskSnapshot(BirthOnThreadSnapshot(*birth_count.first),
+                       DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0),
+                       "Still_Alive"));
+    }
+  }
+}
+
+// static
+// Closes profiling phase |profiling_phase| on every registered thread's data.
+void ThreadData::OnProfilingPhaseCompleted(int profiling_phase) {
+  // Get an unchanging copy of a ThreadData list.
+  ThreadData* my_list = ThreadData::first();
+
+  // Add snapshots for all instances of death data in all threads serially.
+  // This hackish approach *can* get some slightly corrupt tallies, as we are
+  // grabbing values without the protection of a lock, but it has the advantage
+  // of working even with threads that don't have message loops.  Any corruption
+  // shouldn't cause "cascading damage" to anything else (in later phases).
+  for (ThreadData* thread_data = my_list; thread_data;
+       thread_data = thread_data->next()) {
+    thread_data->OnProfilingPhaseCompletedOnThread(profiling_phase);
+  }
+}
+
+// Records a birth at |location| on this thread, creating (and deliberately
+// leaking) a Births record on first sighting of the location.
+Births* ThreadData::TallyABirth(const Location& location) {
+  BirthMap::iterator it = birth_map_.find(location);
+  Births* child;
+  if (it != birth_map_.end()) {
+    child = it->second;
+    child->RecordBirth();
+  } else {
+    child = new Births(location, *this);  // Leak this.
+    // Lock since the map may get relocated now, and other threads sometimes
+    // snapshot it (but they lock before copying it).
+    base::AutoLock lock(map_lock_);
+    birth_map_[location] = child;
+  }
+
+  return child;
+}
+
+// Records a death for |births|, stirring the durations into random_number_
+// and tallying them in this thread's DeathData for the birth site.
+void ThreadData::TallyADeath(const Births& births,
+                             int32_t queue_duration,
+                             const TaskStopwatch& stopwatch) {
+  int32_t run_duration = stopwatch.RunDurationMs();
+
+  // Stir in some randomness, plus add constant in case durations are zero.
+  // (2147483647 is 2^31 - 1, a Mersenne prime.)
+  const uint32_t kSomePrimeNumber = 2147483647;
+  random_number_ += queue_duration + run_duration + kSomePrimeNumber;
+  // An address is going to have some randomness to it as well ;-).
+  random_number_ ^=
+      static_cast<uint32_t>(&births - reinterpret_cast<Births*>(0));
+
+  DeathMap::iterator it = death_map_.find(&births);
+  DeathData* death_data;
+  if (it != death_map_.end()) {
+    death_data = &it->second;
+  } else {
+    base::AutoLock lock(map_lock_);  // Lock as the map may get relocated now.
+    death_data = &death_map_[&births];
+  }  // Release lock ASAP.
+  death_data->RecordDeath(queue_duration, run_duration, random_number_);
+}
+
+// static
+// Records a birth at |location| on the calling thread, but only when tracking
+// is active and per-thread data is available; returns NULL otherwise.
+Births* ThreadData::TallyABirthIfActive(const Location& location) {
+  Births* tally = NULL;
+  if (TrackingStatus()) {
+    ThreadData* thread_data = Get();
+    if (thread_data)
+      tally = thread_data->TallyABirth(location);
+  }
+  return tally;
+}
+
+// static
+// Tallies the death of |completed_task| on a named (message-loop) thread.
+// Queue duration is measured from the task's effective post time.
+void ThreadData::TallyRunOnNamedThreadIfTracking(
+    const base::TrackingInfo& completed_task,
+    const TaskStopwatch& stopwatch) {
+  // Even if we have been DEACTIVATED, we will process any pending births so
+  // that our data structures (which counted the outstanding births) remain
+  // consistent.
+  const Births* births = completed_task.birth_tally;
+  if (!births)
+    return;
+  ThreadData* current_thread_data = stopwatch.GetThreadData();
+  if (!current_thread_data)
+    return;
+
+  // Watch out for a race where status_ is changing, and hence one or both
+  // of start_of_run or end_of_run is zero.  In that case, we didn't bother to
+  // get a time value since we "weren't tracking" and we were trying to be
+  // efficient by not calling for a genuine time value.  For simplicity, we'll
+  // use a default zero duration when we can't calculate a true value.
+  TrackedTime start_of_run = stopwatch.StartTime();
+  int32_t queue_duration = 0;
+  if (!start_of_run.is_null()) {
+    queue_duration = (start_of_run - completed_task.EffectiveTimePosted())
+        .InMilliseconds();
+  }
+  current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
+}
+
+// static
+// Tallies the death of a worker-pool task; queue duration is measured from
+// the explicit |time_posted|.
+void ThreadData::TallyRunOnWorkerThreadIfTracking(
+    const Births* births,
+    const TrackedTime& time_posted,
+    const TaskStopwatch& stopwatch) {
+  // Even if we have been DEACTIVATED, we will process any pending births so
+  // that our data structures (which counted the outstanding births) remain
+  // consistent.
+  if (!births)
+    return;
+
+  // TODO(jar): Support the option to coalesce all worker-thread activity under
+  // one ThreadData instance that uses locks to protect *all* access.  This will
+  // reduce memory (making it provably bounded), but run incrementally slower
+  // (since we'll use locks on TallyABirth and TallyADeath).  The good news is
+  // that the locks on TallyADeath will be *after* the worker thread has run,
+  // and hence nothing will be waiting for the completion (... besides some
+  // other thread that might like to run).  Also, the worker threads tasks are
+  // generally longer, and hence the cost of the lock may perchance be amortized
+  // over the long task's lifetime.
+  ThreadData* current_thread_data = stopwatch.GetThreadData();
+  if (!current_thread_data)
+    return;
+
+  TrackedTime start_of_run = stopwatch.StartTime();
+  int32_t queue_duration = 0;
+  if (!start_of_run.is_null()) {
+    queue_duration = (start_of_run - time_posted).InMilliseconds();
+  }
+  current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
+}
+
+// static
+// Tallies the end of a scoped tracked region; such regions are never queued,
+// so queue duration is always zero.
+void ThreadData::TallyRunInAScopedRegionIfTracking(
+    const Births* births,
+    const TaskStopwatch& stopwatch) {
+  // Even if we have been DEACTIVATED, we will process any pending births so
+  // that our data structures (which counted the outstanding births) remain
+  // consistent.
+  if (!births)
+    return;
+
+  ThreadData* current_thread_data = stopwatch.GetThreadData();
+  if (!current_thread_data)
+    return;
+
+  int32_t queue_duration = 0;
+  current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
+}
+
+// Converts this thread's maps into TaskSnapshot entries, computing per-phase
+// deltas from the stored phase-snapshot chain, and tallies outstanding births
+// into |birth_counts|.
+void ThreadData::SnapshotExecutedTasks(
+    int current_profiling_phase,
+    PhasedProcessDataSnapshotMap* phased_snapshots,
+    BirthCountMap* birth_counts) {
+  // Get copy of data, so that the data will not change during the iterations
+  // and processing.
+  BirthMap birth_map;
+  DeathsSnapshot deaths;
+  SnapshotMaps(current_profiling_phase, &birth_map, &deaths);
+
+  for (const auto& birth : birth_map) {
+    (*birth_counts)[birth.second] += birth.second->birth_count();
+  }
+
+  for (const auto& death : deaths) {
+    (*birth_counts)[death.first] -= death.first->birth_count();
+
+    // For the current death data, walk through all its snapshots, starting from
+    // the current one, then from the previous profiling phase etc., and for
+    // each snapshot calculate the delta between the snapshot and the previous
+    // phase, if any.  Store the deltas in the result.
+    for (const DeathDataPhaseSnapshot* phase = &death.second; phase;
+         phase = phase->prev) {
+      const DeathDataSnapshot& death_data =
+          phase->prev ? phase->death_data.Delta(phase->prev->death_data)
+                      : phase->death_data;
+
+      // Skip zero-count deltas; they carry no information for the UI.
+      if (death_data.count > 0) {
+        (*phased_snapshots)[phase->profiling_phase].tasks.push_back(
+            TaskSnapshot(BirthOnThreadSnapshot(*death.first), death_data,
+                         thread_name()));
+      }
+    }
+  }
+}
+
+// This may be called from another thread.
+// Copies the birth map and a phase-snapshot view of the death map under
+// map_lock_.
+void ThreadData::SnapshotMaps(int profiling_phase,
+                              BirthMap* birth_map,
+                              DeathsSnapshot* deaths) {
+  base::AutoLock lock(map_lock_);
+
+  for (const auto& birth : birth_map_)
+    (*birth_map)[birth.first] = birth.second;
+
+  for (const auto& death : death_map_) {
+    deaths->push_back(std::make_pair(
+        death.first,
+        DeathDataPhaseSnapshot(profiling_phase, death.second.count(),
+                               death.second.run_duration_sum(),
+                               death.second.run_duration_max(),
+                               death.second.run_duration_sample(),
+                               death.second.queue_duration_sum(),
+                               death.second.queue_duration_max(),
+                               death.second.queue_duration_sample(),
+                               death.second.last_phase_snapshot())));
+  }
+}
+
+// Closes phase |profiling_phase| on every DeathData owned by this thread.
+void ThreadData::OnProfilingPhaseCompletedOnThread(int profiling_phase) {
+  base::AutoLock lock(map_lock_);
+
+  for (auto& death : death_map_) {
+    death.second.OnProfilingPhaseCompleted(profiling_phase);
+  }
+}
+
+// Performs one-time TLS setup and moves status_ out of UNINITIALIZED.
+// Double-checked under list_lock_ so racing callers are safe.
+void ThreadData::EnsureTlsInitialization() {
+  if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
+    return;  // Someone else did the initialization.
+  // Due to racy lazy initialization in tests, we'll need to recheck status_
+  // after we acquire the lock.
+
+  // Ensure that we don't double initialize tls.  We are called when single
+  // threaded in the product, but some tests may be racy and lazy about our
+  // initialization.
+  base::AutoLock lock(*list_lock_.Pointer());
+  if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
+    return;  // Someone raced in here and beat us.
+
+  // Perform the "real" TLS initialization now, and leave it intact through
+  // process termination.
+  if (!tls_index_.initialized()) {  // Testing may have initialized this.
+    DCHECK_EQ(base::subtle::NoBarrier_Load(&status_), UNINITIALIZED);
+    tls_index_.Initialize(&ThreadData::OnThreadTermination);
+    DCHECK(tls_index_.initialized());
+  } else {
+    // TLS was initialized for us earlier.
+    DCHECK_EQ(base::subtle::NoBarrier_Load(&status_), DORMANT_DURING_TESTS);
+  }
+
+  // Incarnation counter is only significant to testing, as it otherwise will
+  // never again change in this process.
+  ++incarnation_counter_;
+
+  // The lock is not critical for setting status_, but it doesn't hurt.  It also
+  // ensures that if we have a racy initialization, that we'll bail as soon as
+  // we get the lock earlier in this method.
+  base::subtle::Release_Store(&status_, kInitialStartupState);
+  DCHECK(base::subtle::NoBarrier_Load(&status_) != UNINITIALIZED);
+}
+
+// static
+// Initializes (if needed) and sets the tracking status.  Any active variant
+// is normalized to PROFILING_ACTIVE.
+void ThreadData::InitializeAndSetTrackingStatus(Status status) {
+  DCHECK_GE(status, DEACTIVATED);
+  DCHECK_LE(status, PROFILING_ACTIVE);
+
+  EnsureTlsInitialization();  // No-op if already initialized.
+
+  if (status > DEACTIVATED)
+    status = PROFILING_ACTIVE;
+  base::subtle::Release_Store(&status_, status);
+}
+
+// static
+ThreadData::Status ThreadData::status() {
+  return static_cast<ThreadData::Status>(base::subtle::Acquire_Load(&status_));
+}
+
+// static
+// Returns true when any form of tracking (beyond DEACTIVATED) is active.
+bool ThreadData::TrackingStatus() {
+  return base::subtle::Acquire_Load(&status_) > DEACTIVATED;
+}
+
+// static
+// Force-enables profiler timing, overriding any command-line value cached by
+// IsProfilerTimingEnabled().
+void ThreadData::EnableProfilerTiming() {
+  base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, ENABLED_TIMING);
+}
+
+// static
+// Returns the current tracked time, or a null TrackedTime when timing is
+// disabled (so callers pay nearly nothing when profiling is off).
+TrackedTime ThreadData::Now() {
+  if (now_function_for_testing_)
+    return TrackedTime::FromMilliseconds((*now_function_for_testing_)());
+  if (IsProfilerTimingEnabled() && TrackingStatus())
+    return TrackedTime::Now();
+  return TrackedTime();  // Super fast when disabled, or not compiled.
+}
+
+// static
+// Test-only sanity check that thread cleanups actually ran; the real check is
+// currently compiled out (see #if 0 below).
+void ThreadData::EnsureCleanupWasCalled(int /*major_threads_shutdown_count*/) {
+  base::AutoLock lock(*list_lock_.Pointer());
+  if (worker_thread_data_creation_count_ == 0)
+    return;  // We haven't really run much, and couldn't have leaked.
+
+  // TODO(jar): until this is working on XP, don't run the real test.
+#if 0
+  // Verify that we've at least shutdown/cleanup the major named threads.  The
+  // caller should tell us how many thread shutdowns should have taken place by
+  // now.
+  CHECK_GT(cleanup_count_, major_threads_shutdown_count);
+#endif
+}
+
+// static
+// Test-only teardown: deactivates tracking, detaches all global lists, and
+// either leaks the recovered structures (|leak| == true, with suppression
+// annotations) or recursively deletes them.
+void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
+  // This is only called from test code, where we need to cleanup so that
+  // additional tests can be run.
+  // We must be single threaded... but be careful anyway.
+  InitializeAndSetTrackingStatus(DEACTIVATED);
+
+  ThreadData* thread_data_list;
+  {
+    base::AutoLock lock(*list_lock_.Pointer());
+    thread_data_list = all_thread_data_list_head_;
+    all_thread_data_list_head_ = NULL;
+    ++incarnation_counter_;
+    // To be clean, break apart the retired worker list (though we leak them).
+    while (first_retired_worker_) {
+      ThreadData* worker = first_retired_worker_;
+      CHECK_GT(worker->worker_thread_number_, 0);
+      first_retired_worker_ = worker->next_retired_worker_;
+      worker->next_retired_worker_ = NULL;
+    }
+  }
+
+  // Put most global static back in pristine shape.
+  worker_thread_data_creation_count_ = 0;
+  cleanup_count_ = 0;
+  tls_index_.Set(NULL);
+  // Almost UNINITIALIZED.
+  base::subtle::Release_Store(&status_, DORMANT_DURING_TESTS);
+
+  // To avoid any chance of racing in unit tests, which is the only place we
+  // call this function, we may sometimes leak all the data structures we
+  // recovered, as they may still be in use on threads from prior tests!
+  if (leak) {
+    ThreadData* thread_data = thread_data_list;
+    while (thread_data) {
+      ANNOTATE_LEAKING_OBJECT_PTR(thread_data);
+      thread_data = thread_data->next();
+    }
+    return;
+  }
+
+  // When we want to cleanup (on a single thread), here is what we do.
+
+  // Do actual recursive delete in all ThreadData instances.
+  while (thread_data_list) {
+    ThreadData* next_thread_data = thread_data_list;
+    thread_data_list = thread_data_list->next();
+
+    for (BirthMap::iterator it = next_thread_data->birth_map_.begin();
+         next_thread_data->birth_map_.end() != it; ++it)
+      delete it->second;  // Delete the Birth Records.
+    delete next_thread_data;  // Includes all Death Records.
+  }
+}
+
+//------------------------------------------------------------------------------
+// Measures a task's wallclock run time, excluding time spent in nested
+// stopwatches (child tasks run inside this one).  State transitions
+// (CREATED -> RUNNING -> STOPPED) are checked only in DCHECK builds.
+TaskStopwatch::TaskStopwatch()
+    : wallclock_duration_ms_(0),
+      current_thread_data_(NULL),
+      excluded_duration_ms_(0),
+      parent_(NULL) {
+#if DCHECK_IS_ON()
+  state_ = CREATED;
+  child_ = NULL;
+#endif
+}
+
+TaskStopwatch::~TaskStopwatch() {
+#if DCHECK_IS_ON()
+  DCHECK(state_ != RUNNING);
+  DCHECK(child_ == NULL);
+#endif
+}
+
+// Starts timing and links this stopwatch under any already-running stopwatch
+// on this thread, forming a parent/child chain.
+void TaskStopwatch::Start() {
+#if DCHECK_IS_ON()
+  DCHECK(state_ == CREATED);
+  state_ = RUNNING;
+#endif
+
+  start_time_ = ThreadData::Now();
+
+  current_thread_data_ = ThreadData::Get();
+  if (!current_thread_data_)
+    return;
+
+  parent_ = current_thread_data_->current_stopwatch_;
+#if DCHECK_IS_ON()
+  if (parent_) {
+    DCHECK(parent_->state_ == RUNNING);
+    DCHECK(parent_->child_ == NULL);
+    parent_->child_ = this;
+  }
+#endif
+  current_thread_data_->current_stopwatch_ = this;
+}
+
+// Stops timing, computes the wallclock duration (zero if either endpoint is
+// null, e.g. when timing was disabled), and credits this duration to the
+// parent stopwatch's excluded time.
+void TaskStopwatch::Stop() {
+  const TrackedTime end_time = ThreadData::Now();
+#if DCHECK_IS_ON()
+  DCHECK(state_ == RUNNING);
+  state_ = STOPPED;
+  DCHECK(child_ == NULL);
+#endif
+
+  if (!start_time_.is_null() && !end_time.is_null()) {
+    wallclock_duration_ms_ = (end_time - start_time_).InMilliseconds();
+  }
+
+  if (!current_thread_data_)
+    return;
+
+  DCHECK(current_thread_data_->current_stopwatch_ == this);
+  current_thread_data_->current_stopwatch_ = parent_;
+  if (!parent_)
+    return;
+
+#if DCHECK_IS_ON()
+  DCHECK(parent_->state_ == RUNNING);
+  DCHECK(parent_->child_ == this);
+  parent_->child_ = NULL;
+#endif
+  parent_->excluded_duration_ms_ += wallclock_duration_ms_;
+  parent_ = NULL;
+}
+
+TrackedTime TaskStopwatch::StartTime() const {
+#if DCHECK_IS_ON()
+  DCHECK(state_ != CREATED);
+#endif
+
+  return start_time_;
+}
+
+// Returns the task's own run time: wallclock minus time spent in nested
+// (child) stopwatches.  Valid only after Stop().
+int32_t TaskStopwatch::RunDurationMs() const {
+#if DCHECK_IS_ON()
+  DCHECK(state_ == STOPPED);
+#endif
+
+  return wallclock_duration_ms_ - excluded_duration_ms_;
+}
+
+ThreadData* TaskStopwatch::GetThreadData() const {
+#if DCHECK_IS_ON()
+  DCHECK(state_ != CREATED);
+#endif
+
+  return current_thread_data_;
+}
+
+//------------------------------------------------------------------------------
+// DeathDataPhaseSnapshot
+
+// Node in the singly-linked (newest-first) chain of per-phase death-data
+// snapshots; |prev| points at the previous phase's node, or NULL.
+DeathDataPhaseSnapshot::DeathDataPhaseSnapshot(
+    int profiling_phase,
+    int count,
+    int32_t run_duration_sum,
+    int32_t run_duration_max,
+    int32_t run_duration_sample,
+    int32_t queue_duration_sum,
+    int32_t queue_duration_max,
+    int32_t queue_duration_sample,
+    const DeathDataPhaseSnapshot* prev)
+    : profiling_phase(profiling_phase),
+      death_data(count,
+                 run_duration_sum,
+                 run_duration_max,
+                 run_duration_sample,
+                 queue_duration_sum,
+                 queue_duration_max,
+                 queue_duration_sample),
+      prev(prev) {}
+
+//------------------------------------------------------------------------------
+// TaskSnapshot
+
+TaskSnapshot::TaskSnapshot() {
+}
+
+// Bundles one birth site with its (possibly delta'd) death data and the name
+// of the thread the deaths occurred on.
+TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth,
+                           const DeathDataSnapshot& death_data,
+                           const std::string& death_thread_name)
+    : birth(birth),
+      death_data(death_data),
+      death_thread_name(death_thread_name) {
+}
+
+TaskSnapshot::~TaskSnapshot() {
+}
+
+//------------------------------------------------------------------------------
+// ProcessDataPhaseSnapshot
+
+ProcessDataPhaseSnapshot::ProcessDataPhaseSnapshot() {
+}
+
+ProcessDataPhaseSnapshot::ProcessDataPhaseSnapshot(
+    const ProcessDataPhaseSnapshot& other) = default;
+
+ProcessDataPhaseSnapshot::~ProcessDataPhaseSnapshot() {
+}
+
+//------------------------------------------------------------------------------
+// ProcessDataSnapshot
+
+// Captures the current process id, except on NaCl where process ids are not
+// available.
+ProcessDataSnapshot::ProcessDataSnapshot()
+#if !defined(OS_NACL)
+    : process_id(base::GetCurrentProcId()) {
+#else
+    : process_id(base::kNullProcessId) {
+#endif
+}
+
+ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) =
+    default;
+
+ProcessDataSnapshot::~ProcessDataSnapshot() {
+}
+
+} // namespace tracked_objects
diff --git a/libchrome/base/tracked_objects.h b/libchrome/base/tracked_objects.h
new file mode 100644
index 0000000..7ef0317
--- /dev/null
+++ b/libchrome/base/tracked_objects.h
@@ -0,0 +1,820 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACKED_OBJECTS_H_
+#define BASE_TRACKED_OBJECTS_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <set>
+#include <stack>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/containers/hash_tables.h"
+#include "base/gtest_prod_util.h"
+#include "base/lazy_instance.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#include "base/profiler/tracked_time.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
+#include "base/threading/thread_local_storage.h"
+
+namespace base {
+struct TrackingInfo;
+}
+
+// TrackedObjects provides a database of stats about objects (generally Tasks)
+// that are tracked. Tracking means their birth, death, duration, birth thread,
+// death thread, and birth place are recorded. This data is carefully spread
+// across a series of objects so that the counts and times can be rapidly
+// updated without (usually) having to lock the data, and hence there is usually
+// very little contention caused by the tracking. The data can be viewed via
+// the about:profiler URL, with a variety of sorting and filtering choices.
+//
+// These classes serve as the basis of a profiler of sorts for the Tasks system.
+// As a result, design decisions were made to maximize speed, by minimizing
+// recurring allocation/deallocation, lock contention and data copying. In the
+// "stable" state, which is reached relatively quickly, there is no separate
+// marginal allocation cost associated with construction or destruction of
+// tracked objects, no locks are generally employed, and probably the largest
+// computational cost is associated with obtaining start and stop times for
+// instances as they are created and destroyed.
+//
+// The following describes the life cycle of tracking an instance.
+//
+// First off, when the instance is created, the FROM_HERE macro is expanded
+// to specify the birth place (file, line, function) where the instance was
+// created. That data is used to create a transient Location instance
+// encapsulating the above triple of information. The strings (like __FILE__)
+// are passed around by reference, with the assumption that they are static, and
+// will never go away. This ensures that the strings can be dealt with as atoms
+// with great efficiency (i.e., copying of strings is never needed, and
+// comparisons for equality can be based on pointer comparisons).
+//
+// Next, a Births instance is created for use ONLY on the thread where this
+// instance was created. That Births instance records (in a base class
+// BirthOnThread) references to the static data provided in a Location instance,
+// as well as a pointer specifying the thread on which the birth takes place.
+// Hence there is at most one Births instance for each Location on each thread.
+// The derived Births class contains slots for recording statistics about all
+// instances born at the same location. Statistics currently include only the
+// count of instances constructed.
+//
+// Since the base class BirthOnThread contains only constant data, it can be
+// freely accessed by any thread at any time (i.e., only the statistic needs to
+// be handled carefully, and stats are updated exclusively on the birth thread).
+//
+// For Tasks, having now either constructed or found the Births instance
+// described above, a pointer to the Births instance is then recorded into the
+// PendingTask structure in MessageLoop. This fact alone is very useful in
+// debugging, when there is a question of where an instance came from. In
+// addition, the birth time is also recorded and used to later evaluate the
+// lifetime duration of the whole Task. As a result of the above embedding, we
+// can find out a Task's location of birth, and thread of birth, without using
+// any locks, as all that data is constant across the life of the process.
+//
+// The above work *could* also be done for any other object as well by calling
+// TallyABirthIfActive() and TallyRunOnNamedThreadIfTracking() as appropriate.
+//
+// The amount of memory used in the above data structures depends on how many
+// threads there are, and how many Locations of construction there are.
+// Fortunately, we don't use memory that is the product of those two counts, but
+// rather we only need one Births instance for each thread that constructs an
+// instance at a Location. In many cases, instances are only created on one
+// thread, so the memory utilization is actually fairly restrained.
+//
+// Lastly, when an instance is deleted, the final tallies of statistics are
+// carefully accumulated. That tallying writes into slots (members) in a
+// collection of DeathData instances. For each birth place Location that is
+// destroyed on a thread, there is a DeathData instance to record the additional
+// death count, as well as accumulate the run-time and queue-time durations for
+// the instance as it is destroyed (dies). By maintaining a single place to
+// aggregate this running sum *only* for the given thread, we avoid the need to
+// lock such DeathData instances. (i.e., these accumulated stats in a DeathData
+// instance are exclusively updated by the singular owning thread).
+//
+// With the above life cycle description complete, the major remaining detail
+// is explaining how each thread maintains a list of DeathData instances, and
+// of Births instances, and is able to avoid additional (redundant/unnecessary)
+// allocations.
+//
+// Each thread maintains a list of data items specific to that thread in a
+// ThreadData instance (for that specific thread only). The two critical items
+// are lists of DeathData and Births instances. These lists are maintained in
+// STL maps, which are indexed by Location. As noted earlier, we can compare
+// locations very efficiently as we consider the underlying data (file,
+// function, line) to be atoms, and hence pointer comparison is used rather than
+// (slow) string comparisons.
+//
+// To provide a mechanism for iterating over all "known threads," which means
+// threads that have recorded a birth or a death, we create a singly linked list
+// of ThreadData instances. Each such instance maintains a pointer to the next
+// one. A static member of ThreadData provides a pointer to the first item on
+// this global list, and access via that all_thread_data_list_head_ item
+// requires the use of the list_lock_.
+// When a new ThreadData instance is added to the global list, it is pre-pended,
+// which ensures that any prior acquisition of the list is valid (i.e., the
+// holder can iterate over it without fear of it changing, or the necessity of
+// using an additional lock). Iterations are actually pretty rare (used
+// primarily for cleanup, or snapshotting data for display), so this lock has
+// very little global performance impact.
+//
+// The above description tries to define the high performance (run time)
+// portions of these classes. After gathering statistics, calls instigated
+// by visiting about:profiler will assemble and aggregate data for display. The
+// following data structures are used for producing such displays. They are
+// not performance critical, and their only major constraint is that they should
+// be able to run concurrently with ongoing augmentation of the birth and death
+// data.
+//
+// This header also exports collection of classes that provide "snapshotted"
+// representations of the core tracked_objects:: classes. These snapshotted
+// representations are designed for safe transmission of the tracked_objects::
+// data across process boundaries. Each consists of:
+// (1) a default constructor, to support the IPC serialization macros,
+// (2) a constructor that extracts data from the type being snapshotted, and
+// (3) the snapshotted data.
+//
+// For a given birth location, information about births is spread across data
+// structures that are asynchronously changing on various threads. For
+// serialization and display purposes, we need to construct TaskSnapshot
+// instances for each combination of birth thread, death thread, and location,
+// along with the count of such lifetimes. We gather such data into a
+// TaskSnapshot instances, so that such instances can be sorted and
+// aggregated (and remain frozen during our processing).
+//
+// Profiling consists of phases. The concrete phase in the sequence of phases
+// is identified by its 0-based index.
+//
+// The ProcessDataPhaseSnapshot struct is a serialized representation of the
+// list of ThreadData objects for a process for a concrete profiling phase. It
+// holds a set of TaskSnapshots. The statistics in a snapshot are gathered
+// asynchronously relative to their ongoing updates.
+// It is possible, though highly unlikely, that stats could be incorrectly
+// recorded by this process (all data is held in 32 bit ints, but we are not
+// atomically collecting all data, so we could have count that does not, for
+// example, match with the number of durations we accumulated). The advantage
+// to having fast (non-atomic) updates of the data outweighs the minimal risk of
+// a singular corrupt statistic snapshot (only the snapshot could be corrupt,
+// not the underlying and ongoing statistic). In contrast, pointer data that
+// is accessed during snapshotting is completely invariant, and hence is
+// perfectly acquired (i.e., no potential corruption, and no risk of a bad
+// memory reference).
+//
+// TODO(jar): We can implement a Snapshot system that *tries* to grab the
+// snapshots on the source threads *when* they have MessageLoops available
+// (worker threads don't have message loops generally, and hence gathering from
+// them will continue to be asynchronous). We had an implementation of this in
+// the past, but the difficulty is dealing with message loops being terminated.
+// We can *try* to spam the available threads via some task runner to
+// achieve this feat, and it *might* be valuable when we are collecting data
+// for upload via UMA (where correctness of data may be more significant than
+// for a single screen of about:profiler).
+//
+// TODO(jar): We need to store DataCollections, and provide facilities for
+// taking the difference between two gathered DataCollections. For now, we're
+// just adding a hack that Reset()s to zero all counts and stats. This is also
+// done in a slightly thread-unsafe fashion, as the resetting is done
+// asynchronously relative to ongoing updates (but all data is 32 bit in size).
+// For basic profiling, this will work "most of the time," and should be
+// sufficient... but storing away DataCollections is the "right way" to do this.
+// We'll accomplish this via JavaScript storage of snapshots, and then we'll
+// remove the Reset() methods. We may also need a short-term-max value in
+// DeathData that is reset (as synchronously as possible) during each snapshot.
+// This will facilitate displaying a max value for each snapshot period.
+
+namespace tracked_objects {
+
+//------------------------------------------------------------------------------
+// For a specific thread, and a specific birth place, the collection of all
+// death info (with tallies for each death thread, to prevent access conflicts).
+class ThreadData;
+class BASE_EXPORT BirthOnThread {
+ public:
+ BirthOnThread(const Location& location, const ThreadData& current);
+
+ // Both members below are const, so these accessors are safe to call from
+ // any thread.
+ const Location& location() const { return location_; }
+ const ThreadData* birth_thread() const { return birth_thread_; }
+
+ private:
+ // File/lineno of birth. This defines the essence of the task, as the context
+ // of the birth (construction) often tells what the item is for. This field
+ // is const, and hence safe to access from any thread.
+ const Location location_;
+
+ // The thread that records births into this object. Only this thread is
+ // allowed to update birth_count_ (which changes over time).
+ const ThreadData* const birth_thread_;
+
+ DISALLOW_COPY_AND_ASSIGN(BirthOnThread);
+};
+
+//------------------------------------------------------------------------------
+// A "snapshotted" representation of the BirthOnThread class.
+
+struct BASE_EXPORT BirthOnThreadSnapshot {
+ BirthOnThreadSnapshot();
+ explicit BirthOnThreadSnapshot(const BirthOnThread& birth);
+ ~BirthOnThreadSnapshot();
+
+ LocationSnapshot location; // Snapshot of the birth Location.
+ std::string thread_name; // Name of the thread the birth occurred on.
+};
+
+//------------------------------------------------------------------------------
+// A class for accumulating counts of births (without bothering with a map<>).
+
+class BASE_EXPORT Births: public BirthOnThread {
+ public:
+ Births(const Location& location, const ThreadData& current);
+
+ // Returns the current tally of births recorded at this location.
+ int birth_count() const;
+
+ // When we have a birth we update the count for this birthplace.
+ void RecordBirth();
+
+ private:
+ // The number of births on this thread for our location_.
+ int birth_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(Births);
+};
+
+//------------------------------------------------------------------------------
+// A "snapshotted" representation of the DeathData class.
+
+struct BASE_EXPORT DeathDataSnapshot {
+ DeathDataSnapshot();
+
+ // Constructs the snapshot from individual values.
+ // The alternative would be taking a DeathData parameter, but this would
+ // create a loop since DeathData indirectly refers to DeathDataSnapshot.
+ // Passing a wrapper structure as a param or using an empty constructor for
+ // snapshotting DeathData would be less efficient.
+ DeathDataSnapshot(int count,
+ int32_t run_duration_sum,
+ int32_t run_duration_max,
+ int32_t run_duration_sample,
+ int32_t queue_duration_sum,
+ int32_t queue_duration_max,
+ int32_t queue_duration_sample);
+ ~DeathDataSnapshot();
+
+ // Calculates and returns the delta between this snapshot and an earlier
+ // snapshot of the same task |older|.
+ DeathDataSnapshot Delta(const DeathDataSnapshot& older) const;
+
+ // Snapshotted death statistics; see DeathData for the field semantics.
+ int count;
+ int32_t run_duration_sum;
+ int32_t run_duration_max;
+ int32_t run_duration_sample;
+ int32_t queue_duration_sum;
+ int32_t queue_duration_max;
+ int32_t queue_duration_sample;
+};
+
+//------------------------------------------------------------------------------
+// A "snapshotted" representation of the DeathData for a particular profiling
+// phase. Used as an element of the list of phase snapshots owned by DeathData.
+
+struct DeathDataPhaseSnapshot {
+ DeathDataPhaseSnapshot(int profiling_phase,
+ int count,
+ int32_t run_duration_sum,
+ int32_t run_duration_max,
+ int32_t run_duration_sample,
+ int32_t queue_duration_sum,
+ int32_t queue_duration_max,
+ int32_t queue_duration_sample,
+ const DeathDataPhaseSnapshot* prev);
+
+ // Profiling phase at whose completion this snapshot was taken.
+ int profiling_phase;
+
+ // Death data snapshot.
+ DeathDataSnapshot death_data;
+
+ // Pointer to a snapshot from the previous phase, or null if there is none.
+ // The owning DeathData owns the whole chain starting at its head pointer.
+ const DeathDataPhaseSnapshot* prev;
+};
+
+//------------------------------------------------------------------------------
+// Information about deaths of a task on a given thread, called "death thread".
+// Access to members of this class is never protected by a lock. The fields
+// are accessed in such a way that corruptions resulting from race conditions
+// are not significant, and don't accumulate as a result of multiple accesses.
+// All invocations of DeathData::OnProfilingPhaseCompleted and
+// ThreadData::SnapshotMaps (which takes DeathData snapshot) in a given process
+// must be called from the same thread. It doesn't matter what thread it is, but
+// it's important the same thread is used as a snapshot thread during the whole
+// process lifetime. All fields except sample_probability_count_ can be
+// snapshotted.
+
+class BASE_EXPORT DeathData {
+ public:
+ DeathData();
+ DeathData(const DeathData& other);
+ ~DeathData();
+
+ // Update stats for a task destruction (death) that had a Run() time of
+ // |duration|, and has had a queueing delay of |queue_duration|.
+ // |random_number| drives the probabilistic choice of which death's durations
+ // are retained as the representative samples.
+ void RecordDeath(const int32_t queue_duration,
+ const int32_t run_duration,
+ const uint32_t random_number);
+
+ // Accessors for metrics and past snapshots, used only for serialization and
+ // in tests.
+ int count() const { return base::subtle::NoBarrier_Load(&count_); }
+ int32_t run_duration_sum() const {
+ return base::subtle::NoBarrier_Load(&run_duration_sum_);
+ }
+ int32_t run_duration_max() const {
+ return base::subtle::NoBarrier_Load(&run_duration_max_);
+ }
+ int32_t run_duration_sample() const {
+ return base::subtle::NoBarrier_Load(&run_duration_sample_);
+ }
+ int32_t queue_duration_sum() const {
+ return base::subtle::NoBarrier_Load(&queue_duration_sum_);
+ }
+ int32_t queue_duration_max() const {
+ return base::subtle::NoBarrier_Load(&queue_duration_max_);
+ }
+ int32_t queue_duration_sample() const {
+ return base::subtle::NoBarrier_Load(&queue_duration_sample_);
+ }
+ const DeathDataPhaseSnapshot* last_phase_snapshot() const {
+ return last_phase_snapshot_;
+ }
+
+ // Called when the current profiling phase, identified by |profiling_phase|,
+ // ends.
+ // Must be called only on the snapshot thread.
+ void OnProfilingPhaseCompleted(int profiling_phase);
+
+ private:
+ // Members are ordered from most regularly read and updated, to least
+ // frequently used. This might help a bit with cache lines.
+ // Number of runs seen (divisor for calculating averages).
+ // Can be incremented only on the death thread.
+ base::subtle::Atomic32 count_;
+
+ // Count used in determining probability of selecting exec/queue times from a
+ // recorded death as samples.
+ // Gets incremented only on the death thread, but can be set to 0 by
+ // OnProfilingPhaseCompleted() on the snapshot thread.
+ base::subtle::Atomic32 sample_probability_count_;
+
+ // Basic tallies, used to compute averages. Can be incremented only on the
+ // death thread.
+ base::subtle::Atomic32 run_duration_sum_;
+ base::subtle::Atomic32 queue_duration_sum_;
+ // Max values, used by local visualization routines. These are often read,
+ // but rarely updated. The max values get assigned only on the death thread,
+ // but these fields can be set to 0 by OnProfilingPhaseCompleted() on the
+ // snapshot thread.
+ base::subtle::Atomic32 run_duration_max_;
+ base::subtle::Atomic32 queue_duration_max_;
+ // Samples, used by crowd sourcing gatherers. These are almost never read,
+ // and rarely updated. They can be modified only on the death thread.
+ base::subtle::Atomic32 run_duration_sample_;
+ base::subtle::Atomic32 queue_duration_sample_;
+
+ // Snapshot of this death data made at the last profiling phase completion, if
+ // any. DeathData owns the whole list starting with this pointer.
+ // Can be accessed only on the snapshot thread.
+ const DeathDataPhaseSnapshot* last_phase_snapshot_;
+
+ DISALLOW_ASSIGN(DeathData);
+};
+
+//------------------------------------------------------------------------------
+// A temporary collection of data that can be sorted and summarized. It is
+// gathered (carefully) from many threads. Instances are held in arrays and
+// processed, filtered, and rendered.
+// The source of this data was collected on many threads, and is asynchronously
+// changing. The data in this instance is not asynchronously changing.
+
+struct BASE_EXPORT TaskSnapshot {
+ TaskSnapshot();
+ TaskSnapshot(const BirthOnThreadSnapshot& birth,
+ const DeathDataSnapshot& death_data,
+ const std::string& death_thread_name);
+ ~TaskSnapshot();
+
+ BirthOnThreadSnapshot birth;
+ // Delta between death data for a thread for a certain profiling phase and the
+ // snapshot for the previous phase, if any. Otherwise, just a snapshot.
+ DeathDataSnapshot death_data;
+ std::string death_thread_name; // Name of the thread the task died on.
+};
+
+//------------------------------------------------------------------------------
+// For each thread, we have a ThreadData that stores all tracking info generated
+// on this thread. This prevents the need for locking as data accumulates.
+// We use ThreadLocalStorage to quickly identify the current ThreadData context.
+// We also have a linked list of ThreadData instances, and that list is used to
+// harvest data from all existing instances.
+
+struct ProcessDataPhaseSnapshot;
+struct ProcessDataSnapshot;
+class BASE_EXPORT TaskStopwatch;
+
+// Map from profiling phase number to the process-wide snapshotted
+// representation of the list of ThreadData objects that died during the given
+// phase.
+typedef std::map<int, ProcessDataPhaseSnapshot> PhasedProcessDataSnapshotMap;
+
+class BASE_EXPORT ThreadData {
+ public:
+ // Current allowable states of the tracking system. The states can vary
+ // between ACTIVE and DEACTIVATED, but can never go back to UNINITIALIZED.
+ enum Status {
+ UNINITIALIZED, // Pristine, link-time state before running.
+ DORMANT_DURING_TESTS, // Only used during testing.
+ DEACTIVATED, // No longer recording profiling.
+ PROFILING_ACTIVE, // Recording profiles.
+ STATUS_LAST = PROFILING_ACTIVE
+ };
+
+ typedef base::hash_map<Location, Births*, Location::Hash> BirthMap;
+ typedef std::map<const Births*, DeathData> DeathMap;
+
+ // Initialize the current thread context with a new instance of ThreadData.
+ // This is used by all threads that have names, and should be explicitly
+ // set *before* any births on the threads have taken place. It is generally
+ // only used by the message loop, which has a well defined thread name.
+ static void InitializeThreadContext(const std::string& suggested_name);
+
+ // Using Thread Local Store, find the current instance for collecting data.
+ // If an instance does not exist, construct one (and remember it for use on
+ // this thread).
+ // This may return NULL if the system is disabled for any reason.
+ static ThreadData* Get();
+
+ // Fills |process_data_snapshot| with phased snapshots of all profiling
+ // phases, including the current one, identified by |current_profiling_phase|.
+ // |current_profiling_phase| is necessary because a child process can start
+ // after several phase-changing events, so it needs to receive the current
+ // phase number from the browser process to fill the correct entry for the
+ // current phase in the |process_data_snapshot| map.
+ static void Snapshot(int current_profiling_phase,
+ ProcessDataSnapshot* process_data_snapshot);
+
+ // Called when the current profiling phase, identified by |profiling_phase|,
+ // ends.
+ // |profiling_phase| is necessary because a child process can start after
+ // several phase-changing events, so it needs to receive the phase number from
+ // the browser process to fill the correct entry in the
+ // completed_phases_snapshots_ map.
+ static void OnProfilingPhaseCompleted(int profiling_phase);
+
+ // Finds (or creates) a place to count births from the given location in this
+ // thread, and increment that tally.
+ // TallyABirthIfActive will return NULL if the birth cannot be tallied.
+ static Births* TallyABirthIfActive(const Location& location);
+
+ // Records the end of a timed run of an object. The |completed_task| contains
+ // a pointer to a Births, the time_posted, and a delayed_start_time if any.
+ // The |start_of_run| indicates when we started to perform the run of the
+ // task. The delayed_start_time is non-null for tasks that were posted as
+ // delayed tasks, and it indicates when the task should have run (i.e., when
+ // it should have posted out of the timer queue, and into the work queue).
+ // The |end_of_run| was just obtained by a call to Now() (just after the task
+ // finished). It is provided as an argument to help with testing.
+ static void TallyRunOnNamedThreadIfTracking(
+ const base::TrackingInfo& completed_task,
+ const TaskStopwatch& stopwatch);
+
+ // Record the end of a timed run of an object. The |birth| is the record for
+ // the instance, the |time_posted| records that instant, which is presumed to
+ // be when the task was posted into a queue to run on a worker thread.
+ // The |start_of_run| is when the worker thread started to perform the run of
+ // the task.
+ // The |end_of_run| was just obtained by a call to Now() (just after the task
+ // finished).
+ static void TallyRunOnWorkerThreadIfTracking(const Births* births,
+ const TrackedTime& time_posted,
+ const TaskStopwatch& stopwatch);
+
+ // Record the end of execution in region, generally corresponding to a scope
+ // being exited.
+ static void TallyRunInAScopedRegionIfTracking(const Births* births,
+ const TaskStopwatch& stopwatch);
+
+ const std::string& thread_name() const { return thread_name_; }
+
+ // Initializes all statics if needed (this initialization call should be made
+ // while we are single threaded).
+ static void EnsureTlsInitialization();
+
+ // Sets internal status_.
+ // If |status| is false, then status_ is set to DEACTIVATED.
+ // If |status| is true, then status_ is set to PROFILING_ACTIVE.
+ static void InitializeAndSetTrackingStatus(Status status);
+
+ static Status status();
+
+ // Indicate if any sort of profiling is being done (i.e., we are more than
+ // DEACTIVATED).
+ static bool TrackingStatus();
+
+ // Enables profiler timing.
+ static void EnableProfilerTiming();
+
+ // Provide a time function that does nothing (runs fast) when we don't have
+ // the profiler enabled. It will generally be optimized away when it is
+ // ifdef'ed to be small enough (allowing the profiler to be "compiled out" of
+ // the code).
+ static TrackedTime Now();
+
+ // This function can be called at process termination to validate that thread
+ // cleanup routines have been called for at least some number of named
+ // threads.
+ static void EnsureCleanupWasCalled(int major_threads_shutdown_count);
+
+ private:
+ friend class TaskStopwatch;
+ // Allow only tests to call ShutdownSingleThreadedCleanup. We NEVER call it
+ // in production code.
+ // TODO(jar): Make this a friend in DEBUG only, so that the optimizer has a
+ // better change of optimizing (inlining? etc.) private methods (knowing that
+ // there will be no need for an external entry point).
+ friend class TrackedObjectsTest;
+ FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, MinimalStartupShutdown);
+ FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, TinyStartupShutdown);
+
+ // Type for an alternate timer function (testing only).
+ typedef unsigned int NowFunction();
+
+ typedef std::map<const BirthOnThread*, int> BirthCountMap;
+ typedef std::vector<std::pair<const Births*, DeathDataPhaseSnapshot>>
+ DeathsSnapshot;
+
+ // Worker thread construction creates a name since there is none.
+ explicit ThreadData(int thread_number);
+
+ // Message loop based construction should provide a name.
+ explicit ThreadData(const std::string& suggested_name);
+
+ ~ThreadData();
+
+ // Push this instance to the head of all_thread_data_list_head_, linking it to
+ // the previous head. This is performed after each construction, and leaves
+ // the instance permanently on that list.
+ void PushToHeadOfList();
+
+ // (Thread safe) Get start of list of all ThreadData instances using the lock.
+ static ThreadData* first();
+
+ // Iterate through the null terminated list of ThreadData instances.
+ ThreadData* next() const;
+
+
+ // In this thread's data, record a new birth.
+ Births* TallyABirth(const Location& location);
+
+ // Find a place to record a death on this thread.
+ void TallyADeath(const Births& births,
+ int32_t queue_duration,
+ const TaskStopwatch& stopwatch);
+
+ // Snapshots (under a lock) the profiled data for the tasks for this thread
+ // and writes all of the executed tasks' data -- i.e. the data for all
+ // profiling phases (including the current one: |current_profiling_phase|) for
+ // the tasks with entries in the death_map_ -- into |phased_snapshots|.
+ // Also updates the |birth_counts| tally for each task to keep track of the
+ // number of living instances of the task -- that is, each task maps to the
+ // number of births for the task that have not yet been balanced by a death.
+ void SnapshotExecutedTasks(int current_profiling_phase,
+ PhasedProcessDataSnapshotMap* phased_snapshots,
+ BirthCountMap* birth_counts);
+
+ // Using our lock, make a copy of the specified maps. This call may be made
+ // on non-local threads, which necessitate the use of the lock to prevent
+ // the map(s) from being reallocated while they are copied.
+ void SnapshotMaps(int profiling_phase,
+ BirthMap* birth_map,
+ DeathsSnapshot* deaths);
+
+ // Called for this thread when the current profiling phase, identified by
+ // |profiling_phase|, ends.
+ void OnProfilingPhaseCompletedOnThread(int profiling_phase);
+
+ // This method is called by the TLS system when a thread terminates.
+ // The argument may be NULL if this thread has never tracked a birth or death.
+ static void OnThreadTermination(void* thread_data);
+
+ // This method should be called when a worker thread terminates, so that we
+ // can save all the thread data into a cache of reusable ThreadData instances.
+ void OnThreadTerminationCleanup();
+
+ // Cleans up data structures, and returns statics to near pristine (mostly
+ // uninitialized) state. If there is any chance that other threads are still
+ // using the data structures, then the |leak| argument should be passed in as
+ // true, and the data structures (birth maps, death maps, ThreadData
+ // instances, etc.) will be leaked and not deleted. If you have joined all
+ // threads since the time that InitializeAndSetTrackingStatus() was called,
+ // then you can pass in a |leak| value of false, and this function will
+ // delete recursively all data structures, starting with the list of
+ // ThreadData instances.
+ static void ShutdownSingleThreadedCleanup(bool leak);
+
+ // When non-null, this specifies an external function that supplies a
+ // monotonically increasing time value (testing only).
+ static NowFunction* now_function_for_testing_;
+
+ // We use thread local store to identify which ThreadData to interact with.
+ static base::ThreadLocalStorage::StaticSlot tls_index_;
+
+ // List of ThreadData instances for use with worker threads. When a worker
+ // thread is done (terminated), we push it onto this list. When a new worker
+ // thread is created, we first try to re-use a ThreadData instance from the
+ // list, and if none are available, construct a new one.
+ // This is only accessed while list_lock_ is held.
+ static ThreadData* first_retired_worker_;
+
+ // Link to the most recently created instance (starts a null terminated list).
+ // The list is traversed by about:profiler when it needs to snapshot data.
+ // This is only accessed while list_lock_ is held.
+ static ThreadData* all_thread_data_list_head_;
+
+ // The next available worker thread number. This should only be accessed when
+ // the list_lock_ is held.
+ static int worker_thread_data_creation_count_;
+
+ // The number of times TLS has called us back to cleanup a ThreadData
+ // instance. This is only accessed while list_lock_ is held.
+ static int cleanup_count_;
+
+ // Incarnation sequence number, indicating how many times (during unittests)
+ // we've either transitioned out of UNINITIALIZED, or into that state. This
+ // value is only accessed while the list_lock_ is held.
+ static int incarnation_counter_;
+
+ // Protection for access to all_thread_data_list_head_, and to
+ // unregistered_thread_data_pool_. This lock is leaked at shutdown.
+ // The lock is very infrequently used, so we can afford to just make a lazy
+ // instance and be safe.
+ static base::LazyInstance<base::Lock>::Leaky list_lock_;
+
+ // We set status_ to SHUTDOWN when we shut down the tracking service.
+ static base::subtle::Atomic32 status_;
+
+ // Link to next instance (null terminated list). Used to globally track all
+ // registered instances (corresponds to all registered threads where we keep
+ // data).
+ ThreadData* next_;
+
+ // Pointer to another ThreadData instance for a Worker-Thread that has been
+ // retired (its thread was terminated). This value is non-NULL only for a
+ // retired ThreadData associated with a Worker-Thread.
+ ThreadData* next_retired_worker_;
+
+ // The name of the thread that is being recorded. If this thread has no
+ // message_loop, then this is a worker thread, with a sequence number postfix.
+ std::string thread_name_;
+
+ // Indicate if this is a worker thread, and the ThreadData contexts should be
+ // stored in the unregistered_thread_data_pool_ when not in use.
+ // Value is zero when it is not a worker thread. Value is a positive integer
+ // corresponding to the created thread name if it is a worker thread.
+ int worker_thread_number_;
+
+ // A map used on each thread to keep track of Births on this thread.
+ // This map should only be accessed on the thread it was constructed on.
+ // When a snapshot is needed, this structure can be locked in place for the
+ // duration of the snapshotting activity.
+ BirthMap birth_map_;
+
+ // Similar to birth_map_, this records information about deaths of tracked
+ // instances (i.e., when a tracked instance was destroyed on this thread).
+ // It is locked before changing, and hence other threads may access it by
+ // locking before reading it.
+ DeathMap death_map_;
+
+ // Lock to protect *some* access to BirthMap and DeathMap. The maps are
+ // regularly read and written on this thread, but may only be read from other
+ // threads. To support this, we acquire this lock if we are writing from this
+ // thread, or reading from another thread. For reading from this thread we
+ // don't need a lock, as there is no potential for a conflict since the
+ // writing is only done from this thread.
+ mutable base::Lock map_lock_;
+
+ // A random number that we use to decide which sample to keep as a
+ // representative sample in each DeathData instance. We can't start off with
+ // much randomness (because we can't call RandInt() on all our threads), so
+ // we stir in more and more as we go.
+ uint32_t random_number_;
+
+ // Record of what the incarnation_counter_ was when this instance was created.
+ // If the incarnation_counter_ has changed, then we avoid pushing into the
+ // pool (this is only critical in tests which go through multiple
+ // incarnations).
+ int incarnation_count_for_pool_;
+
+ // Most recently started (i.e. most nested) stopwatch on the current thread,
+ // if it exists; NULL otherwise.
+ TaskStopwatch* current_stopwatch_;
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadData);
+};
+
+//------------------------------------------------------------------------------
+// Stopwatch to measure task run time or simply create a time interval that will
+// be subtracted from the current most nested task's run time. Stopwatches
+// coordinate with the stopwatches in which they are nested to avoid
+// double-counting nested tasks run times.
+
+class BASE_EXPORT TaskStopwatch {
+ public:
+ // Constructs the stopwatch. Timing is controlled explicitly via Start() and
+ // Stop() (see the TaskStopwatchNoStartStop test, which never starts one).
+ TaskStopwatch();
+ ~TaskStopwatch();
+
+ // Starts stopwatch.
+ void Start();
+
+ // Stops stopwatch.
+ void Stop();
+
+ // Returns the start time.
+ TrackedTime StartTime() const;
+
+ // Task's duration is calculated as the wallclock duration between starting
+ // and stopping this stopwatch, minus the wallclock durations of any other
+ // instances that are immediately nested in this one, started and stopped on
+ // this thread during that period.
+ int32_t RunDurationMs() const;
+
+ // Returns tracking info for the current thread.
+ ThreadData* GetThreadData() const;
+
+ private:
+ // Time when the stopwatch was started.
+ TrackedTime start_time_;
+
+ // Wallclock duration of the task.
+ int32_t wallclock_duration_ms_;
+
+ // Tracking info for the current thread.
+ ThreadData* current_thread_data_;
+
+ // Sum of wallclock durations of all stopwatches that were directly nested in
+ // this one.
+ int32_t excluded_duration_ms_;
+
+ // Stopwatch which was running on our thread when this stopwatch was started.
+ // That preexisting stopwatch must be adjusted to exclude the wallclock
+ // duration of this stopwatch.
+ TaskStopwatch* parent_;
+
+#if DCHECK_IS_ON()
+ // State of the stopwatch. Stopwatch is first constructed in a created
+ // state, then is optionally started/stopped, then destructed.
+ enum { CREATED, RUNNING, STOPPED } state_;
+
+ // Currently running stopwatch that is directly nested in this one, if such
+ // stopwatch exists. NULL otherwise.
+ TaskStopwatch* child_;
+#endif
+};
+
+//------------------------------------------------------------------------------
+// A snapshotted representation of the list of ThreadData objects for a process,
+// for a single profiling phase.
+
+struct BASE_EXPORT ProcessDataPhaseSnapshot {
+ public:
+ ProcessDataPhaseSnapshot();
+ ProcessDataPhaseSnapshot(const ProcessDataPhaseSnapshot& other);
+ ~ProcessDataPhaseSnapshot();
+
+ // One entry per tracked task; each holds the task's birth info, death data,
+ // and the name of the thread it died on.
+ std::vector<TaskSnapshot> tasks;
+};
+
+//------------------------------------------------------------------------------
+// A snapshotted representation of the list of ThreadData objects for a process,
+// for all profiling phases, including the current one.
+
+struct BASE_EXPORT ProcessDataSnapshot {
+ public:
+ ProcessDataSnapshot();
+ ProcessDataSnapshot(const ProcessDataSnapshot& other);
+ ~ProcessDataSnapshot();
+
+ // Maps each profiling phase number to that phase's snapshot.
+ PhasedProcessDataSnapshotMap phased_snapshots;
+ // Id of the process this snapshot was captured in.
+ base::ProcessId process_id;
+};
+
+} // namespace tracked_objects
+
+#endif // BASE_TRACKED_OBJECTS_H_
diff --git a/libchrome/base/tracked_objects_unittest.cc b/libchrome/base/tracked_objects_unittest.cc
new file mode 100644
index 0000000..70d9601
--- /dev/null
+++ b/libchrome/base/tracked_objects_unittest.cc
@@ -0,0 +1,1187 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test of classes in the tracked_objects.h classes.
+
+#include "base/tracked_objects.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/process/process_handle.h"
+#include "base/time/time.h"
+#include "base/tracking_info.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Fixed fake source location shared by all tests below.
+const int kLineNumber = 1776;
+const char kFile[] = "FixedUnitTestFileName";
+// Expected thread names: a thread with no initialized context is reported as
+// "WorkerThread-1"; kStillAlive is the death-thread name for births that have
+// not yet been matched with a death (see BirthOnlyToSnapshot* tests).
+const char kWorkerThreadName[] = "WorkerThread-1";
+const char kMainThreadName[] = "SomeMainThreadName";
+const char kStillAlive[] = "Still_Alive";
+
+namespace tracked_objects {
+
+// Fixture for the tracked_objects profiler tests. Installs a fake time source
+// (SetTestTime/GetTestTime) into ThreadData and resets profiler state around
+// each test.
+class TrackedObjectsTest : public testing::Test {
+ protected:
+ TrackedObjectsTest() {
+ // On entry, leak any database structures in case they are still in use by
+ // prior threads.
+ ThreadData::ShutdownSingleThreadedCleanup(true);
+
+ test_time_ = 0;
+ ThreadData::now_function_for_testing_ = &TrackedObjectsTest::GetTestTime;
+ }
+
+ ~TrackedObjectsTest() override {
+ // We should not need to leak any structures we create, since we are
+ // single threaded, and carefully accounting for items.
+ ThreadData::ShutdownSingleThreadedCleanup(false);
+ }
+
+ // Reset the profiler state.
+ void Reset() {
+ ThreadData::ShutdownSingleThreadedCleanup(false);
+ test_time_ = 0;
+ }
+
+ // Simulate a birth on the thread named |thread_name|, at the given
+ // |location|.
+ void TallyABirth(const Location& location, const std::string& thread_name) {
+ // If the |thread_name| is empty, we don't initialize system with a thread
+ // name, so we're viewed as a worker thread.
+ // Fixed: previously this passed kMainThreadName regardless of the
+ // |thread_name| argument. All call sites in this file pass either
+ // kMainThreadName or the empty string, so behavior is unchanged; verify if
+ // new callers are added.
+ if (!thread_name.empty())
+ ThreadData::InitializeThreadContext(thread_name);
+
+ // Do not delete |birth|. We don't own it.
+ Births* birth = ThreadData::TallyABirthIfActive(location);
+
+ // When tracking is deactivated no birth is recorded; otherwise one must be.
+ if (ThreadData::status() == ThreadData::DEACTIVATED)
+ EXPECT_EQ(reinterpret_cast<Births*>(NULL), birth);
+ else
+ EXPECT_NE(reinterpret_cast<Births*>(NULL), birth);
+ }
+
+ // Helper function to verify the most common test expectations: exactly one
+ // phase (phase 0) containing exactly one task, with the given birth/death
+ // thread names, death count and per-death run/queue durations (in ms).
+ void ExpectSimpleProcessData(const ProcessDataSnapshot& process_data,
+ const std::string& function_name,
+ const std::string& birth_thread,
+ const std::string& death_thread,
+ int count,
+ int run_ms,
+ int queue_ms) {
+ ASSERT_EQ(1u, process_data.phased_snapshots.size());
+ auto it = process_data.phased_snapshots.find(0);
+ ASSERT_TRUE(it != process_data.phased_snapshots.end());
+ const ProcessDataPhaseSnapshot& process_data_phase = it->second;
+
+ ASSERT_EQ(1u, process_data_phase.tasks.size());
+
+ EXPECT_EQ(kFile, process_data_phase.tasks[0].birth.location.file_name);
+ EXPECT_EQ(function_name,
+ process_data_phase.tasks[0].birth.location.function_name);
+ EXPECT_EQ(kLineNumber,
+ process_data_phase.tasks[0].birth.location.line_number);
+
+ EXPECT_EQ(birth_thread, process_data_phase.tasks[0].birth.thread_name);
+
+ // Sums scale with |count|; max and sample reflect a single death.
+ EXPECT_EQ(count, process_data_phase.tasks[0].death_data.count);
+ EXPECT_EQ(count * run_ms,
+ process_data_phase.tasks[0].death_data.run_duration_sum);
+ EXPECT_EQ(run_ms, process_data_phase.tasks[0].death_data.run_duration_max);
+ EXPECT_EQ(run_ms,
+ process_data_phase.tasks[0].death_data.run_duration_sample);
+ EXPECT_EQ(count * queue_ms,
+ process_data_phase.tasks[0].death_data.queue_duration_sum);
+ EXPECT_EQ(queue_ms,
+ process_data_phase.tasks[0].death_data.queue_duration_max);
+ EXPECT_EQ(queue_ms,
+ process_data_phase.tasks[0].death_data.queue_duration_sample);
+
+ EXPECT_EQ(death_thread, process_data_phase.tasks[0].death_thread_name);
+
+ EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
+ }
+
+ // Sets time that will be returned by ThreadData::Now().
+ static void SetTestTime(unsigned int test_time) { test_time_ = test_time; }
+
+ private:
+ // Returns test time in milliseconds.
+ static unsigned int GetTestTime() { return test_time_; }
+
+ // Test time in milliseconds.
+ static unsigned int test_time_;
+};
+
+// static
+// Out-of-line definition of the fixture's fake clock. Zero-initialized by
+// static storage duration, and reset to 0 in the fixture constructor.
+unsigned int TrackedObjectsTest::test_time_;
+
+TEST_F(TrackedObjectsTest, TaskStopwatchNoStartStop) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ // Check that creating and destroying a stopwatch without starting it doesn't
+ // crash (it is destroyed without Start()/Stop() ever being called).
+ TaskStopwatch stopwatch;
+}
+
+// Verifies profiler startup/shutdown with no tracked tasks, twice in a row,
+// to prove that Reset() fully clears state.
+TEST_F(TrackedObjectsTest, MinimalStartupShutdown) {
+ // Minimal test doesn't even create any tasks.
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ EXPECT_FALSE(ThreadData::first()); // No activity even on this thread.
+ ThreadData* data = ThreadData::Get();
+ EXPECT_TRUE(ThreadData::first()); // Now class was constructed.
+ ASSERT_TRUE(data);
+ EXPECT_FALSE(data->next());
+ EXPECT_EQ(data, ThreadData::Get());
+ ThreadData::BirthMap birth_map;
+ ThreadData::DeathsSnapshot deaths;
+ // Snapshot phase 0; both maps must be empty since nothing was tracked.
+ data->SnapshotMaps(0, &birth_map, &deaths);
+ EXPECT_EQ(0u, birth_map.size());
+ EXPECT_EQ(0u, deaths.size());
+
+ // Clean up with no leaking.
+ Reset();
+
+ // Do it again, just to be sure we reset state completely.
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+ EXPECT_FALSE(ThreadData::first()); // No activity even on this thread.
+ data = ThreadData::Get();
+ EXPECT_TRUE(ThreadData::first()); // Now class was constructed.
+ ASSERT_TRUE(data);
+ EXPECT_FALSE(data->next());
+ EXPECT_EQ(data, ThreadData::Get());
+ birth_map.clear();
+ deaths.clear();
+ data->SnapshotMaps(0, &birth_map, &deaths);
+ EXPECT_EQ(0u, birth_map.size());
+ EXPECT_EQ(0u, deaths.size());
+}
+
+// Tracks one birth, then a second birth plus a timed run/death at the same
+// location, and checks both the raw maps and the phased snapshot. No thread
+// context is initialized, so the thread reports as kWorkerThreadName.
+TEST_F(TrackedObjectsTest, TinyStartupShutdown) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ // Instigate tracking on a single tracked object, on our thread.
+ const char kFunction[] = "TinyStartupShutdown";
+ Location location(kFunction, kFile, kLineNumber, NULL);
+ ThreadData::TallyABirthIfActive(location);
+
+ ThreadData* data = ThreadData::first();
+ ASSERT_TRUE(data);
+ EXPECT_FALSE(data->next());
+ EXPECT_EQ(data, ThreadData::Get());
+ ThreadData::BirthMap birth_map;
+ ThreadData::DeathsSnapshot deaths;
+ data->SnapshotMaps(0, &birth_map, &deaths);
+ EXPECT_EQ(1u, birth_map.size()); // 1 birth location.
+ EXPECT_EQ(1, birth_map.begin()->second->birth_count()); // 1 birth.
+ EXPECT_EQ(0u, deaths.size()); // No deaths.
+
+
+ // Now instigate another birth, while we are timing the run of the first
+ // execution.
+ // Create a child (using the same birth location).
+ // TrackingInfo will call TallyABirth() during construction.
+ const int32_t start_time = 1;
+ base::TimeTicks kBogusBirthTime = base::TimeTicks() +
+ base::TimeDelta::FromMilliseconds(start_time);
+ base::TrackingInfo pending_task(location, kBogusBirthTime);
+ SetTestTime(1);
+ TaskStopwatch stopwatch;
+ stopwatch.Start();
+ // Finally conclude the outer run.
+ const int32_t time_elapsed = 1000;
+ SetTestTime(start_time + time_elapsed);
+ stopwatch.Stop();
+
+ ThreadData::TallyRunOnNamedThreadIfTracking(pending_task, stopwatch);
+
+ birth_map.clear();
+ deaths.clear();
+ data->SnapshotMaps(0, &birth_map, &deaths);
+ EXPECT_EQ(1u, birth_map.size()); // 1 birth location.
+ EXPECT_EQ(2, birth_map.begin()->second->birth_count()); // 2 births.
+ EXPECT_EQ(1u, deaths.size()); // 1 location.
+ EXPECT_EQ(1, deaths.begin()->second.death_data.count); // 1 death.
+
+ // The births were at the same location as the one known death.
+ EXPECT_EQ(birth_map.begin()->second, deaths.begin()->first);
+
+ ProcessDataSnapshot process_data;
+ ThreadData::Snapshot(0, &process_data);
+
+ ASSERT_EQ(1u, process_data.phased_snapshots.size());
+ auto it = process_data.phased_snapshots.find(0);
+ ASSERT_TRUE(it != process_data.phased_snapshots.end());
+ const ProcessDataPhaseSnapshot& process_data_phase = it->second;
+ ASSERT_EQ(1u, process_data_phase.tasks.size());
+ EXPECT_EQ(kFile, process_data_phase.tasks[0].birth.location.file_name);
+ EXPECT_EQ(kFunction,
+ process_data_phase.tasks[0].birth.location.function_name);
+ EXPECT_EQ(kLineNumber,
+ process_data_phase.tasks[0].birth.location.line_number);
+ EXPECT_EQ(kWorkerThreadName, process_data_phase.tasks[0].birth.thread_name);
+ EXPECT_EQ(1, process_data_phase.tasks[0].death_data.count);
+ // Run duration is the stopwatch interval (1001 - 1 = 1000 ms); queue
+ // duration is posting-to-start (1 - 1 = 0 ms).
+ EXPECT_EQ(time_elapsed,
+ process_data_phase.tasks[0].death_data.run_duration_sum);
+ EXPECT_EQ(time_elapsed,
+ process_data_phase.tasks[0].death_data.run_duration_max);
+ EXPECT_EQ(time_elapsed,
+ process_data_phase.tasks[0].death_data.run_duration_sample);
+ EXPECT_EQ(0, process_data_phase.tasks[0].death_data.queue_duration_sum);
+ EXPECT_EQ(0, process_data_phase.tasks[0].death_data.queue_duration_max);
+ EXPECT_EQ(0, process_data_phase.tasks[0].death_data.queue_duration_sample);
+ EXPECT_EQ(kWorkerThreadName, process_data_phase.tasks[0].death_thread_name);
+}
+
+// Exercises DeathData::RecordDeath directly: after each death the sums
+// accumulate, while max and sample reflect the (identical) single death.
+TEST_F(TrackedObjectsTest, DeathDataTestRecordDeath) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ std::unique_ptr<DeathData> data(new DeathData());
+ ASSERT_NE(data, nullptr);
+ // Freshly-constructed DeathData is all zeros with no phase snapshots.
+ EXPECT_EQ(data->run_duration_sum(), 0);
+ EXPECT_EQ(data->run_duration_max(), 0);
+ EXPECT_EQ(data->run_duration_sample(), 0);
+ EXPECT_EQ(data->queue_duration_sum(), 0);
+ EXPECT_EQ(data->queue_duration_max(), 0);
+ EXPECT_EQ(data->queue_duration_sample(), 0);
+ EXPECT_EQ(data->count(), 0);
+ EXPECT_EQ(nullptr, data->last_phase_snapshot());
+
+ int32_t run_ms = 42;
+ int32_t queue_ms = 8;
+
+ const int kUnrandomInt = 0; // Fake random int that ensure we sample data.
+ data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
+ EXPECT_EQ(data->run_duration_sum(), run_ms);
+ EXPECT_EQ(data->run_duration_max(), run_ms);
+ EXPECT_EQ(data->run_duration_sample(), run_ms);
+ EXPECT_EQ(data->queue_duration_sum(), queue_ms);
+ EXPECT_EQ(data->queue_duration_max(), queue_ms);
+ EXPECT_EQ(data->queue_duration_sample(), queue_ms);
+ EXPECT_EQ(data->count(), 1);
+ EXPECT_EQ(nullptr, data->last_phase_snapshot());
+
+ // A second identical death doubles the sums; max/sample are unchanged.
+ data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
+ EXPECT_EQ(data->run_duration_sum(), run_ms + run_ms);
+ EXPECT_EQ(data->run_duration_max(), run_ms);
+ EXPECT_EQ(data->run_duration_sample(), run_ms);
+ EXPECT_EQ(data->queue_duration_sum(), queue_ms + queue_ms);
+ EXPECT_EQ(data->queue_duration_max(), queue_ms);
+ EXPECT_EQ(data->queue_duration_sample(), queue_ms);
+ EXPECT_EQ(data->count(), 2);
+ EXPECT_EQ(nullptr, data->last_phase_snapshot());
+}
+
+// Verifies DeathData::OnProfilingPhaseCompleted: completing a phase records a
+// snapshot of the data (reachable via last_phase_snapshot()) and resets the
+// running max durations to 0, while sums, counts and samples carry forward.
+TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ std::unique_ptr<DeathData> data(new DeathData());
+ ASSERT_NE(data, nullptr);
+
+ int32_t run_ms = 42;
+ int32_t queue_ms = 8;
+
+ const int kUnrandomInt = 0; // Fake random int that ensure we sample data.
+ data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
+ data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
+
+ // Complete phase 123: maxes reset to 0, everything else is retained.
+ data->OnProfilingPhaseCompleted(123);
+ EXPECT_EQ(data->run_duration_sum(), run_ms + run_ms);
+ EXPECT_EQ(data->run_duration_max(), 0);
+ EXPECT_EQ(data->run_duration_sample(), run_ms);
+ EXPECT_EQ(data->queue_duration_sum(), queue_ms + queue_ms);
+ EXPECT_EQ(data->queue_duration_max(), 0);
+ EXPECT_EQ(data->queue_duration_sample(), queue_ms);
+ EXPECT_EQ(data->count(), 2);
+ ASSERT_NE(nullptr, data->last_phase_snapshot());
+ EXPECT_EQ(123, data->last_phase_snapshot()->profiling_phase);
+ EXPECT_EQ(2, data->last_phase_snapshot()->death_data.count);
+ EXPECT_EQ(2 * run_ms,
+ data->last_phase_snapshot()->death_data.run_duration_sum);
+ EXPECT_EQ(run_ms, data->last_phase_snapshot()->death_data.run_duration_max);
+ EXPECT_EQ(run_ms,
+ data->last_phase_snapshot()->death_data.run_duration_sample);
+ EXPECT_EQ(2 * queue_ms,
+ data->last_phase_snapshot()->death_data.queue_duration_sum);
+ EXPECT_EQ(queue_ms,
+ data->last_phase_snapshot()->death_data.queue_duration_max);
+ EXPECT_EQ(queue_ms,
+ data->last_phase_snapshot()->death_data.queue_duration_sample);
+ EXPECT_EQ(nullptr, data->last_phase_snapshot()->prev);
+
+ int32_t run_ms1 = 21;
+ int32_t queue_ms1 = 4;
+
+ // A death in the new phase updates live data but not the frozen snapshot.
+ data->RecordDeath(queue_ms1, run_ms1, kUnrandomInt);
+ EXPECT_EQ(data->run_duration_sum(), run_ms + run_ms + run_ms1);
+ EXPECT_EQ(data->run_duration_max(), run_ms1);
+ EXPECT_EQ(data->run_duration_sample(), run_ms1);
+ EXPECT_EQ(data->queue_duration_sum(), queue_ms + queue_ms + queue_ms1);
+ EXPECT_EQ(data->queue_duration_max(), queue_ms1);
+ EXPECT_EQ(data->queue_duration_sample(), queue_ms1);
+ EXPECT_EQ(data->count(), 3);
+ ASSERT_NE(nullptr, data->last_phase_snapshot());
+ EXPECT_EQ(123, data->last_phase_snapshot()->profiling_phase);
+ EXPECT_EQ(2, data->last_phase_snapshot()->death_data.count);
+ EXPECT_EQ(2 * run_ms,
+ data->last_phase_snapshot()->death_data.run_duration_sum);
+ EXPECT_EQ(run_ms, data->last_phase_snapshot()->death_data.run_duration_max);
+ EXPECT_EQ(run_ms,
+ data->last_phase_snapshot()->death_data.run_duration_sample);
+ EXPECT_EQ(2 * queue_ms,
+ data->last_phase_snapshot()->death_data.queue_duration_sum);
+ EXPECT_EQ(queue_ms,
+ data->last_phase_snapshot()->death_data.queue_duration_max);
+ EXPECT_EQ(queue_ms,
+ data->last_phase_snapshot()->death_data.queue_duration_sample);
+ EXPECT_EQ(nullptr, data->last_phase_snapshot()->prev);
+}
+
+// Verifies DeathDataSnapshot::Delta: counts and duration sums are subtracted
+// (newer - older), while max and sample values are taken from the newer
+// snapshot unchanged.
+TEST_F(TrackedObjectsTest, Delta) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ DeathDataSnapshot snapshot;
+ snapshot.count = 10;
+ snapshot.run_duration_sum = 100;
+ snapshot.run_duration_max = 50;
+ snapshot.run_duration_sample = 25;
+ snapshot.queue_duration_sum = 200;
+ snapshot.queue_duration_max = 101;
+ snapshot.queue_duration_sample = 26;
+
+ DeathDataSnapshot older_snapshot;
+ older_snapshot.count = 2;
+ older_snapshot.run_duration_sum = 95;
+ older_snapshot.run_duration_max = 48;
+ older_snapshot.run_duration_sample = 22;
+ older_snapshot.queue_duration_sum = 190;
+ older_snapshot.queue_duration_max = 99;
+ older_snapshot.queue_duration_sample = 21;
+
+ const DeathDataSnapshot& delta = snapshot.Delta(older_snapshot);
+ EXPECT_EQ(8, delta.count); // 10 - 2.
+ EXPECT_EQ(5, delta.run_duration_sum); // 100 - 95.
+ EXPECT_EQ(50, delta.run_duration_max); // From |snapshot|.
+ EXPECT_EQ(25, delta.run_duration_sample); // From |snapshot|.
+ EXPECT_EQ(10, delta.queue_duration_sum); // 200 - 190.
+ EXPECT_EQ(101, delta.queue_duration_max); // From |snapshot|.
+ EXPECT_EQ(26, delta.queue_duration_sample); // From |snapshot|.
+}
+
+// With tracking DEACTIVATED, a birth on a worker thread (empty thread name)
+// is not recorded, so the snapshot's phase 0 contains no tasks.
+TEST_F(TrackedObjectsTest, DeactivatedBirthOnlyToSnapshotWorkerThread) {
+ // Start in the deactivated state.
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::DEACTIVATED);
+
+ const char kFunction[] = "DeactivatedBirthOnlyToSnapshotWorkerThread";
+ Location location(kFunction, kFile, kLineNumber, NULL);
+ TallyABirth(location, std::string());
+
+ ProcessDataSnapshot process_data;
+ ThreadData::Snapshot(0, &process_data);
+
+ ASSERT_EQ(1u, process_data.phased_snapshots.size());
+
+ auto it = process_data.phased_snapshots.find(0);
+ ASSERT_TRUE(it != process_data.phased_snapshots.end());
+ const ProcessDataPhaseSnapshot& process_data_phase = it->second;
+
+ ASSERT_EQ(0u, process_data_phase.tasks.size());
+
+ EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
+}
+
+// Same as DeactivatedBirthOnlyToSnapshotWorkerThread, but on a named main
+// thread: still no tasks are recorded while tracking is DEACTIVATED.
+TEST_F(TrackedObjectsTest, DeactivatedBirthOnlyToSnapshotMainThread) {
+ // Start in the deactivated state.
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::DEACTIVATED);
+
+ const char kFunction[] = "DeactivatedBirthOnlyToSnapshotMainThread";
+ Location location(kFunction, kFile, kLineNumber, NULL);
+ TallyABirth(location, kMainThreadName);
+
+ ProcessDataSnapshot process_data;
+ ThreadData::Snapshot(0, &process_data);
+
+ ASSERT_EQ(1u, process_data.phased_snapshots.size());
+
+ auto it = process_data.phased_snapshots.find(0);
+ ASSERT_TRUE(it != process_data.phased_snapshots.end());
+ const ProcessDataPhaseSnapshot& process_data_phase = it->second;
+
+ ASSERT_EQ(0u, process_data_phase.tasks.size());
+
+ EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
+}
+
+// A birth with no matching death on a worker thread: one task with count 1,
+// zero durations, and kStillAlive as the death-thread name.
+TEST_F(TrackedObjectsTest, BirthOnlyToSnapshotWorkerThread) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ const char kFunction[] = "BirthOnlyToSnapshotWorkerThread";
+ Location location(kFunction, kFile, kLineNumber, NULL);
+ TallyABirth(location, std::string());
+
+ ProcessDataSnapshot process_data;
+ ThreadData::Snapshot(0, &process_data);
+ ExpectSimpleProcessData(process_data, kFunction, kWorkerThreadName,
+ kStillAlive, 1, 0, 0);
+}
+
+// A birth with no matching death on a named main thread: one task with
+// count 1, zero durations, and kStillAlive as the death-thread name.
+TEST_F(TrackedObjectsTest, BirthOnlyToSnapshotMainThread) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ const char kFunction[] = "BirthOnlyToSnapshotMainThread";
+ Location location(kFunction, kFile, kLineNumber, NULL);
+ TallyABirth(location, kMainThreadName);
+
+ ProcessDataSnapshot process_data;
+ ThreadData::Snapshot(0, &process_data);
+ ExpectSimpleProcessData(process_data, kFunction, kMainThreadName, kStillAlive,
+ 1, 0, 0);
+}
+
+// Full birth-to-death life cycle on the main thread: expects run duration
+// 7 - 5 = 2 ms and queue duration 5 - 1 = 4 ms.
+TEST_F(TrackedObjectsTest, LifeCycleToSnapshotMainThread) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ const char kFunction[] = "LifeCycleToSnapshotMainThread";
+ Location location(kFunction, kFile, kLineNumber, NULL);
+ TallyABirth(location, kMainThreadName);
+
+ const TrackedTime kTimePosted = TrackedTime::FromMilliseconds(1);
+ const base::TimeTicks kDelayedStartTime = base::TimeTicks();
+ // TrackingInfo will call TallyABirth() during construction.
+ base::TrackingInfo pending_task(location, kDelayedStartTime);
+ pending_task.time_posted = kTimePosted; // Overwrite implied Now().
+
+ const unsigned int kStartOfRun = 5;
+ const unsigned int kEndOfRun = 7;
+ SetTestTime(kStartOfRun);
+ TaskStopwatch stopwatch;
+ stopwatch.Start();
+ SetTestTime(kEndOfRun);
+ stopwatch.Stop();
+
+ ThreadData::TallyRunOnNamedThreadIfTracking(pending_task, stopwatch);
+
+ ProcessDataSnapshot process_data;
+ ThreadData::Snapshot(0, &process_data);
+ ExpectSimpleProcessData(process_data, kFunction, kMainThreadName,
+ kMainThreadName, 1, 2, 4);
+}
+
+// Records one task death in phase 0, completes the phase, records a second in
+// phase 1, then checks both phases appear independently in the snapshot.
+TEST_F(TrackedObjectsTest, TwoPhases) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ const char kFunction[] = "TwoPhases";
+ Location location(kFunction, kFile, kLineNumber, NULL);
+ TallyABirth(location, kMainThreadName);
+
+ const TrackedTime kTimePosted = TrackedTime::FromMilliseconds(1);
+ const base::TimeTicks kDelayedStartTime = base::TimeTicks();
+ // TrackingInfo will call TallyABirth() during construction.
+ base::TrackingInfo pending_task(location, kDelayedStartTime);
+ pending_task.time_posted = kTimePosted; // Overwrite implied Now().
+
+ const unsigned int kStartOfRun = 5;
+ const unsigned int kEndOfRun = 7;
+ SetTestTime(kStartOfRun);
+ TaskStopwatch stopwatch;
+ stopwatch.Start();
+ SetTestTime(kEndOfRun);
+ stopwatch.Stop();
+
+ ThreadData::TallyRunOnNamedThreadIfTracking(pending_task, stopwatch);
+
+ ThreadData::OnProfilingPhaseCompleted(0);
+
+ TallyABirth(location, kMainThreadName);
+
+ const TrackedTime kTimePosted1 = TrackedTime::FromMilliseconds(9);
+ const base::TimeTicks kDelayedStartTime1 = base::TimeTicks();
+ // TrackingInfo will call TallyABirth() during construction.
+ base::TrackingInfo pending_task1(location, kDelayedStartTime1);
+ pending_task1.time_posted = kTimePosted1; // Overwrite implied Now().
+
+ const unsigned int kStartOfRun1 = 11;
+ const unsigned int kEndOfRun1 = 21;
+ SetTestTime(kStartOfRun1);
+ TaskStopwatch stopwatch1;
+ stopwatch1.Start();
+ SetTestTime(kEndOfRun1);
+ stopwatch1.Stop();
+
+ ThreadData::TallyRunOnNamedThreadIfTracking(pending_task1, stopwatch1);
+
+ ProcessDataSnapshot process_data;
+ ThreadData::Snapshot(1, &process_data);
+
+ ASSERT_EQ(2u, process_data.phased_snapshots.size());
+
+ auto it0 = process_data.phased_snapshots.find(0);
+ ASSERT_TRUE(it0 != process_data.phased_snapshots.end());
+ const ProcessDataPhaseSnapshot& process_data_phase0 = it0->second;
+
+ ASSERT_EQ(1u, process_data_phase0.tasks.size());
+
+ EXPECT_EQ(kFile, process_data_phase0.tasks[0].birth.location.file_name);
+ EXPECT_EQ(kFunction,
+ process_data_phase0.tasks[0].birth.location.function_name);
+ EXPECT_EQ(kLineNumber,
+ process_data_phase0.tasks[0].birth.location.line_number);
+
+ EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].birth.thread_name);
+
+ // Phase 0: run = 7 - 5 = 2 ms, queue = 5 - 1 = 4 ms.
+ EXPECT_EQ(1, process_data_phase0.tasks[0].death_data.count);
+ EXPECT_EQ(2, process_data_phase0.tasks[0].death_data.run_duration_sum);
+ EXPECT_EQ(2, process_data_phase0.tasks[0].death_data.run_duration_max);
+ EXPECT_EQ(2, process_data_phase0.tasks[0].death_data.run_duration_sample);
+ EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_sum);
+ EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_max);
+ EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_sample);
+
+ EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].death_thread_name);
+
+ auto it1 = process_data.phased_snapshots.find(1);
+ ASSERT_TRUE(it1 != process_data.phased_snapshots.end());
+ const ProcessDataPhaseSnapshot& process_data_phase1 = it1->second;
+
+ ASSERT_EQ(1u, process_data_phase1.tasks.size());
+
+ EXPECT_EQ(kFile, process_data_phase1.tasks[0].birth.location.file_name);
+ EXPECT_EQ(kFunction,
+ process_data_phase1.tasks[0].birth.location.function_name);
+ EXPECT_EQ(kLineNumber,
+ process_data_phase1.tasks[0].birth.location.line_number);
+
+ EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].birth.thread_name);
+
+ // Phase 1: run = 21 - 11 = 10 ms, queue = 11 - 9 = 2 ms.
+ EXPECT_EQ(1, process_data_phase1.tasks[0].death_data.count);
+ EXPECT_EQ(10, process_data_phase1.tasks[0].death_data.run_duration_sum);
+ EXPECT_EQ(10, process_data_phase1.tasks[0].death_data.run_duration_max);
+ EXPECT_EQ(10, process_data_phase1.tasks[0].death_data.run_duration_sample);
+ EXPECT_EQ(2, process_data_phase1.tasks[0].death_data.queue_duration_sum);
+ EXPECT_EQ(2, process_data_phase1.tasks[0].death_data.queue_duration_max);
+ EXPECT_EQ(2, process_data_phase1.tasks[0].death_data.queue_duration_sample);
+
+ EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].death_thread_name);
+
+ EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
+}
+
+// Like TwoPhases, but across three profiling phases, each with one task
+// death, verifying the per-phase durations stay separated.
+TEST_F(TrackedObjectsTest, ThreePhases) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ const char kFunction[] = "ThreePhases";
+ Location location(kFunction, kFile, kLineNumber, NULL);
+
+ // Phase 0
+ {
+ TallyABirth(location, kMainThreadName);
+
+ // TrackingInfo will call TallyABirth() during construction.
+ SetTestTime(10);
+ base::TrackingInfo pending_task(location, base::TimeTicks());
+
+ SetTestTime(17);
+ TaskStopwatch stopwatch;
+ stopwatch.Start();
+ SetTestTime(23);
+ stopwatch.Stop();
+
+ ThreadData::TallyRunOnNamedThreadIfTracking(pending_task, stopwatch);
+ }
+
+ ThreadData::OnProfilingPhaseCompleted(0);
+
+ // Phase 1
+ {
+ TallyABirth(location, kMainThreadName);
+
+ SetTestTime(30);
+ base::TrackingInfo pending_task(location, base::TimeTicks());
+
+ SetTestTime(35);
+ TaskStopwatch stopwatch;
+ stopwatch.Start();
+ SetTestTime(39);
+ stopwatch.Stop();
+
+ ThreadData::TallyRunOnNamedThreadIfTracking(pending_task, stopwatch);
+ }
+
+ ThreadData::OnProfilingPhaseCompleted(1);
+
+ // Phase 2
+ {
+ TallyABirth(location, kMainThreadName);
+
+ // TrackingInfo will call TallyABirth() during construction.
+ SetTestTime(40);
+ base::TrackingInfo pending_task(location, base::TimeTicks());
+
+ SetTestTime(43);
+ TaskStopwatch stopwatch;
+ stopwatch.Start();
+ SetTestTime(45);
+ stopwatch.Stop();
+
+ ThreadData::TallyRunOnNamedThreadIfTracking(pending_task, stopwatch);
+ }
+
+ // Snapshot and check results.
+ ProcessDataSnapshot process_data;
+ ThreadData::Snapshot(2, &process_data);
+
+ ASSERT_EQ(3u, process_data.phased_snapshots.size());
+
+ auto it0 = process_data.phased_snapshots.find(0);
+ ASSERT_TRUE(it0 != process_data.phased_snapshots.end());
+ const ProcessDataPhaseSnapshot& process_data_phase0 = it0->second;
+
+ ASSERT_EQ(1u, process_data_phase0.tasks.size());
+
+ EXPECT_EQ(kFile, process_data_phase0.tasks[0].birth.location.file_name);
+ EXPECT_EQ(kFunction,
+ process_data_phase0.tasks[0].birth.location.function_name);
+ EXPECT_EQ(kLineNumber,
+ process_data_phase0.tasks[0].birth.location.line_number);
+
+ EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].birth.thread_name);
+
+ // Phase 0: run = 23 - 17 = 6 ms, queue = 17 - 10 = 7 ms.
+ EXPECT_EQ(1, process_data_phase0.tasks[0].death_data.count);
+ EXPECT_EQ(6, process_data_phase0.tasks[0].death_data.run_duration_sum);
+ EXPECT_EQ(6, process_data_phase0.tasks[0].death_data.run_duration_max);
+ EXPECT_EQ(6, process_data_phase0.tasks[0].death_data.run_duration_sample);
+ EXPECT_EQ(7, process_data_phase0.tasks[0].death_data.queue_duration_sum);
+ EXPECT_EQ(7, process_data_phase0.tasks[0].death_data.queue_duration_max);
+ EXPECT_EQ(7, process_data_phase0.tasks[0].death_data.queue_duration_sample);
+
+ EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].death_thread_name);
+
+ auto it1 = process_data.phased_snapshots.find(1);
+ ASSERT_TRUE(it1 != process_data.phased_snapshots.end());
+ const ProcessDataPhaseSnapshot& process_data_phase1 = it1->second;
+
+ ASSERT_EQ(1u, process_data_phase1.tasks.size());
+
+ EXPECT_EQ(kFile, process_data_phase1.tasks[0].birth.location.file_name);
+ EXPECT_EQ(kFunction,
+ process_data_phase1.tasks[0].birth.location.function_name);
+ EXPECT_EQ(kLineNumber,
+ process_data_phase1.tasks[0].birth.location.line_number);
+
+ EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].birth.thread_name);
+
+ // Phase 1: run = 39 - 35 = 4 ms, queue = 35 - 30 = 5 ms.
+ EXPECT_EQ(1, process_data_phase1.tasks[0].death_data.count);
+ EXPECT_EQ(4, process_data_phase1.tasks[0].death_data.run_duration_sum);
+ EXPECT_EQ(4, process_data_phase1.tasks[0].death_data.run_duration_max);
+ EXPECT_EQ(4, process_data_phase1.tasks[0].death_data.run_duration_sample);
+ EXPECT_EQ(5, process_data_phase1.tasks[0].death_data.queue_duration_sum);
+ EXPECT_EQ(5, process_data_phase1.tasks[0].death_data.queue_duration_max);
+ EXPECT_EQ(5, process_data_phase1.tasks[0].death_data.queue_duration_sample);
+
+ EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].death_thread_name);
+
+ auto it2 = process_data.phased_snapshots.find(2);
+ ASSERT_TRUE(it2 != process_data.phased_snapshots.end());
+ const ProcessDataPhaseSnapshot& process_data_phase2 = it2->second;
+
+ ASSERT_EQ(1u, process_data_phase2.tasks.size());
+
+ EXPECT_EQ(kFile, process_data_phase2.tasks[0].birth.location.file_name);
+ EXPECT_EQ(kFunction,
+ process_data_phase2.tasks[0].birth.location.function_name);
+ EXPECT_EQ(kLineNumber,
+ process_data_phase2.tasks[0].birth.location.line_number);
+
+ EXPECT_EQ(kMainThreadName, process_data_phase2.tasks[0].birth.thread_name);
+
+ // Phase 2: run = 45 - 43 = 2 ms, queue = 43 - 40 = 3 ms.
+ EXPECT_EQ(1, process_data_phase2.tasks[0].death_data.count);
+ EXPECT_EQ(2, process_data_phase2.tasks[0].death_data.run_duration_sum);
+ EXPECT_EQ(2, process_data_phase2.tasks[0].death_data.run_duration_max);
+ EXPECT_EQ(2, process_data_phase2.tasks[0].death_data.run_duration_sample);
+ EXPECT_EQ(3, process_data_phase2.tasks[0].death_data.queue_duration_sum);
+ EXPECT_EQ(3, process_data_phase2.tasks[0].death_data.queue_duration_max);
+ EXPECT_EQ(3, process_data_phase2.tasks[0].death_data.queue_duration_sample);
+
+ EXPECT_EQ(kMainThreadName, process_data_phase2.tasks[0].death_thread_name);
+
+ EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
+}
+
+TEST_F(TrackedObjectsTest, TwoPhasesSecondEmpty) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ const char kFunction[] = "TwoPhasesSecondEmpty";
+ Location location(kFunction, kFile, kLineNumber, NULL);
+ ThreadData::InitializeThreadContext(kMainThreadName);
+
+ const TrackedTime kTimePosted = TrackedTime::FromMilliseconds(1);
+ const base::TimeTicks kDelayedStartTime = base::TimeTicks();
+ // TrackingInfo will call TallyABirth() during construction.
+ base::TrackingInfo pending_task(location, kDelayedStartTime);
+ pending_task.time_posted = kTimePosted; // Overwrite implied Now().
+
+ const unsigned int kStartOfRun = 5;
+ const unsigned int kEndOfRun = 7;
+ SetTestTime(kStartOfRun);
+ TaskStopwatch stopwatch;
+ stopwatch.Start();
+ SetTestTime(kEndOfRun);
+ stopwatch.Stop();
+
+ ThreadData::TallyRunOnNamedThreadIfTracking(pending_task, stopwatch);
+
+ ThreadData::OnProfilingPhaseCompleted(0);
+
+ ProcessDataSnapshot process_data;
+ ThreadData::Snapshot(1, &process_data);
+
+ ASSERT_EQ(2u, process_data.phased_snapshots.size());
+
+ auto it0 = process_data.phased_snapshots.find(0);
+ ASSERT_TRUE(it0 != process_data.phased_snapshots.end());
+ const ProcessDataPhaseSnapshot& process_data_phase0 = it0->second;
+
+ ASSERT_EQ(1u, process_data_phase0.tasks.size());
+
+ EXPECT_EQ(kFile, process_data_phase0.tasks[0].birth.location.file_name);
+ EXPECT_EQ(kFunction,
+ process_data_phase0.tasks[0].birth.location.function_name);
+ EXPECT_EQ(kLineNumber,
+ process_data_phase0.tasks[0].birth.location.line_number);
+
+ EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].birth.thread_name);
+
+ EXPECT_EQ(1, process_data_phase0.tasks[0].death_data.count);
+ EXPECT_EQ(2, process_data_phase0.tasks[0].death_data.run_duration_sum);
+ EXPECT_EQ(2, process_data_phase0.tasks[0].death_data.run_duration_max);
+ EXPECT_EQ(2, process_data_phase0.tasks[0].death_data.run_duration_sample);
+ EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_sum);
+ EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_max);
+ EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_sample);
+
+ EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].death_thread_name);
+
+ auto it1 = process_data.phased_snapshots.find(1);
+ ASSERT_TRUE(it1 != process_data.phased_snapshots.end());
+ const ProcessDataPhaseSnapshot& process_data_phase1 = it1->second;
+
+ ASSERT_EQ(0u, process_data_phase1.tasks.size());
+
+ EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
+}
+
+TEST_F(TrackedObjectsTest, TwoPhasesFirstEmpty) {
+  ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+  ThreadData::OnProfilingPhaseCompleted(0);
+
+  const char kFunction[] = "TwoPhasesFirstEmpty";  // Matches this test's name.
+  Location location(kFunction, kFile, kLineNumber, NULL);
+  ThreadData::InitializeThreadContext(kMainThreadName);
+
+  const TrackedTime kTimePosted = TrackedTime::FromMilliseconds(1);
+  const base::TimeTicks kDelayedStartTime = base::TimeTicks();
+  // TrackingInfo will call TallyABirth() during construction.
+  base::TrackingInfo pending_task(location, kDelayedStartTime);
+  pending_task.time_posted = kTimePosted;  // Overwrite implied Now().
+
+  const unsigned int kStartOfRun = 5;
+  const unsigned int kEndOfRun = 7;
+  SetTestTime(kStartOfRun);
+  TaskStopwatch stopwatch;
+  stopwatch.Start();
+  SetTestTime(kEndOfRun);
+  stopwatch.Stop();
+
+  ThreadData::TallyRunOnNamedThreadIfTracking(pending_task, stopwatch);
+
+  ProcessDataSnapshot process_data;
+  ThreadData::Snapshot(1, &process_data);
+
+  ASSERT_EQ(1u, process_data.phased_snapshots.size());
+
+  auto it1 = process_data.phased_snapshots.find(1);
+  ASSERT_TRUE(it1 != process_data.phased_snapshots.end());
+  const ProcessDataPhaseSnapshot& process_data_phase1 = it1->second;
+
+  ASSERT_EQ(1u, process_data_phase1.tasks.size());
+
+  EXPECT_EQ(kFile, process_data_phase1.tasks[0].birth.location.file_name);
+  EXPECT_EQ(kFunction,
+            process_data_phase1.tasks[0].birth.location.function_name);
+  EXPECT_EQ(kLineNumber,
+            process_data_phase1.tasks[0].birth.location.line_number);
+
+  EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].birth.thread_name);
+
+  EXPECT_EQ(1, process_data_phase1.tasks[0].death_data.count);
+  EXPECT_EQ(2, process_data_phase1.tasks[0].death_data.run_duration_sum);
+  EXPECT_EQ(2, process_data_phase1.tasks[0].death_data.run_duration_max);
+  EXPECT_EQ(2, process_data_phase1.tasks[0].death_data.run_duration_sample);
+  EXPECT_EQ(4, process_data_phase1.tasks[0].death_data.queue_duration_sum);
+  EXPECT_EQ(4, process_data_phase1.tasks[0].death_data.queue_duration_max);
+  EXPECT_EQ(4, process_data_phase1.tasks[0].death_data.queue_duration_sample);
+
+  EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].death_thread_name);
+
+  EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
+}
+
+// We will deactivate tracking after the birth, and before the death, and
+// demonstrate that the lifecycle is completely tallied. This ensures that
+// our tallied births are matched by tallied deaths (except for when the
+// task is still running, or is queued).
+TEST_F(TrackedObjectsTest, LifeCycleMidDeactivatedToSnapshotMainThread) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ const char kFunction[] = "LifeCycleMidDeactivatedToSnapshotMainThread";
+ Location location(kFunction, kFile, kLineNumber, NULL);
+ TallyABirth(location, kMainThreadName);
+
+ const TrackedTime kTimePosted = TrackedTime::FromMilliseconds(1);
+ const base::TimeTicks kDelayedStartTime = base::TimeTicks();
+ // TrackingInfo will call TallyABirth() during construction.
+ base::TrackingInfo pending_task(location, kDelayedStartTime);
+ pending_task.time_posted = kTimePosted; // Overwrite implied Now().
+
+ // Turn off tracking now that we have births.
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::DEACTIVATED);
+
+ const unsigned int kStartOfRun = 5;
+ const unsigned int kEndOfRun = 7;
+ SetTestTime(kStartOfRun);
+ TaskStopwatch stopwatch;
+ stopwatch.Start();
+ SetTestTime(kEndOfRun);
+ stopwatch.Stop();
+
+ ThreadData::TallyRunOnNamedThreadIfTracking(pending_task, stopwatch);
+
+ ProcessDataSnapshot process_data;
+ ThreadData::Snapshot(0, &process_data);
+ ExpectSimpleProcessData(process_data, kFunction, kMainThreadName,
+ kMainThreadName, 1, 2, 4);
+}
+
+// We will deactivate tracking before starting a life cycle, and neither
+// the birth nor the death will be recorded.
+TEST_F(TrackedObjectsTest, LifeCyclePreDeactivatedToSnapshotMainThread) {
+ // Start in the deactivated state.
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::DEACTIVATED);
+
+ const char kFunction[] = "LifeCyclePreDeactivatedToSnapshotMainThread";
+ Location location(kFunction, kFile, kLineNumber, NULL);
+ TallyABirth(location, kMainThreadName);
+
+ const TrackedTime kTimePosted = TrackedTime::FromMilliseconds(1);
+ const base::TimeTicks kDelayedStartTime = base::TimeTicks();
+ // TrackingInfo will call TallyABirth() during construction.
+ base::TrackingInfo pending_task(location, kDelayedStartTime);
+ pending_task.time_posted = kTimePosted; // Overwrite implied Now().
+
+ const unsigned int kStartOfRun = 5;
+ const unsigned int kEndOfRun = 7;
+ SetTestTime(kStartOfRun);
+ TaskStopwatch stopwatch;
+ stopwatch.Start();
+ SetTestTime(kEndOfRun);
+ stopwatch.Stop();
+
+ ThreadData::TallyRunOnNamedThreadIfTracking(pending_task, stopwatch);
+
+ ProcessDataSnapshot process_data;
+ ThreadData::Snapshot(0, &process_data);
+
+ ASSERT_EQ(1u, process_data.phased_snapshots.size());
+
+ auto it = process_data.phased_snapshots.find(0);
+ ASSERT_TRUE(it != process_data.phased_snapshots.end());
+ const ProcessDataPhaseSnapshot& process_data_phase = it->second;
+
+ ASSERT_EQ(0u, process_data_phase.tasks.size());
+
+ EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
+}
+
+TEST_F(TrackedObjectsTest, TwoLives) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ const char kFunction[] = "TwoLives";
+ Location location(kFunction, kFile, kLineNumber, NULL);
+ TallyABirth(location, kMainThreadName);
+
+ const TrackedTime kTimePosted = TrackedTime::FromMilliseconds(1);
+ const base::TimeTicks kDelayedStartTime = base::TimeTicks();
+ // TrackingInfo will call TallyABirth() during construction.
+ base::TrackingInfo pending_task(location, kDelayedStartTime);
+ pending_task.time_posted = kTimePosted; // Overwrite implied Now().
+
+ const unsigned int kStartOfRun = 5;
+ const unsigned int kEndOfRun = 7;
+ SetTestTime(kStartOfRun);
+ TaskStopwatch stopwatch;
+ stopwatch.Start();
+ SetTestTime(kEndOfRun);
+ stopwatch.Stop();
+
+ ThreadData::TallyRunOnNamedThreadIfTracking(pending_task, stopwatch);
+
+ // TrackingInfo will call TallyABirth() during construction.
+ base::TrackingInfo pending_task2(location, kDelayedStartTime);
+ pending_task2.time_posted = kTimePosted; // Overwrite implied Now().
+ SetTestTime(kStartOfRun);
+ TaskStopwatch stopwatch2;
+ stopwatch2.Start();
+ SetTestTime(kEndOfRun);
+ stopwatch2.Stop();
+
+ ThreadData::TallyRunOnNamedThreadIfTracking(pending_task2, stopwatch2);
+
+ ProcessDataSnapshot process_data;
+ ThreadData::Snapshot(0, &process_data);
+ ExpectSimpleProcessData(process_data, kFunction, kMainThreadName,
+ kMainThreadName, 2, 2, 4);
+}
+
+TEST_F(TrackedObjectsTest, DifferentLives) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ // Use a well named thread.
+ ThreadData::InitializeThreadContext(kMainThreadName);
+ const char kFunction[] = "DifferentLives";
+ Location location(kFunction, kFile, kLineNumber, NULL);
+
+ const TrackedTime kTimePosted = TrackedTime::FromMilliseconds(1);
+ const base::TimeTicks kDelayedStartTime = base::TimeTicks();
+ // TrackingInfo will call TallyABirth() during construction.
+ base::TrackingInfo pending_task(location, kDelayedStartTime);
+ pending_task.time_posted = kTimePosted; // Overwrite implied Now().
+
+ const unsigned int kStartOfRun = 5;
+ const unsigned int kEndOfRun = 7;
+ SetTestTime(kStartOfRun);
+ TaskStopwatch stopwatch;
+ stopwatch.Start();
+ SetTestTime(kEndOfRun);
+ stopwatch.Stop();
+
+ ThreadData::TallyRunOnNamedThreadIfTracking(pending_task, stopwatch);
+
+ const int kSecondFakeLineNumber = 999;
+ Location second_location(kFunction, kFile, kSecondFakeLineNumber, NULL);
+
+ // TrackingInfo will call TallyABirth() during construction.
+ base::TrackingInfo pending_task2(second_location, kDelayedStartTime);
+ pending_task2.time_posted = kTimePosted; // Overwrite implied Now().
+
+ ProcessDataSnapshot process_data;
+ ThreadData::Snapshot(0, &process_data);
+
+ ASSERT_EQ(1u, process_data.phased_snapshots.size());
+ auto it = process_data.phased_snapshots.find(0);
+ ASSERT_TRUE(it != process_data.phased_snapshots.end());
+ const ProcessDataPhaseSnapshot& process_data_phase = it->second;
+
+ ASSERT_EQ(2u, process_data_phase.tasks.size());
+
+ EXPECT_EQ(kFile, process_data_phase.tasks[0].birth.location.file_name);
+ EXPECT_EQ(kFunction,
+ process_data_phase.tasks[0].birth.location.function_name);
+ EXPECT_EQ(kLineNumber,
+ process_data_phase.tasks[0].birth.location.line_number);
+ EXPECT_EQ(kMainThreadName, process_data_phase.tasks[0].birth.thread_name);
+ EXPECT_EQ(1, process_data_phase.tasks[0].death_data.count);
+ EXPECT_EQ(2, process_data_phase.tasks[0].death_data.run_duration_sum);
+ EXPECT_EQ(2, process_data_phase.tasks[0].death_data.run_duration_max);
+ EXPECT_EQ(2, process_data_phase.tasks[0].death_data.run_duration_sample);
+ EXPECT_EQ(4, process_data_phase.tasks[0].death_data.queue_duration_sum);
+ EXPECT_EQ(4, process_data_phase.tasks[0].death_data.queue_duration_max);
+ EXPECT_EQ(4, process_data_phase.tasks[0].death_data.queue_duration_sample);
+ EXPECT_EQ(kMainThreadName, process_data_phase.tasks[0].death_thread_name);
+ EXPECT_EQ(kFile, process_data_phase.tasks[1].birth.location.file_name);
+ EXPECT_EQ(kFunction,
+ process_data_phase.tasks[1].birth.location.function_name);
+ EXPECT_EQ(kSecondFakeLineNumber,
+ process_data_phase.tasks[1].birth.location.line_number);
+ EXPECT_EQ(kMainThreadName, process_data_phase.tasks[1].birth.thread_name);
+ EXPECT_EQ(1, process_data_phase.tasks[1].death_data.count);
+ EXPECT_EQ(0, process_data_phase.tasks[1].death_data.run_duration_sum);
+ EXPECT_EQ(0, process_data_phase.tasks[1].death_data.run_duration_max);
+ EXPECT_EQ(0, process_data_phase.tasks[1].death_data.run_duration_sample);
+ EXPECT_EQ(0, process_data_phase.tasks[1].death_data.queue_duration_sum);
+ EXPECT_EQ(0, process_data_phase.tasks[1].death_data.queue_duration_max);
+ EXPECT_EQ(0, process_data_phase.tasks[1].death_data.queue_duration_sample);
+ EXPECT_EQ(kStillAlive, process_data_phase.tasks[1].death_thread_name);
+ EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
+}
+
+TEST_F(TrackedObjectsTest, TaskWithNestedExclusion) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ const char kFunction[] = "TaskWithNestedExclusion";
+ Location location(kFunction, kFile, kLineNumber, NULL);
+ TallyABirth(location, kMainThreadName);
+
+ const TrackedTime kTimePosted = TrackedTime::FromMilliseconds(1);
+ const base::TimeTicks kDelayedStartTime = base::TimeTicks();
+ // TrackingInfo will call TallyABirth() during construction.
+ base::TrackingInfo pending_task(location, kDelayedStartTime);
+ pending_task.time_posted = kTimePosted; // Overwrite implied Now().
+
+ SetTestTime(5);
+ TaskStopwatch task_stopwatch;
+ task_stopwatch.Start();
+ {
+ SetTestTime(8);
+ TaskStopwatch exclusion_stopwatch;
+ exclusion_stopwatch.Start();
+ SetTestTime(12);
+ exclusion_stopwatch.Stop();
+ }
+ SetTestTime(15);
+ task_stopwatch.Stop();
+
+ ThreadData::TallyRunOnNamedThreadIfTracking(pending_task, task_stopwatch);
+
+ ProcessDataSnapshot process_data;
+ ThreadData::Snapshot(0, &process_data);
+ ExpectSimpleProcessData(process_data, kFunction, kMainThreadName,
+ kMainThreadName, 1, 6, 4);
+}
+
+TEST_F(TrackedObjectsTest, TaskWith2NestedExclusions) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ const char kFunction[] = "TaskWith2NestedExclusions";
+ Location location(kFunction, kFile, kLineNumber, NULL);
+ TallyABirth(location, kMainThreadName);
+
+ const TrackedTime kTimePosted = TrackedTime::FromMilliseconds(1);
+ const base::TimeTicks kDelayedStartTime = base::TimeTicks();
+ // TrackingInfo will call TallyABirth() during construction.
+ base::TrackingInfo pending_task(location, kDelayedStartTime);
+ pending_task.time_posted = kTimePosted; // Overwrite implied Now().
+
+ SetTestTime(5);
+ TaskStopwatch task_stopwatch;
+ task_stopwatch.Start();
+ {
+ SetTestTime(8);
+ TaskStopwatch exclusion_stopwatch;
+ exclusion_stopwatch.Start();
+ SetTestTime(12);
+ exclusion_stopwatch.Stop();
+
+ SetTestTime(15);
+ TaskStopwatch exclusion_stopwatch2;
+ exclusion_stopwatch2.Start();
+ SetTestTime(18);
+ exclusion_stopwatch2.Stop();
+ }
+ SetTestTime(25);
+ task_stopwatch.Stop();
+
+ ThreadData::TallyRunOnNamedThreadIfTracking(pending_task, task_stopwatch);
+
+ ProcessDataSnapshot process_data;
+ ThreadData::Snapshot(0, &process_data);
+ ExpectSimpleProcessData(process_data, kFunction, kMainThreadName,
+ kMainThreadName, 1, 13, 4);
+}
+
+TEST_F(TrackedObjectsTest, TaskWithNestedExclusionWithNestedTask) {
+  ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+  const char kFunction[] = "TaskWithNestedExclusionWithNestedTask";
+  Location location(kFunction, kFile, kLineNumber, NULL);
+
+  const int kSecondFakeLineNumber = 999;
+
+  TallyABirth(location, kMainThreadName);
+
+  const TrackedTime kTimePosted = TrackedTime::FromMilliseconds(1);
+  const base::TimeTicks kDelayedStartTime = base::TimeTicks();
+  // TrackingInfo will call TallyABirth() during construction.
+  base::TrackingInfo pending_task(location, kDelayedStartTime);
+  pending_task.time_posted = kTimePosted;  // Overwrite implied Now().
+
+  SetTestTime(5);
+  TaskStopwatch task_stopwatch;
+  task_stopwatch.Start();
+  {
+    SetTestTime(8);
+    TaskStopwatch exclusion_stopwatch;
+    exclusion_stopwatch.Start();
+    {
+      Location second_location(kFunction, kFile, kSecondFakeLineNumber, NULL);
+      base::TrackingInfo nested_task(second_location, kDelayedStartTime);
+      // Overwrite implied Now().
+      nested_task.time_posted = TrackedTime::FromMilliseconds(8);
+      SetTestTime(9);
+      TaskStopwatch nested_task_stopwatch;
+      nested_task_stopwatch.Start();
+      SetTestTime(11);
+      nested_task_stopwatch.Stop();
+      ThreadData::TallyRunOnNamedThreadIfTracking(
+          nested_task, nested_task_stopwatch);
+    }
+    SetTestTime(12);
+    exclusion_stopwatch.Stop();
+  }
+  SetTestTime(15);
+  task_stopwatch.Stop();
+
+  ThreadData::TallyRunOnNamedThreadIfTracking(pending_task, task_stopwatch);
+
+  ProcessDataSnapshot process_data;
+  ThreadData::Snapshot(0, &process_data);
+
+  ASSERT_EQ(1u, process_data.phased_snapshots.size());
+  auto it = process_data.phased_snapshots.find(0);
+  ASSERT_TRUE(it != process_data.phased_snapshots.end());
+  const ProcessDataPhaseSnapshot& process_data_phase = it->second;
+
+  // Assert the task count before indexing into |tasks| below.
+  ASSERT_EQ(2u, process_data_phase.tasks.size());
+
+  // The order in which the two tasks appear is platform-dependent.
+  int t0 =
+      (process_data_phase.tasks[0].birth.location.line_number == kLineNumber)
+          ? 0
+          : 1;
+  int t1 = 1 - t0;
+
+  EXPECT_EQ(kFile, process_data_phase.tasks[t0].birth.location.file_name);
+  EXPECT_EQ(kFunction,
+            process_data_phase.tasks[t0].birth.location.function_name);
+  EXPECT_EQ(kLineNumber,
+            process_data_phase.tasks[t0].birth.location.line_number);
+  EXPECT_EQ(kMainThreadName, process_data_phase.tasks[t0].birth.thread_name);
+  EXPECT_EQ(1, process_data_phase.tasks[t0].death_data.count);
+  EXPECT_EQ(6, process_data_phase.tasks[t0].death_data.run_duration_sum);
+  EXPECT_EQ(6, process_data_phase.tasks[t0].death_data.run_duration_max);
+  EXPECT_EQ(6, process_data_phase.tasks[t0].death_data.run_duration_sample);
+  EXPECT_EQ(4, process_data_phase.tasks[t0].death_data.queue_duration_sum);
+  EXPECT_EQ(4, process_data_phase.tasks[t0].death_data.queue_duration_max);
+  EXPECT_EQ(4, process_data_phase.tasks[t0].death_data.queue_duration_sample);
+  EXPECT_EQ(kMainThreadName, process_data_phase.tasks[t0].death_thread_name);
+  EXPECT_EQ(kFile, process_data_phase.tasks[t1].birth.location.file_name);
+  EXPECT_EQ(kFunction,
+            process_data_phase.tasks[t1].birth.location.function_name);
+  EXPECT_EQ(kSecondFakeLineNumber,
+            process_data_phase.tasks[t1].birth.location.line_number);
+  EXPECT_EQ(kMainThreadName, process_data_phase.tasks[t1].birth.thread_name);
+  EXPECT_EQ(1, process_data_phase.tasks[t1].death_data.count);
+  EXPECT_EQ(2, process_data_phase.tasks[t1].death_data.run_duration_sum);
+  EXPECT_EQ(2, process_data_phase.tasks[t1].death_data.run_duration_max);
+  EXPECT_EQ(2, process_data_phase.tasks[t1].death_data.run_duration_sample);
+  EXPECT_EQ(1, process_data_phase.tasks[t1].death_data.queue_duration_sum);
+  EXPECT_EQ(1, process_data_phase.tasks[t1].death_data.queue_duration_max);
+  EXPECT_EQ(1, process_data_phase.tasks[t1].death_data.queue_duration_sample);
+  EXPECT_EQ(kMainThreadName, process_data_phase.tasks[t1].death_thread_name);
+  EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
+}
+
+} // namespace tracked_objects
diff --git a/libchrome/base/tracking_info.cc b/libchrome/base/tracking_info.cc
new file mode 100644
index 0000000..c02b2f4
--- /dev/null
+++ b/libchrome/base/tracking_info.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/tracking_info.h"
+
+#include <stddef.h>
+#include "base/tracked_objects.h"
+
+namespace base {
+
+TrackingInfo::TrackingInfo()
+ : birth_tally(NULL) {
+}
+
+TrackingInfo::TrackingInfo(
+ const tracked_objects::Location& posted_from,
+ base::TimeTicks delayed_run_time)
+ : birth_tally(
+ tracked_objects::ThreadData::TallyABirthIfActive(posted_from)),
+ time_posted(tracked_objects::ThreadData::Now()),
+ delayed_run_time(delayed_run_time) {
+}
+
+TrackingInfo::~TrackingInfo() {}
+
+} // namespace base
+
diff --git a/libchrome/base/tracking_info.h b/libchrome/base/tracking_info.h
new file mode 100644
index 0000000..6c3bcd1
--- /dev/null
+++ b/libchrome/base/tracking_info.h
@@ -0,0 +1,58 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a simple struct with tracking information that is stored
+// with a PendingTask (when message_loop is handling the task).
+// Only the information that is shared with the profiler in tracked_objects
+// is included in this structure.
+
+
+#ifndef BASE_TRACKING_INFO_H_
+#define BASE_TRACKING_INFO_H_
+
+#include "base/base_export.h"
+#include "base/profiler/tracked_time.h"
+#include "base/time/time.h"
+
+namespace tracked_objects {
+class Location;
+class Births;
+}
+
+namespace base {
+
+// This structure is copied around by value.
+struct BASE_EXPORT TrackingInfo {
+ TrackingInfo();
+ TrackingInfo(const tracked_objects::Location& posted_from,
+ base::TimeTicks delayed_run_time);
+ ~TrackingInfo();
+
+ // To avoid conflating our stats with the delay duration in a PostDelayedTask,
+ // we identify such tasks, and replace their post_time with the time they
+ // were scheduled (requested?) to emerge from the delayed task queue. This
+ // means that queuing delay for such tasks will show how long they went
+ // unserviced, after they *could* be serviced. This is the same stat as we
+ // have for non-delayed tasks, and we consistently call it queuing delay.
+ tracked_objects::TrackedTime EffectiveTimePosted() const {
+ return delayed_run_time.is_null()
+ ? time_posted
+ : tracked_objects::TrackedTime(delayed_run_time);
+ }
+
+ // Record of location and thread that the task came from.
+ tracked_objects::Births* birth_tally;
+
+ // Time when the related task was posted. Note that this value may be empty
+ // if task profiling is disabled, and should only be used in conjunction with
+ // profiling-related reporting.
+ tracked_objects::TrackedTime time_posted;
+
+ // The time when the task should be run.
+ base::TimeTicks delayed_run_time;
+};
+
+} // namespace base
+
+#endif // BASE_TRACKING_INFO_H_
diff --git a/libchrome/base/tuple.h b/libchrome/base/tuple.h
new file mode 100644
index 0000000..e82f2e5
--- /dev/null
+++ b/libchrome/base/tuple.h
@@ -0,0 +1,195 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Use std::tuple as tuple type. This file contains helper functions for
+// working with std::tuples.
+// The functions DispatchToMethod and DispatchToFunction take a function pointer
+// or instance and method pointer, and unpack a tuple into arguments to the
+// call.
+//
+// Example usage:
+// // These two methods of creating a Tuple are identical.
+// std::tuple<int, const char*> tuple_a(1, "wee");
+// std::tuple<int, const char*> tuple_b = std::make_tuple(1, "wee");
+//
+// void SomeFunc(int a, const char* b) { }
+// DispatchToFunction(&SomeFunc, tuple_a); // SomeFunc(1, "wee")
+// DispatchToFunction(
+// &SomeFunc, std::make_tuple(10, "foo")); // SomeFunc(10, "foo")
+//
+// struct Foo { void SomeMeth(int a, int b, int c) { } } foo;
+// DispatchToMethod(&foo, &Foo::SomeMeth, std::make_tuple(1, 2, 3));
+// // foo->SomeMeth(1, 2, 3);
+
+#ifndef BASE_TUPLE_H_
+#define BASE_TUPLE_H_
+
+#include <stddef.h>
+#include <tuple>
+
+#include "base/bind_helpers.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Index sequences
+//
+// Minimal clone of the similarly-named C++14 functionality.
+
+template <size_t...>
+struct IndexSequence {};
+
+template <size_t... Ns>
+struct MakeIndexSequenceImpl;
+
+#if defined(_PREFAST_) && defined(OS_WIN)
+
+// Work around VC++ 2013 /analyze internal compiler error:
+// https://connect.microsoft.com/VisualStudio/feedback/details/1053626
+
+template <> struct MakeIndexSequenceImpl<0> {
+ using Type = IndexSequence<>;
+};
+template <> struct MakeIndexSequenceImpl<1> {
+ using Type = IndexSequence<0>;
+};
+template <> struct MakeIndexSequenceImpl<2> {
+ using Type = IndexSequence<0,1>;
+};
+template <> struct MakeIndexSequenceImpl<3> {
+ using Type = IndexSequence<0,1,2>;
+};
+template <> struct MakeIndexSequenceImpl<4> {
+ using Type = IndexSequence<0,1,2,3>;
+};
+template <> struct MakeIndexSequenceImpl<5> {
+ using Type = IndexSequence<0,1,2,3,4>;
+};
+template <> struct MakeIndexSequenceImpl<6> {
+ using Type = IndexSequence<0,1,2,3,4,5>;
+};
+template <> struct MakeIndexSequenceImpl<7> {
+ using Type = IndexSequence<0,1,2,3,4,5,6>;
+};
+template <> struct MakeIndexSequenceImpl<8> {
+ using Type = IndexSequence<0,1,2,3,4,5,6,7>;
+};
+template <> struct MakeIndexSequenceImpl<9> {
+ using Type = IndexSequence<0,1,2,3,4,5,6,7,8>;
+};
+template <> struct MakeIndexSequenceImpl<10> {
+ using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9>;
+};
+template <> struct MakeIndexSequenceImpl<11> {
+ using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9,10>;
+};
+template <> struct MakeIndexSequenceImpl<12> {
+ using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9,10,11>;
+};
+template <> struct MakeIndexSequenceImpl<13> {
+ using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9,10,11,12>;
+};
+
+#else  // defined(_PREFAST_) && defined(OS_WIN)
+
+template <size_t... Ns>
+struct MakeIndexSequenceImpl<0, Ns...> {
+ using Type = IndexSequence<Ns...>;
+};
+
+template <size_t N, size_t... Ns>
+struct MakeIndexSequenceImpl<N, Ns...>
+ : MakeIndexSequenceImpl<N - 1, N - 1, Ns...> {};
+
+#endif  // defined(_PREFAST_) && defined(OS_WIN)
+
+// std::get() in <=libstdc++-4.6 returns an lvalue-reference for
+// rvalue-reference of a tuple, where an rvalue-reference is expected.
+template <size_t I, typename... Ts>
+typename std::tuple_element<I, std::tuple<Ts...>>::type&& get(
+ std::tuple<Ts...>&& t) {
+ using ElemType = typename std::tuple_element<I, std::tuple<Ts...>>::type;
+ return std::forward<ElemType>(std::get<I>(t));
+}
+
+template <size_t I, typename T>
+auto get(T& t) -> decltype(std::get<I>(t)) {
+ return std::get<I>(t);
+}
+
+template <size_t N>
+using MakeIndexSequence = typename MakeIndexSequenceImpl<N>::Type;
+
+// Dispatchers ----------------------------------------------------------------
+//
+// Helper functions that call the given method on an object, with the unpacked
+// tuple arguments. Notice that they all have the same number of arguments,
+// so you need only write:
+// DispatchToMethod(object, &Object::method, args);
+// This is very useful for templated dispatchers, since they don't need to know
+// what type |args| is.
+
+// Non-Static Dispatchers with no out params.
+
+template <typename ObjT, typename Method, typename... Ts, size_t... Ns>
+inline void DispatchToMethodImpl(const ObjT& obj,
+ Method method,
+ const std::tuple<Ts...>& arg,
+ IndexSequence<Ns...>) {
+ (obj->*method)(internal::Unwrap(std::get<Ns>(arg))...);
+}
+
+template <typename ObjT, typename Method, typename... Ts>
+inline void DispatchToMethod(const ObjT& obj,
+ Method method,
+ const std::tuple<Ts...>& arg) {
+ DispatchToMethodImpl(obj, method, arg, MakeIndexSequence<sizeof...(Ts)>());
+}
+
+// Static Dispatchers with no out params.
+
+template <typename Function, typename... Ts, size_t... Ns>
+inline void DispatchToFunctionImpl(Function function,
+ const std::tuple<Ts...>& arg,
+ IndexSequence<Ns...>) {
+ (*function)(internal::Unwrap(std::get<Ns>(arg))...);
+}
+
+template <typename Function, typename... Ts>
+inline void DispatchToFunction(Function function,
+ const std::tuple<Ts...>& arg) {
+ DispatchToFunctionImpl(function, arg, MakeIndexSequence<sizeof...(Ts)>());
+}
+
+// Dispatchers with out parameters.
+
+template <typename ObjT,
+ typename Method,
+ typename... InTs,
+ typename... OutTs,
+ size_t... InNs,
+ size_t... OutNs>
+inline void DispatchToMethodImpl(const ObjT& obj,
+ Method method,
+ const std::tuple<InTs...>& in,
+ std::tuple<OutTs...>* out,
+ IndexSequence<InNs...>,
+ IndexSequence<OutNs...>) {
+ (obj->*method)(internal::Unwrap(std::get<InNs>(in))...,
+ &std::get<OutNs>(*out)...);
+}
+
+template <typename ObjT, typename Method, typename... InTs, typename... OutTs>
+inline void DispatchToMethod(const ObjT& obj,
+ Method method,
+ const std::tuple<InTs...>& in,
+ std::tuple<OutTs...>* out) {
+ DispatchToMethodImpl(obj, method, in, out,
+ MakeIndexSequence<sizeof...(InTs)>(),
+ MakeIndexSequence<sizeof...(OutTs)>());
+}
+
+} // namespace base
+
+#endif // BASE_TUPLE_H_
diff --git a/libchrome/base/tuple_unittest.cc b/libchrome/base/tuple_unittest.cc
new file mode 100644
index 0000000..6f90c29
--- /dev/null
+++ b/libchrome/base/tuple_unittest.cc
@@ -0,0 +1,143 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/tuple.h"
+
+#include "base/compiler_specific.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+void DoAdd(int a, int b, int c, int* res) {
+ *res = a + b + c;
+}
+
+struct Addy {
+ Addy() { }
+ void DoAdd(int a, int b, int c, int d, int* res) {
+ *res = a + b + c + d;
+ }
+};
+
+struct Addz {
+ Addz() { }
+ void DoAdd(int a, int b, int c, int d, int e, int* res) {
+ *res = a + b + c + d + e;
+ }
+};
+
+} // namespace
+
+TEST(TupleTest, Basic) {
+ std::tuple<> t0 = std::make_tuple();
+ ALLOW_UNUSED_LOCAL(t0);
+ std::tuple<int> t1(1);
+ std::tuple<int, const char*> t2 =
+ std::make_tuple(1, static_cast<const char*>("wee"));
+ ALLOW_UNUSED_LOCAL(t2);
+ std::tuple<int, int, int> t3(1, 2, 3);
+ std::tuple<int, int, int, int*> t4(1, 2, 3, &std::get<0>(t1));
+ std::tuple<int, int, int, int, int*> t5(1, 2, 3, 4, &std::get<0>(t4));
+ std::tuple<int, int, int, int, int, int*> t6(1, 2, 3, 4, 5, &std::get<0>(t4));
+
+ EXPECT_EQ(1, std::get<0>(t1));
+ DispatchToFunction(&DoAdd, t4);
+ EXPECT_EQ(6, std::get<0>(t1));
+
+ int res = 0;
+ DispatchToFunction(&DoAdd, std::make_tuple(9, 8, 7, &res));
+ EXPECT_EQ(24, res);
+
+ Addy addy;
+ EXPECT_EQ(1, std::get<0>(t4));
+ DispatchToMethod(&addy, &Addy::DoAdd, t5);
+ EXPECT_EQ(10, std::get<0>(t4));
+
+ Addz addz;
+ EXPECT_EQ(10, std::get<0>(t4));
+ DispatchToMethod(&addz, &Addz::DoAdd, t6);
+ EXPECT_EQ(15, std::get<0>(t4));
+}
+
+namespace {
+
+struct CopyLogger {
+ CopyLogger() { ++TimesConstructed; }
+ CopyLogger(const CopyLogger& tocopy) { ++TimesConstructed; ++TimesCopied; }
+ ~CopyLogger() { }
+
+ static int TimesCopied;
+ static int TimesConstructed;
+};
+
+void SomeLoggerMethRef(const CopyLogger& logy, const CopyLogger* ptr, bool* b) {
+ *b = &logy == ptr;
+}
+
+void SomeLoggerMethCopy(CopyLogger logy, const CopyLogger* ptr, bool* b) {
+ *b = &logy == ptr;
+}
+
+int CopyLogger::TimesCopied = 0;
+int CopyLogger::TimesConstructed = 0;
+
+} // namespace
+
+TEST(TupleTest, Copying) {
+ CopyLogger logger;
+ EXPECT_EQ(0, CopyLogger::TimesCopied);
+ EXPECT_EQ(1, CopyLogger::TimesConstructed);
+
+ bool res = false;
+
+ // Creating the tuple should copy the class to store internally in the tuple.
+ std::tuple<CopyLogger, CopyLogger*, bool*> tuple(logger, &logger, &res);
+ std::get<1>(tuple) = &std::get<0>(tuple);
+ EXPECT_EQ(2, CopyLogger::TimesConstructed);
+ EXPECT_EQ(1, CopyLogger::TimesCopied);
+
+ // Our internal Logger and the one passed to the function should be the same.
+ res = false;
+ DispatchToFunction(&SomeLoggerMethRef, tuple);
+ EXPECT_TRUE(res);
+ EXPECT_EQ(2, CopyLogger::TimesConstructed);
+ EXPECT_EQ(1, CopyLogger::TimesCopied);
+
+ // Now they should be different, since the function call will make a copy.
+ res = false;
+ DispatchToFunction(&SomeLoggerMethCopy, tuple);
+ EXPECT_FALSE(res);
+ EXPECT_EQ(3, CopyLogger::TimesConstructed);
+ EXPECT_EQ(2, CopyLogger::TimesCopied);
+}
+
+TEST(TupleTest, Get) {
+ int i = 1;
+ int j = 2;
+ std::tuple<int, int&, int&&> t(3, i, std::move(j));
+ EXPECT_TRUE((std::is_same<int&, decltype(base::get<0>(t))>::value));
+ EXPECT_EQ(3, base::get<0>(t));
+
+ EXPECT_TRUE((std::is_same<int&, decltype(base::get<1>(t))>::value));
+ EXPECT_EQ(1, base::get<1>(t));
+
+ EXPECT_TRUE((std::is_same<int&, decltype(base::get<2>(t))>::value));
+ EXPECT_EQ(2, base::get<2>(t));
+
+ EXPECT_TRUE((std::is_same<int&&,
+ decltype(base::get<0>(std::move(t)))>::value));
+ EXPECT_EQ(3, base::get<0>(std::move(t)));
+
+ EXPECT_TRUE((std::is_same<int&,
+ decltype(base::get<1>(std::move(t)))>::value));
+ EXPECT_EQ(1, base::get<1>(std::move(t)));
+
+ EXPECT_TRUE((std::is_same<int&&,
+ decltype(base::get<2>(std::move(t)))>::value));
+ EXPECT_EQ(2, base::get<2>(std::move(t)));
+}
+
+} // namespace base
diff --git a/libchrome/base/values.cc b/libchrome/base/values.cc
new file mode 100644
index 0000000..d579699
--- /dev/null
+++ b/libchrome/base/values.cc
@@ -0,0 +1,1169 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/values.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <cmath>
+#include <ostream>
+#include <utility>
+
+#include "base/json/json_writer.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+
+namespace base {
+
+namespace {
+
+std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node);
+
+// Make a deep copy of |node|, but don't include empty lists or dictionaries
+// in the copy. It's possible for this function to return NULL and it
+// expects |node| to always be non-NULL.
+std::unique_ptr<ListValue> CopyListWithoutEmptyChildren(const ListValue& list) {
+ std::unique_ptr<ListValue> copy;
+ for (const auto& entry : list) {
+ std::unique_ptr<Value> child_copy = CopyWithoutEmptyChildren(*entry);
+ if (child_copy) {
+ if (!copy)
+ copy.reset(new ListValue);
+ copy->Append(std::move(child_copy));
+ }
+ }
+ return copy;
+}
+
+std::unique_ptr<DictionaryValue> CopyDictionaryWithoutEmptyChildren(
+ const DictionaryValue& dict) {
+ std::unique_ptr<DictionaryValue> copy;
+ for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
+ std::unique_ptr<Value> child_copy = CopyWithoutEmptyChildren(it.value());
+ if (child_copy) {
+ if (!copy)
+ copy.reset(new DictionaryValue);
+ copy->SetWithoutPathExpansion(it.key(), std::move(child_copy));
+ }
+ }
+ return copy;
+}
+
+std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node) {
+ switch (node.GetType()) {
+ case Value::TYPE_LIST:
+ return CopyListWithoutEmptyChildren(static_cast<const ListValue&>(node));
+
+ case Value::TYPE_DICTIONARY:
+ return CopyDictionaryWithoutEmptyChildren(
+ static_cast<const DictionaryValue&>(node));
+
+ default:
+ return node.CreateDeepCopy();
+ }
+}
+
+} // namespace
+
Value::~Value() {
}

// static
// The canonical way to obtain a TYPE_NULL value; the Value(Type) constructor
// used here is not public API for callers.
std::unique_ptr<Value> Value::CreateNullValue() {
  return WrapUnique(new Value(TYPE_NULL));
}

// Base-class defaults: every GetAs*() conversion fails. Each subclass
// overrides only the accessor(s) matching the type it actually stores.
bool Value::GetAsBinary(const BinaryValue**) const {
  return false;
}

bool Value::GetAsBoolean(bool*) const {
  return false;
}

bool Value::GetAsInteger(int*) const {
  return false;
}

bool Value::GetAsDouble(double*) const {
  return false;
}

bool Value::GetAsString(std::string*) const {
  return false;
}

bool Value::GetAsString(string16*) const {
  return false;
}

bool Value::GetAsString(const StringValue**) const {
  return false;
}

bool Value::GetAsList(ListValue**) {
  return false;
}

bool Value::GetAsList(const ListValue**) const {
  return false;
}

bool Value::GetAsDictionary(DictionaryValue**) {
  return false;
}

bool Value::GetAsDictionary(const DictionaryValue**) const {
  return false;
}

// Returns a raw owning pointer; prefer CreateDeepCopy() for new code.
Value* Value::DeepCopy() const {
  // This method should only be getting called for null Values--all subclasses
  // need to provide their own implementation.
  DCHECK(IsType(TYPE_NULL));
  return CreateNullValue().release();
}

std::unique_ptr<Value> Value::CreateDeepCopy() const {
  return WrapUnique(DeepCopy());
}

bool Value::Equals(const Value* other) const {
  // This method should only be getting called for null Values--all subclasses
  // need to provide their own implementation.
  DCHECK(IsType(TYPE_NULL));
  return other->IsType(TYPE_NULL);
}

// static
// Null-pointer-tolerant comparison: two nulls compare equal, exactly one
// null compares unequal; otherwise defers to the virtual Equals().
bool Value::Equals(const Value* a, const Value* b) {
  if ((a == NULL) && (b == NULL)) return true;
  if ((a == NULL) ^ (b == NULL)) return false;
  return a->Equals(b);
}

Value::Value(Type type) : type_(type) {}

// Copying/assigning a base Value only transfers the type tag; subclasses own
// any payload.
Value::Value(const Value& that) : type_(that.type_) {}

Value& Value::operator=(const Value& that) {
  type_ = that.type_;
  return *this;
}
+
+///////////////////// FundamentalValue ////////////////////
+
// FundamentalValue stores exactly one of bool/int/double in a union-like set
// of members; type_ (set in the Value base) selects which one is live.
FundamentalValue::FundamentalValue(bool in_value)
    : Value(TYPE_BOOLEAN), boolean_value_(in_value) {
}

FundamentalValue::FundamentalValue(int in_value)
    : Value(TYPE_INTEGER), integer_value_(in_value) {
}

FundamentalValue::FundamentalValue(double in_value)
    : Value(TYPE_DOUBLE), double_value_(in_value) {
  // JSON cannot represent NaN/Inf; clamp to 0.0 after flagging the bug.
  if (!std::isfinite(double_value_)) {
    NOTREACHED() << "Non-finite (i.e. NaN or positive/negative infinity) "
                 << "values cannot be represented in JSON";
    double_value_ = 0.0;
  }
}

FundamentalValue::~FundamentalValue() {
}

bool FundamentalValue::GetAsBoolean(bool* out_value) const {
  if (out_value && IsType(TYPE_BOOLEAN))
    *out_value = boolean_value_;
  return (IsType(TYPE_BOOLEAN));
}

bool FundamentalValue::GetAsInteger(int* out_value) const {
  if (out_value && IsType(TYPE_INTEGER))
    *out_value = integer_value_;
  return (IsType(TYPE_INTEGER));
}

// Note: unlike the other accessors, this also succeeds for TYPE_INTEGER,
// implicitly widening the stored int to double.
bool FundamentalValue::GetAsDouble(double* out_value) const {
  if (out_value && IsType(TYPE_DOUBLE))
    *out_value = double_value_;
  else if (out_value && IsType(TYPE_INTEGER))
    *out_value = integer_value_;
  return (IsType(TYPE_DOUBLE) || IsType(TYPE_INTEGER));
}

FundamentalValue* FundamentalValue::DeepCopy() const {
  switch (GetType()) {
    case TYPE_BOOLEAN:
      return new FundamentalValue(boolean_value_);

    case TYPE_INTEGER:
      return new FundamentalValue(integer_value_);

    case TYPE_DOUBLE:
      return new FundamentalValue(double_value_);

    default:
      NOTREACHED();
      return NULL;
  }
}

bool FundamentalValue::Equals(const Value* other) const {
  if (other->GetType() != GetType())
    return false;

  switch (GetType()) {
    case TYPE_BOOLEAN: {
      bool lhs, rhs;
      return GetAsBoolean(&lhs) && other->GetAsBoolean(&rhs) && lhs == rhs;
    }
    case TYPE_INTEGER: {
      int lhs, rhs;
      return GetAsInteger(&lhs) && other->GetAsInteger(&rhs) && lhs == rhs;
    }
    case TYPE_DOUBLE: {
      // Exact bitwise-equal doubles only; no epsilon comparison.
      double lhs, rhs;
      return GetAsDouble(&lhs) && other->GetAsDouble(&rhs) && lhs == rhs;
    }
    default:
      NOTREACHED();
      return false;
  }
}
+
+///////////////////// StringValue ////////////////////
+
// StringValue stores UTF-8 internally; the string16 constructor/accessors
// convert at the boundary.
StringValue::StringValue(const std::string& in_value)
    : Value(TYPE_STRING),
      value_(in_value) {
  DCHECK(IsStringUTF8(in_value));
}

StringValue::StringValue(const string16& in_value)
    : Value(TYPE_STRING),
      value_(UTF16ToUTF8(in_value)) {
}

StringValue::~StringValue() {
}

// Mutable access to the underlying UTF-8 storage.
std::string* StringValue::GetString() {
  return &value_;
}

const std::string& StringValue::GetString() const {
  return value_;
}

bool StringValue::GetAsString(std::string* out_value) const {
  if (out_value)
    *out_value = value_;
  return true;
}

bool StringValue::GetAsString(string16* out_value) const {
  if (out_value)
    *out_value = UTF8ToUTF16(value_);
  return true;
}

bool StringValue::GetAsString(const StringValue** out_value) const {
  if (out_value)
    *out_value = this;
  return true;
}

StringValue* StringValue::DeepCopy() const {
  return new StringValue(value_);
}

bool StringValue::Equals(const Value* other) const {
  if (other->GetType() != GetType())
    return false;
  std::string lhs, rhs;
  return GetAsString(&lhs) && other->GetAsString(&rhs) && lhs == rhs;
}
+
+///////////////////// BinaryValue ////////////////////
+
// A default-constructed BinaryValue holds no buffer (null) and size 0.
BinaryValue::BinaryValue()
    : Value(TYPE_BINARY),
      size_(0) {
}

// Takes ownership of |buffer|; |size| is the number of valid bytes in it.
BinaryValue::BinaryValue(std::unique_ptr<char[]> buffer, size_t size)
    : Value(TYPE_BINARY), buffer_(std::move(buffer)), size_(size) {}

BinaryValue::~BinaryValue() {
}

// static
// Copies |size| bytes out of |buffer|; the caller keeps ownership of the
// original buffer.
std::unique_ptr<BinaryValue> BinaryValue::CreateWithCopiedBuffer(
    const char* buffer,
    size_t size) {
  std::unique_ptr<char[]> buffer_copy(new char[size]);
  memcpy(buffer_copy.get(), buffer, size);
  return base::MakeUnique<BinaryValue>(std::move(buffer_copy), size);
}

bool BinaryValue::GetAsBinary(const BinaryValue** out_value) const {
  if (out_value)
    *out_value = this;
  return true;
}

BinaryValue* BinaryValue::DeepCopy() const {
  return CreateWithCopiedBuffer(buffer_.get(), size_).release();
}
+
+bool BinaryValue::Equals(const Value* other) const {
+ if (other->GetType() != GetType())
+ return false;
+ const BinaryValue* other_binary = static_cast<const BinaryValue*>(other);
+ if (other_binary->size_ != size_)
+ return false;
+ return !memcmp(GetBuffer(), other_binary->GetBuffer(), size_);
+}
+
+///////////////////// DictionaryValue ////////////////////
+
// static
// Downcasting factory: converts an owned Value into an owned DictionaryValue,
// or returns null (destroying |value|) when it is not a dictionary.
std::unique_ptr<DictionaryValue> DictionaryValue::From(
    std::unique_ptr<Value> value) {
  DictionaryValue* out;
  if (value && value->GetAsDictionary(&out)) {
    ignore_result(value.release());
    return WrapUnique(out);
  }
  return nullptr;
}

DictionaryValue::DictionaryValue()
    : Value(TYPE_DICTIONARY) {
}

DictionaryValue::~DictionaryValue() {
  Clear();
}

bool DictionaryValue::GetAsDictionary(DictionaryValue** out_value) {
  if (out_value)
    *out_value = this;
  return true;
}

bool DictionaryValue::GetAsDictionary(const DictionaryValue** out_value) const {
  if (out_value)
    *out_value = this;
  return true;
}

// Exact-key membership test; no '.' path expansion.
bool DictionaryValue::HasKey(const std::string& key) const {
  DCHECK(IsStringUTF8(key));
  auto current_entry = dictionary_.find(key);
  DCHECK((current_entry == dictionary_.end()) || current_entry->second);
  return current_entry != dictionary_.end();
}

void DictionaryValue::Clear() {
  dictionary_.clear();
}

// Stores |in_value| at |path|, where '.' separates nested dictionary keys.
// Intermediate dictionaries are created on demand; a non-dictionary found at
// an intermediate key is replaced by a new dictionary.
void DictionaryValue::Set(const std::string& path,
                          std::unique_ptr<Value> in_value) {
  DCHECK(IsStringUTF8(path));
  DCHECK(in_value);

  std::string current_path(path);
  DictionaryValue* current_dictionary = this;
  for (size_t delimiter_position = current_path.find('.');
       delimiter_position != std::string::npos;
       delimiter_position = current_path.find('.')) {
    // Assume that we're indexing into a dictionary.
    std::string key(current_path, 0, delimiter_position);
    DictionaryValue* child_dictionary = NULL;
    if (!current_dictionary->GetDictionary(key, &child_dictionary)) {
      child_dictionary = new DictionaryValue;
      current_dictionary->SetWithoutPathExpansion(key, child_dictionary);
    }

    current_dictionary = child_dictionary;
    current_path.erase(0, delimiter_position + 1);
  }

  current_dictionary->SetWithoutPathExpansion(current_path,
                                              std::move(in_value));
}

// Raw-pointer overload: takes ownership of |in_value|.
void DictionaryValue::Set(const std::string& path, Value* in_value) {
  Set(path, WrapUnique(in_value));
}

// Convenience setters wrapping the primitive Value subclasses.
void DictionaryValue::SetBoolean(const std::string& path, bool in_value) {
  Set(path, new FundamentalValue(in_value));
}

void DictionaryValue::SetInteger(const std::string& path, int in_value) {
  Set(path, new FundamentalValue(in_value));
}

void DictionaryValue::SetDouble(const std::string& path, double in_value) {
  Set(path, new FundamentalValue(in_value));
}

void DictionaryValue::SetString(const std::string& path,
                                const std::string& in_value) {
  Set(path, new StringValue(in_value));
}

void DictionaryValue::SetString(const std::string& path,
                                const string16& in_value) {
  Set(path, new StringValue(in_value));
}

// Stores under the literal |key|; '.' has no special meaning here. Replaces
// any existing entry.
void DictionaryValue::SetWithoutPathExpansion(const std::string& key,
                                              std::unique_ptr<Value> in_value) {
  dictionary_[key] = std::move(in_value);
}

void DictionaryValue::SetWithoutPathExpansion(const std::string& key,
                                              Value* in_value) {
  SetWithoutPathExpansion(key, WrapUnique(in_value));
}

void DictionaryValue::SetBooleanWithoutPathExpansion(
    const std::string& path, bool in_value) {
  SetWithoutPathExpansion(path, new FundamentalValue(in_value));
}

void DictionaryValue::SetIntegerWithoutPathExpansion(
    const std::string& path, int in_value) {
  SetWithoutPathExpansion(path, new FundamentalValue(in_value));
}

void DictionaryValue::SetDoubleWithoutPathExpansion(
    const std::string& path, double in_value) {
  SetWithoutPathExpansion(path, new FundamentalValue(in_value));
}

void DictionaryValue::SetStringWithoutPathExpansion(
    const std::string& path, const std::string& in_value) {
  SetWithoutPathExpansion(path, new StringValue(in_value));
}

void DictionaryValue::SetStringWithoutPathExpansion(
    const std::string& path, const string16& in_value) {
  SetWithoutPathExpansion(path, new StringValue(in_value));
}
+
// Looks up |path| with '.' expansion, walking nested dictionaries. Returns
// false when any intermediate component is missing or not a dictionary.
// |out_value| (may be null) receives a pointer owned by this dictionary.
bool DictionaryValue::Get(StringPiece path,
                          const Value** out_value) const {
  DCHECK(IsStringUTF8(path));
  StringPiece current_path(path);
  const DictionaryValue* current_dictionary = this;
  for (size_t delimiter_position = current_path.find('.');
       delimiter_position != std::string::npos;
       delimiter_position = current_path.find('.')) {
    const DictionaryValue* child_dictionary = NULL;
    if (!current_dictionary->GetDictionaryWithoutPathExpansion(
            current_path.substr(0, delimiter_position).as_string(),
            &child_dictionary)) {
      return false;
    }

    current_dictionary = child_dictionary;
    current_path = current_path.substr(delimiter_position + 1);
  }

  return current_dictionary->GetWithoutPathExpansion(current_path.as_string(),
                                                     out_value);
}

// Non-const overload implemented via the const one plus const_cast.
bool DictionaryValue::Get(StringPiece path, Value** out_value) {
  return static_cast<const DictionaryValue&>(*this).Get(
      path,
      const_cast<const Value**>(out_value));
}
+
// Typed getters: each does a path-expanded Get() and then a type-checked
// conversion; both steps must succeed.
bool DictionaryValue::GetBoolean(const std::string& path,
                                 bool* bool_value) const {
  const Value* value;
  if (!Get(path, &value))
    return false;

  return value->GetAsBoolean(bool_value);
}

bool DictionaryValue::GetInteger(const std::string& path,
                                 int* out_value) const {
  const Value* value;
  if (!Get(path, &value))
    return false;

  return value->GetAsInteger(out_value);
}

bool DictionaryValue::GetDouble(const std::string& path,
                                double* out_value) const {
  const Value* value;
  if (!Get(path, &value))
    return false;

  return value->GetAsDouble(out_value);
}

bool DictionaryValue::GetString(const std::string& path,
                                std::string* out_value) const {
  const Value* value;
  if (!Get(path, &value))
    return false;

  return value->GetAsString(out_value);
}

bool DictionaryValue::GetString(const std::string& path,
                                string16* out_value) const {
  const Value* value;
  if (!Get(path, &value))
    return false;

  return value->GetAsString(out_value);
}

// Like GetString() but additionally requires the value to be pure ASCII;
// a non-ASCII string is treated as a programming error (NOTREACHED).
bool DictionaryValue::GetStringASCII(const std::string& path,
                                     std::string* out_value) const {
  std::string out;
  if (!GetString(path, &out))
    return false;

  if (!IsStringASCII(out)) {
    NOTREACHED();
    return false;
  }

  out_value->assign(out);
  return true;
}

bool DictionaryValue::GetBinary(const std::string& path,
                                const BinaryValue** out_value) const {
  const Value* value;
  bool result = Get(path, &value);
  if (!result || !value->IsType(TYPE_BINARY))
    return false;

  if (out_value)
    *out_value = static_cast<const BinaryValue*>(value);

  return true;
}

bool DictionaryValue::GetBinary(const std::string& path,
                                BinaryValue** out_value) {
  return static_cast<const DictionaryValue&>(*this).GetBinary(
      path,
      const_cast<const BinaryValue**>(out_value));
}
+
// Path-expanded lookup that additionally requires a dictionary result.
bool DictionaryValue::GetDictionary(StringPiece path,
                                    const DictionaryValue** out_value) const {
  const Value* value;
  bool result = Get(path, &value);
  if (!result || !value->IsType(TYPE_DICTIONARY))
    return false;

  if (out_value)
    *out_value = static_cast<const DictionaryValue*>(value);

  return true;
}

bool DictionaryValue::GetDictionary(StringPiece path,
                                    DictionaryValue** out_value) {
  return static_cast<const DictionaryValue&>(*this).GetDictionary(
      path,
      const_cast<const DictionaryValue**>(out_value));
}

// Path-expanded lookup that additionally requires a list result.
bool DictionaryValue::GetList(const std::string& path,
                              const ListValue** out_value) const {
  const Value* value;
  bool result = Get(path, &value);
  if (!result || !value->IsType(TYPE_LIST))
    return false;

  if (out_value)
    *out_value = static_cast<const ListValue*>(value);

  return true;
}

bool DictionaryValue::GetList(const std::string& path, ListValue** out_value) {
  return static_cast<const DictionaryValue&>(*this).GetList(
      path,
      const_cast<const ListValue**>(out_value));
}
+
// Literal-key lookup; '.' has no special meaning. |out_value| (may be null)
// receives a pointer owned by this dictionary.
bool DictionaryValue::GetWithoutPathExpansion(const std::string& key,
                                              const Value** out_value) const {
  DCHECK(IsStringUTF8(key));
  auto entry_iterator = dictionary_.find(key);
  if (entry_iterator == dictionary_.end())
    return false;

  if (out_value)
    *out_value = entry_iterator->second.get();
  return true;
}

bool DictionaryValue::GetWithoutPathExpansion(const std::string& key,
                                              Value** out_value) {
  return static_cast<const DictionaryValue&>(*this).GetWithoutPathExpansion(
      key,
      const_cast<const Value**>(out_value));
}

// Typed literal-key getters: lookup plus type-checked conversion.
bool DictionaryValue::GetBooleanWithoutPathExpansion(const std::string& key,
                                                     bool* out_value) const {
  const Value* value;
  if (!GetWithoutPathExpansion(key, &value))
    return false;

  return value->GetAsBoolean(out_value);
}

bool DictionaryValue::GetIntegerWithoutPathExpansion(const std::string& key,
                                                     int* out_value) const {
  const Value* value;
  if (!GetWithoutPathExpansion(key, &value))
    return false;

  return value->GetAsInteger(out_value);
}

bool DictionaryValue::GetDoubleWithoutPathExpansion(const std::string& key,
                                                    double* out_value) const {
  const Value* value;
  if (!GetWithoutPathExpansion(key, &value))
    return false;

  return value->GetAsDouble(out_value);
}

bool DictionaryValue::GetStringWithoutPathExpansion(
    const std::string& key,
    std::string* out_value) const {
  const Value* value;
  if (!GetWithoutPathExpansion(key, &value))
    return false;

  return value->GetAsString(out_value);
}

bool DictionaryValue::GetStringWithoutPathExpansion(const std::string& key,
                                                    string16* out_value) const {
  const Value* value;
  if (!GetWithoutPathExpansion(key, &value))
    return false;

  return value->GetAsString(out_value);
}

bool DictionaryValue::GetDictionaryWithoutPathExpansion(
    const std::string& key,
    const DictionaryValue** out_value) const {
  const Value* value;
  bool result = GetWithoutPathExpansion(key, &value);
  if (!result || !value->IsType(TYPE_DICTIONARY))
    return false;

  if (out_value)
    *out_value = static_cast<const DictionaryValue*>(value);

  return true;
}

bool DictionaryValue::GetDictionaryWithoutPathExpansion(
    const std::string& key,
    DictionaryValue** out_value) {
  const DictionaryValue& const_this =
      static_cast<const DictionaryValue&>(*this);
  return const_this.GetDictionaryWithoutPathExpansion(
      key,
      const_cast<const DictionaryValue**>(out_value));
}

bool DictionaryValue::GetListWithoutPathExpansion(
    const std::string& key,
    const ListValue** out_value) const {
  const Value* value;
  bool result = GetWithoutPathExpansion(key, &value);
  if (!result || !value->IsType(TYPE_LIST))
    return false;

  if (out_value)
    *out_value = static_cast<const ListValue*>(value);

  return true;
}

bool DictionaryValue::GetListWithoutPathExpansion(const std::string& key,
                                                  ListValue** out_value) {
  return
      static_cast<const DictionaryValue&>(*this).GetListWithoutPathExpansion(
          key,
          const_cast<const ListValue**>(out_value));
}
+
+bool DictionaryValue::Remove(const std::string& path,
+ std::unique_ptr<Value>* out_value) {
+ DCHECK(IsStringUTF8(path));
+ std::string current_path(path);
+ DictionaryValue* current_dictionary = this;
+ size_t delimiter_position = current_path.rfind('.');
+ if (delimiter_position != std::string::npos) {
+ if (!GetDictionary(current_path.substr(0, delimiter_position),
+ ¤t_dictionary))
+ return false;
+ current_path.erase(0, delimiter_position + 1);
+ }
+
+ return current_dictionary->RemoveWithoutPathExpansion(current_path,
+ out_value);
+}
+
// Removes the literal |key|; ownership of the removed value moves into
// |out_value| when it is non-null, otherwise the value is destroyed.
bool DictionaryValue::RemoveWithoutPathExpansion(
    const std::string& key,
    std::unique_ptr<Value>* out_value) {
  DCHECK(IsStringUTF8(key));
  auto entry_iterator = dictionary_.find(key);
  if (entry_iterator == dictionary_.end())
    return false;

  if (out_value)
    *out_value = std::move(entry_iterator->second);
  dictionary_.erase(entry_iterator);
  return true;
}

// Like Remove(), but additionally prunes any intermediate dictionary that
// becomes empty as a result of the removal.
bool DictionaryValue::RemovePath(const std::string& path,
                                 std::unique_ptr<Value>* out_value) {
  bool result = false;
  size_t delimiter_position = path.find('.');

  if (delimiter_position == std::string::npos)
    return RemoveWithoutPathExpansion(path, out_value);

  const std::string subdict_path = path.substr(0, delimiter_position);
  DictionaryValue* subdict = NULL;
  if (!GetDictionary(subdict_path, &subdict))
    return false;
  result = subdict->RemovePath(path.substr(delimiter_position + 1),
                               out_value);
  // Prune the sub-dictionary if the recursive removal left it empty.
  if (result && subdict->empty())
    RemoveWithoutPathExpansion(subdict_path, NULL);

  return result;
}
+
// Deep copy that drops empty child lists/dictionaries; always returns a
// (possibly empty) dictionary, never null.
std::unique_ptr<DictionaryValue> DictionaryValue::DeepCopyWithoutEmptyChildren()
    const {
  std::unique_ptr<DictionaryValue> copy =
      CopyDictionaryWithoutEmptyChildren(*this);
  if (!copy)
    copy.reset(new DictionaryValue);
  return copy;
}

// Recursively merges |dictionary| into this one: dictionary-vs-dictionary
// conflicts merge recursively, any other conflict is overwritten with a deep
// copy of the incoming value.
void DictionaryValue::MergeDictionary(const DictionaryValue* dictionary) {
  for (DictionaryValue::Iterator it(*dictionary); !it.IsAtEnd(); it.Advance()) {
    const Value* merge_value = &it.value();
    // Check whether we have to merge dictionaries.
    if (merge_value->IsType(Value::TYPE_DICTIONARY)) {
      DictionaryValue* sub_dict;
      if (GetDictionaryWithoutPathExpansion(it.key(), &sub_dict)) {
        sub_dict->MergeDictionary(
            static_cast<const DictionaryValue*>(merge_value));
        continue;
      }
    }
    // All other cases: Make a copy and hook it up.
    SetWithoutPathExpansion(it.key(), merge_value->DeepCopy());
  }
}

void DictionaryValue::Swap(DictionaryValue* other) {
  dictionary_.swap(other->dictionary_);
}

// Read-only iteration over the underlying map, in its iteration order.
DictionaryValue::Iterator::Iterator(const DictionaryValue& target)
    : target_(target),
      it_(target.dictionary_.begin()) {}

DictionaryValue::Iterator::Iterator(const Iterator& other) = default;

DictionaryValue::Iterator::~Iterator() {}

DictionaryValue* DictionaryValue::DeepCopy() const {
  DictionaryValue* result = new DictionaryValue;

  for (const auto& current_entry : dictionary_) {
    result->SetWithoutPathExpansion(current_entry.first,
                                    current_entry.second->CreateDeepCopy());
  }

  return result;
}

std::unique_ptr<DictionaryValue> DictionaryValue::CreateDeepCopy() const {
  return WrapUnique(DeepCopy());
}

// Two dictionaries are equal when they contain the same keys, in the same
// iteration order, mapping to Equals()-equal values.
bool DictionaryValue::Equals(const Value* other) const {
  if (other->GetType() != GetType())
    return false;

  const DictionaryValue* other_dict =
      static_cast<const DictionaryValue*>(other);
  Iterator lhs_it(*this);
  Iterator rhs_it(*other_dict);
  while (!lhs_it.IsAtEnd() && !rhs_it.IsAtEnd()) {
    if (lhs_it.key() != rhs_it.key() ||
        !lhs_it.value().Equals(&rhs_it.value())) {
      return false;
    }
    lhs_it.Advance();
    rhs_it.Advance();
  }
  // One side longer than the other means unequal.
  if (!lhs_it.IsAtEnd() || !rhs_it.IsAtEnd())
    return false;

  return true;
}
+
+///////////////////// ListValue ////////////////////
+
// static
// Downcasting factory: converts an owned Value into an owned ListValue, or
// returns null (destroying |value|) when it is not a list.
std::unique_ptr<ListValue> ListValue::From(std::unique_ptr<Value> value) {
  ListValue* out;
  if (value && value->GetAsList(&out)) {
    ignore_result(value.release());
    return WrapUnique(out);
  }
  return nullptr;
}

ListValue::ListValue() : Value(TYPE_LIST) {
}

ListValue::~ListValue() {
  Clear();
}

void ListValue::Clear() {
  list_.clear();
}

// Raw-pointer overload: takes ownership of |in_value|.
bool ListValue::Set(size_t index, Value* in_value) {
  return Set(index, WrapUnique(in_value));
}

// Stores |in_value| at |index|, padding with null values when |index| is past
// the current end. Only fails for a null |in_value|.
bool ListValue::Set(size_t index, std::unique_ptr<Value> in_value) {
  if (!in_value)
    return false;

  if (index >= list_.size()) {
    // Pad out any intermediate indexes with null settings
    while (index > list_.size())
      Append(CreateNullValue());
    Append(std::move(in_value));
  } else {
    // TODO(dcheng): remove this DCHECK once the raw pointer version is removed?
    DCHECK(list_[index] != in_value);
    list_[index] = std::move(in_value);
  }
  return true;
}
+
// Bounds-checked element access; |out_value| (may be null) receives a
// pointer owned by this list.
bool ListValue::Get(size_t index, const Value** out_value) const {
  if (index >= list_.size())
    return false;

  if (out_value)
    *out_value = list_[index].get();

  return true;
}

bool ListValue::Get(size_t index, Value** out_value) {
  return static_cast<const ListValue&>(*this).Get(
      index,
      const_cast<const Value**>(out_value));
}

// Typed getters: bounds-checked Get() plus type-checked conversion.
bool ListValue::GetBoolean(size_t index, bool* bool_value) const {
  const Value* value;
  if (!Get(index, &value))
    return false;

  return value->GetAsBoolean(bool_value);
}

bool ListValue::GetInteger(size_t index, int* out_value) const {
  const Value* value;
  if (!Get(index, &value))
    return false;

  return value->GetAsInteger(out_value);
}

bool ListValue::GetDouble(size_t index, double* out_value) const {
  const Value* value;
  if (!Get(index, &value))
    return false;

  return value->GetAsDouble(out_value);
}

bool ListValue::GetString(size_t index, std::string* out_value) const {
  const Value* value;
  if (!Get(index, &value))
    return false;

  return value->GetAsString(out_value);
}

bool ListValue::GetString(size_t index, string16* out_value) const {
  const Value* value;
  if (!Get(index, &value))
    return false;

  return value->GetAsString(out_value);
}

bool ListValue::GetBinary(size_t index, const BinaryValue** out_value) const {
  const Value* value;
  bool result = Get(index, &value);
  if (!result || !value->IsType(TYPE_BINARY))
    return false;

  if (out_value)
    *out_value = static_cast<const BinaryValue*>(value);

  return true;
}

bool ListValue::GetBinary(size_t index, BinaryValue** out_value) {
  return static_cast<const ListValue&>(*this).GetBinary(
      index,
      const_cast<const BinaryValue**>(out_value));
}

bool ListValue::GetDictionary(size_t index,
                              const DictionaryValue** out_value) const {
  const Value* value;
  bool result = Get(index, &value);
  if (!result || !value->IsType(TYPE_DICTIONARY))
    return false;

  if (out_value)
    *out_value = static_cast<const DictionaryValue*>(value);

  return true;
}

bool ListValue::GetDictionary(size_t index, DictionaryValue** out_value) {
  return static_cast<const ListValue&>(*this).GetDictionary(
      index,
      const_cast<const DictionaryValue**>(out_value));
}

bool ListValue::GetList(size_t index, const ListValue** out_value) const {
  const Value* value;
  bool result = Get(index, &value);
  if (!result || !value->IsType(TYPE_LIST))
    return false;

  if (out_value)
    *out_value = static_cast<const ListValue*>(value);

  return true;
}

bool ListValue::GetList(size_t index, ListValue** out_value) {
  return static_cast<const ListValue&>(*this).GetList(
      index,
      const_cast<const ListValue**>(out_value));
}
+
// Removes the element at |index|, shifting later elements down. Ownership of
// the removed value moves into |out_value| when it is non-null.
bool ListValue::Remove(size_t index, std::unique_ptr<Value>* out_value) {
  if (index >= list_.size())
    return false;

  if (out_value)
    *out_value = std::move(list_[index]);

  list_.erase(list_.begin() + index);
  return true;
}

// Removes the first element that Equals() |value|; on success |index| (may be
// null) receives the position it occupied.
bool ListValue::Remove(const Value& value, size_t* index) {
  for (auto it = list_.begin(); it != list_.end(); ++it) {
    if ((*it)->Equals(&value)) {
      size_t previous_index = it - list_.begin();
      list_.erase(it);

      if (index)
        *index = previous_index;
      return true;
    }
  }
  return false;
}

// Erases the element at |iter|, returning the iterator to its successor.
ListValue::iterator ListValue::Erase(iterator iter,
                                     std::unique_ptr<Value>* out_value) {
  if (out_value)
    // Convert the public iterator back to the underlying storage iterator so
    // the element can be moved out before erase() destroys it.
    *out_value = std::move(*Storage::iterator(iter));

  return list_.erase(iter);
}
+
void ListValue::Append(std::unique_ptr<Value> in_value) {
  list_.push_back(std::move(in_value));
}

// Raw-pointer overload: takes ownership of |in_value|; must be non-null.
void ListValue::Append(Value* in_value) {
  DCHECK(in_value);
  Append(WrapUnique(in_value));
}

// Convenience appenders wrapping the primitive Value subclasses.
void ListValue::AppendBoolean(bool in_value) {
  Append(new FundamentalValue(in_value));
}

void ListValue::AppendInteger(int in_value) {
  Append(new FundamentalValue(in_value));
}

void ListValue::AppendDouble(double in_value) {
  Append(new FundamentalValue(in_value));
}

void ListValue::AppendString(const std::string& in_value) {
  Append(new StringValue(in_value));
}

void ListValue::AppendString(const string16& in_value) {
  Append(new StringValue(in_value));
}

void ListValue::AppendStrings(const std::vector<std::string>& in_values) {
  for (std::vector<std::string>::const_iterator it = in_values.begin();
       it != in_values.end(); ++it) {
    AppendString(*it);
  }
}

void ListValue::AppendStrings(const std::vector<string16>& in_values) {
  for (std::vector<string16>::const_iterator it = in_values.begin();
       it != in_values.end(); ++it) {
    AppendString(*it);
  }
}

// Takes ownership of |in_value| in both outcomes: appends it when no equal
// element exists, otherwise deletes it and returns false.
bool ListValue::AppendIfNotPresent(Value* in_value) {
  DCHECK(in_value);
  for (const auto& entry : list_) {
    if (entry->Equals(in_value)) {
      delete in_value;
      return false;
    }
  }
  list_.emplace_back(in_value);
  return true;
}

// Inserts |in_value| (ownership taken) before |index|; fails (without taking
// ownership... NOTE(review): on failure |in_value| appears to leak — confirm
// callers never pass an out-of-range index) when |index| exceeds the size.
bool ListValue::Insert(size_t index, Value* in_value) {
  DCHECK(in_value);
  if (index > list_.size())
    return false;

  list_.insert(list_.begin() + index, WrapUnique(in_value));
  return true;
}

// Returns an iterator to the first element Equals() to |value|, or end().
ListValue::const_iterator ListValue::Find(const Value& value) const {
  return std::find_if(list_.begin(), list_.end(),
                      [&value](const std::unique_ptr<Value>& entry) {
                        return entry->Equals(&value);
                      });
}
+
void ListValue::Swap(ListValue* other) {
  list_.swap(other->list_);
}

// A ListValue always satisfies GetAsList(), returning itself.
bool ListValue::GetAsList(ListValue** out_value) {
  if (out_value)
    *out_value = this;
  return true;
}

bool ListValue::GetAsList(const ListValue** out_value) const {
  if (out_value)
    *out_value = this;
  return true;
}

// Returns a raw owning pointer; prefer CreateDeepCopy() for new code.
ListValue* ListValue::DeepCopy() const {
  ListValue* result = new ListValue;

  for (const auto& entry : list_)
    result->Append(entry->CreateDeepCopy());

  return result;
}

std::unique_ptr<ListValue> ListValue::CreateDeepCopy() const {
  return WrapUnique(DeepCopy());
}
+
+bool ListValue::Equals(const Value* other) const {
+ if (other->GetType() != GetType())
+ return false;
+
+ const ListValue* other_list =
+ static_cast<const ListValue*>(other);
+ Storage::const_iterator lhs_it, rhs_it;
+ for (lhs_it = begin(), rhs_it = other_list->begin();
+ lhs_it != end() && rhs_it != other_list->end();
+ ++lhs_it, ++rhs_it) {
+ if (!(*lhs_it)->Equals(rhs_it->get()))
+ return false;
+ }
+ if (lhs_it != end() || rhs_it != other_list->end())
+ return false;
+
+ return true;
+}
+
// Out-of-line virtual destructors anchor the vtables for these interfaces.
ValueSerializer::~ValueSerializer() {
}

ValueDeserializer::~ValueDeserializer() {
}

// Streams |value| as pretty-printed JSON, mainly for logging/debugging.
std::ostream& operator<<(std::ostream& out, const Value& value) {
  std::string json;
  JSONWriter::WriteWithOptions(value, JSONWriter::OPTIONS_PRETTY_PRINT, &json);
  return out << json;
}
+
+} // namespace base
diff --git a/libchrome/base/values.h b/libchrome/base/values.h
new file mode 100644
index 0000000..e3d6089
--- /dev/null
+++ b/libchrome/base/values.h
@@ -0,0 +1,569 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file specifies a recursive data storage class called Value intended for
+// storing settings and other persistable data.
+//
+// A Value represents something that can be stored in JSON or passed to/from
+// JavaScript. As such, it is NOT a generalized variant type, since only the
+// types supported by JavaScript/JSON are supported.
+//
+// IN PARTICULAR this means that there is no support for int64_t or unsigned
+// numbers. Writing JSON with such types would violate the spec. If you need
+// something like this, either use a double or make a string value containing
+// the number you want.
+
+#ifndef BASE_VALUES_H_
+#define BASE_VALUES_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <iosfwd>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+class BinaryValue;
+class DictionaryValue;
+class FundamentalValue;
+class ListValue;
+class StringValue;
+class Value;
+
+// The Value class is the base class for Values. A Value can be instantiated
+// via the Create*Value() factory methods, or by directly creating instances of
+// the subclasses.
+//
+// See the file-level comment above for more information.
+class BASE_EXPORT Value {
+ public:
+ enum Type {
+ TYPE_NULL = 0,
+ TYPE_BOOLEAN,
+ TYPE_INTEGER,
+ TYPE_DOUBLE,
+ TYPE_STRING,
+ TYPE_BINARY,
+ TYPE_DICTIONARY,
+ TYPE_LIST
+ // Note: Do not add more types. See the file-level comment above for why.
+ };
+
+ virtual ~Value();
+
+ static std::unique_ptr<Value> CreateNullValue();
+
+ // Returns the type of the value stored by the current Value object.
+ // Each type will be implemented by only one subclass of Value, so it's
+ // safe to use the Type to determine whether you can cast from
+ // Value* to (Implementing Class)*. Also, a Value object never changes
+ // its type after construction.
+ Type GetType() const { return type_; }
+
+ // Returns true if the current object represents a given type.
+ bool IsType(Type type) const { return type == type_; }
+
+ // These methods allow the convenient retrieval of the contents of the Value.
+ // If the current object can be converted into the given type, the value is
+ // returned through the |out_value| parameter and true is returned;
+ // otherwise, false is returned and |out_value| is unchanged.
+ virtual bool GetAsBoolean(bool* out_value) const;
+ virtual bool GetAsInteger(int* out_value) const;
+ virtual bool GetAsDouble(double* out_value) const;
+ virtual bool GetAsString(std::string* out_value) const;
+ virtual bool GetAsString(string16* out_value) const;
+ virtual bool GetAsString(const StringValue** out_value) const;
+ virtual bool GetAsBinary(const BinaryValue** out_value) const;
+ virtual bool GetAsList(ListValue** out_value);
+ virtual bool GetAsList(const ListValue** out_value) const;
+ virtual bool GetAsDictionary(DictionaryValue** out_value);
+ virtual bool GetAsDictionary(const DictionaryValue** out_value) const;
+ // Note: Do not add more types. See the file-level comment above for why.
+
+ // This creates a deep copy of the entire Value tree, and returns a pointer
+ // to the copy. The caller gets ownership of the copy, of course.
+ //
+ // Subclasses return their own type directly in their overrides;
+ // this works because C++ supports covariant return types.
+ virtual Value* DeepCopy() const;
+ // Preferred version of DeepCopy. TODO(estade): remove the above.
+ std::unique_ptr<Value> CreateDeepCopy() const;
+
+ // Compares if two Value objects have equal contents.
+ virtual bool Equals(const Value* other) const;
+
+ // Compares if two Value objects have equal contents. Can handle NULLs.
+ // NULLs are considered equal but different from Value::CreateNullValue().
+ static bool Equals(const Value* a, const Value* b);
+
+ protected:
+ // These aren't safe for end-users, but they are useful for subclasses.
+ explicit Value(Type type);
+ Value(const Value& that);
+ Value& operator=(const Value& that);
+
+ private:
+ Type type_;
+};
+
+// FundamentalValue represents the simple fundamental types of values.
+class BASE_EXPORT FundamentalValue : public Value {
+ public:
+ explicit FundamentalValue(bool in_value);
+ explicit FundamentalValue(int in_value);
+ explicit FundamentalValue(double in_value);
+ ~FundamentalValue() override;
+
+ // Overridden from Value:
+ bool GetAsBoolean(bool* out_value) const override;
+ bool GetAsInteger(int* out_value) const override;
+ // Values of both type TYPE_INTEGER and TYPE_DOUBLE can be obtained as
+ // doubles.
+ bool GetAsDouble(double* out_value) const override;
+ FundamentalValue* DeepCopy() const override;
+ bool Equals(const Value* other) const override;
+
+ private:
+ union {
+ bool boolean_value_;
+ int integer_value_;
+ double double_value_;
+ };
+};
+
+class BASE_EXPORT StringValue : public Value {
+ public:
+ // Initializes a StringValue with a UTF-8 narrow character string.
+ explicit StringValue(const std::string& in_value);
+
+ // Initializes a StringValue with a string16.
+ explicit StringValue(const string16& in_value);
+
+ ~StringValue() override;
+
+ // Returns |value_| as a pointer or reference.
+ std::string* GetString();
+ const std::string& GetString() const;
+
+ // Overridden from Value:
+ bool GetAsString(std::string* out_value) const override;
+ bool GetAsString(string16* out_value) const override;
+ bool GetAsString(const StringValue** out_value) const override;
+ StringValue* DeepCopy() const override;
+ bool Equals(const Value* other) const override;
+
+ private:
+ std::string value_;
+};
+
+class BASE_EXPORT BinaryValue: public Value {
+ public:
+ // Creates a BinaryValue with a null buffer and size of 0.
+ BinaryValue();
+
+ // Creates a BinaryValue, taking ownership of the bytes pointed to by
+ // |buffer|.
+ BinaryValue(std::unique_ptr<char[]> buffer, size_t size);
+
+ ~BinaryValue() override;
+
+ // For situations where you want to keep ownership of your buffer, this
+ // factory method creates a new BinaryValue by copying the contents of the
+ // buffer that's passed in.
+ static std::unique_ptr<BinaryValue> CreateWithCopiedBuffer(const char* buffer,
+ size_t size);
+
+ size_t GetSize() const { return size_; }
+
+ // May return NULL.
+ char* GetBuffer() { return buffer_.get(); }
+ const char* GetBuffer() const { return buffer_.get(); }
+
+ // Overridden from Value:
+ bool GetAsBinary(const BinaryValue** out_value) const override;
+ BinaryValue* DeepCopy() const override;
+ bool Equals(const Value* other) const override;
+
+ private:
+ std::unique_ptr<char[]> buffer_;
+ size_t size_;
+
+ DISALLOW_COPY_AND_ASSIGN(BinaryValue);
+};
+
+// DictionaryValue provides a key-value dictionary with (optional) "path"
+// parsing for recursive access; see the comment at the top of the file. Keys
+// are |std::string|s and should be UTF-8 encoded.
+class BASE_EXPORT DictionaryValue : public Value {
+ public:
+ using Storage = std::map<std::string, std::unique_ptr<Value>>;
+ // Returns |value| if it is a dictionary, nullptr otherwise.
+ static std::unique_ptr<DictionaryValue> From(std::unique_ptr<Value> value);
+
+ DictionaryValue();
+ ~DictionaryValue() override;
+
+ // Overridden from Value:
+ bool GetAsDictionary(DictionaryValue** out_value) override;
+ bool GetAsDictionary(const DictionaryValue** out_value) const override;
+
+ // Returns true if the current dictionary has a value for the given key.
+ bool HasKey(const std::string& key) const;
+
+ // Returns the number of Values in this dictionary.
+ size_t size() const { return dictionary_.size(); }
+
+ // Returns whether the dictionary is empty.
+ bool empty() const { return dictionary_.empty(); }
+
+ // Clears any current contents of this dictionary.
+ void Clear();
+
+ // Sets the Value associated with the given path starting from this object.
+ // A path has the form "<key>" or "<key>.<key>.[...]", where "." indexes
+ // into the next DictionaryValue down. Obviously, "." can't be used
+ // within a key, but there are no other restrictions on keys.
+ // If the key at any step of the way doesn't exist, or exists but isn't
+ // a DictionaryValue, a new DictionaryValue will be created and attached
+ // to the path in that location. |in_value| must be non-null.
+ void Set(const std::string& path, std::unique_ptr<Value> in_value);
+ // Deprecated version of the above. TODO(estade): remove.
+ void Set(const std::string& path, Value* in_value);
+
+ // Convenience forms of Set(). These methods will replace any existing
+ // value at that path, even if it has a different type.
+ void SetBoolean(const std::string& path, bool in_value);
+ void SetInteger(const std::string& path, int in_value);
+ void SetDouble(const std::string& path, double in_value);
+ void SetString(const std::string& path, const std::string& in_value);
+ void SetString(const std::string& path, const string16& in_value);
+
+ // Like Set(), but without special treatment of '.'. This allows e.g. URLs to
+ // be used as paths.
+ void SetWithoutPathExpansion(const std::string& key,
+ std::unique_ptr<Value> in_value);
+ // Deprecated version of the above. TODO(estade): remove.
+ void SetWithoutPathExpansion(const std::string& key, Value* in_value);
+
+ // Convenience forms of SetWithoutPathExpansion().
+ void SetBooleanWithoutPathExpansion(const std::string& path, bool in_value);
+ void SetIntegerWithoutPathExpansion(const std::string& path, int in_value);
+ void SetDoubleWithoutPathExpansion(const std::string& path, double in_value);
+ void SetStringWithoutPathExpansion(const std::string& path,
+ const std::string& in_value);
+ void SetStringWithoutPathExpansion(const std::string& path,
+ const string16& in_value);
+
+ // Gets the Value associated with the given path starting from this object.
+ // A path has the form "<key>" or "<key>.<key>.[...]", where "." indexes
+ // into the next DictionaryValue down. If the path can be resolved
+ // successfully, the value for the last key in the path will be returned
+ // through the |out_value| parameter, and the function will return true.
+ // Otherwise, it will return false and |out_value| will be untouched.
+ // Note that the dictionary always owns the value that's returned.
+ // |out_value| is optional and will only be set if non-NULL.
+ bool Get(StringPiece path, const Value** out_value) const;
+ bool Get(StringPiece path, Value** out_value);
+
+ // These are convenience forms of Get(). The value will be retrieved
+ // and the return value will be true if the path is valid and the value at
+ // the end of the path can be returned in the form specified.
+ // |out_value| is optional and will only be set if non-NULL.
+ bool GetBoolean(const std::string& path, bool* out_value) const;
+ bool GetInteger(const std::string& path, int* out_value) const;
+ // Values of both type TYPE_INTEGER and TYPE_DOUBLE can be obtained as
+ // doubles.
+ bool GetDouble(const std::string& path, double* out_value) const;
+ bool GetString(const std::string& path, std::string* out_value) const;
+ bool GetString(const std::string& path, string16* out_value) const;
+ bool GetStringASCII(const std::string& path, std::string* out_value) const;
+ bool GetBinary(const std::string& path, const BinaryValue** out_value) const;
+ bool GetBinary(const std::string& path, BinaryValue** out_value);
+ bool GetDictionary(StringPiece path,
+ const DictionaryValue** out_value) const;
+ bool GetDictionary(StringPiece path, DictionaryValue** out_value);
+ bool GetList(const std::string& path, const ListValue** out_value) const;
+ bool GetList(const std::string& path, ListValue** out_value);
+
+ // Like Get(), but without special treatment of '.'. This allows e.g. URLs to
+ // be used as paths.
+ bool GetWithoutPathExpansion(const std::string& key,
+ const Value** out_value) const;
+ bool GetWithoutPathExpansion(const std::string& key, Value** out_value);
+ bool GetBooleanWithoutPathExpansion(const std::string& key,
+ bool* out_value) const;
+ bool GetIntegerWithoutPathExpansion(const std::string& key,
+ int* out_value) const;
+ bool GetDoubleWithoutPathExpansion(const std::string& key,
+ double* out_value) const;
+ bool GetStringWithoutPathExpansion(const std::string& key,
+ std::string* out_value) const;
+ bool GetStringWithoutPathExpansion(const std::string& key,
+ string16* out_value) const;
+ bool GetDictionaryWithoutPathExpansion(
+ const std::string& key,
+ const DictionaryValue** out_value) const;
+ bool GetDictionaryWithoutPathExpansion(const std::string& key,
+ DictionaryValue** out_value);
+ bool GetListWithoutPathExpansion(const std::string& key,
+ const ListValue** out_value) const;
+ bool GetListWithoutPathExpansion(const std::string& key,
+ ListValue** out_value);
+
+ // Removes the Value with the specified path from this dictionary (or one
+ // of its child dictionaries, if the path is more than just a local key).
+ // If |out_value| is non-NULL, the removed Value will be passed out via
+ // |out_value|. If |out_value| is NULL, the removed value will be deleted.
+ // This method returns true if |path| is a valid path; otherwise it will
+ // return false and the DictionaryValue object will be unchanged.
+ virtual bool Remove(const std::string& path,
+ std::unique_ptr<Value>* out_value);
+
+ // Like Remove(), but without special treatment of '.'. This allows e.g. URLs
+ // to be used as paths.
+ virtual bool RemoveWithoutPathExpansion(const std::string& key,
+ std::unique_ptr<Value>* out_value);
+
+ // Removes a path, clearing out all dictionaries on |path| that remain empty
+ // after removing the value at |path|.
+ virtual bool RemovePath(const std::string& path,
+ std::unique_ptr<Value>* out_value);
+
+ // Makes a copy of |this| but doesn't include empty dictionaries and lists in
+ // the copy. This never returns NULL, even if |this| itself is empty.
+ std::unique_ptr<DictionaryValue> DeepCopyWithoutEmptyChildren() const;
+
+ // Merge |dictionary| into this dictionary. This is done recursively, i.e. any
+ // sub-dictionaries will be merged as well. In case of key collisions, the
+ // passed in dictionary takes precedence and data already present will be
+ // replaced. Values within |dictionary| are deep-copied, so |dictionary| may
+ // be freed any time after this call.
+ void MergeDictionary(const DictionaryValue* dictionary);
+
+ // Swaps contents with the |other| dictionary.
+ virtual void Swap(DictionaryValue* other);
+
+ // This class provides an iterator over both keys and values in the
+ // dictionary. It can't be used to modify the dictionary.
+ class BASE_EXPORT Iterator {
+ public:
+ explicit Iterator(const DictionaryValue& target);
+ Iterator(const Iterator& other);
+ ~Iterator();
+
+ bool IsAtEnd() const { return it_ == target_.dictionary_.end(); }
+ void Advance() { ++it_; }
+
+ const std::string& key() const { return it_->first; }
+ const Value& value() const { return *it_->second; }
+
+ private:
+ const DictionaryValue& target_;
+ Storage::const_iterator it_;
+ };
+
+ // Overridden from Value:
+ DictionaryValue* DeepCopy() const override;
+ // Preferred version of DeepCopy. TODO(estade): remove the above.
+ std::unique_ptr<DictionaryValue> CreateDeepCopy() const;
+ bool Equals(const Value* other) const override;
+
+ private:
+ Storage dictionary_;
+
+ DISALLOW_COPY_AND_ASSIGN(DictionaryValue);
+};
+
+// This type of Value represents a list of other Value values.
+class BASE_EXPORT ListValue : public Value {
+ public:
+ using Storage = std::vector<std::unique_ptr<Value>>;
+ using const_iterator = Storage::const_iterator;
+ using iterator = Storage::iterator;
+
+ // Returns |value| if it is a list, nullptr otherwise.
+ static std::unique_ptr<ListValue> From(std::unique_ptr<Value> value);
+
+ ListValue();
+ ~ListValue() override;
+
+ // Clears the contents of this ListValue
+ void Clear();
+
+ // Returns the number of Values in this list.
+ size_t GetSize() const { return list_.size(); }
+
+ // Returns whether the list is empty.
+ bool empty() const { return list_.empty(); }
+
+  // Sets the list item at the given index to be the Value specified by
+  // the value given. If the index is beyond the current end of the list,
+  // null Values will be used to pad out the list.
+  // Returns true if successful, or false if the value is a null
+  // pointer.
+ bool Set(size_t index, Value* in_value);
+ // Preferred version of the above. TODO(estade): remove the above.
+ bool Set(size_t index, std::unique_ptr<Value> in_value);
+
+ // Gets the Value at the given index. Modifies |out_value| (and returns true)
+ // only if the index falls within the current list range.
+ // Note that the list always owns the Value passed out via |out_value|.
+ // |out_value| is optional and will only be set if non-NULL.
+ bool Get(size_t index, const Value** out_value) const;
+ bool Get(size_t index, Value** out_value);
+
+ // Convenience forms of Get(). Modifies |out_value| (and returns true)
+ // only if the index is valid and the Value at that index can be returned
+ // in the specified form.
+ // |out_value| is optional and will only be set if non-NULL.
+ bool GetBoolean(size_t index, bool* out_value) const;
+ bool GetInteger(size_t index, int* out_value) const;
+ // Values of both type TYPE_INTEGER and TYPE_DOUBLE can be obtained as
+ // doubles.
+ bool GetDouble(size_t index, double* out_value) const;
+ bool GetString(size_t index, std::string* out_value) const;
+ bool GetString(size_t index, string16* out_value) const;
+ bool GetBinary(size_t index, const BinaryValue** out_value) const;
+ bool GetBinary(size_t index, BinaryValue** out_value);
+ bool GetDictionary(size_t index, const DictionaryValue** out_value) const;
+ bool GetDictionary(size_t index, DictionaryValue** out_value);
+ bool GetList(size_t index, const ListValue** out_value) const;
+ bool GetList(size_t index, ListValue** out_value);
+
+ // Removes the Value with the specified index from this list.
+ // If |out_value| is non-NULL, the removed Value AND ITS OWNERSHIP will be
+ // passed out via |out_value|. If |out_value| is NULL, the removed value will
+ // be deleted. This method returns true if |index| is valid; otherwise
+ // it will return false and the ListValue object will be unchanged.
+ virtual bool Remove(size_t index, std::unique_ptr<Value>* out_value);
+
+ // Removes the first instance of |value| found in the list, if any, and
+ // deletes it. |index| is the location where |value| was found. Returns false
+ // if not found.
+ bool Remove(const Value& value, size_t* index);
+
+ // Removes the element at |iter|. If |out_value| is NULL, the value will be
+ // deleted, otherwise ownership of the value is passed back to the caller.
+ // Returns an iterator pointing to the location of the element that
+ // followed the erased element.
+ iterator Erase(iterator iter, std::unique_ptr<Value>* out_value);
+
+ // Appends a Value to the end of the list.
+ void Append(std::unique_ptr<Value> in_value);
+ // Deprecated version of the above. TODO(estade): remove.
+ void Append(Value* in_value);
+
+ // Convenience forms of Append.
+ void AppendBoolean(bool in_value);
+ void AppendInteger(int in_value);
+ void AppendDouble(double in_value);
+ void AppendString(const std::string& in_value);
+ void AppendString(const string16& in_value);
+ void AppendStrings(const std::vector<std::string>& in_values);
+ void AppendStrings(const std::vector<string16>& in_values);
+
+ // Appends a Value if it's not already present. Takes ownership of the
+ // |in_value|. Returns true if successful, or false if the value was already
+ // present. If the value was already present the |in_value| is deleted.
+ bool AppendIfNotPresent(Value* in_value);
+
+ // Insert a Value at index.
+ // Returns true if successful, or false if the index was out of range.
+ bool Insert(size_t index, Value* in_value);
+
+ // Searches for the first instance of |value| in the list using the Equals
+ // method of the Value type.
+ // Returns a const_iterator to the found item or to end() if none exists.
+ const_iterator Find(const Value& value) const;
+
+ // Swaps contents with the |other| list.
+ virtual void Swap(ListValue* other);
+
+ // Iteration.
+ iterator begin() { return list_.begin(); }
+ iterator end() { return list_.end(); }
+
+ const_iterator begin() const { return list_.begin(); }
+ const_iterator end() const { return list_.end(); }
+
+ // Overridden from Value:
+ bool GetAsList(ListValue** out_value) override;
+ bool GetAsList(const ListValue** out_value) const override;
+ ListValue* DeepCopy() const override;
+ bool Equals(const Value* other) const override;
+
+ // Preferred version of DeepCopy. TODO(estade): remove DeepCopy.
+ std::unique_ptr<ListValue> CreateDeepCopy() const;
+
+ private:
+ Storage list_;
+
+ DISALLOW_COPY_AND_ASSIGN(ListValue);
+};
+
+// This interface is implemented by classes that know how to serialize
+// Value objects.
+class BASE_EXPORT ValueSerializer {
+ public:
+ virtual ~ValueSerializer();
+
+ virtual bool Serialize(const Value& root) = 0;
+};
+
+// This interface is implemented by classes that know how to deserialize Value
+// objects.
+class BASE_EXPORT ValueDeserializer {
+ public:
+ virtual ~ValueDeserializer();
+
+ // This method deserializes the subclass-specific format into a Value object.
+ // If the return value is non-NULL, the caller takes ownership of returned
+ // Value. If the return value is NULL, and if error_code is non-NULL,
+ // error_code will be set with the underlying error.
+  // If |error_str| is non-null, it will be filled in with a formatted
+  // error message including the location of the error if appropriate.
+ virtual std::unique_ptr<Value> Deserialize(int* error_code,
+ std::string* error_str) = 0;
+};
+
+// Stream operator so Values can be used in assertion statements. In order that
+// gtest uses this operator to print readable output on test failures, we must
+// override each specific type. Otherwise, the default template implementation
+// is preferred over an upcast.
+BASE_EXPORT std::ostream& operator<<(std::ostream& out, const Value& value);
+
+BASE_EXPORT inline std::ostream& operator<<(std::ostream& out,
+ const FundamentalValue& value) {
+ return out << static_cast<const Value&>(value);
+}
+
+BASE_EXPORT inline std::ostream& operator<<(std::ostream& out,
+ const StringValue& value) {
+ return out << static_cast<const Value&>(value);
+}
+
+BASE_EXPORT inline std::ostream& operator<<(std::ostream& out,
+ const DictionaryValue& value) {
+ return out << static_cast<const Value&>(value);
+}
+
+BASE_EXPORT inline std::ostream& operator<<(std::ostream& out,
+ const ListValue& value) {
+ return out << static_cast<const Value&>(value);
+}
+
+} // namespace base
+
+#endif // BASE_VALUES_H_
diff --git a/libchrome/base/values_unittest.cc b/libchrome/base/values_unittest.cc
new file mode 100644
index 0000000..d685222
--- /dev/null
+++ b/libchrome/base/values_unittest.cc
@@ -0,0 +1,1156 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/values.h"
+
+#include <stddef.h>
+
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "base/memory/ptr_util.h"
+#include "base/strings/string16.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(ValuesTest, Basic) {
+ // Test basic dictionary getting/setting
+ DictionaryValue settings;
+ std::string homepage = "http://google.com";
+ ASSERT_FALSE(settings.GetString("global.homepage", &homepage));
+ ASSERT_EQ(std::string("http://google.com"), homepage);
+
+ ASSERT_FALSE(settings.Get("global", NULL));
+ settings.SetBoolean("global", true);
+ ASSERT_TRUE(settings.Get("global", NULL));
+ settings.SetString("global.homepage", "http://scurvy.com");
+ ASSERT_TRUE(settings.Get("global", NULL));
+ homepage = "http://google.com";
+ ASSERT_TRUE(settings.GetString("global.homepage", &homepage));
+ ASSERT_EQ(std::string("http://scurvy.com"), homepage);
+
+ // Test storing a dictionary in a list.
+ ListValue* toolbar_bookmarks;
+ ASSERT_FALSE(
+ settings.GetList("global.toolbar.bookmarks", &toolbar_bookmarks));
+
+ std::unique_ptr<ListValue> new_toolbar_bookmarks(new ListValue);
+ settings.Set("global.toolbar.bookmarks", std::move(new_toolbar_bookmarks));
+ ASSERT_TRUE(settings.GetList("global.toolbar.bookmarks", &toolbar_bookmarks));
+
+ std::unique_ptr<DictionaryValue> new_bookmark(new DictionaryValue);
+ new_bookmark->SetString("name", "Froogle");
+ new_bookmark->SetString("url", "http://froogle.com");
+ toolbar_bookmarks->Append(std::move(new_bookmark));
+
+ ListValue* bookmark_list;
+ ASSERT_TRUE(settings.GetList("global.toolbar.bookmarks", &bookmark_list));
+ DictionaryValue* bookmark;
+ ASSERT_EQ(1U, bookmark_list->GetSize());
+ ASSERT_TRUE(bookmark_list->GetDictionary(0, &bookmark));
+ std::string bookmark_name = "Unnamed";
+ ASSERT_TRUE(bookmark->GetString("name", &bookmark_name));
+ ASSERT_EQ(std::string("Froogle"), bookmark_name);
+ std::string bookmark_url;
+ ASSERT_TRUE(bookmark->GetString("url", &bookmark_url));
+ ASSERT_EQ(std::string("http://froogle.com"), bookmark_url);
+}
+
+TEST(ValuesTest, List) {
+ std::unique_ptr<ListValue> mixed_list(new ListValue());
+ mixed_list->Set(0, WrapUnique(new FundamentalValue(true)));
+ mixed_list->Set(1, WrapUnique(new FundamentalValue(42)));
+ mixed_list->Set(2, WrapUnique(new FundamentalValue(88.8)));
+ mixed_list->Set(3, WrapUnique(new StringValue("foo")));
+ ASSERT_EQ(4u, mixed_list->GetSize());
+
+ Value *value = NULL;
+ bool bool_value = false;
+ int int_value = 0;
+ double double_value = 0.0;
+ std::string string_value;
+
+ ASSERT_FALSE(mixed_list->Get(4, &value));
+
+ ASSERT_FALSE(mixed_list->GetInteger(0, &int_value));
+ ASSERT_EQ(0, int_value);
+ ASSERT_FALSE(mixed_list->GetBoolean(1, &bool_value));
+ ASSERT_FALSE(bool_value);
+ ASSERT_FALSE(mixed_list->GetString(2, &string_value));
+ ASSERT_EQ("", string_value);
+ ASSERT_FALSE(mixed_list->GetInteger(2, &int_value));
+ ASSERT_EQ(0, int_value);
+ ASSERT_FALSE(mixed_list->GetBoolean(3, &bool_value));
+ ASSERT_FALSE(bool_value);
+
+ ASSERT_TRUE(mixed_list->GetBoolean(0, &bool_value));
+ ASSERT_TRUE(bool_value);
+ ASSERT_TRUE(mixed_list->GetInteger(1, &int_value));
+ ASSERT_EQ(42, int_value);
+ // implicit conversion from Integer to Double should be possible.
+ ASSERT_TRUE(mixed_list->GetDouble(1, &double_value));
+ ASSERT_EQ(42, double_value);
+ ASSERT_TRUE(mixed_list->GetDouble(2, &double_value));
+ ASSERT_EQ(88.8, double_value);
+ ASSERT_TRUE(mixed_list->GetString(3, &string_value));
+ ASSERT_EQ("foo", string_value);
+
+ // Try searching in the mixed list.
+ base::FundamentalValue sought_value(42);
+ base::FundamentalValue not_found_value(false);
+
+ ASSERT_NE(mixed_list->end(), mixed_list->Find(sought_value));
+ ASSERT_TRUE((*mixed_list->Find(sought_value))->GetAsInteger(&int_value));
+ ASSERT_EQ(42, int_value);
+ ASSERT_EQ(mixed_list->end(), mixed_list->Find(not_found_value));
+}
+
+TEST(ValuesTest, BinaryValue) {
+ // Default constructor creates a BinaryValue with a null buffer and size 0.
+ std::unique_ptr<BinaryValue> binary(new BinaryValue());
+ ASSERT_TRUE(binary.get());
+ ASSERT_EQ(NULL, binary->GetBuffer());
+ ASSERT_EQ(0U, binary->GetSize());
+
+ // Test the common case of a non-empty buffer
+ std::unique_ptr<char[]> buffer(new char[15]);
+ char* original_buffer = buffer.get();
+ binary.reset(new BinaryValue(std::move(buffer), 15));
+ ASSERT_TRUE(binary.get());
+ ASSERT_TRUE(binary->GetBuffer());
+ ASSERT_EQ(original_buffer, binary->GetBuffer());
+ ASSERT_EQ(15U, binary->GetSize());
+
+ char stack_buffer[42];
+ memset(stack_buffer, '!', 42);
+ binary = BinaryValue::CreateWithCopiedBuffer(stack_buffer, 42);
+ ASSERT_TRUE(binary.get());
+ ASSERT_TRUE(binary->GetBuffer());
+ ASSERT_NE(stack_buffer, binary->GetBuffer());
+ ASSERT_EQ(42U, binary->GetSize());
+ ASSERT_EQ(0, memcmp(stack_buffer, binary->GetBuffer(), binary->GetSize()));
+
+ // Test overloaded GetAsBinary.
+ Value* narrow_value = binary.get();
+ const BinaryValue* narrow_binary = NULL;
+ ASSERT_TRUE(narrow_value->GetAsBinary(&narrow_binary));
+ EXPECT_EQ(binary.get(), narrow_binary);
+}
+
+TEST(ValuesTest, StringValue) {
+ // Test overloaded StringValue constructor.
+ std::unique_ptr<Value> narrow_value(new StringValue("narrow"));
+ ASSERT_TRUE(narrow_value.get());
+ ASSERT_TRUE(narrow_value->IsType(Value::TYPE_STRING));
+ std::unique_ptr<Value> utf16_value(new StringValue(ASCIIToUTF16("utf16")));
+ ASSERT_TRUE(utf16_value.get());
+ ASSERT_TRUE(utf16_value->IsType(Value::TYPE_STRING));
+
+ // Test overloaded GetAsString.
+ std::string narrow = "http://google.com";
+ string16 utf16 = ASCIIToUTF16("http://google.com");
+ const StringValue* string_value = NULL;
+ ASSERT_TRUE(narrow_value->GetAsString(&narrow));
+ ASSERT_TRUE(narrow_value->GetAsString(&utf16));
+ ASSERT_TRUE(narrow_value->GetAsString(&string_value));
+ ASSERT_EQ(std::string("narrow"), narrow);
+ ASSERT_EQ(ASCIIToUTF16("narrow"), utf16);
+ ASSERT_EQ(string_value->GetString(), narrow);
+
+ ASSERT_TRUE(utf16_value->GetAsString(&narrow));
+ ASSERT_TRUE(utf16_value->GetAsString(&utf16));
+ ASSERT_TRUE(utf16_value->GetAsString(&string_value));
+ ASSERT_EQ(std::string("utf16"), narrow);
+ ASSERT_EQ(ASCIIToUTF16("utf16"), utf16);
+ ASSERT_EQ(string_value->GetString(), narrow);
+
+ // Don't choke on NULL values.
+ ASSERT_TRUE(narrow_value->GetAsString(static_cast<string16*>(NULL)));
+ ASSERT_TRUE(narrow_value->GetAsString(static_cast<std::string*>(NULL)));
+ ASSERT_TRUE(narrow_value->GetAsString(
+ static_cast<const StringValue**>(NULL)));
+}
+
+// This is a Value object that allows us to tell if it's been
+// properly deleted by modifying the value of external flag on destruction.
+class DeletionTestValue : public Value {
+ public:
+ explicit DeletionTestValue(bool* deletion_flag) : Value(TYPE_NULL) {
+ Init(deletion_flag); // Separate function so that we can use ASSERT_*
+ }
+
+ void Init(bool* deletion_flag) {
+ ASSERT_TRUE(deletion_flag);
+ deletion_flag_ = deletion_flag;
+ *deletion_flag_ = false;
+ }
+
+ ~DeletionTestValue() override { *deletion_flag_ = true; }
+
+ private:
+ bool* deletion_flag_;
+};
+
+TEST(ValuesTest, ListDeletion) {
+ bool deletion_flag = true;
+
+ {
+ ListValue list;
+ list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
+ EXPECT_FALSE(deletion_flag);
+ }
+ EXPECT_TRUE(deletion_flag);
+
+ {
+ ListValue list;
+ list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
+ EXPECT_FALSE(deletion_flag);
+ list.Clear();
+ EXPECT_TRUE(deletion_flag);
+ }
+
+ {
+ ListValue list;
+ list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
+ EXPECT_FALSE(deletion_flag);
+ EXPECT_TRUE(list.Set(0, Value::CreateNullValue()));
+ EXPECT_TRUE(deletion_flag);
+ }
+}
+
+// Verifies the ownership semantics of ListValue::Remove() in its
+// index/out-param, index/null, and value/index-out-param forms.
+TEST(ValuesTest, ListRemoval) {
+  bool deletion_flag = true;
+  std::unique_ptr<Value> removed_item;
+
+  // Remove() with an out-param transfers ownership: the element must stay
+  // alive until |removed_item| itself is reset.
+  {
+    ListValue list;
+    list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
+    EXPECT_FALSE(deletion_flag);
+    EXPECT_EQ(1U, list.GetSize());
+    // Out-of-range indices fail without modifying the list.
+    EXPECT_FALSE(list.Remove(std::numeric_limits<size_t>::max(),
+                             &removed_item));
+    EXPECT_FALSE(list.Remove(1, &removed_item));
+    EXPECT_TRUE(list.Remove(0, &removed_item));
+    ASSERT_TRUE(removed_item);
+    EXPECT_EQ(0U, list.GetSize());
+  }
+  EXPECT_FALSE(deletion_flag);
+  removed_item.reset();
+  EXPECT_TRUE(deletion_flag);
+
+  // Remove() with a null out-param deletes the element immediately.
+  {
+    ListValue list;
+    list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
+    EXPECT_FALSE(deletion_flag);
+    EXPECT_TRUE(list.Remove(0, NULL));
+    EXPECT_TRUE(deletion_flag);
+    EXPECT_EQ(0U, list.GetSize());
+  }
+
+  // Remove(value, index) locates the element by equality, deletes it, and
+  // reports the index it occupied.
+  {
+    ListValue list;
+    std::unique_ptr<DeletionTestValue> value(
+        new DeletionTestValue(&deletion_flag));
+    DeletionTestValue* original_value = value.get();
+    list.Append(std::move(value));
+    EXPECT_FALSE(deletion_flag);
+    size_t index = 0;
+    list.Remove(*original_value, &index);
+    EXPECT_EQ(0U, index);
+    EXPECT_TRUE(deletion_flag);
+    EXPECT_EQ(0U, list.GetSize());
+  }
+}
+
+// Verifies that DictionaryValue destroys owned values on dictionary
+// destruction, Clear(), and Set()-replacement of an existing key.
+TEST(ValuesTest, DictionaryDeletion) {
+  std::string key = "test";
+  bool deletion_flag = true;
+
+  // Value is deleted when the owning dictionary goes out of scope.
+  {
+    DictionaryValue dict;
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
+    EXPECT_FALSE(deletion_flag);
+  }
+  EXPECT_TRUE(deletion_flag);
+
+  // Value is deleted by Clear().
+  {
+    DictionaryValue dict;
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
+    EXPECT_FALSE(deletion_flag);
+    dict.Clear();
+    EXPECT_TRUE(deletion_flag);
+  }
+
+  // Overwriting a key via Set() deletes the previous value.
+  {
+    DictionaryValue dict;
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
+    EXPECT_FALSE(deletion_flag);
+    dict.Set(key, Value::CreateNullValue());
+    EXPECT_TRUE(deletion_flag);
+  }
+}
+
+// Verifies the ownership semantics of DictionaryValue::Remove() with and
+// without an out-parameter.
+TEST(ValuesTest, DictionaryRemoval) {
+  std::string key = "test";
+  bool deletion_flag = true;
+  std::unique_ptr<Value> removed_item;
+
+  // Remove() with an out-param transfers ownership: the value must stay
+  // alive until |removed_item| itself is reset.
+  {
+    DictionaryValue dict;
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
+    EXPECT_FALSE(deletion_flag);
+    EXPECT_TRUE(dict.HasKey(key));
+    // A missing key fails without modifying the dictionary.
+    EXPECT_FALSE(dict.Remove("absent key", &removed_item));
+    EXPECT_TRUE(dict.Remove(key, &removed_item));
+    EXPECT_FALSE(dict.HasKey(key));
+    ASSERT_TRUE(removed_item);
+  }
+  EXPECT_FALSE(deletion_flag);
+  removed_item.reset();
+  EXPECT_TRUE(deletion_flag);
+
+  // Remove() with a null out-param deletes the value immediately.
+  {
+    DictionaryValue dict;
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
+    EXPECT_FALSE(deletion_flag);
+    EXPECT_TRUE(dict.HasKey(key));
+    EXPECT_TRUE(dict.Remove(key, NULL));
+    EXPECT_TRUE(deletion_flag);
+    EXPECT_FALSE(dict.HasKey(key));
+  }
+}
+
+// Verifies that Set() expands dotted keys into nested dictionaries while
+// SetWithoutPathExpansion() stores the dotted string as a literal key, and
+// that the matching Get variants observe the same distinction.
+TEST(ValuesTest, DictionaryWithoutPathExpansion) {
+  DictionaryValue dict;
+  dict.Set("this.is.expanded", Value::CreateNullValue());
+  dict.SetWithoutPathExpansion("this.isnt.expanded", Value::CreateNullValue());
+
+  // The expanded path created a nested dict under the literal key "this".
+  EXPECT_FALSE(dict.HasKey("this.is.expanded"));
+  EXPECT_TRUE(dict.HasKey("this"));
+  Value* value1;
+  EXPECT_TRUE(dict.Get("this", &value1));
+  DictionaryValue* value2;
+  ASSERT_TRUE(dict.GetDictionaryWithoutPathExpansion("this", &value2));
+  EXPECT_EQ(value1, value2);
+  EXPECT_EQ(1U, value2->size());
+
+  // The unexpanded entry is only reachable via the literal dotted key.
+  EXPECT_TRUE(dict.HasKey("this.isnt.expanded"));
+  Value* value3;
+  EXPECT_FALSE(dict.Get("this.isnt.expanded", &value3));
+  Value* value4;
+  ASSERT_TRUE(dict.GetWithoutPathExpansion("this.isnt.expanded", &value4));
+  EXPECT_EQ(Value::TYPE_NULL, value4->GetType());
+}
+
+// Tests the deprecated version of SetWithoutPathExpansion.
+// TODO(estade): remove.
+// NOTE(review): this body is currently identical to
+// ValuesTest.DictionaryWithoutPathExpansion above and does not call any
+// deprecated overload — confirm whether it should exercise the raw-pointer
+// SetWithoutPathExpansion variant or simply be deleted.
+TEST(ValuesTest, DictionaryWithoutPathExpansionDeprecated) {
+  DictionaryValue dict;
+  dict.Set("this.is.expanded", Value::CreateNullValue());
+  dict.SetWithoutPathExpansion("this.isnt.expanded", Value::CreateNullValue());
+
+  EXPECT_FALSE(dict.HasKey("this.is.expanded"));
+  EXPECT_TRUE(dict.HasKey("this"));
+  Value* value1;
+  EXPECT_TRUE(dict.Get("this", &value1));
+  DictionaryValue* value2;
+  ASSERT_TRUE(dict.GetDictionaryWithoutPathExpansion("this", &value2));
+  EXPECT_EQ(value1, value2);
+  EXPECT_EQ(1U, value2->size());
+
+  EXPECT_TRUE(dict.HasKey("this.isnt.expanded"));
+  Value* value3;
+  EXPECT_FALSE(dict.Get("this.isnt.expanded", &value3));
+  Value* value4;
+  ASSERT_TRUE(dict.GetWithoutPathExpansion("this.isnt.expanded", &value4));
+  EXPECT_EQ(Value::TYPE_NULL, value4->GetType());
+}
+
+// Verifies that RemovePath() removes a dotted path, prunes now-empty
+// intermediate dictionaries, and hands the removed value to the caller.
+TEST(ValuesTest, DictionaryRemovePath) {
+  DictionaryValue dict;
+  dict.SetInteger("a.long.way.down", 1);
+  dict.SetBoolean("a.long.key.path", true);
+
+  // Removal of a valid path hands back the leaf and prunes empty parents,
+  // but leaves the sibling path intact.
+  std::unique_ptr<Value> removed_item;
+  EXPECT_TRUE(dict.RemovePath("a.long.way.down", &removed_item));
+  ASSERT_TRUE(removed_item);
+  EXPECT_TRUE(removed_item->IsType(base::Value::TYPE_INTEGER));
+  EXPECT_FALSE(dict.HasKey("a.long.way.down"));
+  EXPECT_FALSE(dict.HasKey("a.long.way"));
+  EXPECT_TRUE(dict.Get("a.long.key.path", NULL));
+
+  // Removal of a no-longer-existing path fails and yields no value.
+  removed_item.reset();
+  EXPECT_FALSE(dict.RemovePath("a.long.way.down", &removed_item));
+  EXPECT_FALSE(removed_item);
+  EXPECT_TRUE(dict.Get("a.long.key.path", NULL));
+
+  // Removing the last leaf leaves the dictionary completely empty.
+  removed_item.reset();
+  EXPECT_TRUE(dict.RemovePath("a.long.key.path", &removed_item));
+  ASSERT_TRUE(removed_item);
+  EXPECT_TRUE(removed_item->IsType(base::Value::TYPE_BOOLEAN));
+  EXPECT_TRUE(dict.empty());
+}
+
+// Verifies that DictionaryValue::CreateDeepCopy() produces a structurally
+// equal copy of every Value type (null, bool, int, double, string, string16,
+// binary, list, nested dictionary) with no pointers shared with the original.
+// NOTE: the "&copy_*" address-of expressions below were restored from
+// HTML-entity-mangled "©_*" text, which could not compile.
+TEST(ValuesTest, DeepCopy) {
+  // Build a dictionary holding one value of each type, keeping raw pointers
+  // to the originals so copies can be checked for pointer inequality.
+  DictionaryValue original_dict;
+  std::unique_ptr<Value> scoped_null = Value::CreateNullValue();
+  Value* original_null = scoped_null.get();
+  original_dict.Set("null", std::move(scoped_null));
+  std::unique_ptr<FundamentalValue> scoped_bool(new FundamentalValue(true));
+  FundamentalValue* original_bool = scoped_bool.get();
+  original_dict.Set("bool", std::move(scoped_bool));
+  std::unique_ptr<FundamentalValue> scoped_int(new FundamentalValue(42));
+  FundamentalValue* original_int = scoped_int.get();
+  original_dict.Set("int", std::move(scoped_int));
+  std::unique_ptr<FundamentalValue> scoped_double(new FundamentalValue(3.14));
+  FundamentalValue* original_double = scoped_double.get();
+  original_dict.Set("double", std::move(scoped_double));
+  std::unique_ptr<StringValue> scoped_string(new StringValue("hello"));
+  StringValue* original_string = scoped_string.get();
+  original_dict.Set("string", std::move(scoped_string));
+  std::unique_ptr<StringValue> scoped_string16(
+      new StringValue(ASCIIToUTF16("hello16")));
+  StringValue* original_string16 = scoped_string16.get();
+  original_dict.Set("string16", std::move(scoped_string16));
+
+  std::unique_ptr<char[]> original_buffer(new char[42]);
+  memset(original_buffer.get(), '!', 42);
+  std::unique_ptr<BinaryValue> scoped_binary(
+      new BinaryValue(std::move(original_buffer), 42));
+  BinaryValue* original_binary = scoped_binary.get();
+  original_dict.Set("binary", std::move(scoped_binary));
+
+  std::unique_ptr<ListValue> scoped_list(new ListValue());
+  Value* original_list = scoped_list.get();
+  std::unique_ptr<FundamentalValue> scoped_list_element_0(
+      new FundamentalValue(0));
+  Value* original_list_element_0 = scoped_list_element_0.get();
+  scoped_list->Append(std::move(scoped_list_element_0));
+  std::unique_ptr<FundamentalValue> scoped_list_element_1(
+      new FundamentalValue(1));
+  Value* original_list_element_1 = scoped_list_element_1.get();
+  scoped_list->Append(std::move(scoped_list_element_1));
+  original_dict.Set("list", std::move(scoped_list));
+
+  std::unique_ptr<DictionaryValue> scoped_nested_dictionary(
+      new DictionaryValue());
+  Value* original_nested_dictionary = scoped_nested_dictionary.get();
+  scoped_nested_dictionary->SetString("key", "value");
+  original_dict.Set("dictionary", std::move(scoped_nested_dictionary));
+
+  std::unique_ptr<DictionaryValue> copy_dict = original_dict.CreateDeepCopy();
+  ASSERT_TRUE(copy_dict.get());
+  ASSERT_NE(copy_dict.get(), &original_dict);
+
+  // Each copied entry must exist, be a distinct object, keep its type, and
+  // hold a value equal to the original's.
+  Value* copy_null = NULL;
+  ASSERT_TRUE(copy_dict->Get("null", &copy_null));
+  ASSERT_TRUE(copy_null);
+  ASSERT_NE(copy_null, original_null);
+  ASSERT_TRUE(copy_null->IsType(Value::TYPE_NULL));
+
+  Value* copy_bool = NULL;
+  ASSERT_TRUE(copy_dict->Get("bool", &copy_bool));
+  ASSERT_TRUE(copy_bool);
+  ASSERT_NE(copy_bool, original_bool);
+  ASSERT_TRUE(copy_bool->IsType(Value::TYPE_BOOLEAN));
+  bool copy_bool_value = false;
+  ASSERT_TRUE(copy_bool->GetAsBoolean(&copy_bool_value));
+  ASSERT_TRUE(copy_bool_value);
+
+  Value* copy_int = NULL;
+  ASSERT_TRUE(copy_dict->Get("int", &copy_int));
+  ASSERT_TRUE(copy_int);
+  ASSERT_NE(copy_int, original_int);
+  ASSERT_TRUE(copy_int->IsType(Value::TYPE_INTEGER));
+  int copy_int_value = 0;
+  ASSERT_TRUE(copy_int->GetAsInteger(&copy_int_value));
+  ASSERT_EQ(42, copy_int_value);
+
+  Value* copy_double = NULL;
+  ASSERT_TRUE(copy_dict->Get("double", &copy_double));
+  ASSERT_TRUE(copy_double);
+  ASSERT_NE(copy_double, original_double);
+  ASSERT_TRUE(copy_double->IsType(Value::TYPE_DOUBLE));
+  double copy_double_value = 0;
+  ASSERT_TRUE(copy_double->GetAsDouble(&copy_double_value));
+  ASSERT_EQ(3.14, copy_double_value);
+
+  Value* copy_string = NULL;
+  ASSERT_TRUE(copy_dict->Get("string", &copy_string));
+  ASSERT_TRUE(copy_string);
+  ASSERT_NE(copy_string, original_string);
+  ASSERT_TRUE(copy_string->IsType(Value::TYPE_STRING));
+  std::string copy_string_value;
+  string16 copy_string16_value;
+  ASSERT_TRUE(copy_string->GetAsString(&copy_string_value));
+  ASSERT_TRUE(copy_string->GetAsString(&copy_string16_value));
+  ASSERT_EQ(std::string("hello"), copy_string_value);
+  ASSERT_EQ(ASCIIToUTF16("hello"), copy_string16_value);
+
+  Value* copy_string16 = NULL;
+  ASSERT_TRUE(copy_dict->Get("string16", &copy_string16));
+  ASSERT_TRUE(copy_string16);
+  ASSERT_NE(copy_string16, original_string16);
+  ASSERT_TRUE(copy_string16->IsType(Value::TYPE_STRING));
+  ASSERT_TRUE(copy_string16->GetAsString(&copy_string_value));
+  ASSERT_TRUE(copy_string16->GetAsString(&copy_string16_value));
+  ASSERT_EQ(std::string("hello16"), copy_string_value);
+  ASSERT_EQ(ASCIIToUTF16("hello16"), copy_string16_value);
+
+  // Binary data must be copied into a fresh buffer with identical contents.
+  Value* copy_binary = NULL;
+  ASSERT_TRUE(copy_dict->Get("binary", &copy_binary));
+  ASSERT_TRUE(copy_binary);
+  ASSERT_NE(copy_binary, original_binary);
+  ASSERT_TRUE(copy_binary->IsType(Value::TYPE_BINARY));
+  ASSERT_NE(original_binary->GetBuffer(),
+            static_cast<BinaryValue*>(copy_binary)->GetBuffer());
+  ASSERT_EQ(original_binary->GetSize(),
+            static_cast<BinaryValue*>(copy_binary)->GetSize());
+  ASSERT_EQ(0, memcmp(original_binary->GetBuffer(),
+                      static_cast<BinaryValue*>(copy_binary)->GetBuffer(),
+                      original_binary->GetSize()));
+
+  // List elements are deep-copied as well.
+  Value* copy_value = NULL;
+  ASSERT_TRUE(copy_dict->Get("list", &copy_value));
+  ASSERT_TRUE(copy_value);
+  ASSERT_NE(copy_value, original_list);
+  ASSERT_TRUE(copy_value->IsType(Value::TYPE_LIST));
+  ListValue* copy_list = NULL;
+  ASSERT_TRUE(copy_value->GetAsList(&copy_list));
+  ASSERT_TRUE(copy_list);
+  ASSERT_EQ(2U, copy_list->GetSize());
+
+  Value* copy_list_element_0;
+  ASSERT_TRUE(copy_list->Get(0, &copy_list_element_0));
+  ASSERT_TRUE(copy_list_element_0);
+  ASSERT_NE(copy_list_element_0, original_list_element_0);
+  int copy_list_element_0_value;
+  ASSERT_TRUE(copy_list_element_0->GetAsInteger(&copy_list_element_0_value));
+  ASSERT_EQ(0, copy_list_element_0_value);
+
+  Value* copy_list_element_1;
+  ASSERT_TRUE(copy_list->Get(1, &copy_list_element_1));
+  ASSERT_TRUE(copy_list_element_1);
+  ASSERT_NE(copy_list_element_1, original_list_element_1);
+  int copy_list_element_1_value;
+  ASSERT_TRUE(copy_list_element_1->GetAsInteger(&copy_list_element_1_value));
+  ASSERT_EQ(1, copy_list_element_1_value);
+
+  // Nested dictionaries are deep-copied, preserving their keys.
+  copy_value = NULL;
+  ASSERT_TRUE(copy_dict->Get("dictionary", &copy_value));
+  ASSERT_TRUE(copy_value);
+  ASSERT_NE(copy_value, original_nested_dictionary);
+  ASSERT_TRUE(copy_value->IsType(Value::TYPE_DICTIONARY));
+  DictionaryValue* copy_nested_dictionary = NULL;
+  ASSERT_TRUE(copy_value->GetAsDictionary(&copy_nested_dictionary));
+  ASSERT_TRUE(copy_nested_dictionary);
+  EXPECT_TRUE(copy_nested_dictionary->HasKey("key"));
+}
+
+// Verifies Value::Equals() across types: distinct null objects compare equal,
+// different types compare unequal, deep copies compare equal, and mutating
+// either side (values or keys) breaks equality.
+TEST(ValuesTest, Equals) {
+  std::unique_ptr<Value> null1(Value::CreateNullValue());
+  std::unique_ptr<Value> null2(Value::CreateNullValue());
+  EXPECT_NE(null1.get(), null2.get());
+  EXPECT_TRUE(null1->Equals(null2.get()));
+
+  FundamentalValue boolean(false);
+  EXPECT_FALSE(null1->Equals(&boolean));
+
+  DictionaryValue dv;
+  dv.SetBoolean("a", false);
+  dv.SetInteger("b", 2);
+  dv.SetDouble("c", 2.5);
+  dv.SetString("d1", "string");
+  dv.SetString("d2", ASCIIToUTF16("http://google.com"));
+  dv.Set("e", Value::CreateNullValue());
+
+  std::unique_ptr<DictionaryValue> copy = dv.CreateDeepCopy();
+  EXPECT_TRUE(dv.Equals(copy.get()));
+
+  // |original_list| stays valid after the move because |dv| owns it now.
+  std::unique_ptr<ListValue> list(new ListValue);
+  ListValue* original_list = list.get();
+  list->Append(Value::CreateNullValue());
+  list->Append(WrapUnique(new DictionaryValue));
+  std::unique_ptr<Value> list_copy(list->CreateDeepCopy());
+
+  dv.Set("f", std::move(list));
+  EXPECT_FALSE(dv.Equals(copy.get()));
+  copy->Set("f", std::move(list_copy));
+  EXPECT_TRUE(dv.Equals(copy.get()));
+
+  // Mutating the nested list through the retained pointer breaks equality.
+  original_list->Append(WrapUnique(new FundamentalValue(true)));
+  EXPECT_FALSE(dv.Equals(copy.get()));
+
+  // Check if Equals detects differences in only the keys.
+  copy = dv.CreateDeepCopy();
+  EXPECT_TRUE(dv.Equals(copy.get()));
+  copy->Remove("a", NULL);
+  copy->SetBoolean("aa", false);
+  EXPECT_FALSE(dv.Equals(copy.get()));
+}
+
+// Verifies the static, null-tolerant Value::Equals(a, b): two NULL pointers
+// are equal, NULL never equals a real Value (even a null-typed one), and
+// value comparison is symmetric.
+TEST(ValuesTest, StaticEquals) {
+  std::unique_ptr<Value> null1(Value::CreateNullValue());
+  std::unique_ptr<Value> null2(Value::CreateNullValue());
+  EXPECT_TRUE(Value::Equals(null1.get(), null2.get()));
+  EXPECT_TRUE(Value::Equals(NULL, NULL));
+
+  std::unique_ptr<Value> i42(new FundamentalValue(42));
+  std::unique_ptr<Value> j42(new FundamentalValue(42));
+  std::unique_ptr<Value> i17(new FundamentalValue(17));
+  EXPECT_TRUE(Value::Equals(i42.get(), i42.get()));
+  EXPECT_TRUE(Value::Equals(j42.get(), i42.get()));
+  EXPECT_TRUE(Value::Equals(i42.get(), j42.get()));
+  EXPECT_FALSE(Value::Equals(i42.get(), i17.get()));
+  EXPECT_FALSE(Value::Equals(i42.get(), NULL));
+  EXPECT_FALSE(Value::Equals(NULL, i42.get()));
+
+  // NULL and Value::CreateNullValue() are intentionally different: We need
+  // support for NULL as a return value for "undefined" without caring for
+  // ownership of the pointer.
+  EXPECT_FALSE(Value::Equals(null1.get(), NULL));
+  EXPECT_FALSE(Value::Equals(NULL, null1.get()));
+}
+
+// Verifies that CreateDeepCopy() can be called through a base Value* for
+// every concrete subtype and yields a copy equal to the original.
+TEST(ValuesTest, DeepCopyCovariantReturnTypes) {
+  // Populate a dictionary with one value of each type, keeping base-class
+  // pointers to each original.
+  DictionaryValue original_dict;
+  std::unique_ptr<Value> scoped_null(Value::CreateNullValue());
+  Value* original_null = scoped_null.get();
+  original_dict.Set("null", std::move(scoped_null));
+  std::unique_ptr<FundamentalValue> scoped_bool(new FundamentalValue(true));
+  Value* original_bool = scoped_bool.get();
+  original_dict.Set("bool", std::move(scoped_bool));
+  std::unique_ptr<FundamentalValue> scoped_int(new FundamentalValue(42));
+  Value* original_int = scoped_int.get();
+  original_dict.Set("int", std::move(scoped_int));
+  std::unique_ptr<FundamentalValue> scoped_double(new FundamentalValue(3.14));
+  Value* original_double = scoped_double.get();
+  original_dict.Set("double", std::move(scoped_double));
+  std::unique_ptr<StringValue> scoped_string(new StringValue("hello"));
+  Value* original_string = scoped_string.get();
+  original_dict.Set("string", std::move(scoped_string));
+  std::unique_ptr<StringValue> scoped_string16(
+      new StringValue(ASCIIToUTF16("hello16")));
+  Value* original_string16 = scoped_string16.get();
+  original_dict.Set("string16", std::move(scoped_string16));
+
+  std::unique_ptr<char[]> original_buffer(new char[42]);
+  memset(original_buffer.get(), '!', 42);
+  std::unique_ptr<BinaryValue> scoped_binary(
+      new BinaryValue(std::move(original_buffer), 42));
+  Value* original_binary = scoped_binary.get();
+  original_dict.Set("binary", std::move(scoped_binary));
+
+  std::unique_ptr<ListValue> scoped_list(new ListValue());
+  Value* original_list = scoped_list.get();
+  std::unique_ptr<FundamentalValue> scoped_list_element_0(
+      new FundamentalValue(0));
+  scoped_list->Append(std::move(scoped_list_element_0));
+  std::unique_ptr<FundamentalValue> scoped_list_element_1(
+      new FundamentalValue(1));
+  scoped_list->Append(std::move(scoped_list_element_1));
+  original_dict.Set("list", std::move(scoped_list));
+
+  // Copy each value through its base-class pointer.
+  std::unique_ptr<Value> copy_dict = original_dict.CreateDeepCopy();
+  std::unique_ptr<Value> copy_null = original_null->CreateDeepCopy();
+  std::unique_ptr<Value> copy_bool = original_bool->CreateDeepCopy();
+  std::unique_ptr<Value> copy_int = original_int->CreateDeepCopy();
+  std::unique_ptr<Value> copy_double = original_double->CreateDeepCopy();
+  std::unique_ptr<Value> copy_string = original_string->CreateDeepCopy();
+  std::unique_ptr<Value> copy_string16 = original_string16->CreateDeepCopy();
+  std::unique_ptr<Value> copy_binary = original_binary->CreateDeepCopy();
+  std::unique_ptr<Value> copy_list = original_list->CreateDeepCopy();
+
+  // Every copy must compare equal to its original.
+  EXPECT_TRUE(original_dict.Equals(copy_dict.get()));
+  EXPECT_TRUE(original_null->Equals(copy_null.get()));
+  EXPECT_TRUE(original_bool->Equals(copy_bool.get()));
+  EXPECT_TRUE(original_int->Equals(copy_int.get()));
+  EXPECT_TRUE(original_double->Equals(copy_double.get()));
+  EXPECT_TRUE(original_string->Equals(copy_string.get()));
+  EXPECT_TRUE(original_string16->Equals(copy_string16.get()));
+  EXPECT_TRUE(original_binary->Equals(copy_binary.get()));
+  EXPECT_TRUE(original_list->Equals(copy_list.get()));
+}
+
+// Verifies DeepCopyWithoutEmptyChildren(): empty lists/dictionaries are
+// pruned recursively, while non-empty values (including empty strings) are
+// preserved.
+TEST(ValuesTest, RemoveEmptyChildren) {
+  std::unique_ptr<DictionaryValue> root(new DictionaryValue);
+  // Remove empty lists and dictionaries.
+  root->Set("empty_dict", WrapUnique(new DictionaryValue));
+  root->Set("empty_list", WrapUnique(new ListValue));
+  root->SetWithoutPathExpansion("a.b.c.d.e",
+                                WrapUnique(new DictionaryValue));
+  root = root->DeepCopyWithoutEmptyChildren();
+  EXPECT_TRUE(root->empty());
+
+  // Make sure we don't prune too much.
+  root->SetBoolean("bool", true);
+  root->Set("empty_dict", WrapUnique(new DictionaryValue));
+  root->SetString("empty_string", std::string());
+  root = root->DeepCopyWithoutEmptyChildren();
+  EXPECT_EQ(2U, root->size());
+
+  // Should do nothing.
+  root = root->DeepCopyWithoutEmptyChildren();
+  EXPECT_EQ(2U, root->size());
+
+  // Nested test cases. These should all reduce back to the bool and string
+  // set above.
+  {
+    root->Set("a.b.c.d.e", WrapUnique(new DictionaryValue));
+    root = root->DeepCopyWithoutEmptyChildren();
+    EXPECT_EQ(2U, root->size());
+  }
+  {
+    std::unique_ptr<DictionaryValue> inner(new DictionaryValue);
+    inner->Set("empty_dict", WrapUnique(new DictionaryValue));
+    inner->Set("empty_list", WrapUnique(new ListValue));
+    root->Set("dict_with_empty_children", std::move(inner));
+    root = root->DeepCopyWithoutEmptyChildren();
+    EXPECT_EQ(2U, root->size());
+  }
+  {
+    std::unique_ptr<ListValue> inner(new ListValue);
+    inner->Append(WrapUnique(new DictionaryValue));
+    inner->Append(WrapUnique(new ListValue));
+    root->Set("list_with_empty_children", std::move(inner));
+    root = root->DeepCopyWithoutEmptyChildren();
+    EXPECT_EQ(2U, root->size());
+  }
+
+  // Nested with siblings.
+  {
+    std::unique_ptr<ListValue> inner(new ListValue());
+    inner->Append(WrapUnique(new DictionaryValue));
+    inner->Append(WrapUnique(new ListValue));
+    root->Set("list_with_empty_children", std::move(inner));
+    std::unique_ptr<DictionaryValue> inner2(new DictionaryValue);
+    inner2->Set("empty_dict", WrapUnique(new DictionaryValue));
+    inner2->Set("empty_list", WrapUnique(new ListValue));
+    root->Set("dict_with_empty_children", std::move(inner2));
+    root = root->DeepCopyWithoutEmptyChildren();
+    EXPECT_EQ(2U, root->size());
+  }
+
+  // Make sure nested values don't get pruned.
+  {
+    std::unique_ptr<ListValue> inner(new ListValue);
+    std::unique_ptr<ListValue> inner2(new ListValue);
+    inner2->Append(WrapUnique(new StringValue("hello")));
+    inner->Append(WrapUnique(new DictionaryValue));
+    inner->Append(std::move(inner2));
+    root->Set("list_with_empty_children", std::move(inner));
+    root = root->DeepCopyWithoutEmptyChildren();
+    EXPECT_EQ(3U, root->size());
+
+    ListValue* inner_value, *inner_value2;
+    EXPECT_TRUE(root->GetList("list_with_empty_children", &inner_value));
+    EXPECT_EQ(1U, inner_value->GetSize());  // Dictionary was pruned.
+    EXPECT_TRUE(inner_value->GetList(0, &inner_value2));
+    EXPECT_EQ(1U, inner_value2->GetSize());
+  }
+}
+
+// Verifies MergeDictionary(): merged-in keys are added, colliding scalar
+// keys are replaced by the merge source, and nested dictionaries with the
+// same key are merged recursively rather than replaced wholesale.
+TEST(ValuesTest, MergeDictionary) {
+  std::unique_ptr<DictionaryValue> base(new DictionaryValue);
+  base->SetString("base_key", "base_key_value_base");
+  base->SetString("collide_key", "collide_key_value_base");
+  std::unique_ptr<DictionaryValue> base_sub_dict(new DictionaryValue);
+  base_sub_dict->SetString("sub_base_key", "sub_base_key_value_base");
+  base_sub_dict->SetString("sub_collide_key", "sub_collide_key_value_base");
+  base->Set("sub_dict_key", std::move(base_sub_dict));
+
+  std::unique_ptr<DictionaryValue> merge(new DictionaryValue);
+  merge->SetString("merge_key", "merge_key_value_merge");
+  merge->SetString("collide_key", "collide_key_value_merge");
+  std::unique_ptr<DictionaryValue> merge_sub_dict(new DictionaryValue);
+  merge_sub_dict->SetString("sub_merge_key", "sub_merge_key_value_merge");
+  merge_sub_dict->SetString("sub_collide_key", "sub_collide_key_value_merge");
+  merge->Set("sub_dict_key", std::move(merge_sub_dict));
+
+  base->MergeDictionary(merge.get());
+
+  EXPECT_EQ(4U, base->size());
+  std::string base_key_value;
+  EXPECT_TRUE(base->GetString("base_key", &base_key_value));
+  EXPECT_EQ("base_key_value_base", base_key_value);  // Base value preserved.
+  std::string collide_key_value;
+  EXPECT_TRUE(base->GetString("collide_key", &collide_key_value));
+  EXPECT_EQ("collide_key_value_merge", collide_key_value);  // Replaced.
+  std::string merge_key_value;
+  EXPECT_TRUE(base->GetString("merge_key", &merge_key_value));
+  EXPECT_EQ("merge_key_value_merge", merge_key_value);  // Merged in.
+
+  // The nested dictionaries were merged key-by-key, not replaced.
+  DictionaryValue* res_sub_dict;
+  EXPECT_TRUE(base->GetDictionary("sub_dict_key", &res_sub_dict));
+  EXPECT_EQ(3U, res_sub_dict->size());
+  std::string sub_base_key_value;
+  EXPECT_TRUE(res_sub_dict->GetString("sub_base_key", &sub_base_key_value));
+  EXPECT_EQ("sub_base_key_value_base", sub_base_key_value);  // Preserved.
+  std::string sub_collide_key_value;
+  EXPECT_TRUE(res_sub_dict->GetString("sub_collide_key",
+                                      &sub_collide_key_value));
+  EXPECT_EQ("sub_collide_key_value_merge", sub_collide_key_value);  // Replaced.
+  std::string sub_merge_key_value;
+  EXPECT_TRUE(res_sub_dict->GetString("sub_merge_key", &sub_merge_key_value));
+  EXPECT_EQ("sub_merge_key_value_merge", sub_merge_key_value);  // Merged in.
+}
+
+// Verifies that MergeDictionary() deep-copies nested dictionaries: the
+// merged result must not share objects with (or be affected by changes to,
+// or destruction of) the source dictionary.
+TEST(ValuesTest, MergeDictionaryDeepCopy) {
+  std::unique_ptr<DictionaryValue> child(new DictionaryValue);
+  DictionaryValue* original_child = child.get();
+  child->SetString("test", "value");
+  EXPECT_EQ(1U, child->size());
+
+  std::string value;
+  EXPECT_TRUE(child->GetString("test", &value));
+  EXPECT_EQ("value", value);
+
+  std::unique_ptr<DictionaryValue> base(new DictionaryValue);
+  base->Set("dict", std::move(child));
+  EXPECT_EQ(1U, base->size());
+
+  // Set() stores the child itself, not a copy.
+  DictionaryValue* ptr;
+  EXPECT_TRUE(base->GetDictionary("dict", &ptr));
+  EXPECT_EQ(original_child, ptr);
+
+  // Merge must create a distinct copy of the nested dictionary.
+  std::unique_ptr<DictionaryValue> merged(new DictionaryValue);
+  merged->MergeDictionary(base.get());
+  EXPECT_EQ(1U, merged->size());
+  EXPECT_TRUE(merged->GetDictionary("dict", &ptr));
+  EXPECT_NE(original_child, ptr);
+  EXPECT_TRUE(ptr->GetString("test", &value));
+  EXPECT_EQ("value", value);
+
+  // Mutating and destroying the source must not affect the merged copy.
+  original_child->SetString("test", "overwrite");
+  base.reset();
+  EXPECT_TRUE(ptr->GetString("test", &value));
+  EXPECT_EQ("value", value);
+}
+
+// Verifies DictionaryValue::Iterator: an empty dictionary yields no
+// iterations, and each stored key/value pair is visited exactly once.
+TEST(ValuesTest, DictionaryIterator) {
+  DictionaryValue dict;
+  for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
+    ADD_FAILURE();  // Empty dictionary must not iterate.
+  }
+
+  StringValue value1("value1");
+  dict.Set("key1", value1.CreateDeepCopy());
+  bool seen1 = false;
+  for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
+    EXPECT_FALSE(seen1);  // Single entry must be visited exactly once.
+    EXPECT_EQ("key1", it.key());
+    EXPECT_TRUE(value1.Equals(&it.value()));
+    seen1 = true;
+  }
+  EXPECT_TRUE(seen1);
+
+  StringValue value2("value2");
+  dict.Set("key2", value2.CreateDeepCopy());
+  bool seen2 = seen1 = false;
+  for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
+    if (it.key() == "key1") {
+      EXPECT_FALSE(seen1);
+      EXPECT_TRUE(value1.Equals(&it.value()));
+      seen1 = true;
+    } else if (it.key() == "key2") {
+      EXPECT_FALSE(seen2);
+      EXPECT_TRUE(value2.Equals(&it.value()));
+      seen2 = true;
+    } else {
+      ADD_FAILURE();  // No other keys should ever appear.
+    }
+  }
+  EXPECT_TRUE(seen1);
+  EXPECT_TRUE(seen2);
+}
+
+// DictionaryValue/ListValue's Get*() methods should accept NULL as an out-value
+// and still return true/false based on success.
+TEST(ValuesTest, GetWithNullOutValue) {
+ DictionaryValue main_dict;
+ ListValue main_list;
+
+ FundamentalValue bool_value(false);
+ FundamentalValue int_value(1234);
+ FundamentalValue double_value(12.34567);
+ StringValue string_value("foo");
+ BinaryValue binary_value;
+ DictionaryValue dict_value;
+ ListValue list_value;
+
+ main_dict.Set("bool", bool_value.CreateDeepCopy());
+ main_dict.Set("int", int_value.CreateDeepCopy());
+ main_dict.Set("double", double_value.CreateDeepCopy());
+ main_dict.Set("string", string_value.CreateDeepCopy());
+ main_dict.Set("binary", binary_value.CreateDeepCopy());
+ main_dict.Set("dict", dict_value.CreateDeepCopy());
+ main_dict.Set("list", list_value.CreateDeepCopy());
+
+ main_list.Append(bool_value.CreateDeepCopy());
+ main_list.Append(int_value.CreateDeepCopy());
+ main_list.Append(double_value.CreateDeepCopy());
+ main_list.Append(string_value.CreateDeepCopy());
+ main_list.Append(binary_value.CreateDeepCopy());
+ main_list.Append(dict_value.CreateDeepCopy());
+ main_list.Append(list_value.CreateDeepCopy());
+
+ EXPECT_TRUE(main_dict.Get("bool", NULL));
+ EXPECT_TRUE(main_dict.Get("int", NULL));
+ EXPECT_TRUE(main_dict.Get("double", NULL));
+ EXPECT_TRUE(main_dict.Get("string", NULL));
+ EXPECT_TRUE(main_dict.Get("binary", NULL));
+ EXPECT_TRUE(main_dict.Get("dict", NULL));
+ EXPECT_TRUE(main_dict.Get("list", NULL));
+ EXPECT_FALSE(main_dict.Get("DNE", NULL));
+
+ EXPECT_TRUE(main_dict.GetBoolean("bool", NULL));
+ EXPECT_FALSE(main_dict.GetBoolean("int", NULL));
+ EXPECT_FALSE(main_dict.GetBoolean("double", NULL));
+ EXPECT_FALSE(main_dict.GetBoolean("string", NULL));
+ EXPECT_FALSE(main_dict.GetBoolean("binary", NULL));
+ EXPECT_FALSE(main_dict.GetBoolean("dict", NULL));
+ EXPECT_FALSE(main_dict.GetBoolean("list", NULL));
+ EXPECT_FALSE(main_dict.GetBoolean("DNE", NULL));
+
+ EXPECT_FALSE(main_dict.GetInteger("bool", NULL));
+ EXPECT_TRUE(main_dict.GetInteger("int", NULL));
+ EXPECT_FALSE(main_dict.GetInteger("double", NULL));
+ EXPECT_FALSE(main_dict.GetInteger("string", NULL));
+ EXPECT_FALSE(main_dict.GetInteger("binary", NULL));
+ EXPECT_FALSE(main_dict.GetInteger("dict", NULL));
+ EXPECT_FALSE(main_dict.GetInteger("list", NULL));
+ EXPECT_FALSE(main_dict.GetInteger("DNE", NULL));
+
+ // Both int and double values can be obtained from GetDouble.
+ EXPECT_FALSE(main_dict.GetDouble("bool", NULL));
+ EXPECT_TRUE(main_dict.GetDouble("int", NULL));
+ EXPECT_TRUE(main_dict.GetDouble("double", NULL));
+ EXPECT_FALSE(main_dict.GetDouble("string", NULL));
+ EXPECT_FALSE(main_dict.GetDouble("binary", NULL));
+ EXPECT_FALSE(main_dict.GetDouble("dict", NULL));
+ EXPECT_FALSE(main_dict.GetDouble("list", NULL));
+ EXPECT_FALSE(main_dict.GetDouble("DNE", NULL));
+
+ EXPECT_FALSE(main_dict.GetString("bool", static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_dict.GetString("int", static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_dict.GetString("double", static_cast<std::string*>(NULL)));
+ EXPECT_TRUE(main_dict.GetString("string", static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_dict.GetString("binary", static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_dict.GetString("dict", static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_dict.GetString("list", static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_dict.GetString("DNE", static_cast<std::string*>(NULL)));
+
+ EXPECT_FALSE(main_dict.GetString("bool", static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_dict.GetString("int", static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_dict.GetString("double", static_cast<string16*>(NULL)));
+ EXPECT_TRUE(main_dict.GetString("string", static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_dict.GetString("binary", static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_dict.GetString("dict", static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_dict.GetString("list", static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_dict.GetString("DNE", static_cast<string16*>(NULL)));
+
+ EXPECT_FALSE(main_dict.GetBinary("bool", NULL));
+ EXPECT_FALSE(main_dict.GetBinary("int", NULL));
+ EXPECT_FALSE(main_dict.GetBinary("double", NULL));
+ EXPECT_FALSE(main_dict.GetBinary("string", NULL));
+ EXPECT_TRUE(main_dict.GetBinary("binary", NULL));
+ EXPECT_FALSE(main_dict.GetBinary("dict", NULL));
+ EXPECT_FALSE(main_dict.GetBinary("list", NULL));
+ EXPECT_FALSE(main_dict.GetBinary("DNE", NULL));
+
+ EXPECT_FALSE(main_dict.GetDictionary("bool", NULL));
+ EXPECT_FALSE(main_dict.GetDictionary("int", NULL));
+ EXPECT_FALSE(main_dict.GetDictionary("double", NULL));
+ EXPECT_FALSE(main_dict.GetDictionary("string", NULL));
+ EXPECT_FALSE(main_dict.GetDictionary("binary", NULL));
+ EXPECT_TRUE(main_dict.GetDictionary("dict", NULL));
+ EXPECT_FALSE(main_dict.GetDictionary("list", NULL));
+ EXPECT_FALSE(main_dict.GetDictionary("DNE", NULL));
+
+ EXPECT_FALSE(main_dict.GetList("bool", NULL));
+ EXPECT_FALSE(main_dict.GetList("int", NULL));
+ EXPECT_FALSE(main_dict.GetList("double", NULL));
+ EXPECT_FALSE(main_dict.GetList("string", NULL));
+ EXPECT_FALSE(main_dict.GetList("binary", NULL));
+ EXPECT_FALSE(main_dict.GetList("dict", NULL));
+ EXPECT_TRUE(main_dict.GetList("list", NULL));
+ EXPECT_FALSE(main_dict.GetList("DNE", NULL));
+
+ EXPECT_TRUE(main_dict.GetWithoutPathExpansion("bool", NULL));
+ EXPECT_TRUE(main_dict.GetWithoutPathExpansion("int", NULL));
+ EXPECT_TRUE(main_dict.GetWithoutPathExpansion("double", NULL));
+ EXPECT_TRUE(main_dict.GetWithoutPathExpansion("string", NULL));
+ EXPECT_TRUE(main_dict.GetWithoutPathExpansion("binary", NULL));
+ EXPECT_TRUE(main_dict.GetWithoutPathExpansion("dict", NULL));
+ EXPECT_TRUE(main_dict.GetWithoutPathExpansion("list", NULL));
+ EXPECT_FALSE(main_dict.GetWithoutPathExpansion("DNE", NULL));
+
+ EXPECT_TRUE(main_dict.GetBooleanWithoutPathExpansion("bool", NULL));
+ EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("int", NULL));
+ EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("double", NULL));
+ EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("string", NULL));
+ EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("binary", NULL));
+ EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("dict", NULL));
+ EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("list", NULL));
+ EXPECT_FALSE(main_dict.GetBooleanWithoutPathExpansion("DNE", NULL));
+
+ EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("bool", NULL));
+ EXPECT_TRUE(main_dict.GetIntegerWithoutPathExpansion("int", NULL));
+ EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("double", NULL));
+ EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("string", NULL));
+ EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("binary", NULL));
+ EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("dict", NULL));
+ EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("list", NULL));
+ EXPECT_FALSE(main_dict.GetIntegerWithoutPathExpansion("DNE", NULL));
+
+ EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("bool", NULL));
+ EXPECT_TRUE(main_dict.GetDoubleWithoutPathExpansion("int", NULL));
+ EXPECT_TRUE(main_dict.GetDoubleWithoutPathExpansion("double", NULL));
+ EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("string", NULL));
+ EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("binary", NULL));
+ EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("dict", NULL));
+ EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("list", NULL));
+ EXPECT_FALSE(main_dict.GetDoubleWithoutPathExpansion("DNE", NULL));
+
+ EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+ "bool", static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+ "int", static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+ "double", static_cast<std::string*>(NULL)));
+ EXPECT_TRUE(main_dict.GetStringWithoutPathExpansion(
+ "string", static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+ "binary", static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+ "dict", static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+ "list", static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+ "DNE", static_cast<std::string*>(NULL)));
+
+ EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+ "bool", static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+ "int", static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+ "double", static_cast<string16*>(NULL)));
+ EXPECT_TRUE(main_dict.GetStringWithoutPathExpansion(
+ "string", static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+ "binary", static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+ "dict", static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+ "list", static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_dict.GetStringWithoutPathExpansion(
+ "DNE", static_cast<string16*>(NULL)));
+
+ // There is no GetBinaryWithoutPathExpansion for some reason, but if there
+ // were it should be tested here...
+
+ EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("bool", NULL));
+ EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("int", NULL));
+ EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("double", NULL));
+ EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("string", NULL));
+ EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("binary", NULL));
+ EXPECT_TRUE(main_dict.GetDictionaryWithoutPathExpansion("dict", NULL));
+ EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("list", NULL));
+ EXPECT_FALSE(main_dict.GetDictionaryWithoutPathExpansion("DNE", NULL));
+
+ EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("bool", NULL));
+ EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("int", NULL));
+ EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("double", NULL));
+ EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("string", NULL));
+ EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("binary", NULL));
+ EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("dict", NULL));
+ EXPECT_TRUE(main_dict.GetListWithoutPathExpansion("list", NULL));
+ EXPECT_FALSE(main_dict.GetListWithoutPathExpansion("DNE", NULL));
+
+ EXPECT_TRUE(main_list.Get(0, NULL));
+ EXPECT_TRUE(main_list.Get(1, NULL));
+ EXPECT_TRUE(main_list.Get(2, NULL));
+ EXPECT_TRUE(main_list.Get(3, NULL));
+ EXPECT_TRUE(main_list.Get(4, NULL));
+ EXPECT_TRUE(main_list.Get(5, NULL));
+ EXPECT_TRUE(main_list.Get(6, NULL));
+ EXPECT_FALSE(main_list.Get(7, NULL));
+
+ EXPECT_TRUE(main_list.GetBoolean(0, NULL));
+ EXPECT_FALSE(main_list.GetBoolean(1, NULL));
+ EXPECT_FALSE(main_list.GetBoolean(2, NULL));
+ EXPECT_FALSE(main_list.GetBoolean(3, NULL));
+ EXPECT_FALSE(main_list.GetBoolean(4, NULL));
+ EXPECT_FALSE(main_list.GetBoolean(5, NULL));
+ EXPECT_FALSE(main_list.GetBoolean(6, NULL));
+ EXPECT_FALSE(main_list.GetBoolean(7, NULL));
+
+ EXPECT_FALSE(main_list.GetInteger(0, NULL));
+ EXPECT_TRUE(main_list.GetInteger(1, NULL));
+ EXPECT_FALSE(main_list.GetInteger(2, NULL));
+ EXPECT_FALSE(main_list.GetInteger(3, NULL));
+ EXPECT_FALSE(main_list.GetInteger(4, NULL));
+ EXPECT_FALSE(main_list.GetInteger(5, NULL));
+ EXPECT_FALSE(main_list.GetInteger(6, NULL));
+ EXPECT_FALSE(main_list.GetInteger(7, NULL));
+
+ EXPECT_FALSE(main_list.GetDouble(0, NULL));
+ EXPECT_TRUE(main_list.GetDouble(1, NULL));
+ EXPECT_TRUE(main_list.GetDouble(2, NULL));
+ EXPECT_FALSE(main_list.GetDouble(3, NULL));
+ EXPECT_FALSE(main_list.GetDouble(4, NULL));
+ EXPECT_FALSE(main_list.GetDouble(5, NULL));
+ EXPECT_FALSE(main_list.GetDouble(6, NULL));
+ EXPECT_FALSE(main_list.GetDouble(7, NULL));
+
+ EXPECT_FALSE(main_list.GetString(0, static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_list.GetString(1, static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_list.GetString(2, static_cast<std::string*>(NULL)));
+ EXPECT_TRUE(main_list.GetString(3, static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_list.GetString(4, static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_list.GetString(5, static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_list.GetString(6, static_cast<std::string*>(NULL)));
+ EXPECT_FALSE(main_list.GetString(7, static_cast<std::string*>(NULL)));
+
+ EXPECT_FALSE(main_list.GetString(0, static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_list.GetString(1, static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_list.GetString(2, static_cast<string16*>(NULL)));
+ EXPECT_TRUE(main_list.GetString(3, static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_list.GetString(4, static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_list.GetString(5, static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_list.GetString(6, static_cast<string16*>(NULL)));
+ EXPECT_FALSE(main_list.GetString(7, static_cast<string16*>(NULL)));
+
+ EXPECT_FALSE(main_list.GetBinary(0, NULL));
+ EXPECT_FALSE(main_list.GetBinary(1, NULL));
+ EXPECT_FALSE(main_list.GetBinary(2, NULL));
+ EXPECT_FALSE(main_list.GetBinary(3, NULL));
+ EXPECT_TRUE(main_list.GetBinary(4, NULL));
+ EXPECT_FALSE(main_list.GetBinary(5, NULL));
+ EXPECT_FALSE(main_list.GetBinary(6, NULL));
+ EXPECT_FALSE(main_list.GetBinary(7, NULL));
+
+ EXPECT_FALSE(main_list.GetDictionary(0, NULL));
+ EXPECT_FALSE(main_list.GetDictionary(1, NULL));
+ EXPECT_FALSE(main_list.GetDictionary(2, NULL));
+ EXPECT_FALSE(main_list.GetDictionary(3, NULL));
+ EXPECT_FALSE(main_list.GetDictionary(4, NULL));
+ EXPECT_TRUE(main_list.GetDictionary(5, NULL));
+ EXPECT_FALSE(main_list.GetDictionary(6, NULL));
+ EXPECT_FALSE(main_list.GetDictionary(7, NULL));
+
+ EXPECT_FALSE(main_list.GetList(0, NULL));
+ EXPECT_FALSE(main_list.GetList(1, NULL));
+ EXPECT_FALSE(main_list.GetList(2, NULL));
+ EXPECT_FALSE(main_list.GetList(3, NULL));
+ EXPECT_FALSE(main_list.GetList(4, NULL));
+ EXPECT_FALSE(main_list.GetList(5, NULL));
+ EXPECT_TRUE(main_list.GetList(6, NULL));
+ EXPECT_FALSE(main_list.GetList(7, NULL));
+}
+
+} // namespace base
diff --git a/libchrome/base/version.cc b/libchrome/base/version.cc
new file mode 100644
index 0000000..02213fb
--- /dev/null
+++ b/libchrome/base/version.cc
@@ -0,0 +1,193 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/version.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+namespace {
+
+// Parses |version_str| as a dotted sequence of decimal numbers and appends
+// each component to |parsed|. Returns true only if every dot-separated item
+// is a valid unsigned integer (no '+' sign, no wildcard character, and no
+// leading zeros on the first component); returns false otherwise. On failure
+// |parsed| may contain a partial prefix and should be discarded by the
+// caller.
+bool ParseVersionNumbers(const std::string& version_str,
+                         std::vector<uint32_t>* parsed) {
+  std::vector<StringPiece> numbers =
+      SplitStringPiece(version_str, ".", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+  if (numbers.empty())
+    return false;
+
+  for (auto it = numbers.begin(); it != numbers.end(); ++it) {
+    // StringToUint() accepts a leading '+', so reject it explicitly to keep
+    // strings like "1.+2" invalid.
+    if (StartsWith(*it, "+", CompareCase::SENSITIVE))
+      return false;
+
+    unsigned int num;
+    if (!StringToUint(*it, &num))
+      return false;
+
+    // This throws out leading zeros for the first item only.
+    if (it == numbers.begin() && UintToString(num) != *it)
+      return false;
+
+    // StringToUint returns unsigned int but Version fields are uint32_t.
+    static_assert(sizeof (uint32_t) == sizeof (unsigned int),
+                  "uint32_t must be same as unsigned int");
+    parsed->push_back(num);
+  }
+  return true;
+}
+
+// Compares version components in |components1| with components in
+// |components2|. Returns -1, 0 or 1 if |components1| is less than, equal to,
+// or greater than |components2|, respectively. Missing trailing components
+// compare as zeros, so e.g. "1.0" equals "1.0.0.0".
+int CompareVersionComponents(const std::vector<uint32_t>& components1,
+                             const std::vector<uint32_t>& components2) {
+  // First decide on the shared prefix alone.
+  const size_t common = std::min(components1.size(), components2.size());
+  for (size_t i = 0; i < common; ++i) {
+    if (components1[i] != components2[i])
+      return (components1[i] < components2[i]) ? -1 : 1;
+  }
+  // The shared prefix ties; any nonzero component left over in the longer
+  // vector breaks the tie in that vector's favor.
+  for (size_t i = common; i < components1.size(); ++i) {
+    if (components1[i] > 0)
+      return 1;
+  }
+  for (size_t i = common; i < components2.size(); ++i) {
+    if (components2[i] > 0)
+      return -1;
+  }
+  return 0;
+}
+
+} // namespace
+
+// The compiler-generated special members are exactly what we want: an
+// invalid (empty-component) default, memberwise copy, and trivial cleanup.
+Version::Version() = default;
+
+Version::Version(const Version& other) = default;
+
+Version::~Version() = default;
+
+// Initializes from a dotted decimal string such as "1.2.3.4". Leaves
+// |components_| empty (i.e. IsValid() == false) unless the entire string
+// parses cleanly.
+Version::Version(const std::string& version_str) {
+  std::vector<uint32_t> parsed;
+  if (ParseVersionNumbers(version_str, &parsed))
+    components_.swap(parsed);
+}
+
+bool Version::IsValid() const {
+  // A default-constructed or unparseable Version holds no components.
+  return !components_.empty();
+}
+
+// static
+bool Version::IsValidWildcardString(const std::string& wildcard_string) {
+  // A wildcard string is an ordinary version string optionally followed by a
+  // single trailing ".*". Strip that suffix, then require the remainder to
+  // parse as a plain version.
+  std::string version_string = wildcard_string;
+  const bool has_wildcard =
+      EndsWith(version_string, ".*", CompareCase::SENSITIVE);
+  if (has_wildcard)
+    version_string.resize(version_string.size() - 2);
+  return Version(version_string).IsValid();
+}
+
+// Compares |this| against |wildcard_string|, where a trailing ".*" makes any
+// extension of the numeric prefix compare equal. Returns -1, 0 or 1.
+int Version::CompareToWildcardString(const std::string& wildcard_string) const {
+  DCHECK(IsValid());
+  DCHECK(Version::IsValidWildcardString(wildcard_string));
+
+  // Without a trailing wildcard this is an ordinary comparison.
+  if (!EndsWith(wildcard_string, ".*", CompareCase::SENSITIVE)) {
+    const Version other(wildcard_string);
+    DCHECK(other.IsValid());
+    return CompareTo(other);
+  }
+
+  std::vector<uint32_t> parsed;
+  const bool success = ParseVersionNumbers(
+      wildcard_string.substr(0, wildcard_string.length() - 2), &parsed);
+  DCHECK(success);
+
+  // If |this| is smaller than or equal to the wildcard's numeric prefix, the
+  // trailing "*" cannot change the outcome: 1.2.3 vs. 1.3.* is still smaller,
+  // and 1.2.2 vs. 1.2.2.* (or 1.2.0.0.0.0 vs. 1.2.*) is still equal.
+  const int comparison = CompareVersionComponents(components_, parsed);
+  if (comparison <= 0)
+    return comparison;
+
+  // |this| compared greater, but if it merely extends the wildcard's digits
+  // (e.g. 1.2.3 vs. 1.2.*) the "*" absorbs the extra components and the two
+  // are equal. Otherwise |this| is genuinely greater (e.g. 3.2.3 vs. 1.*).
+  DCHECK_GT(parsed.size(), 0UL);
+  const size_t prefix_len = std::min(components_.size(), parsed.size());
+  for (size_t i = 0; i < prefix_len; ++i) {
+    if (components_[i] != parsed[i])
+      return 1;
+  }
+  return 0;
+}
+
+// Returns -1, 0 or 1 as |this| is less than, equal to, or greater than
+// |other|. Both operands must have parsed successfully.
+int Version::CompareTo(const Version& other) const {
+  DCHECK(IsValid());
+  DCHECK(other.IsValid());
+  return CompareVersionComponents(components_, other.components_);
+}
+
+// Returns the dotted string form of this version, e.g. {1, 2, 3} -> "1.2.3".
+// Must only be called on a valid version.
+const std::string Version::GetString() const {
+  DCHECK(IsValid());
+  // Defensive: in non-DCHECK builds an invalid (empty) Version would
+  // otherwise compute count - 1 == SIZE_MAX below and index out of bounds,
+  // which is undefined behavior. Return an empty string instead.
+  if (components_.empty())
+    return std::string();
+
+  std::string version_str;
+  const size_t count = components_.size();
+  for (size_t i = 0; i < count - 1; ++i) {
+    version_str.append(UintToString(components_[i]));
+    version_str.append(".");
+  }
+  version_str.append(UintToString(components_[count - 1]));
+  return version_str;
+}
+
+// All relational operators are defined directly in terms of
+// Version::CompareTo(), which returns -1, 0 or 1.
+bool operator==(const Version& v1, const Version& v2) {
+  return v1.CompareTo(v2) == 0;
+}
+
+bool operator!=(const Version& v1, const Version& v2) {
+  return v1.CompareTo(v2) != 0;
+}
+
+bool operator<(const Version& v1, const Version& v2) {
+  return v1.CompareTo(v2) < 0;
+}
+
+bool operator<=(const Version& v1, const Version& v2) {
+  return v1.CompareTo(v2) <= 0;
+}
+
+bool operator>(const Version& v1, const Version& v2) {
+  return v1.CompareTo(v2) > 0;
+}
+
+bool operator>=(const Version& v1, const Version& v2) {
+  return v1.CompareTo(v2) >= 0;
+}
+
+// Streams the dotted string form, so Versions are printable in logs/tests.
+std::ostream& operator<<(std::ostream& stream, const Version& v) {
+  return stream << v.GetString();
+}
+
+} // namespace base
diff --git a/libchrome/base/version.h b/libchrome/base/version.h
new file mode 100644
index 0000000..25b570a
--- /dev/null
+++ b/libchrome/base/version.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_VERSION_H_
+#define BASE_VERSION_H_
+
+#include <stdint.h>
+
+#include <iosfwd>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+
+namespace base {
+
+// Version represents a dotted version number, like "1.2.3.4", supporting
+// parsing and comparison.
+class BASE_EXPORT Version {
+ public:
+  // The only thing you can legally do to a default constructed
+  // Version object is assign to it.
+  Version();
+
+  Version(const Version& other);
+
+  ~Version();
+
+  // Initializes from a decimal dotted version number, like "0.1.1".
+  // Each component is limited to a uint32_t. Call IsValid() to learn
+  // the outcome.
+  explicit Version(const std::string& version_str);
+
+  // Returns true if the object contains a valid version number.
+  bool IsValid() const;
+
+  // Returns true if the version wildcard string is valid. The version wildcard
+  // string may end with ".*" (e.g. 1.2.*, 1.*). Any other arrangement with "*"
+  // is invalid (e.g. 1.*.3 or 1.2.3*). This function defaults to standard
+  // Version behavior (IsValid) if no wildcard is present.
+  static bool IsValidWildcardString(const std::string& wildcard_string);
+
+  // Returns -1, 0, 1 for <, ==, >. Both versions must be valid.
+  int CompareTo(const Version& other) const;
+
+  // Given a valid version object, compare if a |wildcard_string| results in a
+  // newer version. This function will default to CompareTo if the string does
+  // not end in wildcard sequence ".*". IsValidWildcardString(wildcard_string)
+  // must be true before using this function.
+  int CompareToWildcardString(const std::string& wildcard_string) const;
+
+  // Return the string representation of this version, e.g. "1.2.3.4".
+  // Must only be called on a valid version.
+  const std::string GetString() const;
+
+  // The parsed numeric components, most-significant first.
+  const std::vector<uint32_t>& components() const { return components_; }
+
+ private:
+  std::vector<uint32_t> components_;
+};
+
+BASE_EXPORT bool operator==(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator!=(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator<(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator<=(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator>(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator>=(const Version& v1, const Version& v2);
+BASE_EXPORT std::ostream& operator<<(std::ostream& stream, const Version& v);
+
+} // namespace base
+
+// TODO(xhwang) remove this when all users are updated to explicitly use the
+// namespace
+using base::Version;
+
+#endif // BASE_VERSION_H_
diff --git a/libchrome/base/version_unittest.cc b/libchrome/base/version_unittest.cc
new file mode 100644
index 0000000..5d9ea99
--- /dev/null
+++ b/libchrome/base/version_unittest.cc
@@ -0,0 +1,184 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/version.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+TEST(VersionTest, DefaultConstructor) {
+  // A default-constructed Version holds no components and is invalid.
+  const Version version;
+  EXPECT_FALSE(version.IsValid());
+}
+
+TEST(VersionTest, ValueSemantics) {
+  Version original("1.2.3.4");
+  EXPECT_TRUE(original.IsValid());
+  Version assigned;
+  EXPECT_FALSE(assigned.IsValid());
+  {
+    // Copy-construction and assignment must preserve validity and value.
+    Version copy(original);
+    assigned = copy;
+    EXPECT_TRUE(copy.IsValid());
+    EXPECT_EQ(original, copy);
+  }
+  // |assigned| must stay valid after the copy it came from is destroyed.
+  EXPECT_EQ(assigned, original);
+}
+
+TEST(VersionTest, GetVersionFromString) {
+  // Each entry lists an input string, the expected component count, the
+  // expected first component, and whether parsing should succeed at all.
+  static const struct version_string {
+    const char* input;
+    size_t parts;
+    uint32_t firstpart;
+    bool success;
+  } cases[] = {
+    {"", 0, 0, false},
+    {" ", 0, 0, false},
+    {"\t", 0, 0, false},
+    {"\n", 0, 0, false},
+    {"  ", 0, 0, false},
+    {".", 0, 0, false},
+    {" . ", 0, 0, false},
+    {"0", 1, 0, true},
+    {"0.", 0, 0, false},
+    {"0.0", 2, 0, true},
+    {"4294967295.0", 2, 4294967295, true},
+    {"4294967296.0", 0, 0, false},
+    {"-1.0", 0, 0, false},
+    {"1.-1.0", 0, 0, false},
+    {"1,--1.0", 0, 0, false},
+    {"+1.0", 0, 0, false},
+    {"1.+1.0", 0, 0, false},
+    {"1+1.0", 0, 0, false},
+    {"++1.0", 0, 0, false},
+    {"1.0a", 0, 0, false},
+    {"1.2.3.4.5.6.7.8.9.0", 10, 1, true},
+    {"02.1", 0, 0, false},
+    {"0.01", 2, 0, true},
+    {"f.1", 0, 0, false},
+    {"15.007.20011", 3, 15, true},
+    {"15.5.28.130162", 4, 15, true},
+  };
+
+  for (const auto& test_case : cases) {
+    const Version version(test_case.input);
+    EXPECT_EQ(test_case.success, version.IsValid());
+    if (test_case.success) {
+      EXPECT_EQ(test_case.parts, version.components().size());
+      EXPECT_EQ(test_case.firstpart, version.components()[0]);
+    }
+  }
+}
+
+TEST(VersionTest, Compare) {
+  // Each entry gives two version strings and the expected sign of
+  // lhs.CompareTo(rhs): -1, 0 or 1.
+  static const struct version_compare {
+    const char* lhs;
+    const char* rhs;
+    int expected;
+  } cases[] = {
+    {"1.0", "1.0", 0},
+    {"1.0", "0.0", 1},
+    {"1.0", "2.0", -1},
+    {"1.0", "1.1", -1},
+    {"1.1", "1.0", 1},
+    {"1.0", "1.0.1", -1},
+    {"1.1", "1.0.1", 1},
+    {"1.1", "1.0.1", 1},
+    {"1.0.0", "1.0", 0},
+    {"1.0.3", "1.0.20", -1},
+    {"11.0.10", "15.007.20011", -1},
+    {"11.0.10", "15.5.28.130162", -1},
+  };
+
+  for (const auto& test_case : cases) {
+    const Version lhs(test_case.lhs);
+    const Version rhs(test_case.rhs);
+    EXPECT_EQ(lhs.CompareTo(rhs), test_case.expected)
+        << test_case.lhs << " ? " << test_case.rhs;
+
+    // All six comparison operators must agree with CompareTo().
+    switch (test_case.expected) {
+      case -1:
+        EXPECT_LT(lhs, rhs);
+        EXPECT_LE(lhs, rhs);
+        EXPECT_NE(lhs, rhs);
+        EXPECT_FALSE(lhs == rhs);
+        EXPECT_FALSE(lhs >= rhs);
+        EXPECT_FALSE(lhs > rhs);
+        break;
+      case 0:
+        EXPECT_FALSE(lhs < rhs);
+        EXPECT_LE(lhs, rhs);
+        EXPECT_FALSE(lhs != rhs);
+        EXPECT_EQ(lhs, rhs);
+        EXPECT_GE(lhs, rhs);
+        EXPECT_FALSE(lhs > rhs);
+        break;
+      case 1:
+        EXPECT_FALSE(lhs < rhs);
+        EXPECT_FALSE(lhs <= rhs);
+        EXPECT_NE(lhs, rhs);
+        EXPECT_FALSE(lhs == rhs);
+        EXPECT_GE(lhs, rhs);
+        EXPECT_GT(lhs, rhs);
+        break;
+    }
+  }
+}
+
+TEST(VersionTest, CompareToWildcardString) {
+  // Each entry gives a concrete version, a wildcard pattern, and the
+  // expected result of CompareToWildcardString(): -1, 0 or 1.
+  static const struct version_compare {
+    const char* lhs;
+    const char* rhs;
+    int expected;
+  } cases[] = {
+    {"1.0", "1.*", 0},
+    {"1.0", "0.*", 1},
+    {"1.0", "2.*", -1},
+    {"1.2.3", "1.2.3.*", 0},
+    {"10.0", "1.0.*", 1},
+    {"1.0", "3.0.*", -1},
+    {"1.4", "1.3.0.*", 1},
+    {"1.3.9", "1.3.*", 0},
+    {"1.4.1", "1.3.*", 1},
+    {"1.3", "1.4.5.*", -1},
+    {"1.5", "1.4.5.*", 1},
+    {"1.3.9", "1.3.*", 0},
+    {"1.2.0.0.0.0", "1.2.*", 0},
+  };
+
+  for (const auto& test_case : cases) {
+    const Version version(test_case.lhs);
+    const int result = version.CompareToWildcardString(test_case.rhs);
+    EXPECT_EQ(result, test_case.expected)
+        << test_case.lhs << "?" << test_case.rhs;
+  }
+}
+
+TEST(VersionTest, IsValidWildcardString) {
+  // A "*" is only legal as a whole trailing ".*" component.
+  static const struct version_compare {
+    const char* version;
+    bool expected;
+  } cases[] = {
+    {"1.0", true},
+    {"", false},
+    {"1.2.3.4.5.6", true},
+    {"1.2.3.*", true},
+    {"1.2.3.5*", false},
+    {"1.2.3.56*", false},
+    {"1.*.3", false},
+    {"20.*", true},
+    {"+2.*", false},
+    {"*", false},
+    {"*.2", false},
+  };
+
+  for (const auto& test_case : cases) {
+    EXPECT_EQ(Version::IsValidWildcardString(test_case.version),
+              test_case.expected)
+        << test_case.version << "?" << test_case.expected;
+  }
+}
+
+} // namespace
diff --git a/libchrome/base/vlog.cc b/libchrome/base/vlog.cc
new file mode 100644
index 0000000..c00e631
--- /dev/null
+++ b/libchrome/base/vlog.cc
@@ -0,0 +1,181 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/vlog.h"
+
+#include <stddef.h>
+
+#include <ostream>
+#include <utility>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+
+namespace logging {
+
+const int VlogInfo::kDefaultVlogLevel = 0;
+
+// Holds one pattern parsed from the --vmodule switch (e.g. "foo=2" or
+// "*/bar/*=3") together with the vlog level that pattern selects.
+struct VlogInfo::VmodulePattern {
+  // What the pattern is matched against: the module basename (default) or
+  // the full __FILE__ path (chosen when the pattern contains a slash).
+  enum MatchTarget { MATCH_MODULE, MATCH_FILE };
+
+  explicit VmodulePattern(const std::string& pattern);
+
+  VmodulePattern();
+
+  std::string pattern;       // Wildcard pattern; '*' and '?' are supported.
+  int vlog_level;            // Verbosity level this pattern activates.
+  MatchTarget match_target;  // See MatchTarget above.
+};
+
+VlogInfo::VmodulePattern::VmodulePattern(const std::string& pattern)
+    : pattern(pattern),
+      vlog_level(VlogInfo::kDefaultVlogLevel),
+      match_target(MATCH_MODULE) {
+  // A pattern containing a forward or back slash is assumed to be meant for
+  // matching against the entire __FILE__ string rather than just the module
+  // basename.
+  if (pattern.find_first_of("\\/") != std::string::npos)
+    match_target = MATCH_FILE;
+}
+
+// Default pattern: empty string, default vlog level, module-name matching.
+VlogInfo::VmodulePattern::VmodulePattern()
+    : vlog_level(VlogInfo::kDefaultVlogLevel),
+      match_target(MATCH_MODULE) {}
+
+// Parses the --v and --vmodule switch values. |min_log_level| must outlive
+// this object; a valid |v_switch| overwrites the level it points at.
+VlogInfo::VlogInfo(const std::string& v_switch,
+                   const std::string& vmodule_switch,
+                   int* min_log_level)
+    : min_log_level_(min_log_level) {
+  DCHECK(min_log_level != NULL);
+
+  // --v is a single integer giving the default maximal verbosity.
+  if (!v_switch.empty()) {
+    int vlog_level = 0;
+    if (base::StringToInt(v_switch, &vlog_level)) {
+      SetMaxVlogLevel(vlog_level);
+    } else {
+      DLOG(WARNING) << "Could not parse v switch \"" << v_switch << "\"";
+    }
+  }
+
+  // --vmodule is a comma-separated list of "pattern=level" pairs.
+  base::StringPairs kv_pairs;
+  if (!base::SplitStringIntoKeyValuePairs(
+          vmodule_switch, '=', ',', &kv_pairs)) {
+    DLOG(WARNING) << "Could not fully parse vmodule switch \""
+                  << vmodule_switch << "\"";
+  }
+  for (const auto& kv : kv_pairs) {
+    // A pattern with an unparseable level keeps kDefaultVlogLevel; it is
+    // still added to the list so it can match files.
+    VmodulePattern pattern(kv.first);
+    if (!base::StringToInt(kv.second, &pattern.vlog_level)) {
+      DLOG(WARNING) << "Parsed vlog level for \""
+                    << kv.first << "=" << kv.second
+                    << "\" as " << pattern.vlog_level;
+    }
+    vmodule_levels_.push_back(pattern);
+  }
+}
+
+VlogInfo::~VlogInfo() {}
+
+namespace {
+
+// Given a path, returns the basename with the extension chopped off
+// (and any -inl suffix). We avoid using FilePath to minimize the
+// number of dependencies the logging system has.
+base::StringPiece GetModule(const base::StringPiece& file) {
+  base::StringPiece module(file);
+
+  // Strip any directory prefix; either slash style counts.
+  const base::StringPiece::size_type last_slash_pos =
+      module.find_last_of("\\/");
+  if (last_slash_pos != base::StringPiece::npos)
+    module.remove_prefix(last_slash_pos + 1);
+
+  // Drop the extension, i.e. everything from the final '.' onwards.
+  module = module.substr(0, module.rfind('.'));
+
+  // Finally drop a trailing "-inl", so "foo-inl.h" matches pattern "foo".
+  static const char kInlSuffix[] = "-inl";
+  static const int kInlSuffixLen = arraysize(kInlSuffix) - 1;
+  if (module.ends_with(kInlSuffix))
+    module.remove_suffix(kInlSuffixLen);
+  return module;
+}
+
+} // namespace
+
+// Returns the vlog level for |file|: the level of the first --vmodule
+// pattern that matches it, or the global --v level if none does.
+int VlogInfo::GetVlogLevel(const base::StringPiece& file) const {
+  if (vmodule_levels_.empty())
+    return GetMaxVlogLevel();
+
+  const base::StringPiece module(GetModule(file));
+  for (const auto& pattern : vmodule_levels_) {
+    // Patterns that contained a slash are matched against the whole path;
+    // all others only against the module basename.
+    const base::StringPiece target(
+        (pattern.match_target == VmodulePattern::MATCH_FILE) ? file : module);
+    if (MatchVlogPattern(target, pattern.pattern))
+      return pattern.vlog_level;
+  }
+  return GetMaxVlogLevel();
+}
+
+// Stores |level| as the global maximum verbosity by writing through
+// |min_log_level_|. Log severity is the negative verbosity.
+void VlogInfo::SetMaxVlogLevel(int level) {
+  // Log severity is the negative verbosity.
+  *min_log_level_ = -level;
+}
+
+// Inverse of SetMaxVlogLevel(): verbosity is the negated severity.
+int VlogInfo::GetMaxVlogLevel() const {
+  return -*min_log_level_;
+}
+
+// Returns true if |string| matches |vlog_pattern|. '?' matches exactly one
+// character, '*' matches zero or more, and a slash of either kind matches a
+// slash of either kind. Implemented with recursive backtracking: after a run
+// of '*'s, every suffix of |string| is tried against the rest of the pattern.
+bool MatchVlogPattern(const base::StringPiece& string,
+                      const base::StringPiece& vlog_pattern) {
+  base::StringPiece p(vlog_pattern);
+  base::StringPiece s(string);
+  // Consume characters until the next star.
+  while (!p.empty() && !s.empty() && (p[0] != '*')) {
+    switch (p[0]) {
+      // A slash (forward or back) must match a slash (forward or back).
+      case '/':
+      case '\\':
+        if ((s[0] != '/') && (s[0] != '\\'))
+          return false;
+        break;
+
+      // A '?' matches anything.
+      case '?':
+        break;
+
+      // Anything else must match literally.
+      default:
+        if (p[0] != s[0])
+          return false;
+        break;
+    }
+    p.remove_prefix(1), s.remove_prefix(1);
+  }
+
+  // An empty pattern here matches only an empty string.
+  if (p.empty())
+    return s.empty();
+
+  // Coalesce runs of consecutive stars. There should be at least
+  // one.
+  while (!p.empty() && (p[0] == '*'))
+    p.remove_prefix(1);
+
+  // Since we moved past the stars, an empty pattern here matches
+  // anything.
+  if (p.empty())
+    return true;
+
+  // Since we moved past the stars and p is non-empty, if some
+  // non-empty substring of s matches p, then we ourselves match.
+  while (!s.empty()) {
+    if (MatchVlogPattern(s, p))
+      return true;
+    s.remove_prefix(1);
+  }
+
+  // Otherwise, we couldn't find a match.
+  return false;
+}
+
+} // namespace logging
diff --git a/libchrome/base/vlog.h b/libchrome/base/vlog.h
new file mode 100644
index 0000000..2950904
--- /dev/null
+++ b/libchrome/base/vlog.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_VLOG_H_
+#define BASE_VLOG_H_
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+
+namespace logging {
+
+// A helper class containing all the settings for vlogging.
+class BASE_EXPORT VlogInfo {
+ public:
+  static const int kDefaultVlogLevel;
+
+  // |v_switch| gives the default maximal active V-logging level; 0 is
+  // the default. Normally positive values are used for V-logging
+  // levels.
+  //
+  // |vmodule_switch| gives the per-module maximal V-logging levels to
+  // override the value given by |v_switch|.
+  // E.g. "my_module=2,foo*=3" would change the logging level for all
+  // code in source files "my_module.*" and "foo*.*" ("-inl" suffixes
+  // are also disregarded for this matching).
+  //
+  // |min_log_level| points to an int that stores the log level. If a valid
+  // |v_switch| is provided, it will set the log level, and the default
+  // vlog severity will be read from there.
+  //
+  // Any pattern containing a forward or backward slash will be tested
+  // against the whole pathname and not just the module. E.g.,
+  // "*/foo/bar/*=2" would change the logging level for all code in
+  // source files under a "foo/bar" directory.
+  VlogInfo(const std::string& v_switch,
+           const std::string& vmodule_switch,
+           int* min_log_level);
+  ~VlogInfo();
+
+  // Returns the vlog level for a given file (usually taken from
+  // __FILE__).
+  int GetVlogLevel(const base::StringPiece& file) const;
+
+ private:
+  void SetMaxVlogLevel(int level);
+  int GetMaxVlogLevel() const;
+
+  // VmodulePattern holds all the information for each pattern parsed
+  // from |vmodule_switch|.
+  struct VmodulePattern;
+  std::vector<VmodulePattern> vmodule_levels_;
+  // Not owned; points at the int that backs the minimum log level.
+  int* min_log_level_;
+
+  DISALLOW_COPY_AND_ASSIGN(VlogInfo);
+};
+
+// Returns true if the string passed in matches the vlog pattern. The
+// vlog pattern string can contain wildcards like * and ?. ? matches
+// exactly one character while * matches 0 or more characters. Also,
+// as a special case, a / or \ character matches either / or \.
+//
+// Examples:
+// "kh?n" matches "khan" but not "khn" or "khaan"
+// "kh*n" matches "khn", "khan", or even "khaaaaan"
+// "/foo\bar" matches "/foo/bar", "\foo\bar", or "/foo\bar"
+// (disregarding C escaping rules)
+BASE_EXPORT bool MatchVlogPattern(const base::StringPiece& string,
+ const base::StringPiece& vlog_pattern);
+
+} // namespace logging
+
+#endif // BASE_VLOG_H_
diff --git a/libchrome/base/vlog_unittest.cc b/libchrome/base/vlog_unittest.cc
new file mode 100644
index 0000000..3c3f49c
--- /dev/null
+++ b/libchrome/base/vlog_unittest.cc
@@ -0,0 +1,124 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/vlog.h"
+
+#include "base/logging.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace logging {
+
+namespace {
+
+TEST(VlogTest, NoVmodule) {
+  int min_log_level = 0;
+  // With no --vmodule patterns every file gets the global --v level, and
+  // unparseable --v values ("blah", "0blah1") leave the default of 0.
+  EXPECT_EQ(0, VlogInfo(std::string(), std::string(), &min_log_level)
+                   .GetVlogLevel("test1"));
+  EXPECT_EQ(0, VlogInfo("0", std::string(), &min_log_level)
+                   .GetVlogLevel("test2"));
+  EXPECT_EQ(0, VlogInfo("blah", std::string(), &min_log_level)
+                   .GetVlogLevel("test3"));
+  EXPECT_EQ(0, VlogInfo("0blah1", std::string(), &min_log_level)
+                   .GetVlogLevel("test4"));
+  EXPECT_EQ(1, VlogInfo("1", std::string(), &min_log_level)
+                   .GetVlogLevel("test5"));
+  EXPECT_EQ(5, VlogInfo("5", std::string(), &min_log_level)
+                   .GetVlogLevel("test6"));
+}
+
+// Exercises the wildcard matcher directly: '?' is exactly one character,
+// '*' is zero or more, slashes match either slash, nothing is escaped.
+TEST(VlogTest, MatchVlogPattern) {
+  // Degenerate cases.
+  EXPECT_TRUE(MatchVlogPattern("", ""));
+  EXPECT_TRUE(MatchVlogPattern("", "****"));
+  EXPECT_FALSE(MatchVlogPattern("", "x"));
+  EXPECT_FALSE(MatchVlogPattern("x", ""));
+
+  // Basic.
+  EXPECT_TRUE(MatchVlogPattern("blah", "blah"));
+
+  // ? should match exactly one character.
+  EXPECT_TRUE(MatchVlogPattern("blah", "bl?h"));
+  EXPECT_FALSE(MatchVlogPattern("blh", "bl?h"));
+  EXPECT_FALSE(MatchVlogPattern("blaah", "bl?h"));
+  EXPECT_TRUE(MatchVlogPattern("blah", "?lah"));
+  EXPECT_FALSE(MatchVlogPattern("lah", "?lah"));
+  EXPECT_FALSE(MatchVlogPattern("bblah", "?lah"));
+
+  // * can match any number (even 0) of characters.
+  EXPECT_TRUE(MatchVlogPattern("blah", "bl*h"));
+  EXPECT_TRUE(MatchVlogPattern("blabcdefh", "bl*h"));
+  EXPECT_TRUE(MatchVlogPattern("blh", "bl*h"));
+  EXPECT_TRUE(MatchVlogPattern("blah", "*blah"));
+  EXPECT_TRUE(MatchVlogPattern("ohblah", "*blah"));
+  EXPECT_TRUE(MatchVlogPattern("blah", "blah*"));
+  EXPECT_TRUE(MatchVlogPattern("blahhhh", "blah*"));
+  EXPECT_TRUE(MatchVlogPattern("blahhhh", "blah*"));
+  EXPECT_TRUE(MatchVlogPattern("blah", "*blah*"));
+  EXPECT_TRUE(MatchVlogPattern("blahhhh", "*blah*"));
+  EXPECT_TRUE(MatchVlogPattern("bbbblahhhh", "*blah*"));
+
+  // Multiple *s should work fine.
+  EXPECT_TRUE(MatchVlogPattern("ballaah", "b*la*h"));
+  EXPECT_TRUE(MatchVlogPattern("blah", "b*la*h"));
+  EXPECT_TRUE(MatchVlogPattern("bbbblah", "b*la*h"));
+  EXPECT_TRUE(MatchVlogPattern("blaaah", "b*la*h"));
+
+  // There should be no escaping going on.
+  EXPECT_TRUE(MatchVlogPattern("bl\\ah", "bl\\?h"));
+  EXPECT_FALSE(MatchVlogPattern("bl?h", "bl\\?h"));
+  EXPECT_TRUE(MatchVlogPattern("bl\\aaaah", "bl\\*h"));
+  EXPECT_FALSE(MatchVlogPattern("bl*h", "bl\\*h"));
+
+  // Any slash matches any slash.
+  EXPECT_TRUE(MatchVlogPattern("/b\\lah", "/b\\lah"));
+  EXPECT_TRUE(MatchVlogPattern("\\b/lah", "/b\\lah"));
+}
+
+// Module-name matching: patterns without slashes are compared against the
+// file's basename with its extension and any "-inl" suffix stripped.
+TEST(VlogTest, VmoduleBasic) {
+  const char kVSwitch[] = "-1";
+  const char kVModuleSwitch[] =
+      "foo=,bar=0,baz=blah,,qux=0blah1,quux=1,corge.ext=5";
+  int min_log_level = 0;
+  VlogInfo vlog_info(kVSwitch, kVModuleSwitch, &min_log_level);
+  // Unmatched files fall back to the --v level (-1).
+  EXPECT_EQ(-1, vlog_info.GetVlogLevel("/path/to/grault.cc"));
+  // Patterns with empty or unparseable levels ("foo=", "baz=blah",
+  // "qux=0blah1") still match, at the default level of 0.
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("/path/to/foo.cc"));
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("D:\\Path\\To\\bar-inl.mm"));
+  // "bar_unittest" is not the module "bar", so it falls back to --v.
+  EXPECT_EQ(-1, vlog_info.GetVlogLevel("D:\\path\\to what/bar_unittest.m"));
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("baz.h"));
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("/another/path/to/qux.h"));
+  EXPECT_EQ(1, vlog_info.GetVlogLevel("/path/to/quux"));
+  // Only the final extension is stripped, so "corge.ext.h" -> "corge.ext".
+  EXPECT_EQ(5, vlog_info.GetVlogLevel("c:\\path/to/corge.ext.h"));
+}
+
+// Full-path matching: patterns containing a slash are compared against the
+// entire file path, with '/' and '\\' treated as interchangeable.
+TEST(VlogTest, VmoduleDirs) {
+  const char kVModuleSwitch[] =
+      "foo/bar.cc=1,baz\\*\\qux.cc=2,*quux/*=3,*/*-inl.h=4";
+  int min_log_level = 0;
+  VlogInfo vlog_info(std::string(), kVModuleSwitch, &min_log_level);
+  // Path patterns are anchored: a leading "/" or missing directory makes
+  // "foo/bar.cc" not match.
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("/foo/bar.cc"));
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("bar.cc"));
+  EXPECT_EQ(1, vlog_info.GetVlogLevel("foo/bar.cc"));
+
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("baz/grault/qux.h"));
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("/baz/grault/qux.cc"));
+  EXPECT_EQ(2, vlog_info.GetVlogLevel("baz/grault/qux.cc"));
+  EXPECT_EQ(2, vlog_info.GetVlogLevel("baz/grault/blah/qux.cc"));
+  // Slash direction in pattern and path is irrelevant.
+  EXPECT_EQ(2, vlog_info.GetVlogLevel("baz\\grault\\qux.cc"));
+  EXPECT_EQ(2, vlog_info.GetVlogLevel("baz\\grault//blah\\qux.cc"));
+
+  // "*quux/*" requires something after the "quux" directory.
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("/foo/bar/baz/quux.cc"));
+  EXPECT_EQ(3, vlog_info.GetVlogLevel("/foo/bar/baz/quux/grault.cc"));
+  EXPECT_EQ(3, vlog_info.GetVlogLevel("/foo\\bar/baz\\quux/grault.cc"));
+
+  // Path patterns see the raw filename, so "-inl" is NOT stripped here.
+  EXPECT_EQ(0, vlog_info.GetVlogLevel("foo/bar/test-inl.cc"));
+  EXPECT_EQ(4, vlog_info.GetVlogLevel("foo/bar/test-inl.h"));
+  EXPECT_EQ(4, vlog_info.GetVlogLevel("foo/bar/baz/blah-inl.h"));
+}
+
+} // namespace
+
+} // namespace logging
diff --git a/libchrome/base/win/OWNERS b/libchrome/base/win/OWNERS
new file mode 100644
index 0000000..78473b9
--- /dev/null
+++ b/libchrome/base/win/OWNERS
@@ -0,0 +1,4 @@
+cpu@chromium.org
+grt@chromium.org
+jschuh@chromium.org
+scottmg@chromium.org
diff --git a/libchrome/base/win/event_trace_consumer.h b/libchrome/base/win/event_trace_consumer.h
new file mode 100644
index 0000000..9f97e0d
--- /dev/null
+++ b/libchrome/base/win/event_trace_consumer.h
@@ -0,0 +1,150 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Declaration of a Windows event trace consumer base class.
+#ifndef BASE_WIN_EVENT_TRACE_CONSUMER_H_
+#define BASE_WIN_EVENT_TRACE_CONSUMER_H_
+
+#include <windows.h>
+#include <wmistr.h>
+#include <evntrace.h>
+#include <stddef.h>
+#include <vector>
+
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// This class is a base class that makes it easier to consume events
+// from realtime or file sessions. Concrete consumers need to subclass
+// a specialization of this class and override the ProcessEvent and/or
+// the ProcessBuffer methods to implement the event consumption logic.
+// Usage might look like:
+// class MyConsumer: public EtwTraceConsumerBase<MyConsumer> {
+// protected:
+// static VOID WINAPI ProcessEvent(PEVENT_TRACE event);
+// };
+//
+// MyConsumer consumer;
+// consumer.OpenFileSession(file_path);
+// consumer.Consume();
+template <class ImplClass>
+class EtwTraceConsumerBase {
+ public:
+ // Constructs a closed consumer.
+ EtwTraceConsumerBase() {
+ }
+
+ ~EtwTraceConsumerBase() {
+ Close();
+ }
+
+  // Opens the named realtime session, which must already exist.
+ // Note: You can use OpenRealtimeSession or OpenFileSession
+ // to open as many as MAXIMUM_WAIT_OBJECTS (63) sessions at
+ // any one time, though only one of them may be a realtime
+ // session.
+ HRESULT OpenRealtimeSession(const wchar_t* session_name);
+
+ // Opens the event trace log in "file_name", which must be a full or
+ // relative path to an existing event trace log file.
+ // Note: You can use OpenRealtimeSession or OpenFileSession
+  // to open as many as MAXIMUM_WAIT_OBJECTS (63) sessions at any one time.
+ HRESULT OpenFileSession(const wchar_t* file_name);
+
+ // Consume all open sessions from beginning to end.
+ HRESULT Consume();
+
+ // Close all open sessions.
+ HRESULT Close();
+
+ protected:
+ // Override in subclasses to handle events.
+ static void ProcessEvent(EVENT_TRACE* event) {
+ }
+ // Override in subclasses to handle buffers.
+ static bool ProcessBuffer(EVENT_TRACE_LOGFILE* buffer) {
+ return true; // keep going
+ }
+
+ protected:
+ // Currently open sessions.
+ std::vector<TRACEHANDLE> trace_handles_;
+
+ private:
+ // These delegate to ImplClass callbacks with saner signatures.
+ static void WINAPI ProcessEventCallback(EVENT_TRACE* event) {
+ ImplClass::ProcessEvent(event);
+ }
+ static ULONG WINAPI ProcessBufferCallback(PEVENT_TRACE_LOGFILE buffer) {
+ return ImplClass::ProcessBuffer(buffer);
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(EtwTraceConsumerBase);
+};
+
+template <class ImplClass> inline
+HRESULT EtwTraceConsumerBase<ImplClass>::OpenRealtimeSession(
+ const wchar_t* session_name) {
+ EVENT_TRACE_LOGFILE logfile = {};
+ logfile.LoggerName = const_cast<wchar_t*>(session_name);
+ logfile.LogFileMode = EVENT_TRACE_REAL_TIME_MODE;
+ logfile.BufferCallback = &ProcessBufferCallback;
+ logfile.EventCallback = &ProcessEventCallback;
+ logfile.Context = this;
+ TRACEHANDLE trace_handle = ::OpenTrace(&logfile);
+ if (reinterpret_cast<TRACEHANDLE>(INVALID_HANDLE_VALUE) == trace_handle)
+ return HRESULT_FROM_WIN32(::GetLastError());
+
+ trace_handles_.push_back(trace_handle);
+ return S_OK;
+}
+
+template <class ImplClass> inline
+HRESULT EtwTraceConsumerBase<ImplClass>::OpenFileSession(
+ const wchar_t* file_name) {
+ EVENT_TRACE_LOGFILE logfile = {};
+ logfile.LogFileName = const_cast<wchar_t*>(file_name);
+ logfile.BufferCallback = &ProcessBufferCallback;
+ logfile.EventCallback = &ProcessEventCallback;
+ logfile.Context = this;
+ TRACEHANDLE trace_handle = ::OpenTrace(&logfile);
+ if (reinterpret_cast<TRACEHANDLE>(INVALID_HANDLE_VALUE) == trace_handle)
+ return HRESULT_FROM_WIN32(::GetLastError());
+
+ trace_handles_.push_back(trace_handle);
+ return S_OK;
+}
+
+template <class ImplClass> inline
+HRESULT EtwTraceConsumerBase<ImplClass>::Consume() {
+ ULONG err = ::ProcessTrace(&trace_handles_[0],
+ static_cast<ULONG>(trace_handles_.size()),
+ NULL,
+ NULL);
+ return HRESULT_FROM_WIN32(err);
+}
+
+template <class ImplClass> inline
+HRESULT EtwTraceConsumerBase<ImplClass>::Close() {
+ HRESULT hr = S_OK;
+ for (size_t i = 0; i < trace_handles_.size(); ++i) {
+ if (NULL != trace_handles_[i]) {
+ ULONG ret = ::CloseTrace(trace_handles_[i]);
+ trace_handles_[i] = NULL;
+
+ if (FAILED(HRESULT_FROM_WIN32(ret)))
+ hr = HRESULT_FROM_WIN32(ret);
+ }
+ }
+ trace_handles_.clear();
+
+ return hr;
+}
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_EVENT_TRACE_CONSUMER_H_
diff --git a/libchrome/base/win/scoped_co_mem.h b/libchrome/base/win/scoped_co_mem.h
new file mode 100644
index 0000000..a3737dd
--- /dev/null
+++ b/libchrome/base/win/scoped_co_mem.h
@@ -0,0 +1,68 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_CO_MEM_H_
+#define BASE_WIN_SCOPED_CO_MEM_H_
+
+#include <objbase.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// Simple scoped memory releaser class for COM allocated memory.
+// Example:
+// base::win::ScopedCoMem<ITEMIDLIST> file_item;
+// SHGetSomeInfo(&file_item, ...);
+// ...
+// return; <-- memory released
+template<typename T>
+class ScopedCoMem {
+ public:
+ ScopedCoMem() : mem_ptr_(NULL) {}
+ ~ScopedCoMem() {
+ Reset(NULL);
+ }
+
+ T** operator&() { // NOLINT
+ DCHECK(mem_ptr_ == NULL); // To catch memory leaks.
+ return &mem_ptr_;
+ }
+
+ operator T*() {
+ return mem_ptr_;
+ }
+
+ T* operator->() {
+ DCHECK(mem_ptr_ != NULL);
+ return mem_ptr_;
+ }
+
+ const T* operator->() const {
+ DCHECK(mem_ptr_ != NULL);
+ return mem_ptr_;
+ }
+
+ void Reset(T* ptr) {
+ if (mem_ptr_)
+ CoTaskMemFree(mem_ptr_);
+ mem_ptr_ = ptr;
+ }
+
+ T* get() const {
+ return mem_ptr_;
+ }
+
+ private:
+ T* mem_ptr_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedCoMem);
+};
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_SCOPED_CO_MEM_H_
diff --git a/libchrome/base/win/scoped_com_initializer.h b/libchrome/base/win/scoped_com_initializer.h
new file mode 100644
index 0000000..8efff85
--- /dev/null
+++ b/libchrome/base/win/scoped_com_initializer.h
@@ -0,0 +1,79 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_COM_INITIALIZER_H_
+#define BASE_WIN_SCOPED_COM_INITIALIZER_H_
+
+#include <objbase.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+namespace base {
+namespace win {
+
+// Initializes COM in the constructor (STA or MTA), and uninitializes COM in the
+// destructor.
+//
+// WARNING: This should only be used once per thread, ideally scoped to a
+// similar lifetime as the thread itself. You should not be using this in
+// random utility functions that make COM calls -- instead ensure these
+// functions are running on a COM-supporting thread!
+class ScopedCOMInitializer {
+ public:
+ // Enum value provided to initialize the thread as an MTA instead of STA.
+ enum SelectMTA { kMTA };
+
+ // Constructor for STA initialization.
+ ScopedCOMInitializer() {
+ Initialize(COINIT_APARTMENTTHREADED);
+ }
+
+ // Constructor for MTA initialization.
+ explicit ScopedCOMInitializer(SelectMTA mta) {
+ Initialize(COINIT_MULTITHREADED);
+ }
+
+ ~ScopedCOMInitializer() {
+#ifndef NDEBUG
+ // Using the windows API directly to avoid dependency on platform_thread.
+ DCHECK_EQ(GetCurrentThreadId(), thread_id_);
+#endif
+ if (succeeded())
+ CoUninitialize();
+ }
+
+ bool succeeded() const { return SUCCEEDED(hr_); }
+
+ private:
+ void Initialize(COINIT init) {
+#ifndef NDEBUG
+ thread_id_ = GetCurrentThreadId();
+#endif
+ hr_ = CoInitializeEx(NULL, init);
+#ifndef NDEBUG
+ if (hr_ == S_FALSE)
+ LOG(ERROR) << "Multiple CoInitialize() calls for thread " << thread_id_;
+ else
+ DCHECK_NE(RPC_E_CHANGED_MODE, hr_) << "Invalid COM thread model change";
+#endif
+ }
+
+ HRESULT hr_;
+#ifndef NDEBUG
+ // In debug builds we use this variable to catch a potential bug where a
+ // ScopedCOMInitializer instance is deleted on a different thread than it
+ // was initially created on. If that ever happens it can have bad
+ // consequences and the cause can be tricky to track down.
+ DWORD thread_id_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedCOMInitializer);
+};
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_SCOPED_COM_INITIALIZER_H_
diff --git a/libchrome/base/win/scoped_comptr.h b/libchrome/base/win/scoped_comptr.h
new file mode 100644
index 0000000..5ce60e2
--- /dev/null
+++ b/libchrome/base/win/scoped_comptr.h
@@ -0,0 +1,168 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_COMPTR_H_
+#define BASE_WIN_SCOPED_COMPTR_H_
+
+#include <unknwn.h>
+
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+
+namespace base {
+namespace win {
+
+// A fairly minimalistic smart class for COM interface pointers.
+// Uses scoped_refptr for the basic smart pointer functionality
+// and adds a few IUnknown specific services.
+template <class Interface, const IID* interface_id = &__uuidof(Interface)>
+class ScopedComPtr : public scoped_refptr<Interface> {
+ public:
+ // Utility template to prevent users of ScopedComPtr from calling AddRef
+ // and/or Release() without going through the ScopedComPtr class.
+ class BlockIUnknownMethods : public Interface {
+ private:
+ STDMETHOD(QueryInterface)(REFIID iid, void** object) = 0;
+ STDMETHOD_(ULONG, AddRef)() = 0;
+ STDMETHOD_(ULONG, Release)() = 0;
+ };
+
+ typedef scoped_refptr<Interface> ParentClass;
+
+ ScopedComPtr() {
+ }
+
+ explicit ScopedComPtr(Interface* p) : ParentClass(p) {
+ }
+
+ ScopedComPtr(const ScopedComPtr<Interface, interface_id>& p)
+ : ParentClass(p) {
+ }
+
+ ~ScopedComPtr() {
+ // We don't want the smart pointer class to be bigger than the pointer
+ // it wraps.
+ static_assert(
+ sizeof(ScopedComPtr<Interface, interface_id>) == sizeof(Interface*),
+ "ScopedComPtrSize");
+ }
+
+ // Explicit Release() of the held object. Useful for reuse of the
+ // ScopedComPtr instance.
+ // Note that this function equates to IUnknown::Release and should not
+ // be confused with e.g. scoped_ptr::release().
+ void Release() {
+ if (this->ptr_ != NULL) {
+ this->ptr_->Release();
+ this->ptr_ = NULL;
+ }
+ }
+
+ // Sets the internal pointer to NULL and returns the held object without
+ // releasing the reference.
+ Interface* Detach() {
+ Interface* p = this->ptr_;
+ this->ptr_ = NULL;
+ return p;
+ }
+
+ // Accepts an interface pointer that has already been addref-ed.
+ void Attach(Interface* p) {
+ DCHECK(!this->ptr_);
+ this->ptr_ = p;
+ }
+
+ // Retrieves the pointer address.
+ // Used to receive object pointers as out arguments (and take ownership).
+ // The function DCHECKs on the current value being NULL.
+ // Usage: Foo(p.Receive());
+ Interface** Receive() {
+ DCHECK(!this->ptr_) << "Object leak. Pointer must be NULL";
+ return &this->ptr_;
+ }
+
+ // A convenience for whenever a void pointer is needed as an out argument.
+ void** ReceiveVoid() {
+ return reinterpret_cast<void**>(Receive());
+ }
+
+ template <class Query>
+ HRESULT QueryInterface(Query** p) {
+ DCHECK(p != NULL);
+ DCHECK(this->ptr_ != NULL);
+ // IUnknown already has a template version of QueryInterface
+ // so the iid parameter is implicit here. The only thing this
+ // function adds are the DCHECKs.
+ return this->ptr_->QueryInterface(p);
+ }
+
+ // QI for times when the IID is not associated with the type.
+ HRESULT QueryInterface(const IID& iid, void** obj) {
+ DCHECK(obj != NULL);
+ DCHECK(this->ptr_ != NULL);
+ return this->ptr_->QueryInterface(iid, obj);
+ }
+
+ // Queries |other| for the interface this object wraps and returns the
+ // error code from the other->QueryInterface operation.
+ HRESULT QueryFrom(IUnknown* object) {
+ DCHECK(object != NULL);
+ return object->QueryInterface(Receive());
+ }
+
+ // Convenience wrapper around CoCreateInstance
+ HRESULT CreateInstance(const CLSID& clsid, IUnknown* outer = NULL,
+ DWORD context = CLSCTX_ALL) {
+ DCHECK(!this->ptr_);
+ HRESULT hr = ::CoCreateInstance(clsid, outer, context, *interface_id,
+ reinterpret_cast<void**>(&this->ptr_));
+ return hr;
+ }
+
+ // Checks if the identity of |other| and this object is the same.
+ bool IsSameObject(IUnknown* other) {
+ if (!other && !this->ptr_)
+ return true;
+
+ if (!other || !this->ptr_)
+ return false;
+
+ ScopedComPtr<IUnknown> my_identity;
+ QueryInterface(my_identity.Receive());
+
+ ScopedComPtr<IUnknown> other_identity;
+ other->QueryInterface(other_identity.Receive());
+
+ return my_identity == other_identity;
+ }
+
+ // Provides direct access to the interface.
+ // Here we use a well known trick to make sure we block access to
+ // IUnknown methods so that something bad like this doesn't happen:
+ // ScopedComPtr<IUnknown> p(Foo());
+ // p->Release();
+ // ... later the destructor runs, which will Release() again.
+ // and to get the benefit of the DCHECKs we add to QueryInterface.
+ // There's still a way to call these methods if you absolutely must
+ // by statically casting the ScopedComPtr instance to the wrapped interface
+ // and then making the call... but generally that shouldn't be necessary.
+ BlockIUnknownMethods* operator->() const {
+ DCHECK(this->ptr_ != NULL);
+ return reinterpret_cast<BlockIUnknownMethods*>(this->ptr_);
+ }
+
+ // Pull in operator=() from the parent class.
+ using scoped_refptr<Interface>::operator=;
+
+ // static methods
+
+ static const IID& iid() {
+ return *interface_id;
+ }
+};
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_SCOPED_COMPTR_H_
diff --git a/libchrome/base/win/scoped_gdi_object.h b/libchrome/base/win/scoped_gdi_object.h
new file mode 100644
index 0000000..9d8465b
--- /dev/null
+++ b/libchrome/base/win/scoped_gdi_object.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_GDI_OBJECT_H_
+#define BASE_WIN_SCOPED_GDI_OBJECT_H_
+
+#include <windows.h>
+
+#include "base/scoped_generic.h"
+
+namespace base {
+namespace win {
+
+namespace internal {
+
+template <class T>
+struct ScopedGDIObjectTraits {
+ static T InvalidValue() { return nullptr; }
+ static void Free(T object) { DeleteObject(object); }
+};
+
+// An explicit specialization for HICON because we have to call DestroyIcon()
+// instead of DeleteObject() for HICON.
+template <>
+void inline ScopedGDIObjectTraits<HICON>::Free(HICON icon) {
+ DestroyIcon(icon);
+}
+
+} // namespace internal
+
+// Like ScopedHandle but for GDI objects.
+template <class T>
+using ScopedGDIObject = ScopedGeneric<T, internal::ScopedGDIObjectTraits<T>>;
+
+// Typedefs for some common use cases.
+typedef ScopedGDIObject<HBITMAP> ScopedBitmap;
+typedef ScopedGDIObject<HRGN> ScopedRegion;
+typedef ScopedGDIObject<HFONT> ScopedHFONT;
+typedef ScopedGDIObject<HICON> ScopedHICON;
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_SCOPED_GDI_OBJECT_H_
diff --git a/libchrome/base/win/scoped_handle_test_dll.cc b/libchrome/base/win/scoped_handle_test_dll.cc
new file mode 100644
index 0000000..c72e459
--- /dev/null
+++ b/libchrome/base/win/scoped_handle_test_dll.cc
@@ -0,0 +1,125 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+
+#include <vector>
+
+#include "base/win/base_features.h"
+#include "base/win/current_module.h"
+#include "base/win/scoped_handle.h"
+
+namespace base {
+namespace win {
+namespace testing {
+
+extern "C" bool __declspec(dllexport) RunTest();
+
+namespace {
+
+struct ThreadParams {
+ HANDLE ready_event;
+ HANDLE start_event;
+};
+
+// Note, this must use all native functions to avoid instantiating the
+// ActiveVerifier. e.g. can't use base::Thread or even base::PlatformThread.
+DWORD __stdcall ThreadFunc(void* params) {
+ ThreadParams* thread_params = reinterpret_cast<ThreadParams*>(params);
+ HANDLE handle = ::CreateMutex(nullptr, false, nullptr);
+
+ ::SetEvent(thread_params->ready_event);
+ ::WaitForSingleObject(thread_params->start_event, INFINITE);
+ ScopedHandle handle_holder(handle);
+ return 0;
+}
+
+bool InternalRunThreadTest() {
+ std::vector<HANDLE> threads_;
+ // From manual testing, the bug fixed by crrev.com/678736a starts reliably
+ // causing handle verifier asserts to trigger at around 100 threads, so make
+ // it 200 to be sure to detect any future regressions.
+ const size_t kNumThreads = 200;
+
+ // bManualReset is set to true to allow signalling multiple threads.
+ HANDLE start_event = ::CreateEvent(nullptr, true, false, nullptr);
+ if (!start_event)
+ return false;
+
+ HANDLE ready_event = CreateEvent(nullptr, false, false, nullptr);
+ if (!ready_event)
+ return false;
+
+ ThreadParams thread_params = { ready_event, start_event };
+
+ for (size_t i = 0; i < kNumThreads; i++) {
+ HANDLE thread_handle =
+ ::CreateThread(nullptr, 0, ThreadFunc,
+ reinterpret_cast<void*>(&thread_params), 0, nullptr);
+ if (!thread_handle)
+ break;
+ ::WaitForSingleObject(ready_event, INFINITE);
+ threads_.push_back(thread_handle);
+ }
+
+ ::CloseHandle(ready_event);
+
+ if (threads_.size() != kNumThreads) {
+ for (const auto& thread : threads_)
+ ::CloseHandle(thread);
+ ::CloseHandle(start_event);
+ return false;
+ }
+
+ ::SetEvent(start_event);
+ ::CloseHandle(start_event);
+ for (const auto& thread : threads_) {
+ ::WaitForSingleObject(thread, INFINITE);
+ ::CloseHandle(thread);
+ }
+
+ return true;
+}
+
+bool InternalRunLocationTest() {
+ // Create a new handle and then set LastError again.
+ HANDLE handle = ::CreateMutex(nullptr, false, nullptr);
+ if (!handle)
+ return false;
+ ScopedHandle handle_holder(handle);
+
+ HMODULE verifier_module = GetHandleVerifierModuleForTesting();
+ if (!verifier_module)
+ return false;
+
+ // Get my module
+ HMODULE my_module = CURRENT_MODULE();
+ if (!my_module)
+ return false;
+
+ HMODULE main_module = ::GetModuleHandle(NULL);
+
+#if BUILDFLAG(SINGLE_MODULE_MODE_HANDLE_VERIFIER)
+ // In a component build ActiveVerifier will always be created inside base.dll
+ // as the code always lives there.
+ if (verifier_module == my_module || verifier_module == main_module)
+ return false;
+#else
+ // In a non-component build, ActiveVerifier should always be created in the
+ // version of base linked with the main executable.
+ if (verifier_module == my_module || verifier_module != main_module)
+ return false;
+#endif
+ return true;
+}
+
+} // namespace
+
+bool RunTest() {
+ return InternalRunThreadTest() && InternalRunLocationTest();
+}
+
+}  // namespace testing
+}  // namespace win
+}  // namespace base
diff --git a/libchrome/base/win/scoped_hdc.h b/libchrome/base/win/scoped_hdc.h
new file mode 100644
index 0000000..fa686dd
--- /dev/null
+++ b/libchrome/base/win/scoped_hdc.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_HDC_H_
+#define BASE_WIN_SCOPED_HDC_H_
+
+#include <windows.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/win/scoped_handle.h"
+
+namespace base {
+namespace win {
+
+// Like ScopedHandle but for HDC. Only use this on HDCs returned from
+// GetDC.
+class ScopedGetDC {
+ public:
+ explicit ScopedGetDC(HWND hwnd)
+ : hwnd_(hwnd),
+ hdc_(GetDC(hwnd)) {
+ if (hwnd_) {
+ DCHECK(IsWindow(hwnd_));
+ DCHECK(hdc_);
+ } else {
+ // If GetDC(NULL) returns NULL, something really bad has happened, like
+ // GDI handle exhaustion. In this case Chrome is going to behave badly no
+ // matter what, so we may as well just force a crash now.
+ CHECK(hdc_);
+ }
+ }
+
+ ~ScopedGetDC() {
+ if (hdc_)
+ ReleaseDC(hwnd_, hdc_);
+ }
+
+ operator HDC() { return hdc_; }
+
+ private:
+ HWND hwnd_;
+ HDC hdc_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedGetDC);
+};
+
+// Like ScopedHandle but for HDC. Only use this on HDCs returned from
+// CreateCompatibleDC, CreateDC and CreateIC.
+class CreateDCTraits {
+ public:
+ typedef HDC Handle;
+
+ static bool CloseHandle(HDC handle) {
+ return ::DeleteDC(handle) != FALSE;
+ }
+
+ static bool IsHandleValid(HDC handle) {
+ return handle != NULL;
+ }
+
+ static HDC NullHandle() {
+ return NULL;
+ }
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CreateDCTraits);
+};
+
+typedef GenericScopedHandle<CreateDCTraits, DummyVerifierTraits> ScopedCreateDC;
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_SCOPED_HDC_H_
diff --git a/libchrome/base/win/scoped_hglobal.h b/libchrome/base/win/scoped_hglobal.h
new file mode 100644
index 0000000..abe9a5a
--- /dev/null
+++ b/libchrome/base/win/scoped_hglobal.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_HGLOBAL_H_
+#define BASE_WIN_SCOPED_HGLOBAL_H_
+
+#include <windows.h>
+#include <stddef.h>
+
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// Like ScopedHandle except for HGLOBAL.
+template<class T>
+class ScopedHGlobal {
+ public:
+ explicit ScopedHGlobal(HGLOBAL glob) : glob_(glob) {
+ data_ = static_cast<T>(GlobalLock(glob_));
+ }
+ ~ScopedHGlobal() {
+ GlobalUnlock(glob_);
+ }
+
+ T get() { return data_; }
+
+ size_t Size() const { return GlobalSize(glob_); }
+
+ T operator->() const {
+ assert(data_ != 0);
+ return data_;
+ }
+
+ T release() {
+ T data = data_;
+ data_ = NULL;
+ return data;
+ }
+
+ private:
+ HGLOBAL glob_;
+
+ T data_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedHGlobal);
+};
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_SCOPED_HGLOBAL_H_
diff --git a/libchrome/base/win/scoped_propvariant.h b/libchrome/base/win/scoped_propvariant.h
new file mode 100644
index 0000000..aa9afec
--- /dev/null
+++ b/libchrome/base/win/scoped_propvariant.h
@@ -0,0 +1,58 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_PROPVARIANT_H_
+#define BASE_WIN_SCOPED_PROPVARIANT_H_
+
+#include <propidl.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// A PROPVARIANT that is automatically initialized and cleared upon respective
+// construction and destruction of this class.
+class ScopedPropVariant {
+ public:
+ ScopedPropVariant() {
+ PropVariantInit(&pv_);
+ }
+
+ ~ScopedPropVariant() {
+ Reset();
+ }
+
+ // Returns a pointer to the underlying PROPVARIANT for use as an out param in
+ // a function call.
+ PROPVARIANT* Receive() {
+ DCHECK_EQ(pv_.vt, VT_EMPTY);
+ return &pv_;
+ }
+
+ // Clears the instance to prepare it for re-use (e.g., via Receive).
+ void Reset() {
+ if (pv_.vt != VT_EMPTY) {
+ HRESULT result = PropVariantClear(&pv_);
+ DCHECK_EQ(result, S_OK);
+ }
+ }
+
+ const PROPVARIANT& get() const { return pv_; }
+ const PROPVARIANT* ptr() const { return &pv_; }
+
+ private:
+ PROPVARIANT pv_;
+
+ // Comparison operators for ScopedPropVariant are not supported at this point.
+ bool operator==(const ScopedPropVariant&) const;
+ bool operator!=(const ScopedPropVariant&) const;
+ DISALLOW_COPY_AND_ASSIGN(ScopedPropVariant);
+};
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_SCOPED_PROPVARIANT_H_
diff --git a/libchrome/base/win/scoped_select_object.h b/libchrome/base/win/scoped_select_object.h
new file mode 100644
index 0000000..59b21c1
--- /dev/null
+++ b/libchrome/base/win/scoped_select_object.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_SELECT_OBJECT_H_
+#define BASE_WIN_SCOPED_SELECT_OBJECT_H_
+
+#include <windows.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+
+// Helper class for deselecting object from DC.
+class ScopedSelectObject {
+ public:
+ ScopedSelectObject(HDC hdc, HGDIOBJ object)
+ : hdc_(hdc),
+ oldobj_(SelectObject(hdc, object)) {
+ DCHECK(hdc_);
+ DCHECK(object);
+ DCHECK(oldobj_ != NULL && oldobj_ != HGDI_ERROR);
+ }
+
+ ~ScopedSelectObject() {
+ HGDIOBJ object = SelectObject(hdc_, oldobj_);
+ DCHECK((GetObjectType(oldobj_) != OBJ_REGION && object != NULL) ||
+ (GetObjectType(oldobj_) == OBJ_REGION && object != HGDI_ERROR));
+ }
+
+ private:
+ HDC hdc_;
+ HGDIOBJ oldobj_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedSelectObject);
+};
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_SCOPED_SELECT_OBJECT_H_
diff --git a/libchrome/base/win/windows_version_unittest.cc b/libchrome/base/win/windows_version_unittest.cc
new file mode 100644
index 0000000..f0d6d96
--- /dev/null
+++ b/libchrome/base/win/windows_version_unittest.cc
@@ -0,0 +1,22 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/windows_version.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+namespace {
+
+TEST(WindowsVersion, GetVersionExAndKernelVersionMatch) {
+ // If this fails, we're running in compatibility mode, or need to update the
+ // application manifest.
+ EXPECT_EQ(OSInfo::GetInstance()->version(),
+ OSInfo::GetInstance()->Kernel32Version());
+}
+
+} // namespace
+} // namespace win
+} // namespace base
diff --git a/libchrome/build/build_config.h b/libchrome/build/build_config.h
new file mode 100644
index 0000000..80a93d3
--- /dev/null
+++ b/libchrome/build/build_config.h
@@ -0,0 +1,204 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file adds defines about the platform we're currently building on.
+// Operating System:
+// OS_WIN / OS_MACOSX / OS_LINUX / OS_POSIX (MACOSX or LINUX) /
+// OS_NACL (NACL_SFI or NACL_NONSFI) / OS_NACL_SFI / OS_NACL_NONSFI
+// Compiler:
+// COMPILER_MSVC / COMPILER_GCC
+// Processor:
+// ARCH_CPU_X86 / ARCH_CPU_X86_64 / ARCH_CPU_X86_FAMILY (X86 or X86_64)
+// ARCH_CPU_32_BITS / ARCH_CPU_64_BITS
+
+#ifndef BUILD_BUILD_CONFIG_H_
+#define BUILD_BUILD_CONFIG_H_
+
+// A brief primer on #defines:
+//
+// - __ANDROID__ is automatically defined by the Android toolchain (see
+// https://goo.gl/v61lXa). It's not defined when building host code.
+// - __ANDROID_HOST__ is defined via -D by Android.mk when building host code
+// within an Android checkout.
+// - ANDROID is defined via -D when building code for either Android targets or
+// hosts. Use __ANDROID__ and __ANDROID_HOST__ instead.
+// - OS_ANDROID is a Chrome-specific define used to build Chrome for Android
+// within the NDK.
+
+// Android targets and hosts don't use tcmalloc.
+#if defined(__ANDROID__) || defined(__ANDROID_HOST__)
+#define NO_TCMALLOC
+#endif // defined(__ANDROID__) || defined(__ANDROID_HOST__)
+
+// Use the Chrome OS version of the code for both Android targets and Chrome OS builds.
+#if !defined(__ANDROID_HOST__)
+#define OS_CHROMEOS 1
+#endif // !defined(__ANDROID_HOST__)
+
+#if defined(__ANDROID__) // Android targets
+
+#define __linux__ 1
+#if defined(__BIONIC__)
+#define __UCLIBC__ 1
+#endif // defined(__BIONIC__)
+
+#elif !defined(__ANDROID_HOST__) // Chrome OS
+
+// TODO: Remove these once the GLib MessageLoopForUI isn't being used:
+// https://crbug.com/361635
+#define USE_GLIB 1
+#define USE_OZONE 1
+
+#endif // defined(__ANDROID__)
+
+// A set of macros to use for platform detection.
+#if defined(__native_client__)
+// __native_client__ must be first, so that other OS_ defines are not set.
+#define OS_NACL 1
+// OS_NACL comes in two sandboxing technology flavors, SFI or Non-SFI.
+// PNaCl toolchain defines __native_client_nonsfi__ macro in Non-SFI build
+// mode, while it does not in SFI build mode.
+#if defined(__native_client_nonsfi__)
+#define OS_NACL_NONSFI
+#else
+#define OS_NACL_SFI
+#endif
+// Don't set OS_ANDROID; it's only used when building Chrome for Android.
+#elif defined(__APPLE__)
+// only include TargetConditionals after testing ANDROID as some android builds
+// on mac don't have this header available and it's not needed unless the target
+// is really mac/ios.
+#include <TargetConditionals.h>
+#define OS_MACOSX 1
+#if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
+#define OS_IOS 1
+#endif // defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
+#elif defined(__linux__)
+#define OS_LINUX 1
+// include a system header to pull in features.h for glibc/uclibc macros.
+#include <unistd.h>
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
+// we really are using glibc, not uClibc pretending to be glibc
+#define LIBC_GLIBC 1
+#endif
+#elif defined(_WIN32)
+#define OS_WIN 1
+#define TOOLKIT_VIEWS 1
+#elif defined(__FreeBSD__)
+#define OS_FREEBSD 1
+#elif defined(__OpenBSD__)
+#define OS_OPENBSD 1
+#elif defined(__sun)
+#define OS_SOLARIS 1
+#elif defined(__QNXNTO__)
+#define OS_QNX 1
+#else
+#error Please add support for your platform in build/build_config.h
+#endif
+
+#if defined(USE_OPENSSL_CERTS) && defined(USE_NSS_CERTS)
+#error Cannot use both OpenSSL and NSS for certificates
+#endif
+
+// For access to standard BSD features, use OS_BSD instead of a
+// more specific macro.
+#if defined(OS_FREEBSD) || defined(OS_OPENBSD)
+#define OS_BSD 1
+#endif
+
+// For access to standard POSIXish features, use OS_POSIX instead of a
+// more specific macro.
+#if defined(OS_MACOSX) || defined(OS_LINUX) || defined(OS_FREEBSD) || \
+ defined(OS_OPENBSD) || defined(OS_SOLARIS) || defined(OS_ANDROID) || \
+ defined(OS_NACL) || defined(OS_QNX)
+#define OS_POSIX 1
+#endif
+
+// Use tcmalloc
+#if (defined(OS_WIN) || defined(OS_LINUX) || defined(OS_ANDROID)) && \
+ !defined(NO_TCMALLOC)
+#define USE_TCMALLOC 1
+#endif
+
+// Compiler detection.
+#if defined(__GNUC__)
+#define COMPILER_GCC 1
+#elif defined(_MSC_VER)
+#define COMPILER_MSVC 1
+#else
+#error Please add support for your compiler in build/build_config.h
+#endif
+
+// Processor architecture detection. For more info on what's defined, see:
+// http://msdn.microsoft.com/en-us/library/b0084kay.aspx
+// http://www.agner.org/optimize/calling_conventions.pdf
+// or with gcc, run: "echo | gcc -E -dM -"
+#if defined(_M_X64) || defined(__x86_64__)
+#define ARCH_CPU_X86_FAMILY 1
+#define ARCH_CPU_X86_64 1
+#define ARCH_CPU_64_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#elif defined(_M_IX86) || defined(__i386__)
+#define ARCH_CPU_X86_FAMILY 1
+#define ARCH_CPU_X86 1
+#define ARCH_CPU_32_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#elif defined(__ARMEL__)
+#define ARCH_CPU_ARM_FAMILY 1
+#define ARCH_CPU_ARMEL 1
+#define ARCH_CPU_32_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#elif defined(__aarch64__)
+#define ARCH_CPU_ARM_FAMILY 1
+#define ARCH_CPU_ARM64 1
+#define ARCH_CPU_64_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#elif defined(__pnacl__)
+#define ARCH_CPU_32_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#elif defined(__MIPSEL__)
+#if defined(__LP64__)
+#define ARCH_CPU_MIPS_FAMILY 1
+#define ARCH_CPU_MIPS64EL 1
+#define ARCH_CPU_64_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#else
+#define ARCH_CPU_MIPS_FAMILY 1
+#define ARCH_CPU_MIPSEL 1
+#define ARCH_CPU_32_BITS 1
+#define ARCH_CPU_LITTLE_ENDIAN 1
+#endif
+#else
+#error Please add support for your architecture in build/build_config.h
+#endif
+
+// Type detection for wchar_t.
+#if defined(OS_WIN)
+#define WCHAR_T_IS_UTF16
+#elif defined(OS_POSIX) && defined(COMPILER_GCC) && \
+ defined(__WCHAR_MAX__) && \
+ (__WCHAR_MAX__ == 0x7fffffff || __WCHAR_MAX__ == 0xffffffff)
+#define WCHAR_T_IS_UTF32
+#elif defined(OS_POSIX) && defined(COMPILER_GCC) && \
+ defined(__WCHAR_MAX__) && \
+ (__WCHAR_MAX__ == 0x7fff || __WCHAR_MAX__ == 0xffff)
+// On Posix, we'll detect short wchar_t, but projects aren't guaranteed to
+// compile in this mode (in particular, Chrome doesn't). This is intended for
+// other projects using base who manage their own dependencies and make sure
+// short wchar works for them.
+#define WCHAR_T_IS_UTF16
+#else
+#error Please add support for your compiler in build/build_config.h
+#endif
+
+#if defined(OS_ANDROID)
+// The compiler thinks std::string::const_iterator and "const char*" are
+// equivalent types.
+#define STD_STRING_ITERATOR_IS_CHAR_POINTER
+// The compiler thinks base::string16::const_iterator and "char16*" are
+// equivalent types.
+#define BASE_STRING16_ITERATOR_IS_CHAR16_POINTER
+#endif
+
+#endif // BUILD_BUILD_CONFIG_H_
diff --git a/libchrome/build/buildflag.h b/libchrome/build/buildflag.h
new file mode 100644
index 0000000..5776a75
--- /dev/null
+++ b/libchrome/build/buildflag.h
@@ -0,0 +1,47 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BUILD_BUILDFLAG_H_
+#define BUILD_BUILDFLAG_H_
+
+// These macros un-mangle the names of the build flags in a way that looks
+// natural, and gives errors if the flag is not defined.  Normally in the
+// preprocessor it's easy to make mistakes that interpret "you haven't done
+// the setup to know what the flag is" as "flag is off".  Normally you would
+// include the generated header rather than include this file directly.
+//
+// This is for use with generated headers.  See build/buildflag_header.gni.
+
+// This dance of two macros does a concatenation of two preprocessor args using
+// ## doubly indirectly because using ## directly prevents macros in that
+// parameter from being expanded.
+#define BUILDFLAG_CAT_INDIRECT(a, b) a ## b
+#define BUILDFLAG_CAT(a, b) BUILDFLAG_CAT_INDIRECT(a, b)
+
+// Accessor for build flags.
+//
+// To test for a value, if the build file specifies:
+//
+//   ENABLE_FOO=true
+//
+// Then you would check at build-time in source code with:
+//
+//   #include "foo_flags.h"  // The header the build file specified.
+//
+//   #if BUILDFLAG(ENABLE_FOO)
+//     ...
+//   #endif
+//
+// There will be no #define called ENABLE_FOO, so if you accidentally test for
+// whether that is defined, it will always be negative.  You can also use
+// the value in expressions:
+//
+//   const char kSpamServerName[] = BUILDFLAG(SPAM_SERVER_NAME);
+//
+// Because the flag is accessed as a preprocessor macro with (), an error
+// will be thrown if the proper header defining the internal flag value has
+// not been included.
+#define BUILDFLAG(flag) (BUILDFLAG_CAT(BUILDFLAG_INTERNAL_, flag)())
+
+#endif  // BUILD_BUILDFLAG_H_
diff --git a/libchrome/components/timers/BUILD.gn b/libchrome/components/timers/BUILD.gn
new file mode 100644
index 0000000..c6f4a12
--- /dev/null
+++ b/libchrome/components/timers/BUILD.gn
@@ -0,0 +1,28 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+static_library("timers") {
+ sources = [
+ "alarm_timer_chromeos.cc",
+ "alarm_timer_chromeos.h",
+ ]
+
+ deps = [
+ "//base",
+ ]
+}
+
+source_set("unit_tests") {
+ testonly = true
+
+ sources = [
+ "alarm_timer_unittest.cc",
+ ]
+
+ deps = [
+ ":timers",
+ "//base",
+ "//testing/gtest",
+ ]
+}
diff --git a/libchrome/components/timers/DEPS b/libchrome/components/timers/DEPS
new file mode 100644
index 0000000..413f57b
--- /dev/null
+++ b/libchrome/components/timers/DEPS
@@ -0,0 +1,10 @@
+include_rules = [
+ # This directory is shared with Chrome OS, which only links against
+ # base/. We don't want any other dependencies to creep in.
+ "-build",
+ "-content",
+ "-library_loaders",
+ "-net",
+ "-third_party",
+ "-url",
+]
\ No newline at end of file
diff --git a/libchrome/components/timers/OWNERS b/libchrome/components/timers/OWNERS
new file mode 100644
index 0000000..0c43483
--- /dev/null
+++ b/libchrome/components/timers/OWNERS
@@ -0,0 +1,2 @@
+chirantan@chromium.org
+derat@chromium.org
diff --git a/libchrome/components/timers/README b/libchrome/components/timers/README
new file mode 100644
index 0000000..0b2b4e3
--- /dev/null
+++ b/libchrome/components/timers/README
@@ -0,0 +1,3 @@
+This directory hosts a timer class that is shared with Chrome OS. Code that
+lives in this directory is not allowed to depend on anything other than base/
+because Chrome OS only pulls in and depends on base/ as a library.
diff --git a/libchrome/components/timers/alarm_timer_chromeos.cc b/libchrome/components/timers/alarm_timer_chromeos.cc
new file mode 100644
index 0000000..3f1abbf
--- /dev/null
+++ b/libchrome/components/timers/alarm_timer_chromeos.cc
@@ -0,0 +1,486 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/timers/alarm_timer_chromeos.h"
+
+#include <stdint.h>
+#include <sys/timerfd.h>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/files/file_util.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/pending_task.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/trace_event.h"
+
+namespace timers {
+namespace {
+// This class represents the IO thread that the AlarmTimer::Delegate may use for
+// watching file descriptors if it gets called from a thread that does not have
+// a MessageLoopForIO. It is a lazy global instance because it may not always
+// be necessary.
+class RtcAlarmIOThread : public base::Thread {
+ public:
+  // Starts the IO thread immediately.  CHECK()s on failure because the alarm
+  // timer cannot function without a MessageLoopForIO to watch its fd.
+  RtcAlarmIOThread() : Thread("RTC Alarm IO Thread") {
+    CHECK(
+        StartWithOptions(base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+  }
+  // Stops the thread on destruction.
+  ~RtcAlarmIOThread() override { Stop(); }
+};
+
+base::LazyInstance<RtcAlarmIOThread> g_io_thread = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+// Watches a MessageLoop and runs a callback if that MessageLoop will be
+// destroyed.
+class AlarmTimer::MessageLoopObserver
+    : public base::MessageLoop::DestructionObserver {
+ public:
+  // Constructs a MessageLoopObserver that will observe |message_loop| and will
+  // call |on_will_be_destroyed_callback| when |message_loop| is about to be
+  // destroyed.  Registration with the loop happens here in the constructor.
+  MessageLoopObserver(base::MessageLoop* message_loop,
+                      base::Closure on_will_be_destroyed_callback)
+      : message_loop_(message_loop),
+        on_will_be_destroyed_callback_(on_will_be_destroyed_callback) {
+    DCHECK(message_loop_);
+    message_loop_->AddDestructionObserver(this);
+  }
+
+  ~MessageLoopObserver() override {
+    // If |message_loop_| was destroyed, then this class will have already
+    // unregistered itself.  Doing it again will trigger a warning.
+    if (message_loop_)
+      message_loop_->RemoveDestructionObserver(this);
+  }
+
+  // base::MessageLoop::DestructionObserver override.  Fires the callback at
+  // most once; |message_loop_| is nulled first so the destructor does not try
+  // to unregister a second time.
+  void WillDestroyCurrentMessageLoop() override {
+    message_loop_->RemoveDestructionObserver(this);
+    message_loop_ = NULL;
+
+    on_will_be_destroyed_callback_.Run();
+  }
+
+ private:
+  // The MessageLoop that this class should watch.  Is a weak pointer.
+  base::MessageLoop* message_loop_;
+
+  // The callback to run when |message_loop_| will be destroyed.
+  base::Closure on_will_be_destroyed_callback_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessageLoopObserver);
+};
+
+// This class manages a Real Time Clock (RTC) alarm, a feature that is available
+// from linux version 3.11 onwards. It creates a file descriptor for the RTC
+// alarm timer and then watches that file descriptor to see when it can be read
+// without blocking, indicating that the timer has fired.
+//
+// A major problem for this class is that watching file descriptors is only
+// available on a MessageLoopForIO but there is no guarantee the timer is going
+// to be created on one. To get around this, the timer has a dedicated thread
+// with a MessageLoopForIO that posts tasks back to the thread that started the
+// timer.
+class AlarmTimer::Delegate
+    : public base::RefCountedThreadSafe<AlarmTimer::Delegate>,
+      public base::MessageLoopForIO::Watcher {
+ public:
+  // Construct a Delegate for the AlarmTimer.  It should be safe to call
+  // |on_timer_fired_callback| multiple times.
+  explicit Delegate(base::Closure on_timer_fired_callback);
+
+  // Returns true if the system timer managed by this delegate is capable of
+  // waking the system from suspend.
+  bool CanWakeFromSuspend();
+
+  // Resets the timer to fire after |delay| has passed.  Cancels any
+  // pre-existing delay.
+  void Reset(base::TimeDelta delay);
+
+  // Stops the currently running timer.  It should be safe to call this even if
+  // the timer is not running.
+  void Stop();
+
+  // Sets a hook that will be called when the timer fires and a task has been
+  // queued on |origin_task_runner_|.  Used by tests to wait until a task is
+  // pending in the MessageLoop.
+  void SetTimerFiredCallbackForTest(base::Closure test_callback);
+
+  // base::MessageLoopForIO::Watcher overrides.
+  void OnFileCanReadWithoutBlocking(int fd) override;
+  void OnFileCanWriteWithoutBlocking(int fd) override;
+
+ private:
+  // Ref-counted: destroyed only via Release() from RefCountedThreadSafe.
+  friend class base::RefCountedThreadSafe<Delegate>;
+  ~Delegate() override;
+
+  // Actually performs the system calls to set up the timer.  This must be
+  // called on a MessageLoopForIO.
+  void ResetImpl(base::TimeDelta delay, int reset_sequence_number);
+
+  // Callback that is run when the timer fires.  Must be run on
+  // |origin_task_runner_|.
+  void OnTimerFired(int reset_sequence_number);
+
+  // File descriptor associated with the alarm timer.  -1 if timerfd_create()
+  // failed in the constructor (CLOCK_REALTIME_ALARM unsupported).
+  int alarm_fd_;
+
+  // Task runner which initially started the timer.
+  scoped_refptr<base::SingleThreadTaskRunner> origin_task_runner_;
+
+  // Callback that should be run when the timer fires.
+  base::Closure on_timer_fired_callback_;
+
+  // Hook used by tests to be notified when the timer has fired and a task has
+  // been queued in the MessageLoop.
+  base::Closure on_timer_fired_callback_for_test_;
+
+  // Manages watching file descriptors.
+  std::unique_ptr<base::MessageLoopForIO::FileDescriptorWatcher> fd_watcher_;
+
+  // The sequence numbers of the last Reset() call handled respectively on
+  // |origin_task_runner_| and on the MessageLoopForIO used for watching the
+  // timer file descriptor.  Note that these can be the same MessageLoop.
+  // OnTimerFired() runs |on_timer_fired_callback_| only if the sequence number
+  // it receives from the MessageLoopForIO matches
+  // |origin_reset_sequence_number_|.
+  int origin_reset_sequence_number_;
+  int io_reset_sequence_number_;
+
+  DISALLOW_COPY_AND_ASSIGN(Delegate);
+};
+
+AlarmTimer::Delegate::Delegate(base::Closure on_timer_fired_callback)
+    : alarm_fd_(timerfd_create(CLOCK_REALTIME_ALARM, 0)),
+      on_timer_fired_callback_(on_timer_fired_callback),
+      origin_reset_sequence_number_(0),
+      io_reset_sequence_number_(0) {
+  // The call to timerfd_create above may fail.  This is the only indication
+  // that CLOCK_REALTIME_ALARM is not supported on this system.
+  DPLOG_IF(INFO, (alarm_fd_ == -1))
+      << "CLOCK_REALTIME_ALARM not supported on this system";
+}
+
+AlarmTimer::Delegate::~Delegate() {
+  // Release the timerfd, if one was successfully created.
+  if (alarm_fd_ != -1)
+    close(alarm_fd_);
+}
+
+// An fd of -1 means timerfd_create(CLOCK_REALTIME_ALARM, ...) failed, i.e.
+// the kernel cannot wake the system on our behalf.
+bool AlarmTimer::Delegate::CanWakeFromSuspend() {
+  return alarm_fd_ != -1;
+}
+
+void AlarmTimer::Delegate::Reset(base::TimeDelta delay) {
+  // Get a task runner for the current message loop.  When the timer fires,
+  // tasks will be posted to this runner to let the parent timer know.
+  origin_task_runner_ = base::ThreadTaskRunnerHandle::Get();
+
+  // Increment the sequence number.  Used to invalidate any events that have
+  // been queued but not yet run since the last time Reset() was called.
+  origin_reset_sequence_number_++;
+
+  // Calling timerfd_settime with a zero delay actually clears the timer so if
+  // the user has requested a zero delay timer, we need to handle it
+  // differently.  We queue the task here but we still go ahead and call
+  // timerfd_settime with the zero delay anyway to cancel any previous delay
+  // that might have been programmed.
+  if (delay <= base::TimeDelta::FromMicroseconds(0)) {
+    // The timerfd_settime documentation is vague on what happens when it is
+    // passed a negative delay.  We can sidestep the issue by ensuring that
+    // the delay is 0.
+    delay = base::TimeDelta::FromMicroseconds(0);
+    // Binding a scoped_refptr keeps |this| alive until the posted task runs.
+    origin_task_runner_->PostTask(
+        FROM_HERE,
+        base::Bind(&Delegate::OnTimerFired, scoped_refptr<Delegate>(this),
+                   origin_reset_sequence_number_));
+  }
+
+  // Run ResetImpl() on a MessageLoopForIO.  If the current thread already has
+  // one, run it synchronously; otherwise hop to the shared IO thread.
+  if (base::MessageLoopForIO::IsCurrent()) {
+    ResetImpl(delay, origin_reset_sequence_number_);
+  } else {
+    g_io_thread.Pointer()->task_runner()->PostTask(
+        FROM_HERE,
+        base::Bind(&Delegate::ResetImpl, scoped_refptr<Delegate>(this), delay,
+                   origin_reset_sequence_number_));
+  }
+}
+
+void AlarmTimer::Delegate::Stop() {
+  // Stop the RTC from a MessageLoopForIO.  If called on any other thread,
+  // re-post this method to the shared IO thread and bail out.
+  if (!base::MessageLoopForIO::IsCurrent()) {
+    g_io_thread.Pointer()->task_runner()->PostTask(
+        FROM_HERE, base::Bind(&Delegate::Stop, scoped_refptr<Delegate>(this)));
+    return;
+  }
+
+  // Stop watching for events.
+  fd_watcher_.reset();
+
+  // Now clear the timer.  An all-zero itimerspec passed to timerfd_settime()
+  // disarms the timer.
+  DCHECK_NE(alarm_fd_, -1);
+#if defined(ANDROID)
+  // NOTE(review): Android builds zero the struct with memset instead of
+  // aggregate-init — presumably a toolchain limitation; confirm before
+  // unifying the two branches.
+  itimerspec blank_time;
+  memset(&blank_time, 0, sizeof(blank_time));
+#else
+  itimerspec blank_time = {};
+#endif  // defined(ANDROID)
+  if (timerfd_settime(alarm_fd_, 0, &blank_time, NULL) < 0)
+    PLOG(ERROR) << "Unable to clear alarm time. Timer may still fire.";
+}
+
+void AlarmTimer::Delegate::OnFileCanReadWithoutBlocking(int fd) {
+  DCHECK_EQ(alarm_fd_, fd);
+
+  // Read from the fd to ack the event.
+  char val[sizeof(uint64_t)];
+  if (!base::ReadFromFD(alarm_fd_, val, sizeof(uint64_t)))
+    PLOG(DFATAL) << "Unable to read from timer file descriptor.";
+
+  // Make sure that the parent timer is informed on the proper message loop.
+  // |io_reset_sequence_number_| is forwarded so a stale firing (one that
+  // predates the latest Reset()) can be discarded in OnTimerFired().
+  if (origin_task_runner_->RunsTasksOnCurrentThread()) {
+    OnTimerFired(io_reset_sequence_number_);
+  } else {
+    origin_task_runner_->PostTask(
+        FROM_HERE,
+        base::Bind(&Delegate::OnTimerFired, scoped_refptr<Delegate>(this),
+                   io_reset_sequence_number_));
+  }
+}
+
+// The timer fd is only ever watched for reads (WATCH_READ in ResetImpl()); a
+// write notification indicates a programming error.
+void AlarmTimer::Delegate::OnFileCanWriteWithoutBlocking(int /*fd*/) {
+  NOTREACHED();
+}
+
+void AlarmTimer::Delegate::SetTimerFiredCallbackForTest(
+    base::Closure test_callback) {
+  on_timer_fired_callback_for_test_ = test_callback;
+}
+
+void AlarmTimer::Delegate::ResetImpl(base::TimeDelta delay,
+                                     int reset_sequence_number) {
+  DCHECK(base::MessageLoopForIO::IsCurrent());
+  DCHECK_NE(alarm_fd_, -1);
+
+  // Store the sequence number in the IO thread variable.  When the timer
+  // fires, we will bind this value to the OnTimerFired callback to ensure
+  // that we do the right thing if the timer gets reset.
+  io_reset_sequence_number_ = reset_sequence_number;
+
+  // If we were already watching the fd, this will stop watching it.
+  fd_watcher_.reset(new base::MessageLoopForIO::FileDescriptorWatcher);
+
+  // Start watching the fd to see when the timer fires.  The |false| argument
+  // appears to request a non-persistent (single-shot) watch, re-armed on each
+  // ResetImpl() call — NOTE(review): confirm against WatchFileDescriptor().
+  if (!base::MessageLoopForIO::current()->WatchFileDescriptor(
+          alarm_fd_, false, base::MessageLoopForIO::WATCH_READ,
+          fd_watcher_.get(), this)) {
+    LOG(ERROR) << "Error while attempting to watch file descriptor for RTC "
+               << "alarm. Timer will not fire.";
+  }
+
+  // Actually set the timer.  This will also clear the pre-existing timer, if
+  // any.
+#if defined(ANDROID)
+  itimerspec alarm_time;
+  memset(&alarm_time, 0, sizeof(alarm_time));
+#else
+  itimerspec alarm_time = {};
+#endif  // defined(ANDROID)
+  // Split |delay| into whole seconds plus the nanosecond remainder, as the
+  // itimerspec/timespec format requires.
+  alarm_time.it_value.tv_sec = delay.InSeconds();
+  alarm_time.it_value.tv_nsec =
+      (delay.InMicroseconds() % base::Time::kMicrosecondsPerSecond) *
+      base::Time::kNanosecondsPerMicrosecond;
+  if (timerfd_settime(alarm_fd_, 0, &alarm_time, NULL) < 0)
+    PLOG(ERROR) << "Error while setting alarm time. Timer will not fire";
+}
+
+void AlarmTimer::Delegate::OnTimerFired(int reset_sequence_number) {
+  DCHECK(origin_task_runner_->RunsTasksOnCurrentThread());
+
+  // If a test wants to be notified when this function is about to run, then
+  // re-queue this task in the MessageLoop and run the test's callback.  The
+  // hook is one-shot: it is Reset() after running once.
+  if (!on_timer_fired_callback_for_test_.is_null()) {
+    origin_task_runner_->PostTask(
+        FROM_HERE,
+        base::Bind(&Delegate::OnTimerFired, scoped_refptr<Delegate>(this),
+                   reset_sequence_number));
+
+    on_timer_fired_callback_for_test_.Run();
+    on_timer_fired_callback_for_test_.Reset();
+    return;
+  }
+
+  // Check to make sure that the timer was not reset in the time between when
+  // this task was queued to run and now.  If it was reset, then don't do
+  // anything.
+  if (reset_sequence_number != origin_reset_sequence_number_)
+    return;
+
+  on_timer_fired_callback_.Run();
+}
+
+AlarmTimer::AlarmTimer(bool retain_user_task, bool is_repeating)
+    : base::Timer(retain_user_task, is_repeating),
+      can_wake_from_suspend_(false),
+      origin_message_loop_(NULL),
+      weak_factory_(this) {
+  Init();
+}
+
+AlarmTimer::AlarmTimer(const tracked_objects::Location& posted_from,
+                       base::TimeDelta delay,
+                       const base::Closure& user_task,
+                       bool is_repeating)
+    : base::Timer(posted_from, delay, user_task, is_repeating),
+      can_wake_from_suspend_(false),
+      origin_message_loop_(NULL),
+      weak_factory_(this) {
+  Init();
+}
+
+AlarmTimer::~AlarmTimer() {
+  Stop();
+}
+
+// Forwards the test hook to the delegate, which owns the actual callback.
+void AlarmTimer::SetTimerFiredCallbackForTest(base::Closure test_callback) {
+  delegate_->SetTimerFiredCallbackForTest(test_callback);
+}
+
+// Common initialization shared by both constructors: create the delegate and
+// discover whether the RTC alarm is actually usable on this system.
+void AlarmTimer::Init() {
+  // The delegate calls back through a WeakPtr, so a late timer-fired
+  // notification cannot touch a destroyed AlarmTimer.
+  delegate_ = make_scoped_refptr(new AlarmTimer::Delegate(
+      base::Bind(&AlarmTimer::OnTimerFired, weak_factory_.GetWeakPtr())));
+  can_wake_from_suspend_ = delegate_->CanWakeFromSuspend();
+}
+
+void AlarmTimer::Stop() {
+  if (!base::Timer::is_running())
+    return;
+
+  // Without RTC support this class falls back to plain base::Timer behavior.
+  if (!can_wake_from_suspend_) {
+    base::Timer::Stop();
+    return;
+  }
+
+  // Clear the running flag, stop the delegate, and delete the pending task.
+  base::Timer::set_is_running(false);
+  delegate_->Stop();
+  pending_task_.reset();
+
+  // Stop watching |origin_message_loop_|.
+  origin_message_loop_ = NULL;
+  message_loop_observer_.reset();
+
+  // Mirror base::Timer semantics: drop the user task unless the timer was
+  // constructed to retain it.
+  if (!base::Timer::retain_user_task())
+    base::Timer::set_user_task(base::Closure());
+}
+
+void AlarmTimer::Reset() {
+  // Without RTC support this class falls back to plain base::Timer behavior.
+  if (!can_wake_from_suspend_) {
+    base::Timer::Reset();
+    return;
+  }
+
+  DCHECK(!base::Timer::user_task().is_null());
+  // Once started, all further Reset() calls must come from the same loop.
+  DCHECK(!origin_message_loop_ ||
+         origin_message_loop_->task_runner()->RunsTasksOnCurrentThread());
+
+  // Make sure that the timer will stop if the underlying message loop is
+  // destroyed.
+  if (!origin_message_loop_) {
+    origin_message_loop_ = base::MessageLoop::current();
+    message_loop_observer_.reset(new MessageLoopObserver(
+        origin_message_loop_,
+        base::Bind(&AlarmTimer::WillDestroyCurrentMessageLoop,
+                   weak_factory_.GetWeakPtr())));
+  }
+
+  // Set up the pending task.  A zero (or negative) delay gets a null desired
+  // run time, matching the immediate-fire path in Delegate::Reset().
+  if (base::Timer::GetCurrentDelay() > base::TimeDelta::FromMicroseconds(0)) {
+    base::Timer::set_desired_run_time(base::TimeTicks::Now() +
+                                      base::Timer::GetCurrentDelay());
+    pending_task_.reset(new base::PendingTask(
+        base::Timer::posted_from(), base::Timer::user_task(),
+        base::Timer::desired_run_time(), true /* nestable */));
+  } else {
+    base::Timer::set_desired_run_time(base::TimeTicks());
+    pending_task_.reset(new base::PendingTask(base::Timer::posted_from(),
+                                              base::Timer::user_task()));
+  }
+  // Record the queued task for tracing/task annotations.
+  base::MessageLoop::current()->task_annotator()->DidQueueTask(
+      "AlarmTimer::Reset", *pending_task_);
+
+  // Now start up the timer.
+  delegate_->Reset(base::Timer::GetCurrentDelay());
+  base::Timer::set_is_running(true);
+}
+
+void AlarmTimer::WillDestroyCurrentMessageLoop() {
+  // The message loop the timer was started on is going away; stop cleanly so
+  // nothing is left referencing the dying loop.
+  Stop();
+}
+
+void AlarmTimer::OnTimerFired() {
+  // A stale notification can arrive after Stop(); ignore it.
+  if (!base::Timer::IsRunning())
+    return;
+
+  DCHECK(pending_task_.get());
+
+  // Take ownership of the pending user task, which is going to be cleared by
+  // the Stop() or Reset() functions below.
+  std::unique_ptr<base::PendingTask> pending_user_task(
+      std::move(pending_task_));
+
+  // Re-schedule or stop the timer as requested.
+  if (base::Timer::is_repeating())
+    Reset();
+  else
+    Stop();
+
+  TRACE_TASK_EXECUTION("AlarmTimer::OnTimerFired", *pending_user_task);
+
+  // Now run the user task.
+  base::MessageLoop::current()->task_annotator()->RunTask("AlarmTimer::Reset",
+                                                          *pending_user_task);
+}
+
+// The boolean pairs passed to AlarmTimer() below are
+// (retain_user_task, is_repeating); see the protected AlarmTimer constructor.
+OneShotAlarmTimer::OneShotAlarmTimer() : AlarmTimer(false, false) {
+}
+
+OneShotAlarmTimer::~OneShotAlarmTimer() {
+}
+
+RepeatingAlarmTimer::RepeatingAlarmTimer() : AlarmTimer(true, true) {
+}
+
+RepeatingAlarmTimer::RepeatingAlarmTimer(
+    const tracked_objects::Location& posted_from,
+    base::TimeDelta delay,
+    const base::Closure& user_task)
+    : AlarmTimer(posted_from, delay, user_task, true) {
+}
+
+RepeatingAlarmTimer::~RepeatingAlarmTimer() {
+}
+
+// SimpleAlarmTimer retains its task but does not repeat.
+SimpleAlarmTimer::SimpleAlarmTimer() : AlarmTimer(true, false) {
+}
+
+SimpleAlarmTimer::SimpleAlarmTimer(const tracked_objects::Location& posted_from,
+                                   base::TimeDelta delay,
+                                   const base::Closure& user_task)
+    : AlarmTimer(posted_from, delay, user_task, false) {
+}
+
+SimpleAlarmTimer::~SimpleAlarmTimer() {
+}
+
+} // namespace timers
diff --git a/libchrome/components/timers/alarm_timer_chromeos.h b/libchrome/components/timers/alarm_timer_chromeos.h
new file mode 100644
index 0000000..313c9f9
--- /dev/null
+++ b/libchrome/components/timers/alarm_timer_chromeos.h
@@ -0,0 +1,148 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_TIMERS_ALARM_TIMER_CHROMEOS_H_
+#define COMPONENTS_TIMERS_ALARM_TIMER_CHROMEOS_H_
+
+#include <memory>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/time/time.h"
+#include "base/timer/timer.h"
+
+namespace base {
+class MessageLoop;
+struct PendingTask;
+}
+
+namespace timers {
+// The class implements a timer that is capable of waking the system up from a
+// suspended state. For example, this is useful for running tasks that are
+// needed for maintaining network connectivity, like sending heartbeat messages.
+// Currently, this feature is only available on Chrome OS systems running linux
+// version 3.11 or higher. On all other platforms, the AlarmTimer behaves
+// exactly the same way as a regular Timer.
+class AlarmTimer : public base::Timer {
+ public:
+  ~AlarmTimer() override;
+
+  // True if this timer can actually wake the system from suspend; false means
+  // it degrades to plain base::Timer behavior.
+  bool can_wake_from_suspend() const { return can_wake_from_suspend_; }
+
+  // Sets a hook that will be called when the timer fires and a task has been
+  // queued on |origin_message_loop_|.  Used by tests to wait until a task is
+  // pending in the MessageLoop.
+  void SetTimerFiredCallbackForTest(base::Closure test_callback);
+
+  // Timer overrides.
+  void Stop() override;
+  void Reset() override;
+
+ protected:
+  // The constructors for this class are protected because consumers should
+  // instantiate one of the specialized sub-classes defined below instead.
+  AlarmTimer(bool retain_user_task, bool is_repeating);
+  AlarmTimer(const tracked_objects::Location& posted_from,
+             base::TimeDelta delay,
+             const base::Closure& user_task,
+             bool is_repeating);
+
+ private:
+  // Common initialization that must be performed by both constructors.  This
+  // really should live in a delegated constructor but the way base::Timer's
+  // constructors are written makes it really hard to do so.
+  void Init();
+
+  // Will be called by the delegate to indicate that the timer has fired and
+  // that the user task should be run.
+  void OnTimerFired();
+
+  // Called when |origin_message_loop_| will be destroyed.
+  void WillDestroyCurrentMessageLoop();
+
+  // Delegate that will manage actually setting the timer.
+  class Delegate;
+  scoped_refptr<Delegate> delegate_;
+
+  // Keeps track of the user task we want to run.  A new one is constructed
+  // every time Reset() is called.
+  std::unique_ptr<base::PendingTask> pending_task_;
+
+  // Tracks whether the timer has the ability to wake the system up from
+  // suspend.  This is a runtime check because we won't know if the system
+  // supports being woken up from suspend until the delegate actually tries to
+  // set it up.
+  bool can_wake_from_suspend_;
+
+  // Pointer to the message loop that started the timer.  Used to track the
+  // destruction of that message loop.
+  base::MessageLoop* origin_message_loop_;
+
+  // Observes |origin_message_loop_| and informs this class if it will be
+  // destroyed.
+  class MessageLoopObserver;
+  std::unique_ptr<MessageLoopObserver> message_loop_observer_;
+
+  // Declared last so that, by reverse destruction order, outstanding weak
+  // pointers are invalidated before any other member is torn down.
+  base::WeakPtrFactory<AlarmTimer> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(AlarmTimer);
+};
+
+// As its name suggests, a OneShotAlarmTimer runs a given task once.  It does
+// not remember the task that was given to it after it has fired and does not
+// repeat.  Useful for fire-and-forget tasks.
+class OneShotAlarmTimer : public AlarmTimer {
+ public:
+  // Constructs a basic OneShotAlarmTimer.  An AlarmTimer constructed this way
+  // requires that Start() is called before Reset() is called.
+  OneShotAlarmTimer();
+  ~OneShotAlarmTimer() override;
+};
+
+// A RepeatingAlarmTimer takes a task and delay and repeatedly runs the task
+// using the specified delay as an interval between the runs until it is
+// explicitly stopped.  It remembers both the task and the delay it was given
+// after it fires.
+class RepeatingAlarmTimer : public AlarmTimer {
+ public:
+  // Constructs a basic RepeatingAlarmTimer.  An AlarmTimer constructed this
+  // way requires that Start() is called before Reset() is called.
+  RepeatingAlarmTimer();
+
+  // Constructs a RepeatingAlarmTimer with pre-populated parameters but does
+  // not start it.  Useful if |user_task| or |delay| are not going to change.
+  // Reset() can be called immediately after constructing an AlarmTimer in
+  // this way.
+  RepeatingAlarmTimer(const tracked_objects::Location& posted_from,
+                      base::TimeDelta delay,
+                      const base::Closure& user_task);
+
+  ~RepeatingAlarmTimer() override;
+};
+
+// A SimpleAlarmTimer only fires once but remembers the task that it was given
+// even after it has fired.  Useful if you want to run the same task multiple
+// times but not at a regular interval.
+class SimpleAlarmTimer : public AlarmTimer {
+ public:
+  // Constructs a basic SimpleAlarmTimer.  An AlarmTimer constructed this way
+  // requires that Start() is called before Reset() is called.
+  SimpleAlarmTimer();
+
+  // Constructs a SimpleAlarmTimer with pre-populated parameters but does not
+  // start it.  Useful if |user_task| or |delay| are not going to change.
+  // Reset() can be called immediately after constructing an AlarmTimer in
+  // this way.
+  SimpleAlarmTimer(const tracked_objects::Location& posted_from,
+                   base::TimeDelta delay,
+                   const base::Closure& user_task);
+
+  ~SimpleAlarmTimer() override;
+};
+
+} // namespace timers
+
+#endif // COMPONENTS_TIMERS_ALARM_TIMER_CHROMEOS_H_
diff --git a/libchrome/crypto/BUILD.gn b/libchrome/crypto/BUILD.gn
new file mode 100644
index 0000000..a912d93
--- /dev/null
+++ b/libchrome/crypto/BUILD.gn
@@ -0,0 +1,228 @@
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/crypto.gni")
+import("//testing/test.gni")
+
+component("crypto") {
+ output_name = "crcrypto" # Avoid colliding with OpenSSL's libcrypto.
+ sources = [
+ "aead.cc",
+ "aead.h",
+ "apple_keychain.h",
+ "apple_keychain_ios.mm",
+ "apple_keychain_mac.mm",
+ "auto_cbb.h",
+ "capi_util.cc",
+ "capi_util.h",
+ "crypto_export.h",
+ "cssm_init.cc",
+ "cssm_init.h",
+ "curve25519.cc",
+ "curve25519.h",
+ "ec_private_key.cc",
+ "ec_private_key.h",
+ "ec_signature_creator.cc",
+ "ec_signature_creator.h",
+ "ec_signature_creator_impl.cc",
+ "ec_signature_creator_impl.h",
+ "encryptor.cc",
+ "encryptor.h",
+ "hkdf.cc",
+ "hkdf.h",
+ "hmac.cc",
+ "hmac.h",
+ "mac_security_services_lock.cc",
+ "mac_security_services_lock.h",
+
+ # TODO(brettw) these mocks should be moved to a test_support_crypto target
+ # if possible.
+ "mock_apple_keychain.cc",
+ "mock_apple_keychain.h",
+ "mock_apple_keychain_ios.cc",
+ "mock_apple_keychain_mac.cc",
+ "nss_key_util.cc",
+ "nss_key_util.h",
+ "nss_util.cc",
+ "nss_util.h",
+ "nss_util_internal.h",
+ "openssl_bio_string.cc",
+ "openssl_bio_string.h",
+ "openssl_util.cc",
+ "openssl_util.h",
+ "p224.cc",
+ "p224.h",
+ "p224_spake.cc",
+ "p224_spake.h",
+ "random.cc",
+ "random.h",
+ "rsa_private_key.cc",
+ "rsa_private_key.h",
+ "scoped_capi_types.h",
+ "scoped_nss_types.h",
+ "secure_hash.cc",
+ "secure_hash.h",
+ "secure_util.cc",
+ "secure_util.h",
+ "sha2.cc",
+ "sha2.h",
+ "signature_creator.cc",
+ "signature_creator.h",
+ "signature_verifier.cc",
+ "signature_verifier.h",
+ "symmetric_key.cc",
+ "symmetric_key.h",
+ ]
+
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
+ deps = [
+ ":platform",
+ "//base",
+ "//base/third_party/dynamic_annotations",
+ ]
+
+ if (!is_mac && !is_ios) {
+ sources -= [
+ "apple_keychain.h",
+ "mock_apple_keychain.cc",
+ "mock_apple_keychain.h",
+ ]
+ } else {
+ libs = [
+ "CoreFoundation.framework",
+ "Security.framework",
+ ]
+ }
+
+ if (!is_mac) {
+ sources -= [
+ "cssm_init.cc",
+ "cssm_init.h",
+ "mac_security_services_lock.cc",
+ "mac_security_services_lock.h",
+ ]
+ }
+ if (!is_win) {
+ sources -= [
+ "capi_util.cc",
+ "capi_util.h",
+ ]
+ }
+
+ # Some files are built when NSS is used for the platform certificate library.
+ if (!use_nss_certs) {
+ sources -= [
+ "nss_key_util.cc",
+ "nss_key_util.h",
+ "nss_util.cc",
+ "nss_util.h",
+ "nss_util_internal.h",
+ ]
+ }
+
+ defines = [ "CRYPTO_IMPLEMENTATION" ]
+
+ if (is_nacl) {
+ deps += [ "//native_client_sdk/src/libraries/nacl_io" ]
+ }
+}
+
+test("crypto_unittests") {
+ sources = [
+ "aead_unittest.cc",
+ "curve25519_unittest.cc",
+ "ec_private_key_unittest.cc",
+ "ec_signature_creator_unittest.cc",
+ "encryptor_unittest.cc",
+ "hkdf_unittest.cc",
+ "hmac_unittest.cc",
+ "nss_key_util_unittest.cc",
+ "nss_util_unittest.cc",
+ "openssl_bio_string_unittest.cc",
+ "p224_spake_unittest.cc",
+ "p224_unittest.cc",
+ "random_unittest.cc",
+ "rsa_private_key_unittest.cc",
+ "secure_hash_unittest.cc",
+ "sha2_unittest.cc",
+ "signature_creator_unittest.cc",
+ "signature_verifier_unittest.cc",
+ "symmetric_key_unittest.cc",
+ ]
+
+ # Some files are built when NSS is used for the platform certificate library.
+ if (!use_nss_certs) {
+ sources -= [
+ "nss_key_util_unittest.cc",
+ "nss_util_unittest.cc",
+ ]
+ }
+
+ configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
+ deps = [
+ ":crypto",
+ ":platform",
+ ":test_support",
+ "//base",
+ "//base/test:run_all_unittests",
+ "//base/test:test_support",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+}
+
+# This has no sources in some cases so can't be a static library.
+source_set("test_support") {
+ testonly = true
+ sources = []
+
+ if (use_nss_certs) {
+ sources += [
+ "scoped_test_nss_db.cc",
+ "scoped_test_nss_db.h",
+ ]
+ }
+
+ if (is_chromeos) {
+ sources += [
+ "scoped_test_nss_chromeos_user.cc",
+ "scoped_test_nss_chromeos_user.h",
+ "scoped_test_system_nss_key_slot.cc",
+ "scoped_test_system_nss_key_slot.h",
+ ]
+ }
+
+ deps = [
+ ":crypto",
+ ":platform",
+ "//base",
+ ]
+}
+
+config("platform_config") {
+ if (use_nss_certs && is_clang) {
+ # There is a broken header guard in /usr/include/nss/secmod.h:
+ # https://bugzilla.mozilla.org/show_bug.cgi?id=884072
+ cflags = [ "-Wno-header-guard" ]
+ }
+}
+
+# This is a meta-target that forwards to NSS's SSL library or OpenSSL,
+# according to the state of the crypto flags. A target just wanting to depend
+# on the current SSL library should just depend on this.
+group("platform") {
+ public_deps = [
+ "//third_party/boringssl",
+ ]
+
+ # Link in NSS if it is used for the platform certificate library
+ # (use_nss_certs).
+ if (use_nss_certs) {
+ public_configs = [ ":platform_config" ]
+ public_configs += [ "//third_party/nss:system_nss_no_ssl_config" ]
+ }
+}
diff --git a/libchrome/crypto/OWNERS b/libchrome/crypto/OWNERS
new file mode 100644
index 0000000..42d0d3b
--- /dev/null
+++ b/libchrome/crypto/OWNERS
@@ -0,0 +1,3 @@
+agl@chromium.org
+davidben@chromium.org
+rsleevi@chromium.org
diff --git a/libchrome/crypto/apple_keychain.h b/libchrome/crypto/apple_keychain.h
new file mode 100644
index 0000000..1ea2473
--- /dev/null
+++ b/libchrome/crypto/apple_keychain.h
@@ -0,0 +1,109 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_KEYCHAIN_MAC_H_
+#define CRYPTO_KEYCHAIN_MAC_H_
+
+#include <Security/Security.h>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "crypto/crypto_export.h"
+
+#if defined (OS_IOS)
+typedef void* SecKeychainRef;
+typedef void* SecKeychainItemRef;
+typedef void SecKeychainAttributeList;
+#endif
+
+namespace crypto {
+
+// Wraps the KeychainServices API in a very thin layer, to allow it to be
+// mocked out for testing.
+
+// See Keychain Services documentation for function documentation, as these call
+// through directly to their Keychain Services equivalents (Foo ->
+// SecKeychainFoo). The only exception is Free, which should be used for
+// anything returned from this class that would normally be freed with
+// CFRelease (to aid in testing).
+class CRYPTO_EXPORT AppleKeychain {
+ public:
+ AppleKeychain();
+ virtual ~AppleKeychain();
+
+ virtual OSStatus FindGenericPassword(CFTypeRef keychainOrArray,
+ UInt32 serviceNameLength,
+ const char* serviceName,
+ UInt32 accountNameLength,
+ const char* accountName,
+ UInt32* passwordLength,
+ void** passwordData,
+ SecKeychainItemRef* itemRef) const;
+
+ virtual OSStatus ItemFreeContent(SecKeychainAttributeList* attrList,
+ void* data) const;
+
+ virtual OSStatus AddGenericPassword(SecKeychainRef keychain,
+ UInt32 serviceNameLength,
+ const char* serviceName,
+ UInt32 accountNameLength,
+ const char* accountName,
+ UInt32 passwordLength,
+ const void* passwordData,
+ SecKeychainItemRef* itemRef) const;
+
+#if !defined(OS_IOS)
+ virtual OSStatus ItemCopyAttributesAndData(
+ SecKeychainItemRef itemRef,
+ SecKeychainAttributeInfo* info,
+ SecItemClass* itemClass,
+ SecKeychainAttributeList** attrList,
+ UInt32* length,
+ void** outData) const;
+
+ virtual OSStatus ItemModifyAttributesAndData(
+ SecKeychainItemRef itemRef,
+ const SecKeychainAttributeList* attrList,
+ UInt32 length,
+ const void* data) const;
+
+ virtual OSStatus ItemFreeAttributesAndData(SecKeychainAttributeList* attrList,
+ void* data) const;
+
+ virtual OSStatus ItemDelete(SecKeychainItemRef itemRef) const;
+
+ virtual OSStatus SearchCreateFromAttributes(
+ CFTypeRef keychainOrArray,
+ SecItemClass itemClass,
+ const SecKeychainAttributeList* attrList,
+ SecKeychainSearchRef* searchRef) const;
+
+ virtual OSStatus SearchCopyNext(SecKeychainSearchRef searchRef,
+ SecKeychainItemRef* itemRef) const;
+
+ virtual OSStatus AddInternetPassword(SecKeychainRef keychain,
+ UInt32 serverNameLength,
+ const char* serverName,
+ UInt32 securityDomainLength,
+ const char* securityDomain,
+ UInt32 accountNameLength,
+ const char* accountName,
+ UInt32 pathLength, const char* path,
+ UInt16 port, SecProtocolType protocol,
+ SecAuthenticationType authenticationType,
+ UInt32 passwordLength,
+ const void* passwordData,
+ SecKeychainItemRef* itemRef) const;
+
+ // Calls CFRelease on the given ref, after checking that |ref| is non-NULL.
+ virtual void Free(CFTypeRef ref) const;
+#endif // !defined(OS_IOS)
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AppleKeychain);
+};
+
+} // namespace crypto
+
+#endif // CRYPTO_KEYCHAIN_MAC_H_
diff --git a/libchrome/crypto/auto_cbb.h b/libchrome/crypto/auto_cbb.h
new file mode 100644
index 0000000..5206a21
--- /dev/null
+++ b/libchrome/crypto/auto_cbb.h
@@ -0,0 +1,35 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_AUTO_CBB_H_
+#define CRYPTO_AUTO_CBB_H_
+
+#include <openssl/bytestring.h>
+
+#include "base/macros.h"
+
+namespace crypto {
+
+// AutoCBB is a wrapper over OpenSSL's CBB type that automatically releases
+// resources when going out of scope.
+class AutoCBB {
+ public:
+ AutoCBB() { CBB_zero(&cbb_); }
+ ~AutoCBB() { CBB_cleanup(&cbb_); }
+
+ CBB* get() { return &cbb_; }
+
+ void Reset() {
+ CBB_cleanup(&cbb_);
+ CBB_zero(&cbb_);
+ }
+
+ private:
+ CBB cbb_;
+ DISALLOW_COPY_AND_ASSIGN(AutoCBB);
+};
+
+} // namespace crypto
+
+#endif // CRYPTO_AUTO_CBB_H_
diff --git a/libchrome/crypto/crypto.gyp b/libchrome/crypto/crypto.gyp
new file mode 100644
index 0000000..8ed2ab2
--- /dev/null
+++ b/libchrome/crypto/crypto.gyp
@@ -0,0 +1,236 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ },
+ 'includes': [
+ 'crypto.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'crypto',
+ 'type': '<(component)',
+ 'product_name': 'crcrypto', # Avoid colliding with OpenSSL's libcrypto
+ 'dependencies': [
+ '../base/base.gyp:base',
+ '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+ '../third_party/boringssl/boringssl.gyp:boringssl',
+ ],
+ 'defines': [
+ 'CRYPTO_IMPLEMENTATION',
+ ],
+ 'conditions': [
+ [ 'os_posix == 1 and OS != "mac" and OS != "ios" and OS != "android"', {
+ 'dependencies': [
+ '../build/linux/system.gyp:nss',
+ ],
+ 'export_dependent_settings': [
+ '../build/linux/system.gyp:nss',
+ ],
+ 'conditions': [
+ [ 'chromeos==1', {
+ 'sources/': [ ['include', '_chromeos\\.cc$'] ]
+ },
+ ],
+ ],
+ }],
+ [ 'OS != "mac" and OS != "ios"', {
+ 'sources!': [
+ 'apple_keychain.h',
+ 'mock_apple_keychain.cc',
+ 'mock_apple_keychain.h',
+ ],
+ }],
+ [ 'os_bsd==1', {
+ 'link_settings': {
+ 'libraries': [
+ '-L/usr/local/lib -lexecinfo',
+ ],
+ },
+ },
+ ],
+ [ 'OS == "mac"', {
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Security.framework',
+ ],
+ },
+ }, { # OS != "mac"
+ 'sources!': [
+ 'cssm_init.cc',
+ 'cssm_init.h',
+ 'mac_security_services_lock.cc',
+ 'mac_security_services_lock.h',
+ ],
+ }],
+ [ 'OS != "win"', {
+ 'sources!': [
+ 'capi_util.h',
+ 'capi_util.cc',
+ ],
+ }],
+ [ 'OS == "win"', {
+ 'msvs_disabled_warnings': [
+ 4267, # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ ],
+ }],
+ [ 'use_nss_certs==0', {
+ # Some files are built when NSS is used for the platform certificate library.
+ 'sources!': [
+ 'nss_key_util.cc',
+ 'nss_key_util.h',
+ 'nss_util.cc',
+ 'nss_util.h',
+ 'nss_util_internal.h',
+ ],
+ },],
+ ],
+ 'sources': [
+ '<@(crypto_sources)',
+ ],
+ },
+ {
+ 'target_name': 'crypto_unittests',
+ 'type': 'executable',
+ 'sources': [
+ 'aead_unittest.cc',
+ 'curve25519_unittest.cc',
+ 'ec_private_key_unittest.cc',
+ 'ec_signature_creator_unittest.cc',
+ 'encryptor_unittest.cc',
+ 'hkdf_unittest.cc',
+ 'hmac_unittest.cc',
+ 'nss_key_util_unittest.cc',
+ 'nss_util_unittest.cc',
+ 'openssl_bio_string_unittest.cc',
+ 'p224_unittest.cc',
+ 'p224_spake_unittest.cc',
+ 'random_unittest.cc',
+ 'rsa_private_key_unittest.cc',
+ 'secure_hash_unittest.cc',
+ 'sha2_unittest.cc',
+ 'signature_creator_unittest.cc',
+ 'signature_verifier_unittest.cc',
+ 'symmetric_key_unittest.cc',
+ ],
+ 'dependencies': [
+ 'crypto',
+ 'crypto_test_support',
+ '../base/base.gyp:base',
+ '../base/base.gyp:run_all_unittests',
+ '../base/base.gyp:test_support_base',
+ '../testing/gmock.gyp:gmock',
+ '../testing/gtest.gyp:gtest',
+ '../third_party/boringssl/boringssl.gyp:boringssl',
+ ],
+ 'conditions': [
+ [ 'use_nss_certs == 1', {
+ 'dependencies': [
+ '../build/linux/system.gyp:nss',
+ ],
+ }],
+ [ 'use_nss_certs == 0', {
+ # Some files are built when NSS is used for the platform certificate library.
+ 'sources!': [
+ 'nss_key_util_unittest.cc',
+ 'nss_util_unittest.cc',
+ ],
+ }],
+ [ 'OS == "win"', {
+ # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+ 'msvs_disabled_warnings': [4267, ],
+ }],
+ ],
+ },
+ ],
+ 'conditions': [
+ ['OS == "win" and target_arch=="ia32"', {
+ 'targets': [
+ {
+ 'target_name': 'crypto_nacl_win64',
+ # We use the native APIs for the helper.
+ 'type': '<(component)',
+ 'dependencies': [
+ '../base/base.gyp:base_win64',
+ '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations_win64',
+ '../third_party/boringssl/boringssl.gyp:boringssl_nacl_win64',
+ ],
+ 'sources': [
+ '<@(nacl_win64_sources)',
+ ],
+ 'defines': [
+ 'CRYPTO_IMPLEMENTATION',
+ '<@(nacl_win64_defines)',
+ ],
+ 'configurations': {
+ 'Common_Base': {
+ 'msvs_target_platform': 'x64',
+ },
+ },
+ },
+ ],
+ }],
+ ['use_nss_certs==1', {
+ 'targets': [
+ {
+ 'target_name': 'crypto_test_support',
+ 'type': 'static_library',
+ 'dependencies': [
+ '../base/base.gyp:base',
+ 'crypto',
+ ],
+ 'sources': [
+ 'scoped_test_nss_db.cc',
+ 'scoped_test_nss_db.h',
+ 'scoped_test_nss_chromeos_user.cc',
+ 'scoped_test_nss_chromeos_user.h',
+ 'scoped_test_system_nss_key_slot.cc',
+ 'scoped_test_system_nss_key_slot.h',
+ ],
+ 'conditions': [
+ ['use_nss_certs==0', {
+ 'sources!': [
+ 'scoped_test_nss_db.cc',
+ 'scoped_test_nss_db.h',
+ ],
+ }],
+ [ 'chromeos==0', {
+ 'sources!': [
+ 'scoped_test_nss_chromeos_user.cc',
+ 'scoped_test_nss_chromeos_user.h',
+ 'scoped_test_system_nss_key_slot.cc',
+ 'scoped_test_system_nss_key_slot.h',
+ ],
+ }],
+ ],
+ }
+ ]}, { # use_nss_certs==0
+ 'targets': [
+ {
+ 'target_name': 'crypto_test_support',
+ 'type': 'none',
+ 'sources': [],
+ }
+ ]}],
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'crypto_unittests_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'crypto_unittests',
+ ],
+ 'includes': [
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'crypto_unittests.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/libchrome/crypto/crypto.gypi b/libchrome/crypto/crypto.gypi
new file mode 100644
index 0000000..dadc0ea
--- /dev/null
+++ b/libchrome/crypto/crypto.gypi
@@ -0,0 +1,88 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ # Put all transitive dependencies for Windows HMAC here.
+ # This is required so that we can build them for nacl win64.
+ 'variables': {
+ 'hmac_win64_related_sources': [
+ 'crypto_export.h',
+ 'hmac.cc',
+ 'hmac.h',
+ 'openssl_util.cc',
+ 'openssl_util.h',
+ 'secure_util.cc',
+ 'secure_util.h',
+ 'symmetric_key.cc',
+ 'symmetric_key.h',
+ ],
+ },
+ 'crypto_sources': [
+ # NOTE: all transitive dependencies of HMAC on windows need
+ # to be placed in the source list above.
+ '<@(hmac_win64_related_sources)',
+ 'aead.cc',
+ 'aead.h',
+ 'apple_keychain.h',
+ 'apple_keychain_ios.mm',
+ 'apple_keychain_mac.mm',
+ 'auto_cbb.h',
+ 'capi_util.cc',
+ 'capi_util.h',
+ 'cssm_init.cc',
+ 'cssm_init.h',
+ 'curve25519.cc',
+ 'curve25519.h',
+ 'ec_private_key.cc',
+ 'ec_private_key.h',
+ 'ec_signature_creator.cc',
+ 'ec_signature_creator.h',
+ 'ec_signature_creator_impl.cc',
+ 'ec_signature_creator_impl.h',
+ 'encryptor.cc',
+ 'encryptor.h',
+ 'hkdf.cc',
+ 'hkdf.h',
+ 'mac_security_services_lock.cc',
+ 'mac_security_services_lock.h',
+ 'mock_apple_keychain.cc',
+ 'mock_apple_keychain.h',
+ 'mock_apple_keychain_ios.cc',
+ 'mock_apple_keychain_mac.cc',
+ 'p224_spake.cc',
+ 'p224_spake.h',
+ 'nss_crypto_module_delegate.h',
+ 'nss_key_util.cc',
+ 'nss_key_util.h',
+ 'nss_util.cc',
+ 'nss_util.h',
+ 'nss_util_internal.h',
+ 'openssl_bio_string.cc',
+ 'openssl_bio_string.h',
+ 'p224.cc',
+ 'p224.h',
+ 'random.h',
+ 'random.cc',
+ 'rsa_private_key.cc',
+ 'rsa_private_key.h',
+ 'scoped_capi_types.h',
+ 'scoped_nss_types.h',
+ 'secure_hash.cc',
+ 'secure_hash.h',
+ 'sha2.cc',
+ 'sha2.h',
+ 'signature_creator.cc',
+ 'signature_creator.h',
+ 'signature_verifier.cc',
+ 'signature_verifier.h',
+ 'wincrypt_shim.h',
+ ],
+ 'nacl_win64_sources': [
+ '<@(hmac_win64_related_sources)',
+ 'random.cc',
+ 'random.h',
+ ],
+ }
+}
diff --git a/libchrome/crypto/crypto_export.h b/libchrome/crypto/crypto_export.h
new file mode 100644
index 0000000..605af94
--- /dev/null
+++ b/libchrome/crypto/crypto_export.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_CRYPTO_EXPORT_H_
+#define CRYPTO_CRYPTO_EXPORT_H_
+
+// Defines CRYPTO_EXPORT so that functionality implemented by the crypto module
+// can be exported to consumers.
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(CRYPTO_IMPLEMENTATION)
+#define CRYPTO_EXPORT __declspec(dllexport)
+#else
+#define CRYPTO_EXPORT __declspec(dllimport)
+#endif // defined(CRYPTO_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#if defined(CRYPTO_IMPLEMENTATION)
+#define CRYPTO_EXPORT __attribute__((visibility("default")))
+#else
+#define CRYPTO_EXPORT
+#endif
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define CRYPTO_EXPORT
+#endif
+
+#endif // CRYPTO_CRYPTO_EXPORT_H_
diff --git a/libchrome/crypto/crypto_nacl.gyp b/libchrome/crypto/crypto_nacl.gyp
new file mode 100644
index 0000000..c7c01a8
--- /dev/null
+++ b/libchrome/crypto/crypto_nacl.gyp
@@ -0,0 +1,44 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ },
+ 'includes': [
+ '../native_client/build/untrusted.gypi',
+ 'crypto.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'crypto_nacl',
+ 'type': 'none',
+ 'variables': {
+ 'nacl_untrusted_build': 1,
+ 'nlib_target': 'libcrypto_nacl.a',
+ 'build_glibc': 0,
+ 'build_newlib': 0,
+ 'build_pnacl_newlib': 1,
+ },
+ 'dependencies': [
+ '../third_party/boringssl/boringssl_nacl.gyp:boringssl_nacl',
+ '../native_client_sdk/native_client_sdk_untrusted.gyp:nacl_io_untrusted',
+ ],
+ 'defines': [
+ 'CRYPTO_IMPLEMENTATION',
+ ],
+ 'sources': [
+ '<@(crypto_sources)',
+ ],
+ 'sources/': [
+ ['exclude', '_nss\.(cc|h)$'],
+ ['exclude', '^(mock_)?apple_'],
+ ['exclude', '^capi_'],
+ ['exclude', '^cssm_'],
+ ['exclude', '^nss_'],
+ ['exclude', '^mac_'],
+ ],
+ },
+ ],
+}
diff --git a/libchrome/crypto/crypto_unittests.isolate b/libchrome/crypto/crypto_unittests.isolate
new file mode 100644
index 0000000..de13aa2
--- /dev/null
+++ b/libchrome/crypto/crypto_unittests.isolate
@@ -0,0 +1,42 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'command': [
+ '../testing/test_env.py',
+ '<(PRODUCT_DIR)/crypto_unittests<(EXECUTABLE_SUFFIX)',
+ '--brave-new-test-launcher',
+ '--test-launcher-bot-mode',
+ '--asan=<(asan)',
+ '--msan=<(msan)',
+ '--tsan=<(tsan)',
+ ],
+ },
+ 'conditions': [
+ ['OS=="linux" or OS=="mac" or OS=="win"', {
+ 'variables': {
+ 'files': [
+ '../testing/test_env.py',
+ ],
+ },
+ }],
+ ['OS=="mac" and asan==1 and fastbuild==0', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/crypto_unittests.dSYM/',
+ ],
+ },
+ }],
+ ['OS=="win" and (fastbuild==0 or fastbuild==1)', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/crypto_unittests.exe.pdb',
+ ],
+ },
+ }],
+ ],
+ 'includes': [
+ '../base/base.isolate',
+ ],
+}
diff --git a/libchrome/crypto/ec_private_key.h b/libchrome/crypto/ec_private_key.h
new file mode 100644
index 0000000..a24219b
--- /dev/null
+++ b/libchrome/crypto/ec_private_key.h
@@ -0,0 +1,129 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_EC_PRIVATE_KEY_H_
+#define CRYPTO_EC_PRIVATE_KEY_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "crypto/crypto_export.h"
+
+#if defined(USE_OPENSSL)
+// Forward declaration for openssl/*.h
+typedef struct evp_pkey_st EVP_PKEY;
+#else
+// Forward declaration.
+typedef struct CERTSubjectPublicKeyInfoStr CERTSubjectPublicKeyInfo;
+typedef struct PK11SlotInfoStr PK11SlotInfo;
+typedef struct SECKEYPrivateKeyStr SECKEYPrivateKey;
+typedef struct SECKEYPublicKeyStr SECKEYPublicKey;
+#endif
+
+namespace crypto {
+
+// Encapsulates an elliptic curve (EC) private key. Can be used to generate new
+// keys, export keys to other formats, or to extract a public key.
+// TODO(mattm): make this and RSAPrivateKey implement some PrivateKey interface.
+// (The difference in types of key() and public_key() make this a little
+// tricky.)
+class CRYPTO_EXPORT ECPrivateKey {
+ public:
+ ~ECPrivateKey();
+
+ // Creates a new random instance. Can return nullptr if initialization fails.
+ // The created key will use the NIST P-256 curve.
+ // TODO(mattm): Add a curve parameter.
+ static std::unique_ptr<ECPrivateKey> Create();
+
+ // Create a new instance by importing an existing private key. The format is
+ // an ASN.1-encoded PrivateKeyInfo block from PKCS #8. This can return
+ // nullptr if initialization fails.
+ static std::unique_ptr<ECPrivateKey> CreateFromPrivateKeyInfo(
+ const std::vector<uint8_t>& input);
+
+ // Creates a new instance by importing an existing key pair.
+ // The key pair is given as an ASN.1-encoded PKCS #8 EncryptedPrivateKeyInfo
+ // block and an X.509 SubjectPublicKeyInfo block.
+ // Returns nullptr if initialization fails.
+ //
+ // This function is deprecated. Use CreateFromPrivateKeyInfo for new code.
+ // See https://crbug.com/603319.
+ static std::unique_ptr<ECPrivateKey> CreateFromEncryptedPrivateKeyInfo(
+ const std::string& password,
+ const std::vector<uint8_t>& encrypted_private_key_info,
+ const std::vector<uint8_t>& subject_public_key_info);
+
+#if !defined(USE_OPENSSL)
+ // Imports the key pair into |slot| and returns in |public_key| and |key|.
+ // Shortcut for code that needs to keep a reference directly to NSS types
+ // without having to create a ECPrivateKey object and make a copy of them.
+ // TODO(mattm): move this function to some NSS util file.
+ static bool ImportFromEncryptedPrivateKeyInfo(
+ PK11SlotInfo* slot,
+ const std::string& password,
+ const uint8_t* encrypted_private_key_info,
+ size_t encrypted_private_key_info_len,
+ CERTSubjectPublicKeyInfo* decoded_spki,
+ bool permanent,
+ bool sensitive,
+ SECKEYPrivateKey** key,
+ SECKEYPublicKey** public_key);
+#endif
+
+ // Returns a copy of the object.
+ std::unique_ptr<ECPrivateKey> Copy() const;
+
+#if defined(USE_OPENSSL)
+ EVP_PKEY* key() { return key_; }
+#else
+ SECKEYPrivateKey* key() { return key_; }
+ SECKEYPublicKey* public_key() { return public_key_; }
+#endif
+
+ // Exports the private key to a PKCS #8 PrivateKeyInfo block.
+ bool ExportPrivateKey(std::vector<uint8_t>* output) const;
+
+ // Exports the private key as an ASN.1-encoded PKCS #8 EncryptedPrivateKeyInfo
+ // block and the public key as an X.509 SubjectPublicKeyInfo block.
+ // The |password| and |iterations| are used as inputs to the key derivation
+ // function for generating the encryption key. PKCS #5 recommends a minimum
+  // of 1000 iterations, on modern systems a larger value may be preferable.
+ //
+ // This function is deprecated. Use ExportPrivateKey for new code. See
+ // https://crbug.com/603319.
+ bool ExportEncryptedPrivateKey(const std::string& password,
+ int iterations,
+ std::vector<uint8_t>* output) const;
+
+ // Exports the public key to an X.509 SubjectPublicKeyInfo block.
+ bool ExportPublicKey(std::vector<uint8_t>* output) const;
+
+ // Exports the public key as an EC point in the uncompressed point format.
+ bool ExportRawPublicKey(std::string* output) const;
+
+ private:
+ // Constructor is private. Use one of the Create*() methods above instead.
+ ECPrivateKey();
+
+#if defined(USE_OPENSSL)
+ EVP_PKEY* key_;
+#else
+ SECKEYPrivateKey* key_;
+ SECKEYPublicKey* public_key_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(ECPrivateKey);
+};
+
+
+} // namespace crypto
+
+#endif // CRYPTO_EC_PRIVATE_KEY_H_
diff --git a/libchrome/crypto/ec_signature_creator_impl.h b/libchrome/crypto/ec_signature_creator_impl.h
new file mode 100644
index 0000000..21614f8
--- /dev/null
+++ b/libchrome/crypto/ec_signature_creator_impl.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_EC_SIGNATURE_CREATOR_IMPL_H_
+#define CRYPTO_EC_SIGNATURE_CREATOR_IMPL_H_
+
+#include <stdint.h>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "crypto/ec_signature_creator.h"
+
+namespace crypto {
+
+class ECSignatureCreatorImpl : public ECSignatureCreator {
+ public:
+ explicit ECSignatureCreatorImpl(ECPrivateKey* key);
+ ~ECSignatureCreatorImpl() override;
+
+ bool Sign(const uint8_t* data,
+ int data_len,
+ std::vector<uint8_t>* signature) override;
+
+ bool DecodeSignature(const std::vector<uint8_t>& der_sig,
+ std::vector<uint8_t>* out_raw_sig) override;
+
+ private:
+ ECPrivateKey* key_;
+
+ DISALLOW_COPY_AND_ASSIGN(ECSignatureCreatorImpl);
+};
+
+} // namespace crypto
+
+#endif // CRYPTO_EC_SIGNATURE_CREATOR_IMPL_H_
diff --git a/libchrome/crypto/hmac.cc b/libchrome/crypto/hmac.cc
new file mode 100644
index 0000000..af5580b
--- /dev/null
+++ b/libchrome/crypto/hmac.cc
@@ -0,0 +1,59 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/hmac.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "crypto/secure_util.h"
+#include "crypto/symmetric_key.h"
+
+namespace crypto {
+
+bool HMAC::Init(SymmetricKey* key) {
+ std::string raw_key;
+ bool result = key->GetRawKey(&raw_key) && Init(raw_key);
+ // Zero out key copy. This might get optimized away, but one can hope.
+ // Using std::string to store key info at all is a larger problem.
+ std::fill(raw_key.begin(), raw_key.end(), 0);
+ return result;
+}
+
+size_t HMAC::DigestLength() const {
+ switch (hash_alg_) {
+ case SHA1:
+ return 20;
+ case SHA256:
+ return 32;
+ default:
+ NOTREACHED();
+ return 0;
+ }
+}
+
+bool HMAC::Verify(const base::StringPiece& data,
+ const base::StringPiece& digest) const {
+ if (digest.size() != DigestLength())
+ return false;
+ return VerifyTruncated(data, digest);
+}
+
+bool HMAC::VerifyTruncated(const base::StringPiece& data,
+ const base::StringPiece& digest) const {
+ if (digest.empty())
+ return false;
+ size_t digest_length = DigestLength();
+ std::unique_ptr<unsigned char[]> computed_digest(
+ new unsigned char[digest_length]);
+ if (!Sign(data, computed_digest.get(), digest_length))
+ return false;
+
+ return SecureMemEqual(digest.data(), computed_digest.get(),
+ std::min(digest.size(), digest_length));
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/hmac.h b/libchrome/crypto/hmac.h
new file mode 100644
index 0000000..ec32ed7
--- /dev/null
+++ b/libchrome/crypto/hmac.h
@@ -0,0 +1,96 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Utility class for calculating the HMAC for a given message. We currently
+// only support SHA1 for the hash algorithm, but this can be extended easily.
+
+#ifndef CRYPTO_HMAC_H_
+#define CRYPTO_HMAC_H_
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "crypto/crypto_export.h"
+
+namespace crypto {
+
+// Simplify the interface and reduce includes by abstracting out the internals.
+struct HMACPlatformData;
+class SymmetricKey;
+
+class CRYPTO_EXPORT HMAC {
+ public:
+ // The set of supported hash functions. Extend as required.
+ enum HashAlgorithm {
+ SHA1,
+ SHA256,
+ };
+
+ explicit HMAC(HashAlgorithm hash_alg);
+ ~HMAC();
+
+ // Returns the length of digest that this HMAC will create.
+ size_t DigestLength() const;
+
+ // TODO(abarth): Add a PreferredKeyLength() member function.
+
+ // Initializes this instance using |key| of the length |key_length|. Call Init
+ // only once. It returns false on the second or later calls.
+ //
+ // NOTE: the US Federal crypto standard FIPS 198, Section 3 says:
+ // The size of the key, K, shall be equal to or greater than L/2, where L
+ // is the size of the hash function output.
+ // In FIPS 198-1 (and SP-800-107, which describes key size recommendations),
+ // this requirement is gone. But a system crypto library may still enforce
+ // this old requirement. If the key is shorter than this recommended value,
+ // Init() may fail.
+ bool Init(const unsigned char* key, size_t key_length) WARN_UNUSED_RESULT;
+
+ // Initializes this instance using |key|. Call Init
+ // only once. It returns false on the second or later calls.
+ bool Init(SymmetricKey* key) WARN_UNUSED_RESULT;
+
+ // Initializes this instance using |key|. Call Init only once. It returns
+ // false on the second or later calls.
+ bool Init(const base::StringPiece& key) WARN_UNUSED_RESULT {
+ return Init(reinterpret_cast<const unsigned char*>(key.data()),
+ key.size());
+ }
+
+ // Calculates the HMAC for the message in |data| using the algorithm supplied
+ // to the constructor and the key supplied to the Init method. The HMAC is
+ // returned in |digest|, which has |digest_length| bytes of storage available.
+ bool Sign(const base::StringPiece& data, unsigned char* digest,
+ size_t digest_length) const WARN_UNUSED_RESULT;
+
+ // Verifies that the HMAC for the message in |data| equals the HMAC provided
+ // in |digest|, using the algorithm supplied to the constructor and the key
+ // supplied to the Init method. Use of this method is strongly recommended
+ // over using Sign() with a manual comparison (such as memcmp), as such
+ // comparisons may result in side-channel disclosures, such as timing, that
+ // undermine the cryptographic integrity. |digest| must be exactly
+ // |DigestLength()| bytes long.
+ bool Verify(const base::StringPiece& data,
+ const base::StringPiece& digest) const WARN_UNUSED_RESULT;
+
+  // Verifies a truncated HMAC, behaving identically to Verify(), except
+ // that |digest| is allowed to be smaller than |DigestLength()|.
+ bool VerifyTruncated(
+ const base::StringPiece& data,
+ const base::StringPiece& digest) const WARN_UNUSED_RESULT;
+
+ private:
+ HashAlgorithm hash_alg_;
+ std::unique_ptr<HMACPlatformData> plat_;
+
+ DISALLOW_COPY_AND_ASSIGN(HMAC);
+};
+
+} // namespace crypto
+
+#endif // CRYPTO_HMAC_H_
diff --git a/libchrome/crypto/hmac_nss.cc b/libchrome/crypto/hmac_nss.cc
new file mode 100644
index 0000000..9d759b5
--- /dev/null
+++ b/libchrome/crypto/hmac_nss.cc
@@ -0,0 +1,118 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/hmac.h"
+
+#include <nss.h>
+#include <pk11pub.h>
+#include <stddef.h>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "crypto/nss_util.h"
+#include "crypto/scoped_nss_types.h"
+
+namespace crypto {
+
+struct HMACPlatformData {
+ CK_MECHANISM_TYPE mechanism_;
+ ScopedPK11Slot slot_;
+ ScopedPK11SymKey sym_key_;
+};
+
+HMAC::HMAC(HashAlgorithm hash_alg)
+ : hash_alg_(hash_alg), plat_(new HMACPlatformData()) {
+ // Only SHA-1 and SHA-256 hash algorithms are supported.
+ switch (hash_alg_) {
+ case SHA1:
+ plat_->mechanism_ = CKM_SHA_1_HMAC;
+ break;
+ case SHA256:
+ plat_->mechanism_ = CKM_SHA256_HMAC;
+ break;
+ default:
+ NOTREACHED() << "Unsupported hash algorithm";
+ break;
+ }
+}
+
+HMAC::~HMAC() {
+}
+
+bool HMAC::Init(const unsigned char *key, size_t key_length) {
+  EnsureNSSInit();
+
+  if (plat_->slot_.get()) {
+    // Init must not be called more than once on the same HMAC object.
+    NOTREACHED();
+    return false;
+  }
+
+  plat_->slot_.reset(PK11_GetInternalSlot());
+  if (!plat_->slot_.get()) {
+    NOTREACHED();
+    return false;
+  }
+
+  SECItem key_item;
+  key_item.type = siBuffer;
+  key_item.data = const_cast<unsigned char*>(key); // NSS API isn't const.
+  key_item.len = key_length;
+
+  plat_->sym_key_.reset(PK11_ImportSymKey(plat_->slot_.get(),
+                                          plat_->mechanism_,
+                                          PK11_OriginUnwrap,
+                                          CKA_SIGN,
+                                          &key_item,
+                                          NULL));
+  if (!plat_->sym_key_.get()) {
+    NOTREACHED();
+    return false;
+  }
+
+  return true;
+}
+
+bool HMAC::Sign(const base::StringPiece& data,
+                unsigned char* digest,
+                size_t digest_length) const {
+  if (!plat_->sym_key_.get()) {
+    // Init has not been called before Sign.
+    NOTREACHED();
+    return false;
+  }
+
+  SECItem param = { siBuffer, NULL, 0 };  // HMAC mechanisms take no params.
+  ScopedPK11Context context(PK11_CreateContextBySymKey(plat_->mechanism_,
+                                                       CKA_SIGN,
+                                                       plat_->sym_key_.get(),
+                                                       &param));
+  if (!context.get()) {
+    NOTREACHED();
+    return false;
+  }
+
+  if (PK11_DigestBegin(context.get()) != SECSuccess) {
+    NOTREACHED();
+    return false;
+  }
+
+  if (PK11_DigestOp(context.get(),
+                    reinterpret_cast<const unsigned char*>(data.data()),
+                    data.length()) != SECSuccess) {
+    NOTREACHED();
+    return false;
+  }
+
+  unsigned int len = 0;
+  if (PK11_DigestFinal(context.get(),
+                       digest, &len, digest_length) != SECSuccess) {
+    NOTREACHED();
+    return false;
+  }
+
+  return true;
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/hmac_unittest.cc b/libchrome/crypto/hmac_unittest.cc
new file mode 100644
index 0000000..9c42dad
--- /dev/null
+++ b/libchrome/crypto/hmac_unittest.cc
@@ -0,0 +1,298 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/macros.h"
+#include "crypto/hmac.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+static const size_t kSHA1DigestSize = 20;
+static const size_t kSHA256DigestSize = 32;
+
+static const char* kSimpleKey =
+ "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+ "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+ "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+ "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+ "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA";
+static const size_t kSimpleKeyLength = 80;
+
+static const struct {
+ const char *data;
+ const int data_len;
+ const char *digest;
+} kSimpleHmacCases[] = {
+ { "Test Using Larger Than Block-Size Key - Hash Key First", 54,
+ "\xAA\x4A\xE5\xE1\x52\x72\xD0\x0E\x95\x70\x56\x37\xCE\x8A\x3B\x55"
+ "\xED\x40\x21\x12" },
+ { "Test Using Larger Than Block-Size Key and Larger "
+ "Than One Block-Size Data", 73,
+ "\xE8\xE9\x9D\x0F\x45\x23\x7D\x78\x6D\x6B\xBA\xA7\x96\x5C\x78\x08"
+ "\xBB\xFF\x1A\x91" }
+};
+
+// End-to-end check against a recorded SafeBrowsing update response: the
+// HMAC-SHA1 of the response body under the client key must equal the MAC
+// the server sent alongside it.
+TEST(HMACTest, HmacSafeBrowsingResponseTest) {
+  const int kKeySize = 16;
+
+  // Client key.
+  const unsigned char kClientKey[kKeySize] =
+      { 0xbf, 0xf6, 0x83, 0x4b, 0x3e, 0xa3, 0x23, 0xdd,
+        0x96, 0x78, 0x70, 0x8e, 0xa1, 0x9d, 0x3b, 0x40 };
+
+  // Expected HMAC result using kMessage and kClientKey.
+  const unsigned char kReceivedHmac[kSHA1DigestSize] =
+      { 0xb9, 0x3c, 0xd6, 0xf0, 0x49, 0x47, 0xe2, 0x52,
+        0x59, 0x7a, 0xbd, 0x1f, 0x2b, 0x4c, 0x83, 0xad,
+        0x86, 0xd2, 0x48, 0x85 };
+
+  const char kMessage[] =
+"n:1896\ni:goog-malware-shavar\nu:s.ytimg.com/safebrowsing/rd/goog-malware-shav"
+"ar_s_445-450\nu:s.ytimg.com/safebrowsing/rd/goog-malware-shavar_s_439-444\nu:s"
+".ytimg.com/safebrowsing/rd/goog-malware-shavar_s_437\nu:s.ytimg.com/safebrowsi"
+"ng/rd/goog-malware-shavar_s_436\nu:s.ytimg.com/safebrowsing/rd/goog-malware-sh"
+"avar_s_433-435\nu:s.ytimg.com/safebrowsing/rd/goog-malware-shavar_s_431\nu:s.y"
+"timg.com/safebrowsing/rd/goog-malware-shavar_s_430\nu:s.ytimg.com/safebrowsing"
+"/rd/goog-malware-shavar_s_429\nu:s.ytimg.com/safebrowsing/rd/goog-malware-shav"
+"ar_s_428\nu:s.ytimg.com/safebrowsing/rd/goog-malware-shavar_s_426\nu:s.ytimg.c"
+"om/safebrowsing/rd/goog-malware-shavar_s_424\nu:s.ytimg.com/safebrowsing/rd/go"
+"og-malware-shavar_s_423\nu:s.ytimg.com/safebrowsing/rd/goog-malware-shavar_s_4"
+"22\nu:s.ytimg.com/safebrowsing/rd/goog-malware-shavar_s_420\nu:s.ytimg.com/saf"
+"ebrowsing/rd/goog-malware-shavar_s_419\nu:s.ytimg.com/safebrowsing/rd/goog-mal"
+"ware-shavar_s_414\nu:s.ytimg.com/safebrowsing/rd/goog-malware-shavar_s_409-411"
+"\nu:s.ytimg.com/safebrowsing/rd/goog-malware-shavar_s_405\nu:s.ytimg.com/safeb"
+"rowsing/rd/goog-malware-shavar_s_404\nu:s.ytimg.com/safebrowsing/rd/goog-malwa"
+"re-shavar_s_402\nu:s.ytimg.com/safebrowsing/rd/goog-malware-shavar_s_401\nu:s."
+"ytimg.com/safebrowsing/rd/goog-malware-shavar_a_973-978\nu:s.ytimg.com/safebro"
+"wsing/rd/goog-malware-shavar_a_937-972\nu:s.ytimg.com/safebrowsing/rd/goog-mal"
+"ware-shavar_a_931-936\nu:s.ytimg.com/safebrowsing/rd/goog-malware-shavar_a_925"
+"-930\nu:s.ytimg.com/safebrowsing/rd/goog-malware-shavar_a_919-924\ni:goog-phis"
+"h-shavar\nu:s.ytimg.com/safebrowsing/rd/goog-phish-shavar_a_2633\nu:s.ytimg.co"
+"m/safebrowsing/rd/goog-phish-shavar_a_2632\nu:s.ytimg.com/safebrowsing/rd/goog"
+"-phish-shavar_a_2629-2631\nu:s.ytimg.com/safebrowsing/rd/goog-phish-shavar_a_2"
+"626-2628\nu:s.ytimg.com/safebrowsing/rd/goog-phish-shavar_a_2625\n";
+
+  std::string message_data(kMessage);
+
+  crypto::HMAC hmac(crypto::HMAC::SHA1);
+  ASSERT_TRUE(hmac.Init(kClientKey, kKeySize));
+  unsigned char calculated_hmac[kSHA1DigestSize];
+
+  EXPECT_TRUE(hmac.Sign(message_data, calculated_hmac, kSHA1DigestSize));
+  EXPECT_EQ(0, memcmp(kReceivedHmac, calculated_hmac, kSHA1DigestSize));
+}
+
+// Test cases from RFC 2202 section 3 (HMAC-SHA-1 known-answer vectors).
+// Each entry carries an explicit key/data length because several vectors
+// contain embedded NUL-like binary bytes.
+TEST(HMACTest, RFC2202TestCases) {
+  const struct {
+    const char *key;
+    const int key_len;
+    const char *data;
+    const int data_len;
+    const char *digest;
+  } cases[] = {
+    { "\x0B\x0B\x0B\x0B\x0B\x0B\x0B\x0B\x0B\x0B\x0B\x0B\x0B\x0B\x0B\x0B"
+      "\x0B\x0B\x0B\x0B", 20,
+      "Hi There", 8,
+      "\xB6\x17\x31\x86\x55\x05\x72\x64\xE2\x8B\xC0\xB6\xFB\x37\x8C\x8E"
+      "\xF1\x46\xBE\x00" },
+    { "Jefe", 4,
+      "what do ya want for nothing?", 28,
+      "\xEF\xFC\xDF\x6A\xE5\xEB\x2F\xA2\xD2\x74\x16\xD5\xF1\x84\xDF\x9C"
+      "\x25\x9A\x7C\x79" },
+    { "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+      "\xAA\xAA\xAA\xAA", 20,
+      "\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD"
+      "\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD"
+      "\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD\xDD"
+      "\xDD\xDD", 50,
+      "\x12\x5D\x73\x42\xB9\xAC\x11\xCD\x91\xA3\x9A\xF4\x8A\xA1\x7B\x4F"
+      "\x63\xF1\x75\xD3" },
+    { "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\x10"
+      "\x11\x12\x13\x14\x15\x16\x17\x18\x19", 25,
+      "\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD"
+      "\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD"
+      "\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD\xCD"
+      "\xCD\xCD", 50,
+      "\x4C\x90\x07\xF4\x02\x62\x50\xC6\xBC\x84\x14\xF9\xBF\x50\xC8\x6C"
+      "\x2D\x72\x35\xDA" },
+    { "\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C\x0C"
+      "\x0C\x0C\x0C\x0C", 20,
+      "Test With Truncation", 20,
+      "\x4C\x1A\x03\x42\x4B\x55\xE0\x7F\xE7\xF2\x7B\xE1\xD5\x8B\xB9\x32"
+      "\x4A\x9A\x5A\x04" },
+    { "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+      "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+      "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+      "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+      "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA",
+      80,
+      "Test Using Larger Than Block-Size Key - Hash Key First", 54,
+      "\xAA\x4A\xE5\xE1\x52\x72\xD0\x0E\x95\x70\x56\x37\xCE\x8A\x3B\x55"
+      "\xED\x40\x21\x12" },
+    { "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+      "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+      "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+      "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA"
+      "\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA\xAA",
+      80,
+      "Test Using Larger Than Block-Size Key and Larger "
+      "Than One Block-Size Data", 73,
+      "\xE8\xE9\x9D\x0F\x45\x23\x7D\x78\x6D\x6B\xBA\xA7\x96\x5C\x78\x08"
+      "\xBB\xFF\x1A\x91" }
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    crypto::HMAC hmac(crypto::HMAC::SHA1);
+    ASSERT_TRUE(hmac.Init(reinterpret_cast<const unsigned char*>(cases[i].key),
+                          cases[i].key_len));
+    std::string data_string(cases[i].data, cases[i].data_len);
+    unsigned char digest[kSHA1DigestSize];
+    EXPECT_TRUE(hmac.Sign(data_string, digest, kSHA1DigestSize));
+    EXPECT_EQ(0, memcmp(cases[i].digest, digest, kSHA1DigestSize));
+  }
+}
+
+// HMAC-SHA-256 known-answer vector: RFC 4231 section 4.7 (test case 6,
+// 131-byte key of 0xAA bytes, i.e. larger than the SHA-256 block size).
+// TODO(wtc): add other test vectors from RFC 4231.
+TEST(HMACTest, RFC4231TestCase6) {
+  unsigned char key[131];
+  for (size_t i = 0; i < sizeof(key); ++i)
+    key[i] = 0xaa;
+
+  std::string data = "Test Using Larger Than Block-Size Key - Hash Key First";
+  ASSERT_EQ(54U, data.size());
+
+  static unsigned char kKnownHMACSHA256[] = {
+    0x60, 0xe4, 0x31, 0x59, 0x1e, 0xe0, 0xb6, 0x7f,
+    0x0d, 0x8a, 0x26, 0xaa, 0xcb, 0xf5, 0xb7, 0x7f,
+    0x8e, 0x0b, 0xc6, 0x21, 0x37, 0x28, 0xc5, 0x14,
+    0x05, 0x46, 0x04, 0x0f, 0x0e, 0xe3, 0x7f, 0x54
+  };
+
+  crypto::HMAC hmac(crypto::HMAC::SHA256);
+  ASSERT_TRUE(hmac.Init(key, sizeof(key)));
+  unsigned char calculated_hmac[kSHA256DigestSize];
+
+  EXPECT_EQ(kSHA256DigestSize, hmac.DigestLength());
+  EXPECT_TRUE(hmac.Sign(data, calculated_hmac, kSHA256DigestSize));
+  EXPECT_EQ(0, memcmp(kKnownHMACSHA256, calculated_hmac, kSHA256DigestSize));
+}
+
+// Based on NSS's FIPS HMAC power-up self-test. Exercises Sign, Verify, and
+// VerifyTruncated for SHA-1, plus Sign for SHA-256, against fixed known
+// answers.
+TEST(HMACTest, NSSFIPSPowerUpSelfTest) {
+  static const char kKnownMessage[] =
+      "The test message for the MD2, MD5, and SHA-1 hashing algorithms.";
+
+  // Note: includes a trailing 0x00 byte, so the key is 37 bytes long.
+  static const unsigned char kKnownSecretKey[] = {
+    0x46, 0x69, 0x72, 0x65, 0x66, 0x6f, 0x78, 0x20,
+    0x61, 0x6e, 0x64, 0x20, 0x54, 0x68, 0x75, 0x6e,
+    0x64, 0x65, 0x72, 0x42, 0x69, 0x72, 0x64, 0x20,
+    0x61, 0x72, 0x65, 0x20, 0x61, 0x77, 0x65, 0x73,
+    0x6f, 0x6d, 0x65, 0x21, 0x00
+  };
+
+  static const size_t kKnownSecretKeySize = sizeof(kKnownSecretKey);
+
+  // HMAC-SHA-1 known answer (20 bytes).
+  static const unsigned char kKnownHMACSHA1[] = {
+    0xd5, 0x85, 0xf6, 0x5b, 0x39, 0xfa, 0xb9, 0x05,
+    0x3b, 0x57, 0x1d, 0x61, 0xe7, 0xb8, 0x84, 0x1e,
+    0x5d, 0x0e, 0x1e, 0x11
+  };
+
+  // HMAC-SHA-256 known answer (32 bytes).
+  static const unsigned char kKnownHMACSHA256[] = {
+    0x05, 0x75, 0x9a, 0x9e, 0x70, 0x5e, 0xe7, 0x44,
+    0xe2, 0x46, 0x4b, 0x92, 0x22, 0x14, 0x22, 0xe0,
+    0x1b, 0x92, 0x8a, 0x0c, 0xfe, 0xf5, 0x49, 0xe9,
+    0xa7, 0x1b, 0x56, 0x7d, 0x1d, 0x29, 0x40, 0x48
+  };
+
+  std::string message_data(kKnownMessage);
+
+  crypto::HMAC hmac(crypto::HMAC::SHA1);
+  ASSERT_TRUE(hmac.Init(kKnownSecretKey, kKnownSecretKeySize));
+  unsigned char calculated_hmac[kSHA1DigestSize];
+
+  EXPECT_EQ(kSHA1DigestSize, hmac.DigestLength());
+  EXPECT_TRUE(hmac.Sign(message_data, calculated_hmac, kSHA1DigestSize));
+  EXPECT_EQ(0, memcmp(kKnownHMACSHA1, calculated_hmac, kSHA1DigestSize));
+  EXPECT_TRUE(hmac.Verify(
+      message_data,
+      base::StringPiece(reinterpret_cast<const char*>(kKnownHMACSHA1),
+                        kSHA1DigestSize)));
+  // VerifyTruncated accepts a prefix of the full digest.
+  EXPECT_TRUE(hmac.VerifyTruncated(
+      message_data,
+      base::StringPiece(reinterpret_cast<const char*>(kKnownHMACSHA1),
+                        kSHA1DigestSize / 2)));
+
+  crypto::HMAC hmac2(crypto::HMAC::SHA256);
+  ASSERT_TRUE(hmac2.Init(kKnownSecretKey, kKnownSecretKeySize));
+  unsigned char calculated_hmac2[kSHA256DigestSize];
+
+  EXPECT_TRUE(hmac2.Sign(message_data, calculated_hmac2, kSHA256DigestSize));
+  EXPECT_EQ(0, memcmp(kKnownHMACSHA256, calculated_hmac2, kSHA256DigestSize));
+}
+
+// A single HMAC object may Sign multiple messages after one Init call.
+TEST(HMACTest, HMACObjectReuse) {
+  crypto::HMAC hmac(crypto::HMAC::SHA1);
+  ASSERT_TRUE(
+      hmac.Init(reinterpret_cast<const unsigned char*>(kSimpleKey),
+                kSimpleKeyLength));
+  for (size_t i = 0; i < arraysize(kSimpleHmacCases); ++i) {
+    std::string data_string(kSimpleHmacCases[i].data,
+                            kSimpleHmacCases[i].data_len);
+    unsigned char digest[kSHA1DigestSize];
+    EXPECT_TRUE(hmac.Sign(data_string, digest, kSHA1DigestSize));
+    EXPECT_EQ(0, memcmp(kSimpleHmacCases[i].digest, digest, kSHA1DigestSize));
+  }
+}
+
+// Verify() must accept the correct digest and reject both a digest of the
+// wrong length and a same-length digest with wrong contents.
+TEST(HMACTest, Verify) {
+  crypto::HMAC hmac(crypto::HMAC::SHA1);
+  ASSERT_TRUE(
+      hmac.Init(reinterpret_cast<const unsigned char*>(kSimpleKey),
+                kSimpleKeyLength));
+  const char empty_digest[kSHA1DigestSize] = { 0 };
+  for (size_t i = 0; i < arraysize(kSimpleHmacCases); ++i) {
+    // Expected results
+    EXPECT_TRUE(hmac.Verify(
+        base::StringPiece(kSimpleHmacCases[i].data,
+                          kSimpleHmacCases[i].data_len),
+        base::StringPiece(kSimpleHmacCases[i].digest,
+                          kSHA1DigestSize)));
+    // Mismatched size
+    EXPECT_FALSE(hmac.Verify(
+        base::StringPiece(kSimpleHmacCases[i].data,
+                          kSimpleHmacCases[i].data_len),
+        base::StringPiece(kSimpleHmacCases[i].data,
+                          kSimpleHmacCases[i].data_len)));
+
+    // Expected size, mismatched data
+    EXPECT_FALSE(hmac.Verify(
+        base::StringPiece(kSimpleHmacCases[i].data,
+                          kSimpleHmacCases[i].data_len),
+        base::StringPiece(empty_digest, kSHA1DigestSize)));
+  }
+}
+
+// HMAC-SHA1 with a zero-length key and empty message is well defined; the
+// expected digest below is the standard empty-key/empty-message value.
+TEST(HMACTest, EmptyKey) {
+  // Test vector from https://en.wikipedia.org/wiki/HMAC
+  const char* kExpectedDigest =
+      "\xFB\xDB\x1D\x1B\x18\xAA\x6C\x08\x32\x4B\x7D\x64\xB7\x1F\xB7\x63"
+      "\x70\x69\x0E\x1D";
+  base::StringPiece data("");
+
+  crypto::HMAC hmac(crypto::HMAC::SHA1);
+  ASSERT_TRUE(hmac.Init(nullptr, 0));
+
+  unsigned char digest[kSHA1DigestSize];
+  EXPECT_TRUE(hmac.Sign(data, digest, kSHA1DigestSize));
+  EXPECT_EQ(0, memcmp(kExpectedDigest, digest, kSHA1DigestSize));
+
+  EXPECT_TRUE(hmac.Verify(
+      data, base::StringPiece(kExpectedDigest, kSHA1DigestSize)));
+}
diff --git a/libchrome/crypto/nss_crypto_module_delegate.h b/libchrome/crypto/nss_crypto_module_delegate.h
new file mode 100644
index 0000000..6c1da68
--- /dev/null
+++ b/libchrome/crypto/nss_crypto_module_delegate.h
@@ -0,0 +1,53 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_NSS_CRYPTO_MODULE_DELEGATE_H_
+#define CRYPTO_NSS_CRYPTO_MODULE_DELEGATE_H_
+
+#include <string>
+
+#include "base/callback_forward.h"
+#include "crypto/scoped_nss_types.h"
+
+namespace crypto {
+
+// PK11_SetPasswordFunc is a global setting. An implementation of
+// CryptoModuleBlockingPasswordDelegate should be passed using wincx() as the
+// user data argument (|wincx|) to relevant NSS functions, which the global
+// password handler will call to do the actual work. This delegate should only
+// be used in NSS calls on worker threads due to the blocking nature.
+class CryptoModuleBlockingPasswordDelegate {
+ public:
+  virtual ~CryptoModuleBlockingPasswordDelegate() {}
+
+  // Return a value suitable for passing to the |wincx| argument of relevant
+  // NSS functions. This should be used instead of passing the object pointer
+  // directly to avoid accidentally casting a pointer to a subclass to void*
+  // and then casting back to a pointer of the base class.
+  void* wincx() { return this; }
+
+  // Requests a password to unlock |slot_name|. The interface is synchronous
+  // because NSS cannot issue an asynchronous request. |retry| is true if this
+  // is a request for the retry and we previously returned the wrong password.
+  // The implementation should set |*cancelled| to true if the user cancelled
+  // instead of entering a password, otherwise it should return the password
+  // the user entered.
+  virtual std::string RequestPassword(const std::string& slot_name, bool retry,
+                                      bool* cancelled) = 0;
+
+};
+
+// Extends CryptoModuleBlockingPasswordDelegate with the ability to return a
+// slot in which to act. (Eg, which slot to store a generated key in.)
+class NSSCryptoModuleDelegate : public CryptoModuleBlockingPasswordDelegate {
+ public:
+  ~NSSCryptoModuleDelegate() override {}
+
+  // Get the slot to store the generated key.
+  virtual ScopedPK11Slot RequestSlot() = 0;
+};
+
+} // namespace crypto
+
+#endif // CRYPTO_NSS_CRYPTO_MODULE_DELEGATE_H_
diff --git a/libchrome/crypto/nss_key_util.cc b/libchrome/crypto/nss_key_util.cc
new file mode 100644
index 0000000..da8d9c3
--- /dev/null
+++ b/libchrome/crypto/nss_key_util.cc
@@ -0,0 +1,154 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/nss_key_util.h"
+
+#include <cryptohi.h>
+#include <keyhi.h>
+#include <pk11pub.h>
+#include <secmod.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/logging.h"
+#include "crypto/nss_util.h"
+#include "crypto/nss_util_internal.h"
+
+namespace crypto {
+
+namespace {
+
+// Custom deleter so std::unique_ptr can own a CERTSubjectPublicKeyInfo.
+struct PublicKeyInfoDeleter {
+  inline void operator()(CERTSubjectPublicKeyInfo* spki) {
+    SECKEY_DestroySubjectPublicKeyInfo(spki);
+  }
+};
+
+typedef std::unique_ptr<CERTSubjectPublicKeyInfo, PublicKeyInfoDeleter>
+ ScopedPublicKeyInfo;
+
+// Decodes |input| as a SubjectPublicKeyInfo and returns a SECItem containing
+// the CKA_ID of that public key or nullptr on error. Only RSA keys are
+// handled; the ID is derived from the RSA modulus, matching what NSS does
+// internally.
+ScopedSECItem MakeIDFromSPKI(const std::vector<uint8_t>& input) {
+  // First, decode and save the public key.
+  SECItem key_der;
+  key_der.type = siBuffer;
+  key_der.data = const_cast<unsigned char*>(input.data());
+  key_der.len = input.size();
+
+  ScopedPublicKeyInfo spki(SECKEY_DecodeDERSubjectPublicKeyInfo(&key_der));
+  if (!spki)
+    return nullptr;
+
+  ScopedSECKEYPublicKey result(SECKEY_ExtractPublicKey(spki.get()));
+  if (!result)
+    return nullptr;
+
+  // See pk11_MakeIDFromPublicKey from NSS. For now, only RSA keys are
+  // supported.
+  if (SECKEY_GetPublicKeyType(result.get()) != rsaKey)
+    return nullptr;
+
+  return ScopedSECItem(PK11_MakeIDFromPubKey(&result->u.rsa.modulus));
+}
+
+} // namespace
+
+// Generates an RSA keypair of |num_bits| with public exponent 65537 in
+// |slot|. On success fills |public_key| and |private_key| and returns true.
+// When |permanent| is true the key is created as a permanent token object and
+// marked sensitive (not extractable in plaintext).
+bool GenerateRSAKeyPairNSS(PK11SlotInfo* slot,
+                           uint16_t num_bits,
+                           bool permanent,
+                           ScopedSECKEYPublicKey* public_key,
+                           ScopedSECKEYPrivateKey* private_key) {
+  DCHECK(slot);
+
+  PK11RSAGenParams param;
+  param.keySizeInBits = num_bits;
+  param.pe = 65537L;
+  SECKEYPublicKey* public_key_raw = nullptr;
+  // NOTE: the "&param" argument below was corrupted to "¶m" (an
+  // HTML-entity mangling of "&param") in the imported text and would not
+  // compile; restored here.
+  private_key->reset(PK11_GenerateKeyPair(slot, CKM_RSA_PKCS_KEY_PAIR_GEN,
+                                          &param, &public_key_raw, permanent,
+                                          permanent /* sensitive */, nullptr));
+  if (!*private_key)
+    return false;
+
+  public_key->reset(public_key_raw);
+  return true;
+}
+
+// Imports a DER-encoded PKCS #8 PrivateKeyInfo from |input| into |slot| and
+// returns the resulting key, or nullptr on error. The input is first
+// round-tripped through a strict single-element ASN.1 decode so that
+// trailing garbage is rejected rather than silently accepted by NSS.
+ScopedSECKEYPrivateKey ImportNSSKeyFromPrivateKeyInfo(
+    PK11SlotInfo* slot,
+    const std::vector<uint8_t>& input,
+    bool permanent) {
+  DCHECK(slot);
+
+  ScopedPLArenaPool arena(PORT_NewArena(DER_DEFAULT_CHUNKSIZE));
+  DCHECK(arena);
+
+  // Excess data is illegal, but NSS silently accepts it, so first ensure that
+  // |input| consists of a single ASN.1 element.
+  SECItem input_item;
+  input_item.data = const_cast<unsigned char*>(input.data());
+  input_item.len = input.size();
+  SECItem der_private_key_info;
+  SECStatus rv =
+      SEC_QuickDERDecodeItem(arena.get(), &der_private_key_info,
+                             SEC_ASN1_GET(SEC_AnyTemplate), &input_item);
+  if (rv != SECSuccess)
+    return nullptr;
+
+  // Allow the private key to be used for key unwrapping, data decryption,
+  // and signature generation.
+  const unsigned int key_usage =
+      KU_KEY_ENCIPHERMENT | KU_DATA_ENCIPHERMENT | KU_DIGITAL_SIGNATURE;
+  SECKEYPrivateKey* key_raw = nullptr;
+  rv = PK11_ImportDERPrivateKeyInfoAndReturnKey(
+      slot, &der_private_key_info, nullptr, nullptr, permanent,
+      permanent /* sensitive */, key_usage, &key_raw, nullptr);
+  if (rv != SECSuccess)
+    return nullptr;
+  return ScopedSECKEYPrivateKey(key_raw);
+}
+
+// Derives a CKA_ID from the DER SubjectPublicKeyInfo in |input| and scans
+// every slot of every loaded NSS module for the matching private key.
+// Returns the first match or nullptr if the key is in no module.
+ScopedSECKEYPrivateKey FindNSSKeyFromPublicKeyInfo(
+    const std::vector<uint8_t>& input) {
+  EnsureNSSInit();
+
+  ScopedSECItem cka_id(MakeIDFromSPKI(input));
+  if (!cka_id)
+    return nullptr;
+
+  // Search all slots in all modules for the key with the given ID. The read
+  // lock keeps the module list stable while we iterate.
+  AutoSECMODListReadLock auto_lock;
+  const SECMODModuleList* head = SECMOD_GetDefaultModuleList();
+  for (const SECMODModuleList* item = head; item != nullptr;
+       item = item->next) {
+    // Unloaded modules expose no usable slots.
+    int slot_count = item->module->loaded ? item->module->slotCount : 0;
+    for (int i = 0; i < slot_count; i++) {
+      // Look for the key in slot |i|.
+      ScopedSECKEYPrivateKey key(
+          PK11_FindKeyByKeyID(item->module->slots[i], cka_id.get(), nullptr));
+      if (key)
+        return key;
+    }
+  }
+
+  // The key wasn't found in any module.
+  return nullptr;
+}
+
+// Like FindNSSKeyFromPublicKeyInfo but restricted to a single |slot|.
+// Returns nullptr when the SPKI cannot be decoded or the key is absent.
+ScopedSECKEYPrivateKey FindNSSKeyFromPublicKeyInfoInSlot(
+    const std::vector<uint8_t>& input,
+    PK11SlotInfo* slot) {
+  DCHECK(slot);
+
+  ScopedSECItem cka_id(MakeIDFromSPKI(input));
+  if (!cka_id)
+    return nullptr;
+
+  return ScopedSECKEYPrivateKey(
+      PK11_FindKeyByKeyID(slot, cka_id.get(), nullptr));
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/nss_key_util.h b/libchrome/crypto/nss_key_util.h
new file mode 100644
index 0000000..86934dd
--- /dev/null
+++ b/libchrome/crypto/nss_key_util.h
@@ -0,0 +1,54 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_NSS_KEY_UTIL_H_
+#define CRYPTO_NSS_KEY_UTIL_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "build/build_config.h"
+#include "crypto/crypto_export.h"
+#include "crypto/scoped_nss_types.h"
+
+typedef struct PK11SlotInfoStr PK11SlotInfo;
+
+namespace crypto {
+
+// Generates a new RSA keypair of size |num_bits| in |slot|. Returns true on
+// success and false on failure. If |permanent| is true, the resulting key is
+// permanent and is not exportable in plaintext form.
+CRYPTO_EXPORT bool GenerateRSAKeyPairNSS(
+ PK11SlotInfo* slot,
+ uint16_t num_bits,
+ bool permanent,
+ ScopedSECKEYPublicKey* out_public_key,
+ ScopedSECKEYPrivateKey* out_private_key);
+
+// Imports a private key from |input| into |slot|. |input| is interpreted as a
+// DER-encoded PrivateKeyInfo block from PKCS #8. Returns nullptr on error. If
+// |permanent| is true, the resulting key is permanent and is not exportable in
+// plaintext form.
+CRYPTO_EXPORT ScopedSECKEYPrivateKey
+ImportNSSKeyFromPrivateKeyInfo(PK11SlotInfo* slot,
+ const std::vector<uint8_t>& input,
+ bool permanent);
+
+// Decodes |input| as a DER-encoded X.509 SubjectPublicKeyInfo and searches for
+// the private key half in the key database. Returns the private key on success
+// or nullptr on error.
+CRYPTO_EXPORT ScopedSECKEYPrivateKey
+FindNSSKeyFromPublicKeyInfo(const std::vector<uint8_t>& input);
+
+// Decodes |input| as a DER-encoded X.509 SubjectPublicKeyInfo and searches for
+// the private key half in the slot specified by |slot|. Returns the private key
+// on success or nullptr on error.
+CRYPTO_EXPORT ScopedSECKEYPrivateKey
+FindNSSKeyFromPublicKeyInfoInSlot(const std::vector<uint8_t>& input,
+ PK11SlotInfo* slot);
+
+} // namespace crypto
+
+#endif // CRYPTO_NSS_KEY_UTIL_H_
diff --git a/libchrome/crypto/nss_key_util_unittest.cc b/libchrome/crypto/nss_key_util_unittest.cc
new file mode 100644
index 0000000..ced9850
--- /dev/null
+++ b/libchrome/crypto/nss_key_util_unittest.cc
@@ -0,0 +1,86 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/nss_key_util.h"
+
+#include <keyhi.h>
+#include <pk11pub.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "crypto/nss_util.h"
+#include "crypto/scoped_nss_types.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace crypto {
+
+// Fixture that initializes NSS and hands tests the internal (soft token)
+// slot, where the generated test keys live.
+class NSSKeyUtilTest : public testing::Test {
+ public:
+  void SetUp() override {
+    EnsureNSSInit();
+
+    internal_slot_.reset(PK11_GetInternalSlot());
+    ASSERT_TRUE(internal_slot_);
+  }
+
+  PK11SlotInfo* internal_slot() { return internal_slot_.get(); }
+
+ private:
+  ScopedPK11Slot internal_slot_;
+};
+
+// Generating a keypair yields RSA keys whose modulus length matches the
+// requested bit size.
+TEST_F(NSSKeyUtilTest, GenerateRSAKeyPairNSS) {
+  const int kKeySizeBits = 1024;
+
+  ScopedSECKEYPublicKey public_key;
+  ScopedSECKEYPrivateKey private_key;
+  ASSERT_TRUE(GenerateRSAKeyPairNSS(internal_slot(), kKeySizeBits,
+                                    false /* not permanent */, &public_key,
+                                    &private_key));
+
+  EXPECT_EQ(rsaKey, SECKEY_GetPublicKeyType(public_key.get()));
+  EXPECT_EQ(rsaKey, SECKEY_GetPrivateKeyType(private_key.get()));
+  EXPECT_EQ((kKeySizeBits + 7) / 8,
+            PK11_GetPrivateModulusLen(private_key.get()));
+}
+
+// A freshly generated key must be findable by its encoded public key; the
+// match is confirmed via the PKCS #11 object ID.
+TEST_F(NSSKeyUtilTest, FindNSSKeyFromPublicKeyInfo) {
+  // Create an NSS keypair, which will put the keys in the user's NSSDB.
+  ScopedSECKEYPublicKey public_key;
+  ScopedSECKEYPrivateKey private_key;
+  ASSERT_TRUE(GenerateRSAKeyPairNSS(internal_slot(), 512,
+                                    false /* not permanent */, &public_key,
+                                    &private_key));
+
+  ScopedSECItem item(SECKEY_EncodeDERSubjectPublicKeyInfo(public_key.get()));
+  ASSERT_TRUE(item);
+  std::vector<uint8_t> public_key_der(item->data, item->data + item->len);
+
+  ScopedSECKEYPrivateKey private_key2 =
+      FindNSSKeyFromPublicKeyInfo(public_key_der);
+  ASSERT_TRUE(private_key2);
+  EXPECT_EQ(private_key->pkcs11ID, private_key2->pkcs11ID);
+}
+
+// After the token objects are destroyed, the lookup by public key info must
+// fail rather than return a stale key.
+TEST_F(NSSKeyUtilTest, FailedFindNSSKeyFromPublicKeyInfo) {
+  // Create an NSS keypair, which will put the keys in the user's NSSDB.
+  ScopedSECKEYPublicKey public_key;
+  ScopedSECKEYPrivateKey private_key;
+  ASSERT_TRUE(GenerateRSAKeyPairNSS(internal_slot(), 512,
+                                    false /* not permanent */, &public_key,
+                                    &private_key));
+
+  ScopedSECItem item(SECKEY_EncodeDERSubjectPublicKeyInfo(public_key.get()));
+  ASSERT_TRUE(item);
+  std::vector<uint8_t> public_key_der(item->data, item->data + item->len);
+
+  // Remove the keys from the DB, and make sure we can't find them again.
+  PK11_DestroyTokenObject(private_key->pkcs11Slot, private_key->pkcs11ID);
+  PK11_DestroyTokenObject(public_key->pkcs11Slot, public_key->pkcs11ID);
+
+  EXPECT_FALSE(FindNSSKeyFromPublicKeyInfo(public_key_der));
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/nss_util.cc b/libchrome/crypto/nss_util.cc
new file mode 100644
index 0000000..96ee060
--- /dev/null
+++ b/libchrome/crypto/nss_util.cc
@@ -0,0 +1,1034 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/nss_util.h"
+
+#include <nss.h>
+#include <pk11pub.h>
+#include <plarena.h>
+#include <prerror.h>
+#include <prinit.h>
+#include <prtime.h>
+#include <secmod.h>
+
+#include <memory>
+#include <utility>
+
+#include "crypto/nss_util_internal.h"
+
+#if defined(OS_OPENBSD)
+#include <sys/mount.h>
+#include <sys/param.h>
+#endif
+
+#if defined(OS_CHROMEOS)
+#include <dlfcn.h>
+#endif
+
+#include <map>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/cpu.h"
+#include "base/debug/alias.h"
+#include "base/debug/stack_trace.h"
+#include "base/environment.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/native_library.h"
+#include "base/stl_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread_checker.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/threading/worker_pool.h"
+#include "build/build_config.h"
+
+#if !defined(OS_CHROMEOS)
+#include "base/base_paths.h"
+#include "base/path_service.h"
+#endif
+
+// USE_NSS_CERTS means NSS is used for certificates and platform integration.
+// This requires additional support to manage the platform certificate and key
+// stores.
+#if defined(USE_NSS_CERTS)
+#include "base/synchronization/lock.h"
+#include "crypto/nss_crypto_module_delegate.h"
+#endif // defined(USE_NSS_CERTS)
+
+namespace crypto {
+
+namespace {
+
+#if defined(OS_CHROMEOS)
+const char kUserNSSDatabaseName[] = "UserNSSDB";
+
+// Constants for loading the Chrome OS TPM-backed PKCS #11 library.
+const char kChapsModuleName[] = "Chaps";
+const char kChapsPath[] = "libchaps.so";
+
+// Fake certificate authority database used for testing.
+static const base::FilePath::CharType kReadOnlyCertDB[] =
+ FILE_PATH_LITERAL("/etc/fake_root_ca/nssdb");
+#endif // defined(OS_CHROMEOS)
+
+// Returns a human-readable description of the current thread's most recent
+// NSPR/NSS error, falling back to the numeric code when no error text is set.
+std::string GetNSSErrorMessage() {
+  std::string result;
+  if (PR_GetErrorTextLength()) {
+    std::unique_ptr<char[]> error_text(new char[PR_GetErrorTextLength() + 1]);
+    PRInt32 copied = PR_GetErrorText(error_text.get());
+    result = std::string(error_text.get(), copied);
+  } else {
+    result = base::StringPrintf("NSS error code: %d", PR_GetError());
+  }
+  return result;
+}
+
+#if defined(USE_NSS_CERTS)
+#if !defined(OS_CHROMEOS)
+// Returns ~/.pki/nssdb, creating the directory if necessary. Returns an
+// empty path (after logging) when the home directory is unavailable or the
+// directory cannot be created.
+base::FilePath GetDefaultConfigDirectory() {
+  base::FilePath dir;
+  PathService::Get(base::DIR_HOME, &dir);
+  if (dir.empty()) {
+    LOG(ERROR) << "Failed to get home directory.";
+    return dir;
+  }
+  dir = dir.AppendASCII(".pki").AppendASCII("nssdb");
+  if (!base::CreateDirectory(dir)) {
+    LOG(ERROR) << "Failed to create " << dir.value() << " directory.";
+    dir.clear();
+  }
+  DVLOG(2) << "DefaultConfigDirectory: " << dir.value();
+  return dir;
+}
+#endif // !defined(OS_CHROMEOS)
+
+// On non-Chrome OS platforms, return the default config directory. On Chrome
+// OS test images, return a read-only directory with fake root CA certs (which
+// are used by the local Google Accounts server mock we use when testing our
+// login code). On Chrome OS non-test images (where the read-only directory
+// doesn't exist), return an empty path.
+base::FilePath GetInitialConfigDirectory() {
+#if defined(OS_CHROMEOS)
+  base::FilePath database_dir = base::FilePath(kReadOnlyCertDB);
+  if (!base::PathExists(database_dir))
+    database_dir.clear();
+  return database_dir;
+#else
+  return GetDefaultConfigDirectory();
+#endif // defined(OS_CHROMEOS)
+}
+
+// This callback for NSS forwards all requests to a caller-specified
+// CryptoModuleBlockingPasswordDelegate object (passed via |arg|/wincx()).
+// Returns a PORT_Strdup'ed copy of the password that NSS will free, or NULL
+// when the user cancelled or no delegate was supplied.
+char* PKCS11PasswordFunc(PK11SlotInfo* slot, PRBool retry, void* arg) {
+  crypto::CryptoModuleBlockingPasswordDelegate* delegate =
+      reinterpret_cast<crypto::CryptoModuleBlockingPasswordDelegate*>(arg);
+  if (delegate) {
+    bool cancelled = false;
+    std::string password = delegate->RequestPassword(PK11_GetTokenName(slot),
+                                                     retry != PR_FALSE,
+                                                     &cancelled);
+    if (cancelled)
+      return NULL;
+    char* result = PORT_Strdup(password.c_str());
+    // Overwrite the local copy of the password with NUL bytes so it does not
+    // linger in memory.
+    password.replace(0, password.size(), password.size(), 0);
+    return result;
+  }
+  DLOG(ERROR) << "PK11 password requested with NULL arg";
+  return NULL;
+}
+
+// NSS creates a local cache of the sqlite database if it detects that the
+// filesystem the database is on is much slower than the local disk. The
+// detection doesn't work with the latest versions of sqlite, such as 3.6.22
+// (NSS bug https://bugzilla.mozilla.org/show_bug.cgi?id=578561). So we set
+// the NSS environment variable NSS_SDB_USE_CACHE to "yes" to override NSS's
+// detection when database_dir is on NFS. See http://crbug.com/48585.
+//
+// TODO(wtc): port this function to other USE_NSS_CERTS platforms. It is
+// defined only for OS_LINUX and OS_OPENBSD simply because the statfs
+// structure is OS-specific.
+//
+// Because this function sets an environment variable it must be run before we
+// go multi-threaded.
+void UseLocalCacheOfNSSDatabaseIfNFS(const base::FilePath& database_dir) {
+  bool db_on_nfs = false;
+#if defined(OS_LINUX)
+  base::FileSystemType fs_type = base::FILE_SYSTEM_UNKNOWN;
+  if (base::GetFileSystemType(database_dir, &fs_type))
+    db_on_nfs = (fs_type == base::FILE_SYSTEM_NFS);
+#elif defined(OS_OPENBSD)
+  struct statfs buf;
+  if (statfs(database_dir.value().c_str(), &buf) == 0)
+    db_on_nfs = (strcmp(buf.f_fstypename, MOUNT_NFS) == 0);
+#else
+  NOTIMPLEMENTED();
+#endif
+
+  if (db_on_nfs) {
+    std::unique_ptr<base::Environment> env(base::Environment::Create());
+    static const char kUseCacheEnvVar[] = "NSS_SDB_USE_CACHE";
+    // Respect an explicit user setting; only set the variable when unset.
+    if (!env->HasVar(kUseCacheEnvVar))
+      env->SetVar(kUseCacheEnvVar, "yes");
+  }
+}
+
+#endif // defined(USE_NSS_CERTS)
+
+// A singleton to initialize/deinitialize NSPR.
+// Separate from the NSS singleton because we initialize NSPR on the UI
+// thread. Now that we're leaking the singleton, we could merge back with the
+// NSS singleton.
+class NSPRInitSingleton {
+ private:
+  friend struct base::DefaultLazyInstanceTraits<NSPRInitSingleton>;
+
+  NSPRInitSingleton() {
+    PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);
+  }
+
+  // NOTE(willchan): We don't actually execute this code since we leak NSS to
+  // prevent non-joinable threads from using NSS after it's already been shut
+  // down.
+  ~NSPRInitSingleton() {
+    PL_ArenaFinish();
+    PRStatus prstatus = PR_Cleanup();
+    if (prstatus != PR_SUCCESS)
+      LOG(ERROR) << "PR_Cleanup failed; was NSPR initialized on wrong thread?";
+  }
+};
+
+base::LazyInstance<NSPRInitSingleton>::Leaky
+ g_nspr_singleton = LAZY_INSTANCE_INITIALIZER;
+
+// Force a crash with error info on NSS_NoDB_Init failure. The error codes
+// are aliased so they survive optimization and show up in the minidump.
+void CrashOnNSSInitFailure() {
+  int nss_error = PR_GetError();
+  int os_error = PR_GetOSError();
+  base::debug::Alias(&nss_error);
+  base::debug::Alias(&os_error);
+  LOG(ERROR) << "Error initializing NSS without a persistent database: "
+             << GetNSSErrorMessage();
+  LOG(FATAL) << "nss_error=" << nss_error << ", os_error=" << os_error;
+}
+
+#if defined(OS_CHROMEOS)
+// Per-user NSS database state on Chrome OS: owns the user's public slot,
+// tracks the (possibly TPM-backed) private slot, and queues callbacks from
+// GetPrivateSlot() until SetPrivateSlot() supplies the slot.
+class ChromeOSUserData {
+ public:
+  explicit ChromeOSUserData(ScopedPK11Slot public_slot)
+      : public_slot_(std::move(public_slot)),
+        private_slot_initialization_started_(false) {}
+  ~ChromeOSUserData() {
+    if (public_slot_) {
+      SECStatus status = SECMOD_CloseUserDB(public_slot_.get());
+      if (status != SECSuccess)
+        PLOG(ERROR) << "SECMOD_CloseUserDB failed: " << PORT_GetError();
+    }
+  }
+
+  // Returns a fresh reference to the public slot (NULL slot if none).
+  ScopedPK11Slot GetPublicSlot() {
+    return ScopedPK11Slot(
+        public_slot_ ? PK11_ReferenceSlot(public_slot_.get()) : NULL);
+  }
+
+  // Returns the private slot if it is ready; otherwise queues |callback| to
+  // be run once SetPrivateSlot() is called and returns an empty slot.
+  ScopedPK11Slot GetPrivateSlot(
+      const base::Callback<void(ScopedPK11Slot)>& callback) {
+    if (private_slot_)
+      return ScopedPK11Slot(PK11_ReferenceSlot(private_slot_.get()));
+    if (!callback.is_null())
+      tpm_ready_callback_list_.push_back(callback);
+    return ScopedPK11Slot();
+  }
+
+  // Installs the private slot (exactly once) and runs all queued callbacks,
+  // each with its own reference to the slot.
+  void SetPrivateSlot(ScopedPK11Slot private_slot) {
+    DCHECK(!private_slot_);
+    private_slot_ = std::move(private_slot);
+
+    SlotReadyCallbackList callback_list;
+    callback_list.swap(tpm_ready_callback_list_);
+    for (SlotReadyCallbackList::iterator i = callback_list.begin();
+         i != callback_list.end();
+         ++i) {
+      (*i).Run(ScopedPK11Slot(PK11_ReferenceSlot(private_slot_.get())));
+    }
+  }
+
+  bool private_slot_initialization_started() const {
+    return private_slot_initialization_started_;
+  }
+
+  void set_private_slot_initialization_started() {
+    private_slot_initialization_started_ = true;
+  }
+
+ private:
+  ScopedPK11Slot public_slot_;
+  ScopedPK11Slot private_slot_;
+
+  // Guards against starting private-slot initialization more than once.
+  bool private_slot_initialization_started_;
+
+  typedef std::vector<base::Callback<void(ScopedPK11Slot)> >
+      SlotReadyCallbackList;
+  // Callbacks waiting for the private slot to become available.
+  SlotReadyCallbackList tpm_ready_callback_list_;
+};
+
+// Scoped helper that works around a symbol conflict when loading the Chaps
+// PKCS#11 library in component builds; a no-op otherwise (see below).
+class ScopedChapsLoadFixup {
+ public:
+ ScopedChapsLoadFixup();
+ ~ScopedChapsLoadFixup();
+
+ private:
+#if defined(COMPONENT_BUILD)
+ // dlopen() handle for libchaps; may be null if the load failed.
+ void *chaps_handle_;
+#endif
+};
+
+#if defined(COMPONENT_BUILD)
+
+ScopedChapsLoadFixup::ScopedChapsLoadFixup() {
+ // HACK: libchaps links the system protobuf and there are symbol conflicts
+ // with the bundled copy. Load chaps with RTLD_DEEPBIND to workaround.
+ chaps_handle_ = dlopen(kChapsPath, RTLD_LOCAL | RTLD_NOW | RTLD_DEEPBIND);
+}
+
+ScopedChapsLoadFixup::~ScopedChapsLoadFixup() {
+ // LoadModule() will have taken a 2nd reference.
+ if (chaps_handle_)
+ dlclose(chaps_handle_);
+}
+
+#else
+
+// Non-component builds do not need the workaround; these are no-ops.
+ScopedChapsLoadFixup::ScopedChapsLoadFixup() {}
+ScopedChapsLoadFixup::~ScopedChapsLoadFixup() {}
+
+#endif // defined(COMPONENT_BUILD)
+#endif // defined(OS_CHROMEOS)
+
+// Leaky singleton that performs NSS library initialization and, on
+// Chrome OS, manages per-user NSS databases and the TPM-backed Chaps token.
+// Unless noted otherwise, non-static methods must be called on the thread
+// checked by |thread_checker_|.
+class NSSInitSingleton {
+ public:
+#if defined(OS_CHROMEOS)
+ // Used with PostTaskAndReply to pass handles to worker thread and back.
+ struct TPMModuleAndSlot {
+ explicit TPMModuleAndSlot(SECMODModule* init_chaps_module)
+ : chaps_module(init_chaps_module) {}
+ SECMODModule* chaps_module;
+ crypto::ScopedPK11Slot tpm_slot;
+ };
+
+ // Creates (if necessary) and opens the software NSS database named
+ // |db_name| under |path|/.pki/nssdb. Returns a null slot on failure.
+ ScopedPK11Slot OpenPersistentNSSDBForPath(const std::string& db_name,
+ const base::FilePath& path) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // NSS is allowed to do IO on the current thread since dispatching
+ // to a dedicated thread would still have the effect of blocking
+ // the current thread, due to NSS's internal locking requirements.
+ base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+ base::FilePath nssdb_path = path.AppendASCII(".pki").AppendASCII("nssdb");
+ if (!base::CreateDirectory(nssdb_path)) {
+ LOG(ERROR) << "Failed to create " << nssdb_path.value() << " directory.";
+ return ScopedPK11Slot();
+ }
+ return OpenSoftwareNSSDB(nssdb_path, db_name);
+ }
+
+ void EnableTPMTokenForNSS() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // If this gets set, then we'll use the TPM for certs with
+ // private keys, otherwise we'll fall back to the software
+ // implementation.
+ tpm_token_enabled_for_nss_ = true;
+ }
+
+ bool IsTPMTokenEnabledForNSS() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return tpm_token_enabled_for_nss_;
+ }
+
+ // Asynchronously loads Chaps and looks up the slot |system_slot_id|.
+ // |callback| is always invoked (possibly asynchronously) with the result.
+ void InitializeTPMTokenAndSystemSlot(
+ int system_slot_id,
+ const base::Callback<void(bool)>& callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // Should not be called while there is already an initialization in
+ // progress.
+ DCHECK(!initializing_tpm_token_);
+ // If EnableTPMTokenForNSS hasn't been called, return false.
+ if (!tpm_token_enabled_for_nss_) {
+ base::MessageLoop::current()->PostTask(FROM_HERE,
+ base::Bind(callback, false));
+ return;
+ }
+
+ // If everything is already initialized, then return true.
+ // Note that only |tpm_slot_| is checked, since |chaps_module_| could be
+ // NULL in tests while |tpm_slot_| has been set to the test DB.
+ if (tpm_slot_) {
+ base::MessageLoop::current()->PostTask(FROM_HERE,
+ base::Bind(callback, true));
+ return;
+ }
+
+ // Note that a reference is not taken to chaps_module_. This is safe since
+ // NSSInitSingleton is Leaky, so the reference it holds is never released.
+ std::unique_ptr<TPMModuleAndSlot> tpm_args(
+ new TPMModuleAndSlot(chaps_module_));
+ TPMModuleAndSlot* tpm_args_ptr = tpm_args.get();
+ if (base::WorkerPool::PostTaskAndReply(
+ FROM_HERE,
+ base::Bind(&NSSInitSingleton::InitializeTPMTokenOnWorkerThread,
+ system_slot_id,
+ tpm_args_ptr),
+ base::Bind(&NSSInitSingleton::OnInitializedTPMTokenAndSystemSlot,
+ base::Unretained(this), // NSSInitSingleton is leaky
+ callback,
+ base::Passed(&tpm_args)),
+ true /* task_is_slow */
+ )) {
+ initializing_tpm_token_ = true;
+ } else {
+ base::MessageLoop::current()->PostTask(FROM_HERE,
+ base::Bind(callback, false));
+ }
+ }
+
+ // Runs on a worker thread; fills in |tpm_args| but must not touch any
+ // NSSInitSingleton members (hence static).
+ static void InitializeTPMTokenOnWorkerThread(CK_SLOT_ID token_slot_id,
+ TPMModuleAndSlot* tpm_args) {
+ // This tries to load the Chaps module so NSS can talk to the hardware
+ // TPM.
+ if (!tpm_args->chaps_module) {
+ ScopedChapsLoadFixup chaps_loader;
+
+ DVLOG(3) << "Loading chaps...";
+ tpm_args->chaps_module = LoadModule(
+ kChapsModuleName,
+ kChapsPath,
+ // For more details on these parameters, see:
+ // https://developer.mozilla.org/en/PKCS11_Module_Specs
+ // slotFlags=[PublicCerts] -- Certificates and public keys can be
+ // read from this slot without requiring a call to C_Login.
+ // askpw=only -- Only authenticate to the token when necessary.
+ "NSS=\"slotParams=(0={slotFlags=[PublicCerts] askpw=only})\"");
+ }
+ if (tpm_args->chaps_module) {
+ tpm_args->tpm_slot =
+ GetTPMSlotForIdOnWorkerThread(tpm_args->chaps_module, token_slot_id);
+ }
+ }
+
+ // Reply half of InitializeTPMTokenAndSystemSlot; records the results and
+ // notifies |callback| plus any queued TPM-ready callbacks.
+ void OnInitializedTPMTokenAndSystemSlot(
+ const base::Callback<void(bool)>& callback,
+ std::unique_ptr<TPMModuleAndSlot> tpm_args) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DVLOG(2) << "Loaded chaps: " << !!tpm_args->chaps_module
+ << ", got tpm slot: " << !!tpm_args->tpm_slot;
+
+ chaps_module_ = tpm_args->chaps_module;
+ tpm_slot_ = std::move(tpm_args->tpm_slot);
+ if (!chaps_module_ && test_system_slot_) {
+ // chromeos_unittests try to test the TPM initialization process. If we
+ // have a test DB open, pretend that it is the TPM slot.
+ tpm_slot_.reset(PK11_ReferenceSlot(test_system_slot_.get()));
+ }
+ initializing_tpm_token_ = false;
+
+ if (tpm_slot_)
+ RunAndClearTPMReadyCallbackList();
+
+ callback.Run(!!tpm_slot_);
+ }
+
+ // Runs, then clears, every callback queued via IsTPMTokenReady().
+ void RunAndClearTPMReadyCallbackList() {
+ TPMReadyCallbackList callback_list;
+ callback_list.swap(tpm_ready_callback_list_);
+ for (TPMReadyCallbackList::iterator i = callback_list.begin();
+ i != callback_list.end();
+ ++i) {
+ i->Run();
+ }
+ }
+
+ // Returns true if |tpm_slot_| is available; otherwise queues |callback|
+ // (if non-null) to run once it becomes available.
+ bool IsTPMTokenReady(const base::Closure& callback) {
+ if (!callback.is_null()) {
+ // Cannot DCHECK in the general case yet, but since the callback is
+ // a new addition to the API, DCHECK to make sure at least the new uses
+ // don't regress.
+ DCHECK(thread_checker_.CalledOnValidThread());
+ } else if (!thread_checker_.CalledOnValidThread()) {
+ // TODO(mattm): Change to DCHECK when callers have been fixed.
+ DVLOG(1) << "Called on wrong thread.\n"
+ << base::debug::StackTrace().ToString();
+ }
+
+ if (tpm_slot_)
+ return true;
+
+ if (!callback.is_null())
+ tpm_ready_callback_list_.push_back(callback);
+
+ return false;
+ }
+
+ // Note that CK_SLOT_ID is an unsigned long, but cryptohome gives us the slot
+ // id as an int. This should be safe since this is only used with chaps, which
+ // we also control.
+ static crypto::ScopedPK11Slot GetTPMSlotForIdOnWorkerThread(
+ SECMODModule* chaps_module,
+ CK_SLOT_ID slot_id) {
+ DCHECK(chaps_module);
+
+ DVLOG(3) << "Poking chaps module.";
+ SECStatus rv = SECMOD_UpdateSlotList(chaps_module);
+ if (rv != SECSuccess)
+ PLOG(ERROR) << "SECMOD_UpdateSlotList failed: " << PORT_GetError();
+
+ PK11SlotInfo* slot = SECMOD_LookupSlot(chaps_module->moduleID, slot_id);
+ if (!slot)
+ LOG(ERROR) << "TPM slot " << slot_id << " not found.";
+ return crypto::ScopedPK11Slot(slot);
+ }
+
+ // Opens the user's software DB and records the user in
+ // |chromeos_user_map_|. Returns false if the user was already added.
+ bool InitializeNSSForChromeOSUser(const std::string& username_hash,
+ const base::FilePath& path) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (chromeos_user_map_.find(username_hash) != chromeos_user_map_.end()) {
+ // This user already exists in our mapping.
+ DVLOG(2) << username_hash << " already initialized.";
+ return false;
+ }
+
+ DVLOG(2) << "Opening NSS DB " << path.value();
+ std::string db_name = base::StringPrintf(
+ "%s %s", kUserNSSDatabaseName, username_hash.c_str());
+ ScopedPK11Slot public_slot(OpenPersistentNSSDBForPath(db_name, path));
+ chromeos_user_map_[username_hash] =
+ new ChromeOSUserData(std::move(public_slot));
+ return true;
+ }
+
+ bool ShouldInitializeTPMForChromeOSUser(const std::string& username_hash) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(chromeos_user_map_.find(username_hash) != chromeos_user_map_.end());
+
+ return !chromeos_user_map_[username_hash]
+ ->private_slot_initialization_started();
+ }
+
+ void WillInitializeTPMForChromeOSUser(const std::string& username_hash) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(chromeos_user_map_.find(username_hash) != chromeos_user_map_.end());
+
+ chromeos_user_map_[username_hash]
+ ->set_private_slot_initialization_started();
+ }
+
+ // Asynchronously loads the user's TPM slot |slot_id| via Chaps. No-op if
+ // Chaps has not been loaded.
+ void InitializeTPMForChromeOSUser(const std::string& username_hash,
+ CK_SLOT_ID slot_id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(chromeos_user_map_.find(username_hash) != chromeos_user_map_.end());
+ DCHECK(chromeos_user_map_[username_hash]->
+ private_slot_initialization_started());
+
+ if (!chaps_module_)
+ return;
+
+ // Note that a reference is not taken to chaps_module_. This is safe since
+ // NSSInitSingleton is Leaky, so the reference it holds is never released.
+ std::unique_ptr<TPMModuleAndSlot> tpm_args(
+ new TPMModuleAndSlot(chaps_module_));
+ TPMModuleAndSlot* tpm_args_ptr = tpm_args.get();
+ base::WorkerPool::PostTaskAndReply(
+ FROM_HERE,
+ base::Bind(&NSSInitSingleton::InitializeTPMTokenOnWorkerThread,
+ slot_id,
+ tpm_args_ptr),
+ base::Bind(&NSSInitSingleton::OnInitializedTPMForChromeOSUser,
+ base::Unretained(this), // NSSInitSingleton is leaky
+ username_hash,
+ base::Passed(&tpm_args)),
+ true /* task_is_slow */
+ );
+ }
+
+ // Reply half of InitializeTPMForChromeOSUser; hands the slot to the user's
+ // ChromeOSUserData, which runs any queued private-slot callbacks.
+ void OnInitializedTPMForChromeOSUser(
+ const std::string& username_hash,
+ std::unique_ptr<TPMModuleAndSlot> tpm_args) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DVLOG(2) << "Got tpm slot for " << username_hash << " "
+ << !!tpm_args->tpm_slot;
+ chromeos_user_map_[username_hash]->SetPrivateSlot(
+ std::move(tpm_args->tpm_slot));
+ }
+
+ // Uses the user's public (software) slot as the private slot too, for
+ // users without a TPM-backed token.
+ void InitializePrivateSoftwareSlotForChromeOSUser(
+ const std::string& username_hash) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ VLOG(1) << "using software private slot for " << username_hash;
+ DCHECK(chromeos_user_map_.find(username_hash) != chromeos_user_map_.end());
+ DCHECK(chromeos_user_map_[username_hash]->
+ private_slot_initialization_started());
+
+ chromeos_user_map_[username_hash]->SetPrivateSlot(
+ chromeos_user_map_[username_hash]->GetPublicSlot());
+ }
+
+ ScopedPK11Slot GetPublicSlotForChromeOSUser(
+ const std::string& username_hash) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (username_hash.empty()) {
+ DVLOG(2) << "empty username_hash";
+ return ScopedPK11Slot();
+ }
+
+ if (chromeos_user_map_.find(username_hash) == chromeos_user_map_.end()) {
+ LOG(ERROR) << username_hash << " not initialized.";
+ return ScopedPK11Slot();
+ }
+ return chromeos_user_map_[username_hash]->GetPublicSlot();
+ }
+
+ ScopedPK11Slot GetPrivateSlotForChromeOSUser(
+ const std::string& username_hash,
+ const base::Callback<void(ScopedPK11Slot)>& callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (username_hash.empty()) {
+ DVLOG(2) << "empty username_hash";
+ if (!callback.is_null()) {
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE, base::Bind(callback, base::Passed(ScopedPK11Slot())));
+ }
+ return ScopedPK11Slot();
+ }
+
+ DCHECK(chromeos_user_map_.find(username_hash) != chromeos_user_map_.end());
+
+ return chromeos_user_map_[username_hash]->GetPrivateSlot(callback);
+ }
+
+ // Closes and removes the user's DB entry; destroying the ChromeOSUserData
+ // closes the underlying user DB (see ~ChromeOSUserData).
+ void CloseChromeOSUserForTesting(const std::string& username_hash) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ ChromeOSUserMap::iterator i = chromeos_user_map_.find(username_hash);
+ DCHECK(i != chromeos_user_map_.end());
+ delete i->second;
+ chromeos_user_map_.erase(i);
+ }
+
+ void SetSystemKeySlotForTesting(ScopedPK11Slot slot) {
+ // Ensure that a previous value of test_system_slot_ is not overwritten.
+ // Unsetting, i.e. setting a NULL, however is allowed.
+ DCHECK(!slot || !test_system_slot_);
+ test_system_slot_ = std::move(slot);
+ if (test_system_slot_) {
+ tpm_slot_.reset(PK11_ReferenceSlot(test_system_slot_.get()));
+ RunAndClearTPMReadyCallbackList();
+ } else {
+ tpm_slot_.reset();
+ }
+ }
+#endif // defined(OS_CHROMEOS)
+
+#if !defined(OS_CHROMEOS)
+ PK11SlotInfo* GetPersistentNSSKeySlot() {
+ // TODO(mattm): Change to DCHECK when callers have been fixed.
+ if (!thread_checker_.CalledOnValidThread()) {
+ DVLOG(1) << "Called on wrong thread.\n"
+ << base::debug::StackTrace().ToString();
+ }
+
+ return PK11_GetInternalKeySlot();
+ }
+#endif
+
+#if defined(OS_CHROMEOS)
+ // Adapter so GetSystemNSSKeySlot's ScopedPK11Slot callback can be queued
+ // as a base::Closure via IsTPMTokenReady.
+ void GetSystemNSSKeySlotCallback(
+ const base::Callback<void(ScopedPK11Slot)>& callback) {
+ callback.Run(ScopedPK11Slot(PK11_ReferenceSlot(tpm_slot_.get())));
+ }
+
+ ScopedPK11Slot GetSystemNSSKeySlot(
+ const base::Callback<void(ScopedPK11Slot)>& callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // TODO(mattm): chromeos::TPMTokenloader always calls
+ // InitializeTPMTokenAndSystemSlot with slot 0. If the system slot is
+ // disabled, tpm_slot_ will be the first user's slot instead. Can that be
+ // detected and return NULL instead?
+
+ base::Closure wrapped_callback;
+ if (!callback.is_null()) {
+ wrapped_callback =
+ base::Bind(&NSSInitSingleton::GetSystemNSSKeySlotCallback,
+ base::Unretained(this) /* singleton is leaky */,
+ callback);
+ }
+ if (IsTPMTokenReady(wrapped_callback))
+ return ScopedPK11Slot(PK11_ReferenceSlot(tpm_slot_.get()));
+ return ScopedPK11Slot();
+ }
+#endif
+
+#if defined(USE_NSS_CERTS)
+ base::Lock* write_lock() {
+ return &write_lock_;
+ }
+#endif // defined(USE_NSS_CERTS)
+
+ private:
+ friend struct base::DefaultLazyInstanceTraits<NSSInitSingleton>;
+
+ NSSInitSingleton()
+ : tpm_token_enabled_for_nss_(false),
+ initializing_tpm_token_(false),
+ chaps_module_(NULL),
+ root_(NULL) {
+ // It's safe to construct on any thread, since LazyInstance will prevent any
+ // other threads from accessing until the constructor is done.
+ thread_checker_.DetachFromThread();
+
+ EnsureNSPRInit();
+
+ // We *must* have NSS >= 3.14.3.
+ static_assert(
+ (NSS_VMAJOR == 3 && NSS_VMINOR == 14 && NSS_VPATCH >= 3) ||
+ (NSS_VMAJOR == 3 && NSS_VMINOR > 14) ||
+ (NSS_VMAJOR > 3),
+ "nss version check failed");
+ // Also check the run-time NSS version.
+ // NSS_VersionCheck is a >= check, not strict equality.
+ if (!NSS_VersionCheck("3.14.3")) {
+ LOG(FATAL) << "NSS_VersionCheck(\"3.14.3\") failed. NSS >= 3.14.3 is "
+ "required. Please upgrade to the latest NSS, and if you "
+ "still get this error, contact your distribution "
+ "maintainer.";
+ }
+
+ SECStatus status = SECFailure;
+ bool nodb_init = false;
+
+#if !defined(USE_NSS_CERTS)
+ // Use the system certificate store, so initialize NSS without database.
+ nodb_init = true;
+#endif
+
+ if (nodb_init) {
+ status = NSS_NoDB_Init(NULL);
+ if (status != SECSuccess) {
+ CrashOnNSSInitFailure();
+ return;
+ }
+#if defined(OS_IOS)
+ root_ = InitDefaultRootCerts();
+#endif // defined(OS_IOS)
+ } else {
+#if defined(USE_NSS_CERTS)
+ base::FilePath database_dir = GetInitialConfigDirectory();
+ if (!database_dir.empty()) {
+ // This duplicates the work which should have been done in
+ // EarlySetupForNSSInit. However, this function is idempotent so
+ // there's no harm done.
+ UseLocalCacheOfNSSDatabaseIfNFS(database_dir);
+
+ // Initialize with a persistent database (likely, ~/.pki/nssdb).
+ // Use "sql:" which can be shared by multiple processes safely.
+ std::string nss_config_dir =
+ base::StringPrintf("sql:%s", database_dir.value().c_str());
+#if defined(OS_CHROMEOS)
+ status = NSS_Init(nss_config_dir.c_str());
+#else
+ status = NSS_InitReadWrite(nss_config_dir.c_str());
+#endif
+ if (status != SECSuccess) {
+ LOG(ERROR) << "Error initializing NSS with a persistent "
+ "database (" << nss_config_dir
+ << "): " << GetNSSErrorMessage();
+ }
+ }
+ if (status != SECSuccess) {
+ VLOG(1) << "Initializing NSS without a persistent database.";
+ status = NSS_NoDB_Init(NULL);
+ if (status != SECSuccess) {
+ CrashOnNSSInitFailure();
+ return;
+ }
+ }
+
+ PK11_SetPasswordFunc(PKCS11PasswordFunc);
+
+ // If we haven't initialized the password for the NSS databases,
+ // initialize an empty-string password so that we don't need to
+ // log in.
+ PK11SlotInfo* slot = PK11_GetInternalKeySlot();
+ if (slot) {
+ // PK11_InitPin may write to the keyDB, but no other thread can use NSS
+ // yet, so we don't need to lock.
+ if (PK11_NeedUserInit(slot))
+ PK11_InitPin(slot, NULL, NULL);
+ PK11_FreeSlot(slot);
+ }
+
+ root_ = InitDefaultRootCerts();
+#endif // defined(USE_NSS_CERTS)
+ }
+
+ // Disable MD5 certificate signatures. (They are disabled by default in
+ // NSS 3.14.)
+ NSS_SetAlgorithmPolicy(SEC_OID_MD5, 0, NSS_USE_ALG_IN_CERT_SIGNATURE);
+ NSS_SetAlgorithmPolicy(SEC_OID_PKCS1_MD5_WITH_RSA_ENCRYPTION,
+ 0, NSS_USE_ALG_IN_CERT_SIGNATURE);
+ }
+
+ // NOTE(willchan): We don't actually execute this code since we leak NSS to
+ // prevent non-joinable threads from using NSS after it's already been shut
+ // down.
+ ~NSSInitSingleton() {
+#if defined(OS_CHROMEOS)
+ STLDeleteValues(&chromeos_user_map_);
+#endif
+ tpm_slot_.reset();
+ if (root_) {
+ SECMOD_UnloadUserModule(root_);
+ SECMOD_DestroyModule(root_);
+ root_ = NULL;
+ }
+ if (chaps_module_) {
+ SECMOD_UnloadUserModule(chaps_module_);
+ SECMOD_DestroyModule(chaps_module_);
+ chaps_module_ = NULL;
+ }
+
+ SECStatus status = NSS_Shutdown();
+ if (status != SECSuccess) {
+ // We VLOG(1) because this failure is relatively harmless (leaking, but
+ // we're shutting down anyway).
+ VLOG(1) << "NSS_Shutdown failed; see http://crbug.com/4609";
+ }
+ }
+
+ // Load nss's built-in root certs.
+ SECMODModule* InitDefaultRootCerts() {
+ SECMODModule* root = LoadModule("Root Certs", "libnssckbi.so", NULL);
+ if (root)
+ return root;
+
+ // Aw, snap. Can't find/load root cert shared library.
+ // This will make it hard to talk to anybody via https.
+ // TODO(mattm): Re-add the NOTREACHED here when crbug.com/310972 is fixed.
+ return NULL;
+ }
+
+ // Load the given module for this NSS session.
+ static SECMODModule* LoadModule(const char* name,
+ const char* library_path,
+ const char* params) {
+ std::string modparams = base::StringPrintf(
+ "name=\"%s\" library=\"%s\" %s",
+ name, library_path, params ? params : "");
+
+ // Shouldn't need to const_cast here, but SECMOD doesn't properly
+ // declare input string arguments as const. Bug
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=642546 was filed
+ // on NSS codebase to address this.
+ SECMODModule* module = SECMOD_LoadUserModule(
+ const_cast<char*>(modparams.c_str()), NULL, PR_FALSE);
+ if (!module) {
+ LOG(ERROR) << "Error loading " << name << " module into NSS: "
+ << GetNSSErrorMessage();
+ return NULL;
+ }
+ if (!module->loaded) {
+ LOG(ERROR) << "After loading " << name << ", loaded==false: "
+ << GetNSSErrorMessage();
+ SECMOD_DestroyModule(module);
+ return NULL;
+ }
+ return module;
+ }
+
+ bool tpm_token_enabled_for_nss_;
+ bool initializing_tpm_token_;
+ typedef std::vector<base::Closure> TPMReadyCallbackList;
+ TPMReadyCallbackList tpm_ready_callback_list_;
+ SECMODModule* chaps_module_;
+ crypto::ScopedPK11Slot tpm_slot_;
+ SECMODModule* root_; // Built-in root certs; see InitDefaultRootCerts().
+#if defined(OS_CHROMEOS)
+ typedef std::map<std::string, ChromeOSUserData*> ChromeOSUserMap;
+ ChromeOSUserMap chromeos_user_map_;
+ ScopedPK11Slot test_system_slot_;
+#endif
+#if defined(USE_NSS_CERTS)
+ // TODO(davidben): When https://bugzilla.mozilla.org/show_bug.cgi?id=564011
+ // is fixed, we will no longer need the lock.
+ base::Lock write_lock_;
+#endif // defined(USE_NSS_CERTS)
+
+ base::ThreadChecker thread_checker_;
+};
+
+base::LazyInstance<NSSInitSingleton>::Leaky
+ g_nss_singleton = LAZY_INSTANCE_INITIALIZER;
+} // namespace
+
+#if defined(USE_NSS_CERTS)
+ScopedPK11Slot OpenSoftwareNSSDB(const base::FilePath& path,
+ const std::string& description) {
+ // "sql:" (configDir) databases can be opened safely by multiple processes.
+ const std::string modspec =
+ base::StringPrintf("configDir='sql:%s' tokenDescription='%s'",
+ path.value().c_str(),
+ description.c_str());
+ PK11SlotInfo* db_slot = SECMOD_OpenUserDB(modspec.c_str());
+ if (db_slot) {
+ // Give a freshly created database an empty-string PIN so later key
+ // operations do not require a login.
+ if (PK11_NeedUserInit(db_slot))
+ PK11_InitPin(db_slot, NULL, NULL);
+ } else {
+ LOG(ERROR) << "Error opening persistent database (" << modspec
+ << "): " << GetNSSErrorMessage();
+ }
+ return ScopedPK11Slot(db_slot);
+}
+
+// May set the NSS_SDB_USE_CACHE environment variable (see
+// UseLocalCacheOfNSSDatabaseIfNFS), which is why this must run before the
+// process goes multithreaded.
+void EarlySetupForNSSInit() {
+ base::FilePath database_dir = GetInitialConfigDirectory();
+ if (!database_dir.empty())
+ UseLocalCacheOfNSSDatabaseIfNFS(database_dir);
+}
+#endif
+
+// Instantiating the leaky LazyInstance runs NSPRInitSingleton's constructor
+// exactly once, on whichever thread gets here first.
+void EnsureNSPRInit() {
+ g_nspr_singleton.Get();
+}
+
+// Instantiating the leaky LazyInstance runs NSSInitSingleton's constructor
+// (the full NSS initialization) exactly once.
+void EnsureNSSInit() {
+ // Initializing SSL causes us to do blocking IO.
+ // Temporarily allow it until we fix
+ // http://code.google.com/p/chromium/issues/detail?id=59847
+ base::ThreadRestrictions::ScopedAllowIO allow_io;
+ g_nss_singleton.Get();
+}
+
+// NSS_VersionCheck is a >= comparison against the run-time NSS version.
+bool CheckNSSVersion(const char* version) {
+ return !!NSS_VersionCheck(version);
+}
+
+#if defined(USE_NSS_CERTS)
+base::Lock* GetNSSWriteLock() {
+ return g_nss_singleton.Get().write_lock();
+}
+
+AutoNSSWriteLock::AutoNSSWriteLock() : lock_(GetNSSWriteLock()) {
+ // May be NULL if the lock is not needed in our version of NSS.
+ if (lock_)
+ lock_->Acquire();
+}
+
+AutoNSSWriteLock::~AutoNSSWriteLock() {
+ if (lock_) {
+ lock_->AssertAcquired();
+ lock_->Release();
+ }
+}
+
+// Holds NSS's default module-list lock for reading while in scope.
+AutoSECMODListReadLock::AutoSECMODListReadLock()
+ : lock_(SECMOD_GetDefaultModuleListLock()) {
+ SECMOD_GetReadLock(lock_);
+ }
+
+AutoSECMODListReadLock::~AutoSECMODListReadLock() {
+ SECMOD_ReleaseReadLock(lock_);
+}
+#endif // defined(USE_NSS_CERTS)
+
+#if defined(OS_CHROMEOS)
+// The public Chrome OS API below consists of thin wrappers that forward to
+// the leaky NSSInitSingleton; see the corresponding methods above and the
+// comments in nss_util.h / nss_util_internal.h for contracts.
+ScopedPK11Slot GetSystemNSSKeySlot(
+ const base::Callback<void(ScopedPK11Slot)>& callback) {
+ return g_nss_singleton.Get().GetSystemNSSKeySlot(callback);
+}
+
+void SetSystemKeySlotForTesting(ScopedPK11Slot slot) {
+ g_nss_singleton.Get().SetSystemKeySlotForTesting(std::move(slot));
+}
+
+void EnableTPMTokenForNSS() {
+ g_nss_singleton.Get().EnableTPMTokenForNSS();
+}
+
+bool IsTPMTokenEnabledForNSS() {
+ return g_nss_singleton.Get().IsTPMTokenEnabledForNSS();
+}
+
+bool IsTPMTokenReady(const base::Closure& callback) {
+ return g_nss_singleton.Get().IsTPMTokenReady(callback);
+}
+
+void InitializeTPMTokenAndSystemSlot(
+ int token_slot_id,
+ const base::Callback<void(bool)>& callback) {
+ g_nss_singleton.Get().InitializeTPMTokenAndSystemSlot(token_slot_id,
+ callback);
+}
+
+bool InitializeNSSForChromeOSUser(const std::string& username_hash,
+ const base::FilePath& path) {
+ return g_nss_singleton.Get().InitializeNSSForChromeOSUser(username_hash,
+ path);
+}
+
+bool ShouldInitializeTPMForChromeOSUser(const std::string& username_hash) {
+ return g_nss_singleton.Get().ShouldInitializeTPMForChromeOSUser(
+ username_hash);
+}
+
+void WillInitializeTPMForChromeOSUser(const std::string& username_hash) {
+ g_nss_singleton.Get().WillInitializeTPMForChromeOSUser(username_hash);
+}
+
+void InitializeTPMForChromeOSUser(
+ const std::string& username_hash,
+ CK_SLOT_ID slot_id) {
+ g_nss_singleton.Get().InitializeTPMForChromeOSUser(username_hash, slot_id);
+}
+
+void InitializePrivateSoftwareSlotForChromeOSUser(
+ const std::string& username_hash) {
+ g_nss_singleton.Get().InitializePrivateSoftwareSlotForChromeOSUser(
+ username_hash);
+}
+
+ScopedPK11Slot GetPublicSlotForChromeOSUser(const std::string& username_hash) {
+ return g_nss_singleton.Get().GetPublicSlotForChromeOSUser(username_hash);
+}
+
+ScopedPK11Slot GetPrivateSlotForChromeOSUser(
+ const std::string& username_hash,
+ const base::Callback<void(ScopedPK11Slot)>& callback) {
+ return g_nss_singleton.Get().GetPrivateSlotForChromeOSUser(username_hash,
+ callback);
+}
+
+void CloseChromeOSUserForTesting(const std::string& username_hash) {
+ g_nss_singleton.Get().CloseChromeOSUserForTesting(username_hash);
+}
+#endif // defined(OS_CHROMEOS)
+
+base::Time PRTimeToBaseTime(PRTime prtime) {
+ // PRTime counts microseconds since the Unix epoch (1970), while
+ // base::Time's internal value counts microseconds since the Windows epoch
+ // (1601); converting is a constant offset in either direction.
+ return base::Time::FromInternalValue(
+ prtime + base::Time::UnixEpoch().ToInternalValue());
+}
+
+PRTime BaseTimeToPRTime(base::Time time) {
+ // Exact inverse of PRTimeToBaseTime.
+ return time.ToInternalValue() - base::Time::UnixEpoch().ToInternalValue();
+}
+
+#if !defined(OS_CHROMEOS)
+// See nss_util_internal.h: the caller must release the returned reference
+// with PK11_FreeSlot.
+PK11SlotInfo* GetPersistentNSSKeySlot() {
+ return g_nss_singleton.Get().GetPersistentNSSKeySlot();
+}
+#endif
+
+} // namespace crypto
diff --git a/libchrome/crypto/nss_util.h b/libchrome/crypto/nss_util.h
new file mode 100644
index 0000000..a8b57ff
--- /dev/null
+++ b/libchrome/crypto/nss_util.h
@@ -0,0 +1,105 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_NSS_UTIL_H_
+#define CRYPTO_NSS_UTIL_H_
+
+#include <stdint.h>
+
+#include <string>
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "crypto/crypto_export.h"
+
+namespace base {
+class FilePath;
+class Lock;
+class Time;
+} // namespace base
+
+// This file specifically doesn't depend on any NSS or NSPR headers because it
+// is included by various (non-crypto) parts of chrome to call the
+// initialization functions.
+namespace crypto {
+
+// EarlySetupForNSSInit performs lightweight setup which must occur before the
+// process goes multithreaded. This does not initialize NSS. To actually
+// initialize NSS, see EnsureNSSInit.
+CRYPTO_EXPORT void EarlySetupForNSSInit();
+
+// Initialize NSPR if it isn't already initialized. This function is
+// thread-safe, and NSPR will only ever be initialized once.
+CRYPTO_EXPORT void EnsureNSPRInit();
+
+// Initialize NSS if it isn't already initialized. This must be called before
+// any other NSS functions. This function is thread-safe, and NSS will only
+// ever be initialized once.
+CRYPTO_EXPORT void EnsureNSSInit();
+
+// Check if the current NSS version is greater than or equal to |version|.
+// A sample version string is "3.12.3".
+bool CheckNSSVersion(const char* version);
+
+#if defined(OS_CHROMEOS)
+// Indicates that NSS should use the Chaps library so that we
+// can access the TPM through NSS. InitializeTPMTokenAndSystemSlot and
+// InitializeTPMForChromeOSUser must still be called to load the slots.
+CRYPTO_EXPORT void EnableTPMTokenForNSS();
+
+// Returns true if EnableTPMTokenForNSS has been called.
+CRYPTO_EXPORT bool IsTPMTokenEnabledForNSS();
+
+// Returns true if the TPM is owned and PKCS#11 initialized with the
+// user and security officer PINs, and has been enabled in NSS by
+// calling EnableTPMForNSS, and Chaps has been successfully
+// loaded into NSS.
+// If |callback| is non-null and the function returns false, the |callback| will
+// be run once the TPM is ready. |callback| will never be run if the function
+// returns true.
+CRYPTO_EXPORT bool IsTPMTokenReady(const base::Closure& callback)
+ WARN_UNUSED_RESULT;
+
+// Initialize the TPM token and system slot. The |callback| will run on the same
+// thread with true if the token and slot were successfully loaded or were
+// already initialized. |callback| will be passed false if loading failed. Once
+// called, InitializeTPMTokenAndSystemSlot must not be called again until the
+// |callback| has been run.
+CRYPTO_EXPORT void InitializeTPMTokenAndSystemSlot(
+ int system_slot_id,
+ const base::Callback<void(bool)>& callback);
+#endif
+
+// Convert a NSS PRTime value into a base::Time object.
+// We use a int64_t instead of PRTime here to avoid depending on NSPR headers.
+CRYPTO_EXPORT base::Time PRTimeToBaseTime(int64_t prtime);
+
+// Convert a base::Time object into a PRTime value.
+// We use a int64_t instead of PRTime here to avoid depending on NSPR headers.
+CRYPTO_EXPORT int64_t BaseTimeToPRTime(base::Time time);
+
+// NSS has a bug which can cause a deadlock or stall in some cases when writing
+// to the certDB and keyDB. It also has a bug which causes concurrent key pair
+// generations to scribble over each other. To work around this, we synchronize
+// writes to the NSS databases with a global lock. The lock is hidden beneath a
+// function for easy disabling when the bug is fixed. Callers should allow for
+// it to return NULL in the future.
+//
+// See https://bugzilla.mozilla.org/show_bug.cgi?id=564011
+base::Lock* GetNSSWriteLock();
+
+// A helper class that acquires the NSS write Lock while the AutoNSSWriteLock
+// is in scope.
+class CRYPTO_EXPORT AutoNSSWriteLock {
+ public:
+ AutoNSSWriteLock();
+ ~AutoNSSWriteLock();
+ private:
+ // From GetNSSWriteLock(); may be null, in which case this is a no-op.
+ base::Lock *lock_;
+ DISALLOW_COPY_AND_ASSIGN(AutoNSSWriteLock);
+};
+
+} // namespace crypto
+
+#endif // CRYPTO_NSS_UTIL_H_
diff --git a/libchrome/crypto/nss_util_internal.h b/libchrome/crypto/nss_util_internal.h
new file mode 100644
index 0000000..697e376
--- /dev/null
+++ b/libchrome/crypto/nss_util_internal.h
@@ -0,0 +1,113 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_NSS_UTIL_INTERNAL_H_
+#define CRYPTO_NSS_UTIL_INTERNAL_H_
+
+#include <secmodt.h>
+
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "crypto/crypto_export.h"
+#include "crypto/scoped_nss_types.h"
+
+namespace base {
+class FilePath;
+}
+
+// These functions return a type defined in an NSS header, and so cannot be
+// declared in nss_util.h. Hence, they are declared here.
+
+namespace crypto {
+
+// Opens an NSS software database in folder |path|, with the (potentially)
+// user-visible description |description|. Returns the slot for the opened
+// database, or nullptr if the database could not be opened.
+CRYPTO_EXPORT ScopedPK11Slot OpenSoftwareNSSDB(const base::FilePath& path,
+ const std::string& description);
+
+#if !defined(OS_CHROMEOS)
+// Returns a reference to the default NSS key slot for storing persistent data.
+// Caller must release returned reference with PK11_FreeSlot.
+CRYPTO_EXPORT PK11SlotInfo* GetPersistentNSSKeySlot() WARN_UNUSED_RESULT;
+#endif
+
+// A helper class that acquires the SECMOD list read lock while the
+// AutoSECMODListReadLock is in scope.
+class CRYPTO_EXPORT AutoSECMODListReadLock {
+ public:
+ AutoSECMODListReadLock();
+ ~AutoSECMODListReadLock();
+
+ private:
+ // NSS's default module-list lock; owned by NSS, not by this class.
+ SECMODListLock* lock_;
+ DISALLOW_COPY_AND_ASSIGN(AutoSECMODListReadLock);
+};
+
+#if defined(OS_CHROMEOS)
+// Returns a reference to the system-wide TPM slot if it is loaded. If it is not
+// loaded and |callback| is non-null, the |callback| will be run once the slot
+// is loaded.
+CRYPTO_EXPORT ScopedPK11Slot GetSystemNSSKeySlot(
+ const base::Callback<void(ScopedPK11Slot)>& callback) WARN_UNUSED_RESULT;
+
+// Sets the test system slot to |slot|, which means that |slot| will be exposed
+// through |GetSystemNSSKeySlot| and |IsTPMTokenReady| will return true.
+// |InitializeTPMTokenAndSystemSlot|, which triggers the TPM initialization,
+// does not have to be called if the test system slot is set.
+// This must not be called consecutively with a |slot| != nullptr. If
+// |slot| is nullptr, the test system slot is unset.
+CRYPTO_EXPORT void SetSystemKeySlotForTesting(ScopedPK11Slot slot);
+
+// Prepare per-user NSS slot mapping. It is safe to call this function multiple
+// times. Returns true if the user was added, or false if it already existed.
+CRYPTO_EXPORT bool InitializeNSSForChromeOSUser(
+ const std::string& username_hash,
+ const base::FilePath& path);
+
+// Returns whether TPM for ChromeOS user still needs initialization. If
+// true is returned, the caller can proceed to initialize TPM slot for the
+// user, but should call |WillInitializeTPMForChromeOSUser| first.
+// |InitializeNSSForChromeOSUser| must have been called first.
+CRYPTO_EXPORT bool ShouldInitializeTPMForChromeOSUser(
+ const std::string& username_hash) WARN_UNUSED_RESULT;
+
+// Makes |ShouldInitializeTPMForChromeOSUser| start returning false.
+// Should be called before starting TPM initialization for the user.
+// Assumes |InitializeNSSForChromeOSUser| had already been called.
+CRYPTO_EXPORT void WillInitializeTPMForChromeOSUser(
+ const std::string& username_hash);
+
+// Use TPM slot |slot_id| for user. InitializeNSSForChromeOSUser must have been
+// called first.
+CRYPTO_EXPORT void InitializeTPMForChromeOSUser(
+ const std::string& username_hash,
+ CK_SLOT_ID slot_id);
+
+// Use the software slot as the private slot for user.
+// InitializeNSSForChromeOSUser must have been called first.
+CRYPTO_EXPORT void InitializePrivateSoftwareSlotForChromeOSUser(
+ const std::string& username_hash);
+
+// Returns a reference to the public slot for user.
+CRYPTO_EXPORT ScopedPK11Slot GetPublicSlotForChromeOSUser(
+ const std::string& username_hash) WARN_UNUSED_RESULT;
+
+// Returns the private slot for |username_hash| if it is loaded. If it is not
+// loaded and |callback| is non-null, the |callback| will be run once the slot
+// is loaded.
+CRYPTO_EXPORT ScopedPK11Slot GetPrivateSlotForChromeOSUser(
+ const std::string& username_hash,
+ const base::Callback<void(ScopedPK11Slot)>& callback) WARN_UNUSED_RESULT;
+
+// Closes the NSS DB for |username_hash| that was previously opened by the
+// *Initialize*ForChromeOSUser functions.
+CRYPTO_EXPORT void CloseChromeOSUserForTesting(
+ const std::string& username_hash);
+#endif // defined(OS_CHROMEOS)
+
+} // namespace crypto
+
+#endif // CRYPTO_NSS_UTIL_INTERNAL_H_
diff --git a/libchrome/crypto/nss_util_unittest.cc b/libchrome/crypto/nss_util_unittest.cc
new file mode 100644
index 0000000..729d5bf
--- /dev/null
+++ b/libchrome/crypto/nss_util_unittest.cc
@@ -0,0 +1,44 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/nss_util.h"
+
+#include <prtime.h>
+
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace crypto {
+
+// Verifies that PRTimeToBaseTime() and BaseTimeToPRTime() are exact inverses
+// and agree with NSPR's own PR_ImplodeTime() for a representative timestamp.
+TEST(NSSUtilTest, PRTimeConversion) {
+  // Both epochs are 1970-01-01 00:00:00 UTC, so 0 must map to 0 both ways.
+  EXPECT_EQ(base::Time::UnixEpoch(), PRTimeToBaseTime(0));
+  EXPECT_EQ(0, BaseTimeToPRTime(base::Time::UnixEpoch()));
+
+  PRExplodedTime prxtime;
+  prxtime.tm_params.tp_gmt_offset = 0;
+  prxtime.tm_params.tp_dst_offset = 0;
+  base::Time::Exploded exploded;
+  exploded.year = prxtime.tm_year = 2011;
+  // Note: PRExplodedTime months are 0-based while Exploded months are
+  // 1-based, hence the differing values for the same calendar month.
+  exploded.month = 12;
+  prxtime.tm_month = 11;
+  // PRExplodedTime::tm_wday is a smaller type than Exploded::day_of_week, so
+  // assigning the two in this order instead of the reverse avoids potential
+  // warnings about type downcasting.
+  exploded.day_of_week = prxtime.tm_wday = 0;  // Should be unused.
+  exploded.day_of_month = prxtime.tm_mday = 10;
+  exploded.hour = prxtime.tm_hour = 2;
+  exploded.minute = prxtime.tm_min = 52;
+  exploded.second = prxtime.tm_sec = 19;
+  // Sub-second precision: milliseconds on the base::Time side, microseconds
+  // on the NSPR side.
+  exploded.millisecond = 342;
+  prxtime.tm_usec = 342000;
+
+  PRTime pr_time = PR_ImplodeTime(&prxtime);
+  base::Time base_time;
+  EXPECT_TRUE(base::Time::FromUTCExploded(exploded, &base_time));
+
+  // Both conversions must land on the same instant.
+  EXPECT_EQ(base_time, PRTimeToBaseTime(pr_time));
+  EXPECT_EQ(pr_time, BaseTimeToPRTime(base_time));
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/openssl_bio_string.cc b/libchrome/crypto/openssl_bio_string.cc
new file mode 100644
index 0000000..4880500
--- /dev/null
+++ b/libchrome/crypto/openssl_bio_string.cc
@@ -0,0 +1,77 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/openssl_bio_string.h"
+
+#include <openssl/bio.h>
+#include <string.h>
+
+namespace crypto {
+
+namespace {
+
+// BIO write callback: appends |len| bytes from |data| to the std::string
+// stashed in |bio->ptr| by BIO_new_string(). Always reports the full length
+// as written.
+int bio_string_write(BIO* bio, const char* data, int len) {
+  // |bio->ptr| is a void*, so static_cast is the appropriate named cast
+  // (reinterpret_cast is unnecessary for void* -> T*).
+  static_cast<std::string*>(bio->ptr)->append(data, len);
+  return len;
+}
+
+// BIO puts callback: appends the NUL-terminated |data| to the backing string
+// and returns the number of bytes appended.
+int bio_string_puts(BIO* bio, const char* data) {
+  // Note: unlike puts(), BIO_puts does not add a newline.
+  return bio_string_write(bio, data, strlen(data));
+}
+
+// BIO ctrl callback: implements the minimal set of control operations that
+// make sense for an append-only string sink. Unknown commands report 0.
+long bio_string_ctrl(BIO* bio, int cmd, long num, void* ptr) {
+  // |bio->ptr| is a void*, so static_cast is the appropriate named cast.
+  std::string* str = static_cast<std::string*>(bio->ptr);
+  switch (cmd) {
+    case BIO_CTRL_RESET:
+      // BIO_reset(): discard everything written so far.
+      str->clear();
+      return 1;
+    case BIO_C_FILE_SEEK:
+      // Seeking is not supported on an append-only sink.
+      return -1;
+    case BIO_C_FILE_TELL:
+      // BIO_tell(): the current "position" is the number of bytes written.
+      return str->size();
+    case BIO_CTRL_FLUSH:
+      // Writes go straight into the string, so flushing is a no-op success.
+      return 1;
+    default:
+      return 0;
+  }
+}
+
+// BIO create callback: start with no backing string and mark the BIO
+// uninitialized until BIO_new_string() attaches one.
+int bio_string_new(BIO* bio) {
+  bio->ptr = NULL;
+  bio->init = 0;
+  return 1;
+}
+
+// BIO destroy callback.
+int bio_string_free(BIO* bio) {
+  // The string is owned by the caller, so there's nothing to do here.
+  return bio != NULL;
+}
+
+// Method table wiring the callbacks above into a (legacy, struct-based)
+// OpenSSL BIO implementation. Unsupported operations are left NULL.
+BIO_METHOD bio_string_methods = {
+    // TODO(mattm): Should add some type number too? (bio.h uses 1-24)
+    BIO_TYPE_SOURCE_SINK,
+    "bio_string",
+    bio_string_write,
+    NULL, /* read */
+    bio_string_puts,
+    NULL, /* gets */
+    bio_string_ctrl,
+    bio_string_new,
+    bio_string_free,
+    NULL, /* callback_ctrl */
+};
+
+} // namespace
+
+BIO* BIO_new_string(std::string* out) {
+  BIO* bio = BIO_new(&bio_string_methods);
+  if (!bio)
+    return bio;
+  // Attach the caller-owned string and mark the BIO usable. |out| must
+  // outlive the returned BIO (see the header comment).
+  bio->ptr = out;
+  bio->init = 1;
+  return bio;
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/openssl_bio_string.h b/libchrome/crypto/openssl_bio_string.h
new file mode 100644
index 0000000..ca46c12
--- /dev/null
+++ b/libchrome/crypto/openssl_bio_string.h
@@ -0,0 +1,29 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_OPENSSL_BIO_STRING_H_
+#define CRYPTO_OPENSSL_BIO_STRING_H_
+
+#include <string>
+
+#include "crypto/crypto_export.h"
+
+// From <openssl/bio.h>
+typedef struct bio_st BIO;
+
+namespace crypto {
+
+// Creates a new BIO that can be used with OpenSSL's various output functions,
+// and which will write all output directly into |out|. This is primarily
+// intended as a utility to reduce the amount of copying and separate
+// allocations when performing extensive string modifications or streaming
+// within OpenSSL.
+//
+// Note: |out| must remain valid for the duration of the BIO.
+CRYPTO_EXPORT BIO* BIO_new_string(std::string* out);
+
+} // namespace crypto
+
+#endif // CRYPTO_OPENSSL_BIO_STRING_H_
+
diff --git a/libchrome/crypto/openssl_bio_string_unittest.cc b/libchrome/crypto/openssl_bio_string_unittest.cc
new file mode 100644
index 0000000..9dfa0e7
--- /dev/null
+++ b/libchrome/crypto/openssl_bio_string_unittest.cc
@@ -0,0 +1,63 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/openssl_bio_string.h"
+
+#include <openssl/bio.h>
+
+#include "crypto/scoped_openssl_types.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace crypto {
+
+// Verifies that BIO_printf, BIO_write and BIO_puts all append to the backing
+// string, that BIO_flush is a no-op, and that the string survives the BIO's
+// destruction.
+TEST(OpenSSLBIOString, TestWrite) {
+  std::string s;
+  const std::string expected1("a one\nb 2\n");
+  const std::string expected2("c d e f");
+  const std::string expected3("g h i");
+  {
+    ScopedBIO bio(BIO_new_string(&s));
+    ASSERT_TRUE(bio.get());
+
+    EXPECT_EQ(static_cast<int>(expected1.size()),
+              BIO_printf(bio.get(), "a %s\nb %i\n", "one", 2));
+    EXPECT_EQ(expected1, s);
+
+    // Flushing must succeed and leave the contents untouched.
+    EXPECT_EQ(1, BIO_flush(bio.get()));
+    EXPECT_EQ(expected1, s);
+
+    EXPECT_EQ(static_cast<int>(expected2.size()),
+              BIO_write(bio.get(), expected2.data(), expected2.size()));
+    EXPECT_EQ(expected1 + expected2, s);
+
+    EXPECT_EQ(static_cast<int>(expected3.size()),
+              BIO_puts(bio.get(), expected3.c_str()));
+    EXPECT_EQ(expected1 + expected2 + expected3, s);
+  }
+  // The caller owns the string; freeing the BIO must not clear it.
+  EXPECT_EQ(expected1 + expected2 + expected3, s);
+}
+
+// Verifies that BIO_reset clears the backing string and that subsequent
+// writes start from empty.
+TEST(OpenSSLBIOString, TestReset) {
+  std::string s;
+  const std::string expected1("a b c\n");
+  const std::string expected2("d e f g\n");
+  {
+    ScopedBIO bio(BIO_new_string(&s));
+    ASSERT_TRUE(bio.get());
+
+    EXPECT_EQ(static_cast<int>(expected1.size()),
+              BIO_write(bio.get(), expected1.data(), expected1.size()));
+    EXPECT_EQ(expected1, s);
+
+    EXPECT_EQ(1, BIO_reset(bio.get()));
+    EXPECT_EQ(std::string(), s);
+
+    EXPECT_EQ(static_cast<int>(expected2.size()),
+              BIO_write(bio.get(), expected2.data(), expected2.size()));
+    EXPECT_EQ(expected2, s);
+  }
+  // Freeing the BIO must leave the caller-owned string intact.
+  EXPECT_EQ(expected2, s);
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/openssl_util.cc b/libchrome/crypto/openssl_util.cc
new file mode 100644
index 0000000..78c6cbb
--- /dev/null
+++ b/libchrome/crypto/openssl_util.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/openssl_util.h"
+
+#if defined(OPENSSL_IS_BORINGSSL)
+#include <openssl/cpu.h>
+#else
+#include <openssl/ssl.h>
+#endif
+#include <openssl/crypto.h>
+#include <openssl/err.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "base/strings/string_piece.h"
+
+namespace crypto {
+
+namespace {
+
+// Callback routine for OpenSSL to print error messages. |str| is a
+// NULL-terminated string of length |len| containing diagnostic information
+// such as the library, function and reason for the error, the file and line
+// where the error originated, plus potentially any context-specific
+// information about the error. |context| contains a pointer to user-supplied
+// data, which is currently unused.
+// If this callback returns a value <= 0, OpenSSL will stop processing the
+// error queue and return, otherwise it will continue calling this function
+// until all errors have been removed from the queue.
+int OpenSSLErrorCallback(const char* str, size_t len, void* context) {
+  DVLOG(1) << "\t" << base::StringPiece(str, len);
+  // Always continue with the next queued error.
+  return 1;
+}
+
+} // namespace
+
+void EnsureOpenSSLInit() {
+#if defined(OPENSSL_IS_BORINGSSL)
+  // CRYPTO_library_init may be safely called concurrently.
+  CRYPTO_library_init();
+#else
+  // SSL_library_init() always returns 1, so its result is ignored.
+  SSL_library_init();
+#endif
+}
+
+void ClearOpenSSLERRStack(const tracked_objects::Location& location) {
+  if (logging::DEBUG_MODE && VLOG_IS_ON(1)) {
+    // Cheap early-out: don't format |location| if the queue is empty.
+    uint32_t error_num = ERR_peek_error();
+    if (error_num == 0)
+      return;
+
+    std::string message;
+    location.Write(true, true, &message);
+    DVLOG(1) << "OpenSSL ERR_get_error stack from " << message;
+    // ERR_print_errors_cb() drains the error queue as it prints each entry.
+    ERR_print_errors_cb(&OpenSSLErrorCallback, NULL);
+  } else {
+    // Release builds (or VLOG off): just discard the queued errors.
+    ERR_clear_error();
+  }
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/openssl_util.h b/libchrome/crypto/openssl_util.h
new file mode 100644
index 0000000..d608cde
--- /dev/null
+++ b/libchrome/crypto/openssl_util.h
@@ -0,0 +1,92 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_OPENSSL_UTIL_H_
+#define CRYPTO_OPENSSL_UTIL_H_
+
+#include <stddef.h>
+#include <string.h>
+
+#include "base/location.h"
+#include "base/macros.h"
+#include "crypto/crypto_export.h"
+
+namespace crypto {
+
+// Provides a buffer of at least MIN_SIZE bytes, for use when calling OpenSSL's
+// SHA256, HMAC, etc functions, adapting the buffer sizing rules to meet those
+// of the our base wrapper APIs.
+// This allows the library to write directly to the caller's buffer if it is of
+// sufficient size, but if not it will write to temporary |min_sized_buffer_|
+// of required size and then its content is automatically copied out on
+// destruction, with truncation as appropriate.
+template<int MIN_SIZE>
+class ScopedOpenSSLSafeSizeBuffer {
+ public:
+  // |output| must remain valid for the lifetime of this object.
+  ScopedOpenSSLSafeSizeBuffer(unsigned char* output, size_t output_len)
+      : output_(output),
+        output_len_(output_len) {
+  }
+
+  ~ScopedOpenSSLSafeSizeBuffer() {
+    if (output_len_ < MIN_SIZE) {
+      // Copy the temporary buffer out, truncating as needed.
+      memcpy(output_, min_sized_buffer_, output_len_);
+    }
+    // else... any writing already happened directly into |output_|.
+  }
+
+  // Returns a buffer guaranteed to hold at least MIN_SIZE bytes: either the
+  // caller's buffer or the internal temporary.
+  unsigned char* safe_buffer() {
+    return output_len_ < MIN_SIZE ? min_sized_buffer_ : output_;
+  }
+
+ private:
+  // Pointer to the caller's data area and its associated size, where data
+  // written via safe_buffer() will [eventually] end up.
+  unsigned char* output_;
+  size_t output_len_;
+
+  // Temporary buffer written into in the case where the caller's
+  // buffer is not of sufficient size.
+  unsigned char min_sized_buffer_[MIN_SIZE];
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedOpenSSLSafeSizeBuffer);
+};
+
+// Initialize OpenSSL if it isn't already initialized. This must be called
+// before any other OpenSSL functions though it is safe and cheap to call this
+// multiple times.
+// This function is thread-safe, and OpenSSL will only ever be initialized once.
+// OpenSSL will be properly shut down on program exit.
+CRYPTO_EXPORT void EnsureOpenSSLInit();
+
+// Drains the OpenSSL ERR_get_error stack. On a debug build the error codes
+// are send to VLOG(1), on a release build they are disregarded. In most
+// cases you should pass FROM_HERE as the |location|.
+CRYPTO_EXPORT void ClearOpenSSLERRStack(
+ const tracked_objects::Location& location);
+
+// Place an instance of this class on the call stack to automatically clear
+// the OpenSSL error stack on function exit.
+// Constructing one also guarantees OpenSSL is initialized (EnsureOpenSSLInit).
+class OpenSSLErrStackTracer {
+ public:
+  // Pass FROM_HERE as |location|, to help track the source of OpenSSL error
+  // messages. Note any diagnostic emitted will be tagged with the location of
+  // the constructor call as it's not possible to trace a destructor's callsite.
+  explicit OpenSSLErrStackTracer(const tracked_objects::Location& location)
+      : location_(location) {
+    EnsureOpenSSLInit();
+  }
+  ~OpenSSLErrStackTracer() {
+    ClearOpenSSLERRStack(location_);
+  }
+
+ private:
+  const tracked_objects::Location location_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(OpenSSLErrStackTracer);
+};
+
+} // namespace crypto
+
+#endif // CRYPTO_OPENSSL_UTIL_H_
diff --git a/libchrome/crypto/p224.cc b/libchrome/crypto/p224.cc
new file mode 100644
index 0000000..685a335
--- /dev/null
+++ b/libchrome/crypto/p224.cc
@@ -0,0 +1,747 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is an implementation of the P224 elliptic curve group. It's written to
+// be short and simple rather than fast, although it's still constant-time.
+//
+// See http://www.imperialviolet.org/2010/12/04/ecc.html ([1]) for background.
+
+#include "crypto/p224.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "base/sys_byteorder.h"
+
+namespace {
+
+using base::HostToNet32;
+using base::NetToHost32;
+
+// Field element functions.
+//
+// The field that we're dealing with is ℤ/pℤ where p = 2**224 - 2**96 + 1.
+//
+// Field elements are represented by a FieldElement, which is a typedef to an
+// array of 8 uint32_t's. The value of a FieldElement, a, is:
+// a[0] + 2**28·a[1] + 2**56·a[1] + ... + 2**196·a[7]
+//
+// Using 28-bit limbs means that there's only 4 bits of headroom, which is less
+// than we would really like. But it has the useful feature that we hit 2**224
+// exactly, making the reflections during a reduce much nicer.
+
+using crypto::p224::FieldElement;
+
+// kP is the P224 prime.
+const FieldElement kP = {
+ 1, 0, 0, 268431360,
+ 268435455, 268435455, 268435455, 268435455,
+};
+
+void Contract(FieldElement* inout);
+
+// IsZero returns 0xffffffff if a == 0 mod p and 0 otherwise.
+// Branch-free: the result is derived purely from bit operations so control
+// flow does not depend on the value of |a|.
+uint32_t IsZero(const FieldElement& a) {
+  // Reduce to the minimal, distinguished representative first so the
+  // comparisons against 0 and p below are meaningful.
+  FieldElement minimal;
+  memcpy(&minimal, &a, sizeof(minimal));
+  Contract(&minimal);
+
+  uint32_t is_zero = 0, is_p = 0;
+  for (unsigned i = 0; i < 8; i++) {
+    is_zero |= minimal[i];
+    is_p |= minimal[i] - kP[i];
+  }
+
+  // If either is_zero or is_p is 0, then we should return 1.
+  // Fold all bits of each accumulator down into its LSB.
+  is_zero |= is_zero >> 16;
+  is_zero |= is_zero >> 8;
+  is_zero |= is_zero >> 4;
+  is_zero |= is_zero >> 2;
+  is_zero |= is_zero >> 1;
+
+  is_p |= is_p >> 16;
+  is_p |= is_p >> 8;
+  is_p |= is_p >> 4;
+  is_p |= is_p >> 2;
+  is_p |= is_p >> 1;
+
+  // For is_zero and is_p, the LSB is 0 iff all the bits are zero.
+  is_zero &= is_p & 1;
+  // Spread the single result bit into a full 0 / 0xffffffff mask via an
+  // arithmetic right shift.
+  is_zero = (~is_zero) << 31;
+  is_zero = static_cast<int32_t>(is_zero) >> 31;
+  return is_zero;
+}
+
+// Add computes *out = a+b
+//
+// a[i] + b[i] < 2**32
+// Limb-wise addition only; no reduction is performed here, so the caller is
+// responsible for the bound above and for calling Reduce() as needed.
+void Add(FieldElement* out, const FieldElement& a, const FieldElement& b) {
+  for (int i = 0; i < 8; i++) {
+    (*out)[i] = a[i] + b[i];
+  }
+}
+
+static const uint32_t kTwo31p3 = (1u << 31) + (1u << 3);
+static const uint32_t kTwo31m3 = (1u << 31) - (1u << 3);
+static const uint32_t kTwo31m15m3 = (1u << 31) - (1u << 15) - (1u << 3);
+// kZero31ModP is 0 mod p where bit 31 is set in all limbs so that we can
+// subtract smaller amounts without underflow. See the section "Subtraction" in
+// [1] for why.
+static const FieldElement kZero31ModP = {
+ kTwo31p3, kTwo31m3, kTwo31m3, kTwo31m15m3,
+ kTwo31m3, kTwo31m3, kTwo31m3, kTwo31m3
+};
+
+// Subtract computes *out = a-b
+//
+// a[i], b[i] < 2**30
+// out[i] < 2**32
+void Subtract(FieldElement* out, const FieldElement& a, const FieldElement& b) {
+  for (int i = 0; i < 8; i++) {
+    // kZero31ModP is 0 mod p, so adding it first keeps the value unchanged
+    // mod p while preventing per-limb underflow.
+    // See the section on "Subtraction" in [1] for details.
+    (*out)[i] = a[i] + kZero31ModP[i] - b[i];
+  }
+}
+
+static const uint64_t kTwo63p35 = (1ull << 63) + (1ull << 35);
+static const uint64_t kTwo63m35 = (1ull << 63) - (1ull << 35);
+static const uint64_t kTwo63m35m19 = (1ull << 63) - (1ull << 35) - (1ull << 19);
+// kZero63ModP is 0 mod p where bit 63 is set in all limbs. See the section
+// "Subtraction" in [1] for why.
+static const uint64_t kZero63ModP[8] = {
+ kTwo63p35, kTwo63m35, kTwo63m35, kTwo63m35,
+ kTwo63m35m19, kTwo63m35, kTwo63m35, kTwo63m35,
+};
+
+static const uint32_t kBottom28Bits = 0xfffffff;
+
+// LargeFieldElement also represents an element of the field. The limbs are
+// still spaced 28-bits apart and in little-endian order. So the limbs are at
+// 0, 28, 56, ..., 392 bits, each 64-bits wide.
+typedef uint64_t LargeFieldElement[15];
+
+// ReduceLarge converts a LargeFieldElement to a FieldElement.
+//
+// in[i] < 2**62
+void ReduceLarge(FieldElement* out, LargeFieldElement* inptr) {
+  LargeFieldElement& in(*inptr);
+
+  // Add a multiple of p (kZero63ModP) so the subtractions below cannot
+  // underflow.
+  for (int i = 0; i < 8; i++) {
+    in[i] += kZero63ModP[i];
+  }
+
+  // Eliminate the coefficients at 2**224 and greater while maintaining the
+  // same value mod p.
+  for (int i = 14; i >= 8; i--) {
+    in[i-8] -= in[i];  // reflection off the "+1" term of p.
+    in[i-5] += (in[i] & 0xffff) << 12;  // part of the "-2**96" reflection.
+    in[i-4] += in[i] >> 16;  // the rest of the "-2**96" reflection.
+  }
+  in[8] = 0;
+  // in[0..8] < 2**64
+
+  // As the values become small enough, we start to store them in |out| and use
+  // 32-bit operations.
+  for (int i = 1; i < 8; i++) {
+    in[i+1] += in[i] >> 28;
+    (*out)[i] = static_cast<uint32_t>(in[i] & kBottom28Bits);
+  }
+  // Eliminate the term at 2**224 that we introduced while keeping the same
+  // value mod p.
+  in[0] -= in[8];  // reflection off the "+1" term of p.
+  (*out)[3] += static_cast<uint32_t>(in[8] & 0xffff) << 12;  // "-2**96" term
+  (*out)[4] += static_cast<uint32_t>(in[8] >> 16);  // rest of "-2**96" term
+  // in[0] < 2**64
+  // out[3] < 2**29
+  // out[4] < 2**29
+  // out[1,2,5..7] < 2**28
+
+  (*out)[0] = static_cast<uint32_t>(in[0] & kBottom28Bits);
+  (*out)[1] += static_cast<uint32_t>((in[0] >> 28) & kBottom28Bits);
+  (*out)[2] += static_cast<uint32_t>(in[0] >> 56);
+  // out[0] < 2**28
+  // out[1..4] < 2**29
+  // out[5..7] < 2**28
+}
+
+// Mul computes *out = a*b
+//
+// a[i] < 2**29, b[i] < 2**30 (or vice versa)
+// out[i] < 2**29
+void Mul(FieldElement* out, const FieldElement& a, const FieldElement& b) {
+  // Schoolbook multiplication into 64-bit limbs, then a single reduction.
+  LargeFieldElement tmp;
+  memset(&tmp, 0, sizeof(tmp));
+
+  for (int i = 0; i < 8; i++) {
+    for (int j = 0; j < 8; j++) {
+      tmp[i + j] += static_cast<uint64_t>(a[i]) * static_cast<uint64_t>(b[j]);
+    }
+  }
+
+  ReduceLarge(out, &tmp);
+}
+
+// Square computes *out = a*a
+//
+// a[i] < 2**29
+// out[i] < 2**29
+void Square(FieldElement* out, const FieldElement& a) {
+  LargeFieldElement tmp;
+  memset(&tmp, 0, sizeof(tmp));
+
+  // Like Mul, but exploits symmetry: the off-diagonal products a[i]*a[j]
+  // occur twice, so each is computed once and doubled.
+  for (int i = 0; i < 8; i++) {
+    for (int j = 0; j <= i; j++) {
+      uint64_t r = static_cast<uint64_t>(a[i]) * static_cast<uint64_t>(a[j]);
+      if (i == j) {
+        tmp[i+j] += r;
+      } else {
+        tmp[i+j] += r << 1;
+      }
+    }
+  }
+
+  ReduceLarge(out, &tmp);
+}
+
+// Reduce reduces the coefficients of in_out to smaller bounds.
+//
+// On entry: a[i] < 2**31 + 2**30
+// On exit: a[i] < 2**29
+void Reduce(FieldElement* in_out) {
+  FieldElement& a = *in_out;
+
+  // Carry-propagate so every limb is < 2**28, collecting the overflow of the
+  // top limb in |top|.
+  for (int i = 0; i < 7; i++) {
+    a[i+1] += a[i] >> 28;
+    a[i] &= kBottom28Bits;
+  }
+  uint32_t top = a[7] >> 28;
+  a[7] &= kBottom28Bits;
+
+  // top < 2**4
+  // Constant-time: mask = (top != 0) ? 0xffffffff : 0
+  // (top has at most 4 bits, so two shifts fold them all into bit 0.)
+  uint32_t mask = top;
+  mask |= mask >> 2;
+  mask |= mask >> 1;
+  mask <<= 31;
+  mask = static_cast<uint32_t>(static_cast<int32_t>(mask) >> 31);
+
+  // Eliminate top while maintaining the same value mod p.
+  a[0] -= top;
+  a[3] += top << 12;
+
+  // We may have just made a[0] negative but, if we did, then we must
+  // have added something to a[3], thus it's > 2**12. Therefore we can
+  // carry down to a[0].
+  a[3] -= 1 & mask;
+  a[2] += mask & ((1<<28) - 1);
+  a[1] += mask & ((1<<28) - 1);
+  a[0] += mask & (1<<28);
+}
+
+// Invert calculates *out = in**-1 by computing in**(2**224 - 2**96 - 1), i.e.
+// Fermat's little theorem. The sequence below is a fixed addition chain for
+// that exponent; the comment after each step gives the exponent accumulated
+// so far.
+void Invert(FieldElement* out, const FieldElement& in) {
+  FieldElement f1, f2, f3, f4;
+
+  Square(&f1, in);                     // 2
+  Mul(&f1, f1, in);                    // 2**2 - 1
+  Square(&f1, f1);                     // 2**3 - 2
+  Mul(&f1, f1, in);                    // 2**3 - 1
+  Square(&f2, f1);                     // 2**4 - 2
+  Square(&f2, f2);                     // 2**5 - 4
+  Square(&f2, f2);                     // 2**6 - 8
+  Mul(&f1, f1, f2);                    // 2**6 - 1
+  Square(&f2, f1);                     // 2**7 - 2
+  for (int i = 0; i < 5; i++) {        // 2**12 - 2**6
+    Square(&f2, f2);
+  }
+  Mul(&f2, f2, f1);                    // 2**12 - 1
+  Square(&f3, f2);                     // 2**13 - 2
+  for (int i = 0; i < 11; i++) {       // 2**24 - 2**12
+    Square(&f3, f3);
+  }
+  Mul(&f2, f3, f2);                    // 2**24 - 1
+  Square(&f3, f2);                     // 2**25 - 2
+  for (int i = 0; i < 23; i++) {       // 2**48 - 2**24
+    Square(&f3, f3);
+  }
+  Mul(&f3, f3, f2);                    // 2**48 - 1
+  Square(&f4, f3);                     // 2**49 - 2
+  for (int i = 0; i < 47; i++) {       // 2**96 - 2**48
+    Square(&f4, f4);
+  }
+  Mul(&f3, f3, f4);                    // 2**96 - 1
+  Square(&f4, f3);                     // 2**97 - 2
+  for (int i = 0; i < 23; i++) {       // 2**120 - 2**24
+    Square(&f4, f4);
+  }
+  Mul(&f2, f4, f2);                    // 2**120 - 1
+  for (int i = 0; i < 6; i++) {        // 2**126 - 2**6
+    Square(&f2, f2);
+  }
+  Mul(&f1, f1, f2);                    // 2**126 - 1
+  Square(&f1, f1);                     // 2**127 - 2
+  Mul(&f1, f1, in);                    // 2**127 - 1
+  for (int i = 0; i < 97; i++) {       // 2**224 - 2**97
+    Square(&f1, f1);
+  }
+  Mul(out, f1, f3);                    // 2**224 - 2**96 - 1
+}
+
+// Contract converts a FieldElement to its minimal, distinguished form.
+// The result is the unique representative in [0, p), which makes byte-wise
+// comparison of field elements meaningful (see IsZero and SetFromString).
+//
+// On entry, in[i] < 2**29
+// On exit, in[i] < 2**28
+void Contract(FieldElement* inout) {
+  FieldElement& out = *inout;
+
+  // Reduce the coefficients to < 2**28.
+  for (int i = 0; i < 7; i++) {
+    out[i+1] += out[i] >> 28;
+    out[i] &= kBottom28Bits;
+  }
+  uint32_t top = out[7] >> 28;
+  out[7] &= kBottom28Bits;
+
+  // Eliminate top while maintaining the same value mod p.
+  out[0] -= top;
+  out[3] += top << 12;
+
+  // We may just have made out[0] negative. So we carry down. If we made
+  // out[0] negative then we know that out[3] is sufficiently positive
+  // because we just added to it.
+  for (int i = 0; i < 3; i++) {
+    // mask is all-ones iff out[i] underflowed (sign bit set).
+    uint32_t mask = static_cast<uint32_t>(static_cast<int32_t>(out[i]) >> 31);
+    out[i] += (1 << 28) & mask;
+    out[i+1] -= 1 & mask;
+  }
+
+  // We might have pushed out[3] over 2**28 so we perform another, partial
+  // carry chain.
+  for (int i = 3; i < 7; i++) {
+    out[i+1] += out[i] >> 28;
+    out[i] &= kBottom28Bits;
+  }
+  top = out[7] >> 28;
+  out[7] &= kBottom28Bits;
+
+  // Eliminate top while maintaining the same value mod p.
+  out[0] -= top;
+  out[3] += top << 12;
+
+  // There are two cases to consider for out[3]:
+  //   1) The first time that we eliminated top, we didn't push out[3] over
+  //      2**28. In this case, the partial carry chain didn't change any values
+  //      and top is zero.
+  //   2) We did push out[3] over 2**28 the first time that we eliminated top.
+  //      The first value of top was in [0..16), therefore, prior to eliminating
+  //      the first top, 0xfff1000 <= out[3] <= 0xfffffff. Therefore, after
+  //      overflowing and being reduced by the second carry chain, out[3] <=
+  //      0xf000. Thus it cannot have overflowed when we eliminated top for the
+  //      second time.
+
+  // Again, we may just have made out[0] negative, so do the same carry down.
+  // As before, if we made out[0] negative then we know that out[3] is
+  // sufficiently positive.
+  for (int i = 0; i < 3; i++) {
+    uint32_t mask = static_cast<uint32_t>(static_cast<int32_t>(out[i]) >> 31);
+    out[i] += (1 << 28) & mask;
+    out[i+1] -= 1 & mask;
+  }
+
+  // The value is < 2**224, but maybe greater than p. In order to reduce to a
+  // unique, minimal value we see if the value is >= p and, if so, subtract p.
+
+  // First we build a mask from the top four limbs, which must all be
+  // equal to bottom28Bits if the whole value is >= p. If top_4_all_ones
+  // ends up with any zero bits in the bottom 28 bits, then this wasn't
+  // true.
+  uint32_t top_4_all_ones = 0xffffffffu;
+  for (int i = 4; i < 8; i++) {
+    top_4_all_ones &= out[i];
+  }
+  top_4_all_ones |= 0xf0000000;
+  // Now we replicate any zero bits to all the bits in top_4_all_ones.
+  top_4_all_ones &= top_4_all_ones >> 16;
+  top_4_all_ones &= top_4_all_ones >> 8;
+  top_4_all_ones &= top_4_all_ones >> 4;
+  top_4_all_ones &= top_4_all_ones >> 2;
+  top_4_all_ones &= top_4_all_ones >> 1;
+  top_4_all_ones =
+      static_cast<uint32_t>(static_cast<int32_t>(top_4_all_ones << 31) >> 31);
+
+  // Now we test whether the bottom three limbs are non-zero.
+  uint32_t bottom_3_non_zero = out[0] | out[1] | out[2];
+  bottom_3_non_zero |= bottom_3_non_zero >> 16;
+  bottom_3_non_zero |= bottom_3_non_zero >> 8;
+  bottom_3_non_zero |= bottom_3_non_zero >> 4;
+  bottom_3_non_zero |= bottom_3_non_zero >> 2;
+  bottom_3_non_zero |= bottom_3_non_zero >> 1;
+  bottom_3_non_zero =
+      static_cast<uint32_t>(static_cast<int32_t>(bottom_3_non_zero) >> 31);
+
+  // Everything depends on the value of out[3].
+  //    If it's > 0xffff000 and top_4_all_ones != 0 then the whole value is >= p
+  //    If it's = 0xffff000 and top_4_all_ones != 0 and bottom_3_non_zero != 0,
+  //      then the whole value is >= p
+  //    If it's < 0xffff000, then the whole value is < p
+  uint32_t n = out[3] - 0xffff000;
+  uint32_t out_3_equal = n;
+  out_3_equal |= out_3_equal >> 16;
+  out_3_equal |= out_3_equal >> 8;
+  out_3_equal |= out_3_equal >> 4;
+  out_3_equal |= out_3_equal >> 2;
+  out_3_equal |= out_3_equal >> 1;
+  out_3_equal =
+      ~static_cast<uint32_t>(static_cast<int32_t>(out_3_equal << 31) >> 31);
+
+  // If out[3] > 0xffff000 then n's MSB will be zero.
+  uint32_t out_3_gt =
+      ~static_cast<uint32_t>(static_cast<int32_t>(n << 31) >> 31);
+
+  // mask is all-ones iff the value is >= p; conditionally subtract p.
+  uint32_t mask =
+      top_4_all_ones & ((out_3_equal & bottom_3_non_zero) | out_3_gt);
+  out[0] -= 1 & mask;
+  out[3] -= 0xffff000 & mask;
+  out[4] -= 0xfffffff & mask;
+  out[5] -= 0xfffffff & mask;
+  out[6] -= 0xfffffff & mask;
+  out[7] -= 0xfffffff & mask;
+}
+
+
+// Group element functions.
+//
+// These functions deal with group elements. The group is an elliptic curve
+// group with a = -3 defined in FIPS 186-3, section D.2.2.
+
+using crypto::p224::Point;
+
+// kB is parameter of the elliptic curve.
+const FieldElement kB = {
+ 55967668, 11768882, 265861671, 185302395,
+ 39211076, 180311059, 84673715, 188764328,
+};
+
+void CopyConditional(Point* out, const Point& a, uint32_t mask);
+void DoubleJacobian(Point* out, const Point& a);
+
+// AddJacobian computes *out = a+b where a != b.
+// Special cases handled explicitly: if a or b is the point at infinity
+// (z == 0) the other operand is returned via CopyConditional; if a == b the
+// dedicated doubling routine is used, since the addition formula's slope is
+// undefined in that case.
+void AddJacobian(Point *out,
+                 const Point& a,
+                 const Point& b) {
+  // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
+  FieldElement z1z1, z2z2, u1, u2, s1, s2, h, i, j, r, v;
+
+  uint32_t z1_is_zero = IsZero(a.z);
+  uint32_t z2_is_zero = IsZero(b.z);
+
+  // Z1Z1 = Z1²
+  Square(&z1z1, a.z);
+
+  // Z2Z2 = Z2²
+  Square(&z2z2, b.z);
+
+  // U1 = X1*Z2Z2
+  Mul(&u1, a.x, z2z2);
+
+  // U2 = X2*Z1Z1
+  Mul(&u2, b.x, z1z1);
+
+  // S1 = Y1*Z2*Z2Z2
+  Mul(&s1, b.z, z2z2);
+  Mul(&s1, a.y, s1);
+
+  // S2 = Y2*Z1*Z1Z1
+  Mul(&s2, a.z, z1z1);
+  Mul(&s2, b.y, s2);
+
+  // H = U2-U1
+  Subtract(&h, u2, u1);
+  Reduce(&h);
+  uint32_t x_equal = IsZero(h);
+
+  // I = (2*H)²
+  for (int k = 0; k < 8; k++) {
+    i[k] = h[k] << 1;
+  }
+  Reduce(&i);
+  Square(&i, i);
+
+  // J = H*I
+  Mul(&j, h, i);
+  // r = 2*(S2-S1)
+  Subtract(&r, s2, s1);
+  Reduce(&r);
+  uint32_t y_equal = IsZero(r);
+
+  if (x_equal && y_equal && !z1_is_zero && !z2_is_zero) {
+    // The two input points are the same therefore we must use the dedicated
+    // doubling function as the slope of the line is undefined.
+    DoubleJacobian(out, a);
+    return;
+  }
+
+  for (int k = 0; k < 8; k++) {
+    r[k] <<= 1;
+  }
+  Reduce(&r);
+
+  // V = U1*I
+  Mul(&v, u1, i);
+
+  // Z3 = ((Z1+Z2)²-Z1Z1-Z2Z2)*H
+  Add(&z1z1, z1z1, z2z2);
+  Add(&z2z2, a.z, b.z);
+  Reduce(&z2z2);
+  Square(&z2z2, z2z2);
+  Subtract(&out->z, z2z2, z1z1);
+  Reduce(&out->z);
+  Mul(&out->z, out->z, h);
+
+  // X3 = r²-J-2*V
+  for (int k = 0; k < 8; k++) {
+    z1z1[k] = v[k] << 1;
+  }
+  Add(&z1z1, j, z1z1);
+  Reduce(&z1z1);
+  Square(&out->x, r);
+  Subtract(&out->x, out->x, z1z1);
+  Reduce(&out->x);
+
+  // Y3 = r*(V-X3)-2*S1*J
+  for (int k = 0; k < 8; k++) {
+    s1[k] <<= 1;
+  }
+  Mul(&s1, s1, j);
+  Subtract(&z1z1, v, out->x);
+  Reduce(&z1z1);
+  Mul(&z1z1, z1z1, r);
+  Subtract(&out->y, z1z1, s1);
+  Reduce(&out->y);
+
+  // If either input was the point at infinity, the result is the other
+  // input; these branch-free copies fix up the result in that case.
+  CopyConditional(out, a, z2_is_zero);
+  CopyConditional(out, b, z1_is_zero);
+}
+
+// DoubleJacobian computes *out = a+a.
+void DoubleJacobian(Point* out, const Point& a) {
+  // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
+  FieldElement delta, gamma, beta, alpha, t;
+
+  Square(&delta, a.z);
+  Square(&gamma, a.y);
+  Mul(&beta, a.x, gamma);
+
+  // alpha = 3*(X1-delta)*(X1+delta)
+  Add(&t, a.x, delta);
+  // t[i] += t[i] << 1 computes 3*t[i] limb-wise.
+  for (int i = 0; i < 8; i++) {
+    t[i] += t[i] << 1;
+  }
+  Reduce(&t);
+  Subtract(&alpha, a.x, delta);
+  Reduce(&alpha);
+  Mul(&alpha, alpha, t);
+
+  // Z3 = (Y1+Z1)²-gamma-delta
+  Add(&out->z, a.y, a.z);
+  Reduce(&out->z);
+  Square(&out->z, out->z);
+  Subtract(&out->z, out->z, gamma);
+  Reduce(&out->z);
+  Subtract(&out->z, out->z, delta);
+  Reduce(&out->z);
+
+  // X3 = alpha²-8*beta
+  for (int i = 0; i < 8; i++) {
+    delta[i] = beta[i] << 3;
+  }
+  Reduce(&delta);
+  Square(&out->x, alpha);
+  Subtract(&out->x, out->x, delta);
+  Reduce(&out->x);
+
+  // Y3 = alpha*(4*beta-X3)-8*gamma²
+  for (int i = 0; i < 8; i++) {
+    beta[i] <<= 2;
+  }
+  Reduce(&beta);
+  Subtract(&beta, beta, out->x);
+  Reduce(&beta);
+  Square(&gamma, gamma);
+  for (int i = 0; i < 8; i++) {
+    gamma[i] <<= 3;
+  }
+  Reduce(&gamma);
+  Mul(&out->y, alpha, beta);
+  Subtract(&out->y, out->y, gamma);
+  Reduce(&out->y);
+}
+
+// CopyConditional sets *out=a if mask is 0xffffffff. mask must be either 0 or
+// 0xffffffff. Branch-free: the same XOR/AND sequence executes regardless of
+// the mask value.
+void CopyConditional(Point* out, const Point& a, uint32_t mask) {
+  for (int i = 0; i < 8; i++) {
+    out->x[i] ^= mask & (a.x[i] ^ out->x[i]);
+    out->y[i] ^= mask & (a.y[i] ^ out->y[i]);
+    out->z[i] ^= mask & (a.z[i] ^ out->z[i]);
+  }
+}
+
+// ScalarMult calculates *out = a*scalar where scalar is a big-endian number of
+// length scalar_len and != 0.
+// Double-and-add, most-significant bit first: the addition is computed for
+// every bit and kept only when the bit is set, via a branch-free conditional
+// copy.
+void ScalarMult(Point* out,
+                const Point& a,
+                const uint8_t* scalar,
+                size_t scalar_len) {
+  // Start from the point at infinity (all-zero Jacobian coordinates).
+  memset(out, 0, sizeof(*out));
+  Point tmp;
+
+  for (size_t i = 0; i < scalar_len; i++) {
+    for (unsigned int bit_num = 0; bit_num < 8; bit_num++) {
+      DoubleJacobian(out, *out);
+      // bit is 0xffffffff if the current scalar bit is set, else 0.
+      uint32_t bit = static_cast<uint32_t>(static_cast<int32_t>(
+          (((scalar[i] >> (7 - bit_num)) & 1) << 31) >> 31));
+      AddJacobian(&tmp, a, *out);
+      CopyConditional(out, tmp, bit);
+    }
+  }
+}
+
+// Get224Bits reads 7 words from in and scatters their contents in
+// little-endian form into 8 words at out, 28 bits per output word.
+// |in| is big-endian (network order), most-significant word first.
+void Get224Bits(uint32_t* out, const uint32_t* in) {
+  out[0] = NetToHost32(in[6]) & kBottom28Bits;
+  out[1] = ((NetToHost32(in[5]) << 4) |
+            (NetToHost32(in[6]) >> 28)) & kBottom28Bits;
+  out[2] = ((NetToHost32(in[4]) << 8) |
+            (NetToHost32(in[5]) >> 24)) & kBottom28Bits;
+  out[3] = ((NetToHost32(in[3]) << 12) |
+            (NetToHost32(in[4]) >> 20)) & kBottom28Bits;
+  out[4] = ((NetToHost32(in[2]) << 16) |
+            (NetToHost32(in[3]) >> 16)) & kBottom28Bits;
+  out[5] = ((NetToHost32(in[1]) << 20) |
+            (NetToHost32(in[2]) >> 12)) & kBottom28Bits;
+  out[6] = ((NetToHost32(in[0]) << 24) |
+            (NetToHost32(in[1]) >> 8)) & kBottom28Bits;
+  out[7] = (NetToHost32(in[0]) >> 4) & kBottom28Bits;
+}
+
+// Put224Bits performs the inverse operation to Get224Bits: taking 28 bits from
+// each of 8 input words and writing them in big-endian order to 7 words at
+// out. The input limbs must already be contracted (< 2**28 each).
+void Put224Bits(uint32_t* out, const uint32_t* in) {
+  out[6] = HostToNet32((in[0] >> 0) | (in[1] << 28));
+  out[5] = HostToNet32((in[1] >> 4) | (in[2] << 24));
+  out[4] = HostToNet32((in[2] >> 8) | (in[3] << 20));
+  out[3] = HostToNet32((in[3] >> 12) | (in[4] << 16));
+  out[2] = HostToNet32((in[4] >> 16) | (in[5] << 12));
+  out[1] = HostToNet32((in[5] >> 20) | (in[6] << 8));
+  out[0] = HostToNet32((in[6] >> 24) | (in[7] << 4));
+}
+
+} // anonymous namespace
+
+namespace crypto {
+
+namespace p224 {
+
+// Parses the 56-byte external form (big-endian x then y) and validates that
+// the point lies on the curve. Returns false on wrong length or an
+// off-curve point.
+bool Point::SetFromString(const base::StringPiece& in) {
+  if (in.size() != 2*28)
+    return false;
+  const uint32_t* inwords = reinterpret_cast<const uint32_t*>(in.data());
+  Get224Bits(x, inwords);
+  Get224Bits(y, inwords + 7);
+  // Affine input: set z = 1.
+  memset(&z, 0, sizeof(z));
+  z[0] = 1;
+
+  // Check that the point is on the curve, i.e. that y² = x³ - 3x + b.
+  FieldElement lhs;
+  Square(&lhs, y);
+  Contract(&lhs);
+
+  FieldElement rhs;
+  Square(&rhs, x);
+  Mul(&rhs, x, rhs);
+
+  FieldElement three_x;
+  for (int i = 0; i < 8; i++) {
+    three_x[i] = x[i] * 3;
+  }
+  Reduce(&three_x);
+  Subtract(&rhs, rhs, three_x);
+  Reduce(&rhs);
+
+  ::Add(&rhs, rhs, kB);
+  Contract(&rhs);
+  // Both sides are contracted to their unique minimal forms, so a byte
+  // comparison decides equality.
+  return memcmp(&lhs, &rhs, sizeof(lhs)) == 0;
+}
+
+// Serializes the point to the 56-byte external form by converting the
+// Jacobian coordinates back to affine: x = X/Z², y = Y/Z³.
+std::string Point::ToString() const {
+  FieldElement zinv, zinv_sq, xx, yy;
+
+  // If this is the point at infinity we return a string of all zeros.
+  if (IsZero(this->z)) {
+    static const char zeros[56] = {0};
+    return std::string(zeros, sizeof(zeros));
+  }
+
+  Invert(&zinv, this->z);
+  Square(&zinv_sq, zinv);
+  Mul(&xx, x, zinv_sq);
+  // zinv_sq now holds Z**-3 for the y coordinate.
+  Mul(&zinv_sq, zinv_sq, zinv);
+  Mul(&yy, y, zinv_sq);
+
+  Contract(&xx);
+  Contract(&yy);
+
+  uint32_t outwords[14];
+  Put224Bits(outwords, xx);
+  Put224Bits(outwords + 7, yy);
+  return std::string(reinterpret_cast<const char*>(outwords), sizeof(outwords));
+}
+
+// Public wrapper: |scalar| is kScalarBytes (28) bytes, big-endian.
+void ScalarMult(const Point& in, const uint8_t* scalar, Point* out) {
+  ::ScalarMult(out, in, scalar, 28);
+}
+
+// kBasePoint is the base point (generator) of the elliptic curve group,
+// stored in the 28-bit limb representation with z = 1 (affine).
+static const Point kBasePoint = {
+    {22813985, 52956513, 34677300, 203240812,
+     12143107, 133374265, 225162431, 191946955},
+    {83918388, 223877528, 122119236, 123340192,
+     266784067, 263504429, 146143011, 198407736},
+    {1, 0, 0, 0, 0, 0, 0, 0},
+};
+
+// Public wrapper: multiplies the group generator by a 28-byte scalar.
+void ScalarBaseMult(const uint8_t* scalar, Point* out) {
+  ::ScalarMult(out, kBasePoint, scalar, 28);
+}
+
+// Public wrapper around the Jacobian addition routine.
+void Add(const Point& a, const Point& b, Point* out) {
+  AddJacobian(out, a, b);
+}
+
+void Negate(const Point& in, Point* out) {
+  // Guide to elliptic curve cryptography, page 89 suggests that (X : X+Y : Z)
+  // is the negative in Jacobian coordinates, but it doesn't actually appear to
+  // be true in testing so this performs the negation in affine coordinates.
+  // Convert to affine (x, y), then output (x, p - y, 1).
+  FieldElement zinv, zinv_sq, y;
+  Invert(&zinv, in.z);
+  Square(&zinv_sq, zinv);
+  Mul(&out->x, in.x, zinv_sq);
+  Mul(&zinv_sq, zinv_sq, zinv);
+  Mul(&y, in.y, zinv_sq);
+
+  Subtract(&out->y, kP, y);
+  Reduce(&out->y);
+
+  memset(&out->z, 0, sizeof(out->z));
+  out->z[0] = 1;
+}
+
+} // namespace p224
+
+} // namespace crypto
diff --git a/libchrome/crypto/p224.h b/libchrome/crypto/p224.h
new file mode 100644
index 0000000..e9a53a9
--- /dev/null
+++ b/libchrome/crypto/p224.h
@@ -0,0 +1,64 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_P224_H_
+#define CRYPTO_P224_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+
+#include "base/strings/string_piece.h"
+#include "crypto/crypto_export.h"
+
+namespace crypto {
+
+// P224 implements an elliptic curve group, commonly known as P224 and defined
+// in FIPS 186-3, section D.2.2.
+namespace p224 {
+
+// An element of the field (ℤ/pℤ) is represented with 8, 28-bit limbs in
+// little endian order.
+typedef uint32_t FieldElement[8];
+
+struct CRYPTO_EXPORT Point {
+ // SetFromString sets the value of the point from the 56-byte external
+ // representation. The external point representation is an (x, y) pair of a
+ // point on the curve. Each field element is represented as a big-endian
+ // number < p.
+ bool SetFromString(const base::StringPiece& in);
+
+ // ToString returns an external representation of the Point.
+ std::string ToString() const;
+
+ // A Point is represented in Jacobian form (x/z², y/z³).
+ FieldElement x, y, z;
+};
+
+// kScalarBytes is the number of bytes needed to represent an element of the
+// P224 field.
+static const size_t kScalarBytes = 28;
+
+// ScalarMult computes *out = in*scalar where scalar is a 28-byte, big-endian
+// number.
+void CRYPTO_EXPORT ScalarMult(const Point& in,
+ const uint8_t* scalar,
+ Point* out);
+
+// ScalarBaseMult computes *out = g*scalar where g is the base point of the
+// curve and scalar is a 28-byte, big-endian number.
+void CRYPTO_EXPORT ScalarBaseMult(const uint8_t* scalar, Point* out);
+
+// Add computes *out = a+b.
+void CRYPTO_EXPORT Add(const Point& a, const Point& b, Point* out);
+
+// Negate calculates out = -a.
+void CRYPTO_EXPORT Negate(const Point& a, Point* out);
+
+} // namespace p224
+
+} // namespace crypto
+
+#endif // CRYPTO_P224_H_
diff --git a/libchrome/crypto/p224_spake.cc b/libchrome/crypto/p224_spake.cc
new file mode 100644
index 0000000..1574105
--- /dev/null
+++ b/libchrome/crypto/p224_spake.cc
@@ -0,0 +1,268 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This code implements SPAKE2, a variant of EKE:
+// http://www.di.ens.fr/~pointche/pub.php?reference=AbPo04
+
+#include <crypto/p224_spake.h>
+
+#include <algorithm>
+
+#include <base/logging.h>
+#include <crypto/p224.h>
+#include <crypto/random.h>
+#include <crypto/secure_util.h>
+
+namespace {
+
+// The following two points (M and N in the protocol) are verifiable random
+// points on the curve and can be generated with the following code:
+
+// #include <stdint.h>
+// #include <stdio.h>
+// #include <string.h>
+//
+// #include <openssl/ec.h>
+// #include <openssl/obj_mac.h>
+// #include <openssl/sha.h>
+//
+// static const char kSeed1[] = "P224 point generation seed (M)";
+// static const char kSeed2[] = "P224 point generation seed (N)";
+//
+// void find_seed(const char* seed) {
+// SHA256_CTX sha256;
+// uint8_t digest[SHA256_DIGEST_LENGTH];
+//
+// SHA256_Init(&sha256);
+// SHA256_Update(&sha256, seed, strlen(seed));
+// SHA256_Final(digest, &sha256);
+//
+// BIGNUM x, y;
+// EC_GROUP* p224 = EC_GROUP_new_by_curve_name(NID_secp224r1);
+// EC_POINT* p = EC_POINT_new(p224);
+//
+// for (unsigned i = 0;; i++) {
+// BN_init(&x);
+// BN_bin2bn(digest, 28, &x);
+//
+// if (EC_POINT_set_compressed_coordinates_GFp(
+// p224, p, &x, digest[28] & 1, NULL)) {
+// BN_init(&y);
+// EC_POINT_get_affine_coordinates_GFp(p224, p, &x, &y, NULL);
+// char* x_str = BN_bn2hex(&x);
+// char* y_str = BN_bn2hex(&y);
+// printf("Found after %u iterations:\n%s\n%s\n", i, x_str, y_str);
+// OPENSSL_free(x_str);
+// OPENSSL_free(y_str);
+// BN_free(&x);
+// BN_free(&y);
+// break;
+// }
+//
+// SHA256_Init(&sha256);
+// SHA256_Update(&sha256, digest, sizeof(digest));
+// SHA256_Final(digest, &sha256);
+//
+// BN_free(&x);
+// }
+//
+// EC_POINT_free(p);
+// EC_GROUP_free(p224);
+// }
+//
+// int main() {
+// find_seed(kSeed1);
+// find_seed(kSeed2);
+// return 0;
+// }
+
+const crypto::p224::Point kM = {
+ {174237515, 77186811, 235213682, 33849492,
+ 33188520, 48266885, 177021753, 81038478},
+ {104523827, 245682244, 266509668, 236196369,
+ 28372046, 145351378, 198520366, 113345994},
+ {1, 0, 0, 0, 0, 0, 0, 0},
+};
+
+const crypto::p224::Point kN = {
+ {136176322, 263523628, 251628795, 229292285,
+ 5034302, 185981975, 171998428, 11653062},
+ {197567436, 51226044, 60372156, 175772188,
+ 42075930, 8083165, 160827401, 65097570},
+ {1, 0, 0, 0, 0, 0, 0, 0},
+};
+
+} // anonymous namespace
+
+namespace crypto {
+
+P224EncryptedKeyExchange::P224EncryptedKeyExchange(
+ PeerType peer_type, const base::StringPiece& password)
+ : state_(kStateInitial),
+ is_server_(peer_type == kPeerTypeServer) {
+ memset(&x_, 0, sizeof(x_));
+ memset(&expected_authenticator_, 0, sizeof(expected_authenticator_));
+
+ // x_ is a random scalar.
+ RandBytes(x_, sizeof(x_));
+
+ // Calculate |password| hash to get SPAKE password value.
+ SHA256HashString(std::string(password.data(), password.length()),
+ pw_, sizeof(pw_));
+
+ Init();
+}
+
+void P224EncryptedKeyExchange::Init() {
+ // X = g**x_
+ p224::Point X;
+ p224::ScalarBaseMult(x_, &X);
+
+ // The client masks the Diffie-Hellman value, X, by adding M**pw and the
+ // server uses N**pw.
+ p224::Point MNpw;
+ p224::ScalarMult(is_server_ ? kN : kM, pw_, &MNpw);
+
+ // X* = X + (N|M)**pw
+ p224::Point Xstar;
+ p224::Add(X, MNpw, &Xstar);
+
+ next_message_ = Xstar.ToString();
+}
+
+const std::string& P224EncryptedKeyExchange::GetNextMessage() {
+ if (state_ == kStateInitial) {
+ state_ = kStateRecvDH;
+ return next_message_;
+ } else if (state_ == kStateSendHash) {
+ state_ = kStateRecvHash;
+ return next_message_;
+ }
+
+ LOG(FATAL) << "P224EncryptedKeyExchange::GetNextMessage called in"
+ " bad state " << state_;
+ next_message_ = "";
+ return next_message_;
+}
+
+P224EncryptedKeyExchange::Result P224EncryptedKeyExchange::ProcessMessage(
+ const base::StringPiece& message) {
+ if (state_ == kStateRecvHash) {
+ // This is the final state of the protocol: we are reading the peer's
+ // authentication hash and checking that it matches the one that we expect.
+ if (message.size() != sizeof(expected_authenticator_)) {
+ error_ = "peer's hash had an incorrect size";
+ return kResultFailed;
+ }
+ if (!SecureMemEqual(message.data(), expected_authenticator_,
+ message.size())) {
+ error_ = "peer's hash had incorrect value";
+ return kResultFailed;
+ }
+ state_ = kStateDone;
+ return kResultSuccess;
+ }
+
+ if (state_ != kStateRecvDH) {
+ LOG(FATAL) << "P224EncryptedKeyExchange::ProcessMessage called in"
+ " bad state " << state_;
+ error_ = "internal error";
+ return kResultFailed;
+ }
+
+ // Y* is the other party's masked, Diffie-Hellman value.
+ p224::Point Ystar;
+ if (!Ystar.SetFromString(message)) {
+ error_ = "failed to parse peer's masked Diffie-Hellman value";
+ return kResultFailed;
+ }
+
+ // We calculate the mask value: (N|M)**pw
+ p224::Point MNpw, minus_MNpw, Y, k;
+ p224::ScalarMult(is_server_ ? kM : kN, pw_, &MNpw);
+ p224::Negate(MNpw, &minus_MNpw);
+
+ // Y = Y* - (N|M)**pw
+ p224::Add(Ystar, minus_MNpw, &Y);
+
+ // K = Y**x_
+ p224::ScalarMult(Y, x_, &k);
+
+ // If everything worked out, then K is the same for both parties.
+ key_ = k.ToString();
+
+ std::string client_masked_dh, server_masked_dh;
+ if (is_server_) {
+ client_masked_dh = message.as_string();
+ server_masked_dh = next_message_;
+ } else {
+ client_masked_dh = next_message_;
+ server_masked_dh = message.as_string();
+ }
+
+ // Now we calculate the hashes that each side will use to prove to the other
+ // that they derived the correct value for K.
+ uint8_t client_hash[kSHA256Length], server_hash[kSHA256Length];
+ CalculateHash(kPeerTypeClient, client_masked_dh, server_masked_dh, key_,
+ client_hash);
+ CalculateHash(kPeerTypeServer, client_masked_dh, server_masked_dh, key_,
+ server_hash);
+
+ const uint8_t* my_hash = is_server_ ? server_hash : client_hash;
+ const uint8_t* their_hash = is_server_ ? client_hash : server_hash;
+
+ next_message_ =
+ std::string(reinterpret_cast<const char*>(my_hash), kSHA256Length);
+ memcpy(expected_authenticator_, their_hash, kSHA256Length);
+ state_ = kStateSendHash;
+ return kResultPending;
+}
+
+void P224EncryptedKeyExchange::CalculateHash(
+ PeerType peer_type,
+ const std::string& client_masked_dh,
+ const std::string& server_masked_dh,
+ const std::string& k,
+ uint8_t* out_digest) {
+ std::string hash_contents;
+
+ if (peer_type == kPeerTypeServer) {
+ hash_contents = "server";
+ } else {
+ hash_contents = "client";
+ }
+
+ hash_contents += client_masked_dh;
+ hash_contents += server_masked_dh;
+ hash_contents +=
+ std::string(reinterpret_cast<const char *>(pw_), sizeof(pw_));
+ hash_contents += k;
+
+ SHA256HashString(hash_contents, out_digest, kSHA256Length);
+}
+
+const std::string& P224EncryptedKeyExchange::error() const {
+ return error_;
+}
+
+const std::string& P224EncryptedKeyExchange::GetKey() const {
+ DCHECK_EQ(state_, kStateDone);
+ return GetUnverifiedKey();
+}
+
+const std::string& P224EncryptedKeyExchange::GetUnverifiedKey() const {
+ // Key is already final when state is kStateSendHash. Subsequent states are
+ // used only for verification of the key. Some users may combine verification
+ // with sending verifiable data instead of |expected_authenticator_|.
+ DCHECK_GE(state_, kStateSendHash);
+ return key_;
+}
+
+void P224EncryptedKeyExchange::SetXForTesting(const std::string& x) {
+ memset(&x_, 0, sizeof(x_));
+ memcpy(&x_, x.data(), std::min(x.size(), sizeof(x_)));
+ Init();
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/p224_spake.h b/libchrome/crypto/p224_spake.h
new file mode 100644
index 0000000..f9a44e7
--- /dev/null
+++ b/libchrome/crypto/p224_spake.h
@@ -0,0 +1,127 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_P224_SPAKE_H_
+#define CRYPTO_P224_SPAKE_H_
+
+#include <crypto/p224.h>
+#include <crypto/sha2.h>
+#include <stdint.h>
+
+#include "base/gtest_prod_util.h"
+#include "base/strings/string_piece.h"
+
+namespace crypto {
+
+// P224EncryptedKeyExchange implements SPAKE2, a variant of Encrypted
+// Key Exchange. It allows two parties that have a secret common
+// password to establish a common secure key by exchanging messages
+// over an insecure channel without disclosing the password.
+//
+// The password can be low entropy as authenticating with an attacker only
+// gives the attacker a one-shot password oracle. No other information about
+// the password is leaked. (However, you must be sure to limit the number of
+// permitted authentication attempts otherwise they get many one-shot oracles.)
+//
+// The protocol requires several RTTs (actually two, but you shouldn't assume
+// that.) To use the object, call GetNextMessage() and pass that message to the
+// peer. Get a message from the peer and feed it into ProcessMessage. Then
+// examine the return value of ProcessMessage:
+// kResultPending: Another round is required. Call GetNextMessage and repeat.
+// kResultFailed: The authentication has failed. You can get a human readable
+// error message by calling error().
+// kResultSuccess: The authentication was successful.
+//
+// In each exchange, each peer always sends a message.
+class CRYPTO_EXPORT P224EncryptedKeyExchange {
+ public:
+ enum Result {
+ kResultPending,
+ kResultFailed,
+ kResultSuccess,
+ };
+
+ // PeerType's values are named client and server due to convention. But
+ // they could be called "A" and "B" as far as the protocol is concerned so
+ // long as the two parties don't both get the same label.
+ enum PeerType {
+ kPeerTypeClient,
+ kPeerTypeServer,
+ };
+
+ // peer_type: the type of the local authentication party.
+ // password: secret session password. Both parties to the
+ // authentication must pass the same value. For the case of a
+ // TLS connection, see RFC 5705.
+ P224EncryptedKeyExchange(PeerType peer_type,
+ const base::StringPiece& password);
+
+ // GetNextMessage returns a byte string which must be passed to the other
+ // party in the authentication.
+ const std::string& GetNextMessage();
+
+ // ProcessMessage processes a message which must have been generated by a
+ // call to GetNextMessage() by the other party.
+ Result ProcessMessage(const base::StringPiece& message);
+
+ // In the event that ProcessMessage() returns kResultFailed, error will
+ // return a human readable error message.
+ const std::string& error() const;
+
+ // The key established as result of the key exchange. Must be called
+ // at the end after ProcessMessage() returns kResultSuccess.
+ const std::string& GetKey() const;
+
+ // The key established as result of the key exchange. Can be called after
+ // the first ProcessMessage().
+ const std::string& GetUnverifiedKey() const;
+
+ private:
+ // The authentication state machine is very simple and each party proceeds
+ // through each of these states, in order.
+ enum State {
+ kStateInitial,
+ kStateRecvDH,
+ kStateSendHash,
+ kStateRecvHash,
+ kStateDone,
+ };
+
+ FRIEND_TEST_ALL_PREFIXES(MutualAuth, ExpectedValues);
+
+ void Init();
+
+ // Sets internal random scalar. Should be used by tests only.
+ void SetXForTesting(const std::string& x);
+
+ State state_;
+ const bool is_server_;
+ // next_message_ contains a value for GetNextMessage() to return.
+ std::string next_message_;
+ std::string error_;
+
+ // CalculateHash computes the verification hash for the given peer and writes
+ // |kSHA256Length| bytes at |out_digest|.
+ void CalculateHash(PeerType peer_type,
+ const std::string& client_masked_dh,
+ const std::string& server_masked_dh,
+ const std::string& k,
+ uint8_t* out_digest);
+
+ // x_ is the secret Diffie-Hellman exponent (see paper referenced in .cc
+ // file).
+ uint8_t x_[p224::kScalarBytes];
+ // pw_ is SHA256(P(password), P(session))[:28] where P() prepends a uint32_t,
+ // big-endian length prefix (see paper referenced in .cc file).
+ uint8_t pw_[p224::kScalarBytes];
+ // expected_authenticator_ is used to store the hash value expected from the
+ // other party.
+ uint8_t expected_authenticator_[kSHA256Length];
+
+ std::string key_;
+};
+
+} // namespace crypto
+
+#endif // CRYPTO_P224_SPAKE_H_
diff --git a/libchrome/crypto/p224_spake_unittest.cc b/libchrome/crypto/p224_spake_unittest.cc
new file mode 100644
index 0000000..3bca430
--- /dev/null
+++ b/libchrome/crypto/p224_spake_unittest.cc
@@ -0,0 +1,177 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/p224_spake.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace crypto {
+
+namespace {
+
+std::string HexEncodeString(const std::string& binary_data) {
+ return base::HexEncode(binary_data.c_str(), binary_data.size());
+}
+
+bool RunExchange(P224EncryptedKeyExchange* client,
+ P224EncryptedKeyExchange* server,
+ bool is_password_same) {
+ for (;;) {
+ std::string client_message, server_message;
+ client_message = client->GetNextMessage();
+ server_message = server->GetNextMessage();
+
+ P224EncryptedKeyExchange::Result client_result, server_result;
+ client_result = client->ProcessMessage(server_message);
+ server_result = server->ProcessMessage(client_message);
+
+ // Check that we never hit the case where only one succeeds.
+ EXPECT_EQ(client_result == P224EncryptedKeyExchange::kResultSuccess,
+ server_result == P224EncryptedKeyExchange::kResultSuccess);
+
+ if (client_result == P224EncryptedKeyExchange::kResultFailed ||
+ server_result == P224EncryptedKeyExchange::kResultFailed) {
+ return false;
+ }
+
+ EXPECT_EQ(is_password_same,
+ client->GetUnverifiedKey() == server->GetUnverifiedKey());
+
+ if (client_result == P224EncryptedKeyExchange::kResultSuccess &&
+ server_result == P224EncryptedKeyExchange::kResultSuccess) {
+ return true;
+ }
+
+ EXPECT_EQ(P224EncryptedKeyExchange::kResultPending, client_result);
+ EXPECT_EQ(P224EncryptedKeyExchange::kResultPending, server_result);
+ }
+}
+
+const char kPassword[] = "foo";
+
+} // namespace
+
+TEST(MutualAuth, CorrectAuth) {
+ P224EncryptedKeyExchange client(
+ P224EncryptedKeyExchange::kPeerTypeClient, kPassword);
+ P224EncryptedKeyExchange server(
+ P224EncryptedKeyExchange::kPeerTypeServer, kPassword);
+
+ EXPECT_TRUE(RunExchange(&client, &server, true));
+ EXPECT_EQ(client.GetKey(), server.GetKey());
+}
+
+TEST(MutualAuth, IncorrectPassword) {
+ P224EncryptedKeyExchange client(
+ P224EncryptedKeyExchange::kPeerTypeClient,
+ kPassword);
+ P224EncryptedKeyExchange server(
+ P224EncryptedKeyExchange::kPeerTypeServer,
+ "wrongpassword");
+
+ EXPECT_FALSE(RunExchange(&client, &server, false));
+}
+
+TEST(MutualAuth, ExpectedValues) {
+ P224EncryptedKeyExchange client(P224EncryptedKeyExchange::kPeerTypeClient,
+ kPassword);
+ client.SetXForTesting("Client x");
+ P224EncryptedKeyExchange server(P224EncryptedKeyExchange::kPeerTypeServer,
+ kPassword);
+ server.SetXForTesting("Server x");
+
+ std::string client_message = client.GetNextMessage();
+ EXPECT_EQ(
+ "3508EF7DECC8AB9F9C439FBB0154288BBECC0A82E8448F4CF29554EB"
+ "BE9D486686226255EAD1D077C635B1A41F46AC91D7F7F32CED9EC3E0",
+ HexEncodeString(client_message));
+
+ std::string server_message = server.GetNextMessage();
+ EXPECT_EQ(
+ "A3088C18B75D2C2B107105661AEC85424777475EB29F1DDFB8C14AFB"
+ "F1603D0DF38413A00F420ACF2059E7997C935F5A957A193D09A2B584",
+ HexEncodeString(server_message));
+
+ EXPECT_EQ(P224EncryptedKeyExchange::kResultPending,
+ client.ProcessMessage(server_message));
+ EXPECT_EQ(P224EncryptedKeyExchange::kResultPending,
+ server.ProcessMessage(client_message));
+
+ EXPECT_EQ(client.GetUnverifiedKey(), server.GetUnverifiedKey());
+ // Must stay the same. External implementations should be able to pair with.
+ EXPECT_EQ(
+ "CE7CCFC435CDA4F01EC8826788B1F8B82EF7D550A34696B371096E64"
+ "C487D4FE193F7D1A6FF6820BC7F807796BA3889E8F999BBDEFC32FFA",
+ HexEncodeString(server.GetUnverifiedKey()));
+
+ EXPECT_TRUE(RunExchange(&client, &server, true));
+ EXPECT_EQ(client.GetKey(), server.GetKey());
+}
+
+TEST(MutualAuth, Fuzz) {
+ static const unsigned kIterations = 40;
+
+ for (unsigned i = 0; i < kIterations; i++) {
+ P224EncryptedKeyExchange client(
+ P224EncryptedKeyExchange::kPeerTypeClient, kPassword);
+ P224EncryptedKeyExchange server(
+ P224EncryptedKeyExchange::kPeerTypeServer, kPassword);
+
+ // We'll only be testing small values of i, but we don't want that to bias
+ // the test coverage. So we disperse the value of i by multiplying by the
+ // FNV, 32-bit prime, producing a poor-man's PRNG.
+ const uint32_t rand = i * 16777619;
+
+ for (unsigned round = 0;; round++) {
+ std::string client_message, server_message;
+ client_message = client.GetNextMessage();
+ server_message = server.GetNextMessage();
+
+ if ((rand & 1) == round) {
+ const bool server_or_client = rand & 2;
+ std::string* m = server_or_client ? &server_message : &client_message;
+ if (rand & 4) {
+ // Truncate
+ *m = m->substr(0, (i >> 3) % m->size());
+ } else {
+ // Corrupt
+ const size_t bits = m->size() * 8;
+ const size_t bit_to_corrupt = (rand >> 3) % bits;
+ const_cast<char*>(m->data())[bit_to_corrupt / 8] ^=
+ 1 << (bit_to_corrupt % 8);
+ }
+ }
+
+ P224EncryptedKeyExchange::Result client_result, server_result;
+ client_result = client.ProcessMessage(server_message);
+ server_result = server.ProcessMessage(client_message);
+
+ // If we have corrupted anything, we expect the authentication to fail,
+ // although one side can succeed if we happen to corrupt the second round
+ // message to the other.
+ ASSERT_FALSE(
+ client_result == P224EncryptedKeyExchange::kResultSuccess &&
+ server_result == P224EncryptedKeyExchange::kResultSuccess);
+
+ if (client_result == P224EncryptedKeyExchange::kResultFailed ||
+ server_result == P224EncryptedKeyExchange::kResultFailed) {
+ break;
+ }
+
+ ASSERT_EQ(P224EncryptedKeyExchange::kResultPending,
+ client_result);
+ ASSERT_EQ(P224EncryptedKeyExchange::kResultPending,
+ server_result);
+ }
+ }
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/p224_unittest.cc b/libchrome/crypto/p224_unittest.cc
new file mode 100644
index 0000000..faa08eb
--- /dev/null
+++ b/libchrome/crypto/p224_unittest.cc
@@ -0,0 +1,825 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "crypto/p224.h"
+
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace crypto {
+
+using p224::Point;
+
+// kBasePointExternal is the P224 base point in external representation.
+static const uint8_t kBasePointExternal[56] = {
+ 0xb7, 0x0e, 0x0c, 0xbd, 0x6b, 0xb4, 0xbf, 0x7f, 0x32, 0x13, 0x90, 0xb9,
+ 0x4a, 0x03, 0xc1, 0xd3, 0x56, 0xc2, 0x11, 0x22, 0x34, 0x32, 0x80, 0xd6,
+ 0x11, 0x5c, 0x1d, 0x21, 0xbd, 0x37, 0x63, 0x88, 0xb5, 0xf7, 0x23, 0xfb,
+ 0x4c, 0x22, 0xdf, 0xe6, 0xcd, 0x43, 0x75, 0xa0, 0x5a, 0x07, 0x47, 0x64,
+ 0x44, 0xd5, 0x81, 0x99, 0x85, 0x00, 0x7e, 0x34,
+};
+
+// TestVector represents a test of scalar multiplication of the base point.
+// |scalar| is a big-endian scalar and |affine| is the external representation
+// of g*scalar.
+struct TestVector {
+ uint8_t scalar[28];
+ uint8_t affine[28 * 2];
+};
+
+static const int kNumNISTTestVectors = 52;
+
+// kNISTTestVectors are the NIST test vectors for P224.
+static const TestVector kNISTTestVectors[kNumNISTTestVectors] = {
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01},
+ {0xb7, 0x0e, 0x0c, 0xbd, 0x6b, 0xb4, 0xbf, 0x7f,
+ 0x32, 0x13, 0x90, 0xb9, 0x4a, 0x03, 0xc1, 0xd3,
+ 0x56, 0xc2, 0x11, 0x22, 0x34, 0x32, 0x80, 0xd6,
+ 0x11, 0x5c, 0x1d, 0x21, 0xbd, 0x37, 0x63, 0x88,
+ 0xb5, 0xf7, 0x23, 0xfb, 0x4c, 0x22, 0xdf, 0xe6,
+ 0xcd, 0x43, 0x75, 0xa0, 0x5a, 0x07, 0x47, 0x64,
+ 0x44, 0xd5, 0x81, 0x99, 0x85, 0x00, 0x7e, 0x34
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02, },
+
+ {0x70, 0x6a, 0x46, 0xdc, 0x76, 0xdc, 0xb7, 0x67,
+ 0x98, 0xe6, 0x0e, 0x6d, 0x89, 0x47, 0x47, 0x88,
+ 0xd1, 0x6d, 0xc1, 0x80, 0x32, 0xd2, 0x68, 0xfd,
+ 0x1a, 0x70, 0x4f, 0xa6, 0x1c, 0x2b, 0x76, 0xa7,
+ 0xbc, 0x25, 0xe7, 0x70, 0x2a, 0x70, 0x4f, 0xa9,
+ 0x86, 0x89, 0x28, 0x49, 0xfc, 0xa6, 0x29, 0x48,
+ 0x7a, 0xcf, 0x37, 0x09, 0xd2, 0xe4, 0xe8, 0xbb,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x03, },
+ {0xdf, 0x1b, 0x1d, 0x66, 0xa5, 0x51, 0xd0, 0xd3,
+ 0x1e, 0xff, 0x82, 0x25, 0x58, 0xb9, 0xd2, 0xcc,
+ 0x75, 0xc2, 0x18, 0x02, 0x79, 0xfe, 0x0d, 0x08,
+ 0xfd, 0x89, 0x6d, 0x04, 0xa3, 0xf7, 0xf0, 0x3c,
+ 0xad, 0xd0, 0xbe, 0x44, 0x4c, 0x0a, 0xa5, 0x68,
+ 0x30, 0x13, 0x0d, 0xdf, 0x77, 0xd3, 0x17, 0x34,
+ 0x4e, 0x1a, 0xf3, 0x59, 0x19, 0x81, 0xa9, 0x25,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x04, },
+ {0xae, 0x99, 0xfe, 0xeb, 0xb5, 0xd2, 0x69, 0x45,
+ 0xb5, 0x48, 0x92, 0x09, 0x2a, 0x8a, 0xee, 0x02,
+ 0x91, 0x29, 0x30, 0xfa, 0x41, 0xcd, 0x11, 0x4e,
+ 0x40, 0x44, 0x73, 0x01, 0x04, 0x82, 0x58, 0x0a,
+ 0x0e, 0xc5, 0xbc, 0x47, 0xe8, 0x8b, 0xc8, 0xc3,
+ 0x78, 0x63, 0x2c, 0xd1, 0x96, 0xcb, 0x3f, 0xa0,
+ 0x58, 0xa7, 0x11, 0x4e, 0xb0, 0x30, 0x54, 0xc9,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x05, },
+ {0x31, 0xc4, 0x9a, 0xe7, 0x5b, 0xce, 0x78, 0x07,
+ 0xcd, 0xff, 0x22, 0x05, 0x5d, 0x94, 0xee, 0x90,
+ 0x21, 0xfe, 0xdb, 0xb5, 0xab, 0x51, 0xc5, 0x75,
+ 0x26, 0xf0, 0x11, 0xaa, 0x27, 0xe8, 0xbf, 0xf1,
+ 0x74, 0x56, 0x35, 0xec, 0x5b, 0xa0, 0xc9, 0xf1,
+ 0xc2, 0xed, 0xe1, 0x54, 0x14, 0xc6, 0x50, 0x7d,
+ 0x29, 0xff, 0xe3, 0x7e, 0x79, 0x0a, 0x07, 0x9b,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x06, },
+ {0x1f, 0x24, 0x83, 0xf8, 0x25, 0x72, 0x25, 0x1f,
+ 0xca, 0x97, 0x5f, 0xea, 0x40, 0xdb, 0x82, 0x1d,
+ 0xf8, 0xad, 0x82, 0xa3, 0xc0, 0x02, 0xee, 0x6c,
+ 0x57, 0x11, 0x24, 0x08, 0x89, 0xfa, 0xf0, 0xcc,
+ 0xb7, 0x50, 0xd9, 0x9b, 0x55, 0x3c, 0x57, 0x4f,
+ 0xad, 0x7e, 0xcf, 0xb0, 0x43, 0x85, 0x86, 0xeb,
+ 0x39, 0x52, 0xaf, 0x5b, 0x4b, 0x15, 0x3c, 0x7e,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x07, },
+ {0xdb, 0x2f, 0x6b, 0xe6, 0x30, 0xe2, 0x46, 0xa5,
+ 0xcf, 0x7d, 0x99, 0xb8, 0x51, 0x94, 0xb1, 0x23,
+ 0xd4, 0x87, 0xe2, 0xd4, 0x66, 0xb9, 0x4b, 0x24,
+ 0xa0, 0x3c, 0x3e, 0x28, 0x0f, 0x3a, 0x30, 0x08,
+ 0x54, 0x97, 0xf2, 0xf6, 0x11, 0xee, 0x25, 0x17,
+ 0xb1, 0x63, 0xef, 0x8c, 0x53, 0xb7, 0x15, 0xd1,
+ 0x8b, 0xb4, 0xe4, 0x80, 0x8d, 0x02, 0xb9, 0x63,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, },
+ {0x85, 0x8e, 0x6f, 0x9c, 0xc6, 0xc1, 0x2c, 0x31,
+ 0xf5, 0xdf, 0x12, 0x4a, 0xa7, 0x77, 0x67, 0xb0,
+ 0x5c, 0x8b, 0xc0, 0x21, 0xbd, 0x68, 0x3d, 0x2b,
+ 0x55, 0x57, 0x15, 0x50, 0x04, 0x6d, 0xcd, 0x3e,
+ 0xa5, 0xc4, 0x38, 0x98, 0xc5, 0xc5, 0xfc, 0x4f,
+ 0xda, 0xc7, 0xdb, 0x39, 0xc2, 0xf0, 0x2e, 0xbe,
+ 0xe4, 0xe3, 0x54, 0x1d, 0x1e, 0x78, 0x04, 0x7a,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x09, },
+ {0x2f, 0xdc, 0xcc, 0xfe, 0xe7, 0x20, 0xa7, 0x7e,
+ 0xf6, 0xcb, 0x3b, 0xfb, 0xb4, 0x47, 0xf9, 0x38,
+ 0x31, 0x17, 0xe3, 0xda, 0xa4, 0xa0, 0x7e, 0x36,
+ 0xed, 0x15, 0xf7, 0x8d, 0x37, 0x17, 0x32, 0xe4,
+ 0xf4, 0x1b, 0xf4, 0xf7, 0x88, 0x30, 0x35, 0xe6,
+ 0xa7, 0x9f, 0xce, 0xdc, 0x0e, 0x19, 0x6e, 0xb0,
+ 0x7b, 0x48, 0x17, 0x16, 0x97, 0x51, 0x74, 0x63,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x0a, },
+ {0xae, 0xa9, 0xe1, 0x7a, 0x30, 0x65, 0x17, 0xeb,
+ 0x89, 0x15, 0x2a, 0xa7, 0x09, 0x6d, 0x2c, 0x38,
+ 0x1e, 0xc8, 0x13, 0xc5, 0x1a, 0xa8, 0x80, 0xe7,
+ 0xbe, 0xe2, 0xc0, 0xfd, 0x39, 0xbb, 0x30, 0xea,
+ 0xb3, 0x37, 0xe0, 0xa5, 0x21, 0xb6, 0xcb, 0xa1,
+ 0xab, 0xe4, 0xb2, 0xb3, 0xa3, 0xe5, 0x24, 0xc1,
+ 0x4a, 0x3f, 0xe3, 0xeb, 0x11, 0x6b, 0x65, 0x5f,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x0b, },
+ {0xef, 0x53, 0xb6, 0x29, 0x4a, 0xca, 0x43, 0x1f,
+ 0x0f, 0x3c, 0x22, 0xdc, 0x82, 0xeb, 0x90, 0x50,
+ 0x32, 0x4f, 0x1d, 0x88, 0xd3, 0x77, 0xe7, 0x16,
+ 0x44, 0x8e, 0x50, 0x7c, 0x20, 0xb5, 0x10, 0x00,
+ 0x40, 0x92, 0xe9, 0x66, 0x36, 0xcf, 0xb7, 0xe3,
+ 0x2e, 0xfd, 0xed, 0x82, 0x65, 0xc2, 0x66, 0xdf,
+ 0xb7, 0x54, 0xfa, 0x6d, 0x64, 0x91, 0xa6, 0xda,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x0c, },
+ {0x6e, 0x31, 0xee, 0x1d, 0xc1, 0x37, 0xf8, 0x1b,
+ 0x05, 0x67, 0x52, 0xe4, 0xde, 0xab, 0x14, 0x43,
+ 0xa4, 0x81, 0x03, 0x3e, 0x9b, 0x4c, 0x93, 0xa3,
+ 0x04, 0x4f, 0x4f, 0x7a, 0x20, 0x7d, 0xdd, 0xf0,
+ 0x38, 0x5b, 0xfd, 0xea, 0xb6, 0xe9, 0xac, 0xda,
+ 0x8d, 0xa0, 0x6b, 0x3b, 0xbe, 0xf2, 0x24, 0xa9,
+ 0x3a, 0xb1, 0xe9, 0xe0, 0x36, 0x10, 0x9d, 0x13,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x0d, },
+ {0x34, 0xe8, 0xe1, 0x7a, 0x43, 0x0e, 0x43, 0x28,
+ 0x97, 0x93, 0xc3, 0x83, 0xfa, 0xc9, 0x77, 0x42,
+ 0x47, 0xb4, 0x0e, 0x9e, 0xbd, 0x33, 0x66, 0x98,
+ 0x1f, 0xcf, 0xae, 0xca, 0x25, 0x28, 0x19, 0xf7,
+ 0x1c, 0x7f, 0xb7, 0xfb, 0xcb, 0x15, 0x9b, 0xe3,
+ 0x37, 0xd3, 0x7d, 0x33, 0x36, 0xd7, 0xfe, 0xb9,
+ 0x63, 0x72, 0x4f, 0xdf, 0xb0, 0xec, 0xb7, 0x67,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x0e, },
+ {0xa5, 0x36, 0x40, 0xc8, 0x3d, 0xc2, 0x08, 0x60,
+ 0x3d, 0xed, 0x83, 0xe4, 0xec, 0xf7, 0x58, 0xf2,
+ 0x4c, 0x35, 0x7d, 0x7c, 0xf4, 0x80, 0x88, 0xb2,
+ 0xce, 0x01, 0xe9, 0xfa, 0xd5, 0x81, 0x4c, 0xd7,
+ 0x24, 0x19, 0x9c, 0x4a, 0x5b, 0x97, 0x4a, 0x43,
+ 0x68, 0x5f, 0xbf, 0x5b, 0x8b, 0xac, 0x69, 0x45,
+ 0x9c, 0x94, 0x69, 0xbc, 0x8f, 0x23, 0xcc, 0xaf,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x0f, },
+ {0xba, 0xa4, 0xd8, 0x63, 0x55, 0x11, 0xa7, 0xd2,
+ 0x88, 0xae, 0xbe, 0xed, 0xd1, 0x2c, 0xe5, 0x29,
+ 0xff, 0x10, 0x2c, 0x91, 0xf9, 0x7f, 0x86, 0x7e,
+ 0x21, 0x91, 0x6b, 0xf9, 0x97, 0x9a, 0x5f, 0x47,
+ 0x59, 0xf8, 0x0f, 0x4f, 0xb4, 0xec, 0x2e, 0x34,
+ 0xf5, 0x56, 0x6d, 0x59, 0x56, 0x80, 0xa1, 0x17,
+ 0x35, 0xe7, 0xb6, 0x10, 0x46, 0x12, 0x79, 0x89,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x10, },
+ {0x0b, 0x6e, 0xc4, 0xfe, 0x17, 0x77, 0x38, 0x24,
+ 0x04, 0xef, 0x67, 0x99, 0x97, 0xba, 0x8d, 0x1c,
+ 0xc5, 0xcd, 0x8e, 0x85, 0x34, 0x92, 0x59, 0xf5,
+ 0x90, 0xc4, 0xc6, 0x6d, 0x33, 0x99, 0xd4, 0x64,
+ 0x34, 0x59, 0x06, 0xb1, 0x1b, 0x00, 0xe3, 0x63,
+ 0xef, 0x42, 0x92, 0x21, 0xf2, 0xec, 0x72, 0x0d,
+ 0x2f, 0x66, 0x5d, 0x7d, 0xea, 0xd5, 0xb4, 0x82,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x11, },
+ {0xb8, 0x35, 0x7c, 0x3a, 0x6c, 0xee, 0xf2, 0x88,
+ 0x31, 0x0e, 0x17, 0xb8, 0xbf, 0xef, 0xf9, 0x20,
+ 0x08, 0x46, 0xca, 0x8c, 0x19, 0x42, 0x49, 0x7c,
+ 0x48, 0x44, 0x03, 0xbc, 0xff, 0x14, 0x9e, 0xfa,
+ 0x66, 0x06, 0xa6, 0xbd, 0x20, 0xef, 0x7d, 0x1b,
+ 0x06, 0xbd, 0x92, 0xf6, 0x90, 0x46, 0x39, 0xdc,
+ 0xe5, 0x17, 0x4d, 0xb6, 0xcc, 0x55, 0x4a, 0x26,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x12, },
+ {0xc9, 0xff, 0x61, 0xb0, 0x40, 0x87, 0x4c, 0x05,
+ 0x68, 0x47, 0x92, 0x16, 0x82, 0x4a, 0x15, 0xea,
+ 0xb1, 0xa8, 0x38, 0xa7, 0x97, 0xd1, 0x89, 0x74,
+ 0x62, 0x26, 0xe4, 0xcc, 0xea, 0x98, 0xd6, 0x0e,
+ 0x5f, 0xfc, 0x9b, 0x8f, 0xcf, 0x99, 0x9f, 0xab,
+ 0x1d, 0xf7, 0xe7, 0xef, 0x70, 0x84, 0xf2, 0x0d,
+ 0xdb, 0x61, 0xbb, 0x04, 0x5a, 0x6c, 0xe0, 0x02,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x13, },
+ {0xa1, 0xe8, 0x1c, 0x04, 0xf3, 0x0c, 0xe2, 0x01,
+ 0xc7, 0xc9, 0xac, 0xe7, 0x85, 0xed, 0x44, 0xcc,
+ 0x33, 0xb4, 0x55, 0xa0, 0x22, 0xf2, 0xac, 0xdb,
+ 0xc6, 0xca, 0xe8, 0x3c, 0xdc, 0xf1, 0xf6, 0xc3,
+ 0xdb, 0x09, 0xc7, 0x0a, 0xcc, 0x25, 0x39, 0x1d,
+ 0x49, 0x2f, 0xe2, 0x5b, 0x4a, 0x18, 0x0b, 0xab,
+ 0xd6, 0xce, 0xa3, 0x56, 0xc0, 0x47, 0x19, 0xcd,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x14, },
+ {0xfc, 0xc7, 0xf2, 0xb4, 0x5d, 0xf1, 0xcd, 0x5a,
+ 0x3c, 0x0c, 0x07, 0x31, 0xca, 0x47, 0xa8, 0xaf,
+ 0x75, 0xcf, 0xb0, 0x34, 0x7e, 0x83, 0x54, 0xee,
+ 0xfe, 0x78, 0x24, 0x55, 0x0d, 0x5d, 0x71, 0x10,
+ 0x27, 0x4c, 0xba, 0x7c, 0xde, 0xe9, 0x0e, 0x1a,
+ 0x8b, 0x0d, 0x39, 0x4c, 0x37, 0x6a, 0x55, 0x73,
+ 0xdb, 0x6b, 0xe0, 0xbf, 0x27, 0x47, 0xf5, 0x30,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x8e, 0xbb, 0xb9,
+ 0x5e, 0xed, 0x0e, 0x13, },
+ {0x61, 0xf0, 0x77, 0xc6, 0xf6, 0x2e, 0xd8, 0x02,
+ 0xda, 0xd7, 0xc2, 0xf3, 0x8f, 0x5c, 0x67, 0xf2,
+ 0xcc, 0x45, 0x36, 0x01, 0xe6, 0x1b, 0xd0, 0x76,
+ 0xbb, 0x46, 0x17, 0x9e, 0x22, 0x72, 0xf9, 0xe9,
+ 0xf5, 0x93, 0x3e, 0x70, 0x38, 0x8e, 0xe6, 0x52,
+ 0x51, 0x34, 0x43, 0xb5, 0xe2, 0x89, 0xdd, 0x13,
+ 0x5d, 0xcc, 0x0d, 0x02, 0x99, 0xb2, 0x25, 0xe4,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x9d, 0x89,
+ 0x3d, 0x4c, 0xdd, 0x74, 0x72, 0x46, 0xcd, 0xca,
+ 0x43, 0x59, 0x0e, 0x13, },
+ {0x02, 0x98, 0x95, 0xf0, 0xaf, 0x49, 0x6b, 0xfc,
+ 0x62, 0xb6, 0xef, 0x8d, 0x8a, 0x65, 0xc8, 0x8c,
+ 0x61, 0x39, 0x49, 0xb0, 0x36, 0x68, 0xaa, 0xb4,
+ 0xf0, 0x42, 0x9e, 0x35, 0x3e, 0xa6, 0xe5, 0x3f,
+ 0x9a, 0x84, 0x1f, 0x20, 0x19, 0xec, 0x24, 0xbd,
+ 0xe1, 0xa7, 0x56, 0x77, 0xaa, 0x9b, 0x59, 0x02,
+ 0xe6, 0x10, 0x81, 0xc0, 0x10, 0x64, 0xde, 0x93,
+ },
+ },
+ {
+ {0x41, 0xff, 0xc1, 0xff, 0xff, 0xfe, 0x01, 0xff,
+ 0xfc, 0x00, 0x03, 0xff, 0xfe, 0x00, 0x07, 0xc0,
+ 0x01, 0xff, 0xf0, 0x00, 0x03, 0xff, 0xf0, 0x7f,
+ 0xfe, 0x00, 0x07, 0xc0, },
+ {0xab, 0x68, 0x99, 0x30, 0xbc, 0xae, 0x4a, 0x4a,
+ 0xa5, 0xf5, 0xcb, 0x08, 0x5e, 0x82, 0x3e, 0x8a,
+ 0xe3, 0x0f, 0xd3, 0x65, 0xeb, 0x1d, 0xa4, 0xab,
+ 0xa9, 0xcf, 0x03, 0x79, 0x33, 0x45, 0xa1, 0x21,
+ 0xbb, 0xd2, 0x33, 0x54, 0x8a, 0xf0, 0xd2, 0x10,
+ 0x65, 0x4e, 0xb4, 0x0b, 0xab, 0x78, 0x8a, 0x03,
+ 0x66, 0x64, 0x19, 0xbe, 0x6f, 0xbd, 0x34, 0xe7,
+ },
+ },
+ {
+ {0x7f, 0xff, 0xff, 0xc0, 0x3f, 0xff, 0xc0, 0x03,
+ 0xff, 0xff, 0xfc, 0x00, 0x7f, 0xff, 0x00, 0x00,
+ 0x00, 0x00, 0x07, 0x00, 0x00, 0x10, 0x00, 0x00,
+ 0x00, 0x0e, 0x00, 0xff, },
+ {0xbd, 0xb6, 0xa8, 0x81, 0x7c, 0x1f, 0x89, 0xda,
+ 0x1c, 0x2f, 0x3d, 0xd8, 0xe9, 0x7f, 0xeb, 0x44,
+ 0x94, 0xf2, 0xed, 0x30, 0x2a, 0x4c, 0xe2, 0xbc,
+ 0x7f, 0x5f, 0x40, 0x25, 0x4c, 0x70, 0x20, 0xd5,
+ 0x7c, 0x00, 0x41, 0x18, 0x89, 0x46, 0x2d, 0x77,
+ 0xa5, 0x43, 0x8b, 0xb4, 0xe9, 0x7d, 0x17, 0x77,
+ 0x00, 0xbf, 0x72, 0x43, 0xa0, 0x7f, 0x16, 0x80,
+ },
+ },
+ {
+ {0x7f, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xf0, 0x1f, 0xff, 0xf8, 0xff, 0xff,
+ 0xc0, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x00,
+ 0x00, 0x0f, 0xff, 0xff, },
+ {0xd5, 0x8b, 0x61, 0xaa, 0x41, 0xc3, 0x2d, 0xd5,
+ 0xeb, 0xa4, 0x62, 0x64, 0x7d, 0xba, 0x75, 0xc5,
+ 0xd6, 0x7c, 0x83, 0x60, 0x6c, 0x0a, 0xf2, 0xbd,
+ 0x92, 0x84, 0x46, 0xa9, 0xd2, 0x4b, 0xa6, 0xa8,
+ 0x37, 0xbe, 0x04, 0x60, 0xdd, 0x10, 0x7a, 0xe7,
+ 0x77, 0x25, 0x69, 0x6d, 0x21, 0x14, 0x46, 0xc5,
+ 0x60, 0x9b, 0x45, 0x95, 0x97, 0x6b, 0x16, 0xbd,
+ },
+ },
+ {
+ {0x7f, 0xff, 0xff, 0xc0, 0x00, 0xff, 0xfe, 0x3f,
+ 0xff, 0xfc, 0x10, 0x00, 0x00, 0x20, 0x00, 0x3f,
+ 0xff, 0xff, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x00,
+ 0x3f, 0xff, 0xff, 0xff, },
+ {0xdc, 0x9f, 0xa7, 0x79, 0x78, 0xa0, 0x05, 0x51,
+ 0x09, 0x80, 0xe9, 0x29, 0xa1, 0x48, 0x5f, 0x63,
+ 0x71, 0x6d, 0xf6, 0x95, 0xd7, 0xa0, 0xc1, 0x8b,
+ 0xb5, 0x18, 0xdf, 0x03, 0xed, 0xe2, 0xb0, 0x16,
+ 0xf2, 0xdd, 0xff, 0xc2, 0xa8, 0xc0, 0x15, 0xb1,
+ 0x34, 0x92, 0x82, 0x75, 0xce, 0x09, 0xe5, 0x66,
+ 0x1b, 0x7a, 0xb1, 0x4c, 0xe0, 0xd1, 0xd4, 0x03,
+ },
+ },
+ {
+ {0x70, 0x01, 0xf0, 0x00, 0x1c, 0x00, 0x01, 0xc0,
+ 0x00, 0x00, 0x1f, 0xff, 0xff, 0xfc, 0x00, 0x00,
+ 0x1f, 0xff, 0xff, 0xf8, 0x00, 0x0f, 0xc0, 0x00,
+ 0x00, 0x01, 0xfc, 0x00, },
+ {0x49, 0x9d, 0x8b, 0x28, 0x29, 0xcf, 0xb8, 0x79,
+ 0xc9, 0x01, 0xf7, 0xd8, 0x5d, 0x35, 0x70, 0x45,
+ 0xed, 0xab, 0x55, 0x02, 0x88, 0x24, 0xd0, 0xf0,
+ 0x5b, 0xa2, 0x79, 0xba, 0xbf, 0x92, 0x95, 0x37,
+ 0xb0, 0x6e, 0x40, 0x15, 0x91, 0x96, 0x39, 0xd9,
+ 0x4f, 0x57, 0x83, 0x8f, 0xa3, 0x3f, 0xc3, 0xd9,
+ 0x52, 0x59, 0x8d, 0xcd, 0xbb, 0x44, 0xd6, 0x38,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x1f, 0xfc, 0x00, 0x00,
+ 0x00, 0xff, 0xf0, 0x30, 0x00, 0x1f, 0x00, 0x00,
+ 0xff, 0xff, 0xf0, 0x00, 0x00, 0x38, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02, },
+ {0x82, 0x46, 0xc9, 0x99, 0x13, 0x71, 0x86, 0x63,
+ 0x2c, 0x5f, 0x9e, 0xdd, 0xf3, 0xb1, 0xb0, 0xe1,
+ 0x76, 0x4c, 0x5e, 0x8b, 0xd0, 0xe0, 0xd8, 0xa5,
+ 0x54, 0xb9, 0xcb, 0x77, 0xe8, 0x0e, 0xd8, 0x66,
+ 0x0b, 0xc1, 0xcb, 0x17, 0xac, 0x7d, 0x84, 0x5b,
+ 0xe4, 0x0a, 0x7a, 0x02, 0x2d, 0x33, 0x06, 0xf1,
+ 0x16, 0xae, 0x9f, 0x81, 0xfe, 0xa6, 0x59, 0x47,
+ },
+ },
+ {
+ {0x7f, 0xff, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x07, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xfe, 0x08, 0x00, 0x00, 0x1f,
+ 0xf0, 0x00, 0x1f, 0xff, },
+ {0x66, 0x70, 0xc2, 0x0a, 0xfc, 0xce, 0xae, 0xa6,
+ 0x72, 0xc9, 0x7f, 0x75, 0xe2, 0xe9, 0xdd, 0x5c,
+ 0x84, 0x60, 0xe5, 0x4b, 0xb3, 0x85, 0x38, 0xeb,
+ 0xb4, 0xbd, 0x30, 0xeb, 0xf2, 0x80, 0xd8, 0x00,
+ 0x8d, 0x07, 0xa4, 0xca, 0xf5, 0x42, 0x71, 0xf9,
+ 0x93, 0x52, 0x7d, 0x46, 0xff, 0x3f, 0xf4, 0x6f,
+ 0xd1, 0x19, 0x0a, 0x3f, 0x1f, 0xaa, 0x4f, 0x74,
+ },
+ },
+ {
+ {0x00, 0x00, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xc0, 0x00, 0x07, 0xff, 0xff, 0xe0, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x00, 0xff,
+ 0xff, 0xff, 0xff, 0xff, },
+ {0x00, 0x0e, 0xca, 0x93, 0x42, 0x47, 0x42, 0x5c,
+ 0xfd, 0x94, 0x9b, 0x79, 0x5c, 0xb5, 0xce, 0x1e,
+ 0xff, 0x40, 0x15, 0x50, 0x38, 0x6e, 0x28, 0xd1,
+ 0xa4, 0xc5, 0xa8, 0xeb, 0xd4, 0xc0, 0x10, 0x40,
+ 0xdb, 0xa1, 0x96, 0x28, 0x93, 0x1b, 0xc8, 0x85,
+ 0x53, 0x70, 0x31, 0x7c, 0x72, 0x2c, 0xbd, 0x9c,
+ 0xa6, 0x15, 0x69, 0x85, 0xf1, 0xc2, 0xe9, 0xce,
+ },
+ },
+ {
+ {0x7f, 0xff, 0xfc, 0x03, 0xff, 0x80, 0x7f, 0xff,
+ 0xe0, 0x00, 0x1f, 0xff, 0xff, 0x80, 0x0f, 0xff,
+ 0x80, 0x00, 0x01, 0xff, 0xff, 0x00, 0x01, 0xff,
+ 0xff, 0xfe, 0x00, 0x1f, },
+ {0xef, 0x35, 0x3b, 0xf5, 0xc7, 0x3c, 0xd5, 0x51,
+ 0xb9, 0x6d, 0x59, 0x6f, 0xbc, 0x9a, 0x67, 0xf1,
+ 0x6d, 0x61, 0xdd, 0x9f, 0xe5, 0x6a, 0xf1, 0x9d,
+ 0xe1, 0xfb, 0xa9, 0xcd, 0x21, 0x77, 0x1b, 0x9c,
+ 0xdc, 0xe3, 0xe8, 0x43, 0x0c, 0x09, 0xb3, 0x83,
+ 0x8b, 0xe7, 0x0b, 0x48, 0xc2, 0x1e, 0x15, 0xbc,
+ 0x09, 0xee, 0x1f, 0x2d, 0x79, 0x45, 0xb9, 0x1f,
+ },
+ },
+ {
+ {0x00, 0x00, 0x00, 0x07, 0xff, 0xc0, 0x7f, 0xff,
+ 0xff, 0xff, 0x01, 0xff, 0xfe, 0x03, 0xff, 0xfe,
+ 0x40, 0x00, 0x38, 0x00, 0x07, 0xe0, 0x00, 0x3f,
+ 0xfe, 0x00, 0x00, 0x00, },
+ {0x40, 0x36, 0x05, 0x2a, 0x30, 0x91, 0xeb, 0x48,
+ 0x10, 0x46, 0xad, 0x32, 0x89, 0xc9, 0x5d, 0x3a,
+ 0xc9, 0x05, 0xca, 0x00, 0x23, 0xde, 0x2c, 0x03,
+ 0xec, 0xd4, 0x51, 0xcf, 0xd7, 0x68, 0x16, 0x5a,
+ 0x38, 0xa2, 0xb9, 0x6f, 0x81, 0x25, 0x86, 0xa9,
+ 0xd5, 0x9d, 0x41, 0x36, 0x03, 0x5d, 0x9c, 0x85,
+ 0x3a, 0x5b, 0xf2, 0xe1, 0xc8, 0x6a, 0x49, 0x93,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x29, },
+ {0xfc, 0xc7, 0xf2, 0xb4, 0x5d, 0xf1, 0xcd, 0x5a,
+ 0x3c, 0x0c, 0x07, 0x31, 0xca, 0x47, 0xa8, 0xaf,
+ 0x75, 0xcf, 0xb0, 0x34, 0x7e, 0x83, 0x54, 0xee,
+ 0xfe, 0x78, 0x24, 0x55, 0xf2, 0xa2, 0x8e, 0xef,
+ 0xd8, 0xb3, 0x45, 0x83, 0x21, 0x16, 0xf1, 0xe5,
+ 0x74, 0xf2, 0xc6, 0xb2, 0xc8, 0x95, 0xaa, 0x8c,
+ 0x24, 0x94, 0x1f, 0x40, 0xd8, 0xb8, 0x0a, 0xd1,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x2a, },
+ {0xa1, 0xe8, 0x1c, 0x04, 0xf3, 0x0c, 0xe2, 0x01,
+ 0xc7, 0xc9, 0xac, 0xe7, 0x85, 0xed, 0x44, 0xcc,
+ 0x33, 0xb4, 0x55, 0xa0, 0x22, 0xf2, 0xac, 0xdb,
+ 0xc6, 0xca, 0xe8, 0x3c, 0x23, 0x0e, 0x09, 0x3c,
+ 0x24, 0xf6, 0x38, 0xf5, 0x33, 0xda, 0xc6, 0xe2,
+ 0xb6, 0xd0, 0x1d, 0xa3, 0xb5, 0xe7, 0xf4, 0x54,
+ 0x29, 0x31, 0x5c, 0xa9, 0x3f, 0xb8, 0xe6, 0x34,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x2b, },
+ {0xc9, 0xff, 0x61, 0xb0, 0x40, 0x87, 0x4c, 0x05,
+ 0x68, 0x47, 0x92, 0x16, 0x82, 0x4a, 0x15, 0xea,
+ 0xb1, 0xa8, 0x38, 0xa7, 0x97, 0xd1, 0x89, 0x74,
+ 0x62, 0x26, 0xe4, 0xcc, 0x15, 0x67, 0x29, 0xf1,
+ 0xa0, 0x03, 0x64, 0x70, 0x30, 0x66, 0x60, 0x54,
+ 0xe2, 0x08, 0x18, 0x0f, 0x8f, 0x7b, 0x0d, 0xf2,
+ 0x24, 0x9e, 0x44, 0xfb, 0xa5, 0x93, 0x1f, 0xff,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x2c, },
+ {0xb8, 0x35, 0x7c, 0x3a, 0x6c, 0xee, 0xf2, 0x88,
+ 0x31, 0x0e, 0x17, 0xb8, 0xbf, 0xef, 0xf9, 0x20,
+ 0x08, 0x46, 0xca, 0x8c, 0x19, 0x42, 0x49, 0x7c,
+ 0x48, 0x44, 0x03, 0xbc, 0x00, 0xeb, 0x61, 0x05,
+ 0x99, 0xf9, 0x59, 0x42, 0xdf, 0x10, 0x82, 0xe4,
+ 0xf9, 0x42, 0x6d, 0x08, 0x6f, 0xb9, 0xc6, 0x23,
+ 0x1a, 0xe8, 0xb2, 0x49, 0x33, 0xaa, 0xb5, 0xdb,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x2d, },
+ {0x0b, 0x6e, 0xc4, 0xfe, 0x17, 0x77, 0x38, 0x24,
+ 0x04, 0xef, 0x67, 0x99, 0x97, 0xba, 0x8d, 0x1c,
+ 0xc5, 0xcd, 0x8e, 0x85, 0x34, 0x92, 0x59, 0xf5,
+ 0x90, 0xc4, 0xc6, 0x6d, 0xcc, 0x66, 0x2b, 0x9b,
+ 0xcb, 0xa6, 0xf9, 0x4e, 0xe4, 0xff, 0x1c, 0x9c,
+ 0x10, 0xbd, 0x6d, 0xdd, 0x0d, 0x13, 0x8d, 0xf2,
+ 0xd0, 0x99, 0xa2, 0x82, 0x15, 0x2a, 0x4b, 0x7f,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x2e, },
+ {0xba, 0xa4, 0xd8, 0x63, 0x55, 0x11, 0xa7, 0xd2,
+ 0x88, 0xae, 0xbe, 0xed, 0xd1, 0x2c, 0xe5, 0x29,
+ 0xff, 0x10, 0x2c, 0x91, 0xf9, 0x7f, 0x86, 0x7e,
+ 0x21, 0x91, 0x6b, 0xf9, 0x68, 0x65, 0xa0, 0xb8,
+ 0xa6, 0x07, 0xf0, 0xb0, 0x4b, 0x13, 0xd1, 0xcb,
+ 0x0a, 0xa9, 0x92, 0xa5, 0xa9, 0x7f, 0x5e, 0xe8,
+ 0xca, 0x18, 0x49, 0xef, 0xb9, 0xed, 0x86, 0x78,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x2f, },
+ {0xa5, 0x36, 0x40, 0xc8, 0x3d, 0xc2, 0x08, 0x60,
+ 0x3d, 0xed, 0x83, 0xe4, 0xec, 0xf7, 0x58, 0xf2,
+ 0x4c, 0x35, 0x7d, 0x7c, 0xf4, 0x80, 0x88, 0xb2,
+ 0xce, 0x01, 0xe9, 0xfa, 0x2a, 0x7e, 0xb3, 0x28,
+ 0xdb, 0xe6, 0x63, 0xb5, 0xa4, 0x68, 0xb5, 0xbc,
+ 0x97, 0xa0, 0x40, 0xa3, 0x74, 0x53, 0x96, 0xba,
+ 0x63, 0x6b, 0x96, 0x43, 0x70, 0xdc, 0x33, 0x52,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x30, },
+ {0x34, 0xe8, 0xe1, 0x7a, 0x43, 0x0e, 0x43, 0x28,
+ 0x97, 0x93, 0xc3, 0x83, 0xfa, 0xc9, 0x77, 0x42,
+ 0x47, 0xb4, 0x0e, 0x9e, 0xbd, 0x33, 0x66, 0x98,
+ 0x1f, 0xcf, 0xae, 0xca, 0xda, 0xd7, 0xe6, 0x08,
+ 0xe3, 0x80, 0x48, 0x04, 0x34, 0xea, 0x64, 0x1c,
+ 0xc8, 0x2c, 0x82, 0xcb, 0xc9, 0x28, 0x01, 0x46,
+ 0x9c, 0x8d, 0xb0, 0x20, 0x4f, 0x13, 0x48, 0x9a,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x31, },
+ {0x6e, 0x31, 0xee, 0x1d, 0xc1, 0x37, 0xf8, 0x1b,
+ 0x05, 0x67, 0x52, 0xe4, 0xde, 0xab, 0x14, 0x43,
+ 0xa4, 0x81, 0x03, 0x3e, 0x9b, 0x4c, 0x93, 0xa3,
+ 0x04, 0x4f, 0x4f, 0x7a, 0xdf, 0x82, 0x22, 0x0f,
+ 0xc7, 0xa4, 0x02, 0x15, 0x49, 0x16, 0x53, 0x25,
+ 0x72, 0x5f, 0x94, 0xc3, 0x41, 0x0d, 0xdb, 0x56,
+ 0xc5, 0x4e, 0x16, 0x1f, 0xc9, 0xef, 0x62, 0xee,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x32, },
+ {0xef, 0x53, 0xb6, 0x29, 0x4a, 0xca, 0x43, 0x1f,
+ 0x0f, 0x3c, 0x22, 0xdc, 0x82, 0xeb, 0x90, 0x50,
+ 0x32, 0x4f, 0x1d, 0x88, 0xd3, 0x77, 0xe7, 0x16,
+ 0x44, 0x8e, 0x50, 0x7c, 0xdf, 0x4a, 0xef, 0xff,
+ 0xbf, 0x6d, 0x16, 0x99, 0xc9, 0x30, 0x48, 0x1c,
+ 0xd1, 0x02, 0x12, 0x7c, 0x9a, 0x3d, 0x99, 0x20,
+ 0x48, 0xab, 0x05, 0x92, 0x9b, 0x6e, 0x59, 0x27,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x33, },
+ {0xae, 0xa9, 0xe1, 0x7a, 0x30, 0x65, 0x17, 0xeb,
+ 0x89, 0x15, 0x2a, 0xa7, 0x09, 0x6d, 0x2c, 0x38,
+ 0x1e, 0xc8, 0x13, 0xc5, 0x1a, 0xa8, 0x80, 0xe7,
+ 0xbe, 0xe2, 0xc0, 0xfd, 0xc6, 0x44, 0xcf, 0x15,
+ 0x4c, 0xc8, 0x1f, 0x5a, 0xde, 0x49, 0x34, 0x5e,
+ 0x54, 0x1b, 0x4d, 0x4b, 0x5c, 0x1a, 0xdb, 0x3e,
+ 0xb5, 0xc0, 0x1c, 0x14, 0xee, 0x94, 0x9a, 0xa2,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x34, },
+ {0x2f, 0xdc, 0xcc, 0xfe, 0xe7, 0x20, 0xa7, 0x7e,
+ 0xf6, 0xcb, 0x3b, 0xfb, 0xb4, 0x47, 0xf9, 0x38,
+ 0x31, 0x17, 0xe3, 0xda, 0xa4, 0xa0, 0x7e, 0x36,
+ 0xed, 0x15, 0xf7, 0x8d, 0xc8, 0xe8, 0xcd, 0x1b,
+ 0x0b, 0xe4, 0x0b, 0x08, 0x77, 0xcf, 0xca, 0x19,
+ 0x58, 0x60, 0x31, 0x22, 0xf1, 0xe6, 0x91, 0x4f,
+ 0x84, 0xb7, 0xe8, 0xe9, 0x68, 0xae, 0x8b, 0x9e,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x35, },
+ {0x85, 0x8e, 0x6f, 0x9c, 0xc6, 0xc1, 0x2c, 0x31,
+ 0xf5, 0xdf, 0x12, 0x4a, 0xa7, 0x77, 0x67, 0xb0,
+ 0x5c, 0x8b, 0xc0, 0x21, 0xbd, 0x68, 0x3d, 0x2b,
+ 0x55, 0x57, 0x15, 0x50, 0xfb, 0x92, 0x32, 0xc1,
+ 0x5a, 0x3b, 0xc7, 0x67, 0x3a, 0x3a, 0x03, 0xb0,
+ 0x25, 0x38, 0x24, 0xc5, 0x3d, 0x0f, 0xd1, 0x41,
+ 0x1b, 0x1c, 0xab, 0xe2, 0xe1, 0x87, 0xfb, 0x87,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x36, },
+ {0xdb, 0x2f, 0x6b, 0xe6, 0x30, 0xe2, 0x46, 0xa5,
+ 0xcf, 0x7d, 0x99, 0xb8, 0x51, 0x94, 0xb1, 0x23,
+ 0xd4, 0x87, 0xe2, 0xd4, 0x66, 0xb9, 0x4b, 0x24,
+ 0xa0, 0x3c, 0x3e, 0x28, 0xf0, 0xc5, 0xcf, 0xf7,
+ 0xab, 0x68, 0x0d, 0x09, 0xee, 0x11, 0xda, 0xe8,
+ 0x4e, 0x9c, 0x10, 0x72, 0xac, 0x48, 0xea, 0x2e,
+ 0x74, 0x4b, 0x1b, 0x7f, 0x72, 0xfd, 0x46, 0x9e,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x37, },
+ {0x1f, 0x24, 0x83, 0xf8, 0x25, 0x72, 0x25, 0x1f,
+ 0xca, 0x97, 0x5f, 0xea, 0x40, 0xdb, 0x82, 0x1d,
+ 0xf8, 0xad, 0x82, 0xa3, 0xc0, 0x02, 0xee, 0x6c,
+ 0x57, 0x11, 0x24, 0x08, 0x76, 0x05, 0x0f, 0x33,
+ 0x48, 0xaf, 0x26, 0x64, 0xaa, 0xc3, 0xa8, 0xb0,
+ 0x52, 0x81, 0x30, 0x4e, 0xbc, 0x7a, 0x79, 0x14,
+ 0xc6, 0xad, 0x50, 0xa4, 0xb4, 0xea, 0xc3, 0x83,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x38, },
+ {0x31, 0xc4, 0x9a, 0xe7, 0x5b, 0xce, 0x78, 0x07,
+ 0xcd, 0xff, 0x22, 0x05, 0x5d, 0x94, 0xee, 0x90,
+ 0x21, 0xfe, 0xdb, 0xb5, 0xab, 0x51, 0xc5, 0x75,
+ 0x26, 0xf0, 0x11, 0xaa, 0xd8, 0x17, 0x40, 0x0e,
+ 0x8b, 0xa9, 0xca, 0x13, 0xa4, 0x5f, 0x36, 0x0e,
+ 0x3d, 0x12, 0x1e, 0xaa, 0xeb, 0x39, 0xaf, 0x82,
+ 0xd6, 0x00, 0x1c, 0x81, 0x86, 0xf5, 0xf8, 0x66,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x39, },
+ {0xae, 0x99, 0xfe, 0xeb, 0xb5, 0xd2, 0x69, 0x45,
+ 0xb5, 0x48, 0x92, 0x09, 0x2a, 0x8a, 0xee, 0x02,
+ 0x91, 0x29, 0x30, 0xfa, 0x41, 0xcd, 0x11, 0x4e,
+ 0x40, 0x44, 0x73, 0x01, 0xfb, 0x7d, 0xa7, 0xf5,
+ 0xf1, 0x3a, 0x43, 0xb8, 0x17, 0x74, 0x37, 0x3c,
+ 0x87, 0x9c, 0xd3, 0x2d, 0x69, 0x34, 0xc0, 0x5f,
+ 0xa7, 0x58, 0xee, 0xb1, 0x4f, 0xcf, 0xab, 0x38,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x3a, },
+ {0xdf, 0x1b, 0x1d, 0x66, 0xa5, 0x51, 0xd0, 0xd3,
+ 0x1e, 0xff, 0x82, 0x25, 0x58, 0xb9, 0xd2, 0xcc,
+ 0x75, 0xc2, 0x18, 0x02, 0x79, 0xfe, 0x0d, 0x08,
+ 0xfd, 0x89, 0x6d, 0x04, 0x5c, 0x08, 0x0f, 0xc3,
+ 0x52, 0x2f, 0x41, 0xbb, 0xb3, 0xf5, 0x5a, 0x97,
+ 0xcf, 0xec, 0xf2, 0x1f, 0x88, 0x2c, 0xe8, 0xcb,
+ 0xb1, 0xe5, 0x0c, 0xa6, 0xe6, 0x7e, 0x56, 0xdc,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x3b, },
+ {0x70, 0x6a, 0x46, 0xdc, 0x76, 0xdc, 0xb7, 0x67,
+ 0x98, 0xe6, 0x0e, 0x6d, 0x89, 0x47, 0x47, 0x88,
+ 0xd1, 0x6d, 0xc1, 0x80, 0x32, 0xd2, 0x68, 0xfd,
+ 0x1a, 0x70, 0x4f, 0xa6, 0xe3, 0xd4, 0x89, 0x58,
+ 0x43, 0xda, 0x18, 0x8f, 0xd5, 0x8f, 0xb0, 0x56,
+ 0x79, 0x76, 0xd7, 0xb5, 0x03, 0x59, 0xd6, 0xb7,
+ 0x85, 0x30, 0xc8, 0xf6, 0x2d, 0x1b, 0x17, 0x46,
+ },
+ },
+ {
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x16, 0xa2,
+ 0xe0, 0xb8, 0xf0, 0x3e, 0x13, 0xdd, 0x29, 0x45,
+ 0x5c, 0x5c, 0x2a, 0x3c, },
+ {0xb7, 0x0e, 0x0c, 0xbd, 0x6b, 0xb4, 0xbf, 0x7f,
+ 0x32, 0x13, 0x90, 0xb9, 0x4a, 0x03, 0xc1, 0xd3,
+ 0x56, 0xc2, 0x11, 0x22, 0x34, 0x32, 0x80, 0xd6,
+ 0x11, 0x5c, 0x1d, 0x21, 0x42, 0xc8, 0x9c, 0x77,
+ 0x4a, 0x08, 0xdc, 0x04, 0xb3, 0xdd, 0x20, 0x19,
+ 0x32, 0xbc, 0x8a, 0x5e, 0xa5, 0xf8, 0xb8, 0x9b,
+ 0xbb, 0x2a, 0x7e, 0x66, 0x7a, 0xff, 0x81, 0xcd,
+ },
+ },
+};
+
+TEST(P224, ExternalToInternalAndBack) {
+ Point point;
+
+ EXPECT_TRUE(point.SetFromString(base::StringPiece(
+ reinterpret_cast<const char *>(kBasePointExternal),
+ sizeof(kBasePointExternal))));
+
+ const std::string external = point.ToString();
+
+ ASSERT_EQ(external.size(), 56u);
+ EXPECT_TRUE(memcmp(external.data(), kBasePointExternal,
+ sizeof(kBasePointExternal)) == 0);
+}
+
+TEST(P224, ScalarBaseMult) {
+ Point point;
+
+ for (size_t i = 0; i < arraysize(kNISTTestVectors); i++) {
+ p224::ScalarBaseMult(kNISTTestVectors[i].scalar, &point);
+ const std::string external = point.ToString();
+ ASSERT_EQ(external.size(), 56u);
+ EXPECT_TRUE(memcmp(external.data(), kNISTTestVectors[i].affine,
+ external.size()) == 0);
+ }
+}
+
+TEST(P224, Addition) {
+ Point a, b, minus_b, sum, a_again;
+
+ ASSERT_TRUE(a.SetFromString(base::StringPiece(
+ reinterpret_cast<const char *>(kNISTTestVectors[10].affine), 56)));
+ ASSERT_TRUE(b.SetFromString(base::StringPiece(
+ reinterpret_cast<const char *>(kNISTTestVectors[11].affine), 56)));
+
+ p224::Negate(b, &minus_b);
+ p224::Add(a, b, &sum);
+ EXPECT_TRUE(memcmp(&sum, &a, sizeof(sum)) != 0);
+ p224::Add(minus_b, sum, &a_again);
+ EXPECT_TRUE(a_again.ToString() == a.ToString());
+}
+
+TEST(P224, Infinity) {
+ char zeros[56];
+ memset(zeros, 0, sizeof(zeros));
+
+ // Test that x^0 = ∞.
+ Point a;
+ p224::ScalarBaseMult(reinterpret_cast<const uint8_t*>(zeros), &a);
+ EXPECT_TRUE(memcmp(zeros, a.ToString().data(), sizeof(zeros)) == 0);
+
+ // We shouldn't allow ∞ to be imported.
+ EXPECT_FALSE(a.SetFromString(std::string(zeros, sizeof(zeros))));
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/random.cc b/libchrome/crypto/random.cc
new file mode 100644
index 0000000..355914e
--- /dev/null
+++ b/libchrome/crypto/random.cc
@@ -0,0 +1,21 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/random.h"
+
+#include <stddef.h>
+
+#include "base/rand_util.h"
+
+namespace crypto {
+
+void RandBytes(void *bytes, size_t length) {
+ // It's OK to call base::RandBytes(), because it's already strongly random.
+ // But _other_ code should go through this function to ensure that code which
+ // needs secure randomness is easily discoverable.
+ base::RandBytes(bytes, length);
+}
+
+} // namespace crypto
+
diff --git a/libchrome/crypto/random.h b/libchrome/crypto/random.h
new file mode 100644
index 0000000..002616b
--- /dev/null
+++ b/libchrome/crypto/random.h
@@ -0,0 +1,21 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_RANDOM_H_
+#define CRYPTO_RANDOM_H_
+
+#include <stddef.h>
+
+#include "crypto/crypto_export.h"
+
+namespace crypto {
+
+// Fills the given buffer with |length| random bytes of cryptographically
+// secure random numbers.
+// |length| must be positive.
+CRYPTO_EXPORT void RandBytes(void *bytes, size_t length);
+
+}
+
+#endif
diff --git a/libchrome/crypto/random_unittest.cc b/libchrome/crypto/random_unittest.cc
new file mode 100644
index 0000000..caee512
--- /dev/null
+++ b/libchrome/crypto/random_unittest.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/random.h"
+
+#include <stddef.h>
+
+#include "base/strings/string_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Basic functionality tests. Does NOT test the security of the random data.
+
+// Ensures we don't have all trivial data, i.e. that the data is indeed random.
+// Currently, that means the bytes cannot be all the same (e.g. all zeros).
+bool IsTrivial(const std::string& bytes) {
+ for (size_t i = 0; i < bytes.size(); i++) {
+ if (bytes[i] != bytes[0]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+TEST(RandBytes, RandBytes) {
+ std::string bytes(16, '\0');
+ crypto::RandBytes(base::WriteInto(&bytes, bytes.size()), bytes.size());
+ EXPECT_TRUE(!IsTrivial(bytes));
+}
diff --git a/libchrome/crypto/rsa_private_key.cc b/libchrome/crypto/rsa_private_key.cc
new file mode 100644
index 0000000..c546c91
--- /dev/null
+++ b/libchrome/crypto/rsa_private_key.cc
@@ -0,0 +1,389 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/rsa_private_key.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_util.h"
+
+// This file manually encodes and decodes RSA private keys using PrivateKeyInfo
+// from PKCS #8 and RSAPrivateKey from PKCS #1. These structures are:
+//
+// PrivateKeyInfo ::= SEQUENCE {
+// version Version,
+// privateKeyAlgorithm PrivateKeyAlgorithmIdentifier,
+// privateKey PrivateKey,
+// attributes [0] IMPLICIT Attributes OPTIONAL
+// }
+//
+// RSAPrivateKey ::= SEQUENCE {
+// version Version,
+// modulus INTEGER,
+// publicExponent INTEGER,
+// privateExponent INTEGER,
+// prime1 INTEGER,
+// prime2 INTEGER,
+// exponent1 INTEGER,
+// exponent2 INTEGER,
+// coefficient INTEGER
+// }
+
+namespace {
+// Helper for error handling during key import.
+#define READ_ASSERT(truth) \
+ if (!(truth)) { \
+ NOTREACHED(); \
+ return false; \
+ }
+} // namespace
+
+namespace crypto {
+
+const uint8_t PrivateKeyInfoCodec::kRsaAlgorithmIdentifier[] = {
+ 0x30, 0x0D, 0x06, 0x09, 0x2A, 0x86, 0x48, 0x86,
+ 0xF7, 0x0D, 0x01, 0x01, 0x01, 0x05, 0x00};
+
+PrivateKeyInfoCodec::PrivateKeyInfoCodec(bool big_endian)
+ : big_endian_(big_endian) {}
+
+PrivateKeyInfoCodec::~PrivateKeyInfoCodec() {}
+
+bool PrivateKeyInfoCodec::Export(std::vector<uint8_t>* output) {
+ std::list<uint8_t> content;
+
+ // Version (always zero)
+ uint8_t version = 0;
+
+ PrependInteger(coefficient_, &content);
+ PrependInteger(exponent2_, &content);
+ PrependInteger(exponent1_, &content);
+ PrependInteger(prime2_, &content);
+ PrependInteger(prime1_, &content);
+ PrependInteger(private_exponent_, &content);
+ PrependInteger(public_exponent_, &content);
+ PrependInteger(modulus_, &content);
+ PrependInteger(&version, 1, &content);
+ PrependTypeHeaderAndLength(kSequenceTag, content.size(), &content);
+ PrependTypeHeaderAndLength(kOctetStringTag, content.size(), &content);
+
+ // RSA algorithm OID
+ for (size_t i = sizeof(kRsaAlgorithmIdentifier); i > 0; --i)
+ content.push_front(kRsaAlgorithmIdentifier[i - 1]);
+
+ PrependInteger(&version, 1, &content);
+ PrependTypeHeaderAndLength(kSequenceTag, content.size(), &content);
+
+ // Copy everying into the output.
+ output->reserve(content.size());
+ output->assign(content.begin(), content.end());
+
+ return true;
+}
+
+bool PrivateKeyInfoCodec::ExportPublicKeyInfo(std::vector<uint8_t>* output) {
+ // Create a sequence with the modulus (n) and public exponent (e).
+ std::vector<uint8_t> bit_string;
+ if (!ExportPublicKey(&bit_string))
+ return false;
+
+ // Add the sequence as the contents of a bit string.
+ std::list<uint8_t> content;
+ PrependBitString(&bit_string[0], static_cast<int>(bit_string.size()),
+ &content);
+
+ // Add the RSA algorithm OID.
+ for (size_t i = sizeof(kRsaAlgorithmIdentifier); i > 0; --i)
+ content.push_front(kRsaAlgorithmIdentifier[i - 1]);
+
+ // Finally, wrap everything in a sequence.
+ PrependTypeHeaderAndLength(kSequenceTag, content.size(), &content);
+
+ // Copy everything into the output.
+ output->reserve(content.size());
+ output->assign(content.begin(), content.end());
+
+ return true;
+}
+
+bool PrivateKeyInfoCodec::ExportPublicKey(std::vector<uint8_t>* output) {
+ // Create a sequence with the modulus (n) and public exponent (e).
+ std::list<uint8_t> content;
+ PrependInteger(&public_exponent_[0],
+ static_cast<int>(public_exponent_.size()),
+ &content);
+ PrependInteger(&modulus_[0], static_cast<int>(modulus_.size()), &content);
+ PrependTypeHeaderAndLength(kSequenceTag, content.size(), &content);
+
+ // Copy everything into the output.
+ output->reserve(content.size());
+ output->assign(content.begin(), content.end());
+
+ return true;
+}
+
+bool PrivateKeyInfoCodec::Import(const std::vector<uint8_t>& input) {
+ if (input.empty()) {
+ return false;
+ }
+
+ // Parse the private key info up to the public key values, ignoring
+ // the subsequent private key values.
+ uint8_t* src = const_cast<uint8_t*>(&input.front());
+ uint8_t* end = src + input.size();
+ if (!ReadSequence(&src, end) ||
+ !ReadVersion(&src, end) ||
+ !ReadAlgorithmIdentifier(&src, end) ||
+ !ReadTypeHeaderAndLength(&src, end, kOctetStringTag, NULL) ||
+ !ReadSequence(&src, end) ||
+ !ReadVersion(&src, end) ||
+ !ReadInteger(&src, end, &modulus_))
+ return false;
+
+ int mod_size = modulus_.size();
+ READ_ASSERT(mod_size % 2 == 0);
+ int primes_size = mod_size / 2;
+
+ if (!ReadIntegerWithExpectedSize(&src, end, 4, &public_exponent_) ||
+ !ReadIntegerWithExpectedSize(&src, end, mod_size, &private_exponent_) ||
+ !ReadIntegerWithExpectedSize(&src, end, primes_size, &prime1_) ||
+ !ReadIntegerWithExpectedSize(&src, end, primes_size, &prime2_) ||
+ !ReadIntegerWithExpectedSize(&src, end, primes_size, &exponent1_) ||
+ !ReadIntegerWithExpectedSize(&src, end, primes_size, &exponent2_) ||
+ !ReadIntegerWithExpectedSize(&src, end, primes_size, &coefficient_))
+ return false;
+
+ READ_ASSERT(src == end);
+
+
+ return true;
+}
+
+void PrivateKeyInfoCodec::PrependInteger(const std::vector<uint8_t>& in,
+ std::list<uint8_t>* out) {
+ uint8_t* ptr = const_cast<uint8_t*>(&in.front());
+ PrependIntegerImpl(ptr, in.size(), out, big_endian_);
+}
+
+// Helper to prepend an ASN.1 integer.
+void PrivateKeyInfoCodec::PrependInteger(uint8_t* val,
+ int num_bytes,
+ std::list<uint8_t>* data) {
+ PrependIntegerImpl(val, num_bytes, data, big_endian_);
+}
+
+void PrivateKeyInfoCodec::PrependIntegerImpl(uint8_t* val,
+ int num_bytes,
+ std::list<uint8_t>* data,
+ bool big_endian) {
+ // Reverse input if little-endian.
+ std::vector<uint8_t> tmp;
+ if (!big_endian) {
+ tmp.assign(val, val + num_bytes);
+ std::reverse(tmp.begin(), tmp.end());
+ val = &tmp.front();
+ }
+
+ // ASN.1 integers are unpadded byte arrays, so skip any null padding bytes
+ // from the most-significant end of the integer.
+ int start = 0;
+ while (start < (num_bytes - 1) && val[start] == 0x00) {
+ start++;
+ num_bytes--;
+ }
+ PrependBytes(val, start, num_bytes, data);
+
+ // ASN.1 integers are signed. To encode a positive integer whose sign bit
+ // (the most significant bit) would otherwise be set and make the number
+ // negative, ASN.1 requires a leading null byte to force the integer to be
+ // positive.
+ uint8_t front = data->front();
+ if ((front & 0x80) != 0) {
+ data->push_front(0x00);
+ num_bytes++;
+ }
+
+ PrependTypeHeaderAndLength(kIntegerTag, num_bytes, data);
+}
+
+bool PrivateKeyInfoCodec::ReadInteger(uint8_t** pos,
+ uint8_t* end,
+ std::vector<uint8_t>* out) {
+ return ReadIntegerImpl(pos, end, out, big_endian_);
+}
+
+bool PrivateKeyInfoCodec::ReadIntegerWithExpectedSize(
+ uint8_t** pos,
+ uint8_t* end,
+ size_t expected_size,
+ std::vector<uint8_t>* out) {
+ std::vector<uint8_t> temp;
+ if (!ReadIntegerImpl(pos, end, &temp, true)) // Big-Endian
+ return false;
+
+ int pad = expected_size - temp.size();
+ int index = 0;
+ if (out->size() == expected_size + 1) {
+ READ_ASSERT(out->front() == 0x00);
+ pad++;
+ index++;
+ } else {
+ READ_ASSERT(out->size() <= expected_size);
+ }
+
+ out->insert(out->end(), pad, 0x00);
+ out->insert(out->end(), temp.begin(), temp.end());
+
+ // Reverse output if little-endian.
+ if (!big_endian_)
+ std::reverse(out->begin(), out->end());
+ return true;
+}
+
+bool PrivateKeyInfoCodec::ReadIntegerImpl(uint8_t** pos,
+ uint8_t* end,
+ std::vector<uint8_t>* out,
+ bool big_endian) {
+ uint32_t length = 0;
+ if (!ReadTypeHeaderAndLength(pos, end, kIntegerTag, &length) || !length)
+ return false;
+
+ // The first byte can be zero to force positiveness. We can ignore this.
+ if (**pos == 0x00) {
+ ++(*pos);
+ --length;
+ }
+
+ if (length)
+ out->insert(out->end(), *pos, (*pos) + length);
+
+ (*pos) += length;
+
+ // Reverse output if little-endian.
+ if (!big_endian)
+ std::reverse(out->begin(), out->end());
+ return true;
+}
+
+void PrivateKeyInfoCodec::PrependBytes(uint8_t* val,
+ int start,
+ int num_bytes,
+ std::list<uint8_t>* data) {
+ while (num_bytes > 0) {
+ --num_bytes;
+ data->push_front(val[start + num_bytes]);
+ }
+}
+
+void PrivateKeyInfoCodec::PrependLength(size_t size, std::list<uint8_t>* data) {
+ // The high bit is used to indicate whether additional octets are needed to
+ // represent the length.
+ if (size < 0x80) {
+ data->push_front(static_cast<uint8_t>(size));
+ } else {
+ uint8_t num_bytes = 0;
+ while (size > 0) {
+ data->push_front(static_cast<uint8_t>(size & 0xFF));
+ size >>= 8;
+ num_bytes++;
+ }
+ CHECK_LE(num_bytes, 4);
+ data->push_front(0x80 | num_bytes);
+ }
+}
+
+void PrivateKeyInfoCodec::PrependTypeHeaderAndLength(
+ uint8_t type,
+ uint32_t length,
+ std::list<uint8_t>* output) {
+ PrependLength(length, output);
+ output->push_front(type);
+}
+
+void PrivateKeyInfoCodec::PrependBitString(uint8_t* val,
+ int num_bytes,
+ std::list<uint8_t>* output) {
+ // Start with the data.
+ PrependBytes(val, 0, num_bytes, output);
+ // Zero unused bits.
+ output->push_front(0);
+ // Add the length.
+ PrependLength(num_bytes + 1, output);
+ // Finally, add the bit string tag.
+ output->push_front((uint8_t)kBitStringTag);
+}
+
+bool PrivateKeyInfoCodec::ReadLength(uint8_t** pos,
+ uint8_t* end,
+ uint32_t* result) {
+ READ_ASSERT(*pos < end);
+ int length = 0;
+
+ // If the MSB is not set, the length is just the byte itself.
+ if (!(**pos & 0x80)) {
+ length = **pos;
+ (*pos)++;
+ } else {
+ // Otherwise, the lower 7 indicate the length of the length.
+ int length_of_length = **pos & 0x7F;
+ READ_ASSERT(length_of_length <= 4);
+ (*pos)++;
+ READ_ASSERT(*pos + length_of_length < end);
+
+ length = 0;
+ for (int i = 0; i < length_of_length; ++i) {
+ length <<= 8;
+ length |= **pos;
+ (*pos)++;
+ }
+ }
+
+ READ_ASSERT(*pos + length <= end);
+ if (result) *result = length;
+ return true;
+}
+
+bool PrivateKeyInfoCodec::ReadTypeHeaderAndLength(uint8_t** pos,
+ uint8_t* end,
+ uint8_t expected_tag,
+ uint32_t* length) {
+ READ_ASSERT(*pos < end);
+ READ_ASSERT(**pos == expected_tag);
+ (*pos)++;
+
+ return ReadLength(pos, end, length);
+}
+
+bool PrivateKeyInfoCodec::ReadSequence(uint8_t** pos, uint8_t* end) {
+ return ReadTypeHeaderAndLength(pos, end, kSequenceTag, NULL);
+}
+
+bool PrivateKeyInfoCodec::ReadAlgorithmIdentifier(uint8_t** pos, uint8_t* end) {
+ READ_ASSERT(*pos + sizeof(kRsaAlgorithmIdentifier) < end);
+ READ_ASSERT(memcmp(*pos, kRsaAlgorithmIdentifier,
+ sizeof(kRsaAlgorithmIdentifier)) == 0);
+ (*pos) += sizeof(kRsaAlgorithmIdentifier);
+ return true;
+}
+
+bool PrivateKeyInfoCodec::ReadVersion(uint8_t** pos, uint8_t* end) {
+ uint32_t length = 0;
+ if (!ReadTypeHeaderAndLength(pos, end, kIntegerTag, &length))
+ return false;
+
+ // The version should be zero.
+ for (uint32_t i = 0; i < length; ++i) {
+ READ_ASSERT(**pos == 0x00);
+ (*pos)++;
+ }
+
+ return true;
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/rsa_private_key.h b/libchrome/crypto/rsa_private_key.h
new file mode 100644
index 0000000..d4808f5
--- /dev/null
+++ b/libchrome/crypto/rsa_private_key.h
@@ -0,0 +1,225 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_RSA_PRIVATE_KEY_H_
+#define CRYPTO_RSA_PRIVATE_KEY_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <list>
+#include <vector>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "crypto/crypto_export.h"
+
+#if defined(USE_OPENSSL)
+// Forward declaration for openssl/*.h
+typedef struct evp_pkey_st EVP_PKEY;
+#else
+// Forward declaration.
+typedef struct PK11SlotInfoStr PK11SlotInfo;
+typedef struct SECKEYPrivateKeyStr SECKEYPrivateKey;
+typedef struct SECKEYPublicKeyStr SECKEYPublicKey;
+#endif
+
+
+namespace crypto {
+
+// Used internally by RSAPrivateKey for serializing and deserializing
+// PKCS #8 PrivateKeyInfo and PublicKeyInfo.
+class PrivateKeyInfoCodec {
+ public:
+ // ASN.1 encoding of the AlgorithmIdentifier from PKCS #8.
+ static const uint8_t kRsaAlgorithmIdentifier[];
+
+ // ASN.1 tags for some types we use.
+ static const uint8_t kBitStringTag = 0x03;
+ static const uint8_t kIntegerTag = 0x02;
+ static const uint8_t kNullTag = 0x05;
+ static const uint8_t kOctetStringTag = 0x04;
+ static const uint8_t kSequenceTag = 0x30;
+
+ // |big_endian| here specifies the byte-significance of the integer components
+ // that will be parsed & serialized (modulus(), etc...) during Import(),
+ // Export() and ExportPublicKeyInfo() -- not the ASN.1 DER encoding of the
+ // PrivateKeyInfo/PublicKeyInfo (which is always big-endian).
+ explicit PrivateKeyInfoCodec(bool big_endian);
+
+ ~PrivateKeyInfoCodec();
+
+ // Exports the contents of the integer components to the ASN.1 DER encoding
+ // of the PrivateKeyInfo structure to |output|.
+ bool Export(std::vector<uint8_t>* output);
+
+ // Exports the contents of the integer components to the ASN.1 DER encoding
+ // of the PublicKeyInfo structure to |output|.
+ bool ExportPublicKeyInfo(std::vector<uint8_t>* output);
+
+ // Exports the contents of the integer components to the ASN.1 DER encoding
+ // of the RSAPublicKey structure to |output|.
+ bool ExportPublicKey(std::vector<uint8_t>* output);
+
+ // Parses the ASN.1 DER encoding of the PrivateKeyInfo structure in |input|
+ // and populates the integer components with |big_endian_| byte-significance.
+ // IMPORTANT NOTE: This is currently *not* security-approved for importing
+ // keys from unstrusted sources.
+ bool Import(const std::vector<uint8_t>& input);
+
+ // Accessors to the contents of the integer components of the PrivateKeyInfo
+ // structure. The returned vectors remain owned by this codec; callers
+ // write into them directly (see RSAPrivateKey::ExportPrivateKey).
+ std::vector<uint8_t>* modulus() { return &modulus_; }
+ std::vector<uint8_t>* public_exponent() { return &public_exponent_; }
+ std::vector<uint8_t>* private_exponent() { return &private_exponent_; }
+ std::vector<uint8_t>* prime1() { return &prime1_; }
+ std::vector<uint8_t>* prime2() { return &prime2_; }
+ std::vector<uint8_t>* exponent1() { return &exponent1_; }
+ std::vector<uint8_t>* exponent2() { return &exponent2_; }
+ std::vector<uint8_t>* coefficient() { return &coefficient_; }
+
+ private:
+ // Utility wrappers for PrependIntegerImpl that use the class's |big_endian_|
+ // value.
+ void PrependInteger(const std::vector<uint8_t>& in, std::list<uint8_t>* out);
+ void PrependInteger(uint8_t* val, int num_bytes, std::list<uint8_t>* data);
+
+ // Prepends the integer stored in |val| - |val + num_bytes| with |big_endian|
+ // byte-significance into |data| as an ASN.1 integer.
+ void PrependIntegerImpl(uint8_t* val,
+ int num_bytes,
+ std::list<uint8_t>* data,
+ bool big_endian);
+
+ // Utility wrappers for ReadIntegerImpl that use the class's |big_endian_|
+ // value.
+ bool ReadInteger(uint8_t** pos, uint8_t* end, std::vector<uint8_t>* out);
+ bool ReadIntegerWithExpectedSize(uint8_t** pos,
+ uint8_t* end,
+ size_t expected_size,
+ std::vector<uint8_t>* out);
+
+ // Reads an ASN.1 integer from |pos|, and stores the result into |out| with
+ // |big_endian| byte-significance.
+ bool ReadIntegerImpl(uint8_t** pos,
+ uint8_t* end,
+ std::vector<uint8_t>* out,
+ bool big_endian);
+
+ // Prepends the integer stored in |val|, starting a index |start|, for
+ // |num_bytes| bytes onto |data|.
+ void PrependBytes(uint8_t* val,
+ int start,
+ int num_bytes,
+ std::list<uint8_t>* data);
+
+ // Helper to prepend an ASN.1 length field.
+ void PrependLength(size_t size, std::list<uint8_t>* data);
+
+ // Helper to prepend an ASN.1 type header.
+ void PrependTypeHeaderAndLength(uint8_t type,
+ uint32_t length,
+ std::list<uint8_t>* output);
+
+ // Helper to prepend an ASN.1 bit string
+ void PrependBitString(uint8_t* val,
+ int num_bytes,
+ std::list<uint8_t>* output);
+
+ // Read an ASN.1 length field. This also checks that the length does not
+ // extend beyond |end|.
+ bool ReadLength(uint8_t** pos, uint8_t* end, uint32_t* result);
+
+ // Read an ASN.1 type header and its length.
+ bool ReadTypeHeaderAndLength(uint8_t** pos,
+ uint8_t* end,
+ uint8_t expected_tag,
+ uint32_t* length);
+
+ // Read an ASN.1 sequence declaration. This consumes the type header and
+ // length field, but not the contents of the sequence.
+ bool ReadSequence(uint8_t** pos, uint8_t* end);
+
+ // Read the RSA AlgorithmIdentifier.
+ bool ReadAlgorithmIdentifier(uint8_t** pos, uint8_t* end);
+
+ // Read one of the two version fields in PrivateKeyInfo.
+ bool ReadVersion(uint8_t** pos, uint8_t* end);
+
+ // The byte-significance of the stored components (modulus, etc..).
+ bool big_endian_;
+
+ // Component integers of the PrivateKeyInfo
+ std::vector<uint8_t> modulus_;
+ std::vector<uint8_t> public_exponent_;
+ std::vector<uint8_t> private_exponent_;
+ std::vector<uint8_t> prime1_;
+ std::vector<uint8_t> prime2_;
+ std::vector<uint8_t> exponent1_;
+ std::vector<uint8_t> exponent2_;
+ std::vector<uint8_t> coefficient_;
+
+ DISALLOW_COPY_AND_ASSIGN(PrivateKeyInfoCodec);
+};
+
+// Encapsulates an RSA private key. Can be used to generate new keys, export
+// keys to other formats, or to extract a public key.
+// TODO(hclam): This class should be ref-counted so it can be reused easily.
+class CRYPTO_EXPORT RSAPrivateKey {
+ public:
+ // Destroys the underlying key handle(s); see the NSS/OpenSSL members
+ // below.
+ ~RSAPrivateKey();
+
+ // Create a new random instance. Can return NULL if initialization fails.
+ static RSAPrivateKey* Create(uint16_t num_bits);
+
+ // Create a new instance by importing an existing private key. The format is
+ // an ASN.1-encoded PrivateKeyInfo block from PKCS #8. This can return NULL if
+ // initialization fails.
+ static RSAPrivateKey* CreateFromPrivateKeyInfo(
+ const std::vector<uint8_t>& input);
+
+#if defined(USE_OPENSSL)
+ // Create a new instance from an existing EVP_PKEY, taking a
+ // reference to it. |key| must be an RSA key. Returns NULL on
+ // failure.
+ static RSAPrivateKey* CreateFromKey(EVP_PKEY* key);
+#else
+ // Create a new instance by referencing an existing private key
+ // structure. Does not import the key.
+ static RSAPrivateKey* CreateFromKey(SECKEYPrivateKey* key);
+#endif
+
+ // Accessors return non-owning pointers; the handles are released by this
+ // object's destructor, so callers must not destroy them.
+#if defined(USE_OPENSSL)
+ EVP_PKEY* key() { return key_; }
+#else
+ SECKEYPrivateKey* key() { return key_; }
+ SECKEYPublicKey* public_key() { return public_key_; }
+#endif
+
+ // Creates a copy of the object.
+ RSAPrivateKey* Copy() const;
+
+ // Exports the private key to a PKCS #8 PrivateKeyInfo block.
+ bool ExportPrivateKey(std::vector<uint8_t>* output) const;
+
+ // Exports the public key to an X509 SubjectPublicKeyInfo block.
+ bool ExportPublicKey(std::vector<uint8_t>* output) const;
+
+ private:
+ // Constructor is private. Use one of the Create*() methods above instead.
+ RSAPrivateKey();
+
+#if defined(USE_OPENSSL)
+ EVP_PKEY* key_;
+#else
+ SECKEYPrivateKey* key_;
+ SECKEYPublicKey* public_key_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(RSAPrivateKey);
+};
+
+} // namespace crypto
+
+#endif // CRYPTO_RSA_PRIVATE_KEY_H_
diff --git a/libchrome/crypto/rsa_private_key_nss.cc b/libchrome/crypto/rsa_private_key_nss.cc
new file mode 100644
index 0000000..b1026c1
--- /dev/null
+++ b/libchrome/crypto/rsa_private_key_nss.cc
@@ -0,0 +1,151 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/rsa_private_key.h"
+
+#include <cryptohi.h>
+#include <keyhi.h>
+#include <pk11pub.h>
+#include <stdint.h>
+
+#include <list>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_util.h"
+#include "crypto/nss_key_util.h"
+#include "crypto/nss_util.h"
+#include "crypto/scoped_nss_types.h"
+
+// TODO(rafaelw): Consider using NSS's ASN.1 encoder.
+namespace {
+
+// Reads the raw PKCS#11 attribute |type| (e.g. CKA_MODULUS) from |key|
+// into |output|. Returns false (after NOTREACHED) if NSS cannot read the
+// attribute.
+static bool ReadAttribute(SECKEYPrivateKey* key,
+ CK_ATTRIBUTE_TYPE type,
+ std::vector<uint8_t>* output) {
+ SECItem item;
+ SECStatus rv;
+ rv = PK11_ReadRawAttribute(PK11_TypePrivKey, key, type, &item);
+ if (rv != SECSuccess) {
+ NOTREACHED();
+ return false;
+ }
+
+ output->assign(item.data, item.data + item.len);
+ // PR_FALSE: free only the item's data buffer; |item| itself is on the
+ // stack.
+ SECITEM_FreeItem(&item, PR_FALSE);
+ return true;
+}
+
+} // namespace
+
+namespace crypto {
+
+// Releases the NSS key handles acquired by the Create*() factories.
+RSAPrivateKey::~RSAPrivateKey() {
+ if (key_)
+ SECKEY_DestroyPrivateKey(key_);
+ if (public_key_)
+ SECKEY_DestroyPublicKey(public_key_);
+}
+
+// static
+// Generates a fresh |num_bits| RSA key pair in the NSS internal slot
+// (session-only, not persisted) and wraps it in a new RSAPrivateKey.
+// Returns nullptr on any NSS failure.
+RSAPrivateKey* RSAPrivateKey::Create(uint16_t num_bits) {
+ EnsureNSSInit();
+
+ ScopedPK11Slot slot(PK11_GetInternalSlot());
+ if (!slot) {
+ NOTREACHED();
+ return nullptr;
+ }
+
+ ScopedSECKEYPublicKey public_key;
+ ScopedSECKEYPrivateKey private_key;
+ if (!GenerateRSAKeyPairNSS(slot.get(), num_bits, false /* not permanent */,
+ &public_key, &private_key)) {
+ return nullptr;
+ }
+
+ // Ownership of both handles transfers to the returned object; its
+ // destructor releases them.
+ RSAPrivateKey* rsa_key = new RSAPrivateKey;
+ rsa_key->public_key_ = public_key.release();
+ rsa_key->key_ = private_key.release();
+ return rsa_key;
+}
+
+// static
+// Imports a PKCS #8 PrivateKeyInfo blob as a session key and wraps it.
+// Returns nullptr if import fails or the key is not an RSA key.
+RSAPrivateKey* RSAPrivateKey::CreateFromPrivateKeyInfo(
+ const std::vector<uint8_t>& input) {
+ EnsureNSSInit();
+
+ ScopedPK11Slot slot(PK11_GetInternalSlot());
+ if (!slot) {
+ NOTREACHED();
+ return nullptr;
+ }
+ ScopedSECKEYPrivateKey key(ImportNSSKeyFromPrivateKeyInfo(
+ slot.get(), input, false /* not permanent */));
+ if (!key || SECKEY_GetPrivateKeyType(key.get()) != rsaKey)
+ return nullptr;
+ // CreateFromKey copies the key, so |key| may be released on scope exit.
+ return RSAPrivateKey::CreateFromKey(key.get());
+}
+
+// static
+// Wraps an existing NSS private key by copying it (the caller keeps
+// ownership of |key|); also derives the matching public key. Returns NULL
+// if |key| is not RSA or either copy fails.
+RSAPrivateKey* RSAPrivateKey::CreateFromKey(SECKEYPrivateKey* key) {
+ DCHECK(key);
+ if (SECKEY_GetPrivateKeyType(key) != rsaKey)
+ return NULL;
+ RSAPrivateKey* copy = new RSAPrivateKey();
+ copy->key_ = SECKEY_CopyPrivateKey(key);
+ copy->public_key_ = SECKEY_ConvertToPublicKey(key);
+ if (!copy->key_ || !copy->public_key_) {
+ NOTREACHED();
+ // |delete copy| releases whichever handle was successfully copied.
+ delete copy;
+ return NULL;
+ }
+ return copy;
+}
+
+// Deep-copies both NSS handles into a new wrapper.
+// NOTE(review): unlike CreateFromKey(), this does not check the copies
+// for NULL; an NSS copy failure would yield an object with null handles.
+// Confirm whether callers tolerate that before relying on it.
+RSAPrivateKey* RSAPrivateKey::Copy() const {
+ RSAPrivateKey* copy = new RSAPrivateKey();
+ copy->key_ = SECKEY_CopyPrivateKey(key_);
+ copy->public_key_ = SECKEY_CopyPublicKey(public_key_);
+ return copy;
+}
+
+// Serializes the key as a PKCS #8 PrivateKeyInfo by reading each raw
+// PKCS#11 component attribute into a PrivateKeyInfoCodec (big-endian
+// component order) and DER-encoding it into |output|.
+bool RSAPrivateKey::ExportPrivateKey(std::vector<uint8_t>* output) const {
+ PrivateKeyInfoCodec private_key_info(true);
+
+ // Manually read the component attributes of the private key and build up
+ // the PrivateKeyInfo.
+ if (!ReadAttribute(key_, CKA_MODULUS, private_key_info.modulus()) ||
+ !ReadAttribute(key_, CKA_PUBLIC_EXPONENT,
+ private_key_info.public_exponent()) ||
+ !ReadAttribute(key_, CKA_PRIVATE_EXPONENT,
+ private_key_info.private_exponent()) ||
+ !ReadAttribute(key_, CKA_PRIME_1, private_key_info.prime1()) ||
+ !ReadAttribute(key_, CKA_PRIME_2, private_key_info.prime2()) ||
+ !ReadAttribute(key_, CKA_EXPONENT_1, private_key_info.exponent1()) ||
+ !ReadAttribute(key_, CKA_EXPONENT_2, private_key_info.exponent2()) ||
+ !ReadAttribute(key_, CKA_COEFFICIENT, private_key_info.coefficient())) {
+ NOTREACHED();
+ return false;
+ }
+
+ return private_key_info.Export(output);
+}
+
+// Serializes the public half as a DER SubjectPublicKeyInfo via NSS; the
+// scoped SECItem frees the encoder's buffer after it is copied out.
+bool RSAPrivateKey::ExportPublicKey(std::vector<uint8_t>* output) const {
+ ScopedSECItem der_pubkey(SECKEY_EncodeDERSubjectPublicKeyInfo(public_key_));
+ if (!der_pubkey.get()) {
+ NOTREACHED();
+ return false;
+ }
+
+ output->assign(der_pubkey->data, der_pubkey->data + der_pubkey->len);
+ return true;
+}
+
+// Private constructor; handles stay null until a Create*() factory fills
+// them in. NSS must be initialized before any handle is created.
+RSAPrivateKey::RSAPrivateKey() : key_(NULL), public_key_(NULL) {
+ EnsureNSSInit();
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/rsa_private_key_unittest.cc b/libchrome/crypto/rsa_private_key_unittest.cc
new file mode 100644
index 0000000..393a24c
--- /dev/null
+++ b/libchrome/crypto/rsa_private_key_unittest.cc
@@ -0,0 +1,380 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/rsa_private_key.h"
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+// Shared test fixture: a DER-encoded PKCS #8 PrivateKeyInfo for a
+// 1024-bit RSA key, used by the import/copy/public-key tests below.
+const uint8_t kTestPrivateKeyInfo[] = {
+ 0x30, 0x82, 0x02, 0x78, 0x02, 0x01, 0x00, 0x30, 0x0d, 0x06, 0x09, 0x2a,
+ 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x04, 0x82,
+ 0x02, 0x62, 0x30, 0x82, 0x02, 0x5e, 0x02, 0x01, 0x00, 0x02, 0x81, 0x81,
+ 0x00, 0xb8, 0x7f, 0x2b, 0x20, 0xdc, 0x7c, 0x9b, 0x0c, 0xdc, 0x51, 0x61,
+ 0x99, 0x0d, 0x36, 0x0f, 0xd4, 0x66, 0x88, 0x08, 0x55, 0x84, 0xd5, 0x3a,
+ 0xbf, 0x2b, 0xa4, 0x64, 0x85, 0x7b, 0x0c, 0x04, 0x13, 0x3f, 0x8d, 0xf4,
+ 0xbc, 0x38, 0x0d, 0x49, 0xfe, 0x6b, 0xc4, 0x5a, 0xb0, 0x40, 0x53, 0x3a,
+ 0xd7, 0x66, 0x09, 0x0f, 0x9e, 0x36, 0x74, 0x30, 0xda, 0x8a, 0x31, 0x4f,
+ 0x1f, 0x14, 0x50, 0xd7, 0xc7, 0x20, 0x94, 0x17, 0xde, 0x4e, 0xb9, 0x57,
+ 0x5e, 0x7e, 0x0a, 0xe5, 0xb2, 0x65, 0x7a, 0x89, 0x4e, 0xb6, 0x47, 0xff,
+ 0x1c, 0xbd, 0xb7, 0x38, 0x13, 0xaf, 0x47, 0x85, 0x84, 0x32, 0x33, 0xf3,
+ 0x17, 0x49, 0xbf, 0xe9, 0x96, 0xd0, 0xd6, 0x14, 0x6f, 0x13, 0x8d, 0xc5,
+ 0xfc, 0x2c, 0x72, 0xba, 0xac, 0xea, 0x7e, 0x18, 0x53, 0x56, 0xa6, 0x83,
+ 0xa2, 0xce, 0x93, 0x93, 0xe7, 0x1f, 0x0f, 0xe6, 0x0f, 0x02, 0x03, 0x01,
+ 0x00, 0x01, 0x02, 0x81, 0x80, 0x03, 0x61, 0x89, 0x37, 0xcb, 0xf2, 0x98,
+ 0xa0, 0xce, 0xb4, 0xcb, 0x16, 0x13, 0xf0, 0xe6, 0xaf, 0x5c, 0xc5, 0xa7,
+ 0x69, 0x71, 0xca, 0xba, 0x8d, 0xe0, 0x4d, 0xdd, 0xed, 0xb8, 0x48, 0x8b,
+ 0x16, 0x93, 0x36, 0x95, 0xc2, 0x91, 0x40, 0x65, 0x17, 0xbd, 0x7f, 0xd6,
+ 0xad, 0x9e, 0x30, 0x28, 0x46, 0xe4, 0x3e, 0xcc, 0x43, 0x78, 0xf9, 0xfe,
+ 0x1f, 0x33, 0x23, 0x1e, 0x31, 0x12, 0x9d, 0x3c, 0xa7, 0x08, 0x82, 0x7b,
+ 0x7d, 0x25, 0x4e, 0x5e, 0x19, 0xa8, 0x9b, 0xed, 0x86, 0xb2, 0xcb, 0x3c,
+ 0xfe, 0x4e, 0xa1, 0xfa, 0x62, 0x87, 0x3a, 0x17, 0xf7, 0x60, 0xec, 0x38,
+ 0x29, 0xe8, 0x4f, 0x34, 0x9f, 0x76, 0x9d, 0xee, 0xa3, 0xf6, 0x85, 0x6b,
+ 0x84, 0x43, 0xc9, 0x1e, 0x01, 0xff, 0xfd, 0xd0, 0x29, 0x4c, 0xfa, 0x8e,
+ 0x57, 0x0c, 0xc0, 0x71, 0xa5, 0xbb, 0x88, 0x46, 0x29, 0x5c, 0xc0, 0x4f,
+ 0x01, 0x02, 0x41, 0x00, 0xf5, 0x83, 0xa4, 0x64, 0x4a, 0xf2, 0xdd, 0x8c,
+ 0x2c, 0xed, 0xa8, 0xd5, 0x60, 0x5a, 0xe4, 0xc7, 0xcc, 0x61, 0xcd, 0x38,
+ 0x42, 0x20, 0xd3, 0x82, 0x18, 0xf2, 0x35, 0x00, 0x72, 0x2d, 0xf7, 0x89,
+ 0x80, 0x67, 0xb5, 0x93, 0x05, 0x5f, 0xdd, 0x42, 0xba, 0x16, 0x1a, 0xea,
+ 0x15, 0xc6, 0xf0, 0xb8, 0x8c, 0xbc, 0xbf, 0x54, 0x9e, 0xf1, 0xc1, 0xb2,
+ 0xb3, 0x8b, 0xb6, 0x26, 0x02, 0x30, 0xc4, 0x81, 0x02, 0x41, 0x00, 0xc0,
+ 0x60, 0x62, 0x80, 0xe1, 0x22, 0x78, 0xf6, 0x9d, 0x83, 0x18, 0xeb, 0x72,
+ 0x45, 0xd7, 0xc8, 0x01, 0x7f, 0xa9, 0xca, 0x8f, 0x7d, 0xd6, 0xb8, 0x31,
+ 0x2b, 0x84, 0x7f, 0x62, 0xd9, 0xa9, 0x22, 0x17, 0x7d, 0x06, 0x35, 0x6c,
+ 0xf3, 0xc1, 0x94, 0x17, 0x85, 0x5a, 0xaf, 0x9c, 0x5c, 0x09, 0x3c, 0xcf,
+ 0x2f, 0x44, 0x9d, 0xb6, 0x52, 0x68, 0x5f, 0xf9, 0x59, 0xc8, 0x84, 0x2b,
+ 0x39, 0x22, 0x8f, 0x02, 0x41, 0x00, 0xb2, 0x04, 0xe2, 0x0e, 0x56, 0xca,
+ 0x03, 0x1a, 0xc0, 0xf9, 0x12, 0x92, 0xa5, 0x6b, 0x42, 0xb8, 0x1c, 0xda,
+ 0x4d, 0x93, 0x9d, 0x5f, 0x6f, 0xfd, 0xc5, 0x58, 0xda, 0x55, 0x98, 0x74,
+ 0xfc, 0x28, 0x17, 0x93, 0x1b, 0x75, 0x9f, 0x50, 0x03, 0x7f, 0x7e, 0xae,
+ 0xc8, 0x95, 0x33, 0x75, 0x2c, 0xd6, 0xa4, 0x35, 0xb8, 0x06, 0x03, 0xba,
+ 0x08, 0x59, 0x2b, 0x17, 0x02, 0xdc, 0x4c, 0x7a, 0x50, 0x01, 0x02, 0x41,
+ 0x00, 0x9d, 0xdb, 0x39, 0x59, 0x09, 0xe4, 0x30, 0xa0, 0x24, 0xf5, 0xdb,
+ 0x2f, 0xf0, 0x2f, 0xf1, 0x75, 0x74, 0x0d, 0x5e, 0xb5, 0x11, 0x73, 0xb0,
+ 0x0a, 0xaa, 0x86, 0x4c, 0x0d, 0xff, 0x7e, 0x1d, 0xb4, 0x14, 0xd4, 0x09,
+ 0x91, 0x33, 0x5a, 0xfd, 0xa0, 0x58, 0x80, 0x9b, 0xbe, 0x78, 0x2e, 0x69,
+ 0x82, 0x15, 0x7c, 0x72, 0xf0, 0x7b, 0x18, 0x39, 0xff, 0x6e, 0xeb, 0xc6,
+ 0x86, 0xf5, 0xb4, 0xc7, 0x6f, 0x02, 0x41, 0x00, 0x8d, 0x1a, 0x37, 0x0f,
+ 0x76, 0xc4, 0x82, 0xfa, 0x5c, 0xc3, 0x79, 0x35, 0x3e, 0x70, 0x8a, 0xbf,
+ 0x27, 0x49, 0xb0, 0x99, 0x63, 0xcb, 0x77, 0x5f, 0xa8, 0x82, 0x65, 0xf6,
+ 0x03, 0x52, 0x51, 0xf1, 0xae, 0x2e, 0x05, 0xb3, 0xc6, 0xa4, 0x92, 0xd1,
+ 0xce, 0x6c, 0x72, 0xfb, 0x21, 0xb3, 0x02, 0x87, 0xe4, 0xfd, 0x61, 0xca,
+ 0x00, 0x42, 0x19, 0xf0, 0xda, 0x5a, 0x53, 0xe3, 0xb1, 0xc5, 0x15, 0xf3};
+
+} // namespace
+
+// Generate random private keys with two different sizes. Reimport, then
+// export them again. We should get back the same exact bytes.
+TEST(RSAPrivateKeyUnitTest, InitRandomTest) {
+ std::unique_ptr<crypto::RSAPrivateKey> keypair1(
+ crypto::RSAPrivateKey::Create(1024));
+ std::unique_ptr<crypto::RSAPrivateKey> keypair2(
+ crypto::RSAPrivateKey::Create(2048));
+ ASSERT_TRUE(keypair1.get());
+ ASSERT_TRUE(keypair2.get());
+
+ std::vector<uint8_t> privkey1;
+ std::vector<uint8_t> privkey2;
+ std::vector<uint8_t> pubkey1;
+ std::vector<uint8_t> pubkey2;
+
+ ASSERT_TRUE(keypair1->ExportPrivateKey(&privkey1));
+ ASSERT_TRUE(keypair2->ExportPrivateKey(&privkey2));
+ ASSERT_TRUE(keypair1->ExportPublicKey(&pubkey1));
+ ASSERT_TRUE(keypair2->ExportPublicKey(&pubkey2));
+
+ // Round-trip: re-import the exported blobs...
+ std::unique_ptr<crypto::RSAPrivateKey> keypair3(
+ crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(privkey1));
+ std::unique_ptr<crypto::RSAPrivateKey> keypair4(
+ crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(privkey2));
+ ASSERT_TRUE(keypair3.get());
+ ASSERT_TRUE(keypair4.get());
+
+ // ...and the re-export must be byte-identical to the first export.
+ std::vector<uint8_t> privkey3;
+ std::vector<uint8_t> privkey4;
+ ASSERT_TRUE(keypair3->ExportPrivateKey(&privkey3));
+ ASSERT_TRUE(keypair4->ExportPrivateKey(&privkey4));
+
+ ASSERT_EQ(privkey1.size(), privkey3.size());
+ ASSERT_EQ(privkey2.size(), privkey4.size());
+ ASSERT_TRUE(0 == memcmp(&privkey1.front(), &privkey3.front(),
+ privkey1.size()));
+ ASSERT_TRUE(0 == memcmp(&privkey2.front(), &privkey4.front(),
+ privkey2.size()));
+}
+
+// Test Copy() method.
+TEST(RSAPrivateKeyUnitTest, CopyTest) {
+ std::vector<uint8_t> input(kTestPrivateKeyInfo,
+ kTestPrivateKeyInfo + sizeof(kTestPrivateKeyInfo));
+
+ std::unique_ptr<crypto::RSAPrivateKey> key(
+ crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input));
+
+ // NOTE(review): |key| is dereferenced below without an
+ // ASSERT_TRUE(key.get()) first; an import failure would crash the test
+ // rather than fail it cleanly.
+ std::unique_ptr<crypto::RSAPrivateKey> key_copy(key->Copy());
+ ASSERT_TRUE(key_copy.get());
+
+ // The copy must export the exact bytes that were imported.
+ std::vector<uint8_t> privkey_copy;
+ ASSERT_TRUE(key_copy->ExportPrivateKey(&privkey_copy));
+ ASSERT_EQ(input, privkey_copy);
+}
+
+// Test that CreateFromPrivateKeyInfo fails if there is extra data after the RSA
+// key.
+TEST(RSAPrivateKeyUnitTest, ExtraData) {
+ std::vector<uint8_t> input(kTestPrivateKeyInfo,
+ kTestPrivateKeyInfo + sizeof(kTestPrivateKeyInfo));
+ // A single trailing byte beyond the DER structure must be rejected.
+ input.push_back(0);
+
+ std::unique_ptr<crypto::RSAPrivateKey> key(
+ crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input));
+
+ // Import should fail.
+ EXPECT_FALSE(key);
+}
+
+// Importing a valid PKCS #8 blob of the wrong algorithm must fail rather
+// than mis-parse.
+TEST(RSAPrivateKeyUnitTest, NotRsaKey) {
+ // Defines a valid P-256 private key.
+ const uint8_t kTestEcPrivateKeyInfo[] = {
+ 0x30, 0x81, 0x87, 0x02, 0x01, 0x00, 0x30, 0x13, 0x06, 0x07, 0x2A, 0x86,
+ 0x48, 0xCE, 0x3D, 0x02, 0x01, 0x06, 0x08, 0x2A, 0x86, 0x48, 0xCE, 0x3D,
+ 0x03, 0x01, 0x07, 0x04, 0x6D, 0x30, 0x6B, 0x02, 0x01, 0x01, 0x04, 0x20,
+ 0x1F, 0xE3, 0x39, 0x50, 0xC5, 0xF4, 0x61, 0x12, 0x4A, 0xE9, 0x92, 0xC2,
+ 0xBD, 0xFD, 0xF1, 0xC7, 0x3B, 0x16, 0x15, 0xF5, 0x71, 0xBD, 0x56, 0x7E,
+ 0x60, 0xD1, 0x9A, 0xA1, 0xF4, 0x8C, 0xDF, 0x42, 0xA1, 0x44, 0x03, 0x42,
+ 0x00, 0x04, 0x7C, 0x11, 0x0C, 0x66, 0xDC, 0xFD, 0xA8, 0x07, 0xF6, 0xE6,
+ 0x9E, 0x45, 0xDD, 0xB3, 0xC7, 0x4F, 0x69, 0xA1, 0x48, 0x4D, 0x20, 0x3E,
+ 0x8D, 0xC5, 0xAD, 0xA8, 0xE9, 0xA9, 0xDD, 0x7C, 0xB3, 0xC7, 0x0D, 0xF4,
+ 0x48, 0x98, 0x6E, 0x51, 0xBD, 0xE5, 0xD1, 0x57, 0x6F, 0x99, 0x90, 0x1F,
+ 0x9C, 0x2C, 0x6A, 0x80, 0x6A, 0x47, 0xFD, 0x90, 0x76, 0x43, 0xA7, 0x2B,
+ 0x83, 0x55, 0x97, 0xEF, 0xC8, 0xC6};
+
+ std::vector<uint8_t> input(
+ kTestEcPrivateKeyInfo,
+ kTestEcPrivateKeyInfo + sizeof(kTestEcPrivateKeyInfo));
+
+ std::unique_ptr<crypto::RSAPrivateKey> key(
+ crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input));
+
+ // Import should fail as the given PKCS8 bytes were for an EC key not RSA key.
+ EXPECT_FALSE(key);
+}
+
+// Verify that generated public keys look good. This test data was generated
+// with the openssl command line tool.
+TEST(RSAPrivateKeyUnitTest, PublicKeyTest) {
+ const uint8_t expected_public_key_info[] = {
+ 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7,
+ 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, 0x81,
+ 0x89, 0x02, 0x81, 0x81, 0x00, 0xb8, 0x7f, 0x2b, 0x20, 0xdc, 0x7c, 0x9b,
+ 0x0c, 0xdc, 0x51, 0x61, 0x99, 0x0d, 0x36, 0x0f, 0xd4, 0x66, 0x88, 0x08,
+ 0x55, 0x84, 0xd5, 0x3a, 0xbf, 0x2b, 0xa4, 0x64, 0x85, 0x7b, 0x0c, 0x04,
+ 0x13, 0x3f, 0x8d, 0xf4, 0xbc, 0x38, 0x0d, 0x49, 0xfe, 0x6b, 0xc4, 0x5a,
+ 0xb0, 0x40, 0x53, 0x3a, 0xd7, 0x66, 0x09, 0x0f, 0x9e, 0x36, 0x74, 0x30,
+ 0xda, 0x8a, 0x31, 0x4f, 0x1f, 0x14, 0x50, 0xd7, 0xc7, 0x20, 0x94, 0x17,
+ 0xde, 0x4e, 0xb9, 0x57, 0x5e, 0x7e, 0x0a, 0xe5, 0xb2, 0x65, 0x7a, 0x89,
+ 0x4e, 0xb6, 0x47, 0xff, 0x1c, 0xbd, 0xb7, 0x38, 0x13, 0xaf, 0x47, 0x85,
+ 0x84, 0x32, 0x33, 0xf3, 0x17, 0x49, 0xbf, 0xe9, 0x96, 0xd0, 0xd6, 0x14,
+ 0x6f, 0x13, 0x8d, 0xc5, 0xfc, 0x2c, 0x72, 0xba, 0xac, 0xea, 0x7e, 0x18,
+ 0x53, 0x56, 0xa6, 0x83, 0xa2, 0xce, 0x93, 0x93, 0xe7, 0x1f, 0x0f, 0xe6,
+ 0x0f, 0x02, 0x03, 0x01, 0x00, 0x01};
+
+ std::vector<uint8_t> input(kTestPrivateKeyInfo,
+ kTestPrivateKeyInfo + sizeof(kTestPrivateKeyInfo));
+
+ std::unique_ptr<crypto::RSAPrivateKey> key(
+ crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input));
+ ASSERT_TRUE(key.get());
+
+ std::vector<uint8_t> output;
+ ASSERT_TRUE(key->ExportPublicKey(&output));
+
+ // NOTE(review): the memcmp is sized by output.size() with no preceding
+ // ASSERT_EQ(sizeof(expected_public_key_info), output.size()); a short
+ // export would compare only a prefix, and a long one would over-read the
+ // expected array.
+ ASSERT_TRUE(
+ memcmp(expected_public_key_info, &output.front(), output.size()) == 0);
+}
+
+// These two test keys each contain an integer that has 0x00 for its most
+// significant byte. When encoded as ASN.1, this byte is dropped and there are
+// two interesting sub-cases. When the sign bit of the integer is set, an extra
+// null byte is added back to force the encoded value to be positive. When the
+// sign bit is not set, the encoded integer is just left shorter than usual.
+// See also: http://code.google.com/p/chromium/issues/detail?id=14877.
+//
+// Before we were handling this correctly, we would see one of two failures:
+// * RSAPrivateKey::CreateFromPrivateKeyInfo would return null because the
+// underlying windows API failed to import the key.
+// * The import would succeed, but incorrectly interpret the data. On export,
+// the key would contain different values.
+//
+// This test case verifies these two failures modes don't occur.
+TEST(RSAPrivateKeyUnitTest, ShortIntegers) {
+ // PKCS #8 blob whose shortened integer has its sign bit set (so the DER
+ // encoding re-adds a leading 0x00).
+ const uint8_t short_integer_with_high_bit[] = {
+ 0x30, 0x82, 0x02, 0x77, 0x02, 0x01, 0x00, 0x30, 0x0d, 0x06, 0x09, 0x2a,
+ 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x04, 0x82,
+ 0x02, 0x61, 0x30, 0x82, 0x02, 0x5d, 0x02, 0x01, 0x00, 0x02, 0x81, 0x81,
+ 0x00, 0x92, 0x59, 0x32, 0x7d, 0x8e, 0xaf, 0x2e, 0xd5, 0xb2, 0x5c, 0x67,
+ 0xc8, 0x7d, 0x48, 0xb7, 0x84, 0x12, 0xd0, 0x76, 0xda, 0xe1, 0xa3, 0x1e,
+ 0x40, 0x01, 0x14, 0x5c, 0xef, 0x26, 0x6e, 0x28, 0xa2, 0xf7, 0xa5, 0xb4,
+ 0x02, 0x37, 0xd0, 0x53, 0x10, 0xcb, 0x7c, 0x6a, 0xf4, 0x53, 0x9f, 0xb8,
+ 0xe0, 0x83, 0x93, 0xd1, 0x19, 0xd8, 0x28, 0xd1, 0xd1, 0xd8, 0x87, 0x8f,
+ 0x92, 0xfd, 0x73, 0xc0, 0x4d, 0x3e, 0x07, 0x22, 0x1f, 0xc1, 0x20, 0xb0,
+ 0x70, 0xb2, 0x3b, 0xea, 0xb1, 0xe5, 0x0a, 0xfd, 0x56, 0x49, 0x5e, 0x39,
+ 0x90, 0x91, 0xce, 0x04, 0x83, 0x29, 0xaa, 0xfd, 0x12, 0xa4, 0x42, 0x26,
+ 0x6c, 0x6e, 0x79, 0x70, 0x77, 0x03, 0xb2, 0x07, 0x01, 0x3d, 0x85, 0x81,
+ 0x95, 0x9e, 0xda, 0x5a, 0xa3, 0xf4, 0x2d, 0x38, 0x04, 0x58, 0xf5, 0x6b,
+ 0xc9, 0xf1, 0xb5, 0x65, 0xfe, 0x66, 0x0d, 0xa2, 0xd5, 0x02, 0x03, 0x01,
+ 0x00, 0x01, 0x02, 0x81, 0x80, 0x5e, 0x01, 0x5f, 0xb6, 0x59, 0x1d, 0xdc,
+ 0x36, 0xb6, 0x60, 0x36, 0xe6, 0x08, 0xdb, 0xd9, 0xcd, 0xc3, 0x8c, 0x16,
+ 0x9c, 0x98, 0x8d, 0x7f, 0xd3, 0xdb, 0x1d, 0xaa, 0x68, 0x8f, 0xc5, 0xf8,
+ 0xe2, 0x5d, 0xb3, 0x19, 0xc2, 0xc6, 0xf9, 0x51, 0x32, 0x1b, 0x93, 0x6a,
+ 0xdc, 0x50, 0x8e, 0xeb, 0x61, 0x84, 0x03, 0x42, 0x30, 0x98, 0xb1, 0xf7,
+ 0xbd, 0x14, 0x9a, 0x57, 0x36, 0x33, 0x09, 0xd4, 0x3e, 0x90, 0xda, 0xef,
+ 0x09, 0x6e, 0xef, 0x49, 0xb6, 0x60, 0x68, 0x5e, 0x54, 0x17, 0x25, 0x5b,
+ 0x37, 0xe3, 0x35, 0x63, 0x5b, 0x60, 0x3c, 0xbd, 0x50, 0xdf, 0x46, 0x43,
+ 0x08, 0xa4, 0x71, 0x21, 0xf1, 0x30, 0x71, 0xdc, 0xda, 0xd7, 0x6f, 0xd2,
+ 0x18, 0xbd, 0x39, 0xf1, 0xe1, 0xbe, 0xa8, 0x8d, 0x62, 0xdf, 0xa2, 0x3e,
+ 0xb6, 0x15, 0x26, 0xb6, 0x57, 0xbd, 0x63, 0xdb, 0xc1, 0x91, 0xec, 0xb8,
+ 0x01, 0x02, 0x41, 0x00, 0xc6, 0x1a, 0x06, 0x48, 0xf2, 0x12, 0x1c, 0x9f,
+ 0x74, 0x20, 0x5c, 0x85, 0xa2, 0xda, 0xe5, 0x62, 0x96, 0x8d, 0x22, 0x7b,
+ 0x78, 0x73, 0xea, 0xbb, 0x9f, 0x59, 0x42, 0x13, 0x15, 0xc8, 0x11, 0x50,
+ 0x6c, 0x55, 0xf6, 0xdf, 0x8b, 0xfe, 0xc7, 0xdd, 0xa8, 0xca, 0x54, 0x41,
+ 0xe8, 0xce, 0xbe, 0x7d, 0xbd, 0xe2, 0x13, 0x4b, 0x5b, 0x61, 0xeb, 0x69,
+ 0x6c, 0xb1, 0x9b, 0x28, 0x68, 0x5b, 0xd6, 0x01, 0x02, 0x41, 0x00, 0xbd,
+ 0x1e, 0xfe, 0x51, 0x99, 0xb6, 0xe3, 0x84, 0xfe, 0xf1, 0x9e, 0xfd, 0x9c,
+ 0xe7, 0x86, 0x43, 0x68, 0x7f, 0x2f, 0x6a, 0x2a, 0x4c, 0xae, 0xa6, 0x41,
+ 0x1c, 0xf0, 0x10, 0x37, 0x54, 0x23, 0xba, 0x05, 0x0d, 0x18, 0x27, 0x8d,
+ 0xb8, 0xe4, 0x8f, 0xf2, 0x25, 0x73, 0x8a, 0xd7, 0x05, 0x98, 0x6b, 0x3d,
+ 0x55, 0xb7, 0x6f, 0x7c, 0xec, 0x77, 0x61, 0x54, 0x7b, 0xb6, 0x6b, 0x31,
+ 0xec, 0x94, 0xd5, 0x02, 0x41, 0x00, 0x90, 0xa2, 0xa5, 0x9e, 0x12, 0xa7,
+ 0x68, 0xa0, 0x7e, 0xdf, 0xb5, 0xcd, 0x98, 0x26, 0xab, 0xbd, 0xbc, 0x5f,
+ 0xd5, 0x22, 0x42, 0xc2, 0x97, 0x4a, 0x5f, 0x40, 0x82, 0xfe, 0x7e, 0x33,
+ 0xb1, 0x78, 0x7f, 0x70, 0x90, 0x2b, 0x8d, 0x01, 0xfb, 0x18, 0xfa, 0x48,
+ 0xa7, 0x15, 0xec, 0x0d, 0x2e, 0x85, 0x8d, 0xe2, 0x86, 0xe5, 0xc9, 0x15,
+ 0x88, 0x14, 0x53, 0xd8, 0xa4, 0x88, 0xef, 0x10, 0xc6, 0x01, 0x02, 0x41,
+ 0x00, 0xba, 0xe4, 0xaf, 0x14, 0xfa, 0xdf, 0xf6, 0xd5, 0xce, 0x8f, 0xfe,
+ 0xbb, 0xc8, 0x5c, 0x30, 0x9d, 0xda, 0xdd, 0x9d, 0x80, 0xc0, 0x0e, 0x89,
+ 0xa5, 0xb8, 0xc1, 0x1d, 0x28, 0x19, 0x55, 0x67, 0xfd, 0x03, 0xd2, 0xdd,
+ 0xe4, 0xf0, 0xb4, 0x20, 0x03, 0x74, 0x9b, 0xb8, 0x24, 0x23, 0xbb, 0xde,
+ 0xd5, 0x53, 0x86, 0xaa, 0xc1, 0x5d, 0x65, 0xdd, 0xcf, 0xec, 0x8a, 0x59,
+ 0x4a, 0x73, 0xca, 0xc5, 0x85, 0x02, 0x40, 0x00, 0xc4, 0x5e, 0x8d, 0xa4,
+ 0xea, 0xbb, 0x6a, 0x9b, 0xe6, 0x3a, 0x4d, 0xc1, 0xdb, 0xe5, 0x52, 0x38,
+ 0xf9, 0x59, 0x91, 0x2d, 0x90, 0x82, 0xe3, 0x31, 0x1b, 0x48, 0xb7, 0x42,
+ 0xfa, 0x1d, 0x83, 0xd5, 0x3d, 0x02, 0xc2, 0x12, 0x71, 0x10, 0x3a, 0xbd,
+ 0x92, 0x8f, 0x9b, 0xa2, 0x6b, 0x2d, 0x21, 0xa4, 0x65, 0xe9, 0xfa, 0x8c,
+ 0x30, 0x2a, 0x89, 0xce, 0xd0, 0xa7, 0x67, 0xd8, 0x45, 0x84, 0xb0};
+
+ // PKCS #8 blob whose shortened integer has a clear sign bit (so the DER
+ // encoding is simply one byte shorter).
+ const uint8_t short_integer_without_high_bit[] = {
+ 0x30, 0x82, 0x02, 0x76, 0x02, 0x01, 0x00, 0x30, 0x0d, 0x06, 0x09, 0x2a,
+ 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x04, 0x82,
+ 0x02, 0x60, 0x30, 0x82, 0x02, 0x5c, 0x02, 0x01, 0x00, 0x02, 0x81, 0x81,
+ 0x00, 0xc3, 0x9e, 0x8d, 0xc4, 0x6d, 0x38, 0xe8, 0x0e, 0x9f, 0x84, 0x03,
+ 0x40, 0x8e, 0x81, 0x2e, 0x56, 0x67, 0x78, 0x11, 0x85, 0x27, 0x81, 0x52,
+ 0xf2, 0x1b, 0x3e, 0x5b, 0xf8, 0xab, 0xfc, 0xaf, 0xca, 0x5c, 0x26, 0xd5,
+ 0xfa, 0xd4, 0x55, 0x50, 0x38, 0xb9, 0x9d, 0x89, 0x92, 0x7e, 0x34, 0xcf,
+ 0x37, 0x82, 0x48, 0x2d, 0xaa, 0xc4, 0x6a, 0x0e, 0x93, 0xea, 0xad, 0x8a,
+ 0x33, 0xf0, 0x42, 0x23, 0xe0, 0x4c, 0x98, 0xbf, 0x01, 0x00, 0x1b, 0xfe,
+ 0x06, 0x15, 0xc6, 0xe3, 0x80, 0x79, 0x6d, 0xfe, 0x48, 0xcd, 0x40, 0xbb,
+ 0xf9, 0x58, 0xe6, 0xbf, 0xd5, 0x4c, 0x29, 0x48, 0x53, 0x78, 0x06, 0x03,
+ 0x0d, 0x59, 0xf5, 0x20, 0xe0, 0xe6, 0x8c, 0xb2, 0xf5, 0xd8, 0x61, 0x52,
+ 0x7e, 0x40, 0x83, 0xd7, 0x69, 0xae, 0xd7, 0x75, 0x02, 0x2d, 0x49, 0xd5,
+ 0x15, 0x5b, 0xf1, 0xd9, 0x4d, 0x60, 0x7d, 0x62, 0xa5, 0x02, 0x03, 0x01,
+ 0x00, 0x01, 0x02, 0x7f, 0x6d, 0x45, 0x23, 0xeb, 0x95, 0x17, 0x34, 0x88,
+ 0xf6, 0x91, 0xc7, 0x3f, 0x48, 0x5a, 0xe0, 0x87, 0x63, 0x44, 0xae, 0x84,
+ 0xb2, 0x8c, 0x8a, 0xc8, 0xb2, 0x6f, 0x22, 0xf0, 0xc5, 0x21, 0x61, 0x10,
+ 0xa8, 0x69, 0x09, 0x1e, 0x13, 0x7d, 0x94, 0x52, 0x1b, 0x5c, 0xe4, 0x7b,
+ 0xf0, 0x03, 0x8f, 0xbc, 0x72, 0x09, 0xdf, 0x78, 0x84, 0x3e, 0xb9, 0xe5,
+ 0xe6, 0x31, 0x0a, 0x01, 0xf9, 0x32, 0xf8, 0xd6, 0x57, 0xa3, 0x87, 0xe6,
+ 0xf5, 0x98, 0xbc, 0x8e, 0x41, 0xb9, 0x50, 0x17, 0x7b, 0xd3, 0x97, 0x5a,
+ 0x44, 0x3a, 0xee, 0xff, 0x6b, 0xb3, 0x3a, 0x52, 0xe7, 0xa4, 0x96, 0x9a,
+ 0xf6, 0x83, 0xc8, 0x97, 0x1c, 0x63, 0xa1, 0xd6, 0xb3, 0xa8, 0xb2, 0xc7,
+ 0x73, 0x25, 0x0f, 0x58, 0x36, 0xb9, 0x7a, 0x47, 0xa7, 0x4d, 0x30, 0xfe,
+ 0x4d, 0x74, 0x56, 0xe8, 0xfb, 0xd6, 0x50, 0xe5, 0xe0, 0x28, 0x15, 0x02,
+ 0x41, 0x00, 0xeb, 0x15, 0x62, 0xb6, 0x37, 0x41, 0x7c, 0xc5, 0x00, 0x22,
+ 0x2c, 0x5a, 0x5e, 0xe4, 0xb2, 0x11, 0x87, 0x89, 0xad, 0xf4, 0x57, 0x68,
+ 0x90, 0xb7, 0x9f, 0xe2, 0x79, 0x20, 0x6b, 0x98, 0x00, 0x0d, 0x3a, 0x3b,
+ 0xc1, 0xcd, 0x36, 0xf9, 0x27, 0xda, 0x40, 0x36, 0x1d, 0xb8, 0x5c, 0x96,
+ 0xeb, 0x04, 0x08, 0xe1, 0x3f, 0xfa, 0x94, 0x8b, 0x0f, 0xa0, 0xff, 0xc1,
+ 0x51, 0xea, 0x90, 0xad, 0x15, 0xc7, 0x02, 0x41, 0x00, 0xd5, 0x06, 0x45,
+ 0xd7, 0x55, 0x63, 0x1a, 0xf0, 0x89, 0x81, 0xae, 0x87, 0x23, 0xa2, 0x39,
+ 0xfe, 0x3d, 0x82, 0xc7, 0xcb, 0x15, 0xb9, 0xe3, 0xe2, 0x5b, 0xc6, 0xd2,
+ 0x55, 0xdd, 0xab, 0x55, 0x29, 0x7c, 0xda, 0x0e, 0x1c, 0x09, 0xfc, 0x73,
+ 0x0d, 0x01, 0xed, 0x6d, 0x2f, 0x05, 0xd0, 0xd5, 0x1d, 0xce, 0x18, 0x7f,
+ 0xb0, 0xc8, 0x47, 0x77, 0xd2, 0xa9, 0x9e, 0xfc, 0x39, 0x4b, 0x3d, 0x94,
+ 0x33, 0x02, 0x41, 0x00, 0x8f, 0x94, 0x09, 0x2d, 0x17, 0x44, 0x75, 0x0a,
+ 0xf1, 0x10, 0xee, 0x1b, 0xe7, 0xd7, 0x2f, 0xf6, 0xca, 0xdc, 0x49, 0x15,
+ 0x72, 0x09, 0x58, 0x51, 0xfe, 0x61, 0xd8, 0xee, 0xf7, 0x27, 0xe7, 0xe8,
+ 0x2c, 0x47, 0xf1, 0x0f, 0x00, 0x63, 0x5e, 0x76, 0xcb, 0x3f, 0x02, 0x19,
+ 0xe6, 0xda, 0xfa, 0x01, 0x05, 0xd7, 0x65, 0x37, 0x0b, 0x60, 0x7f, 0x94,
+ 0x2a, 0x80, 0x8d, 0x22, 0x81, 0x68, 0x65, 0x63, 0x02, 0x41, 0x00, 0xc2,
+ 0xd4, 0x18, 0xde, 0x47, 0x9e, 0xfb, 0x8d, 0x91, 0x05, 0xc5, 0x3c, 0x9d,
+ 0xcf, 0x8a, 0x60, 0xc7, 0x9b, 0x2b, 0xe5, 0xc6, 0xba, 0x1b, 0xfc, 0xf3,
+ 0xd9, 0x54, 0x97, 0xe9, 0xc4, 0x00, 0x80, 0x90, 0x4a, 0xd2, 0x6a, 0xbc,
+ 0x8b, 0x62, 0x22, 0x3c, 0x68, 0x0c, 0xda, 0xdb, 0xe3, 0xd2, 0x76, 0x8e,
+ 0xff, 0x03, 0x12, 0x09, 0x2a, 0xac, 0x21, 0x44, 0xb7, 0x3e, 0x91, 0x9c,
+ 0x09, 0xf6, 0xd7, 0x02, 0x41, 0x00, 0xc0, 0xa1, 0xbb, 0x70, 0xdc, 0xf8,
+ 0xeb, 0x17, 0x61, 0xd4, 0x8c, 0x7c, 0x3b, 0x82, 0x91, 0x58, 0xff, 0xf9,
+ 0x19, 0xac, 0x3a, 0x73, 0xa7, 0x20, 0xe5, 0x22, 0x02, 0xc4, 0xf6, 0xb9,
+ 0xb9, 0x43, 0x53, 0x35, 0x88, 0xe1, 0x05, 0xb6, 0x43, 0x9b, 0x39, 0xc8,
+ 0x04, 0x4d, 0x2b, 0x01, 0xf7, 0xe6, 0x1b, 0x8d, 0x7e, 0x89, 0xe3, 0x43,
+ 0xd4, 0xf3, 0xab, 0x28, 0xd4, 0x5a, 0x1f, 0x20, 0xea, 0xbe};
+
+ std::vector<uint8_t> input1;
+ std::vector<uint8_t> input2;
+
+ input1.resize(sizeof(short_integer_with_high_bit));
+ input2.resize(sizeof(short_integer_without_high_bit));
+
+ memcpy(&input1.front(), short_integer_with_high_bit,
+ sizeof(short_integer_with_high_bit));
+ memcpy(&input2.front(), short_integer_without_high_bit,
+ sizeof(short_integer_without_high_bit));
+
+ std::unique_ptr<crypto::RSAPrivateKey> keypair1(
+ crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input1));
+ std::unique_ptr<crypto::RSAPrivateKey> keypair2(
+ crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input2));
+ ASSERT_TRUE(keypair1.get());
+ ASSERT_TRUE(keypair2.get());
+
+ // Round-trip export must reproduce the input bytes exactly.
+ std::vector<uint8_t> output1;
+ std::vector<uint8_t> output2;
+ ASSERT_TRUE(keypair1->ExportPrivateKey(&output1));
+ ASSERT_TRUE(keypair2->ExportPrivateKey(&output2));
+
+ ASSERT_EQ(input1.size(), output1.size());
+ ASSERT_EQ(input2.size(), output2.size());
+ ASSERT_TRUE(0 == memcmp(&output1.front(), &input1.front(),
+ input1.size()));
+ ASSERT_TRUE(0 == memcmp(&output2.front(), &input2.front(),
+ input2.size()));
+}
+
+// CreateFromKey() must produce an independent object that exports the
+// same private and public key bytes as the original.
+TEST(RSAPrivateKeyUnitTest, CreateFromKeyTest) {
+ std::unique_ptr<crypto::RSAPrivateKey> key_pair(
+ crypto::RSAPrivateKey::Create(512));
+ ASSERT_TRUE(key_pair.get());
+
+ std::unique_ptr<crypto::RSAPrivateKey> key_copy(
+ crypto::RSAPrivateKey::CreateFromKey(key_pair->key()));
+ ASSERT_TRUE(key_copy.get());
+
+ std::vector<uint8_t> privkey;
+ std::vector<uint8_t> pubkey;
+ ASSERT_TRUE(key_pair->ExportPrivateKey(&privkey));
+ ASSERT_TRUE(key_pair->ExportPublicKey(&pubkey));
+
+ std::vector<uint8_t> privkey_copy;
+ std::vector<uint8_t> pubkey_copy;
+ ASSERT_TRUE(key_copy->ExportPrivateKey(&privkey_copy));
+ ASSERT_TRUE(key_copy->ExportPublicKey(&pubkey_copy));
+
+ ASSERT_EQ(privkey, privkey_copy);
+ ASSERT_EQ(pubkey, pubkey_copy);
+}
+
diff --git a/libchrome/crypto/scoped_capi_types.h b/libchrome/crypto/scoped_capi_types.h
new file mode 100644
index 0000000..74e5765
--- /dev/null
+++ b/libchrome/crypto/scoped_capi_types.h
@@ -0,0 +1,122 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_SCOPED_CAPI_TYPES_H_
+#define CRYPTO_SCOPED_CAPI_TYPES_H_
+
+#include <windows.h>
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "crypto/wincrypt_shim.h"
+
+namespace crypto {
+
+// Simple destructor for the Free family of CryptoAPI functions, such as
+// CryptDestroyHash, which take only a single argument to release.
+template <typename CAPIHandle, BOOL (WINAPI *Destroyer)(CAPIHandle)>
+struct CAPIDestroyer {
+ void operator()(CAPIHandle handle) const {
+ if (handle) {
+ // A failed release is only DCHECKed; there is no sensible recovery.
+ BOOL ok = Destroyer(handle);
+ DCHECK(ok);
+ }
+ }
+};
+
+// Destructor for the Close/Release family of CryptoAPI functions, which take
+// a second DWORD parameter indicating flags to use when closing or releasing.
+// This includes functions like CertCloseStore or CryptReleaseContext.
+template <typename CAPIHandle, BOOL (WINAPI *Destroyer)(CAPIHandle, DWORD),
+ DWORD flags>
+struct CAPIDestroyerWithFlags {
+ void operator()(CAPIHandle handle) const {
+ if (handle) {
+ BOOL ok = Destroyer(handle, flags);
+ DCHECK(ok);
+ }
+ }
+};
+
+// scoped_ptr-like class for the CryptoAPI cryptography and certificate
+// handles. Because these handles are defined as integer types, and not
+// pointers, the existing scoped classes, such as scoped_ptr, are insufficient.
+// The semantics are the same as scoped_ptr.
+template <class CAPIHandle, typename FreeProc>
+class ScopedCAPIHandle {
+ public:
+ explicit ScopedCAPIHandle(CAPIHandle handle = NULL) : handle_(handle) {}
+
+ ~ScopedCAPIHandle() {
+ reset();
+ }
+
+ // Frees the current handle (if any) via FreeProc and takes ownership of
+ // |handle|. Resetting to the already-held handle is a no-op.
+ void reset(CAPIHandle handle = NULL) {
+ if (handle_ != handle) {
+ FreeProc free_proc;
+ free_proc(handle_);
+ handle_ = handle;
+ }
+ }
+
+ operator CAPIHandle() const { return handle_; }
+ CAPIHandle get() const { return handle_; }
+
+ // Returns the address of the wrapped handle for use as a CryptoAPI
+ // out-parameter. The stored handle must currently be NULL, so that no
+ // existing handle can be leaked by the overwrite.
+ CAPIHandle* receive() {
+ CHECK(handle_ == NULL);
+ return &handle_;
+ }
+
+ bool operator==(CAPIHandle handle) const {
+ return handle_ == handle;
+ }
+
+ bool operator!=(CAPIHandle handle) const {
+ return handle_ != handle;
+ }
+
+ void swap(ScopedCAPIHandle& b) {
+ CAPIHandle tmp = b.handle_;
+ b.handle_ = handle_;
+ handle_ = tmp;
+ }
+
+ // Relinquishes ownership: returns the handle without freeing it and
+ // leaves this object holding NULL.
+ CAPIHandle release() {
+ CAPIHandle tmp = handle_;
+ handle_ = NULL;
+ return tmp;
+ }
+
+ private:
+ CAPIHandle handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedCAPIHandle);
+};
+
+// Symmetric comparisons with a raw handle on the left-hand side.
+template<class CH, typename FP> inline
+bool operator==(CH h, const ScopedCAPIHandle<CH, FP>& b) {
+ return h == b.get();
+}
+
+template<class CH, typename FP> inline
+bool operator!=(CH h, const ScopedCAPIHandle<CH, FP>& b) {
+ return h != b.get();
+}
+
+// Convenience scopers for the common CryptoAPI handle types.
+typedef ScopedCAPIHandle<
+ HCRYPTPROV,
+ CAPIDestroyerWithFlags<HCRYPTPROV,
+ CryptReleaseContext, 0> > ScopedHCRYPTPROV;
+
+typedef ScopedCAPIHandle<
+ HCRYPTKEY, CAPIDestroyer<HCRYPTKEY, CryptDestroyKey> > ScopedHCRYPTKEY;
+
+typedef ScopedCAPIHandle<
+ HCRYPTHASH, CAPIDestroyer<HCRYPTHASH, CryptDestroyHash> > ScopedHCRYPTHASH;
+
+} // namespace crypto
+
+#endif // CRYPTO_SCOPED_CAPI_TYPES_H_
diff --git a/libchrome/crypto/scoped_nss_types.h b/libchrome/crypto/scoped_nss_types.h
new file mode 100644
index 0000000..a739565
--- /dev/null
+++ b/libchrome/crypto/scoped_nss_types.h
@@ -0,0 +1,63 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_SCOPED_NSS_TYPES_H_
+#define CRYPTO_SCOPED_NSS_TYPES_H_
+
+#include <keyhi.h>
+#include <nss.h>
+#include <pk11pub.h>
+#include <plarena.h>
+
+#include <memory>
+
+namespace crypto {
+
+// Deleter that calls a one-argument NSS destroy function.
+template <typename Type, void (*Destroyer)(Type*)>
+struct NSSDestroyer {
+ void operator()(Type* ptr) const {
+ Destroyer(ptr);
+ }
+};
+
+// Deleter for NSS destroy functions that take a trailing PRBool |freeit|
+// flag, selecting whether the pointed-to structure itself is freed.
+template <typename Type, void (*Destroyer)(Type*, PRBool), PRBool freeit>
+struct NSSDestroyer1 {
+ void operator()(Type* ptr) const {
+ Destroyer(ptr, freeit);
+ }
+};
+
+// Define some convenient scopers around NSS pointers.
+typedef std::unique_ptr<
+ PK11Context,
+ NSSDestroyer1<PK11Context, PK11_DestroyContext, PR_TRUE>>
+ ScopedPK11Context;
+typedef std::unique_ptr<PK11SlotInfo, NSSDestroyer<PK11SlotInfo, PK11_FreeSlot>>
+ ScopedPK11Slot;
+typedef std::unique_ptr<PK11SlotList,
+ NSSDestroyer<PK11SlotList, PK11_FreeSlotList>>
+ ScopedPK11SlotList;
+typedef std::unique_ptr<PK11SymKey, NSSDestroyer<PK11SymKey, PK11_FreeSymKey>>
+ ScopedPK11SymKey;
+typedef std::unique_ptr<SECKEYPublicKey,
+ NSSDestroyer<SECKEYPublicKey, SECKEY_DestroyPublicKey>>
+ ScopedSECKEYPublicKey;
+typedef std::unique_ptr<
+ SECKEYPrivateKey,
+ NSSDestroyer<SECKEYPrivateKey, SECKEY_DestroyPrivateKey>>
+ ScopedSECKEYPrivateKey;
+typedef std::unique_ptr<
+ SECAlgorithmID,
+ NSSDestroyer1<SECAlgorithmID, SECOID_DestroyAlgorithmID, PR_TRUE>>
+ ScopedSECAlgorithmID;
+typedef std::unique_ptr<SECItem,
+ NSSDestroyer1<SECItem, SECITEM_FreeItem, PR_TRUE>>
+ ScopedSECItem;
+typedef std::unique_ptr<PLArenaPool,
+ NSSDestroyer1<PLArenaPool, PORT_FreeArena, PR_FALSE>>
+ ScopedPLArenaPool;
+
+} // namespace crypto
+
+#endif // CRYPTO_SCOPED_NSS_TYPES_H_
diff --git a/libchrome/crypto/scoped_openssl_types.h b/libchrome/crypto/scoped_openssl_types.h
new file mode 100644
index 0000000..622fed2
--- /dev/null
+++ b/libchrome/crypto/scoped_openssl_types.h
@@ -0,0 +1,62 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_SCOPED_OPENSSL_TYPES_H_
+#define CRYPTO_SCOPED_OPENSSL_TYPES_H_
+
+#include <openssl/bio.h>
+#include <openssl/bn.h>
+#include <openssl/dsa.h>
+#include <openssl/ec.h>
+#include <openssl/ecdsa.h>
+#include <openssl/evp.h>
+#ifdef OPENSSL_IS_BORINGSSL
+#include <openssl/mem.h>
+#endif
+#include <openssl/rsa.h>
+#include <stdint.h>
+
+#include <memory>
+
+namespace crypto {
+
+// Simplistic helper that wraps a call to a deleter function. In a C++11 world,
+// this would be std::function<>. An alternative would be to re-use
+// base::internal::RunnableAdapter<>, but that's far too heavy weight.
+template <typename Type, void (*Destroyer)(Type*)>
+struct OpenSSLDestroyer {
+ void operator()(Type* ptr) const { Destroyer(ptr); }
+};
+
+// unique_ptr alias that frees an OpenSSL object with its matching *_free
+// function when the pointer goes out of scope.
+template <typename PointerType, void (*Destroyer)(PointerType*)>
+using ScopedOpenSSL =
+ std::unique_ptr<PointerType, OpenSSLDestroyer<PointerType, Destroyer>>;
+
+// Deleter for raw byte buffers allocated by OpenSSL (OPENSSL_malloc).
+struct OpenSSLFree {
+ void operator()(uint8_t* ptr) const { OPENSSL_free(ptr); }
+};
+
+// Several typedefs are provided for crypto-specific primitives, for
+// short-hand and prevalence. Note that OpenSSL types related to X.509 are
+// intentionally not included, as crypto/ does not generally deal with
+// certificates or PKI.
+using ScopedBIGNUM = ScopedOpenSSL<BIGNUM, BN_free>;
+using ScopedEC_Key = ScopedOpenSSL<EC_KEY, EC_KEY_free>;
+using ScopedBIO = ScopedOpenSSL<BIO, BIO_free_all>;
+using ScopedDSA = ScopedOpenSSL<DSA, DSA_free>;
+using ScopedECDSA_SIG = ScopedOpenSSL<ECDSA_SIG, ECDSA_SIG_free>;
+using ScopedEC_GROUP = ScopedOpenSSL<EC_GROUP, EC_GROUP_free>;
+using ScopedEC_KEY = ScopedOpenSSL<EC_KEY, EC_KEY_free>;
+using ScopedEC_POINT = ScopedOpenSSL<EC_POINT, EC_POINT_free>;
+using ScopedEVP_MD_CTX = ScopedOpenSSL<EVP_MD_CTX, EVP_MD_CTX_destroy>;
+using ScopedEVP_PKEY = ScopedOpenSSL<EVP_PKEY, EVP_PKEY_free>;
+using ScopedEVP_PKEY_CTX = ScopedOpenSSL<EVP_PKEY_CTX, EVP_PKEY_CTX_free>;
+using ScopedRSA = ScopedOpenSSL<RSA, RSA_free>;
+
+// The bytes must have been allocated with OPENSSL_malloc.
+using ScopedOpenSSLBytes = std::unique_ptr<uint8_t, OpenSSLFree>;
+
+} // namespace crypto
+
+#endif // CRYPTO_SCOPED_OPENSSL_TYPES_H_
diff --git a/libchrome/crypto/scoped_test_nss_chromeos_user.cc b/libchrome/crypto/scoped_test_nss_chromeos_user.cc
new file mode 100644
index 0000000..aec25d8
--- /dev/null
+++ b/libchrome/crypto/scoped_test_nss_chromeos_user.cc
@@ -0,0 +1,37 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/scoped_test_nss_chromeos_user.h"
+
+#include "base/logging.h"
+#include "crypto/nss_util.h"
+#include "crypto/nss_util_internal.h"
+
+namespace crypto {
+
+ScopedTestNSSChromeOSUser::ScopedTestNSSChromeOSUser(
+ const std::string& username_hash)
+ : username_hash_(username_hash), constructed_successfully_(false) {
+ // If the temp dir cannot be created, constructed_successfully_ stays false
+ // and the destructor will skip teardown.
+ if (!temp_dir_.CreateUniqueTempDir())
+ return;
+ // This opens a software DB in the given folder. In production code that is in
+ // the home folder, but for testing the temp folder is used.
+ constructed_successfully_ =
+ InitializeNSSForChromeOSUser(username_hash, temp_dir_.path());
+}
+
+ScopedTestNSSChromeOSUser::~ScopedTestNSSChromeOSUser() {
+ // Only tear down NSS state that the constructor actually set up.
+ if (constructed_successfully_)
+ CloseChromeOSUserForTesting(username_hash_);
+}
+
+void ScopedTestNSSChromeOSUser::FinishInit() {
+ DCHECK(constructed_successfully_);
+ // Nothing to do if TPM initialization is not pending for this user.
+ if (!ShouldInitializeTPMForChromeOSUser(username_hash_))
+ return;
+ WillInitializeTPMForChromeOSUser(username_hash_);
+ InitializePrivateSoftwareSlotForChromeOSUser(username_hash_);
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/scoped_test_nss_chromeos_user.h b/libchrome/crypto/scoped_test_nss_chromeos_user.h
new file mode 100644
index 0000000..9202b0f
--- /dev/null
+++ b/libchrome/crypto/scoped_test_nss_chromeos_user.h
@@ -0,0 +1,43 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_SCOPED_TEST_NSS_CHROMEOS_USER_H_
+#define CRYPTO_SCOPED_TEST_NSS_CHROMEOS_USER_H_
+
+#include <string>
+
+#include "base/files/scoped_temp_dir.h"
+#include "base/macros.h"
+#include "crypto/crypto_export.h"
+
+namespace crypto {
+
+// Opens a persistent NSS software database in a temporary directory for the
+// user with |username_hash|. This database will be used for both the user's
+// public and private slot.
+class CRYPTO_EXPORT ScopedTestNSSChromeOSUser {
+ public:
+ // Opens the software database and sets the public slot for the user. The
+ // private slot will not be initialized until FinishInit() is called.
+ explicit ScopedTestNSSChromeOSUser(const std::string& username_hash);
+ ~ScopedTestNSSChromeOSUser();
+
+ std::string username_hash() const { return username_hash_; }
+ // False if creating the temp dir or initializing NSS for the user failed.
+ bool constructed_successfully() const { return constructed_successfully_; }
+
+ // Completes initialization of user. Causes any waiting private slot callbacks
+ // to run, see GetPrivateSlotForChromeOSUser().
+ void FinishInit();
+
+ private:
+ const std::string username_hash_;
+ // Backing storage for the user's software NSS database.
+ base::ScopedTempDir temp_dir_;
+ bool constructed_successfully_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedTestNSSChromeOSUser);
+};
+
+} // namespace crypto
+
+#endif // CRYPTO_SCOPED_TEST_NSS_CHROMEOS_USER_H_
diff --git a/libchrome/crypto/scoped_test_nss_db.cc b/libchrome/crypto/scoped_test_nss_db.cc
new file mode 100644
index 0000000..dc58031
--- /dev/null
+++ b/libchrome/crypto/scoped_test_nss_db.cc
@@ -0,0 +1,71 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/scoped_test_nss_db.h"
+
+#include <cert.h>
+
+#include "base/logging.h"
+#include "base/threading/thread_restrictions.h"
+#include "crypto/nss_util.h"
+#include "crypto/nss_util_internal.h"
+
+namespace crypto {
+
+ScopedTestNSSDB::ScopedTestNSSDB() {
+ EnsureNSSInit();
+ // NSS is allowed to do IO on the current thread since dispatching
+ // to a dedicated thread would still have the effect of blocking
+ // the current thread, due to NSS's internal locking requirements.
+ base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+ // On failure slot_ stays null, which is_open() reports to callers.
+ if (!temp_dir_.CreateUniqueTempDir())
+ return;
+
+ const char kTestDescription[] = "Test DB";
+ slot_ = OpenSoftwareNSSDB(temp_dir_.path(), kTestDescription);
+}
+
+ScopedTestNSSDB::~ScopedTestNSSDB() {
+ // Remove trust from any certs in the test DB before closing it. Otherwise NSS
+ // may cache verification results even after the test DB is gone.
+ if (slot_) {
+ // NOTE(review): PK11_ListCertsInSlot() can return NULL on failure, and
+ // CERT_LIST_HEAD would then dereference it — confirm this cannot happen
+ // here or add a null check.
+ CERTCertList* cert_list = PK11_ListCertsInSlot(slot_.get());
+ for (CERTCertListNode* node = CERT_LIST_HEAD(cert_list);
+ !CERT_LIST_END(node, cert_list);
+ node = CERT_LIST_NEXT(node)) {
+ CERTCertTrust trust = {0};
+ if (CERT_ChangeCertTrust(CERT_GetDefaultCertDB(), node->cert, &trust) !=
+ SECSuccess) {
+ LOG(ERROR) << "CERT_ChangeCertTrust failed: " << PORT_GetError();
+ }
+ }
+ CERT_DestroyCertList(cert_list);
+ }
+
+ // Don't close when NSS is < 3.15.1, because it would require an additional
+ // sleep for 1 second after closing the database, due to
+ // http://bugzil.la/875601.
+ if (!NSS_VersionCheck("3.15.1")) {
+ LOG(ERROR) << "NSS version is < 3.15.1, test DB will not be closed.";
+ // Deliberately leak the temp dir: the still-open DB lives in it.
+ temp_dir_.Take();
+ return;
+ }
+
+ // NSS is allowed to do IO on the current thread since dispatching
+ // to a dedicated thread would still have the effect of blocking
+ // the current thread, due to NSS's internal locking requirements.
+ base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+ if (slot_) {
+ SECStatus status = SECMOD_CloseUserDB(slot_.get());
+ if (status != SECSuccess)
+ PLOG(ERROR) << "SECMOD_CloseUserDB failed: " << PORT_GetError();
+ }
+
+ if (!temp_dir_.Delete())
+ LOG(ERROR) << "Could not delete temporary directory.";
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/scoped_test_nss_db.h b/libchrome/crypto/scoped_test_nss_db.h
new file mode 100644
index 0000000..c01653f
--- /dev/null
+++ b/libchrome/crypto/scoped_test_nss_db.h
@@ -0,0 +1,35 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_SCOPED_TEST_NSS_DB_H_
+#define CRYPTO_SCOPED_TEST_NSS_DB_H_
+
+#include "base/files/scoped_temp_dir.h"
+#include "base/macros.h"
+#include "crypto/crypto_export.h"
+#include "crypto/scoped_nss_types.h"
+
+namespace crypto {
+
+// Opens a persistent NSS database in a temporary directory.
+// Prior to NSS version 3.15.1, because of http://bugzil.la/875601 , the opened
+// DB will not be closed automatically.
+class CRYPTO_EXPORT ScopedTestNSSDB {
+ public:
+ ScopedTestNSSDB();
+ ~ScopedTestNSSDB();
+
+ // True if the software DB was opened successfully in the constructor.
+ bool is_open() const { return !!slot_; }
+ // Non-owning pointer to the DB's slot; null when is_open() is false.
+ PK11SlotInfo* slot() const { return slot_.get(); }
+
+ private:
+ base::ScopedTempDir temp_dir_;
+ ScopedPK11Slot slot_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedTestNSSDB);
+};
+
+} // namespace crypto
+
+#endif // CRYPTO_SCOPED_TEST_NSS_DB_H_
diff --git a/libchrome/crypto/scoped_test_system_nss_key_slot.cc b/libchrome/crypto/scoped_test_system_nss_key_slot.cc
new file mode 100644
index 0000000..53fbbff
--- /dev/null
+++ b/libchrome/crypto/scoped_test_system_nss_key_slot.cc
@@ -0,0 +1,32 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/scoped_test_system_nss_key_slot.h"
+
+#include "crypto/nss_util_internal.h"
+#include "crypto/scoped_test_nss_db.h"
+
+namespace crypto {
+
+ScopedTestSystemNSSKeySlot::ScopedTestSystemNSSKeySlot()
+ : test_db_(new ScopedTestNSSDB) {
+ if (!test_db_->is_open())
+ return;
+ // PK11_ReferenceSlot adds a refcount; ScopedPK11Slot hands ownership of
+ // that reference to the global test system slot.
+ SetSystemKeySlotForTesting(
+ ScopedPK11Slot(PK11_ReferenceSlot(test_db_->slot())));
+}
+
+ScopedTestSystemNSSKeySlot::~ScopedTestSystemNSSKeySlot() {
+ // Clear the global test slot by installing an empty scoper.
+ SetSystemKeySlotForTesting(ScopedPK11Slot());
+}
+
+bool ScopedTestSystemNSSKeySlot::ConstructedSuccessfully() const {
+ return test_db_->is_open();
+}
+
+PK11SlotInfo* ScopedTestSystemNSSKeySlot::slot() const {
+ return test_db_->slot();
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/scoped_test_system_nss_key_slot.h b/libchrome/crypto/scoped_test_system_nss_key_slot.h
new file mode 100644
index 0000000..eb8fbc9
--- /dev/null
+++ b/libchrome/crypto/scoped_test_system_nss_key_slot.h
@@ -0,0 +1,44 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_SCOPED_TEST_SYSTEM_NSS_KEY_SLOT_H_
+#define CRYPTO_SCOPED_TEST_SYSTEM_NSS_KEY_SLOT_H_
+
+#include <memory>
+
+#include "base/macros.h"
+#include "crypto/crypto_export.h"
+
+// Forward declaration, from <pk11pub.h>
+typedef struct PK11SlotInfoStr PK11SlotInfo;
+
+namespace crypto {
+
+class ScopedTestNSSDB;
+
+// Opens a persistent NSS software database in a temporary directory and sets
+// the test system slot to the opened database. This helper should be created in
+// tests to fake the system token that is usually provided by the Chaps module.
+// |slot| is exposed through |GetSystemNSSKeySlot| and |IsTPMTokenReady| will
+// return true.
+// |InitializeTPMTokenAndSystemSlot|, which triggers the TPM initialization,
+// does not have to be called if this helper is used.
+// At most one instance of this helper must be used at a time.
+class CRYPTO_EXPORT ScopedTestSystemNSSKeySlot {
+ public:
+ // NOTE(review): `explicit` on a zero-argument constructor has no effect.
+ explicit ScopedTestSystemNSSKeySlot();
+ ~ScopedTestSystemNSSKeySlot();
+
+ // False if opening the backing test database failed.
+ bool ConstructedSuccessfully() const;
+ // Non-owning pointer to the test database's slot.
+ PK11SlotInfo* slot() const;
+
+ private:
+ std::unique_ptr<ScopedTestNSSDB> test_db_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedTestSystemNSSKeySlot);
+};
+
+} // namespace crypto
+
+#endif // CRYPTO_SCOPED_TEST_SYSTEM_NSS_KEY_SLOT_H_
diff --git a/libchrome/crypto/secure_hash.cc b/libchrome/crypto/secure_hash.cc
new file mode 100644
index 0000000..9003b9c
--- /dev/null
+++ b/libchrome/crypto/secure_hash.cc
@@ -0,0 +1,70 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/secure_hash.h"
+
+#if defined(OPENSSL_IS_BORINGSSL)
+#include <openssl/mem.h>
+#else
+#include <openssl/crypto.h>
+#endif
+#include <openssl/sha.h>
+#include <stddef.h>
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/pickle.h"
+#include "crypto/openssl_util.h"
+
+namespace crypto {
+
+namespace {
+
+// SHA-256 implementation of SecureHash backed by OpenSSL/BoringSSL's SHA256_*.
+class SecureHashSHA256 : public SecureHash {
+ public:
+ SecureHashSHA256() {
+ SHA256_Init(&ctx_);
+ }
+
+ // Duplicates the in-progress hash state (used by Clone()). SHA256_CTX is a
+ // plain struct, so a raw memcpy copies the state completely.
+ SecureHashSHA256(const SecureHashSHA256& other) : SecureHash() {
+ memcpy(&ctx_, &other.ctx_, sizeof(ctx_));
+ }
+
+ ~SecureHashSHA256() override {
+ // Scrub the hash state so intermediate data does not linger in memory.
+ OPENSSL_cleanse(&ctx_, sizeof(ctx_));
+ }
+
+ void Update(const void* input, size_t len) override {
+ SHA256_Update(&ctx_, static_cast<const unsigned char*>(input), len);
+ }
+
+ void Finish(void* output, size_t len) override {
+ // The safe-size buffer lets callers request fewer than
+ // SHA256_DIGEST_LENGTH bytes; only |len| bytes are copied to |output|.
+ ScopedOpenSSLSafeSizeBuffer<SHA256_DIGEST_LENGTH> result(
+ static_cast<unsigned char*>(output), len);
+ SHA256_Final(result.safe_buffer(), &ctx_);
+ }
+
+ std::unique_ptr<SecureHash> Clone() const override {
+ return base::MakeUnique<SecureHashSHA256>(*this);
+ }
+
+ size_t GetHashLength() const override { return SHA256_DIGEST_LENGTH; }
+
+ private:
+ SHA256_CTX ctx_;
+};
+
+} // namespace
+
+// Factory for SecureHash instances. Returns null (after NOTIMPLEMENTED) for
+// any algorithm other than SHA256.
+std::unique_ptr<SecureHash> SecureHash::Create(Algorithm algorithm) {
+ switch (algorithm) {
+ case SHA256:
+ return base::MakeUnique<SecureHashSHA256>();
+ default:
+ NOTIMPLEMENTED();
+ return nullptr;
+ }
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/secure_hash.h b/libchrome/crypto/secure_hash.h
new file mode 100644
index 0000000..30b9fdc
--- /dev/null
+++ b/libchrome/crypto/secure_hash.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_SECURE_HASH_H_
+#define CRYPTO_SECURE_HASH_H_
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/macros.h"
+#include "crypto/crypto_export.h"
+
+namespace crypto {
+
+// A wrapper to calculate secure hashes incrementally, allowing it to
+// be used when the full input is not known in advance.
+class CRYPTO_EXPORT SecureHash {
+ public:
+ enum Algorithm {
+ SHA256,
+ };
+ virtual ~SecureHash() {}
+
+ // Factory; returns null for unsupported algorithms.
+ static std::unique_ptr<SecureHash> Create(Algorithm type);
+
+ // Feeds |len| bytes of |input| into the hash. May be called repeatedly.
+ virtual void Update(const void* input, size_t len) = 0;
+ // Finalizes the hash and writes up to |len| bytes of the digest to
+ // |output| (at most GetHashLength() bytes are produced).
+ virtual void Finish(void* output, size_t len) = 0;
+ virtual size_t GetHashLength() const = 0;
+
+ // Create a clone of this SecureHash. The returned clone and this both
+ // represent the same hash state. But from this point on, calling
+ // Update()/Finish() on either doesn't affect the state of the other.
+ virtual std::unique_ptr<SecureHash> Clone() const = 0;
+
+ protected:
+ SecureHash() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SecureHash);
+};
+
+} // namespace crypto
+
+#endif // CRYPTO_SECURE_HASH_H_
diff --git a/libchrome/crypto/secure_hash_unittest.cc b/libchrome/crypto/secure_hash_unittest.cc
new file mode 100644
index 0000000..cb9f585
--- /dev/null
+++ b/libchrome/crypto/secure_hash_unittest.cc
@@ -0,0 +1,82 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/secure_hash.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include "crypto/sha2.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST(SecureHashTest, TestUpdate) {
+ // Example B.3 from FIPS 180-2: long message.
+ std::string input3(500000, 'a'); // 'a' repeated half a million times
+ const int kExpectedHashOfInput3[] = {
+ 0xcd, 0xc7, 0x6e, 0x5c, 0x99, 0x14, 0xfb, 0x92, 0x81, 0xa1, 0xc7,
+ 0xe2, 0x84, 0xd7, 0x3e, 0x67, 0xf1, 0x80, 0x9a, 0x48, 0xa4, 0x97,
+ 0x20, 0x0e, 0x04, 0x6d, 0x39, 0xcc, 0xc7, 0x11, 0x2c, 0xd0};
+
+ uint8_t output3[crypto::kSHA256Length];
+
+ std::unique_ptr<crypto::SecureHash> ctx(
+ crypto::SecureHash::Create(crypto::SecureHash::SHA256));
+ // Two updates of 500,000 'a's give the one-million-'a' message from the
+ // FIPS example, exercising the incremental Update() path.
+ ctx->Update(input3.data(), input3.size());
+ ctx->Update(input3.data(), input3.size());
+
+ ctx->Finish(output3, sizeof(output3));
+ for (size_t i = 0; i < crypto::kSHA256Length; i++)
+ EXPECT_EQ(kExpectedHashOfInput3[i], static_cast<int>(output3[i]));
+}
+
+TEST(SecureHashTest, TestClone) {
+ // Clone() must snapshot the hash state: clones diverge independently after
+ // the clone point.
+ std::string input1(10001, 'a'); // 'a' repeated 10001 times
+ std::string input2(10001, 'd'); // 'd' repeated 10001 times
+
+ const uint8_t kExpectedHashOfInput1[crypto::kSHA256Length] = {
+ 0x0c, 0xab, 0x99, 0xa0, 0x58, 0x60, 0x0f, 0xfa, 0xad, 0x12, 0x92,
+ 0xd0, 0xc5, 0x3c, 0x05, 0x48, 0xeb, 0xaf, 0x88, 0xdd, 0x1d, 0x01,
+ 0x03, 0x03, 0x45, 0x70, 0x5f, 0x01, 0x8a, 0x81, 0x39, 0x09};
+ const uint8_t kExpectedHashOfInput1And2[crypto::kSHA256Length] = {
+ 0x4c, 0x8e, 0x26, 0x5a, 0xc3, 0x85, 0x1f, 0x1f, 0xa5, 0x04, 0x1c,
+ 0xc7, 0x88, 0x53, 0x1c, 0xc7, 0x80, 0x47, 0x15, 0xfb, 0x47, 0xff,
+ 0x72, 0xb1, 0x28, 0x37, 0xb0, 0x4d, 0x6e, 0x22, 0x2e, 0x4d};
+
+ uint8_t output1[crypto::kSHA256Length];
+ uint8_t output2[crypto::kSHA256Length];
+ uint8_t output3[crypto::kSHA256Length];
+
+ std::unique_ptr<crypto::SecureHash> ctx1(
+ crypto::SecureHash::Create(crypto::SecureHash::SHA256));
+ ctx1->Update(input1.data(), input1.size());
+
+ std::unique_ptr<crypto::SecureHash> ctx2(ctx1->Clone());
+ std::unique_ptr<crypto::SecureHash> ctx3(ctx2->Clone());
+ // At this point, ctx1, ctx2, and ctx3 are all equivalent and represent the
+ // state after hashing input1.
+
+ // Updating ctx1 and ctx2 with input2 should produce equivalent results.
+ ctx1->Update(input2.data(), input2.size());
+ ctx1->Finish(output1, sizeof(output1));
+
+ ctx2->Update(input2.data(), input2.size());
+ ctx2->Finish(output2, sizeof(output2));
+
+ EXPECT_EQ(0, memcmp(output1, output2, crypto::kSHA256Length));
+ EXPECT_EQ(0,
+ memcmp(output1, kExpectedHashOfInput1And2, crypto::kSHA256Length));
+
+ // Finish() ctx3, which should produce the hash of input1.
+ ctx3->Finish(&output3, sizeof(output3));
+ EXPECT_EQ(0, memcmp(output3, kExpectedHashOfInput1, crypto::kSHA256Length));
+}
+
+TEST(SecureHashTest, TestLength) {
+ std::unique_ptr<crypto::SecureHash> ctx(
+ crypto::SecureHash::Create(crypto::SecureHash::SHA256));
+ EXPECT_EQ(crypto::kSHA256Length, ctx->GetHashLength());
+}
diff --git a/libchrome/crypto/secure_util.cc b/libchrome/crypto/secure_util.cc
new file mode 100644
index 0000000..fe86d65
--- /dev/null
+++ b/libchrome/crypto/secure_util.cc
@@ -0,0 +1,21 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include "crypto/secure_util.h"
+
+namespace crypto {
+
+// Constant-time buffer comparison: ORs all byte differences together instead
+// of early-exiting, so the running time depends only on |n|, not on where the
+// first mismatch occurs.
+bool SecureMemEqual(const void* s1, const void* s2, size_t n) {
+ const unsigned char* s1_ptr = reinterpret_cast<const unsigned char*>(s1);
+ const unsigned char* s2_ptr = reinterpret_cast<const unsigned char*>(s2);
+ unsigned char tmp = 0;
+ for (size_t i = 0; i < n; ++i, ++s1_ptr, ++s2_ptr)
+ tmp |= *s1_ptr ^ *s2_ptr;
+ // tmp is zero iff every byte matched.
+ return (tmp == 0);
+}
+
+} // namespace crypto
+
diff --git a/libchrome/crypto/secure_util.h b/libchrome/crypto/secure_util.h
new file mode 100644
index 0000000..cfe05ca
--- /dev/null
+++ b/libchrome/crypto/secure_util.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_SECURE_UTIL_H_
+#define CRYPTO_SECURE_UTIL_H_
+
+#include <stddef.h>
+
+#include "crypto/crypto_export.h"
+
+namespace crypto {
+
+// Performs a constant-time comparison of two strings, returning true if the
+// strings are equal.
+//
+// For cryptographic operations, comparison functions such as memcmp() may
+// expose side-channel information about input, allowing an attacker to
+// perform timing analysis to determine what the expected bits should be. In
+// order to avoid such attacks, the comparison must execute in constant time,
+// so as not to reveal to the attacker where the difference(s) are.
+// For an example attack, see
+// http://groups.google.com/group/keyczar-discuss/browse_thread/thread/5571eca0948b2a13
+CRYPTO_EXPORT bool SecureMemEqual(const void* s1, const void* s2, size_t n);
+
+} // namespace crypto
+
+#endif // CRYPTO_SECURE_UTIL_H_
+
diff --git a/libchrome/crypto/sha2.cc b/libchrome/crypto/sha2.cc
new file mode 100644
index 0000000..e97b8f4
--- /dev/null
+++ b/libchrome/crypto/sha2.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/sha2.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/stl_util.h"
+#include "crypto/secure_hash.h"
+
+namespace crypto {
+
+// Hashes |str| with a one-shot SecureHash and writes the first |len| bytes of
+// the SHA-256 digest to |output| (at most kSHA256Length bytes are produced).
+void SHA256HashString(const base::StringPiece& str, void* output, size_t len) {
+ std::unique_ptr<SecureHash> ctx(SecureHash::Create(SecureHash::SHA256));
+ ctx->Update(str.data(), str.length());
+ ctx->Finish(output, len);
+}
+
+// Convenience overload returning the full 32-byte digest as a std::string.
+std::string SHA256HashString(const base::StringPiece& str) {
+ std::string output(kSHA256Length, 0);
+ SHA256HashString(str, string_as_array(&output), output.size());
+ return output;
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/sha2.h b/libchrome/crypto/sha2.h
new file mode 100644
index 0000000..d575815
--- /dev/null
+++ b/libchrome/crypto/sha2.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_SHA2_H_
+#define CRYPTO_SHA2_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/strings/string_piece.h"
+#include "crypto/crypto_export.h"
+
+namespace crypto {
+
+// These functions perform SHA-256 operations.
+//
+// Functions for SHA-384 and SHA-512 can be added when the need arises.
+
+static const size_t kSHA256Length = 32; // Length in bytes of a SHA-256 hash.
+
+// Computes the SHA-256 hash of the input string 'str' and stores the first
+// 'len' bytes of the hash in the output buffer 'output'. If 'len' > 32,
+// only 32 bytes (the full hash) are stored in the 'output' buffer.
+CRYPTO_EXPORT void SHA256HashString(const base::StringPiece& str,
+ void* output, size_t len);
+
+// Convenience version of the above that returns the result in a 32-byte
+// string.
+CRYPTO_EXPORT std::string SHA256HashString(const base::StringPiece& str);
+
+} // namespace crypto
+
+#endif // CRYPTO_SHA2_H_
diff --git a/libchrome/crypto/sha2_unittest.cc b/libchrome/crypto/sha2_unittest.cc
new file mode 100644
index 0000000..27d6d25
--- /dev/null
+++ b/libchrome/crypto/sha2_unittest.cc
@@ -0,0 +1,102 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/sha2.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST(Sha256Test, Test1) {
+ // Example B.1 from FIPS 180-2: one-block message.
+ std::string input1 = "abc";
+ int expected1[] = { 0xba, 0x78, 0x16, 0xbf,
+ 0x8f, 0x01, 0xcf, 0xea,
+ 0x41, 0x41, 0x40, 0xde,
+ 0x5d, 0xae, 0x22, 0x23,
+ 0xb0, 0x03, 0x61, 0xa3,
+ 0x96, 0x17, 0x7a, 0x9c,
+ 0xb4, 0x10, 0xff, 0x61,
+ 0xf2, 0x00, 0x15, 0xad };
+
+ uint8_t output1[crypto::kSHA256Length];
+ crypto::SHA256HashString(input1, output1, sizeof(output1));
+ for (size_t i = 0; i < crypto::kSHA256Length; i++)
+ EXPECT_EQ(expected1[i], static_cast<int>(output1[i]));
+
+ // A smaller output buffer must receive a truncated prefix of the digest.
+ uint8_t output_truncated1[4]; // 4 bytes == 32 bits
+ crypto::SHA256HashString(input1,
+ output_truncated1, sizeof(output_truncated1));
+ for (size_t i = 0; i < sizeof(output_truncated1); i++)
+ EXPECT_EQ(expected1[i], static_cast<int>(output_truncated1[i]));
+}
+
+TEST(Sha256Test, Test1_String) {
+ // Same as the above, but using the wrapper that returns a std::string.
+ // Example B.1 from FIPS 180-2: one-block message.
+ std::string input1 = "abc";
+ int expected1[] = { 0xba, 0x78, 0x16, 0xbf,
+ 0x8f, 0x01, 0xcf, 0xea,
+ 0x41, 0x41, 0x40, 0xde,
+ 0x5d, 0xae, 0x22, 0x23,
+ 0xb0, 0x03, 0x61, 0xa3,
+ 0x96, 0x17, 0x7a, 0x9c,
+ 0xb4, 0x10, 0xff, 0x61,
+ 0xf2, 0x00, 0x15, 0xad };
+
+ std::string output1 = crypto::SHA256HashString(input1);
+ ASSERT_EQ(crypto::kSHA256Length, output1.size());
+ for (size_t i = 0; i < crypto::kSHA256Length; i++)
+ EXPECT_EQ(expected1[i], static_cast<uint8_t>(output1[i]));
+}
+
+TEST(Sha256Test, Test2) {
+ // Example B.2 from FIPS 180-2: multi-block message.
+ std::string input2 =
+ "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq";
+ int expected2[] = { 0x24, 0x8d, 0x6a, 0x61,
+ 0xd2, 0x06, 0x38, 0xb8,
+ 0xe5, 0xc0, 0x26, 0x93,
+ 0x0c, 0x3e, 0x60, 0x39,
+ 0xa3, 0x3c, 0xe4, 0x59,
+ 0x64, 0xff, 0x21, 0x67,
+ 0xf6, 0xec, 0xed, 0xd4,
+ 0x19, 0xdb, 0x06, 0xc1 };
+
+ uint8_t output2[crypto::kSHA256Length];
+ crypto::SHA256HashString(input2, output2, sizeof(output2));
+ for (size_t i = 0; i < crypto::kSHA256Length; i++)
+ EXPECT_EQ(expected2[i], static_cast<int>(output2[i]));
+
+ uint8_t output_truncated2[6];
+ crypto::SHA256HashString(input2,
+ output_truncated2, sizeof(output_truncated2));
+ for (size_t i = 0; i < sizeof(output_truncated2); i++)
+ EXPECT_EQ(expected2[i], static_cast<int>(output_truncated2[i]));
+}
+
+TEST(Sha256Test, Test3) {
+ // Example B.3 from FIPS 180-2: long message.
+ std::string input3(1000000, 'a'); // 'a' repeated a million times
+ int expected3[] = { 0xcd, 0xc7, 0x6e, 0x5c,
+ 0x99, 0x14, 0xfb, 0x92,
+ 0x81, 0xa1, 0xc7, 0xe2,
+ 0x84, 0xd7, 0x3e, 0x67,
+ 0xf1, 0x80, 0x9a, 0x48,
+ 0xa4, 0x97, 0x20, 0x0e,
+ 0x04, 0x6d, 0x39, 0xcc,
+ 0xc7, 0x11, 0x2c, 0xd0 };
+
+ uint8_t output3[crypto::kSHA256Length];
+ crypto::SHA256HashString(input3, output3, sizeof(output3));
+ for (size_t i = 0; i < crypto::kSHA256Length; i++)
+ EXPECT_EQ(expected3[i], static_cast<int>(output3[i]));
+
+ uint8_t output_truncated3[12];
+ crypto::SHA256HashString(input3,
+ output_truncated3, sizeof(output_truncated3));
+ for (size_t i = 0; i < sizeof(output_truncated3); i++)
+ EXPECT_EQ(expected3[i], static_cast<int>(output_truncated3[i]));
+}
diff --git a/libchrome/crypto/signature_creator.h b/libchrome/crypto/signature_creator.h
new file mode 100644
index 0000000..1e8e856
--- /dev/null
+++ b/libchrome/crypto/signature_creator.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_SIGNATURE_CREATOR_H_
+#define CRYPTO_SIGNATURE_CREATOR_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "crypto/crypto_export.h"
+
+#if defined(USE_OPENSSL)
+// Forward declaration for openssl/*.h
+typedef struct env_md_ctx_st EVP_MD_CTX;
+#elif defined(USE_NSS_CERTS) || defined(OS_WIN) || defined(OS_MACOSX)
+// Forward declaration.
+struct SGNContextStr;
+#endif
+
+namespace crypto {
+
+class RSAPrivateKey;
+
+// Signs data using a bare private key (as opposed to a full certificate).
+// Currently can only sign data using SHA-1 or SHA-256 with RSA PKCS#1v1.5.
+// Signs data using a bare private key (as opposed to a full certificate).
+// Currently can only sign data using SHA-1 or SHA-256 with RSA PKCS#1v1.5.
+class CRYPTO_EXPORT SignatureCreator {
+ public:
+  // The set of supported hash functions. Extend as required.
+  enum HashAlgorithm {
+    SHA1,
+    SHA256,
+  };
+
+  // Destroys the per-backend signing context, if one was created.
+  ~SignatureCreator();
+
+  // Create an instance. The caller must ensure that the provided PrivateKey
+  // instance outlives the created SignatureCreator. Uses the HashAlgorithm
+  // specified. May return null if the backend signing context cannot be
+  // initialized.
+  static std::unique_ptr<SignatureCreator> Create(RSAPrivateKey* key,
+                                                  HashAlgorithm hash_alg);
+
+  // Signs the precomputed |hash_alg| digest |data| using private |key| as
+  // specified in PKCS #1 v1.5. |data| must already be a digest; no hashing
+  // is performed here. Returns false on backend failure.
+  static bool Sign(RSAPrivateKey* key,
+                   HashAlgorithm hash_alg,
+                   const uint8_t* data,
+                   int data_len,
+                   std::vector<uint8_t>* signature);
+
+  // Update the signature with more data. Returns false on backend failure.
+  bool Update(const uint8_t* data_part, int data_part_len);
+
+  // Finalize the signature, writing it into |signature|.
+  bool Final(std::vector<uint8_t>* signature);
+
+ private:
+  // Private constructor. Use the Create() method instead.
+  SignatureCreator();
+
+  // Backend-specific streaming signing context, owned by this object.
+#if defined(USE_OPENSSL)
+  EVP_MD_CTX* sign_context_;
+#elif defined(USE_NSS_CERTS) || defined(OS_WIN) || defined(OS_MACOSX)
+  SGNContextStr* sign_context_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(SignatureCreator);
+};
+
+} // namespace crypto
+
+#endif // CRYPTO_SIGNATURE_CREATOR_H_
diff --git a/libchrome/crypto/signature_creator_nss.cc b/libchrome/crypto/signature_creator_nss.cc
new file mode 100644
index 0000000..bf20413
--- /dev/null
+++ b/libchrome/crypto/signature_creator_nss.cc
@@ -0,0 +1,119 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/signature_creator.h"
+
+#include <cryptohi.h>
+#include <keyhi.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include <memory>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "crypto/nss_util.h"
+#include "crypto/rsa_private_key.h"
+
+namespace crypto {
+
+namespace {
+
+// Maps the creator's hash enum to the NSS PKCS#1 v1.5 RSA signature OID
+// used when creating the signing context.
+SECOidTag ToNSSSigOid(SignatureCreator::HashAlgorithm hash_alg) {
+  switch (hash_alg) {
+    case SignatureCreator::SHA1:
+      return SEC_OID_PKCS1_SHA1_WITH_RSA_ENCRYPTION;
+    case SignatureCreator::SHA256:
+      return SEC_OID_PKCS1_SHA256_WITH_RSA_ENCRYPTION;
+  }
+  // Unreachable for valid enum values; present to satisfy the compiler.
+  return SEC_OID_UNKNOWN;
+}
+
+// Maps the creator's hash enum to the bare NSS hash OID, used by Sign()
+// when signing a precomputed digest.
+SECOidTag ToNSSHashOid(SignatureCreator::HashAlgorithm hash_alg) {
+  switch (hash_alg) {
+    case SignatureCreator::SHA1:
+      return SEC_OID_SHA1;
+    case SignatureCreator::SHA256:
+      return SEC_OID_SHA256;
+  }
+  // Unreachable for valid enum values; present to satisfy the compiler.
+  return SEC_OID_UNKNOWN;
+}
+
+} // namespace
+
+SignatureCreator::~SignatureCreator() {
+  if (sign_context_) {
+    // PR_TRUE frees the context structure itself, not just its contents.
+    SGN_DestroyContext(sign_context_, PR_TRUE);
+    sign_context_ = NULL;
+  }
+}
+
+// static
+std::unique_ptr<SignatureCreator> SignatureCreator::Create(
+    RSAPrivateKey* key,
+    HashAlgorithm hash_alg) {
+  // Return std::unique_ptr to match the declaration in signature_creator.h;
+  // the previous raw-pointer/scoped_ptr form did not agree with the header
+  // and could not compile against it.
+  std::unique_ptr<SignatureCreator> result(new SignatureCreator);
+  result->sign_context_ = SGN_NewContext(ToNSSSigOid(hash_alg), key->key());
+  if (!result->sign_context_) {
+    NOTREACHED();
+    return nullptr;
+  }
+
+  SECStatus rv = SGN_Begin(result->sign_context_);
+  if (rv != SECSuccess) {
+    NOTREACHED();
+    return nullptr;
+  }
+
+  return result;
+}
+
+// static
+// Signs the already-computed digest |data| (no additional hashing is done
+// here; SGN_Digest wraps the digest in PKCS #1 v1.5 and signs it).
+bool SignatureCreator::Sign(RSAPrivateKey* key,
+                            HashAlgorithm hash_alg,
+                            const uint8_t* data,
+                            int data_len,
+                            std::vector<uint8_t>* signature) {
+  SECItem data_item;
+  data_item.type = siBuffer;
+  data_item.data = const_cast<unsigned char*>(data);
+  data_item.len = data_len;
+
+  SECItem signature_item;
+  SECStatus rv = SGN_Digest(key->key(), ToNSSHashOid(hash_alg), &signature_item,
+                            &data_item);
+  if (rv != SECSuccess) {
+    NOTREACHED();
+    return false;
+  }
+  signature->assign(signature_item.data,
+                    signature_item.data + signature_item.len);
+  // PR_FALSE: free only the NSS-allocated data buffer; the SECItem itself
+  // lives on the stack.
+  SECITEM_FreeItem(&signature_item, PR_FALSE);
+  return true;
+}
+
+// Feeds |data_part_len| bytes of message data into the streaming signing
+// context created by Create().
+bool SignatureCreator::Update(const uint8_t* data_part, int data_part_len) {
+  SECStatus rv = SGN_Update(sign_context_, data_part, data_part_len);
+  if (rv != SECSuccess) {
+    NOTREACHED();
+    return false;
+  }
+
+  return true;
+}
+
+// Completes the streaming signature and copies the result into |signature|.
+bool SignatureCreator::Final(std::vector<uint8_t>* signature) {
+  SECItem signature_item;
+  SECStatus rv = SGN_End(sign_context_, &signature_item);
+  if (rv != SECSuccess) {
+    return false;
+  }
+  signature->assign(signature_item.data,
+                    signature_item.data + signature_item.len);
+  // PR_FALSE: free only the NSS-allocated data buffer; the SECItem itself
+  // lives on the stack.
+  SECITEM_FreeItem(&signature_item, PR_FALSE);
+  return true;
+}
+
+// Private; NSS must be initialized before any SGN_* call in Create().
+SignatureCreator::SignatureCreator() : sign_context_(NULL) {
+  EnsureNSSInit();
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/signature_creator_unittest.cc b/libchrome/crypto/signature_creator_unittest.cc
new file mode 100644
index 0000000..819e663
--- /dev/null
+++ b/libchrome/crypto/signature_creator_unittest.cc
@@ -0,0 +1,122 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/signature_creator.h"
+
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/sha1.h"
+#include "crypto/rsa_private_key.h"
+#include "crypto/sha2.h"
+#include "crypto/signature_verifier.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Round trip using the streaming API: sign "Hello, World!" with a freshly
+// generated RSA key (re-imported from its exported private-key blob), then
+// verify with the exported public key.
+TEST(SignatureCreatorTest, BasicTest) {
+  // Do a verify round trip.
+  std::unique_ptr<crypto::RSAPrivateKey> key_original(
+      crypto::RSAPrivateKey::Create(1024));
+  ASSERT_TRUE(key_original.get());
+
+  std::vector<uint8_t> key_info;
+  // Check the export result (previously ignored), matching the sibling
+  // ASSERT_TRUE-checked calls in this test.
+  ASSERT_TRUE(key_original->ExportPrivateKey(&key_info));
+  std::unique_ptr<crypto::RSAPrivateKey> key(
+      crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(key_info));
+  ASSERT_TRUE(key.get());
+
+  std::unique_ptr<crypto::SignatureCreator> signer(
+      crypto::SignatureCreator::Create(key.get(),
+                                       crypto::SignatureCreator::SHA1));
+  ASSERT_TRUE(signer.get());
+
+  std::string data("Hello, World!");
+  ASSERT_TRUE(signer->Update(reinterpret_cast<const uint8_t*>(data.c_str()),
+                             data.size()));
+
+  std::vector<uint8_t> signature;
+  ASSERT_TRUE(signer->Final(&signature));
+
+  std::vector<uint8_t> public_key_info;
+  ASSERT_TRUE(key_original->ExportPublicKey(&public_key_info));
+
+  crypto::SignatureVerifier verifier;
+  ASSERT_TRUE(verifier.VerifyInit(
+      crypto::SignatureVerifier::RSA_PKCS1_SHA1, &signature.front(),
+      signature.size(), &public_key_info.front(), public_key_info.size()));
+
+  verifier.VerifyUpdate(reinterpret_cast<const uint8_t*>(data.c_str()),
+                        data.size());
+  ASSERT_TRUE(verifier.VerifyFinal());
+}
+
+// Round trip using the one-shot Sign() API: sign the SHA-1 digest of the
+// message, then verify the original message with SignatureVerifier.
+TEST(SignatureCreatorTest, SignDigestTest) {
+  // Do a verify round trip.
+  std::unique_ptr<crypto::RSAPrivateKey> key_original(
+      crypto::RSAPrivateKey::Create(1024));
+  ASSERT_TRUE(key_original.get());
+
+  std::vector<uint8_t> key_info;
+  // Check the export result (previously ignored), matching the sibling
+  // ASSERT_TRUE-checked calls in this test.
+  ASSERT_TRUE(key_original->ExportPrivateKey(&key_info));
+  std::unique_ptr<crypto::RSAPrivateKey> key(
+      crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(key_info));
+  ASSERT_TRUE(key.get());
+
+  std::string data("Hello, World!");
+  std::string sha1 = base::SHA1HashString(data);
+  // Sign sha1 of the input data.
+  std::vector<uint8_t> signature;
+  ASSERT_TRUE(crypto::SignatureCreator::Sign(
+      key.get(), crypto::SignatureCreator::SHA1,
+      reinterpret_cast<const uint8_t*>(sha1.c_str()), sha1.size(), &signature));
+
+  std::vector<uint8_t> public_key_info;
+  ASSERT_TRUE(key_original->ExportPublicKey(&public_key_info));
+
+  // Verify the input data.
+  crypto::SignatureVerifier verifier;
+  ASSERT_TRUE(verifier.VerifyInit(
+      crypto::SignatureVerifier::RSA_PKCS1_SHA1, &signature.front(),
+      signature.size(), &public_key_info.front(), public_key_info.size()));
+
+  verifier.VerifyUpdate(reinterpret_cast<const uint8_t*>(data.c_str()),
+                        data.size());
+  ASSERT_TRUE(verifier.VerifyFinal());
+}
+
+// Round trip using the one-shot Sign() API with SHA-256: sign the SHA-256
+// digest of the message, then verify the original message.
+TEST(SignatureCreatorTest, SignSHA256DigestTest) {
+  // Do a verify round trip.
+  std::unique_ptr<crypto::RSAPrivateKey> key_original(
+      crypto::RSAPrivateKey::Create(1024));
+  ASSERT_TRUE(key_original.get());
+
+  std::vector<uint8_t> key_info;
+  // Check the export result (previously ignored), matching the sibling
+  // ASSERT_TRUE-checked calls in this test.
+  ASSERT_TRUE(key_original->ExportPrivateKey(&key_info));
+  std::unique_ptr<crypto::RSAPrivateKey> key(
+      crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(key_info));
+  ASSERT_TRUE(key.get());
+
+  std::string data("Hello, World!");
+  std::string sha256 = crypto::SHA256HashString(data);
+  // Sign sha256 of the input data.
+  std::vector<uint8_t> signature;
+  ASSERT_TRUE(crypto::SignatureCreator::Sign(
+      key.get(), crypto::SignatureCreator::HashAlgorithm::SHA256,
+      reinterpret_cast<const uint8_t*>(sha256.c_str()), sha256.size(),
+      &signature));
+
+  std::vector<uint8_t> public_key_info;
+  ASSERT_TRUE(key_original->ExportPublicKey(&public_key_info));
+
+  // Verify the input data.
+  crypto::SignatureVerifier verifier;
+  ASSERT_TRUE(verifier.VerifyInit(
+      crypto::SignatureVerifier::RSA_PKCS1_SHA256, &signature.front(),
+      signature.size(), &public_key_info.front(), public_key_info.size()));
+
+  verifier.VerifyUpdate(reinterpret_cast<const uint8_t*>(data.c_str()),
+                        data.size());
+  ASSERT_TRUE(verifier.VerifyFinal());
+}
diff --git a/libchrome/crypto/signature_verifier.h b/libchrome/crypto/signature_verifier.h
new file mode 100644
index 0000000..5b7369f
--- /dev/null
+++ b/libchrome/crypto/signature_verifier.h
@@ -0,0 +1,134 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_SIGNATURE_VERIFIER_H_
+#define CRYPTO_SIGNATURE_VERIFIER_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "build/build_config.h"
+#include "crypto/crypto_export.h"
+
+#if defined(USE_OPENSSL)
+typedef struct env_md_st EVP_MD;
+typedef struct evp_pkey_ctx_st EVP_PKEY_CTX;
+#else
+typedef struct HASHContextStr HASHContext;
+typedef struct SECKEYPublicKeyStr SECKEYPublicKey;
+typedef struct VFYContextStr VFYContext;
+#endif
+
+namespace crypto {
+
+// The SignatureVerifier class verifies a signature using a bare public key
+// (as opposed to a certificate).
+// The SignatureVerifier class verifies a signature using a bare public key
+// (as opposed to a certificate).
+class CRYPTO_EXPORT SignatureVerifier {
+ public:
+  // The set of supported hash functions. Extend as required.
+  enum HashAlgorithm {
+    SHA1,
+    SHA256,
+  };
+
+  // The set of supported signature algorithms. Extend as required.
+  enum SignatureAlgorithm {
+    RSA_PKCS1_SHA1,
+    RSA_PKCS1_SHA256,
+    ECDSA_SHA256,
+  };
+
+  SignatureVerifier();
+  ~SignatureVerifier();
+
+  // Streaming interface:
+
+  // Initiates a signature verification operation. This should be followed
+  // by one or more VerifyUpdate calls and a VerifyFinal call.
+  // NOTE: for RSA-PSS signatures, use VerifyInitRSAPSS instead.
+  //
+  // The signature is encoded according to the signature algorithm.
+  //
+  // The public key is specified as a DER encoded ASN.1 SubjectPublicKeyInfo
+  // structure, which contains not only the public key but also its type
+  // (algorithm):
+  //   SubjectPublicKeyInfo ::= SEQUENCE {
+  //       algorithm AlgorithmIdentifier,
+  //       subjectPublicKey BIT STRING }
+  bool VerifyInit(SignatureAlgorithm signature_algorithm,
+                  const uint8_t* signature,
+                  int signature_len,
+                  const uint8_t* public_key_info,
+                  int public_key_info_len);
+
+  // Initiates a RSA-PSS signature verification operation. This should be
+  // followed by one or more VerifyUpdate calls and a VerifyFinal call.
+  //
+  // The RSA-PSS signature algorithm parameters are specified with the
+  // |hash_alg|, |mask_hash_alg|, and |salt_len| arguments.
+  //
+  // An RSA-PSS signature is a nonnegative integer encoded as a byte string
+  // (of the same length as the RSA modulus) in big-endian byte order. It
+  // must not be further encoded in an ASN.1 BIT STRING.
+  //
+  // The public key is specified as a DER encoded ASN.1 SubjectPublicKeyInfo
+  // structure, which contains not only the public key but also its type
+  // (algorithm):
+  //   SubjectPublicKeyInfo ::= SEQUENCE {
+  //       algorithm AlgorithmIdentifier,
+  //       subjectPublicKey BIT STRING }
+  bool VerifyInitRSAPSS(HashAlgorithm hash_alg,
+                        HashAlgorithm mask_hash_alg,
+                        int salt_len,
+                        const uint8_t* signature,
+                        int signature_len,
+                        const uint8_t* public_key_info,
+                        int public_key_info_len);
+
+  // Feeds a piece of the data to the signature verifier.
+  void VerifyUpdate(const uint8_t* data_part, int data_part_len);
+
+  // Concludes a signature verification operation. Returns true if the
+  // signature is valid. Returns false if the signature is invalid or an
+  // error occurred.
+  bool VerifyFinal();
+
+ private:
+#if defined(USE_OPENSSL)
+  bool CommonInit(int pkey_type,
+                  const EVP_MD* digest,
+                  const uint8_t* signature,
+                  int signature_len,
+                  const uint8_t* public_key_info,
+                  int public_key_info_len,
+                  EVP_PKEY_CTX** pkey_ctx);
+#else
+  static SECKEYPublicKey* DecodePublicKeyInfo(const uint8_t* public_key_info,
+                                              int public_key_info_len);
+#endif
+
+  // Frees all per-operation state so the verifier can be reused.
+  void Reset();
+
+  // Copy of the signature passed to VerifyInit/VerifyInitRSAPSS; consumed
+  // at VerifyFinal time by the RSA-PSS path.
+  std::vector<uint8_t> signature_;
+
+#if defined(USE_OPENSSL)
+  struct VerifyContext;
+  VerifyContext* verify_context_;
+#else
+  // Used for all signature types except RSA-PSS.
+  VFYContext* vfy_context_;
+
+  // Used for RSA-PSS signatures.
+  HashAlgorithm hash_alg_;
+  HashAlgorithm mask_hash_alg_;
+  unsigned int salt_len_;
+  SECKEYPublicKey* public_key_;
+  HASHContext* hash_context_;
+#endif
+};
+
+} // namespace crypto
+
+#endif // CRYPTO_SIGNATURE_VERIFIER_H_
diff --git a/libchrome/crypto/signature_verifier_nss.cc b/libchrome/crypto/signature_verifier_nss.cc
new file mode 100644
index 0000000..edbd3f6
--- /dev/null
+++ b/libchrome/crypto/signature_verifier_nss.cc
@@ -0,0 +1,213 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/signature_verifier.h"
+
+#include <cryptohi.h>
+#include <keyhi.h>
+#include <pk11pub.h>
+#include <secerr.h>
+#include <sechash.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "base/logging.h"
+#include "crypto/nss_util.h"
+#include "crypto/third_party/nss/chromium-nss.h"
+
+namespace crypto {
+
+namespace {
+
+// Maps the verifier's hash enum to the NSS hash type used for the RSA-PSS
+// streaming hash context.
+HASH_HashType ToNSSHashType(SignatureVerifier::HashAlgorithm hash_alg) {
+  switch (hash_alg) {
+    case SignatureVerifier::SHA1:
+      return HASH_AlgSHA1;
+    case SignatureVerifier::SHA256:
+      return HASH_AlgSHA256;
+  }
+  // Unreachable for valid enum values; present to satisfy the compiler.
+  return HASH_AlgNULL;
+}
+
+// Maps the verifier's signature-algorithm enum to the NSS OID passed to
+// VFY_CreateContext.
+SECOidTag ToNSSSignatureType(SignatureVerifier::SignatureAlgorithm sig_alg) {
+  switch (sig_alg) {
+    case SignatureVerifier::RSA_PKCS1_SHA1:
+      return SEC_OID_PKCS1_SHA1_WITH_RSA_ENCRYPTION;
+    case SignatureVerifier::RSA_PKCS1_SHA256:
+      return SEC_OID_PKCS1_SHA256_WITH_RSA_ENCRYPTION;
+    case SignatureVerifier::ECDSA_SHA256:
+      return SEC_OID_ANSIX962_ECDSA_SHA256_SIGNATURE;
+  }
+  // Unreachable for valid enum values; present to satisfy the compiler.
+  return SEC_OID_UNKNOWN;
+}
+
+// Finishes an RSA-PSS verification: completes the streaming hash, applies
+// the raw RSA public-key operation to |signature| to recover the PSS-encoded
+// message, and checks the encoding with emsa_pss_verify.
+SECStatus VerifyRSAPSS_End(SECKEYPublicKey* public_key,
+                           HASHContext* hash_context,
+                           HASH_HashType mask_hash_alg,
+                           unsigned int salt_len,
+                           const unsigned char* signature,
+                           unsigned int signature_len) {
+  unsigned int hash_len = HASH_ResultLenContext(hash_context);
+  std::vector<unsigned char> hash(hash_len);
+  HASH_End(hash_context, &hash[0], &hash_len, hash.size());
+
+  // An RSA-PSS signature must be exactly as long as the modulus.
+  unsigned int modulus_len = SECKEY_PublicKeyStrength(public_key);
+  if (signature_len != modulus_len) {
+    PORT_SetError(SEC_ERROR_BAD_SIGNATURE);
+    return SECFailure;
+  }
+  // "Encrypting" with the public key is the raw RSA verification primitive;
+  // it recovers the encoded message EM from the signature.
+  std::vector<unsigned char> enc(signature_len);
+  SECStatus rv = PK11_PubEncryptRaw(public_key, &enc[0],
+                                    const_cast<unsigned char*>(signature),
+                                    signature_len, NULL);
+  if (rv != SECSuccess) {
+    LOG(WARNING) << "PK11_PubEncryptRaw failed";
+    return rv;
+  }
+  return emsa_pss_verify(&hash[0], &enc[0], enc.size(),
+                         HASH_GetType(hash_context), mask_hash_alg,
+                         salt_len);
+}
+
+} // namespace
+
+// NSS must be initialized before any VFY_*/HASH_* call made later.
+SignatureVerifier::SignatureVerifier()
+    : vfy_context_(NULL),
+      hash_alg_(SHA1),
+      mask_hash_alg_(SHA1),
+      salt_len_(0),
+      public_key_(NULL),
+      hash_context_(NULL) {
+  EnsureNSSInit();
+}
+
+// Releases any in-progress verification state (contexts, key, signature).
+SignatureVerifier::~SignatureVerifier() {
+  Reset();
+}
+
+// Starts a non-PSS verification. Fails if a verification is already in
+// progress or the public key / signature cannot be consumed by NSS.
+bool SignatureVerifier::VerifyInit(SignatureAlgorithm signature_algorithm,
+                                   const uint8_t* signature,
+                                   int signature_len,
+                                   const uint8_t* public_key_info,
+                                   int public_key_info_len) {
+  if (vfy_context_ || hash_context_)
+    return false;
+
+  signature_.assign(signature, signature + signature_len);
+
+  SECKEYPublicKey* public_key = DecodePublicKeyInfo(public_key_info,
+                                                    public_key_info_len);
+  if (!public_key)
+    return false;
+
+  SECItem sig;
+  sig.type = siBuffer;
+  sig.data = const_cast<uint8_t*>(signature);
+  sig.len = signature_len;
+  vfy_context_ = VFY_CreateContext(
+      public_key, &sig, ToNSSSignatureType(signature_algorithm), nullptr);
+  SECKEY_DestroyPublicKey(public_key);  // Done with public_key.
+  if (!vfy_context_) {
+    // A corrupted RSA signature could be detected without the data, so
+    // VFY_CreateContext may fail with SEC_ERROR_BAD_SIGNATURE (-8182).
+    return false;
+  }
+
+  if (VFY_Begin(vfy_context_) != SECSuccess) {
+    NOTREACHED();
+    return false;
+  }
+  return true;
+}
+
+// Starts an RSA-PSS verification. Unlike VerifyInit, this path keeps the
+// decoded public key and hashes the data itself; the actual RSA check
+// happens in VerifyRSAPSS_End at VerifyFinal time.
+bool SignatureVerifier::VerifyInitRSAPSS(HashAlgorithm hash_alg,
+                                         HashAlgorithm mask_hash_alg,
+                                         int salt_len,
+                                         const uint8_t* signature,
+                                         int signature_len,
+                                         const uint8_t* public_key_info,
+                                         int public_key_info_len) {
+  if (vfy_context_ || hash_context_)
+    return false;
+
+  signature_.assign(signature, signature + signature_len);
+
+  SECKEYPublicKey* public_key = DecodePublicKeyInfo(public_key_info,
+                                                    public_key_info_len);
+  if (!public_key)
+    return false;
+
+  // Ownership of |public_key| passes to |public_key_|; released in Reset().
+  public_key_ = public_key;
+  hash_alg_ = hash_alg;
+  mask_hash_alg_ = mask_hash_alg;
+  salt_len_ = salt_len;
+  hash_context_ = HASH_Create(ToNSSHashType(hash_alg_));
+  if (!hash_context_)
+    return false;
+  HASH_Begin(hash_context_);
+  return true;
+}
+
+// Routes data to whichever context is active: the NSS VFY context for
+// ordinary signatures, or the standalone hash context for RSA-PSS.
+void SignatureVerifier::VerifyUpdate(const uint8_t* data_part,
+                                     int data_part_len) {
+  if (vfy_context_) {
+    SECStatus rv = VFY_Update(vfy_context_, data_part, data_part_len);
+    DCHECK_EQ(SECSuccess, rv);
+  } else {
+    HASH_Update(hash_context_, data_part, data_part_len);
+  }
+}
+
+// Completes the active verification (VFY path or RSA-PSS path) and resets
+// the verifier so it can be reused.
+bool SignatureVerifier::VerifyFinal() {
+  SECStatus rv;
+  if (vfy_context_) {
+    rv = VFY_End(vfy_context_);
+  } else {
+    rv = VerifyRSAPSS_End(public_key_, hash_context_,
+                          ToNSSHashType(mask_hash_alg_), salt_len_,
+                          signature_.data(),
+                          signature_.size());
+  }
+  Reset();
+
+  // If signature verification fails, the error code is
+  // SEC_ERROR_BAD_SIGNATURE (-8182).
+  return (rv == SECSuccess);
+}
+
+// static
+// Decodes a DER SubjectPublicKeyInfo blob and extracts the public key.
+// Returns NULL on decode failure; on success the caller owns the returned
+// key and must free it with SECKEY_DestroyPublicKey.
+SECKEYPublicKey* SignatureVerifier::DecodePublicKeyInfo(
+    const uint8_t* public_key_info,
+    int public_key_info_len) {
+  CERTSubjectPublicKeyInfo* spki = NULL;
+  SECItem spki_der;
+  spki_der.type = siBuffer;
+  spki_der.data = const_cast<uint8_t*>(public_key_info);
+  spki_der.len = public_key_info_len;
+  spki = SECKEY_DecodeDERSubjectPublicKeyInfo(&spki_der);
+  if (!spki)
+    return NULL;
+  SECKEYPublicKey* public_key = SECKEY_ExtractPublicKey(spki);
+  SECKEY_DestroySubjectPublicKeyInfo(spki);  // Done with spki.
+  return public_key;
+}
+
+// Frees all per-operation state. Called from VerifyFinal() and the
+// destructor; safe to call when no verification is in progress.
+void SignatureVerifier::Reset() {
+  if (vfy_context_) {
+    // PR_TRUE frees the context structure itself, not just its contents.
+    VFY_DestroyContext(vfy_context_, PR_TRUE);
+    vfy_context_ = NULL;
+  }
+  if (hash_context_) {
+    HASH_Destroy(hash_context_);
+    hash_context_ = NULL;
+  }
+  if (public_key_) {
+    SECKEY_DestroyPublicKey(public_key_);
+    public_key_ = NULL;
+  }
+  signature_.clear();
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/signature_verifier_unittest.cc b/libchrome/crypto/signature_verifier_unittest.cc
new file mode 100644
index 0000000..d71ea82
--- /dev/null
+++ b/libchrome/crypto/signature_verifier_unittest.cc
@@ -0,0 +1,1144 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/signature_verifier.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/macros.h"
+#include "base/numerics/safe_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST(SignatureVerifierTest, BasicTest) {
+ // The input data in this test comes from real certificates.
+ //
+ // tbs_certificate ("to-be-signed certificate", the part of a certificate that
+ // is signed), signature, and algorithm come from the certificate of
+ // bugs.webkit.org.
+ //
+ // public_key_info comes from the certificate of the issuer, Go Daddy Secure
+ // Certification Authority.
+ //
+ // The bytes in the array initializers are formatted to expose the DER
+ // encoding of the ASN.1 structures.
+
+ // The data that is signed is the following ASN.1 structure:
+ // TBSCertificate ::= SEQUENCE {
+ // ... -- omitted, not important
+ // }
+ const uint8_t tbs_certificate[1017] = {
+ 0x30, 0x82, 0x03, 0xf5, // a SEQUENCE of length 1013 (0x3f5)
+ 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x03, 0x43, 0xdd, 0x63, 0x30, 0x0d,
+ 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05,
+ 0x00, 0x30, 0x81, 0xca, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04,
+ 0x06, 0x13, 0x02, 0x55, 0x53, 0x31, 0x10, 0x30, 0x0e, 0x06, 0x03, 0x55,
+ 0x04, 0x08, 0x13, 0x07, 0x41, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x61, 0x31,
+ 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x07, 0x13, 0x0a, 0x53, 0x63,
+ 0x6f, 0x74, 0x74, 0x73, 0x64, 0x61, 0x6c, 0x65, 0x31, 0x1a, 0x30, 0x18,
+ 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x11, 0x47, 0x6f, 0x44, 0x61, 0x64,
+ 0x64, 0x79, 0x2e, 0x63, 0x6f, 0x6d, 0x2c, 0x20, 0x49, 0x6e, 0x63, 0x2e,
+ 0x31, 0x33, 0x30, 0x31, 0x06, 0x03, 0x55, 0x04, 0x0b, 0x13, 0x2a, 0x68,
+ 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x67, 0x6f, 0x64, 0x61, 0x64,
+ 0x64, 0x79, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x72, 0x65, 0x70, 0x6f, 0x73,
+ 0x69, 0x74, 0x6f, 0x72, 0x79, 0x31, 0x30, 0x30, 0x2e, 0x06, 0x03, 0x55,
+ 0x04, 0x03, 0x13, 0x27, 0x47, 0x6f, 0x20, 0x44, 0x61, 0x64, 0x64, 0x79,
+ 0x20, 0x53, 0x65, 0x63, 0x75, 0x72, 0x65, 0x20, 0x43, 0x65, 0x72, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x41, 0x75,
+ 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x31, 0x11, 0x30, 0x0f, 0x06,
+ 0x03, 0x55, 0x04, 0x05, 0x13, 0x08, 0x30, 0x37, 0x39, 0x36, 0x39, 0x32,
+ 0x38, 0x37, 0x30, 0x1e, 0x17, 0x0d, 0x30, 0x38, 0x30, 0x33, 0x31, 0x38,
+ 0x32, 0x33, 0x33, 0x35, 0x31, 0x39, 0x5a, 0x17, 0x0d, 0x31, 0x31, 0x30,
+ 0x33, 0x31, 0x38, 0x32, 0x33, 0x33, 0x35, 0x31, 0x39, 0x5a, 0x30, 0x79,
+ 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55,
+ 0x53, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a,
+ 0x43, 0x61, 0x6c, 0x69, 0x66, 0x6f, 0x72, 0x6e, 0x69, 0x61, 0x31, 0x12,
+ 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, 0x07, 0x13, 0x09, 0x43, 0x75, 0x70,
+ 0x65, 0x72, 0x74, 0x69, 0x6e, 0x6f, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03,
+ 0x55, 0x04, 0x0a, 0x13, 0x0a, 0x41, 0x70, 0x70, 0x6c, 0x65, 0x20, 0x49,
+ 0x6e, 0x63, 0x2e, 0x31, 0x15, 0x30, 0x13, 0x06, 0x03, 0x55, 0x04, 0x0b,
+ 0x13, 0x0c, 0x4d, 0x61, 0x63, 0x20, 0x4f, 0x53, 0x20, 0x46, 0x6f, 0x72,
+ 0x67, 0x65, 0x31, 0x15, 0x30, 0x13, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13,
+ 0x0c, 0x2a, 0x2e, 0x77, 0x65, 0x62, 0x6b, 0x69, 0x74, 0x2e, 0x6f, 0x72,
+ 0x67, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86,
+ 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30,
+ 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xa7, 0x62, 0x79, 0x41, 0xda, 0x28,
+ 0xf2, 0xc0, 0x4f, 0xe0, 0x25, 0xaa, 0xa1, 0x2e, 0x3b, 0x30, 0x94, 0xb5,
+ 0xc9, 0x26, 0x3a, 0x1b, 0xe2, 0xd0, 0xcc, 0xa2, 0x95, 0xe2, 0x91, 0xc0,
+ 0xf0, 0x40, 0x9e, 0x27, 0x6e, 0xbd, 0x6e, 0xde, 0x7c, 0xb6, 0x30, 0x5c,
+ 0xb8, 0x9b, 0x01, 0x2f, 0x92, 0x04, 0xa1, 0xef, 0x4a, 0xb1, 0x6c, 0xb1,
+ 0x7e, 0x8e, 0xcd, 0xa6, 0xf4, 0x40, 0x73, 0x1f, 0x2c, 0x96, 0xad, 0xff,
+ 0x2a, 0x6d, 0x0e, 0xba, 0x52, 0x84, 0x83, 0xb0, 0x39, 0xee, 0xc9, 0x39,
+ 0xdc, 0x1e, 0x34, 0xd0, 0xd8, 0x5d, 0x7a, 0x09, 0xac, 0xa9, 0xee, 0xca,
+ 0x65, 0xf6, 0x85, 0x3a, 0x6b, 0xee, 0xe4, 0x5c, 0x5e, 0xf8, 0xda, 0xd1,
+ 0xce, 0x88, 0x47, 0xcd, 0x06, 0x21, 0xe0, 0xb9, 0x4b, 0xe4, 0x07, 0xcb,
+ 0x57, 0xdc, 0xca, 0x99, 0x54, 0xf7, 0x0e, 0xd5, 0x17, 0x95, 0x05, 0x2e,
+ 0xe9, 0xb1, 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x82, 0x01, 0xce, 0x30,
+ 0x82, 0x01, 0xca, 0x30, 0x09, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x02,
+ 0x30, 0x00, 0x30, 0x0b, 0x06, 0x03, 0x55, 0x1d, 0x0f, 0x04, 0x04, 0x03,
+ 0x02, 0x05, 0xa0, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x25, 0x04, 0x16,
+ 0x30, 0x14, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x01,
+ 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x02, 0x30, 0x57,
+ 0x06, 0x03, 0x55, 0x1d, 0x1f, 0x04, 0x50, 0x30, 0x4e, 0x30, 0x4c, 0xa0,
+ 0x4a, 0xa0, 0x48, 0x86, 0x46, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f,
+ 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73,
+ 0x2e, 0x67, 0x6f, 0x64, 0x61, 0x64, 0x64, 0x79, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x2f,
+ 0x67, 0x6f, 0x64, 0x61, 0x64, 0x64, 0x79, 0x65, 0x78, 0x74, 0x65, 0x6e,
+ 0x64, 0x65, 0x64, 0x69, 0x73, 0x73, 0x75, 0x69, 0x6e, 0x67, 0x33, 0x2e,
+ 0x63, 0x72, 0x6c, 0x30, 0x52, 0x06, 0x03, 0x55, 0x1d, 0x20, 0x04, 0x4b,
+ 0x30, 0x49, 0x30, 0x47, 0x06, 0x0b, 0x60, 0x86, 0x48, 0x01, 0x86, 0xfd,
+ 0x6d, 0x01, 0x07, 0x17, 0x02, 0x30, 0x38, 0x30, 0x36, 0x06, 0x08, 0x2b,
+ 0x06, 0x01, 0x05, 0x05, 0x07, 0x02, 0x01, 0x16, 0x2a, 0x68, 0x74, 0x74,
+ 0x70, 0x3a, 0x2f, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x65, 0x73, 0x2e, 0x67, 0x6f, 0x64, 0x61, 0x64, 0x64, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74,
+ 0x6f, 0x72, 0x79, 0x30, 0x7f, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05,
+ 0x07, 0x01, 0x01, 0x04, 0x73, 0x30, 0x71, 0x30, 0x23, 0x06, 0x08, 0x2b,
+ 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x01, 0x86, 0x17, 0x68, 0x74, 0x74,
+ 0x70, 0x3a, 0x2f, 0x2f, 0x6f, 0x63, 0x73, 0x70, 0x2e, 0x67, 0x6f, 0x64,
+ 0x61, 0x64, 0x64, 0x79, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x4a, 0x06, 0x08,
+ 0x2b, 0x06, 0x01, 0x05, 0x05, 0x07, 0x30, 0x02, 0x86, 0x3e, 0x68, 0x74,
+ 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x67, 0x6f, 0x64, 0x61, 0x64, 0x64,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69,
+ 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x67, 0x64, 0x5f, 0x69, 0x6e, 0x74, 0x65,
+ 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x2e, 0x63, 0x72, 0x74,
+ 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0x48,
+ 0xdf, 0x60, 0x32, 0xcc, 0x89, 0x01, 0xb6, 0xdc, 0x2f, 0xe3, 0x73, 0xb5,
+ 0x9c, 0x16, 0x58, 0x32, 0x68, 0xa9, 0xc3, 0x30, 0x1f, 0x06, 0x03, 0x55,
+ 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16, 0x80, 0x14, 0xfd, 0xac, 0x61, 0x32,
+ 0x93, 0x6c, 0x45, 0xd6, 0xe2, 0xee, 0x85, 0x5f, 0x9a, 0xba, 0xe7, 0x76,
+ 0x99, 0x68, 0xcc, 0xe7, 0x30, 0x23, 0x06, 0x03, 0x55, 0x1d, 0x11, 0x04,
+ 0x1c, 0x30, 0x1a, 0x82, 0x0c, 0x2a, 0x2e, 0x77, 0x65, 0x62, 0x6b, 0x69,
+ 0x74, 0x2e, 0x6f, 0x72, 0x67, 0x82, 0x0a, 0x77, 0x65, 0x62, 0x6b, 0x69,
+ 0x74, 0x2e, 0x6f, 0x72, 0x67};
+
+ // RSA signature, a big integer in the big-endian byte order.
+ const uint8_t signature[256] = {
+ 0x1e, 0x6a, 0xe7, 0xe0, 0x4f, 0xe7, 0x4d, 0xd0, 0x69, 0x7c, 0xf8, 0x8f,
+ 0x99, 0xb4, 0x18, 0x95, 0x36, 0x24, 0x0f, 0x0e, 0xa3, 0xea, 0x34, 0x37,
+ 0xf4, 0x7d, 0xd5, 0x92, 0x35, 0x53, 0x72, 0x76, 0x3f, 0x69, 0xf0, 0x82,
+ 0x56, 0xe3, 0x94, 0x7a, 0x1d, 0x1a, 0x81, 0xaf, 0x9f, 0xc7, 0x43, 0x01,
+ 0x64, 0xd3, 0x7c, 0x0d, 0xc8, 0x11, 0x4e, 0x4a, 0xe6, 0x1a, 0xc3, 0x01,
+ 0x74, 0xe8, 0x35, 0x87, 0x5c, 0x61, 0xaa, 0x8a, 0x46, 0x06, 0xbe, 0x98,
+ 0x95, 0x24, 0x9e, 0x01, 0xe3, 0xe6, 0xa0, 0x98, 0xee, 0x36, 0x44, 0x56,
+ 0x8d, 0x23, 0x9c, 0x65, 0xea, 0x55, 0x6a, 0xdf, 0x66, 0xee, 0x45, 0xe8,
+ 0xa0, 0xe9, 0x7d, 0x9a, 0xba, 0x94, 0xc5, 0xc8, 0xc4, 0x4b, 0x98, 0xff,
+ 0x9a, 0x01, 0x31, 0x6d, 0xf9, 0x2b, 0x58, 0xe7, 0xe7, 0x2a, 0xc5, 0x4d,
+ 0xbb, 0xbb, 0xcd, 0x0d, 0x70, 0xe1, 0xad, 0x03, 0xf5, 0xfe, 0xf4, 0x84,
+ 0x71, 0x08, 0xd2, 0xbc, 0x04, 0x7b, 0x26, 0x1c, 0xa8, 0x0f, 0x9c, 0xd8,
+ 0x12, 0x6a, 0x6f, 0x2b, 0x67, 0xa1, 0x03, 0x80, 0x9a, 0x11, 0x0b, 0xe9,
+ 0xe0, 0xb5, 0xb3, 0xb8, 0x19, 0x4e, 0x0c, 0xa4, 0xd9, 0x2b, 0x3b, 0xc2,
+ 0xca, 0x20, 0xd3, 0x0c, 0xa4, 0xff, 0x93, 0x13, 0x1f, 0xfc, 0xba, 0x94,
+ 0x93, 0x8c, 0x64, 0x15, 0x2e, 0x28, 0xa9, 0x55, 0x8c, 0x2c, 0x48, 0xd3,
+ 0xd3, 0xc1, 0x50, 0x69, 0x19, 0xe8, 0x34, 0xd3, 0xf1, 0x04, 0x9f, 0x0a,
+ 0x7a, 0x21, 0x87, 0xbf, 0xb9, 0x59, 0x37, 0x2e, 0xf4, 0x71, 0xa5, 0x3e,
+ 0xbe, 0xcd, 0x70, 0x83, 0x18, 0xf8, 0x8a, 0x72, 0x85, 0x45, 0x1f, 0x08,
+ 0x01, 0x6f, 0x37, 0xf5, 0x2b, 0x7b, 0xea, 0xb9, 0x8b, 0xa3, 0xcc, 0xfd,
+ 0x35, 0x52, 0xdd, 0x66, 0xde, 0x4f, 0x30, 0xc5, 0x73, 0x81, 0xb6, 0xe8,
+ 0x3c, 0xd8, 0x48, 0x8a};
+
+ // The public key is specified as the following ASN.1 structure:
+ // SubjectPublicKeyInfo ::= SEQUENCE {
+ // algorithm AlgorithmIdentifier,
+ // subjectPublicKey BIT STRING }
+ const uint8_t public_key_info[294] = {
+ 0x30, 0x82, 0x01, 0x22, // a SEQUENCE of length 290 (0x122)
+ // algorithm
+ 0x30, 0x0d, // a SEQUENCE of length 13
+ 0x06, 0x09, // an OBJECT IDENTIFIER of length 9
+ 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05,
+ 0x00, // a NULL of length 0
+ // subjectPublicKey
+ 0x03, 0x82, 0x01, 0x0f, // a BIT STRING of length 271 (0x10f)
+ 0x00, // number of unused bits
+ 0x30, 0x82, 0x01, 0x0a, // a SEQUENCE of length 266 (0x10a)
+ // modulus
+ 0x02, 0x82, 0x01, 0x01, // an INTEGER of length 257 (0x101)
+ 0x00, 0xc4, 0x2d, 0xd5, 0x15, 0x8c, 0x9c, 0x26, 0x4c, 0xec, 0x32, 0x35,
+ 0xeb, 0x5f, 0xb8, 0x59, 0x01, 0x5a, 0xa6, 0x61, 0x81, 0x59, 0x3b, 0x70,
+ 0x63, 0xab, 0xe3, 0xdc, 0x3d, 0xc7, 0x2a, 0xb8, 0xc9, 0x33, 0xd3, 0x79,
+ 0xe4, 0x3a, 0xed, 0x3c, 0x30, 0x23, 0x84, 0x8e, 0xb3, 0x30, 0x14, 0xb6,
+ 0xb2, 0x87, 0xc3, 0x3d, 0x95, 0x54, 0x04, 0x9e, 0xdf, 0x99, 0xdd, 0x0b,
+ 0x25, 0x1e, 0x21, 0xde, 0x65, 0x29, 0x7e, 0x35, 0xa8, 0xa9, 0x54, 0xeb,
+ 0xf6, 0xf7, 0x32, 0x39, 0xd4, 0x26, 0x55, 0x95, 0xad, 0xef, 0xfb, 0xfe,
+ 0x58, 0x86, 0xd7, 0x9e, 0xf4, 0x00, 0x8d, 0x8c, 0x2a, 0x0c, 0xbd, 0x42,
+ 0x04, 0xce, 0xa7, 0x3f, 0x04, 0xf6, 0xee, 0x80, 0xf2, 0xaa, 0xef, 0x52,
+ 0xa1, 0x69, 0x66, 0xda, 0xbe, 0x1a, 0xad, 0x5d, 0xda, 0x2c, 0x66, 0xea,
+ 0x1a, 0x6b, 0xbb, 0xe5, 0x1a, 0x51, 0x4a, 0x00, 0x2f, 0x48, 0xc7, 0x98,
+ 0x75, 0xd8, 0xb9, 0x29, 0xc8, 0xee, 0xf8, 0x66, 0x6d, 0x0a, 0x9c, 0xb3,
+ 0xf3, 0xfc, 0x78, 0x7c, 0xa2, 0xf8, 0xa3, 0xf2, 0xb5, 0xc3, 0xf3, 0xb9,
+ 0x7a, 0x91, 0xc1, 0xa7, 0xe6, 0x25, 0x2e, 0x9c, 0xa8, 0xed, 0x12, 0x65,
+ 0x6e, 0x6a, 0xf6, 0x12, 0x44, 0x53, 0x70, 0x30, 0x95, 0xc3, 0x9c, 0x2b,
+ 0x58, 0x2b, 0x3d, 0x08, 0x74, 0x4a, 0xf2, 0xbe, 0x51, 0xb0, 0xbf, 0x87,
+ 0xd0, 0x4c, 0x27, 0x58, 0x6b, 0xb5, 0x35, 0xc5, 0x9d, 0xaf, 0x17, 0x31,
+ 0xf8, 0x0b, 0x8f, 0xee, 0xad, 0x81, 0x36, 0x05, 0x89, 0x08, 0x98, 0xcf,
+ 0x3a, 0xaf, 0x25, 0x87, 0xc0, 0x49, 0xea, 0xa7, 0xfd, 0x67, 0xf7, 0x45,
+ 0x8e, 0x97, 0xcc, 0x14, 0x39, 0xe2, 0x36, 0x85, 0xb5, 0x7e, 0x1a, 0x37,
+ 0xfd, 0x16, 0xf6, 0x71, 0x11, 0x9a, 0x74, 0x30, 0x16, 0xfe, 0x13, 0x94,
+ 0xa3, 0x3f, 0x84, 0x0d, 0x4f,
+ // public exponent
+ 0x02, 0x03, // an INTEGER of length 3
+ 0x01, 0x00, 0x01};
+
+ // We use the signature verifier to perform four signature verification
+ // tests.
+ crypto::SignatureVerifier verifier;
+ bool ok;
+
+ // Test 1: feed all of the data to the verifier at once (a single
+ // VerifyUpdate call).
+ ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1, signature,
+ sizeof(signature), public_key_info,
+ sizeof(public_key_info));
+ EXPECT_TRUE(ok);
+ verifier.VerifyUpdate(tbs_certificate, sizeof(tbs_certificate));
+ ok = verifier.VerifyFinal();
+ EXPECT_TRUE(ok);
+
+ // Test 2: feed the data to the verifier in three parts (three VerifyUpdate
+ // calls).
+ ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1, signature,
+ sizeof(signature), public_key_info,
+ sizeof(public_key_info));
+ EXPECT_TRUE(ok);
+ verifier.VerifyUpdate(tbs_certificate, 256);
+ verifier.VerifyUpdate(tbs_certificate + 256, 256);
+ verifier.VerifyUpdate(tbs_certificate + 512, sizeof(tbs_certificate) - 512);
+ ok = verifier.VerifyFinal();
+ EXPECT_TRUE(ok);
+
+ // Test 3: verify the signature with incorrect data.
+ uint8_t bad_tbs_certificate[sizeof(tbs_certificate)];
+ memcpy(bad_tbs_certificate, tbs_certificate, sizeof(tbs_certificate));
+ bad_tbs_certificate[10] += 1; // Corrupt one byte of the data.
+ ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1, signature,
+ sizeof(signature), public_key_info,
+ sizeof(public_key_info));
+ EXPECT_TRUE(ok);
+ verifier.VerifyUpdate(bad_tbs_certificate, sizeof(bad_tbs_certificate));
+ ok = verifier.VerifyFinal();
+ EXPECT_FALSE(ok);
+
+ // Test 4: verify a bad signature.
+ uint8_t bad_signature[sizeof(signature)];
+ memcpy(bad_signature, signature, sizeof(signature));
+ bad_signature[10] += 1; // Corrupt one byte of the signature.
+ ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1,
+ bad_signature, sizeof(bad_signature),
+ public_key_info, sizeof(public_key_info));
+
+ // A crypto library (e.g., NSS) may detect that the signature is corrupted
+ // and cause VerifyInit to return false, so it is fine for 'ok' to be false.
+ if (ok) {
+ verifier.VerifyUpdate(tbs_certificate, sizeof(tbs_certificate));
+ ok = verifier.VerifyFinal();
+ EXPECT_FALSE(ok);
+ }
+
+ // Test 5: import an invalid key.
+ uint8_t bad_public_key_info[sizeof(public_key_info)];
+ memcpy(bad_public_key_info, public_key_info, sizeof(public_key_info));
+ bad_public_key_info[0] += 1; // Corrupt part of the SPKI syntax.
+ ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1, signature,
+ sizeof(signature), bad_public_key_info,
+ sizeof(bad_public_key_info));
+ EXPECT_FALSE(ok);
+
+ // Test 6: import a key with extra data.
+ uint8_t long_public_key_info[sizeof(public_key_info) + 5];
+ memset(long_public_key_info, 0, sizeof(long_public_key_info));
+ memcpy(long_public_key_info, public_key_info, sizeof(public_key_info));
+ ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1, signature,
+ sizeof(signature), long_public_key_info,
+ sizeof(long_public_key_info));
+ EXPECT_FALSE(ok);
+}
+
+//////////////////////////////////////////////////////////////////////
+//
+// RSA-PSS signature verification known answer test
+//
+//////////////////////////////////////////////////////////////////////
+
+// The following RSA-PSS signature test vectors come from the pss-vect.txt
+// file downloaded from
+// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip.
+//
+// For each key, 6 random messages of length between 1 and 256 octets have
+// been RSASSA-PSS signed.
+//
+// Hash function: SHA-1
+// Mask generation function: MGF1 with SHA-1
+// Salt length: 20 octets
+
+// Example 1: A 1024-bit RSA Key Pair
+
+// RSA modulus n:
+static const char rsa_modulus_n_1[] =
+ "a5 6e 4a 0e 70 10 17 58 9a 51 87 dc 7e a8 41 d1 "
+ "56 f2 ec 0e 36 ad 52 a4 4d fe b1 e6 1f 7a d9 91 "
+ "d8 c5 10 56 ff ed b1 62 b4 c0 f2 83 a1 2a 88 a3 "
+ "94 df f5 26 ab 72 91 cb b3 07 ce ab fc e0 b1 df "
+ "d5 cd 95 08 09 6d 5b 2b 8b 6d f5 d6 71 ef 63 77 "
+ "c0 92 1c b2 3c 27 0a 70 e2 59 8e 6f f8 9d 19 f1 "
+ "05 ac c2 d3 f0 cb 35 f2 92 80 e1 38 6b 6f 64 c4 "
+ "ef 22 e1 e1 f2 0d 0c e8 cf fb 22 49 bd 9a 21 37 ";
+// RSA public exponent e:
+static const char rsa_public_exponent_e_1[] =
+ "01 00 01 ";
+
+// RSASSA-PSS Signature Example 1.1
+// Message to be signed:
+static const char message_1_1[] =
+ "cd c8 7d a2 23 d7 86 df 3b 45 e0 bb bc 72 13 26 "
+ "d1 ee 2a f8 06 cc 31 54 75 cc 6f 0d 9c 66 e1 b6 "
+ "23 71 d4 5c e2 39 2e 1a c9 28 44 c3 10 10 2f 15 "
+ "6a 0d 8d 52 c1 f4 c4 0b a3 aa 65 09 57 86 cb 76 "
+ "97 57 a6 56 3b a9 58 fe d0 bc c9 84 e8 b5 17 a3 "
+ "d5 f5 15 b2 3b 8a 41 e7 4a a8 67 69 3f 90 df b0 "
+ "61 a6 e8 6d fa ae e6 44 72 c0 0e 5f 20 94 57 29 "
+ "cb eb e7 7f 06 ce 78 e0 8f 40 98 fb a4 1f 9d 61 "
+ "93 c0 31 7e 8b 60 d4 b6 08 4a cb 42 d2 9e 38 08 "
+ "a3 bc 37 2d 85 e3 31 17 0f cb f7 cc 72 d0 b7 1c "
+ "29 66 48 b3 a4 d1 0f 41 62 95 d0 80 7a a6 25 ca "
+ "b2 74 4f d9 ea 8f d2 23 c4 25 37 02 98 28 bd 16 "
+ "be 02 54 6f 13 0f d2 e3 3b 93 6d 26 76 e0 8a ed "
+ "1b 73 31 8b 75 0a 01 67 d0 ";
+// Salt:
+static const char salt_1_1[] =
+ "de e9 59 c7 e0 64 11 36 14 20 ff 80 18 5e d5 7f "
+ "3e 67 76 af ";
+// Signature:
+static const char signature_1_1[] =
+ "90 74 30 8f b5 98 e9 70 1b 22 94 38 8e 52 f9 71 "
+ "fa ac 2b 60 a5 14 5a f1 85 df 52 87 b5 ed 28 87 "
+ "e5 7c e7 fd 44 dc 86 34 e4 07 c8 e0 e4 36 0b c2 "
+ "26 f3 ec 22 7f 9d 9e 54 63 8e 8d 31 f5 05 12 15 "
+ "df 6e bb 9c 2f 95 79 aa 77 59 8a 38 f9 14 b5 b9 "
+ "c1 bd 83 c4 e2 f9 f3 82 a0 d0 aa 35 42 ff ee 65 "
+ "98 4a 60 1b c6 9e b2 8d eb 27 dc a1 2c 82 c2 d4 "
+ "c3 f6 6c d5 00 f1 ff 2b 99 4d 8a 4e 30 cb b3 3c ";
+
+// RSASSA-PSS Signature Example 1.2
+// Message to be signed:
+static const char message_1_2[] =
+ "85 13 84 cd fe 81 9c 22 ed 6c 4c cb 30 da eb 5c "
+ "f0 59 bc 8e 11 66 b7 e3 53 0c 4c 23 3e 2b 5f 8f "
+ "71 a1 cc a5 82 d4 3e cc 72 b1 bc a1 6d fc 70 13 "
+ "22 6b 9e ";
+// Salt:
+static const char salt_1_2[] =
+ "ef 28 69 fa 40 c3 46 cb 18 3d ab 3d 7b ff c9 8f "
+ "d5 6d f4 2d ";
+// Signature:
+static const char signature_1_2[] =
+ "3e f7 f4 6e 83 1b f9 2b 32 27 41 42 a5 85 ff ce "
+ "fb dc a7 b3 2a e9 0d 10 fb 0f 0c 72 99 84 f0 4e "
+ "f2 9a 9d f0 78 07 75 ce 43 73 9b 97 83 83 90 db "
+ "0a 55 05 e6 3d e9 27 02 8d 9d 29 b2 19 ca 2c 45 "
+ "17 83 25 58 a5 5d 69 4a 6d 25 b9 da b6 60 03 c4 "
+ "cc cd 90 78 02 19 3b e5 17 0d 26 14 7d 37 b9 35 "
+ "90 24 1b e5 1c 25 05 5f 47 ef 62 75 2c fb e2 14 "
+ "18 fa fe 98 c2 2c 4d 4d 47 72 4f db 56 69 e8 43 ";
+
+// RSASSA-PSS Signature Example 1.3
+// Message to be signed:
+static const char message_1_3[] =
+ "a4 b1 59 94 17 61 c4 0c 6a 82 f2 b8 0d 1b 94 f5 "
+ "aa 26 54 fd 17 e1 2d 58 88 64 67 9b 54 cd 04 ef "
+ "8b d0 30 12 be 8d c3 7f 4b 83 af 79 63 fa ff 0d "
+ "fa 22 54 77 43 7c 48 01 7f f2 be 81 91 cf 39 55 "
+ "fc 07 35 6e ab 3f 32 2f 7f 62 0e 21 d2 54 e5 db "
+ "43 24 27 9f e0 67 e0 91 0e 2e 81 ca 2c ab 31 c7 "
+ "45 e6 7a 54 05 8e b5 0d 99 3c db 9e d0 b4 d0 29 "
+ "c0 6d 21 a9 4c a6 61 c3 ce 27 fa e1 d6 cb 20 f4 "
+ "56 4d 66 ce 47 67 58 3d 0e 5f 06 02 15 b5 90 17 "
+ "be 85 ea 84 89 39 12 7b d8 c9 c4 d4 7b 51 05 6c "
+ "03 1c f3 36 f1 7c 99 80 f3 b8 f5 b9 b6 87 8e 8b "
+ "79 7a a4 3b 88 26 84 33 3e 17 89 3f e9 ca a6 aa "
+ "29 9f 7e d1 a1 8e e2 c5 48 64 b7 b2 b9 9b 72 61 "
+ "8f b0 25 74 d1 39 ef 50 f0 19 c9 ee f4 16 97 13 "
+ "38 e7 d4 70 ";
+// Salt:
+static const char salt_1_3[] =
+ "71 0b 9c 47 47 d8 00 d4 de 87 f1 2a fd ce 6d f1 "
+ "81 07 cc 77 ";
+// Signature:
+static const char signature_1_3[] =
+ "66 60 26 fb a7 1b d3 e7 cf 13 15 7c c2 c5 1a 8e "
+ "4a a6 84 af 97 78 f9 18 49 f3 43 35 d1 41 c0 01 "
+ "54 c4 19 76 21 f9 62 4a 67 5b 5a bc 22 ee 7d 5b "
+ "aa ff aa e1 c9 ba ca 2c c3 73 b3 f3 3e 78 e6 14 "
+ "3c 39 5a 91 aa 7f ac a6 64 eb 73 3a fd 14 d8 82 "
+ "72 59 d9 9a 75 50 fa ca 50 1e f2 b0 4e 33 c2 3a "
+ "a5 1f 4b 9e 82 82 ef db 72 8c c0 ab 09 40 5a 91 "
+ "60 7c 63 69 96 1b c8 27 0d 2d 4f 39 fc e6 12 b1 ";
+
+// RSASSA-PSS Signature Example 1.4
+// Message to be signed:
+static const char message_1_4[] =
+ "bc 65 67 47 fa 9e af b3 f0 ";
+// Salt:
+static const char salt_1_4[] =
+ "05 6f 00 98 5d e1 4d 8e f5 ce a9 e8 2f 8c 27 be "
+ "f7 20 33 5e ";
+// Signature:
+static const char signature_1_4[] =
+ "46 09 79 3b 23 e9 d0 93 62 dc 21 bb 47 da 0b 4f "
+ "3a 76 22 64 9a 47 d4 64 01 9b 9a ea fe 53 35 9c "
+ "17 8c 91 cd 58 ba 6b cb 78 be 03 46 a7 bc 63 7f "
+ "4b 87 3d 4b ab 38 ee 66 1f 19 96 34 c5 47 a1 ad "
+ "84 42 e0 3d a0 15 b1 36 e5 43 f7 ab 07 c0 c1 3e "
+ "42 25 b8 de 8c ce 25 d4 f6 eb 84 00 f8 1f 7e 18 "
+ "33 b7 ee 6e 33 4d 37 09 64 ca 79 fd b8 72 b4 d7 "
+ "52 23 b5 ee b0 81 01 59 1f b5 32 d1 55 a6 de 87 ";
+
+// RSASSA-PSS Signature Example 1.5
+// Message to be signed:
+static const char message_1_5[] =
+ "b4 55 81 54 7e 54 27 77 0c 76 8e 8b 82 b7 55 64 "
+ "e0 ea 4e 9c 32 59 4d 6b ff 70 65 44 de 0a 87 76 "
+ "c7 a8 0b 45 76 55 0e ee 1b 2a ca bc 7e 8b 7d 3e "
+ "f7 bb 5b 03 e4 62 c1 10 47 ea dd 00 62 9a e5 75 "
+ "48 0a c1 47 0f e0 46 f1 3a 2b f5 af 17 92 1d c4 "
+ "b0 aa 8b 02 be e6 33 49 11 65 1d 7f 85 25 d1 0f "
+ "32 b5 1d 33 be 52 0d 3d df 5a 70 99 55 a3 df e7 "
+ "82 83 b9 e0 ab 54 04 6d 15 0c 17 7f 03 7f dc cc "
+ "5b e4 ea 5f 68 b5 e5 a3 8c 9d 7e dc cc c4 97 5f "
+ "45 5a 69 09 b4 ";
+// Salt:
+static const char salt_1_5[] =
+ "80 e7 0f f8 6a 08 de 3e c6 09 72 b3 9b 4f bf dc "
+ "ea 67 ae 8e ";
+// Signature:
+static const char signature_1_5[] =
+ "1d 2a ad 22 1c a4 d3 1d df 13 50 92 39 01 93 98 "
+ "e3 d1 4b 32 dc 34 dc 5a f4 ae ae a3 c0 95 af 73 "
+ "47 9c f0 a4 5e 56 29 63 5a 53 a0 18 37 76 15 b1 "
+ "6c b9 b1 3b 3e 09 d6 71 eb 71 e3 87 b8 54 5c 59 "
+ "60 da 5a 64 77 6e 76 8e 82 b2 c9 35 83 bf 10 4c "
+ "3f db 23 51 2b 7b 4e 89 f6 33 dd 00 63 a5 30 db "
+ "45 24 b0 1c 3f 38 4c 09 31 0e 31 5a 79 dc d3 d6 "
+ "84 02 2a 7f 31 c8 65 a6 64 e3 16 97 8b 75 9f ad ";
+
+// RSASSA-PSS Signature Example 1.6
+// Message to be signed:
+static const char message_1_6[] =
+ "10 aa e9 a0 ab 0b 59 5d 08 41 20 7b 70 0d 48 d7 "
+ "5f ae dd e3 b7 75 cd 6b 4c c8 8a e0 6e 46 94 ec "
+ "74 ba 18 f8 52 0d 4f 5e a6 9c bb e7 cc 2b eb a4 "
+ "3e fd c1 02 15 ac 4e b3 2d c3 02 a1 f5 3d c6 c4 "
+ "35 22 67 e7 93 6c fe bf 7c 8d 67 03 57 84 a3 90 "
+ "9f a8 59 c7 b7 b5 9b 8e 39 c5 c2 34 9f 18 86 b7 "
+ "05 a3 02 67 d4 02 f7 48 6a b4 f5 8c ad 5d 69 ad "
+ "b1 7a b8 cd 0c e1 ca f5 02 5a f4 ae 24 b1 fb 87 "
+ "94 c6 07 0c c0 9a 51 e2 f9 91 13 11 e3 87 7d 00 "
+ "44 c7 1c 57 a9 93 39 50 08 80 6b 72 3a c3 83 73 "
+ "d3 95 48 18 18 52 8c 1e 70 53 73 92 82 05 35 29 "
+ "51 0e 93 5c d0 fa 77 b8 fa 53 cc 2d 47 4b d4 fb "
+ "3c c5 c6 72 d6 ff dc 90 a0 0f 98 48 71 2c 4b cf "
+ "e4 6c 60 57 36 59 b1 1e 64 57 e8 61 f0 f6 04 b6 "
+ "13 8d 14 4f 8c e4 e2 da 73 ";
+// Salt:
+static const char salt_1_6[] =
+ "a8 ab 69 dd 80 1f 00 74 c2 a1 fc 60 64 98 36 c6 "
+ "16 d9 96 81 ";
+// Signature:
+static const char signature_1_6[] =
+ "2a 34 f6 12 5e 1f 6b 0b f9 71 e8 4f bd 41 c6 32 "
+ "be 8f 2c 2a ce 7d e8 b6 92 6e 31 ff 93 e9 af 98 "
+ "7f bc 06 e5 1e 9b e1 4f 51 98 f9 1f 3f 95 3b d6 "
+ "7d a6 0a 9d f5 97 64 c3 dc 0f e0 8e 1c be f0 b7 "
+ "5f 86 8d 10 ad 3f ba 74 9f ef 59 fb 6d ac 46 a0 "
+ "d6 e5 04 36 93 31 58 6f 58 e4 62 8f 39 aa 27 89 "
+ "82 54 3b c0 ee b5 37 dc 61 95 80 19 b3 94 fb 27 "
+ "3f 21 58 58 a0 a0 1a c4 d6 50 b9 55 c6 7f 4c 58 ";
+
+// Example 9: A 1536-bit RSA Key Pair
+
+// RSA modulus n:
+static const char rsa_modulus_n_9[] =
+ "e6 bd 69 2a c9 66 45 79 04 03 fd d0 f5 be b8 b9 "
+ "bf 92 ed 10 00 7f c3 65 04 64 19 dd 06 c0 5c 5b "
+ "5b 2f 48 ec f9 89 e4 ce 26 91 09 97 9c bb 40 b4 "
+ "a0 ad 24 d2 24 83 d1 ee 31 5a d4 cc b1 53 42 68 "
+ "35 26 91 c5 24 f6 dd 8e 6c 29 d2 24 cf 24 69 73 "
+ "ae c8 6c 5b f6 b1 40 1a 85 0d 1b 9a d1 bb 8c bc "
+ "ec 47 b0 6f 0f 8c 7f 45 d3 fc 8f 31 92 99 c5 43 "
+ "3d db c2 b3 05 3b 47 de d2 ec d4 a4 ca ef d6 14 "
+ "83 3d c8 bb 62 2f 31 7e d0 76 b8 05 7f e8 de 3f "
+ "84 48 0a d5 e8 3e 4a 61 90 4a 4f 24 8f b3 97 02 "
+ "73 57 e1 d3 0e 46 31 39 81 5c 6f d4 fd 5a c5 b8 "
+ "17 2a 45 23 0e cb 63 18 a0 4f 14 55 d8 4e 5a 8b ";
+// RSA public exponent e:
+static const char rsa_public_exponent_e_9[] =
+ "01 00 01 ";
+
+// RSASSA-PSS Signature Example 9.1
+// Message to be signed:
+static const char message_9_1[] =
+ "a8 8e 26 58 55 e9 d7 ca 36 c6 87 95 f0 b3 1b 59 "
+ "1c d6 58 7c 71 d0 60 a0 b3 f7 f3 ea ef 43 79 59 "
+ "22 02 8b c2 b6 ad 46 7c fc 2d 7f 65 9c 53 85 aa "
+ "70 ba 36 72 cd de 4c fe 49 70 cc 79 04 60 1b 27 "
+ "88 72 bf 51 32 1c 4a 97 2f 3c 95 57 0f 34 45 d4 "
+ "f5 79 80 e0 f2 0d f5 48 46 e6 a5 2c 66 8f 12 88 "
+ "c0 3f 95 00 6e a3 2f 56 2d 40 d5 2a f9 fe b3 2f "
+ "0f a0 6d b6 5b 58 8a 23 7b 34 e5 92 d5 5c f9 79 "
+ "f9 03 a6 42 ef 64 d2 ed 54 2a a8 c7 7d c1 dd 76 "
+ "2f 45 a5 93 03 ed 75 e5 41 ca 27 1e 2b 60 ca 70 "
+ "9e 44 fa 06 61 13 1e 8d 5d 41 63 fd 8d 39 85 66 "
+ "ce 26 de 87 30 e7 2f 9c ca 73 76 41 c2 44 15 94 "
+ "20 63 70 28 df 0a 18 07 9d 62 08 ea 8b 47 11 a2 "
+ "c7 50 f5 ";
+// Salt:
+static const char salt_9_1[] =
+ "c0 a4 25 31 3d f8 d7 56 4b d2 43 4d 31 15 23 d5 "
+ "25 7e ed 80 ";
+// Signature:
+static const char signature_9_1[] =
+ "58 61 07 22 6c 3c e0 13 a7 c8 f0 4d 1a 6a 29 59 "
+ "bb 4b 8e 20 5b a4 3a 27 b5 0f 12 41 11 bc 35 ef "
+ "58 9b 03 9f 59 32 18 7c b6 96 d7 d9 a3 2c 0c 38 "
+ "30 0a 5c dd a4 83 4b 62 d2 eb 24 0a f3 3f 79 d1 "
+ "3d fb f0 95 bf 59 9e 0d 96 86 94 8c 19 64 74 7b "
+ "67 e8 9c 9a ba 5c d8 50 16 23 6f 56 6c c5 80 2c "
+ "b1 3e ad 51 bc 7c a6 be f3 b9 4d cb db b1 d5 70 "
+ "46 97 71 df 0e 00 b1 a8 a0 67 77 47 2d 23 16 27 "
+ "9e da e8 64 74 66 8d 4e 1e ff f9 5f 1d e6 1c 60 "
+ "20 da 32 ae 92 bb f1 65 20 fe f3 cf 4d 88 f6 11 "
+ "21 f2 4b bd 9f e9 1b 59 ca f1 23 5b 2a 93 ff 81 "
+ "fc 40 3a dd f4 eb de a8 49 34 a9 cd af 8e 1a 9e ";
+
+// RSASSA-PSS Signature Example 9.2
+// Message to be signed:
+static const char message_9_2[] =
+ "c8 c9 c6 af 04 ac da 41 4d 22 7e f2 3e 08 20 c3 "
+ "73 2c 50 0d c8 72 75 e9 5b 0d 09 54 13 99 3c 26 "
+ "58 bc 1d 98 85 81 ba 87 9c 2d 20 1f 14 cb 88 ce "
+ "d1 53 a0 19 69 a7 bf 0a 7b e7 9c 84 c1 48 6b c1 "
+ "2b 3f a6 c5 98 71 b6 82 7c 8c e2 53 ca 5f ef a8 "
+ "a8 c6 90 bf 32 6e 8e 37 cd b9 6d 90 a8 2e ba b6 "
+ "9f 86 35 0e 18 22 e8 bd 53 6a 2e ";
+// Salt:
+static const char salt_9_2[] =
+ "b3 07 c4 3b 48 50 a8 da c2 f1 5f 32 e3 78 39 ef "
+ "8c 5c 0e 91 ";
+// Signature:
+static const char signature_9_2[] =
+ "80 b6 d6 43 25 52 09 f0 a4 56 76 38 97 ac 9e d2 "
+ "59 d4 59 b4 9c 28 87 e5 88 2e cb 44 34 cf d6 6d "
+ "d7 e1 69 93 75 38 1e 51 cd 7f 55 4f 2c 27 17 04 "
+ "b3 99 d4 2b 4b e2 54 0a 0e ca 61 95 1f 55 26 7f "
+ "7c 28 78 c1 22 84 2d ad b2 8b 01 bd 5f 8c 02 5f "
+ "7e 22 84 18 a6 73 c0 3d 6b c0 c7 36 d0 a2 95 46 "
+ "bd 67 f7 86 d9 d6 92 cc ea 77 8d 71 d9 8c 20 63 "
+ "b7 a7 10 92 18 7a 4d 35 af 10 81 11 d8 3e 83 ea "
+ "e4 6c 46 aa 34 27 7e 06 04 45 89 90 37 88 f1 d5 "
+ "e7 ce e2 5f b4 85 e9 29 49 11 88 14 d6 f2 c3 ee "
+ "36 14 89 01 6f 32 7f b5 bc 51 7e b5 04 70 bf fa "
+ "1a fa 5f 4c e9 aa 0c e5 b8 ee 19 bf 55 01 b9 58 ";
+
+// RSASSA-PSS Signature Example 9.3
+// Message to be signed:
+static const char message_9_3[] =
+ "0a fa d4 2c cd 4f c6 06 54 a5 50 02 d2 28 f5 2a "
+ "4a 5f e0 3b 8b bb 08 ca 82 da ca 55 8b 44 db e1 "
+ "26 6e 50 c0 e7 45 a3 6d 9d 29 04 e3 40 8a bc d1 "
+ "fd 56 99 94 06 3f 4a 75 cc 72 f2 fe e2 a0 cd 89 "
+ "3a 43 af 1c 5b 8b 48 7d f0 a7 16 10 02 4e 4f 6d "
+ "df 9f 28 ad 08 13 c1 aa b9 1b cb 3c 90 64 d5 ff "
+ "74 2d ef fe a6 57 09 41 39 36 9e 5e a6 f4 a9 63 "
+ "19 a5 cc 82 24 14 5b 54 50 62 75 8f ef d1 fe 34 "
+ "09 ae 16 92 59 c6 cd fd 6b 5f 29 58 e3 14 fa ec "
+ "be 69 d2 ca ce 58 ee 55 17 9a b9 b3 e6 d1 ec c1 "
+ "4a 55 7c 5f eb e9 88 59 52 64 fc 5d a1 c5 71 46 "
+ "2e ca 79 8a 18 a1 a4 94 0c da b4 a3 e9 20 09 cc "
+ "d4 2e 1e 94 7b 13 14 e3 22 38 a2 de ce 7d 23 a8 "
+ "9b 5b 30 c7 51 fd 0a 4a 43 0d 2c 54 85 94 ";
+// Salt:
+static const char salt_9_3[] =
+ "9a 2b 00 7e 80 97 8b bb 19 2c 35 4e b7 da 9a ed "
+ "fc 74 db f5 ";
+// Signature:
+static const char signature_9_3[] =
+ "48 44 08 f3 89 8c d5 f5 34 83 f8 08 19 ef bf 27 "
+ "08 c3 4d 27 a8 b2 a6 fa e8 b3 22 f9 24 02 37 f9 "
+ "81 81 7a ca 18 46 f1 08 4d aa 6d 7c 07 95 f6 e5 "
+ "bf 1a f5 9c 38 e1 85 84 37 ce 1f 7e c4 19 b9 8c "
+ "87 36 ad f6 dd 9a 00 b1 80 6d 2b d3 ad 0a 73 77 "
+ "5e 05 f5 2d fe f3 a5 9a b4 b0 81 43 f0 df 05 cd "
+ "1a d9 d0 4b ec ec a6 da a4 a2 12 98 03 e2 00 cb "
+ "c7 77 87 ca f4 c1 d0 66 3a 6c 59 87 b6 05 95 20 "
+ "19 78 2c af 2e c1 42 6d 68 fb 94 ed 1d 4b e8 16 "
+ "a7 ed 08 1b 77 e6 ab 33 0b 3f fc 07 38 20 fe cd "
+ "e3 72 7f cb e2 95 ee 61 a0 50 a3 43 65 86 37 c3 "
+ "fd 65 9c fb 63 73 6d e3 2d 9f 90 d3 c2 f6 3e ca ";
+
+// RSASSA-PSS Signature Example 9.4
+// Message to be signed:
+static const char message_9_4[] =
+ "1d fd 43 b4 6c 93 db 82 62 9b da e2 bd 0a 12 b8 "
+ "82 ea 04 c3 b4 65 f5 cf 93 02 3f 01 05 96 26 db "
+ "be 99 f2 6b b1 be 94 9d dd d1 6d c7 f3 de bb 19 "
+ "a1 94 62 7f 0b 22 44 34 df 7d 87 00 e9 e9 8b 06 "
+ "e3 60 c1 2f db e3 d1 9f 51 c9 68 4e b9 08 9e cb "
+ "b0 a2 f0 45 03 99 d3 f5 9e ac 72 94 08 5d 04 4f "
+ "53 93 c6 ce 73 74 23 d8 b8 6c 41 53 70 d3 89 e3 "
+ "0b 9f 0a 3c 02 d2 5d 00 82 e8 ad 6f 3f 1e f2 4a "
+ "45 c3 cf 82 b3 83 36 70 63 a4 d4 61 3e 42 64 f0 "
+ "1b 2d ac 2e 5a a4 20 43 f8 fb 5f 69 fa 87 1d 14 "
+ "fb 27 3e 76 7a 53 1c 40 f0 2f 34 3b c2 fb 45 a0 "
+ "c7 e0 f6 be 25 61 92 3a 77 21 1d 66 a6 e2 db b4 "
+ "3c 36 63 50 be ae 22 da 3a c2 c1 f5 07 70 96 fc "
+ "b5 c4 bf 25 5f 75 74 35 1a e0 b1 e1 f0 36 32 81 "
+ "7c 08 56 d4 a8 ba 97 af bd c8 b8 58 55 40 2b c5 "
+ "69 26 fc ec 20 9f 9e a8 ";
+// Salt:
+static const char salt_9_4[] =
+ "70 f3 82 bd df 4d 5d 2d d8 8b 3b c7 b7 30 8b e6 "
+ "32 b8 40 45 ";
+// Signature:
+static const char signature_9_4[] =
+ "84 eb eb 48 1b e5 98 45 b4 64 68 ba fb 47 1c 01 "
+ "12 e0 2b 23 5d 84 b5 d9 11 cb d1 92 6e e5 07 4a "
+ "e0 42 44 95 cb 20 e8 23 08 b8 eb b6 5f 41 9a 03 "
+ "fb 40 e7 2b 78 98 1d 88 aa d1 43 05 36 85 17 2c "
+ "97 b2 9c 8b 7b f0 ae 73 b5 b2 26 3c 40 3d a0 ed "
+ "2f 80 ff 74 50 af 78 28 eb 8b 86 f0 02 8b d2 a8 "
+ "b1 76 a4 d2 28 cc ce a1 83 94 f2 38 b0 9f f7 58 "
+ "cc 00 bc 04 30 11 52 35 57 42 f2 82 b5 4e 66 3a "
+ "91 9e 70 9d 8d a2 4a de 55 00 a7 b9 aa 50 22 6e "
+ "0c a5 29 23 e6 c2 d8 60 ec 50 ff 48 0f a5 74 77 "
+ "e8 2b 05 65 f4 37 9f 79 c7 72 d5 c2 da 80 af 9f "
+ "bf 32 5e ce 6f c2 0b 00 96 16 14 be e8 9a 18 3e ";
+
+// RSASSA-PSS Signature Example 9.5
+// Message to be signed:
+static const char message_9_5[] =
+ "1b dc 6e 7c 98 fb 8c f5 4e 9b 09 7b 66 a8 31 e9 "
+ "cf e5 2d 9d 48 88 44 8e e4 b0 97 80 93 ba 1d 7d "
+ "73 ae 78 b3 a6 2b a4 ad 95 cd 28 9c cb 9e 00 52 "
+ "26 bb 3d 17 8b cc aa 82 1f b0 44 a4 e2 1e e9 76 "
+ "96 c1 4d 06 78 c9 4c 2d ae 93 b0 ad 73 92 22 18 "
+ "55 3d aa 7e 44 eb e5 77 25 a7 a4 5c c7 2b 9b 21 "
+ "38 a6 b1 7c 8d b4 11 ce 82 79 ee 12 41 af f0 a8 "
+ "be c6 f7 7f 87 ed b0 c6 9c b2 72 36 e3 43 5a 80 "
+ "0b 19 2e 4f 11 e5 19 e3 fe 30 fc 30 ea cc ca 4f "
+ "bb 41 76 90 29 bf 70 8e 81 7a 9e 68 38 05 be 67 "
+ "fa 10 09 84 68 3b 74 83 8e 3b cf fa 79 36 6e ed "
+ "1d 48 1c 76 72 91 18 83 8f 31 ba 8a 04 8a 93 c1 "
+ "be 44 24 59 8e 8d f6 32 8b 7a 77 88 0a 3f 9c 7e "
+ "2e 8d fc a8 eb 5a 26 fb 86 bd c5 56 d4 2b be 01 "
+ "d9 fa 6e d8 06 46 49 1c 93 41 ";
+// Salt:
+static const char salt_9_5[] =
+ "d6 89 25 7a 86 ef fa 68 21 2c 5e 0c 61 9e ca 29 "
+ "5f b9 1b 67 ";
+// Signature:
+static const char signature_9_5[] =
+ "82 10 2d f8 cb 91 e7 17 99 19 a0 4d 26 d3 35 d6 "
+ "4f bc 2f 87 2c 44 83 39 43 24 1d e8 45 48 10 27 "
+ "4c df 3d b5 f4 2d 42 3d b1 52 af 71 35 f7 01 42 "
+ "0e 39 b4 94 a6 7c bf d1 9f 91 19 da 23 3a 23 da "
+ "5c 64 39 b5 ba 0d 2b c3 73 ee e3 50 70 01 37 8d "
+ "4a 40 73 85 6b 7f e2 ab a0 b5 ee 93 b2 7f 4a fe "
+ "c7 d4 d1 20 92 1c 83 f6 06 76 5b 02 c1 9e 4d 6a "
+ "1a 3b 95 fa 4c 42 29 51 be 4f 52 13 10 77 ef 17 "
+ "17 97 29 cd df bd b5 69 50 db ac ee fe 78 cb 16 "
+ "64 0a 09 9e a5 6d 24 38 9e ef 10 f8 fe cb 31 ba "
+ "3e a3 b2 27 c0 a8 66 98 bb 89 e3 e9 36 39 05 bf "
+ "22 77 7b 2a 3a a5 21 b6 5b 4c ef 76 d8 3b de 4c ";
+
+// RSASSA-PSS Signature Example 9.6
+// Message to be signed:
+static const char message_9_6[] =
+ "88 c7 a9 f1 36 04 01 d9 0e 53 b1 01 b6 1c 53 25 "
+ "c3 c7 5d b1 b4 11 fb eb 8e 83 0b 75 e9 6b 56 67 "
+ "0a d2 45 40 4e 16 79 35 44 ee 35 4b c6 13 a9 0c "
+ "c9 84 87 15 a7 3d b5 89 3e 7f 6d 27 98 15 c0 c1 "
+ "de 83 ef 8e 29 56 e3 a5 6e d2 6a 88 8d 7a 9c dc "
+ "d0 42 f4 b1 6b 7f a5 1e f1 a0 57 36 62 d1 6a 30 "
+ "2d 0e c5 b2 85 d2 e0 3a d9 65 29 c8 7b 3d 37 4d "
+ "b3 72 d9 5b 24 43 d0 61 b6 b1 a3 50 ba 87 80 7e "
+ "d0 83 af d1 eb 05 c3 f5 2f 4e ba 5e d2 22 77 14 "
+ "fd b5 0b 9d 9d 9d d6 81 4f 62 f6 27 2f cd 5c db "
+ "ce 7a 9e f7 97 ";
+// Salt:
+static const char salt_9_6[] =
+ "c2 5f 13 bf 67 d0 81 67 1a 04 81 a1 f1 82 0d 61 "
+ "3b ba 22 76 ";
+// Signature:
+static const char signature_9_6[] =
+ "a7 fd b0 d2 59 16 5c a2 c8 8d 00 bb f1 02 8a 86 "
+ "7d 33 76 99 d0 61 19 3b 17 a9 64 8e 14 cc bb aa "
+ "de ac aa cd ec 81 5e 75 71 29 4e bb 8a 11 7a f2 "
+ "05 fa 07 8b 47 b0 71 2c 19 9e 3a d0 51 35 c5 04 "
+ "c2 4b 81 70 51 15 74 08 02 48 79 92 ff d5 11 d4 "
+ "af c6 b8 54 49 1e b3 f0 dd 52 31 39 54 2f f1 5c "
+ "31 01 ee 85 54 35 17 c6 a3 c7 94 17 c6 7e 2d d9 "
+ "aa 74 1e 9a 29 b0 6d cb 59 3c 23 36 b3 67 0a e3 "
+ "af ba c7 c3 e7 6e 21 54 73 e8 66 e3 38 ca 24 4d "
+ "e0 0b 62 62 4d 6b 94 26 82 2c ea e9 f8 cc 46 08 "
+ "95 f4 12 50 07 3f d4 5c 5a 1e 7b 42 5c 20 4a 42 "
+ "3a 69 91 59 f6 90 3e 71 0b 37 a7 bb 2b c8 04 9f ";
+
+// Example 10: A 2048-bit RSA Key Pair
+
+// RSA modulus n:
+static const char rsa_modulus_n_10[] =
+ "a5 dd 86 7a c4 cb 02 f9 0b 94 57 d4 8c 14 a7 70 "
+ "ef 99 1c 56 c3 9c 0e c6 5f d1 1a fa 89 37 ce a5 "
+ "7b 9b e7 ac 73 b4 5c 00 17 61 5b 82 d6 22 e3 18 "
+ "75 3b 60 27 c0 fd 15 7b e1 2f 80 90 fe e2 a7 ad "
+ "cd 0e ef 75 9f 88 ba 49 97 c7 a4 2d 58 c9 aa 12 "
+ "cb 99 ae 00 1f e5 21 c1 3b b5 43 14 45 a8 d5 ae "
+ "4f 5e 4c 7e 94 8a c2 27 d3 60 40 71 f2 0e 57 7e "
+ "90 5f be b1 5d fa f0 6d 1d e5 ae 62 53 d6 3a 6a "
+ "21 20 b3 1a 5d a5 da bc 95 50 60 0e 20 f2 7d 37 "
+ "39 e2 62 79 25 fe a3 cc 50 9f 21 df f0 4e 6e ea "
+ "45 49 c5 40 d6 80 9f f9 30 7e ed e9 1f ff 58 73 "
+ "3d 83 85 a2 37 d6 d3 70 5a 33 e3 91 90 09 92 07 "
+ "0d f7 ad f1 35 7c f7 e3 70 0c e3 66 7d e8 3f 17 "
+ "b8 df 17 78 db 38 1d ce 09 cb 4a d0 58 a5 11 00 "
+ "1a 73 81 98 ee 27 cf 55 a1 3b 75 45 39 90 65 82 "
+ "ec 8b 17 4b d5 8d 5d 1f 3d 76 7c 61 37 21 ae 05 ";
+// RSA public exponent e:
+static const char rsa_public_exponent_e_10[] =
+ "01 00 01 ";
+
+// RSASSA-PSS Signature Example 10.1
+// Message to be signed:
+static const char message_10_1[] =
+ "88 31 77 e5 12 6b 9b e2 d9 a9 68 03 27 d5 37 0c "
+ "6f 26 86 1f 58 20 c4 3d a6 7a 3a d6 09 ";
+// Salt:
+static const char salt_10_1[] =
+ "04 e2 15 ee 6f f9 34 b9 da 70 d7 73 0c 87 34 ab "
+ "fc ec de 89 ";
+// Signature:
+static const char signature_10_1[] =
+ "82 c2 b1 60 09 3b 8a a3 c0 f7 52 2b 19 f8 73 54 "
+ "06 6c 77 84 7a bf 2a 9f ce 54 2d 0e 84 e9 20 c5 "
+ "af b4 9f fd fd ac e1 65 60 ee 94 a1 36 96 01 14 "
+ "8e ba d7 a0 e1 51 cf 16 33 17 91 a5 72 7d 05 f2 "
+ "1e 74 e7 eb 81 14 40 20 69 35 d7 44 76 5a 15 e7 "
+ "9f 01 5c b6 6c 53 2c 87 a6 a0 59 61 c8 bf ad 74 "
+ "1a 9a 66 57 02 28 94 39 3e 72 23 73 97 96 c0 2a "
+ "77 45 5d 0f 55 5b 0e c0 1d df 25 9b 62 07 fd 0f "
+ "d5 76 14 ce f1 a5 57 3b aa ff 4e c0 00 69 95 16 "
+ "59 b8 5f 24 30 0a 25 16 0c a8 52 2d c6 e6 72 7e "
+ "57 d0 19 d7 e6 36 29 b8 fe 5e 89 e2 5c c1 5b eb "
+ "3a 64 75 77 55 92 99 28 0b 9b 28 f7 9b 04 09 00 "
+ "0b e2 5b bd 96 40 8b a3 b4 3c c4 86 18 4d d1 c8 "
+ "e6 25 53 fa 1a f4 04 0f 60 66 3d e7 f5 e4 9c 04 "
+ "38 8e 25 7f 1c e8 9c 95 da b4 8a 31 5d 9b 66 b1 "
+ "b7 62 82 33 87 6f f2 38 52 30 d0 70 d0 7e 16 66 ";
+
+// RSASSA-PSS Signature Example 10.2
+// Message to be signed:
+static const char message_10_2[] =
+ "dd 67 0a 01 46 58 68 ad c9 3f 26 13 19 57 a5 0c "
+ "52 fb 77 7c db aa 30 89 2c 9e 12 36 11 64 ec 13 "
+ "97 9d 43 04 81 18 e4 44 5d b8 7b ee 58 dd 98 7b "
+ "34 25 d0 20 71 d8 db ae 80 70 8b 03 9d bb 64 db "
+ "d1 de 56 57 d9 fe d0 c1 18 a5 41 43 74 2e 0f f3 "
+ "c8 7f 74 e4 58 57 64 7a f3 f7 9e b0 a1 4c 9d 75 "
+ "ea 9a 1a 04 b7 cf 47 8a 89 7a 70 8f d9 88 f4 8e "
+ "80 1e db 0b 70 39 df 8c 23 bb 3c 56 f4 e8 21 ac ";
+// Salt:
+static const char salt_10_2[] =
+ "8b 2b dd 4b 40 fa f5 45 c7 78 dd f9 bc 1a 49 cb "
+ "57 f9 b7 1b ";
+// Signature:
+static const char signature_10_2[] =
+ "14 ae 35 d9 dd 06 ba 92 f7 f3 b8 97 97 8a ed 7c "
+ "d4 bf 5f f0 b5 85 a4 0b d4 6c e1 b4 2c d2 70 30 "
+ "53 bb 90 44 d6 4e 81 3d 8f 96 db 2d d7 00 7d 10 "
+ "11 8f 6f 8f 84 96 09 7a d7 5e 1f f6 92 34 1b 28 "
+ "92 ad 55 a6 33 a1 c5 5e 7f 0a 0a d5 9a 0e 20 3a "
+ "5b 82 78 ae c5 4d d8 62 2e 28 31 d8 71 74 f8 ca "
+ "ff 43 ee 6c 46 44 53 45 d8 4a 59 65 9b fb 92 ec "
+ "d4 c8 18 66 86 95 f3 47 06 f6 68 28 a8 99 59 63 "
+ "7f 2b f3 e3 25 1c 24 bd ba 4d 4b 76 49 da 00 22 "
+ "21 8b 11 9c 84 e7 9a 65 27 ec 5b 8a 5f 86 1c 15 "
+ "99 52 e2 3e c0 5e 1e 71 73 46 fa ef e8 b1 68 68 "
+ "25 bd 2b 26 2f b2 53 10 66 c0 de 09 ac de 2e 42 "
+ "31 69 07 28 b5 d8 5e 11 5a 2f 6b 92 b7 9c 25 ab "
+ "c9 bd 93 99 ff 8b cf 82 5a 52 ea 1f 56 ea 76 dd "
+ "26 f4 3b aa fa 18 bf a9 2a 50 4c bd 35 69 9e 26 "
+ "d1 dc c5 a2 88 73 85 f3 c6 32 32 f0 6f 32 44 c3 ";
+
+// RSASSA-PSS Signature Example 10.3
+// Message to be signed:
+static const char message_10_3[] =
+ "48 b2 b6 a5 7a 63 c8 4c ea 85 9d 65 c6 68 28 4b "
+ "08 d9 6b dc aa be 25 2d b0 e4 a9 6c b1 ba c6 01 "
+ "93 41 db 6f be fb 8d 10 6b 0e 90 ed a6 bc c6 c6 "
+ "26 2f 37 e7 ea 9c 7e 5d 22 6b d7 df 85 ec 5e 71 "
+ "ef ff 2f 54 c5 db 57 7f f7 29 ff 91 b8 42 49 1d "
+ "e2 74 1d 0c 63 16 07 df 58 6b 90 5b 23 b9 1a f1 "
+ "3d a1 23 04 bf 83 ec a8 a7 3e 87 1f f9 db ";
+// Salt:
+static const char salt_10_3[] =
+ "4e 96 fc 1b 39 8f 92 b4 46 71 01 0c 0d c3 ef d6 "
+ "e2 0c 2d 73 ";
+// Signature:
+static const char signature_10_3[] =
+ "6e 3e 4d 7b 6b 15 d2 fb 46 01 3b 89 00 aa 5b bb "
+ "39 39 cf 2c 09 57 17 98 70 42 02 6e e6 2c 74 c5 "
+ "4c ff d5 d7 d5 7e fb bf 95 0a 0f 5c 57 4f a0 9d "
+ "3f c1 c9 f5 13 b0 5b 4f f5 0d d8 df 7e df a2 01 "
+ "02 85 4c 35 e5 92 18 01 19 a7 0c e5 b0 85 18 2a "
+ "a0 2d 9e a2 aa 90 d1 df 03 f2 da ae 88 5b a2 f5 "
+ "d0 5a fd ac 97 47 6f 06 b9 3b 5b c9 4a 1a 80 aa "
+ "91 16 c4 d6 15 f3 33 b0 98 89 2b 25 ff ac e2 66 "
+ "f5 db 5a 5a 3b cc 10 a8 24 ed 55 aa d3 5b 72 78 "
+ "34 fb 8c 07 da 28 fc f4 16 a5 d9 b2 22 4f 1f 8b "
+ "44 2b 36 f9 1e 45 6f de a2 d7 cf e3 36 72 68 de "
+ "03 07 a4 c7 4e 92 41 59 ed 33 39 3d 5e 06 55 53 "
+ "1c 77 32 7b 89 82 1b de df 88 01 61 c7 8c d4 19 "
+ "6b 54 19 f7 ac c3 f1 3e 5e bf 16 1b 6e 7c 67 24 "
+ "71 6c a3 3b 85 c2 e2 56 40 19 2a c2 85 96 51 d5 "
+ "0b de 7e b9 76 e5 1c ec 82 8b 98 b6 56 3b 86 bb ";
+
+// RSASSA-PSS Signature Example 10.4
+// Message to be signed:
+static const char message_10_4[] =
+ "0b 87 77 c7 f8 39 ba f0 a6 4b bb db c5 ce 79 75 "
+ "5c 57 a2 05 b8 45 c1 74 e2 d2 e9 05 46 a0 89 c4 "
+ "e6 ec 8a df fa 23 a7 ea 97 ba e6 b6 5d 78 2b 82 "
+ "db 5d 2b 5a 56 d2 2a 29 a0 5e 7c 44 33 e2 b8 2a "
+ "62 1a bb a9 0a dd 05 ce 39 3f c4 8a 84 05 42 45 "
+ "1a ";
+// Salt:
+static const char salt_10_4[] =
+ "c7 cd 69 8d 84 b6 51 28 d8 83 5e 3a 8b 1e b0 e0 "
+ "1c b5 41 ec ";
+// Signature:
+static const char signature_10_4[] =
+ "34 04 7f f9 6c 4d c0 dc 90 b2 d4 ff 59 a1 a3 61 "
+ "a4 75 4b 25 5d 2e e0 af 7d 8b f8 7c 9b c9 e7 dd "
+ "ee de 33 93 4c 63 ca 1c 0e 3d 26 2c b1 45 ef 93 "
+ "2a 1f 2c 0a 99 7a a6 a3 4f 8e ae e7 47 7d 82 cc "
+ "f0 90 95 a6 b8 ac ad 38 d4 ee c9 fb 7e ab 7a d0 "
+ "2d a1 d1 1d 8e 54 c1 82 5e 55 bf 58 c2 a2 32 34 "
+ "b9 02 be 12 4f 9e 90 38 a8 f6 8f a4 5d ab 72 f6 "
+ "6e 09 45 bf 1d 8b ac c9 04 4c 6f 07 09 8c 9f ce "
+ "c5 8a 3a ab 10 0c 80 51 78 15 5f 03 0a 12 4c 45 "
+ "0e 5a cb da 47 d0 e4 f1 0b 80 a2 3f 80 3e 77 4d "
+ "02 3b 00 15 c2 0b 9f 9b be 7c 91 29 63 38 d5 ec "
+ "b4 71 ca fb 03 20 07 b6 7a 60 be 5f 69 50 4a 9f "
+ "01 ab b3 cb 46 7b 26 0e 2b ce 86 0b e8 d9 5b f9 "
+ "2c 0c 8e 14 96 ed 1e 52 85 93 a4 ab b6 df 46 2d "
+ "de 8a 09 68 df fe 46 83 11 68 57 a2 32 f5 eb f6 "
+ "c8 5b e2 38 74 5a d0 f3 8f 76 7a 5f db f4 86 fb ";
+
+// RSASSA-PSS Signature Example 10.5
+// Message to be signed:
+static const char message_10_5[] =
+ "f1 03 6e 00 8e 71 e9 64 da dc 92 19 ed 30 e1 7f "
+ "06 b4 b6 8a 95 5c 16 b3 12 b1 ed df 02 8b 74 97 "
+ "6b ed 6b 3f 6a 63 d4 e7 78 59 24 3c 9c cc dc 98 "
+ "01 65 23 ab b0 24 83 b3 55 91 c3 3a ad 81 21 3b "
+ "b7 c7 bb 1a 47 0a ab c1 0d 44 25 6c 4d 45 59 d9 "
+ "16 ";
+// Salt:
+static const char salt_10_5[] =
+ "ef a8 bf f9 62 12 b2 f4 a3 f3 71 a1 0d 57 41 52 "
+ "65 5f 5d fb ";
+// Signature:
+static const char signature_10_5[] =
+ "7e 09 35 ea 18 f4 d6 c1 d1 7c e8 2e b2 b3 83 6c "
+ "55 b3 84 58 9c e1 9d fe 74 33 63 ac 99 48 d1 f3 "
+ "46 b7 bf dd fe 92 ef d7 8a db 21 fa ef c8 9a de "
+ "42 b1 0f 37 40 03 fe 12 2e 67 42 9a 1c b8 cb d1 "
+ "f8 d9 01 45 64 c4 4d 12 01 16 f4 99 0f 1a 6e 38 "
+ "77 4c 19 4b d1 b8 21 32 86 b0 77 b0 49 9d 2e 7b "
+ "3f 43 4a b1 22 89 c5 56 68 4d ee d7 81 31 93 4b "
+ "b3 dd 65 37 23 6f 7c 6f 3d cb 09 d4 76 be 07 72 "
+ "1e 37 e1 ce ed 9b 2f 7b 40 68 87 bd 53 15 73 05 "
+ "e1 c8 b4 f8 4d 73 3b c1 e1 86 fe 06 cc 59 b6 ed "
+ "b8 f4 bd 7f fe fd f4 f7 ba 9c fb 9d 57 06 89 b5 "
+ "a1 a4 10 9a 74 6a 69 08 93 db 37 99 25 5a 0c b9 "
+ "21 5d 2d 1c d4 90 59 0e 95 2e 8c 87 86 aa 00 11 "
+ "26 52 52 47 0c 04 1d fb c3 ee c7 c3 cb f7 1c 24 "
+ "86 9d 11 5c 0c b4 a9 56 f5 6d 53 0b 80 ab 58 9a "
+ "cf ef c6 90 75 1d df 36 e8 d3 83 f8 3c ed d2 cc ";
+
+// RSASSA-PSS Signature Example 10.6
+// Message to be signed:
+static const char message_10_6[] =
+ "25 f1 08 95 a8 77 16 c1 37 45 0b b9 51 9d fa a1 "
+ "f2 07 fa a9 42 ea 88 ab f7 1e 9c 17 98 00 85 b5 "
+ "55 ae ba b7 62 64 ae 2a 3a b9 3c 2d 12 98 11 91 "
+ "dd ac 6f b5 94 9e b3 6a ee 3c 5d a9 40 f0 07 52 "
+ "c9 16 d9 46 08 fa 7d 97 ba 6a 29 15 b6 88 f2 03 "
+ "23 d4 e9 d9 68 01 d8 9a 72 ab 58 92 dc 21 17 c0 "
+ "74 34 fc f9 72 e0 58 cf 8c 41 ca 4b 4f f5 54 f7 "
+ "d5 06 8a d3 15 5f ce d0 f3 12 5b c0 4f 91 93 37 "
+ "8a 8f 5c 4c 3b 8c b4 dd 6d 1c c6 9d 30 ec ca 6e "
+ "aa 51 e3 6a 05 73 0e 9e 34 2e 85 5b af 09 9d ef "
+ "b8 af d7 ";
+// Salt:
+static const char salt_10_6[] =
+ "ad 8b 15 23 70 36 46 22 4b 66 0b 55 08 85 91 7c "
+ "a2 d1 df 28 ";
+// Signature:
+static const char signature_10_6[] =
+ "6d 3b 5b 87 f6 7e a6 57 af 21 f7 54 41 97 7d 21 "
+ "80 f9 1b 2c 5f 69 2d e8 29 55 69 6a 68 67 30 d9 "
+ "b9 77 8d 97 07 58 cc b2 60 71 c2 20 9f fb d6 12 "
+ "5b e2 e9 6e a8 1b 67 cb 9b 93 08 23 9f da 17 f7 "
+ "b2 b6 4e cd a0 96 b6 b9 35 64 0a 5a 1c b4 2a 91 "
+ "55 b1 c9 ef 7a 63 3a 02 c5 9f 0d 6e e5 9b 85 2c "
+ "43 b3 50 29 e7 3c 94 0f f0 41 0e 8f 11 4e ed 46 "
+ "bb d0 fa e1 65 e4 2b e2 52 8a 40 1c 3b 28 fd 81 "
+ "8e f3 23 2d ca 9f 4d 2a 0f 51 66 ec 59 c4 23 96 "
+ "d6 c1 1d bc 12 15 a5 6f a1 71 69 db 95 75 34 3e "
+ "f3 4f 9d e3 2a 49 cd c3 17 49 22 f2 29 c2 3e 18 "
+ "e4 5d f9 35 31 19 ec 43 19 ce dc e7 a1 7c 64 08 "
+ "8c 1f 6f 52 be 29 63 41 00 b3 91 9d 38 f3 d1 ed "
+ "94 e6 89 1e 66 a7 3b 8f b8 49 f5 87 4d f5 94 59 "
+ "e2 98 c7 bb ce 2e ee 78 2a 19 5a a6 6f e2 d0 73 "
+ "2b 25 e5 95 f5 7d 3e 06 1b 1f c3 e4 06 3b f9 8f ";
+
+// One RSASSA-PSS test case. Each field is a hex dump in the
+// "xx xx xx ... " format that DecodeTestInput() parses.
+struct SignatureExample {
+  const char* message;
+  const char* salt;
+  const char* signature;
+};
+
+// A public key (hex-encoded modulus and exponent, as above) together with
+// six signature examples produced with that key.
+struct PSSTestVector {
+  const char* modulus_n;
+  const char* public_exponent_e;
+  SignatureExample example[6];
+};
+
+// Vectors for examples 1 (1024-bit), 9 (1536-bit) and 10 (2048-bit) keys.
+static const PSSTestVector pss_test[] = {
+  {
+    rsa_modulus_n_1,
+    rsa_public_exponent_e_1,
+    {
+      { message_1_1, salt_1_1, signature_1_1 },
+      { message_1_2, salt_1_2, signature_1_2 },
+      { message_1_3, salt_1_3, signature_1_3 },
+      { message_1_4, salt_1_4, signature_1_4 },
+      { message_1_5, salt_1_5, signature_1_5 },
+      { message_1_6, salt_1_6, signature_1_6 },
+    }
+  },
+  {
+    rsa_modulus_n_9,
+    rsa_public_exponent_e_9,
+    {
+      { message_9_1, salt_9_1, signature_9_1 },
+      { message_9_2, salt_9_2, signature_9_2 },
+      { message_9_3, salt_9_3, signature_9_3 },
+      { message_9_4, salt_9_4, signature_9_4 },
+      { message_9_5, salt_9_5, signature_9_5 },
+      { message_9_6, salt_9_6, signature_9_6 },
+    }
+  },
+  {
+    rsa_modulus_n_10,
+    rsa_public_exponent_e_10,
+    {
+      { message_10_1, salt_10_1, signature_10_1 },
+      { message_10_2, salt_10_2, signature_10_2 },
+      { message_10_3, salt_10_3, signature_10_3 },
+      { message_10_4, salt_10_4, signature_10_4 },
+      { message_10_5, salt_10_5, signature_10_5 },
+      { message_10_6, salt_10_6, signature_10_6 },
+    }
+  },
+};
+
+// Returns the numeric value of hex digit |digit|. Precondition:
+// isxdigit(digit) — any other input yields a meaningless value. Callers
+// (see DecodeTestInput) validate with isxdigit() before calling.
+static uint8_t HexDigitValue(char digit) {
+  if ('0' <= digit && digit <= '9')
+    return digit - '0';
+  if ('a' <= digit && digit <= 'f')
+    return digit - 'a' + 10;
+  return digit - 'A' + 10;
+}
+
+// Decodes the "xx xx xx ... " hex format used by the vectors above into
+// |*out|: each octet is two hex digits followed by exactly one space
+// (including a trailing space after the last octet). Returns false on any
+// malformed input. The short-circuit isxdigit() checks also stop cleanly on
+// an odd-length trailing digit, so in[1]/in[2] are never read past the NUL.
+static bool DecodeTestInput(const char* in, std::vector<uint8_t>* out) {
+  out->clear();
+  while (in[0] != '\0') {
+    if (!isxdigit(in[0]) || !isxdigit(in[1]) || in[2] != ' ')
+      return false;
+    uint8_t octet = HexDigitValue(in[0]) * 16 + HexDigitValue(in[1]);
+    out->push_back(octet);
+    in += 3;
+  }
+  return true;
+}
+
+// PrependASN1Length prepends an ASN.1 serialized length to the beginning of
+// |out|, using DER definite-length form: short form for len < 128, long form
+// with an 0x81 (one length octet) or 0x82 (two length octets) prefix up to
+// 65535. Larger lengths CHECK-fail — sufficient for these fixed vectors.
+static void PrependASN1Length(std::vector<uint8_t>* out, size_t len) {
+  if (len < 128) {
+    out->insert(out->begin(), static_cast<uint8_t>(len));
+  } else if (len < 256) {
+    out->insert(out->begin(), static_cast<uint8_t>(len));
+    out->insert(out->begin(), 0x81);
+  } else if (len < 0x10000) {
+    out->insert(out->begin(), static_cast<uint8_t>(len));
+    out->insert(out->begin(), static_cast<uint8_t>(len >> 8));
+    out->insert(out->begin(), 0x82);
+  } else {
+    CHECK(false) << "ASN.1 length not handled: " << len;
+  }
+}
+
+// DER-encodes an RSA public key (|modulus_n|, |public_exponent_e|) into a
+// SubjectPublicKeyInfo structure in |*public_key_info|, suitable for passing
+// to SignatureVerifier::VerifyInitRSAPSS. Always returns true. The encoding
+// is built back-to-front by repeated inserts at the front of the vector —
+// quadratic, but fine for test-sized inputs.
+static bool EncodeRSAPublicKey(const std::vector<uint8_t>& modulus_n,
+                               const std::vector<uint8_t>& public_exponent_e,
+                               std::vector<uint8_t>* public_key_info) {
+  // The public key is specified as the following ASN.1 structure:
+  //   SubjectPublicKeyInfo  ::=  SEQUENCE  {
+  //       algorithm            AlgorithmIdentifier,
+  //       subjectPublicKey     BIT STRING  }
+  //
+  // The algorithm is specified as the following ASN.1 structure:
+  //    AlgorithmIdentifier  ::=  SEQUENCE  {
+  //        algorithm               OBJECT IDENTIFIER,
+  //        parameters              ANY DEFINED BY algorithm OPTIONAL  }
+  //
+  // An RSA public key is specified as the following ASN.1 structure:
+  //    RSAPublicKey ::= SEQUENCE {
+  //        modulus           INTEGER,  -- n
+  //        publicExponent    INTEGER   -- e
+  //    }
+  static const uint8_t kIntegerTag = 0x02;
+  static const uint8_t kBitStringTag = 0x03;
+  static const uint8_t kSequenceTag = 0x30;
+  public_key_info->clear();
+
+  // Encode the public exponent e as an INTEGER.
+  // NOTE(review): no leading-zero padding is applied to e, so an exponent
+  // with its top bit set would encode as a negative INTEGER. Fine for the
+  // fixed 0x010001 vectors here — confirm before reuse with other inputs.
+  public_key_info->insert(public_key_info->begin(),
+                          public_exponent_e.begin(),
+                          public_exponent_e.end());
+  PrependASN1Length(public_key_info, public_exponent_e.size());
+  public_key_info->insert(public_key_info->begin(), kIntegerTag);
+
+  // Encode the modulus n as an INTEGER.
+  public_key_info->insert(public_key_info->begin(),
+                          modulus_n.begin(), modulus_n.end());
+  size_t modulus_size = modulus_n.size();
+  // A leading 0x00 keeps the INTEGER positive when the modulus' top bit is
+  // set. NOTE(review): assumes |modulus_n| is non-empty — true for all
+  // vectors in this file.
+  if (modulus_n[0] & 0x80) {
+    public_key_info->insert(public_key_info->begin(), 0x00);
+    modulus_size++;
+  }
+  PrependASN1Length(public_key_info, modulus_size);
+  public_key_info->insert(public_key_info->begin(), kIntegerTag);
+
+  // Encode the RSAPublicKey SEQUENCE.
+  PrependASN1Length(public_key_info, public_key_info->size());
+  public_key_info->insert(public_key_info->begin(), kSequenceTag);
+
+  // Encode the BIT STRING.
+  // Number of unused bits.
+  public_key_info->insert(public_key_info->begin(), 0x00);
+  PrependASN1Length(public_key_info, public_key_info->size());
+  public_key_info->insert(public_key_info->begin(), kBitStringTag);
+
+  // Encode the AlgorithmIdentifier (rsaEncryption OID 1.2.840.113549.1.1.1
+  // with NULL parameters, pre-serialized).
+  static const uint8_t algorithm[] = {
+    0x30, 0x0d,  // a SEQUENCE of length 13
+      0x06, 0x09,  // an OBJECT IDENTIFIER of length 9
+        0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00,
+  };
+  public_key_info->insert(public_key_info->begin(),
+                          algorithm, algorithm + sizeof(algorithm));
+
+  // Encode the outermost SEQUENCE.
+  PrependASN1Length(public_key_info, public_key_info->size());
+  public_key_info->insert(public_key_info->begin(), kSequenceTag);
+
+  return true;
+}
+
+// For each key: builds a SubjectPublicKeyInfo from the raw modulus/exponent,
+// then checks every signature example once as-is (must verify) and three
+// perturbed ways — modified message byte, truncated message, corrupted
+// signature byte (each must fail to verify).
+TEST(SignatureVerifierTest, VerifyRSAPSS) {
+  for (unsigned int i = 0; i < arraysize(pss_test); i++) {
+    SCOPED_TRACE(i);
+    std::vector<uint8_t> modulus_n;
+    std::vector<uint8_t> public_exponent_e;
+    ASSERT_TRUE(DecodeTestInput(pss_test[i].modulus_n, &modulus_n));
+    ASSERT_TRUE(DecodeTestInput(pss_test[i].public_exponent_e,
+                                &public_exponent_e));
+    std::vector<uint8_t> public_key_info;
+    ASSERT_TRUE(EncodeRSAPublicKey(modulus_n, public_exponent_e,
+                                   &public_key_info));
+
+    for (unsigned int j = 0; j < arraysize(pss_test[i].example); j++) {
+      SCOPED_TRACE(j);
+      std::vector<uint8_t> message;
+      std::vector<uint8_t> salt;
+      std::vector<uint8_t> signature;
+      ASSERT_TRUE(DecodeTestInput(pss_test[i].example[j].message, &message));
+      ASSERT_TRUE(DecodeTestInput(pss_test[i].example[j].salt, &salt));
+      ASSERT_TRUE(DecodeTestInput(pss_test[i].example[j].signature,
+                                  &signature));
+
+      // The same verifier instance is reused for each Init/Update/Final
+      // cycle below.
+      crypto::SignatureVerifier verifier;
+      bool ok;
+
+      // Positive test.
+      ok = verifier.VerifyInitRSAPSS(crypto::SignatureVerifier::SHA1,
+                                     crypto::SignatureVerifier::SHA1,
+                                     salt.size(),
+                                     &signature[0], signature.size(),
+                                     &public_key_info[0],
+                                     public_key_info.size());
+      ASSERT_TRUE(ok);
+      verifier.VerifyUpdate(&message[0], message.size());
+      ok = verifier.VerifyFinal();
+      EXPECT_TRUE(ok);
+
+      // Modify the first byte of the message.
+      ok = verifier.VerifyInitRSAPSS(crypto::SignatureVerifier::SHA1,
+                                     crypto::SignatureVerifier::SHA1,
+                                     salt.size(),
+                                     &signature[0], signature.size(),
+                                     &public_key_info[0],
+                                     public_key_info.size());
+      ASSERT_TRUE(ok);
+      message[0] += 1;
+      verifier.VerifyUpdate(&message[0], message.size());
+      message[0] -= 1;  // Restore the original byte for later sub-tests.
+      ok = verifier.VerifyFinal();
+      EXPECT_FALSE(ok);
+
+      // Truncate the message.
+      ASSERT_FALSE(message.empty());
+      ok = verifier.VerifyInitRSAPSS(crypto::SignatureVerifier::SHA1,
+                                     crypto::SignatureVerifier::SHA1,
+                                     salt.size(),
+                                     &signature[0], signature.size(),
+                                     &public_key_info[0],
+                                     public_key_info.size());
+      ASSERT_TRUE(ok);
+      verifier.VerifyUpdate(&message[0], message.size() - 1);
+      ok = verifier.VerifyFinal();
+      EXPECT_FALSE(ok);
+
+      // Corrupt the signature.
+      signature[0] += 1;
+      ok = verifier.VerifyInitRSAPSS(crypto::SignatureVerifier::SHA1,
+                                     crypto::SignatureVerifier::SHA1,
+                                     salt.size(),
+                                     &signature[0], signature.size(),
+                                     &public_key_info[0],
+                                     public_key_info.size());
+      signature[0] -= 1;  // Restore for the next iteration.
+      ASSERT_TRUE(ok);
+      verifier.VerifyUpdate(&message[0], message.size());
+      ok = verifier.VerifyFinal();
+      EXPECT_FALSE(ok);
+    }
+  }
+}
diff --git a/libchrome/crypto/symmetric_key.cc b/libchrome/crypto/symmetric_key.cc
new file mode 100644
index 0000000..e3ecf62
--- /dev/null
+++ b/libchrome/crypto/symmetric_key.cc
@@ -0,0 +1,110 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/symmetric_key.h"
+
+#include <openssl/evp.h>
+#include <openssl/rand.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <utility>
+
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+#include "crypto/openssl_util.h"
+
+namespace crypto {
+
+SymmetricKey::~SymmetricKey() {
+  // Best-effort scrub of the confidential key bytes before the string is
+  // freed. NOTE(review): a compiler may elide this as a dead store since
+  // |key_| is destroyed immediately afterwards — confirm, or consider
+  // OPENSSL_cleanse for a guaranteed wipe.
+  std::fill(key_.begin(), key_.end(), '\0');  // Zero out the confidential key.
+}
+
+// static
+// Generates |key_size_in_bits| of cryptographically random key material via
+// BoringSSL's RAND_bytes. Only AES with 128- or 256-bit keys is supported;
+// returns null for any other request or on RNG failure.
+std::unique_ptr<SymmetricKey> SymmetricKey::GenerateRandomKey(
+    Algorithm algorithm,
+    size_t key_size_in_bits) {
+  DCHECK_EQ(AES, algorithm);
+
+  // Whitelist supported key sizes to avoid accidentally relying on
+  // algorithms available in NSS but not BoringSSL and vice
+  // versa. Note that BoringSSL does not support AES-192.
+  if (key_size_in_bits != 128 && key_size_in_bits != 256)
+    return nullptr;
+
+  size_t key_size_in_bytes = key_size_in_bits / 8;
+  DCHECK_EQ(key_size_in_bits, key_size_in_bytes * 8);
+
+  // Unreachable after the 128/256 whitelist above; kept as a safety net.
+  if (key_size_in_bytes == 0)
+    return nullptr;
+
+  OpenSSLErrStackTracer err_tracer(FROM_HERE);
+  std::unique_ptr<SymmetricKey> key(new SymmetricKey);
+  // WriteInto sizes |key->key_| to |key_size_in_bytes| characters and returns
+  // a writable pointer to its buffer (the +1 covers the terminating NUL).
+  uint8_t* key_data = reinterpret_cast<uint8_t*>(
+      base::WriteInto(&key->key_, key_size_in_bytes + 1));
+
+  // RAND_bytes returns 1 on success.
+  int rv = RAND_bytes(key_data, static_cast<int>(key_size_in_bytes));
+  return rv == 1 ? std::move(key) : nullptr;
+}
+
+// static
+// Derives |key_size_in_bits| bits of key material from |password| and |salt|
+// via PKCS#5 PBKDF2-HMAC-SHA1 regardless of |algorithm| — |algorithm| only
+// gates the allowed key sizes. NOTE(review): unlike the NSS implementation,
+// an empty |salt| or iterations == 0 is not rejected here — confirm intended.
+std::unique_ptr<SymmetricKey> SymmetricKey::DeriveKeyFromPassword(
+    Algorithm algorithm,
+    const std::string& password,
+    const std::string& salt,
+    size_t iterations,
+    size_t key_size_in_bits) {
+  DCHECK(algorithm == AES || algorithm == HMAC_SHA1);
+
+  if (algorithm == AES) {
+    // Whitelist supported key sizes to avoid accidentally relying on
+    // algorithms available in NSS but not BoringSSL and vice
+    // versa. Note that BoringSSL does not support AES-192.
+    if (key_size_in_bits != 128 && key_size_in_bits != 256)
+      return nullptr;
+  }
+
+  size_t key_size_in_bytes = key_size_in_bits / 8;
+  DCHECK_EQ(key_size_in_bits, key_size_in_bytes * 8);
+
+  if (key_size_in_bytes == 0)
+    return nullptr;
+
+  OpenSSLErrStackTracer err_tracer(FROM_HERE);
+  std::unique_ptr<SymmetricKey> key(new SymmetricKey);
+  // WriteInto sizes |key->key_| and hands PBKDF2 a writable buffer.
+  uint8_t* key_data = reinterpret_cast<uint8_t*>(
+      base::WriteInto(&key->key_, key_size_in_bytes + 1));
+  // PKCS5_PBKDF2_HMAC_SHA1 returns 1 on success.
+  int rv = PKCS5_PBKDF2_HMAC_SHA1(
+      password.data(), password.length(),
+      reinterpret_cast<const uint8_t*>(salt.data()), salt.length(),
+      static_cast<unsigned>(iterations),
+      key_size_in_bytes, key_data);
+  return rv == 1 ? std::move(key) : nullptr;
+}
+
+// static
+// Wraps |raw_key| bytes in a SymmetricKey. Only the AES size whitelist is
+// enforced; HMAC_SHA1 keys are accepted at any length.
+std::unique_ptr<SymmetricKey> SymmetricKey::Import(Algorithm algorithm,
+                                                   const std::string& raw_key) {
+  if (algorithm == AES) {
+    // Whitelist supported key sizes to avoid accidentally relying on
+    // algorithms available in NSS but not BoringSSL and vice
+    // versa. Note that BoringSSL does not support AES-192.
+    if (raw_key.size() != 128/8 && raw_key.size() != 256/8)
+      return nullptr;
+  }
+
+  std::unique_ptr<SymmetricKey> key(new SymmetricKey);
+  key->key_ = raw_key;
+  return key;
+}
+
+// Copies the raw key bytes into |*raw_key|. Never fails in the OpenSSL
+// implementation. Warning: the result holds confidential key material.
+bool SymmetricKey::GetRawKey(std::string* raw_key) {
+  *raw_key = key_;
+  return true;
+}
+
+SymmetricKey::SymmetricKey() = default;
+
+} // namespace crypto
diff --git a/libchrome/crypto/symmetric_key.h b/libchrome/crypto/symmetric_key.h
new file mode 100644
index 0000000..8862708
--- /dev/null
+++ b/libchrome/crypto/symmetric_key.h
@@ -0,0 +1,107 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_SYMMETRIC_KEY_H_
+#define CRYPTO_SYMMETRIC_KEY_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <string>
+
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "crypto/crypto_export.h"
+
+#if defined(NACL_WIN64)
+// See comments for crypto_nacl_win64 in crypto.gyp.
+// Must test for NACL_WIN64 before OS_WIN since former is a subset of latter.
+#include "crypto/scoped_capi_types.h"
+#elif defined(USE_NSS_CERTS) || \
+ (!defined(USE_OPENSSL) && (defined(OS_WIN) || defined(OS_MACOSX)))
+#include "crypto/scoped_nss_types.h"
+#endif
+
+namespace crypto {
+
+// Wraps a platform-specific symmetric key and allows it to be held in a
+// std::unique_ptr.
+class CRYPTO_EXPORT SymmetricKey {
+ public:
+  // Defines the algorithm that a key will be used with. See also
+  // class Encryptor.
+  enum Algorithm {
+    AES,
+    HMAC_SHA1,
+  };
+
+  virtual ~SymmetricKey();
+
+  // Generates a random key suitable to be used with |algorithm| and of
+  // |key_size_in_bits| bits. |key_size_in_bits| must be a multiple of 8.
+  // Returns null on failure (e.g. an unsupported key size).
+  static std::unique_ptr<SymmetricKey> GenerateRandomKey(
+      Algorithm algorithm,
+      size_t key_size_in_bits);
+
+  // Derives a key from the supplied password and salt using PBKDF2, suitable
+  // for use with specified |algorithm|. Note |algorithm| is not the algorithm
+  // used to derive the key from the password. |key_size_in_bits| must be a
+  // multiple of 8. Returns null on failure.
+  static std::unique_ptr<SymmetricKey> DeriveKeyFromPassword(
+      Algorithm algorithm,
+      const std::string& password,
+      const std::string& salt,
+      size_t iterations,
+      size_t key_size_in_bits);
+
+  // Imports an array of key bytes in |raw_key|. This key may have been
+  // generated by GenerateRandomKey or DeriveKeyFromPassword and exported with
+  // GetRawKey, or via another compatible method. The key must be of suitable
+  // size for use with |algorithm|. Returns null on failure.
+  static std::unique_ptr<SymmetricKey> Import(Algorithm algorithm,
+                                              const std::string& raw_key);
+
+  // Returns the platform-specific key handle (or raw bytes for OpenSSL).
+#if defined(NACL_WIN64)
+  HCRYPTKEY key() const { return key_.get(); }
+#elif defined(USE_OPENSSL)
+  const std::string& key() { return key_; }
+#elif defined(USE_NSS_CERTS) || defined(OS_WIN) || defined(OS_MACOSX)
+  PK11SymKey* key() const { return key_.get(); }
+#endif
+
+  // Extracts the raw key from the platform specific data.
+  // Warning: |raw_key| holds the raw key as bytes and thus must be handled
+  // carefully.
+  bool GetRawKey(std::string* raw_key);
+
+ private:
+#if defined(NACL_WIN64)
+  SymmetricKey(HCRYPTPROV provider, HCRYPTKEY key,
+               const void* key_data, size_t key_size_in_bytes);
+
+  ScopedHCRYPTPROV provider_;
+  ScopedHCRYPTKEY key_;
+
+  // Contains the raw key, if it is known during initialization and when it
+  // is likely that the associated |provider_| will be unable to export the
+  // |key_|. This is the case of HMAC keys when the key size exceeds 16 bytes
+  // when using the default RSA provider.
+  // TODO(rsleevi): See if KP_EFFECTIVE_KEYLEN is the reason why CryptExportKey
+  // fails with NTE_BAD_KEY/NTE_BAD_LEN
+  std::string raw_key_;
+#elif defined(USE_OPENSSL)
+  // Declaration only: defined out-of-line in symmetric_key.cc
+  // (`SymmetricKey::SymmetricKey() = default;`). The previous inline `{}`
+  // body here was a redefinition of the same default constructor.
+  SymmetricKey();
+  // Raw key bytes; zeroed out by the destructor.
+  std::string key_;
+#elif defined(USE_NSS_CERTS) || defined(OS_WIN) || defined(OS_MACOSX)
+  explicit SymmetricKey(PK11SymKey* key);
+  ScopedPK11SymKey key_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(SymmetricKey);
+};
+
+} // namespace crypto
+
+#endif // CRYPTO_SYMMETRIC_KEY_H_
diff --git a/libchrome/crypto/symmetric_key_nss.cc b/libchrome/crypto/symmetric_key_nss.cc
new file mode 100644
index 0000000..e3aacc7
--- /dev/null
+++ b/libchrome/crypto/symmetric_key_nss.cc
@@ -0,0 +1,151 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/symmetric_key.h"
+
+#include <nss.h>
+#include <pk11pub.h>
+#include <stddef.h>
+
+#include "base/logging.h"
+#include "crypto/nss_util.h"
+#include "crypto/scoped_nss_types.h"
+
+namespace crypto {
+
+SymmetricKey::~SymmetricKey() {}
+
+// static
+// Generates a random AES key of |key_size_in_bits| (128 or 256 only) via
+// NSS PK11_KeyGen on the internal slot. Returns std::unique_ptr to match
+// the declaration in symmetric_key.h (the previous raw SymmetricKey*/NULL
+// return type did not match and could not compile against that header);
+// returns null on failure.
+std::unique_ptr<SymmetricKey> SymmetricKey::GenerateRandomKey(
+    Algorithm algorithm,
+    size_t key_size_in_bits) {
+  DCHECK_EQ(AES, algorithm);
+
+  EnsureNSSInit();
+
+  // Whitelist supported key sizes to avoid accidentally relying on
+  // algorithms available in NSS but not BoringSSL and vice
+  // versa. Note that BoringSSL does not support AES-192.
+  if (key_size_in_bits != 128 && key_size_in_bits != 256)
+    return nullptr;
+
+  ScopedPK11Slot slot(PK11_GetInternalSlot());
+  if (!slot.get())
+    return nullptr;
+
+  PK11SymKey* sym_key = PK11_KeyGen(slot.get(), CKM_AES_KEY_GEN, NULL,
+                                    key_size_in_bits / 8, NULL);
+  if (!sym_key)
+    return nullptr;
+
+  // Ownership of |sym_key| passes to the SymmetricKey's ScopedPK11SymKey.
+  return std::unique_ptr<SymmetricKey>(new SymmetricKey(sym_key));
+}
+
+// static
+// Derives key material with PBKDF2 (PKCS#5 v2) via NSS. |algorithm| only
+// selects the cipher OID in the algorithm ID; the PRF is always HMAC-SHA1.
+// Returns std::unique_ptr to match the declaration in symmetric_key.h (the
+// previous raw SymmetricKey*/NULL return type did not match that header);
+// returns null on rejected parameters or NSS failure.
+std::unique_ptr<SymmetricKey> SymmetricKey::DeriveKeyFromPassword(
+    Algorithm algorithm,
+    const std::string& password,
+    const std::string& salt,
+    size_t iterations,
+    size_t key_size_in_bits) {
+  EnsureNSSInit();
+  if (salt.empty() || iterations == 0 || key_size_in_bits == 0)
+    return nullptr;
+
+  if (algorithm == AES) {
+    // Whitelist supported key sizes to avoid accidentally relying on
+    // algorithms available in NSS but not BoringSSL and vice
+    // versa. Note that BoringSSL does not support AES-192.
+    if (key_size_in_bits != 128 && key_size_in_bits != 256)
+      return nullptr;
+  }
+
+  // NSS takes non-const SECItem buffers; the casts do not mutate the strings.
+  SECItem password_item;
+  password_item.type = siBuffer;
+  password_item.data = reinterpret_cast<unsigned char*>(
+      const_cast<char *>(password.data()));
+  password_item.len = password.size();
+
+  SECItem salt_item;
+  salt_item.type = siBuffer;
+  salt_item.data = reinterpret_cast<unsigned char*>(
+      const_cast<char *>(salt.data()));
+  salt_item.len = salt.size();
+
+  SECOidTag cipher_algorithm =
+      algorithm == AES ? SEC_OID_AES_256_CBC : SEC_OID_HMAC_SHA1;
+  ScopedSECAlgorithmID alg_id(PK11_CreatePBEV2AlgorithmID(SEC_OID_PKCS5_PBKDF2,
+                                                          cipher_algorithm,
+                                                          SEC_OID_HMAC_SHA1,
+                                                          key_size_in_bits / 8,
+                                                          iterations,
+                                                          &salt_item));
+  if (!alg_id.get())
+    return nullptr;
+
+  ScopedPK11Slot slot(PK11_GetInternalSlot());
+  if (!slot.get())
+    return nullptr;
+
+  PK11SymKey* sym_key = PK11_PBEKeyGen(slot.get(), alg_id.get(), &password_item,
+                                       PR_FALSE, NULL);
+  if (!sym_key)
+    return nullptr;
+
+  // Ownership of |sym_key| passes to the SymmetricKey's ScopedPK11SymKey.
+  return std::unique_ptr<SymmetricKey>(new SymmetricKey(sym_key));
+}
+
+// static
+// Wraps |raw_key| in an NSS key object on the internal slot. Only the AES
+// size whitelist is enforced; HMAC_SHA1 keys are accepted at any length.
+// Returns std::unique_ptr to match the declaration in symmetric_key.h (the
+// previous raw SymmetricKey*/NULL return type did not match that header);
+// returns null on failure.
+std::unique_ptr<SymmetricKey> SymmetricKey::Import(Algorithm algorithm,
+                                                   const std::string& raw_key) {
+  EnsureNSSInit();
+
+  if (algorithm == AES) {
+    // Whitelist supported key sizes to avoid accidentally relying on
+    // algorithms available in NSS but not BoringSSL and vice
+    // versa. Note that BoringSSL does not support AES-192.
+    if (raw_key.size() != 128/8 && raw_key.size() != 256/8)
+      return nullptr;
+  }
+
+  CK_MECHANISM_TYPE cipher =
+      algorithm == AES ? CKM_AES_CBC : CKM_SHA_1_HMAC;
+
+  // NSS takes a non-const SECItem buffer; the cast does not mutate raw_key.
+  SECItem key_item;
+  key_item.type = siBuffer;
+  key_item.data = reinterpret_cast<unsigned char*>(
+      const_cast<char *>(raw_key.data()));
+  key_item.len = raw_key.size();
+
+  ScopedPK11Slot slot(PK11_GetInternalSlot());
+  if (!slot.get())
+    return nullptr;
+
+  // The exact value of the |origin| argument doesn't matter to NSS as long as
+  // it's not PK11_OriginFortezzaHack, so we pass PK11_OriginUnwrap as a
+  // placeholder.
+  PK11SymKey* sym_key = PK11_ImportSymKey(slot.get(), cipher, PK11_OriginUnwrap,
+                                          CKA_ENCRYPT, &key_item, NULL);
+  if (!sym_key)
+    return nullptr;
+
+  // Ownership of |sym_key| passes to the SymmetricKey's ScopedPK11SymKey.
+  return std::unique_ptr<SymmetricKey>(new SymmetricKey(sym_key));
+}
+
+// Extracts the raw key bytes from NSS into |*raw_key|. Fails if NSS refuses
+// to export the key value. Warning: the result holds confidential material.
+bool SymmetricKey::GetRawKey(std::string* raw_key) {
+  SECStatus rv = PK11_ExtractKeyValue(key_.get());
+  if (SECSuccess != rv)
+    return false;
+
+  // |key_item| points at data owned by |key_|; it is not freed here.
+  SECItem* key_item = PK11_GetKeyData(key_.get());
+  if (!key_item)
+    return false;
+
+  raw_key->assign(reinterpret_cast<char*>(key_item->data), key_item->len);
+  return true;
+}
+
+// Takes ownership of |key|, which must be non-null; it is held (and later
+// released) by the ScopedPK11SymKey member.
+SymmetricKey::SymmetricKey(PK11SymKey* key) : key_(key) {
+  DCHECK(key);
+}
+
+} // namespace crypto
diff --git a/libchrome/crypto/symmetric_key_unittest.cc b/libchrome/crypto/symmetric_key_unittest.cc
new file mode 100644
index 0000000..d954761
--- /dev/null
+++ b/libchrome/crypto/symmetric_key_unittest.cc
@@ -0,0 +1,215 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/symmetric_key.h"
+
+#include <memory>
+#include <string>
+
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST(SymmetricKeyTest, GenerateRandomKey) {
+ std::unique_ptr<crypto::SymmetricKey> key(
+ crypto::SymmetricKey::GenerateRandomKey(crypto::SymmetricKey::AES, 256));
+ ASSERT_TRUE(key);
+ std::string raw_key;
+ EXPECT_TRUE(key->GetRawKey(&raw_key));
+ EXPECT_EQ(32U, raw_key.size());
+
+ // Do it again and check that the keys are different.
+ // (Note: this has a one-in-10^77 chance of failure!)
+ std::unique_ptr<crypto::SymmetricKey> key2(
+ crypto::SymmetricKey::GenerateRandomKey(crypto::SymmetricKey::AES, 256));
+ ASSERT_TRUE(key2);
+ std::string raw_key2;
+ EXPECT_TRUE(key2->GetRawKey(&raw_key2));
+ EXPECT_EQ(32U, raw_key2.size());
+ EXPECT_NE(raw_key, raw_key2);
+}
+
+TEST(SymmetricKeyTest, ImportGeneratedKey) {
+ std::unique_ptr<crypto::SymmetricKey> key1(
+ crypto::SymmetricKey::GenerateRandomKey(crypto::SymmetricKey::AES, 256));
+ ASSERT_TRUE(key1);
+ std::string raw_key1;
+ EXPECT_TRUE(key1->GetRawKey(&raw_key1));
+
+ std::unique_ptr<crypto::SymmetricKey> key2(
+ crypto::SymmetricKey::Import(crypto::SymmetricKey::AES, raw_key1));
+ ASSERT_TRUE(key2);
+
+ std::string raw_key2;
+ EXPECT_TRUE(key2->GetRawKey(&raw_key2));
+
+ EXPECT_EQ(raw_key1, raw_key2);
+}
+
+TEST(SymmetricKeyTest, ImportDerivedKey) {
+ std::unique_ptr<crypto::SymmetricKey> key1(
+ crypto::SymmetricKey::DeriveKeyFromPassword(
+ crypto::SymmetricKey::HMAC_SHA1, "password", "somesalt", 1024, 160));
+ ASSERT_TRUE(key1);
+ std::string raw_key1;
+ EXPECT_TRUE(key1->GetRawKey(&raw_key1));
+
+ std::unique_ptr<crypto::SymmetricKey> key2(
+ crypto::SymmetricKey::Import(crypto::SymmetricKey::HMAC_SHA1, raw_key1));
+ ASSERT_TRUE(key2);
+
+ std::string raw_key2;
+ EXPECT_TRUE(key2->GetRawKey(&raw_key2));
+
+ EXPECT_EQ(raw_key1, raw_key2);
+}
+
+struct PBKDF2TestVector {
+ crypto::SymmetricKey::Algorithm algorithm;
+ const char* password;
+ const char* salt;
+ unsigned int rounds;
+ unsigned int key_size_in_bits;
+ const char* expected; // ASCII encoded hex bytes
+};
+
+class SymmetricKeyDeriveKeyFromPasswordTest
+ : public testing::TestWithParam<PBKDF2TestVector> {
+};
+
+TEST_P(SymmetricKeyDeriveKeyFromPasswordTest, DeriveKeyFromPassword) {
+ PBKDF2TestVector test_data(GetParam());
+ std::unique_ptr<crypto::SymmetricKey> key(
+ crypto::SymmetricKey::DeriveKeyFromPassword(
+ test_data.algorithm, test_data.password, test_data.salt,
+ test_data.rounds, test_data.key_size_in_bits));
+ ASSERT_TRUE(key);
+
+ std::string raw_key;
+ key->GetRawKey(&raw_key);
+ EXPECT_EQ(test_data.key_size_in_bits / 8, raw_key.size());
+ EXPECT_EQ(test_data.expected,
+ base::ToLowerASCII(base::HexEncode(raw_key.data(),
+ raw_key.size())));
+}
+
+static const PBKDF2TestVector kTestVectors[] = {
+ // These tests come from
+ // http://www.ietf.org/id/draft-josefsson-pbkdf2-test-vectors-00.txt
+ {
+ crypto::SymmetricKey::HMAC_SHA1,
+ "password",
+ "salt",
+ 1,
+ 160,
+ "0c60c80f961f0e71f3a9b524af6012062fe037a6",
+ },
+ {
+ crypto::SymmetricKey::HMAC_SHA1,
+ "password",
+ "salt",
+ 2,
+ 160,
+ "ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957",
+ },
+ {
+ crypto::SymmetricKey::HMAC_SHA1,
+ "password",
+ "salt",
+ 4096,
+ 160,
+ "4b007901b765489abead49d926f721d065a429c1",
+ },
+ // This test takes over 30s to run on the trybots.
+#if 0
+ {
+ crypto::SymmetricKey::HMAC_SHA1,
+ "password",
+ "salt",
+ 16777216,
+ 160,
+ "eefe3d61cd4da4e4e9945b3d6ba2158c2634e984",
+ },
+#endif
+
+ // These tests come from RFC 3962, via BSD source code at
+ // http://www.openbsd.org/cgi-bin/cvsweb/src/sbin/bioctl/pbkdf2.c?rev=HEAD&content-type=text/plain
+ {
+ crypto::SymmetricKey::HMAC_SHA1,
+ "password",
+ "ATHENA.MIT.EDUraeburn",
+ 1,
+ 160,
+ "cdedb5281bb2f801565a1122b25635150ad1f7a0",
+ },
+ {
+ crypto::SymmetricKey::HMAC_SHA1,
+ "password",
+ "ATHENA.MIT.EDUraeburn",
+ 2,
+ 160,
+ "01dbee7f4a9e243e988b62c73cda935da05378b9",
+ },
+ {
+ crypto::SymmetricKey::HMAC_SHA1,
+ "password",
+ "ATHENA.MIT.EDUraeburn",
+ 1200,
+ 160,
+ "5c08eb61fdf71e4e4ec3cf6ba1f5512ba7e52ddb",
+ },
+ {
+ crypto::SymmetricKey::HMAC_SHA1,
+ "password",
+ "\022" "4VxxV4\022", /* 0x1234567878563412 */
+ 5,
+ 160,
+ "d1daa78615f287e6a1c8b120d7062a493f98d203",
+ },
+ {
+ crypto::SymmetricKey::HMAC_SHA1,
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
+ "pass phrase equals block size",
+ 1200,
+ 160,
+ "139c30c0966bc32ba55fdbf212530ac9c5ec59f1",
+ },
+ {
+ crypto::SymmetricKey::HMAC_SHA1,
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
+ "pass phrase exceeds block size",
+ 1200,
+ 160,
+ "9ccad6d468770cd51b10e6a68721be611a8b4d28",
+ },
+ {
+ crypto::SymmetricKey::HMAC_SHA1,
+ "\360\235\204\236", /* g-clef (0xf09d849e) */
+ "EXAMPLE.COMpianist",
+ 50,
+ 160,
+ "6b9cf26d45455a43a5b8bb276a403b39e7fe37a0",
+ },
+
+ // Regression tests for AES keys, derived from the Linux NSS implementation.
+ {
+ crypto::SymmetricKey::AES,
+ "A test password",
+ "saltsalt",
+ 1,
+ 256,
+ "44899a7777f0e6e8b752f875f02044b8ac593de146de896f2e8a816e315a36de",
+ },
+ {
+ crypto::SymmetricKey::AES,
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
+ "pass phrase exceeds block size",
+ 20,
+ 256,
+ "e0739745dc28b8721ba402e05214d2ac1eab54cf72bee1fba388297a09eb493c",
+ },
+};
+
+INSTANTIATE_TEST_CASE_P(, SymmetricKeyDeriveKeyFromPasswordTest,
+ testing::ValuesIn(kTestVectors));
diff --git a/libchrome/crypto/third_party/nss/chromium-blapi.h b/libchrome/crypto/third_party/nss/chromium-blapi.h
new file mode 100644
index 0000000..2ca772e
--- /dev/null
+++ b/libchrome/crypto/third_party/nss/chromium-blapi.h
@@ -0,0 +1,101 @@
+/*
+ * crypto.h - public data structures and prototypes for the crypto library
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape security libraries.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1994-2000
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * Dr Vipul Gupta <vipul.gupta@sun.com>, Sun Microsystems Laboratories
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+/* $Id: blapi.h,v 1.27 2007/11/09 18:49:32 wtc%google.com Exp $ */
+
+#ifndef CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPI_H_
+#define CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPI_H_
+
+#include "crypto/third_party/nss/chromium-blapit.h"
+
+/******************************************/
+
+extern SHA256Context *SHA256_NewContext(void);
+extern void SHA256_DestroyContext(SHA256Context *cx, PRBool freeit);
+extern void SHA256_Begin(SHA256Context *cx);
+extern void SHA256_Update(SHA256Context *cx, const unsigned char *input,
+ unsigned int inputLen);
+extern void SHA256_End(SHA256Context *cx, unsigned char *digest,
+ unsigned int *digestLen, unsigned int maxDigestLen);
+extern SECStatus SHA256_HashBuf(unsigned char *dest, const unsigned char *src,
+ unsigned int src_length);
+extern SECStatus SHA256_Hash(unsigned char *dest, const char *src);
+extern void SHA256_TraceState(SHA256Context *cx);
+extern unsigned int SHA256_FlattenSize(SHA256Context *cx);
+extern SECStatus SHA256_Flatten(SHA256Context *cx,unsigned char *space);
+extern SHA256Context * SHA256_Resurrect(unsigned char *space, void *arg);
+extern void SHA256_Clone(SHA256Context *dest, SHA256Context *src);
+
+/******************************************/
+
+extern SHA512Context *SHA512_NewContext(void);
+extern void SHA512_DestroyContext(SHA512Context *cx, PRBool freeit);
+extern void SHA512_Begin(SHA512Context *cx);
+extern void SHA512_Update(SHA512Context *cx, const unsigned char *input,
+ unsigned int inputLen);
+extern void SHA512_End(SHA512Context *cx, unsigned char *digest,
+ unsigned int *digestLen, unsigned int maxDigestLen);
+extern SECStatus SHA512_HashBuf(unsigned char *dest, const unsigned char *src,
+ unsigned int src_length);
+extern SECStatus SHA512_Hash(unsigned char *dest, const char *src);
+extern void SHA512_TraceState(SHA512Context *cx);
+extern unsigned int SHA512_FlattenSize(SHA512Context *cx);
+extern SECStatus SHA512_Flatten(SHA512Context *cx,unsigned char *space);
+extern SHA512Context * SHA512_Resurrect(unsigned char *space, void *arg);
+extern void SHA512_Clone(SHA512Context *dest, SHA512Context *src);
+
+/******************************************/
+
+extern SHA384Context *SHA384_NewContext(void);
+extern void SHA384_DestroyContext(SHA384Context *cx, PRBool freeit);
+extern void SHA384_Begin(SHA384Context *cx);
+extern void SHA384_Update(SHA384Context *cx, const unsigned char *input,
+ unsigned int inputLen);
+extern void SHA384_End(SHA384Context *cx, unsigned char *digest,
+ unsigned int *digestLen, unsigned int maxDigestLen);
+extern SECStatus SHA384_HashBuf(unsigned char *dest, const unsigned char *src,
+ unsigned int src_length);
+extern SECStatus SHA384_Hash(unsigned char *dest, const char *src);
+extern void SHA384_TraceState(SHA384Context *cx);
+extern unsigned int SHA384_FlattenSize(SHA384Context *cx);
+extern SECStatus SHA384_Flatten(SHA384Context *cx,unsigned char *space);
+extern SHA384Context * SHA384_Resurrect(unsigned char *space, void *arg);
+extern void SHA384_Clone(SHA384Context *dest, SHA384Context *src);
+
+#endif /* CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPI_H_ */
diff --git a/libchrome/crypto/third_party/nss/chromium-blapit.h b/libchrome/crypto/third_party/nss/chromium-blapit.h
new file mode 100644
index 0000000..938547a
--- /dev/null
+++ b/libchrome/crypto/third_party/nss/chromium-blapit.h
@@ -0,0 +1,91 @@
+/*
+ * blapit.h - public data structures for the crypto library
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape security libraries.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1994-2000
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * Dr Vipul Gupta <vipul.gupta@sun.com> and
+ * Douglas Stebila <douglas@stebila.ca>, Sun Microsystems Laboratories
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+/* $Id: blapit.h,v 1.20 2007/02/28 19:47:37 rrelyea%redhat.com Exp $ */
+
+#ifndef CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPIT_H_
+#define CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPIT_H_
+
+#include "crypto/third_party/nss/chromium-prtypes.h"
+
+/*
+** A status code. Status's are used by procedures that return status
+** values. Again the motivation is so that a compiler can generate
+** warnings when return values are wrong. Correct testing of status codes:
+**
+** SECStatus rv;
+** rv = some_function (some_argument);
+** if (rv != SECSuccess)
+** do_an_error_thing();
+**
+*/
+typedef enum _SECStatus {
+ SECWouldBlock = -2,
+ SECFailure = -1,
+ SECSuccess = 0
+} SECStatus;
+
+#define SHA256_LENGTH 32 /* bytes */
+#define SHA384_LENGTH 48 /* bytes */
+#define SHA512_LENGTH 64 /* bytes */
+#define HASH_LENGTH_MAX SHA512_LENGTH
+
+/*
+ * Input block size for each hash algorithm.
+ */
+
+#define SHA256_BLOCK_LENGTH 64 /* bytes */
+#define SHA384_BLOCK_LENGTH 128 /* bytes */
+#define SHA512_BLOCK_LENGTH 128 /* bytes */
+#define HASH_BLOCK_LENGTH_MAX SHA512_BLOCK_LENGTH
+
+/***************************************************************************
+** Opaque objects
+*/
+
+struct SHA256ContextStr ;
+struct SHA512ContextStr ;
+
+typedef struct SHA256ContextStr SHA256Context;
+typedef struct SHA512ContextStr SHA512Context;
+/* SHA384Context is really a SHA512ContextStr. This is not a mistake. */
+typedef struct SHA512ContextStr SHA384Context;
+
+#endif /* CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPIT_H_ */
diff --git a/libchrome/crypto/third_party/nss/chromium-nss.h b/libchrome/crypto/third_party/nss/chromium-nss.h
new file mode 100644
index 0000000..437e6bd
--- /dev/null
+++ b/libchrome/crypto/third_party/nss/chromium-nss.h
@@ -0,0 +1,79 @@
+ /* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape security libraries.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1994-2000
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef CRYPTO_THIRD_PARTY_NSS_CHROMIUM_NSS_H_
+#define CRYPTO_THIRD_PARTY_NSS_CHROMIUM_NSS_H_
+
+// This file contains some functions we borrowed from NSS.
+
+#include <prtypes.h>
+#include <hasht.h>
+#include <keyhi.h>
+#include <secmod.h>
+
+#include "crypto/crypto_export.h"
+
+extern "C" SECStatus emsa_pss_verify(const unsigned char *mHash,
+ const unsigned char *em,
+ unsigned int emLen,
+ HASH_HashType hashAlg,
+ HASH_HashType maskHashAlg,
+ unsigned int sLen);
+
+// Like PK11_ImportEncryptedPrivateKeyInfo, but hardcoded for EC, and returns
+// the SECKEYPrivateKey.
+// See https://bugzilla.mozilla.org/show_bug.cgi?id=211546
+// When we use NSS 3.13.2 or later,
+// PK11_ImportEncryptedPrivateKeyInfoAndReturnKey can be used instead.
+SECStatus ImportEncryptedECPrivateKeyInfoAndReturnKey(
+ PK11SlotInfo* slot,
+ SECKEYEncryptedPrivateKeyInfo* epki,
+ SECItem* password,
+ SECItem* nickname,
+ SECItem* public_value,
+ PRBool permanent,
+ PRBool sensitive,
+ SECKEYPrivateKey** private_key,
+ void* wincx);
+
+// Like SEC_DerSignData.
+CRYPTO_EXPORT SECStatus DerSignData(PLArenaPool *arena,
+ SECItem *result,
+ SECItem *input,
+ SECKEYPrivateKey *key,
+ SECOidTag algo_id);
+
+#endif // CRYPTO_THIRD_PARTY_NSS_CHROMIUM_NSS_H_
diff --git a/libchrome/crypto/third_party/nss/chromium-prtypes.h b/libchrome/crypto/third_party/nss/chromium-prtypes.h
new file mode 100644
index 0000000..d5ea8a9
--- /dev/null
+++ b/libchrome/crypto/third_party/nss/chromium-prtypes.h
@@ -0,0 +1,77 @@
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape security libraries.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 2002
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* Emulates the real prtypes.h. Defines the types and macros that sha512.cc
+ * needs. */
+
+#ifndef CRYPTO_THIRD_PARTY_NSS_CHROMIUM_PRTYPES_H_
+#define CRYPTO_THIRD_PARTY_NSS_CHROMIUM_PRTYPES_H_
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "build/build_config.h"
+
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+#define IS_LITTLE_ENDIAN 1
+#else
+#define IS_BIG_ENDIAN 1
+#endif
+
+/*
+ * The C language requires that 'long' be at least 32 bits. 2147483647 is the
+ * largest signed 32-bit integer.
+ */
+#if LONG_MAX > 2147483647L
+#define PR_BYTES_PER_LONG 8
+#else
+#define PR_BYTES_PER_LONG 4
+#endif
+
+#define HAVE_LONG_LONG
+
+#if defined(__linux__)
+#define LINUX
+#endif
+
+typedef uint8_t PRUint8;
+typedef uint32_t PRUint32;
+
+typedef int PRBool;
+
+#define PR_MIN(x,y) ((x)<(y)?(x):(y))
+
+#endif /* CRYPTO_THIRD_PARTY_NSS_CHROMIUM_PRTYPES_H_ */
diff --git a/libchrome/crypto/third_party/nss/chromium-sha256.h b/libchrome/crypto/third_party/nss/chromium-sha256.h
new file mode 100644
index 0000000..52815ca
--- /dev/null
+++ b/libchrome/crypto/third_party/nss/chromium-sha256.h
@@ -0,0 +1,51 @@
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape security libraries.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 2002
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef CRYPTO_THIRD_PARTY_NSS_CHROMIUM_SHA_256_H_
+#define CRYPTO_THIRD_PARTY_NSS_CHROMIUM_SHA_256_H_
+
+#include "crypto/third_party/nss/chromium-prtypes.h"
+
+struct SHA256ContextStr {
+ union {
+ PRUint32 w[64]; /* message schedule, input buffer, plus 48 words */
+ PRUint8 b[256];
+ } u;
+ PRUint32 h[8]; /* 8 state variables */
+ PRUint32 sizeHi,sizeLo; /* 64-bit count of hashed bytes. */
+};
+
+#endif /* CRYPTO_THIRD_PARTY_NSS_CHROMIUM_SHA_256_H_ */
diff --git a/libchrome/crypto/third_party/nss/rsawrapr.c b/libchrome/crypto/third_party/nss/rsawrapr.c
new file mode 100644
index 0000000..73e498f
--- /dev/null
+++ b/libchrome/crypto/third_party/nss/rsawrapr.c
@@ -0,0 +1,160 @@
+/*
+ * PKCS#1 encoding and decoding functions.
+ * This file is believed to contain no code licensed from other parties.
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "seccomon.h"
+#include "secerr.h"
+#include "sechash.h"
+
+/* Needed for RSA-PSS functions */
+static const unsigned char eightZeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+/*
+ * Mask generation function MGF1 as defined in PKCS #1 v2.1 / RFC 3447.
+ */
+static SECStatus
+MGF1(HASH_HashType hashAlg, unsigned char *mask, unsigned int maskLen,
+ const unsigned char *mgfSeed, unsigned int mgfSeedLen)
+{
+ unsigned int digestLen;
+ PRUint32 counter, rounds;
+ unsigned char *tempHash, *temp;
+ const SECHashObject *hash;
+ void *hashContext;
+ unsigned char C[4];
+
+ hash = HASH_GetHashObject(hashAlg);
+ if (hash == NULL)
+ return SECFailure;
+
+ hashContext = (*hash->create)();
+ rounds = (maskLen + hash->length - 1) / hash->length;
+ for (counter = 0; counter < rounds; counter++) {
+ C[0] = (unsigned char)((counter >> 24) & 0xff);
+ C[1] = (unsigned char)((counter >> 16) & 0xff);
+ C[2] = (unsigned char)((counter >> 8) & 0xff);
+ C[3] = (unsigned char)(counter & 0xff);
+
+ /* This could be optimized when the clone functions in
+ * rawhash.c are implemented. */
+ (*hash->begin)(hashContext);
+ (*hash->update)(hashContext, mgfSeed, mgfSeedLen);
+ (*hash->update)(hashContext, C, sizeof C);
+
+ tempHash = mask + counter * hash->length;
+ if (counter != (rounds-1)) {
+ (*hash->end)(hashContext, tempHash, &digestLen, hash->length);
+ } else { /* we're in the last round and need to cut the hash */
+ temp = (unsigned char *)PORT_Alloc(hash->length);
+ (*hash->end)(hashContext, temp, &digestLen, hash->length);
+ PORT_Memcpy(tempHash, temp, maskLen - counter * hash->length);
+ PORT_Free(temp);
+ }
+ }
+ (*hash->destroy)(hashContext, PR_TRUE);
+
+ return SECSuccess;
+}
+
+/*
+ * Verify a RSA-PSS signature.
+ * Described in RFC 3447, section 9.1.2.
+ * We use mHash instead of M as input.
+ * emBits from the RFC is just modBits - 1, see section 8.1.2.
+ * We only support MGF1 as the MGF.
+ *
+ * NOTE: this code assumes modBits is a multiple of 8.
+ */
+SECStatus
+emsa_pss_verify(const unsigned char *mHash,
+ const unsigned char *em, unsigned int emLen,
+ HASH_HashType hashAlg, HASH_HashType maskHashAlg,
+ unsigned int sLen)
+{
+ const SECHashObject *hash;
+ void *hash_context;
+ unsigned char *db;
+ unsigned char *H_; /* H' from the RFC */
+ unsigned int i, dbMaskLen;
+ SECStatus rv;
+
+ hash = HASH_GetHashObject(hashAlg);
+ dbMaskLen = emLen - hash->length - 1;
+
+ /* Step 3 + 4 + 6 */
+ if ((emLen < (hash->length + sLen + 2)) ||
+ (em[emLen - 1] != 0xbc) ||
+ ((em[0] & 0x80) != 0)) {
+ PORT_SetError(SEC_ERROR_BAD_SIGNATURE);
+ return SECFailure;
+ }
+
+ /* Step 7 */
+ db = (unsigned char *)PORT_Alloc(dbMaskLen);
+ if (db == NULL) {
+ PORT_SetError(SEC_ERROR_NO_MEMORY);
+ return SECFailure;
+ }
+ /* &em[dbMaskLen] points to H, used as mgfSeed */
+ MGF1(maskHashAlg, db, dbMaskLen, &em[dbMaskLen], hash->length);
+
+ /* Step 8 */
+ for (i = 0; i < dbMaskLen; i++) {
+ db[i] ^= em[i];
+ }
+
+ /* Step 9 */
+ db[0] &= 0x7f;
+
+ /* Step 10 */
+ for (i = 0; i < (dbMaskLen - sLen - 1); i++) {
+ if (db[i] != 0) {
+ PORT_Free(db);
+ PORT_SetError(SEC_ERROR_BAD_SIGNATURE);
+ return SECFailure;
+ }
+ }
+ if (db[dbMaskLen - sLen - 1] != 0x01) {
+ PORT_Free(db);
+ PORT_SetError(SEC_ERROR_BAD_SIGNATURE);
+ return SECFailure;
+ }
+
+ /* Step 12 + 13 */
+ H_ = (unsigned char *)PORT_Alloc(hash->length);
+ if (H_ == NULL) {
+ PORT_Free(db);
+ PORT_SetError(SEC_ERROR_NO_MEMORY);
+ return SECFailure;
+ }
+ hash_context = (*hash->create)();
+ if (hash_context == NULL) {
+ PORT_Free(db);
+ PORT_Free(H_);
+ PORT_SetError(SEC_ERROR_NO_MEMORY);
+ return SECFailure;
+ }
+ (*hash->begin)(hash_context);
+ (*hash->update)(hash_context, eightZeros, 8);
+ (*hash->update)(hash_context, mHash, hash->length);
+ (*hash->update)(hash_context, &db[dbMaskLen - sLen], sLen);
+ (*hash->end)(hash_context, H_, &i, hash->length);
+ (*hash->destroy)(hash_context, PR_TRUE);
+
+ PORT_Free(db);
+
+ /* Step 14 */
+ if (PORT_Memcmp(H_, &em[dbMaskLen], hash->length) != 0) {
+ PORT_SetError(SEC_ERROR_BAD_SIGNATURE);
+ rv = SECFailure;
+ } else {
+ rv = SECSuccess;
+ }
+
+ PORT_Free(H_);
+ return rv;
+}
diff --git a/libchrome/crypto/third_party/nss/sha512.cc b/libchrome/crypto/third_party/nss/sha512.cc
new file mode 100644
index 0000000..78950cb
--- /dev/null
+++ b/libchrome/crypto/third_party/nss/sha512.cc
@@ -0,0 +1,1390 @@
+/*
+ * sha512.c - implementation of SHA256, SHA384 and SHA512
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape security libraries.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 2002
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+/* $Id: sha512.c,v 1.9 2006/10/13 16:54:04 wtchang%redhat.com Exp $ */
+
+// Prevent manual unrolling in the sha256 code, which reduces the binary code
+// size from ~10k to ~1k. The performance should be reasonable for our use.
+#define NOUNROLL256 1
+
+#include "crypto/third_party/nss/chromium-prtypes.h" /* for PRUintXX */
+#if defined(_X86_) || defined(SHA_NO_LONG_LONG)
+#define NOUNROLL512 1
+#undef HAVE_LONG_LONG
+#endif
+#include "crypto/third_party/nss/chromium-blapi.h"
+#include "crypto/third_party/nss/chromium-sha256.h" /* for struct SHA256ContextStr */
+
+#include <stdlib.h>
+#include <string.h>
+#define PORT_New(type) static_cast<type*>(malloc(sizeof(type)))
+#define PORT_ZFree(ptr, len) do { memset(ptr, 0, len); free(ptr); } while (0)
+#define PORT_Strlen(s) static_cast<unsigned int>(strlen(s))
+#define PORT_Memcpy memcpy
+
+/* ============= Common constants and defines ======================= */
+
+#define W ctx->u.w
+#define B ctx->u.b
+#define H ctx->h
+
+#define SHR(x,n) (x >> n)
+#define SHL(x,n) (x << n)
+#define Ch(x,y,z) ((x & y) ^ (~x & z))
+#define Maj(x,y,z) ((x & y) ^ (x & z) ^ (y & z))
+
+/* Padding used with all flavors of SHA */
+static const PRUint8 pad[240] = {
+0x80,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+ /* compiler will fill the rest in with zeros */
+};
+
+/* ============= SHA256 implementation =================================== */
+
+/* SHA-256 constants, K256. */
+static const PRUint32 K256[64] = {
+ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
+ 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
+ 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
+ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
+ 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
+ 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+ 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
+ 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
+ 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
+ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
+ 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
+ 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+ 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
+ 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
+ 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
+ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+};
+
+/* SHA-256 initial hash values */
+static const PRUint32 H256[8] = {
+ 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
+ 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
+};
+
+#if defined(_MSC_VER) && defined(_X86_)
+#ifndef FORCEINLINE
+#if (_MSC_VER >= 1200)
+#define FORCEINLINE __forceinline
+#else
+#define FORCEINLINE __inline
+#endif
+#endif
+#define FASTCALL __fastcall
+
+static FORCEINLINE PRUint32 FASTCALL
+swap4b(PRUint32 dwd)
+{
+ __asm {
+ mov eax,dwd
+ bswap eax
+ }
+}
+
+#define SHA_HTONL(x) swap4b(x)
+#define BYTESWAP4(x) x = SHA_HTONL(x)
+
+#elif defined(LINUX) && defined(_X86_)
+#undef __OPTIMIZE__
+#define __OPTIMIZE__ 1
+#undef __pentium__
+#define __pentium__ 1
+#include <byteswap.h>
+#define SHA_HTONL(x) bswap_32(x)
+#define BYTESWAP4(x) x = SHA_HTONL(x)
+
+#else /* neither windows nor Linux PC */
+#define SWAP4MASK 0x00FF00FF
+#define SHA_HTONL(x) (t1 = (x), t1 = (t1 << 16) | (t1 >> 16), \
+ ((t1 & SWAP4MASK) << 8) | ((t1 >> 8) & SWAP4MASK))
+#define BYTESWAP4(x) x = SHA_HTONL(x)
+#endif
+
+#if defined(_MSC_VER) && defined(_X86_)
+#pragma intrinsic (_lrotr, _lrotl)
+#define ROTR32(x,n) _lrotr(x,n)
+#define ROTL32(x,n) _lrotl(x,n)
+#else
+#define ROTR32(x,n) ((x >> n) | (x << ((8 * sizeof x) - n)))
+#define ROTL32(x,n) ((x << n) | (x >> ((8 * sizeof x) - n)))
+#endif
+
+/* Capital Sigma and lower case sigma functions */
+#define S0(x) (ROTR32(x, 2) ^ ROTR32(x,13) ^ ROTR32(x,22))
+#define S1(x) (ROTR32(x, 6) ^ ROTR32(x,11) ^ ROTR32(x,25))
+#define s0(x) (t1 = x, ROTR32(t1, 7) ^ ROTR32(t1,18) ^ SHR(t1, 3))
+#define s1(x) (t2 = x, ROTR32(t2,17) ^ ROTR32(t2,19) ^ SHR(t2,10))
+
+SHA256Context *
+SHA256_NewContext(void)
+{
+ SHA256Context *ctx = PORT_New(SHA256Context);
+ return ctx;
+}
+
+void
+SHA256_DestroyContext(SHA256Context *ctx, PRBool freeit)
+{
+ if (freeit) {
+ PORT_ZFree(ctx, sizeof *ctx);
+ }
+}
+
+void
+SHA256_Begin(SHA256Context *ctx)
+{
+ memset(ctx, 0, sizeof *ctx);
+ memcpy(H, H256, sizeof H256);
+}
+
+static void
+SHA256_Compress(SHA256Context *ctx)
+{
+ {
+ register PRUint32 t1, t2;
+
+#if defined(IS_LITTLE_ENDIAN)
+ BYTESWAP4(W[0]);
+ BYTESWAP4(W[1]);
+ BYTESWAP4(W[2]);
+ BYTESWAP4(W[3]);
+ BYTESWAP4(W[4]);
+ BYTESWAP4(W[5]);
+ BYTESWAP4(W[6]);
+ BYTESWAP4(W[7]);
+ BYTESWAP4(W[8]);
+ BYTESWAP4(W[9]);
+ BYTESWAP4(W[10]);
+ BYTESWAP4(W[11]);
+ BYTESWAP4(W[12]);
+ BYTESWAP4(W[13]);
+ BYTESWAP4(W[14]);
+ BYTESWAP4(W[15]);
+#endif
+
+#define INITW(t) W[t] = (s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16])
+
+ /* prepare the "message schedule" */
+#ifdef NOUNROLL256
+ {
+ int t;
+ for (t = 16; t < 64; ++t) {
+ INITW(t);
+ }
+ }
+#else
+ INITW(16);
+ INITW(17);
+ INITW(18);
+ INITW(19);
+
+ INITW(20);
+ INITW(21);
+ INITW(22);
+ INITW(23);
+ INITW(24);
+ INITW(25);
+ INITW(26);
+ INITW(27);
+ INITW(28);
+ INITW(29);
+
+ INITW(30);
+ INITW(31);
+ INITW(32);
+ INITW(33);
+ INITW(34);
+ INITW(35);
+ INITW(36);
+ INITW(37);
+ INITW(38);
+ INITW(39);
+
+ INITW(40);
+ INITW(41);
+ INITW(42);
+ INITW(43);
+ INITW(44);
+ INITW(45);
+ INITW(46);
+ INITW(47);
+ INITW(48);
+ INITW(49);
+
+ INITW(50);
+ INITW(51);
+ INITW(52);
+ INITW(53);
+ INITW(54);
+ INITW(55);
+ INITW(56);
+ INITW(57);
+ INITW(58);
+ INITW(59);
+
+ INITW(60);
+ INITW(61);
+ INITW(62);
+ INITW(63);
+
+#endif
+#undef INITW
+ }
+ {
+ PRUint32 a, b, c, d, e, f, g, h;
+
+ a = H[0];
+ b = H[1];
+ c = H[2];
+ d = H[3];
+ e = H[4];
+ f = H[5];
+ g = H[6];
+ h = H[7];
+
+#define ROUND(n,a,b,c,d,e,f,g,h) \
+ h += S1(e) + Ch(e,f,g) + K256[n] + W[n]; \
+ d += h; \
+ h += S0(a) + Maj(a,b,c);
+
+#ifdef NOUNROLL256
+ {
+ int t;
+ for (t = 0; t < 64; t+= 8) {
+ ROUND(t+0,a,b,c,d,e,f,g,h)
+ ROUND(t+1,h,a,b,c,d,e,f,g)
+ ROUND(t+2,g,h,a,b,c,d,e,f)
+ ROUND(t+3,f,g,h,a,b,c,d,e)
+ ROUND(t+4,e,f,g,h,a,b,c,d)
+ ROUND(t+5,d,e,f,g,h,a,b,c)
+ ROUND(t+6,c,d,e,f,g,h,a,b)
+ ROUND(t+7,b,c,d,e,f,g,h,a)
+ }
+ }
+#else
+ ROUND( 0,a,b,c,d,e,f,g,h)
+ ROUND( 1,h,a,b,c,d,e,f,g)
+ ROUND( 2,g,h,a,b,c,d,e,f)
+ ROUND( 3,f,g,h,a,b,c,d,e)
+ ROUND( 4,e,f,g,h,a,b,c,d)
+ ROUND( 5,d,e,f,g,h,a,b,c)
+ ROUND( 6,c,d,e,f,g,h,a,b)
+ ROUND( 7,b,c,d,e,f,g,h,a)
+
+ ROUND( 8,a,b,c,d,e,f,g,h)
+ ROUND( 9,h,a,b,c,d,e,f,g)
+ ROUND(10,g,h,a,b,c,d,e,f)
+ ROUND(11,f,g,h,a,b,c,d,e)
+ ROUND(12,e,f,g,h,a,b,c,d)
+ ROUND(13,d,e,f,g,h,a,b,c)
+ ROUND(14,c,d,e,f,g,h,a,b)
+ ROUND(15,b,c,d,e,f,g,h,a)
+
+ ROUND(16,a,b,c,d,e,f,g,h)
+ ROUND(17,h,a,b,c,d,e,f,g)
+ ROUND(18,g,h,a,b,c,d,e,f)
+ ROUND(19,f,g,h,a,b,c,d,e)
+ ROUND(20,e,f,g,h,a,b,c,d)
+ ROUND(21,d,e,f,g,h,a,b,c)
+ ROUND(22,c,d,e,f,g,h,a,b)
+ ROUND(23,b,c,d,e,f,g,h,a)
+
+ ROUND(24,a,b,c,d,e,f,g,h)
+ ROUND(25,h,a,b,c,d,e,f,g)
+ ROUND(26,g,h,a,b,c,d,e,f)
+ ROUND(27,f,g,h,a,b,c,d,e)
+ ROUND(28,e,f,g,h,a,b,c,d)
+ ROUND(29,d,e,f,g,h,a,b,c)
+ ROUND(30,c,d,e,f,g,h,a,b)
+ ROUND(31,b,c,d,e,f,g,h,a)
+
+ ROUND(32,a,b,c,d,e,f,g,h)
+ ROUND(33,h,a,b,c,d,e,f,g)
+ ROUND(34,g,h,a,b,c,d,e,f)
+ ROUND(35,f,g,h,a,b,c,d,e)
+ ROUND(36,e,f,g,h,a,b,c,d)
+ ROUND(37,d,e,f,g,h,a,b,c)
+ ROUND(38,c,d,e,f,g,h,a,b)
+ ROUND(39,b,c,d,e,f,g,h,a)
+
+ ROUND(40,a,b,c,d,e,f,g,h)
+ ROUND(41,h,a,b,c,d,e,f,g)
+ ROUND(42,g,h,a,b,c,d,e,f)
+ ROUND(43,f,g,h,a,b,c,d,e)
+ ROUND(44,e,f,g,h,a,b,c,d)
+ ROUND(45,d,e,f,g,h,a,b,c)
+ ROUND(46,c,d,e,f,g,h,a,b)
+ ROUND(47,b,c,d,e,f,g,h,a)
+
+ ROUND(48,a,b,c,d,e,f,g,h)
+ ROUND(49,h,a,b,c,d,e,f,g)
+ ROUND(50,g,h,a,b,c,d,e,f)
+ ROUND(51,f,g,h,a,b,c,d,e)
+ ROUND(52,e,f,g,h,a,b,c,d)
+ ROUND(53,d,e,f,g,h,a,b,c)
+ ROUND(54,c,d,e,f,g,h,a,b)
+ ROUND(55,b,c,d,e,f,g,h,a)
+
+ ROUND(56,a,b,c,d,e,f,g,h)
+ ROUND(57,h,a,b,c,d,e,f,g)
+ ROUND(58,g,h,a,b,c,d,e,f)
+ ROUND(59,f,g,h,a,b,c,d,e)
+ ROUND(60,e,f,g,h,a,b,c,d)
+ ROUND(61,d,e,f,g,h,a,b,c)
+ ROUND(62,c,d,e,f,g,h,a,b)
+ ROUND(63,b,c,d,e,f,g,h,a)
+#endif
+
+ H[0] += a;
+ H[1] += b;
+ H[2] += c;
+ H[3] += d;
+ H[4] += e;
+ H[5] += f;
+ H[6] += g;
+ H[7] += h;
+ }
+#undef ROUND
+}
+
+#undef s0
+#undef s1
+#undef S0
+#undef S1
+
+void
+SHA256_Update(SHA256Context *ctx, const unsigned char *input,
+ unsigned int inputLen)
+{
+ unsigned int inBuf = ctx->sizeLo & 0x3f;
+ if (!inputLen)
+ return;
+
+ /* Add inputLen into the count of bytes processed, before processing */
+ if ((ctx->sizeLo += inputLen) < inputLen)
+ ctx->sizeHi++;
+
+    /* if data already in buffer, attempt to fill rest of buffer */
+ if (inBuf) {
+ unsigned int todo = SHA256_BLOCK_LENGTH - inBuf;
+ if (inputLen < todo)
+ todo = inputLen;
+ memcpy(B + inBuf, input, todo);
+ input += todo;
+ inputLen -= todo;
+ if (inBuf + todo == SHA256_BLOCK_LENGTH)
+ SHA256_Compress(ctx);
+ }
+
+ /* if enough data to fill one or more whole buffers, process them. */
+ while (inputLen >= SHA256_BLOCK_LENGTH) {
+ memcpy(B, input, SHA256_BLOCK_LENGTH);
+ input += SHA256_BLOCK_LENGTH;
+ inputLen -= SHA256_BLOCK_LENGTH;
+ SHA256_Compress(ctx);
+ }
+ /* if data left over, fill it into buffer */
+ if (inputLen)
+ memcpy(B, input, inputLen);
+}
+
+void
+SHA256_End(SHA256Context *ctx, unsigned char *digest,
+ unsigned int *digestLen, unsigned int maxDigestLen)
+{
+ unsigned int inBuf = ctx->sizeLo & 0x3f;
+ unsigned int padLen = (inBuf < 56) ? (56 - inBuf) : (56 + 64 - inBuf);
+ PRUint32 hi, lo;
+#ifdef SWAP4MASK
+ PRUint32 t1;
+#endif
+
+ hi = (ctx->sizeHi << 3) | (ctx->sizeLo >> 29);
+ lo = (ctx->sizeLo << 3);
+
+ SHA256_Update(ctx, pad, padLen);
+
+#if defined(IS_LITTLE_ENDIAN)
+ W[14] = SHA_HTONL(hi);
+ W[15] = SHA_HTONL(lo);
+#else
+ W[14] = hi;
+ W[15] = lo;
+#endif
+ SHA256_Compress(ctx);
+
+ /* now output the answer */
+#if defined(IS_LITTLE_ENDIAN)
+ BYTESWAP4(H[0]);
+ BYTESWAP4(H[1]);
+ BYTESWAP4(H[2]);
+ BYTESWAP4(H[3]);
+ BYTESWAP4(H[4]);
+ BYTESWAP4(H[5]);
+ BYTESWAP4(H[6]);
+ BYTESWAP4(H[7]);
+#endif
+ padLen = PR_MIN(SHA256_LENGTH, maxDigestLen);
+ memcpy(digest, H, padLen);
+ if (digestLen)
+ *digestLen = padLen;
+}
+
+void SHA256_Clone(SHA256Context* dest, SHA256Context* src)
+{
+ memcpy(dest, src, sizeof *dest);
+}
+
+/* Comment out unused code, mostly the SHA384 and SHA512 implementations. */
+#if 0
+SECStatus
+SHA256_HashBuf(unsigned char *dest, const unsigned char *src,
+ unsigned int src_length)
+{
+ SHA256Context ctx;
+ unsigned int outLen;
+
+ SHA256_Begin(&ctx);
+ SHA256_Update(&ctx, src, src_length);
+ SHA256_End(&ctx, dest, &outLen, SHA256_LENGTH);
+
+ return SECSuccess;
+}
+
+
+SECStatus
+SHA256_Hash(unsigned char *dest, const char *src)
+{
+ return SHA256_HashBuf(dest, (const unsigned char *)src, PORT_Strlen(src));
+}
+
+
+void SHA256_TraceState(SHA256Context *ctx) { }
+
+unsigned int
+SHA256_FlattenSize(SHA256Context *ctx)
+{
+ return sizeof *ctx;
+}
+
+SECStatus
+SHA256_Flatten(SHA256Context *ctx,unsigned char *space)
+{
+ PORT_Memcpy(space, ctx, sizeof *ctx);
+ return SECSuccess;
+}
+
+SHA256Context *
+SHA256_Resurrect(unsigned char *space, void *arg)
+{
+ SHA256Context *ctx = SHA256_NewContext();
+ if (ctx)
+ PORT_Memcpy(ctx, space, sizeof *ctx);
+ return ctx;
+}
+
+/* ======= SHA512 and SHA384 common constants and defines ================= */
+
+/* common #defines for SHA512 and SHA384 */
+#if defined(HAVE_LONG_LONG)
+#define ROTR64(x,n) ((x >> n) | (x << (64 - n)))
+#define ROTL64(x,n) ((x << n) | (x >> (64 - n)))
+
+#define S0(x) (ROTR64(x,28) ^ ROTR64(x,34) ^ ROTR64(x,39))
+#define S1(x) (ROTR64(x,14) ^ ROTR64(x,18) ^ ROTR64(x,41))
+#define s0(x) (t1 = x, ROTR64(t1, 1) ^ ROTR64(t1, 8) ^ SHR(t1,7))
+#define s1(x) (t2 = x, ROTR64(t2,19) ^ ROTR64(t2,61) ^ SHR(t2,6))
+
+#if PR_BYTES_PER_LONG == 8
+#define ULLC(hi,lo) 0x ## hi ## lo ## UL
+#elif defined(_MSC_VER)
+#define ULLC(hi,lo) 0x ## hi ## lo ## ui64
+#else
+#define ULLC(hi,lo) 0x ## hi ## lo ## ULL
+#endif
+
+#define SHA_MASK16 ULLC(0000FFFF,0000FFFF)
+#define SHA_MASK8 ULLC(00FF00FF,00FF00FF)
+#define SHA_HTONLL(x) (t1 = x, \
+ t1 = ((t1 & SHA_MASK8 ) << 8) | ((t1 >> 8) & SHA_MASK8 ), \
+ t1 = ((t1 & SHA_MASK16) << 16) | ((t1 >> 16) & SHA_MASK16), \
+ (t1 >> 32) | (t1 << 32))
+#define BYTESWAP8(x) x = SHA_HTONLL(x)
+
+#else /* no long long */
+
+#if defined(IS_LITTLE_ENDIAN)
+#define ULLC(hi,lo) { 0x ## lo ## U, 0x ## hi ## U }
+#else
+#define ULLC(hi,lo) { 0x ## hi ## U, 0x ## lo ## U }
+#endif
+
+#define SHA_HTONLL(x) ( BYTESWAP4(x.lo), BYTESWAP4(x.hi), \
+ x.hi ^= x.lo ^= x.hi ^= x.lo, x)
+#define BYTESWAP8(x) do { PRUint32 tmp; BYTESWAP4(x.lo); BYTESWAP4(x.hi); \
+ tmp = x.lo; x.lo = x.hi; x.hi = tmp; } while (0)
+#endif
+
+/* SHA-384 and SHA-512 constants, K512. */
+static const PRUint64 K512[80] = {
+#if PR_BYTES_PER_LONG == 8
+ 0x428a2f98d728ae22UL , 0x7137449123ef65cdUL ,
+ 0xb5c0fbcfec4d3b2fUL , 0xe9b5dba58189dbbcUL ,
+ 0x3956c25bf348b538UL , 0x59f111f1b605d019UL ,
+ 0x923f82a4af194f9bUL , 0xab1c5ed5da6d8118UL ,
+ 0xd807aa98a3030242UL , 0x12835b0145706fbeUL ,
+ 0x243185be4ee4b28cUL , 0x550c7dc3d5ffb4e2UL ,
+ 0x72be5d74f27b896fUL , 0x80deb1fe3b1696b1UL ,
+ 0x9bdc06a725c71235UL , 0xc19bf174cf692694UL ,
+ 0xe49b69c19ef14ad2UL , 0xefbe4786384f25e3UL ,
+ 0x0fc19dc68b8cd5b5UL , 0x240ca1cc77ac9c65UL ,
+ 0x2de92c6f592b0275UL , 0x4a7484aa6ea6e483UL ,
+ 0x5cb0a9dcbd41fbd4UL , 0x76f988da831153b5UL ,
+ 0x983e5152ee66dfabUL , 0xa831c66d2db43210UL ,
+ 0xb00327c898fb213fUL , 0xbf597fc7beef0ee4UL ,
+ 0xc6e00bf33da88fc2UL , 0xd5a79147930aa725UL ,
+ 0x06ca6351e003826fUL , 0x142929670a0e6e70UL ,
+ 0x27b70a8546d22ffcUL , 0x2e1b21385c26c926UL ,
+ 0x4d2c6dfc5ac42aedUL , 0x53380d139d95b3dfUL ,
+ 0x650a73548baf63deUL , 0x766a0abb3c77b2a8UL ,
+ 0x81c2c92e47edaee6UL , 0x92722c851482353bUL ,
+ 0xa2bfe8a14cf10364UL , 0xa81a664bbc423001UL ,
+ 0xc24b8b70d0f89791UL , 0xc76c51a30654be30UL ,
+ 0xd192e819d6ef5218UL , 0xd69906245565a910UL ,
+ 0xf40e35855771202aUL , 0x106aa07032bbd1b8UL ,
+ 0x19a4c116b8d2d0c8UL , 0x1e376c085141ab53UL ,
+ 0x2748774cdf8eeb99UL , 0x34b0bcb5e19b48a8UL ,
+ 0x391c0cb3c5c95a63UL , 0x4ed8aa4ae3418acbUL ,
+ 0x5b9cca4f7763e373UL , 0x682e6ff3d6b2b8a3UL ,
+ 0x748f82ee5defb2fcUL , 0x78a5636f43172f60UL ,
+ 0x84c87814a1f0ab72UL , 0x8cc702081a6439ecUL ,
+ 0x90befffa23631e28UL , 0xa4506cebde82bde9UL ,
+ 0xbef9a3f7b2c67915UL , 0xc67178f2e372532bUL ,
+ 0xca273eceea26619cUL , 0xd186b8c721c0c207UL ,
+ 0xeada7dd6cde0eb1eUL , 0xf57d4f7fee6ed178UL ,
+ 0x06f067aa72176fbaUL , 0x0a637dc5a2c898a6UL ,
+ 0x113f9804bef90daeUL , 0x1b710b35131c471bUL ,
+ 0x28db77f523047d84UL , 0x32caab7b40c72493UL ,
+ 0x3c9ebe0a15c9bebcUL , 0x431d67c49c100d4cUL ,
+ 0x4cc5d4becb3e42b6UL , 0x597f299cfc657e2aUL ,
+ 0x5fcb6fab3ad6faecUL , 0x6c44198c4a475817UL
+#else
+ ULLC(428a2f98,d728ae22), ULLC(71374491,23ef65cd),
+ ULLC(b5c0fbcf,ec4d3b2f), ULLC(e9b5dba5,8189dbbc),
+ ULLC(3956c25b,f348b538), ULLC(59f111f1,b605d019),
+ ULLC(923f82a4,af194f9b), ULLC(ab1c5ed5,da6d8118),
+ ULLC(d807aa98,a3030242), ULLC(12835b01,45706fbe),
+ ULLC(243185be,4ee4b28c), ULLC(550c7dc3,d5ffb4e2),
+ ULLC(72be5d74,f27b896f), ULLC(80deb1fe,3b1696b1),
+ ULLC(9bdc06a7,25c71235), ULLC(c19bf174,cf692694),
+ ULLC(e49b69c1,9ef14ad2), ULLC(efbe4786,384f25e3),
+ ULLC(0fc19dc6,8b8cd5b5), ULLC(240ca1cc,77ac9c65),
+ ULLC(2de92c6f,592b0275), ULLC(4a7484aa,6ea6e483),
+ ULLC(5cb0a9dc,bd41fbd4), ULLC(76f988da,831153b5),
+ ULLC(983e5152,ee66dfab), ULLC(a831c66d,2db43210),
+ ULLC(b00327c8,98fb213f), ULLC(bf597fc7,beef0ee4),
+ ULLC(c6e00bf3,3da88fc2), ULLC(d5a79147,930aa725),
+ ULLC(06ca6351,e003826f), ULLC(14292967,0a0e6e70),
+ ULLC(27b70a85,46d22ffc), ULLC(2e1b2138,5c26c926),
+ ULLC(4d2c6dfc,5ac42aed), ULLC(53380d13,9d95b3df),
+ ULLC(650a7354,8baf63de), ULLC(766a0abb,3c77b2a8),
+ ULLC(81c2c92e,47edaee6), ULLC(92722c85,1482353b),
+ ULLC(a2bfe8a1,4cf10364), ULLC(a81a664b,bc423001),
+ ULLC(c24b8b70,d0f89791), ULLC(c76c51a3,0654be30),
+ ULLC(d192e819,d6ef5218), ULLC(d6990624,5565a910),
+ ULLC(f40e3585,5771202a), ULLC(106aa070,32bbd1b8),
+ ULLC(19a4c116,b8d2d0c8), ULLC(1e376c08,5141ab53),
+ ULLC(2748774c,df8eeb99), ULLC(34b0bcb5,e19b48a8),
+ ULLC(391c0cb3,c5c95a63), ULLC(4ed8aa4a,e3418acb),
+ ULLC(5b9cca4f,7763e373), ULLC(682e6ff3,d6b2b8a3),
+ ULLC(748f82ee,5defb2fc), ULLC(78a5636f,43172f60),
+ ULLC(84c87814,a1f0ab72), ULLC(8cc70208,1a6439ec),
+ ULLC(90befffa,23631e28), ULLC(a4506ceb,de82bde9),
+ ULLC(bef9a3f7,b2c67915), ULLC(c67178f2,e372532b),
+ ULLC(ca273ece,ea26619c), ULLC(d186b8c7,21c0c207),
+ ULLC(eada7dd6,cde0eb1e), ULLC(f57d4f7f,ee6ed178),
+ ULLC(06f067aa,72176fba), ULLC(0a637dc5,a2c898a6),
+ ULLC(113f9804,bef90dae), ULLC(1b710b35,131c471b),
+ ULLC(28db77f5,23047d84), ULLC(32caab7b,40c72493),
+ ULLC(3c9ebe0a,15c9bebc), ULLC(431d67c4,9c100d4c),
+ ULLC(4cc5d4be,cb3e42b6), ULLC(597f299c,fc657e2a),
+ ULLC(5fcb6fab,3ad6faec), ULLC(6c44198c,4a475817)
+#endif
+};
+
+struct SHA512ContextStr {
+ union {
+ PRUint64 w[80]; /* message schedule, input buffer, plus 64 words */
+ PRUint32 l[160];
+ PRUint8 b[640];
+ } u;
+ PRUint64 h[8]; /* 8 state variables */
+ PRUint64 sizeLo; /* 64-bit count of hashed bytes. */
+};
+
+/* =========== SHA512 implementation ===================================== */
+
+/* SHA-512 initial hash values */
+static const PRUint64 H512[8] = {
+#if PR_BYTES_PER_LONG == 8
+ 0x6a09e667f3bcc908UL , 0xbb67ae8584caa73bUL ,
+ 0x3c6ef372fe94f82bUL , 0xa54ff53a5f1d36f1UL ,
+ 0x510e527fade682d1UL , 0x9b05688c2b3e6c1fUL ,
+ 0x1f83d9abfb41bd6bUL , 0x5be0cd19137e2179UL
+#else
+ ULLC(6a09e667,f3bcc908), ULLC(bb67ae85,84caa73b),
+ ULLC(3c6ef372,fe94f82b), ULLC(a54ff53a,5f1d36f1),
+ ULLC(510e527f,ade682d1), ULLC(9b05688c,2b3e6c1f),
+ ULLC(1f83d9ab,fb41bd6b), ULLC(5be0cd19,137e2179)
+#endif
+};
+
+
+SHA512Context *
+SHA512_NewContext(void)
+{
+ SHA512Context *ctx = PORT_New(SHA512Context);
+ return ctx;
+}
+
+void
+SHA512_DestroyContext(SHA512Context *ctx, PRBool freeit)
+{
+ if (freeit) {
+ PORT_ZFree(ctx, sizeof *ctx);
+ }
+}
+
+void
+SHA512_Begin(SHA512Context *ctx)
+{
+ memset(ctx, 0, sizeof *ctx);
+ memcpy(H, H512, sizeof H512);
+}
+
+#if defined(SHA512_TRACE)
+#if defined(HAVE_LONG_LONG)
+#define DUMP(n,a,d,e,h) printf(" t = %2d, %s = %016lx, %s = %016lx\n", \
+ n, #e, d, #a, h);
+#else
+#define DUMP(n,a,d,e,h) printf(" t = %2d, %s = %08x%08x, %s = %08x%08x\n", \
+ n, #e, d.hi, d.lo, #a, h.hi, h.lo);
+#endif
+#else
+#define DUMP(n,a,d,e,h)
+#endif
+
+#if defined(HAVE_LONG_LONG)
+
+#define ADDTO(x,y) y += x
+
+#define INITW(t) W[t] = (s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16])
+
+#define ROUND(n,a,b,c,d,e,f,g,h) \
+ h += S1(e) + Ch(e,f,g) + K512[n] + W[n]; \
+ d += h; \
+ h += S0(a) + Maj(a,b,c); \
+ DUMP(n,a,d,e,h)
+
+#else /* use only 32-bit variables, and don't unroll loops */
+
+#undef NOUNROLL512
+#define NOUNROLL512 1
+
+#define ADDTO(x,y) y.lo += x.lo; y.hi += x.hi + (x.lo > y.lo)
+
+#define ROTR64a(x,n,lo,hi) (x.lo >> n | x.hi << (32-n))
+#define ROTR64A(x,n,lo,hi) (x.lo << (64-n) | x.hi >> (n-32))
+#define SHR64a(x,n,lo,hi) (x.lo >> n | x.hi << (32-n))
+
+/* Capital Sigma and lower case sigma functions */
+#define s0lo(x) (ROTR64a(x,1,lo,hi) ^ ROTR64a(x,8,lo,hi) ^ SHR64a(x,7,lo,hi))
+#define s0hi(x) (ROTR64a(x,1,hi,lo) ^ ROTR64a(x,8,hi,lo) ^ (x.hi >> 7))
+
+#define s1lo(x) (ROTR64a(x,19,lo,hi) ^ ROTR64A(x,61,lo,hi) ^ SHR64a(x,6,lo,hi))
+#define s1hi(x) (ROTR64a(x,19,hi,lo) ^ ROTR64A(x,61,hi,lo) ^ (x.hi >> 6))
+
+#define S0lo(x)(ROTR64a(x,28,lo,hi) ^ ROTR64A(x,34,lo,hi) ^ ROTR64A(x,39,lo,hi))
+#define S0hi(x)(ROTR64a(x,28,hi,lo) ^ ROTR64A(x,34,hi,lo) ^ ROTR64A(x,39,hi,lo))
+
+#define S1lo(x)(ROTR64a(x,14,lo,hi) ^ ROTR64a(x,18,lo,hi) ^ ROTR64A(x,41,lo,hi))
+#define S1hi(x)(ROTR64a(x,14,hi,lo) ^ ROTR64a(x,18,hi,lo) ^ ROTR64A(x,41,hi,lo))
+
+/* 32-bit versions of Ch and Maj */
+#define Chxx(x,y,z,lo) ((x.lo & y.lo) ^ (~x.lo & z.lo))
+#define Majx(x,y,z,lo) ((x.lo & y.lo) ^ (x.lo & z.lo) ^ (y.lo & z.lo))
+
+#define INITW(t) \
+ do { \
+ PRUint32 lo, tm; \
+ PRUint32 cy = 0; \
+ lo = s1lo(W[t-2]); \
+ lo += (tm = W[t-7].lo); if (lo < tm) cy++; \
+ lo += (tm = s0lo(W[t-15])); if (lo < tm) cy++; \
+ lo += (tm = W[t-16].lo); if (lo < tm) cy++; \
+ W[t].lo = lo; \
+ W[t].hi = cy + s1hi(W[t-2]) + W[t-7].hi + s0hi(W[t-15]) + W[t-16].hi; \
+ } while (0)
+
+#define ROUND(n,a,b,c,d,e,f,g,h) \
+ { \
+ PRUint32 lo, tm, cy; \
+ lo = S1lo(e); \
+ lo += (tm = Chxx(e,f,g,lo)); cy = (lo < tm); \
+ lo += (tm = K512[n].lo); if (lo < tm) cy++; \
+ lo += (tm = W[n].lo); if (lo < tm) cy++; \
+ h.lo += lo; if (h.lo < lo) cy++; \
+ h.hi += cy + S1hi(e) + Chxx(e,f,g,hi) + K512[n].hi + W[n].hi; \
+ d.lo += h.lo; \
+ d.hi += h.hi + (d.lo < h.lo); \
+ lo = S0lo(a); \
+ lo += (tm = Majx(a,b,c,lo)); cy = (lo < tm); \
+ h.lo += lo; if (h.lo < lo) cy++; \
+ h.hi += cy + S0hi(a) + Majx(a,b,c,hi); \
+ DUMP(n,a,d,e,h) \
+ }
+#endif
+
+static void
+SHA512_Compress(SHA512Context *ctx)
+{
+#if defined(IS_LITTLE_ENDIAN)
+ {
+#if defined(HAVE_LONG_LONG)
+ PRUint64 t1;
+#else
+ PRUint32 t1;
+#endif
+ BYTESWAP8(W[0]);
+ BYTESWAP8(W[1]);
+ BYTESWAP8(W[2]);
+ BYTESWAP8(W[3]);
+ BYTESWAP8(W[4]);
+ BYTESWAP8(W[5]);
+ BYTESWAP8(W[6]);
+ BYTESWAP8(W[7]);
+ BYTESWAP8(W[8]);
+ BYTESWAP8(W[9]);
+ BYTESWAP8(W[10]);
+ BYTESWAP8(W[11]);
+ BYTESWAP8(W[12]);
+ BYTESWAP8(W[13]);
+ BYTESWAP8(W[14]);
+ BYTESWAP8(W[15]);
+ }
+#endif
+
+ {
+ PRUint64 t1, t2;
+#ifdef NOUNROLL512
+ {
+ /* prepare the "message schedule" */
+ int t;
+ for (t = 16; t < 80; ++t) {
+ INITW(t);
+ }
+ }
+#else
+ INITW(16);
+ INITW(17);
+ INITW(18);
+ INITW(19);
+
+ INITW(20);
+ INITW(21);
+ INITW(22);
+ INITW(23);
+ INITW(24);
+ INITW(25);
+ INITW(26);
+ INITW(27);
+ INITW(28);
+ INITW(29);
+
+ INITW(30);
+ INITW(31);
+ INITW(32);
+ INITW(33);
+ INITW(34);
+ INITW(35);
+ INITW(36);
+ INITW(37);
+ INITW(38);
+ INITW(39);
+
+ INITW(40);
+ INITW(41);
+ INITW(42);
+ INITW(43);
+ INITW(44);
+ INITW(45);
+ INITW(46);
+ INITW(47);
+ INITW(48);
+ INITW(49);
+
+ INITW(50);
+ INITW(51);
+ INITW(52);
+ INITW(53);
+ INITW(54);
+ INITW(55);
+ INITW(56);
+ INITW(57);
+ INITW(58);
+ INITW(59);
+
+ INITW(60);
+ INITW(61);
+ INITW(62);
+ INITW(63);
+ INITW(64);
+ INITW(65);
+ INITW(66);
+ INITW(67);
+ INITW(68);
+ INITW(69);
+
+ INITW(70);
+ INITW(71);
+ INITW(72);
+ INITW(73);
+ INITW(74);
+ INITW(75);
+ INITW(76);
+ INITW(77);
+ INITW(78);
+ INITW(79);
+#endif
+ }
+#ifdef SHA512_TRACE
+ {
+ int i;
+ for (i = 0; i < 80; ++i) {
+#ifdef HAVE_LONG_LONG
+ printf("W[%2d] = %016lx\n", i, W[i]);
+#else
+ printf("W[%2d] = %08x%08x\n", i, W[i].hi, W[i].lo);
+#endif
+ }
+ }
+#endif
+ {
+ PRUint64 a, b, c, d, e, f, g, h;
+
+ a = H[0];
+ b = H[1];
+ c = H[2];
+ d = H[3];
+ e = H[4];
+ f = H[5];
+ g = H[6];
+ h = H[7];
+
+#ifdef NOUNROLL512
+ {
+ int t;
+ for (t = 0; t < 80; t+= 8) {
+ ROUND(t+0,a,b,c,d,e,f,g,h)
+ ROUND(t+1,h,a,b,c,d,e,f,g)
+ ROUND(t+2,g,h,a,b,c,d,e,f)
+ ROUND(t+3,f,g,h,a,b,c,d,e)
+ ROUND(t+4,e,f,g,h,a,b,c,d)
+ ROUND(t+5,d,e,f,g,h,a,b,c)
+ ROUND(t+6,c,d,e,f,g,h,a,b)
+ ROUND(t+7,b,c,d,e,f,g,h,a)
+ }
+ }
+#else
+ ROUND( 0,a,b,c,d,e,f,g,h)
+ ROUND( 1,h,a,b,c,d,e,f,g)
+ ROUND( 2,g,h,a,b,c,d,e,f)
+ ROUND( 3,f,g,h,a,b,c,d,e)
+ ROUND( 4,e,f,g,h,a,b,c,d)
+ ROUND( 5,d,e,f,g,h,a,b,c)
+ ROUND( 6,c,d,e,f,g,h,a,b)
+ ROUND( 7,b,c,d,e,f,g,h,a)
+
+ ROUND( 8,a,b,c,d,e,f,g,h)
+ ROUND( 9,h,a,b,c,d,e,f,g)
+ ROUND(10,g,h,a,b,c,d,e,f)
+ ROUND(11,f,g,h,a,b,c,d,e)
+ ROUND(12,e,f,g,h,a,b,c,d)
+ ROUND(13,d,e,f,g,h,a,b,c)
+ ROUND(14,c,d,e,f,g,h,a,b)
+ ROUND(15,b,c,d,e,f,g,h,a)
+
+ ROUND(16,a,b,c,d,e,f,g,h)
+ ROUND(17,h,a,b,c,d,e,f,g)
+ ROUND(18,g,h,a,b,c,d,e,f)
+ ROUND(19,f,g,h,a,b,c,d,e)
+ ROUND(20,e,f,g,h,a,b,c,d)
+ ROUND(21,d,e,f,g,h,a,b,c)
+ ROUND(22,c,d,e,f,g,h,a,b)
+ ROUND(23,b,c,d,e,f,g,h,a)
+
+ ROUND(24,a,b,c,d,e,f,g,h)
+ ROUND(25,h,a,b,c,d,e,f,g)
+ ROUND(26,g,h,a,b,c,d,e,f)
+ ROUND(27,f,g,h,a,b,c,d,e)
+ ROUND(28,e,f,g,h,a,b,c,d)
+ ROUND(29,d,e,f,g,h,a,b,c)
+ ROUND(30,c,d,e,f,g,h,a,b)
+ ROUND(31,b,c,d,e,f,g,h,a)
+
+ ROUND(32,a,b,c,d,e,f,g,h)
+ ROUND(33,h,a,b,c,d,e,f,g)
+ ROUND(34,g,h,a,b,c,d,e,f)
+ ROUND(35,f,g,h,a,b,c,d,e)
+ ROUND(36,e,f,g,h,a,b,c,d)
+ ROUND(37,d,e,f,g,h,a,b,c)
+ ROUND(38,c,d,e,f,g,h,a,b)
+ ROUND(39,b,c,d,e,f,g,h,a)
+
+ ROUND(40,a,b,c,d,e,f,g,h)
+ ROUND(41,h,a,b,c,d,e,f,g)
+ ROUND(42,g,h,a,b,c,d,e,f)
+ ROUND(43,f,g,h,a,b,c,d,e)
+ ROUND(44,e,f,g,h,a,b,c,d)
+ ROUND(45,d,e,f,g,h,a,b,c)
+ ROUND(46,c,d,e,f,g,h,a,b)
+ ROUND(47,b,c,d,e,f,g,h,a)
+
+ ROUND(48,a,b,c,d,e,f,g,h)
+ ROUND(49,h,a,b,c,d,e,f,g)
+ ROUND(50,g,h,a,b,c,d,e,f)
+ ROUND(51,f,g,h,a,b,c,d,e)
+ ROUND(52,e,f,g,h,a,b,c,d)
+ ROUND(53,d,e,f,g,h,a,b,c)
+ ROUND(54,c,d,e,f,g,h,a,b)
+ ROUND(55,b,c,d,e,f,g,h,a)
+
+ ROUND(56,a,b,c,d,e,f,g,h)
+ ROUND(57,h,a,b,c,d,e,f,g)
+ ROUND(58,g,h,a,b,c,d,e,f)
+ ROUND(59,f,g,h,a,b,c,d,e)
+ ROUND(60,e,f,g,h,a,b,c,d)
+ ROUND(61,d,e,f,g,h,a,b,c)
+ ROUND(62,c,d,e,f,g,h,a,b)
+ ROUND(63,b,c,d,e,f,g,h,a)
+
+ ROUND(64,a,b,c,d,e,f,g,h)
+ ROUND(65,h,a,b,c,d,e,f,g)
+ ROUND(66,g,h,a,b,c,d,e,f)
+ ROUND(67,f,g,h,a,b,c,d,e)
+ ROUND(68,e,f,g,h,a,b,c,d)
+ ROUND(69,d,e,f,g,h,a,b,c)
+ ROUND(70,c,d,e,f,g,h,a,b)
+ ROUND(71,b,c,d,e,f,g,h,a)
+
+ ROUND(72,a,b,c,d,e,f,g,h)
+ ROUND(73,h,a,b,c,d,e,f,g)
+ ROUND(74,g,h,a,b,c,d,e,f)
+ ROUND(75,f,g,h,a,b,c,d,e)
+ ROUND(76,e,f,g,h,a,b,c,d)
+ ROUND(77,d,e,f,g,h,a,b,c)
+ ROUND(78,c,d,e,f,g,h,a,b)
+ ROUND(79,b,c,d,e,f,g,h,a)
+#endif
+
+ ADDTO(a,H[0]);
+ ADDTO(b,H[1]);
+ ADDTO(c,H[2]);
+ ADDTO(d,H[3]);
+ ADDTO(e,H[4]);
+ ADDTO(f,H[5]);
+ ADDTO(g,H[6]);
+ ADDTO(h,H[7]);
+ }
+}
+
+void
+SHA512_Update(SHA512Context *ctx, const unsigned char *input,
+ unsigned int inputLen)
+{
+ unsigned int inBuf;
+ if (!inputLen)
+ return;
+
+#if defined(HAVE_LONG_LONG)
+ inBuf = (unsigned int)ctx->sizeLo & 0x7f;
+ /* Add inputLen into the count of bytes processed, before processing */
+ ctx->sizeLo += inputLen;
+#else
+ inBuf = (unsigned int)ctx->sizeLo.lo & 0x7f;
+ ctx->sizeLo.lo += inputLen;
+ if (ctx->sizeLo.lo < inputLen) ctx->sizeLo.hi++;
+#endif
+
+    /* if data already in buffer, attempt to fill rest of buffer */
+ if (inBuf) {
+ unsigned int todo = SHA512_BLOCK_LENGTH - inBuf;
+ if (inputLen < todo)
+ todo = inputLen;
+ memcpy(B + inBuf, input, todo);
+ input += todo;
+ inputLen -= todo;
+ if (inBuf + todo == SHA512_BLOCK_LENGTH)
+ SHA512_Compress(ctx);
+ }
+
+ /* if enough data to fill one or more whole buffers, process them. */
+ while (inputLen >= SHA512_BLOCK_LENGTH) {
+ memcpy(B, input, SHA512_BLOCK_LENGTH);
+ input += SHA512_BLOCK_LENGTH;
+ inputLen -= SHA512_BLOCK_LENGTH;
+ SHA512_Compress(ctx);
+ }
+ /* if data left over, fill it into buffer */
+ if (inputLen)
+ memcpy(B, input, inputLen);
+}
+
+void
+SHA512_End(SHA512Context *ctx, unsigned char *digest,
+ unsigned int *digestLen, unsigned int maxDigestLen)
+{
+#if defined(HAVE_LONG_LONG)
+ unsigned int inBuf = (unsigned int)ctx->sizeLo & 0x7f;
+ unsigned int padLen = (inBuf < 112) ? (112 - inBuf) : (112 + 128 - inBuf);
+ PRUint64 lo, t1;
+ lo = (ctx->sizeLo << 3);
+#else
+ unsigned int inBuf = (unsigned int)ctx->sizeLo.lo & 0x7f;
+ unsigned int padLen = (inBuf < 112) ? (112 - inBuf) : (112 + 128 - inBuf);
+ PRUint64 lo = ctx->sizeLo;
+ PRUint32 t1;
+ lo.lo <<= 3;
+#endif
+
+ SHA512_Update(ctx, pad, padLen);
+
+#if defined(HAVE_LONG_LONG)
+ W[14] = 0;
+#else
+ W[14].lo = 0;
+ W[14].hi = 0;
+#endif
+
+ W[15] = lo;
+#if defined(IS_LITTLE_ENDIAN)
+ BYTESWAP8(W[15]);
+#endif
+ SHA512_Compress(ctx);
+
+ /* now output the answer */
+#if defined(IS_LITTLE_ENDIAN)
+ BYTESWAP8(H[0]);
+ BYTESWAP8(H[1]);
+ BYTESWAP8(H[2]);
+ BYTESWAP8(H[3]);
+ BYTESWAP8(H[4]);
+ BYTESWAP8(H[5]);
+ BYTESWAP8(H[6]);
+ BYTESWAP8(H[7]);
+#endif
+ padLen = PR_MIN(SHA512_LENGTH, maxDigestLen);
+ memcpy(digest, H, padLen);
+ if (digestLen)
+ *digestLen = padLen;
+}
+
+SECStatus
+SHA512_HashBuf(unsigned char *dest, const unsigned char *src,
+ unsigned int src_length)
+{
+ SHA512Context ctx;
+ unsigned int outLen;
+
+ SHA512_Begin(&ctx);
+ SHA512_Update(&ctx, src, src_length);
+ SHA512_End(&ctx, dest, &outLen, SHA512_LENGTH);
+
+ return SECSuccess;
+}
+
+
+SECStatus
+SHA512_Hash(unsigned char *dest, const char *src)
+{
+ return SHA512_HashBuf(dest, (const unsigned char *)src, PORT_Strlen(src));
+}
+
+
+void SHA512_TraceState(SHA512Context *ctx) { }
+
+unsigned int
+SHA512_FlattenSize(SHA512Context *ctx)
+{
+ return sizeof *ctx;
+}
+
+SECStatus
+SHA512_Flatten(SHA512Context *ctx,unsigned char *space)
+{
+ PORT_Memcpy(space, ctx, sizeof *ctx);
+ return SECSuccess;
+}
+
+SHA512Context *
+SHA512_Resurrect(unsigned char *space, void *arg)
+{
+ SHA512Context *ctx = SHA512_NewContext();
+ if (ctx)
+ PORT_Memcpy(ctx, space, sizeof *ctx);
+ return ctx;
+}
+
+void SHA512_Clone(SHA512Context *dest, SHA512Context *src)
+{
+ memcpy(dest, src, sizeof *dest);
+}
+
+/* ======================================================================= */
+/* SHA384 uses a SHA512Context as the real context.
+** The only differences between SHA384 and SHA512 are:
+** a) the initialization values for the context, and
+** b) the number of bytes of data produced as output.
+*/
+
+/* SHA-384 initial hash values */
+static const PRUint64 H384[8] = {
+#if PR_BYTES_PER_LONG == 8
+ 0xcbbb9d5dc1059ed8UL , 0x629a292a367cd507UL ,
+ 0x9159015a3070dd17UL , 0x152fecd8f70e5939UL ,
+ 0x67332667ffc00b31UL , 0x8eb44a8768581511UL ,
+ 0xdb0c2e0d64f98fa7UL , 0x47b5481dbefa4fa4UL
+#else
+ ULLC(cbbb9d5d,c1059ed8), ULLC(629a292a,367cd507),
+ ULLC(9159015a,3070dd17), ULLC(152fecd8,f70e5939),
+ ULLC(67332667,ffc00b31), ULLC(8eb44a87,68581511),
+ ULLC(db0c2e0d,64f98fa7), ULLC(47b5481d,befa4fa4)
+#endif
+};
+
+SHA384Context *
+SHA384_NewContext(void)
+{
+ return SHA512_NewContext();
+}
+
+void
+SHA384_DestroyContext(SHA384Context *ctx, PRBool freeit)
+{
+ SHA512_DestroyContext(ctx, freeit);
+}
+
+void
+SHA384_Begin(SHA384Context *ctx)
+{
+ memset(ctx, 0, sizeof *ctx);
+ memcpy(H, H384, sizeof H384);
+}
+
+void
+SHA384_Update(SHA384Context *ctx, const unsigned char *input,
+ unsigned int inputLen)
+{
+ SHA512_Update(ctx, input, inputLen);
+}
+
+void
+SHA384_End(SHA384Context *ctx, unsigned char *digest,
+ unsigned int *digestLen, unsigned int maxDigestLen)
+{
+#define SHA_MIN(a,b) (a < b ? a : b)
+ unsigned int maxLen = SHA_MIN(maxDigestLen, SHA384_LENGTH);
+ SHA512_End(ctx, digest, digestLen, maxLen);
+}
+
+SECStatus
+SHA384_HashBuf(unsigned char *dest, const unsigned char *src,
+ unsigned int src_length)
+{
+ SHA512Context ctx;
+ unsigned int outLen;
+
+ SHA384_Begin(&ctx);
+ SHA512_Update(&ctx, src, src_length);
+ SHA512_End(&ctx, dest, &outLen, SHA384_LENGTH);
+
+ return SECSuccess;
+}
+
+SECStatus
+SHA384_Hash(unsigned char *dest, const char *src)
+{
+ return SHA384_HashBuf(dest, (const unsigned char *)src, PORT_Strlen(src));
+}
+
+void SHA384_TraceState(SHA384Context *ctx) { }
+
+unsigned int
+SHA384_FlattenSize(SHA384Context *ctx)
+{
+ return sizeof(SHA384Context);
+}
+
+SECStatus
+SHA384_Flatten(SHA384Context *ctx,unsigned char *space)
+{
+ return SHA512_Flatten(ctx, space);
+}
+
+SHA384Context *
+SHA384_Resurrect(unsigned char *space, void *arg)
+{
+ return SHA512_Resurrect(space, arg);
+}
+
+void SHA384_Clone(SHA384Context *dest, SHA384Context *src)
+{
+ memcpy(dest, src, sizeof *dest);
+}
+#endif /* Comment out unused code. */
+
+/* ======================================================================= */
+#ifdef SELFTEST
+#include <stdio.h>
+
+static const char abc[] = { "abc" };
+static const char abcdbc[] = {
+ "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
+};
+static const char abcdef[] = {
+ "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"
+ "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu"
+};
+
+void
+dumpHash32(const unsigned char *buf, unsigned int bufLen)
+{
+ unsigned int i;
+ for (i = 0; i < bufLen; i += 4) {
+ printf(" %02x%02x%02x%02x", buf[i], buf[i+1], buf[i+2], buf[i+3]);
+ }
+ printf("\n");
+}
+
+void test256(void)
+{
+ unsigned char outBuf[SHA256_LENGTH];
+
+ printf("SHA256, input = %s\n", abc);
+ SHA256_Hash(outBuf, abc);
+ dumpHash32(outBuf, sizeof outBuf);
+
+ printf("SHA256, input = %s\n", abcdbc);
+ SHA256_Hash(outBuf, abcdbc);
+ dumpHash32(outBuf, sizeof outBuf);
+}
+
+void
+dumpHash64(const unsigned char *buf, unsigned int bufLen)
+{
+ unsigned int i;
+ for (i = 0; i < bufLen; i += 8) {
+ if (i % 32 == 0)
+ printf("\n");
+ printf(" %02x%02x%02x%02x%02x%02x%02x%02x",
+ buf[i ], buf[i+1], buf[i+2], buf[i+3],
+ buf[i+4], buf[i+5], buf[i+6], buf[i+7]);
+ }
+ printf("\n");
+}
+
+void test512(void)
+{
+ unsigned char outBuf[SHA512_LENGTH];
+
+ printf("SHA512, input = %s\n", abc);
+ SHA512_Hash(outBuf, abc);
+ dumpHash64(outBuf, sizeof outBuf);
+
+ printf("SHA512, input = %s\n", abcdef);
+ SHA512_Hash(outBuf, abcdef);
+ dumpHash64(outBuf, sizeof outBuf);
+}
+
+void time512(void)
+{
+ unsigned char outBuf[SHA512_LENGTH];
+
+ SHA512_Hash(outBuf, abc);
+ SHA512_Hash(outBuf, abcdef);
+}
+
+void test384(void)
+{
+ unsigned char outBuf[SHA384_LENGTH];
+
+ printf("SHA384, input = %s\n", abc);
+ SHA384_Hash(outBuf, abc);
+ dumpHash64(outBuf, sizeof outBuf);
+
+ printf("SHA384, input = %s\n", abcdef);
+ SHA384_Hash(outBuf, abcdef);
+ dumpHash64(outBuf, sizeof outBuf);
+}
+
+int main (int argc, char *argv[], char *envp[])
+{
+ int i = 1;
+ if (argc > 1) {
+ i = atoi(argv[1]);
+ }
+ if (i < 2) {
+ test256();
+ test512();
+ test384();
+ } else {
+ while (i-- > 0) {
+ time512();
+ }
+ printf("done\n");
+ }
+ return 0;
+}
+
+#endif
diff --git a/libchrome/crypto/wincrypt_shim.h b/libchrome/crypto/wincrypt_shim.h
new file mode 100644
index 0000000..48d4b5c
--- /dev/null
+++ b/libchrome/crypto/wincrypt_shim.h
@@ -0,0 +1,25 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_CRYPTO_WINCRYPT_SHIM_H_
+#define NET_CRYPTO_WINCRYPT_SHIM_H_
+
+// wincrypt.h defines macros which conflict with OpenSSL's types. This header
+// includes wincrypt and undefines the OpenSSL macros which conflict. Any
+// Chromium headers which include wincrypt should instead include this header.
+
+#include <windows.h>
+#include <wincrypt.h>
+
+// Undefine the macros which conflict with OpenSSL and define replacements. See
+// http://msdn.microsoft.com/en-us/library/windows/desktop/aa378145(v=vs.85).aspx
+#undef X509_CERT_PAIR
+#undef X509_EXTENSIONS
+#undef X509_NAME
+
+#define WINCRYPT_X509_CERT_PAIR ((LPCSTR) 53)
+#define WINCRYPT_X509_EXTENSIONS ((LPCSTR) 5)
+#define WINCRYPT_X509_NAME ((LPCSTR) 7)
+
+#endif // NET_CRYPTO_WINCRYPT_SHIM_H_
diff --git a/libchrome/dbus/BUILD.gn b/libchrome/dbus/BUILD.gn
new file mode 100644
index 0000000..28efb93
--- /dev/null
+++ b/libchrome/dbus/BUILD.gn
@@ -0,0 +1,134 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/features.gni")
+import("//testing/test.gni")
+import("//third_party/protobuf/proto_library.gni")
+
+assert(use_dbus)
+
+component("dbus") {
+ sources = [
+ "bus.cc",
+ "bus.h",
+ "dbus_export.h",
+ "dbus_statistics.cc",
+ "dbus_statistics.h",
+ "exported_object.cc",
+ "exported_object.h",
+ "file_descriptor.cc",
+ "file_descriptor.h",
+ "message.cc",
+ "message.h",
+ "object_manager.cc",
+ "object_manager.h",
+ "object_path.cc",
+ "object_path.h",
+ "object_proxy.cc",
+ "object_proxy.h",
+ "property.cc",
+ "property.h",
+ "scoped_dbus_error.cc",
+ "scoped_dbus_error.h",
+ "string_util.cc",
+ "string_util.h",
+ "util.cc",
+ "util.h",
+ "values_util.cc",
+ "values_util.h",
+ ]
+
+ defines = [ "DBUS_IMPLEMENTATION" ]
+
+ deps = [
+ "//third_party/protobuf:protobuf_lite",
+ ]
+ public_deps = [
+ "//base",
+ ]
+
+ public_configs = [ "//build/config/linux/dbus" ]
+}
+
+proto_library("test_proto") {
+ sources = [
+ "test_proto.proto",
+ ]
+}
+
+# This target contains mocks that can be used to write unit tests without
+# issuing actual D-Bus calls.
+static_library("test_support") {
+ testonly = true
+ sources = [
+ "mock_bus.cc",
+ "mock_bus.h",
+ "mock_exported_object.cc",
+ "mock_exported_object.h",
+ "mock_object_manager.cc",
+ "mock_object_manager.h",
+ "mock_object_proxy.cc",
+ "mock_object_proxy.h",
+ ]
+
+ public_deps = [
+ ":dbus",
+ ]
+ deps = [
+ "//testing/gmock",
+ ]
+
+ configs += [ "//build/config/linux/dbus" ]
+}
+
+test("dbus_unittests") {
+ sources = [
+ "bus_unittest.cc",
+ "dbus_statistics_unittest.cc",
+ "end_to_end_async_unittest.cc",
+ "end_to_end_sync_unittest.cc",
+ "message_unittest.cc",
+ "mock_unittest.cc",
+ "object_manager_unittest.cc",
+ "object_proxy_unittest.cc",
+ "property_unittest.cc",
+ "signal_sender_verification_unittest.cc",
+ "string_util_unittest.cc",
+ "test_service.cc",
+ "test_service.h",
+ "util_unittest.cc",
+ "values_util_unittest.cc",
+ ]
+
+ deps = [
+ ":dbus",
+ ":test_proto",
+ ":test_support",
+ "//base/test:run_all_unittests",
+ "//base/test:test_support",
+ "//testing/gmock",
+ "//testing/gtest",
+ "//third_party/protobuf:protobuf_lite",
+ ]
+
+ configs += [ "//build/config/linux/dbus" ]
+}
+
+executable("dbus_test_server") {
+ testonly = true
+ sources = [
+ "test_server.cc",
+ "test_service.cc",
+ "test_service.h",
+ ]
+
+ deps = [
+ ":dbus",
+ "//base",
+ "//base/test:test_support",
+ "//build/config/sanitizers:deps",
+ ]
+
+ configs += [ "//build/config/linux/dbus" ]
+}
diff --git a/libchrome/dbus/DEPS b/libchrome/dbus/DEPS
new file mode 100644
index 0000000..97db67c
--- /dev/null
+++ b/libchrome/dbus/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+third_party/protobuf",
+]
diff --git a/libchrome/dbus/OWNERS b/libchrome/dbus/OWNERS
new file mode 100644
index 0000000..04931c3
--- /dev/null
+++ b/libchrome/dbus/OWNERS
@@ -0,0 +1,3 @@
+hashimoto@chromium.org
+satorux@chromium.org
+stevenjb@chromium.org
diff --git a/libchrome/dbus/bus.cc b/libchrome/dbus/bus.cc
new file mode 100644
index 0000000..57834d3
--- /dev/null
+++ b/libchrome/dbus/bus.cc
@@ -0,0 +1,1198 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dbus/bus.h"
+
+#include <stddef.h>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/stl_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+#include "dbus/exported_object.h"
+#include "dbus/message.h"
+#include "dbus/object_manager.h"
+#include "dbus/object_path.h"
+#include "dbus/object_proxy.h"
+#include "dbus/scoped_dbus_error.h"
+
+namespace dbus {
+
+namespace {
+
+// The NameOwnerChanged member in org.freedesktop.DBus
+const char kNameOwnerChangedSignal[] = "NameOwnerChanged";
+
+// The match rule used to filter for changes to a given service name owner.
+const char kServiceNameOwnerChangeMatchRule[] =
+ "type='signal',interface='org.freedesktop.DBus',"
+ "member='NameOwnerChanged',path='/org/freedesktop/DBus',"
+ "sender='org.freedesktop.DBus',arg0='%s'";
+
+// The class is used for watching the file descriptor used for D-Bus
+// communication.
+class Watch : public base::MessagePumpLibevent::Watcher {
+ public:
+ explicit Watch(DBusWatch* watch)
+ : raw_watch_(watch) {
+ dbus_watch_set_data(raw_watch_, this, NULL);
+ }
+
+ ~Watch() override { dbus_watch_set_data(raw_watch_, NULL, NULL); }
+
+ // Returns true if the underlying file descriptor is ready to be watched.
+ bool IsReadyToBeWatched() {
+ return dbus_watch_get_enabled(raw_watch_);
+ }
+
+ // Starts watching the underlying file descriptor.
+ void StartWatching() {
+ const int file_descriptor = dbus_watch_get_unix_fd(raw_watch_);
+ const int flags = dbus_watch_get_flags(raw_watch_);
+
+ base::MessageLoopForIO::Mode mode = base::MessageLoopForIO::WATCH_READ;
+ if ((flags & DBUS_WATCH_READABLE) && (flags & DBUS_WATCH_WRITABLE))
+ mode = base::MessageLoopForIO::WATCH_READ_WRITE;
+ else if (flags & DBUS_WATCH_READABLE)
+ mode = base::MessageLoopForIO::WATCH_READ;
+ else if (flags & DBUS_WATCH_WRITABLE)
+ mode = base::MessageLoopForIO::WATCH_WRITE;
+ else
+ NOTREACHED();
+
+ const bool persistent = true; // Watch persistently.
+ const bool success = base::MessageLoopForIO::current()->WatchFileDescriptor(
+ file_descriptor, persistent, mode, &file_descriptor_watcher_, this);
+ CHECK(success) << "Unable to allocate memory";
+ }
+
+ // Stops watching the underlying file descriptor.
+ void StopWatching() {
+ file_descriptor_watcher_.StopWatchingFileDescriptor();
+ }
+
+ private:
+ // Implement MessagePumpLibevent::Watcher.
+ void OnFileCanReadWithoutBlocking(int /*file_descriptor*/) override {
+ const bool success = dbus_watch_handle(raw_watch_, DBUS_WATCH_READABLE);
+ CHECK(success) << "Unable to allocate memory";
+ }
+
+ // Implement MessagePumpLibevent::Watcher.
+ void OnFileCanWriteWithoutBlocking(int /*file_descriptor*/) override {
+ const bool success = dbus_watch_handle(raw_watch_, DBUS_WATCH_WRITABLE);
+ CHECK(success) << "Unable to allocate memory";
+ }
+
+ DBusWatch* raw_watch_;
+ base::MessagePumpLibevent::FileDescriptorWatcher file_descriptor_watcher_;
+};
+
+// The class is used for monitoring the timeout used for D-Bus method
+// calls.
+//
+// Unlike Watch, Timeout is a ref counted object, to ensure that |this| of
+// the object is alive when HandleTimeout() is called. It's unlikely,
+// but it may be possible that HandleTimeout() is called after
+// Bus::OnRemoveTimeout(). That's why we don't simply delete the object in
+// Bus::OnRemoveTimeout().
+class Timeout : public base::RefCountedThreadSafe<Timeout> {
+ public:
+ explicit Timeout(DBusTimeout* timeout)
+ : raw_timeout_(timeout),
+ monitoring_is_active_(false),
+ is_completed(false) {
+ dbus_timeout_set_data(raw_timeout_, this, NULL);
+ AddRef(); // Balanced on Complete().
+ }
+
+ // Returns true if the timeout is ready to be monitored.
+ bool IsReadyToBeMonitored() {
+ return dbus_timeout_get_enabled(raw_timeout_);
+ }
+
+ // Starts monitoring the timeout.
+ void StartMonitoring(Bus* bus) {
+ bus->GetDBusTaskRunner()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&Timeout::HandleTimeout, this),
+ GetInterval());
+ monitoring_is_active_ = true;
+ }
+
+ // Stops monitoring the timeout.
+ void StopMonitoring() {
+ // We cannot take back the delayed task we posted in
+ // StartMonitoring(), so we just mark the monitoring is inactive now.
+ monitoring_is_active_ = false;
+ }
+
+ // Returns the interval.
+ base::TimeDelta GetInterval() {
+ return base::TimeDelta::FromMilliseconds(
+ dbus_timeout_get_interval(raw_timeout_));
+ }
+
+ // Cleans up the raw_timeout and marks that timeout is completed.
+ // See the class comment above for why we are doing this.
+ void Complete() {
+ dbus_timeout_set_data(raw_timeout_, NULL, NULL);
+ is_completed = true;
+ Release();
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<Timeout>;
+ ~Timeout() {
+ }
+
+ // Handles the timeout.
+ void HandleTimeout() {
+ // If the timeout is marked completed, we should do nothing. This can
+ // occur if this function is called after Bus::OnRemoveTimeout().
+ if (is_completed)
+ return;
+ // Skip if monitoring is canceled.
+ if (!monitoring_is_active_)
+ return;
+
+ const bool success = dbus_timeout_handle(raw_timeout_);
+ CHECK(success) << "Unable to allocate memory";
+ }
+
+ DBusTimeout* raw_timeout_;
+ bool monitoring_is_active_;
+ bool is_completed;
+};
+
+} // namespace
+
+Bus::Options::Options()
+ : bus_type(SESSION),
+ connection_type(PRIVATE) {
+}
+
+Bus::Options::~Options() {
+}
+
+Bus::Bus(const Options& options)
+ : bus_type_(options.bus_type),
+ connection_type_(options.connection_type),
+ dbus_task_runner_(options.dbus_task_runner),
+ on_shutdown_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED),
+ connection_(NULL),
+ origin_thread_id_(base::PlatformThread::CurrentId()),
+ async_operations_set_up_(false),
+ shutdown_completed_(false),
+ num_pending_watches_(0),
+ num_pending_timeouts_(0),
+ address_(options.address) {
+ // This is safe to call multiple times.
+ dbus_threads_init_default();
+ // The origin message loop is unnecessary if the client uses synchronous
+ // functions only.
+ if (base::ThreadTaskRunnerHandle::IsSet())
+ origin_task_runner_ = base::ThreadTaskRunnerHandle::Get();
+}
+
+Bus::~Bus() {
+ DCHECK(!connection_);
+ DCHECK(owned_service_names_.empty());
+ DCHECK(match_rules_added_.empty());
+ DCHECK(filter_functions_added_.empty());
+ DCHECK(registered_object_paths_.empty());
+ DCHECK_EQ(0, num_pending_watches_);
+ // TODO(satorux): This check fails occasionally in browser_tests for tests
+ // that run very quickly. Perhaps something does not have time to clean up.
+ // Despite the check failing, the tests seem to run fine. crosbug.com/23416
+ // DCHECK_EQ(0, num_pending_timeouts_);
+}
+
+ObjectProxy* Bus::GetObjectProxy(const std::string& service_name,
+ const ObjectPath& object_path) {
+ return GetObjectProxyWithOptions(service_name, object_path,
+ ObjectProxy::DEFAULT_OPTIONS);
+}
+
+ObjectProxy* Bus::GetObjectProxyWithOptions(const std::string& service_name,
+ const ObjectPath& object_path,
+ int options) {
+ AssertOnOriginThread();
+
+ // Check if we already have the requested object proxy.
+ const ObjectProxyTable::key_type key(service_name + object_path.value(),
+ options);
+ ObjectProxyTable::iterator iter = object_proxy_table_.find(key);
+ if (iter != object_proxy_table_.end()) {
+ return iter->second.get();
+ }
+
+ scoped_refptr<ObjectProxy> object_proxy =
+ new ObjectProxy(this, service_name, object_path, options);
+ object_proxy_table_[key] = object_proxy;
+
+ return object_proxy.get();
+}
+
+bool Bus::RemoveObjectProxy(const std::string& service_name,
+ const ObjectPath& object_path,
+ const base::Closure& callback) {
+ return RemoveObjectProxyWithOptions(service_name, object_path,
+ ObjectProxy::DEFAULT_OPTIONS,
+ callback);
+}
+
+bool Bus::RemoveObjectProxyWithOptions(const std::string& service_name,
+ const ObjectPath& object_path,
+ int options,
+ const base::Closure& callback) {
+ AssertOnOriginThread();
+
+ // Check if we have the requested object proxy.
+ const ObjectProxyTable::key_type key(service_name + object_path.value(),
+ options);
+ ObjectProxyTable::iterator iter = object_proxy_table_.find(key);
+ if (iter != object_proxy_table_.end()) {
+ scoped_refptr<ObjectProxy> object_proxy = iter->second;
+ object_proxy_table_.erase(iter);
+ // Object is present. Remove it now and Detach on the DBus thread.
+ GetDBusTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&Bus::RemoveObjectProxyInternal,
+ this, object_proxy, callback));
+ return true;
+ }
+ return false;
+}
+
+void Bus::RemoveObjectProxyInternal(scoped_refptr<ObjectProxy> object_proxy,
+ const base::Closure& callback) {
+ AssertOnDBusThread();
+
+ object_proxy.get()->Detach();
+
+ GetOriginTaskRunner()->PostTask(FROM_HERE, callback);
+}
+
+ExportedObject* Bus::GetExportedObject(const ObjectPath& object_path) {
+ AssertOnOriginThread();
+
+ // Check if we already have the requested exported object.
+ ExportedObjectTable::iterator iter = exported_object_table_.find(object_path);
+ if (iter != exported_object_table_.end()) {
+ return iter->second.get();
+ }
+
+ scoped_refptr<ExportedObject> exported_object =
+ new ExportedObject(this, object_path);
+ exported_object_table_[object_path] = exported_object;
+
+ return exported_object.get();
+}
+
+void Bus::UnregisterExportedObject(const ObjectPath& object_path) {
+ AssertOnOriginThread();
+
+ // Remove the registered object from the table first, to allow a new
+ // GetExportedObject() call to return a new object, rather than this one.
+ ExportedObjectTable::iterator iter = exported_object_table_.find(object_path);
+ if (iter == exported_object_table_.end())
+ return;
+
+ scoped_refptr<ExportedObject> exported_object = iter->second;
+ exported_object_table_.erase(iter);
+
+ // Post the task to perform the final unregistration to the D-Bus thread.
+ // Since the registration also happens on the D-Bus thread in
+ // TryRegisterObjectPath(), and the task runner we post to is a
+ // SequencedTaskRunner, there is a guarantee that this will happen before any
+ // future registration call.
+ GetDBusTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&Bus::UnregisterExportedObjectInternal,
+ this, exported_object));
+}
+
+void Bus::UnregisterExportedObjectInternal(
+ scoped_refptr<ExportedObject> exported_object) {
+ AssertOnDBusThread();
+
+ exported_object->Unregister();
+}
+
+ObjectManager* Bus::GetObjectManager(const std::string& service_name,
+ const ObjectPath& object_path) {
+ AssertOnOriginThread();
+
+ // Check if we already have the requested object manager.
+ const ObjectManagerTable::key_type key(service_name + object_path.value());
+ ObjectManagerTable::iterator iter = object_manager_table_.find(key);
+ if (iter != object_manager_table_.end()) {
+ return iter->second.get();
+ }
+
+ scoped_refptr<ObjectManager> object_manager =
+ new ObjectManager(this, service_name, object_path);
+ object_manager_table_[key] = object_manager;
+
+ return object_manager.get();
+}
+
+bool Bus::RemoveObjectManager(const std::string& service_name,
+ const ObjectPath& object_path,
+ const base::Closure& callback) {
+ AssertOnOriginThread();
+ DCHECK(!callback.is_null());
+
+ const ObjectManagerTable::key_type key(service_name + object_path.value());
+ ObjectManagerTable::iterator iter = object_manager_table_.find(key);
+ if (iter == object_manager_table_.end())
+ return false;
+
+ // ObjectManager is present. Remove it now and CleanUp on the DBus thread.
+ scoped_refptr<ObjectManager> object_manager = iter->second;
+ object_manager_table_.erase(iter);
+
+ GetDBusTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&Bus::RemoveObjectManagerInternal,
+ this, object_manager, callback));
+
+ return true;
+}
+
+void Bus::RemoveObjectManagerInternal(
+ scoped_refptr<dbus::ObjectManager> object_manager,
+ const base::Closure& callback) {
+ AssertOnDBusThread();
+ DCHECK(object_manager.get());
+
+ object_manager->CleanUp();
+
+ // The ObjectManager has to be deleted on the origin thread since it was
+ // created there.
+ GetOriginTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&Bus::RemoveObjectManagerInternalHelper,
+ this, object_manager, callback));
+}
+
+void Bus::RemoveObjectManagerInternalHelper(
+ scoped_refptr<dbus::ObjectManager> object_manager,
+ const base::Closure& callback) {
+ AssertOnOriginThread();
+ DCHECK(object_manager.get());
+
+ // Release the object manager and run the callback.
+ object_manager = NULL;
+ callback.Run();
+}
+
+void Bus::GetManagedObjects() {
+ for (ObjectManagerTable::iterator iter = object_manager_table_.begin();
+ iter != object_manager_table_.end(); ++iter) {
+ iter->second->GetManagedObjects();
+ }
+}
+
+bool Bus::Connect() {
+ // dbus_bus_get_private() and dbus_bus_get() are blocking calls.
+ AssertOnDBusThread();
+
+ // Check if it's already initialized.
+ if (connection_)
+ return true;
+
+ ScopedDBusError error;
+ if (bus_type_ == CUSTOM_ADDRESS) {
+ if (connection_type_ == PRIVATE) {
+ connection_ = dbus_connection_open_private(address_.c_str(), error.get());
+ } else {
+ connection_ = dbus_connection_open(address_.c_str(), error.get());
+ }
+ } else {
+ const DBusBusType dbus_bus_type = static_cast<DBusBusType>(bus_type_);
+ if (connection_type_ == PRIVATE) {
+ connection_ = dbus_bus_get_private(dbus_bus_type, error.get());
+ } else {
+ connection_ = dbus_bus_get(dbus_bus_type, error.get());
+ }
+ }
+ if (!connection_) {
+ LOG(ERROR) << "Failed to connect to the bus: "
+ << (error.is_set() ? error.message() : "");
+ return false;
+ }
+
+ if (bus_type_ == CUSTOM_ADDRESS) {
+    // We need to call dbus_bus_register here, otherwise a unique name cannot
+    // be acquired. According to the D-Bus specification, the caller is
+    // responsible for invoking the org.freedesktop.DBus.Hello method at the
+    // beginning of the bus connection to acquire a unique name. In the case
+    // of dbus_bus_get, dbus_bus_register is called internally.
+ if (!dbus_bus_register(connection_, error.get())) {
+ LOG(ERROR) << "Failed to register the bus component: "
+ << (error.is_set() ? error.message() : "");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void Bus::ClosePrivateConnection() {
+  // dbus_connection_close() is a blocking call.
+ AssertOnDBusThread();
+ DCHECK_EQ(PRIVATE, connection_type_)
+ << "non-private connection should not be closed";
+ dbus_connection_close(connection_);
+}
+
+void Bus::ShutdownAndBlock() {
+ AssertOnDBusThread();
+
+ if (shutdown_completed_)
+    return;  // Already shut down, just return.
+
+ // Unregister the exported objects.
+ for (ExportedObjectTable::iterator iter = exported_object_table_.begin();
+ iter != exported_object_table_.end(); ++iter) {
+ iter->second->Unregister();
+ }
+
+ // Release all service names.
+ for (std::set<std::string>::iterator iter = owned_service_names_.begin();
+ iter != owned_service_names_.end();) {
+ // This is a bit tricky but we should increment the iter here as
+ // ReleaseOwnership() may remove |service_name| from the set.
+ const std::string& service_name = *iter++;
+ ReleaseOwnership(service_name);
+ }
+ if (!owned_service_names_.empty()) {
+ LOG(ERROR) << "Failed to release all service names. # of services left: "
+ << owned_service_names_.size();
+ }
+
+ // Detach from the remote objects.
+ for (ObjectProxyTable::iterator iter = object_proxy_table_.begin();
+ iter != object_proxy_table_.end(); ++iter) {
+ iter->second->Detach();
+ }
+
+ // Clean up the object managers.
+ for (ObjectManagerTable::iterator iter = object_manager_table_.begin();
+ iter != object_manager_table_.end(); ++iter) {
+ iter->second->CleanUp();
+ }
+
+ // Release object proxies and exported objects here. We should do this
+ // here rather than in the destructor to avoid memory leaks due to
+ // cyclic references.
+ object_proxy_table_.clear();
+ exported_object_table_.clear();
+
+ // Private connection should be closed.
+ if (connection_) {
+ // Remove Disconnected watcher.
+ ScopedDBusError error;
+
+ if (connection_type_ == PRIVATE)
+ ClosePrivateConnection();
+ // dbus_connection_close() won't unref.
+ dbus_connection_unref(connection_);
+ }
+
+ connection_ = NULL;
+ shutdown_completed_ = true;
+}
+
+void Bus::ShutdownOnDBusThreadAndBlock() {
+ AssertOnOriginThread();
+ DCHECK(dbus_task_runner_.get());
+
+ GetDBusTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&Bus::ShutdownOnDBusThreadAndBlockInternal, this));
+
+ // http://crbug.com/125222
+ base::ThreadRestrictions::ScopedAllowWait allow_wait;
+
+ // Wait until the shutdown is complete on the D-Bus thread.
+ // The shutdown should not hang, but set timeout just in case.
+ const int kTimeoutSecs = 3;
+ const base::TimeDelta timeout(base::TimeDelta::FromSeconds(kTimeoutSecs));
+ const bool signaled = on_shutdown_.TimedWait(timeout);
+ LOG_IF(ERROR, !signaled) << "Failed to shutdown the bus";
+}
+
+void Bus::RequestOwnership(const std::string& service_name,
+ ServiceOwnershipOptions options,
+ OnOwnershipCallback on_ownership_callback) {
+ AssertOnOriginThread();
+
+ GetDBusTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&Bus::RequestOwnershipInternal,
+ this, service_name, options, on_ownership_callback));
+}
+
+void Bus::RequestOwnershipInternal(const std::string& service_name,
+ ServiceOwnershipOptions options,
+ OnOwnershipCallback on_ownership_callback) {
+ AssertOnDBusThread();
+
+ bool success = Connect();
+ if (success)
+ success = RequestOwnershipAndBlock(service_name, options);
+
+ GetOriginTaskRunner()->PostTask(FROM_HERE,
+ base::Bind(on_ownership_callback,
+ service_name,
+ success));
+}
+
+bool Bus::RequestOwnershipAndBlock(const std::string& service_name,
+ ServiceOwnershipOptions options) {
+ DCHECK(connection_);
+ // dbus_bus_request_name() is a blocking call.
+ AssertOnDBusThread();
+
+ // Check if we already own the service name.
+ if (owned_service_names_.find(service_name) != owned_service_names_.end()) {
+ return true;
+ }
+
+ ScopedDBusError error;
+ const int result = dbus_bus_request_name(connection_,
+ service_name.c_str(),
+ options,
+ error.get());
+ if (result != DBUS_REQUEST_NAME_REPLY_PRIMARY_OWNER) {
+ LOG(ERROR) << "Failed to get the ownership of " << service_name << ": "
+ << (error.is_set() ? error.message() : "");
+ return false;
+ }
+ owned_service_names_.insert(service_name);
+ return true;
+}
+
+bool Bus::ReleaseOwnership(const std::string& service_name) {
+ DCHECK(connection_);
+  // dbus_bus_release_name() is a blocking call.
+ AssertOnDBusThread();
+
+ // Check if we already own the service name.
+ std::set<std::string>::iterator found =
+ owned_service_names_.find(service_name);
+ if (found == owned_service_names_.end()) {
+ LOG(ERROR) << service_name << " is not owned by the bus";
+ return false;
+ }
+
+ ScopedDBusError error;
+ const int result = dbus_bus_release_name(connection_, service_name.c_str(),
+ error.get());
+ if (result == DBUS_RELEASE_NAME_REPLY_RELEASED) {
+ owned_service_names_.erase(found);
+ return true;
+ } else {
+ LOG(ERROR) << "Failed to release the ownership of " << service_name << ": "
+ << (error.is_set() ? error.message() : "")
+ << ", result code: " << result;
+ return false;
+ }
+}
+
+bool Bus::SetUpAsyncOperations() {
+ DCHECK(connection_);
+ AssertOnDBusThread();
+
+ if (async_operations_set_up_)
+ return true;
+
+ // Process all the incoming data if any, so that OnDispatchStatus() will
+ // be called when the incoming data is ready.
+ ProcessAllIncomingDataIfAny();
+
+ bool success = dbus_connection_set_watch_functions(connection_,
+ &Bus::OnAddWatchThunk,
+ &Bus::OnRemoveWatchThunk,
+ &Bus::OnToggleWatchThunk,
+ this,
+ NULL);
+ CHECK(success) << "Unable to allocate memory";
+
+ success = dbus_connection_set_timeout_functions(connection_,
+ &Bus::OnAddTimeoutThunk,
+ &Bus::OnRemoveTimeoutThunk,
+ &Bus::OnToggleTimeoutThunk,
+ this,
+ NULL);
+ CHECK(success) << "Unable to allocate memory";
+
+ dbus_connection_set_dispatch_status_function(
+ connection_,
+ &Bus::OnDispatchStatusChangedThunk,
+ this,
+ NULL);
+
+ async_operations_set_up_ = true;
+
+ return true;
+}
+
+DBusMessage* Bus::SendWithReplyAndBlock(DBusMessage* request,
+ int timeout_ms,
+ DBusError* error) {
+ DCHECK(connection_);
+ AssertOnDBusThread();
+
+ return dbus_connection_send_with_reply_and_block(
+ connection_, request, timeout_ms, error);
+}
+
+void Bus::SendWithReply(DBusMessage* request,
+ DBusPendingCall** pending_call,
+ int timeout_ms) {
+ DCHECK(connection_);
+ AssertOnDBusThread();
+
+ const bool success = dbus_connection_send_with_reply(
+ connection_, request, pending_call, timeout_ms);
+ CHECK(success) << "Unable to allocate memory";
+}
+
+void Bus::Send(DBusMessage* request, uint32_t* serial) {
+ DCHECK(connection_);
+ AssertOnDBusThread();
+
+ const bool success = dbus_connection_send(connection_, request, serial);
+ CHECK(success) << "Unable to allocate memory";
+}
+
+void Bus::AddFilterFunction(DBusHandleMessageFunction filter_function,
+ void* user_data) {
+ DCHECK(connection_);
+ AssertOnDBusThread();
+
+ std::pair<DBusHandleMessageFunction, void*> filter_data_pair =
+ std::make_pair(filter_function, user_data);
+ if (filter_functions_added_.find(filter_data_pair) !=
+ filter_functions_added_.end()) {
+ VLOG(1) << "Filter function already exists: " << filter_function
+ << " with associated data: " << user_data;
+ return;
+ }
+
+ const bool success = dbus_connection_add_filter(
+ connection_, filter_function, user_data, NULL);
+ CHECK(success) << "Unable to allocate memory";
+ filter_functions_added_.insert(filter_data_pair);
+}
+
+void Bus::RemoveFilterFunction(DBusHandleMessageFunction filter_function,
+ void* user_data) {
+ DCHECK(connection_);
+ AssertOnDBusThread();
+
+ std::pair<DBusHandleMessageFunction, void*> filter_data_pair =
+ std::make_pair(filter_function, user_data);
+ if (filter_functions_added_.find(filter_data_pair) ==
+ filter_functions_added_.end()) {
+ VLOG(1) << "Requested to remove an unknown filter function: "
+ << filter_function
+ << " with associated data: " << user_data;
+ return;
+ }
+
+ dbus_connection_remove_filter(connection_, filter_function, user_data);
+ filter_functions_added_.erase(filter_data_pair);
+}
+
+void Bus::AddMatch(const std::string& match_rule, DBusError* error) {
+ DCHECK(connection_);
+ AssertOnDBusThread();
+
+ std::map<std::string, int>::iterator iter =
+ match_rules_added_.find(match_rule);
+ if (iter != match_rules_added_.end()) {
+ // The already existing rule's counter is incremented.
+ iter->second++;
+
+ VLOG(1) << "Match rule already exists: " << match_rule;
+ return;
+ }
+
+ dbus_bus_add_match(connection_, match_rule.c_str(), error);
+ match_rules_added_[match_rule] = 1;
+}
+
+bool Bus::RemoveMatch(const std::string& match_rule, DBusError* error) {
+ DCHECK(connection_);
+ AssertOnDBusThread();
+
+ std::map<std::string, int>::iterator iter =
+ match_rules_added_.find(match_rule);
+ if (iter == match_rules_added_.end()) {
+ LOG(ERROR) << "Requested to remove an unknown match rule: " << match_rule;
+ return false;
+ }
+
+  // The rule's counter is decremented and the rule is deleted when it reaches 0.
+ iter->second--;
+ if (iter->second == 0) {
+ dbus_bus_remove_match(connection_, match_rule.c_str(), error);
+ match_rules_added_.erase(match_rule);
+ }
+ return true;
+}
+
+bool Bus::TryRegisterObjectPath(const ObjectPath& object_path,
+ const DBusObjectPathVTable* vtable,
+ void* user_data,
+ DBusError* error) {
+ DCHECK(connection_);
+ AssertOnDBusThread();
+
+ if (registered_object_paths_.find(object_path) !=
+ registered_object_paths_.end()) {
+ LOG(ERROR) << "Object path already registered: " << object_path.value();
+ return false;
+ }
+
+ const bool success = dbus_connection_try_register_object_path(
+ connection_,
+ object_path.value().c_str(),
+ vtable,
+ user_data,
+ error);
+ if (success)
+ registered_object_paths_.insert(object_path);
+ return success;
+}
+
+void Bus::UnregisterObjectPath(const ObjectPath& object_path) {
+ DCHECK(connection_);
+ AssertOnDBusThread();
+
+ if (registered_object_paths_.find(object_path) ==
+ registered_object_paths_.end()) {
+ LOG(ERROR) << "Requested to unregister an unknown object path: "
+ << object_path.value();
+ return;
+ }
+
+ const bool success = dbus_connection_unregister_object_path(
+ connection_,
+ object_path.value().c_str());
+ CHECK(success) << "Unable to allocate memory";
+ registered_object_paths_.erase(object_path);
+}
+
+void Bus::ShutdownOnDBusThreadAndBlockInternal() {
+ AssertOnDBusThread();
+
+ ShutdownAndBlock();
+ on_shutdown_.Signal();
+}
+
+void Bus::ProcessAllIncomingDataIfAny() {
+ AssertOnDBusThread();
+
+ // As mentioned at the class comment in .h file, connection_ can be NULL.
+ if (!connection_)
+ return;
+
+ // It is safe and necessary to call dbus_connection_get_dispatch_status even
+ // if the connection is lost.
+ if (dbus_connection_get_dispatch_status(connection_) ==
+ DBUS_DISPATCH_DATA_REMAINS) {
+ while (dbus_connection_dispatch(connection_) ==
+ DBUS_DISPATCH_DATA_REMAINS) {
+ }
+ }
+}
+
+base::TaskRunner* Bus::GetDBusTaskRunner() {
+ if (dbus_task_runner_.get())
+ return dbus_task_runner_.get();
+ else
+ return GetOriginTaskRunner();
+}
+
+base::TaskRunner* Bus::GetOriginTaskRunner() {
+ DCHECK(origin_task_runner_.get());
+ return origin_task_runner_.get();
+}
+
+bool Bus::HasDBusThread() {
+ return dbus_task_runner_.get() != NULL;
+}
+
+void Bus::AssertOnOriginThread() {
+ DCHECK_EQ(origin_thread_id_, base::PlatformThread::CurrentId());
+}
+
+void Bus::AssertOnDBusThread() {
+ base::ThreadRestrictions::AssertIOAllowed();
+
+ if (dbus_task_runner_.get()) {
+ DCHECK(dbus_task_runner_->RunsTasksOnCurrentThread());
+ } else {
+ AssertOnOriginThread();
+ }
+}
+
+std::string Bus::GetServiceOwnerAndBlock(const std::string& service_name,
+ GetServiceOwnerOption options) {
+ AssertOnDBusThread();
+
+ MethodCall get_name_owner_call("org.freedesktop.DBus", "GetNameOwner");
+ MessageWriter writer(&get_name_owner_call);
+ writer.AppendString(service_name);
+ VLOG(1) << "Method call: " << get_name_owner_call.ToString();
+
+ const ObjectPath obj_path("/org/freedesktop/DBus");
+ if (!get_name_owner_call.SetDestination("org.freedesktop.DBus") ||
+ !get_name_owner_call.SetPath(obj_path)) {
+ if (options == REPORT_ERRORS)
+ LOG(ERROR) << "Failed to get name owner.";
+ return "";
+ }
+
+ ScopedDBusError error;
+ DBusMessage* response_message =
+ SendWithReplyAndBlock(get_name_owner_call.raw_message(),
+ ObjectProxy::TIMEOUT_USE_DEFAULT,
+ error.get());
+ if (!response_message) {
+ if (options == REPORT_ERRORS) {
+ LOG(ERROR) << "Failed to get name owner. Got " << error.name() << ": "
+ << error.message();
+ }
+ return "";
+ }
+
+ std::unique_ptr<Response> response(
+ Response::FromRawMessage(response_message));
+ MessageReader reader(response.get());
+
+ std::string service_owner;
+ if (!reader.PopString(&service_owner))
+ service_owner.clear();
+ return service_owner;
+}
+
+void Bus::GetServiceOwner(const std::string& service_name,
+ const GetServiceOwnerCallback& callback) {
+ AssertOnOriginThread();
+
+ GetDBusTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&Bus::GetServiceOwnerInternal, this, service_name, callback));
+}
+
+// D-Bus-thread half of GetServiceOwner(). Resolves the owner (errors
+// suppressed, "" on failure) and posts |callback| back to the origin thread.
+void Bus::GetServiceOwnerInternal(const std::string& service_name,
+                                  const GetServiceOwnerCallback& callback) {
+  AssertOnDBusThread();
+
+  const std::string owner =
+      Connect() ? GetServiceOwnerAndBlock(service_name, SUPPRESS_ERRORS)
+                : std::string();
+  GetOriginTaskRunner()->PostTask(FROM_HERE, base::Bind(callback, owner));
+}
+
+// Registers |callback| to observe owner changes of |service_name|; the real
+// registration work happens on the D-Bus thread.
+void Bus::ListenForServiceOwnerChange(
+    const std::string& service_name,
+    const GetServiceOwnerCallback& callback) {
+  AssertOnOriginThread();
+  DCHECK(!service_name.empty());
+  DCHECK(!callback.is_null());
+
+  base::Closure task = base::Bind(&Bus::ListenForServiceOwnerChangeInternal,
+                                  this, service_name, callback);
+  GetDBusTaskRunner()->PostTask(FROM_HERE, task);
+}
+
+// D-Bus-thread half of ListenForServiceOwnerChange(): installs the
+// NameOwnerChanged machinery for |service_name| and records |callback|.
+void Bus::ListenForServiceOwnerChangeInternal(
+    const std::string& service_name,
+    const GetServiceOwnerCallback& callback) {
+  AssertOnDBusThread();
+  DCHECK(!service_name.empty());
+  DCHECK(!callback.is_null());
+
+  if (!Connect() || !SetUpAsyncOperations())
+    return;
+
+  // Lazily install the message filter the first time anyone listens.
+  if (service_owner_changed_listener_map_.empty())
+    AddFilterFunction(Bus::OnServiceOwnerChangedFilter, this);
+
+  ServiceOwnerChangedListenerMap::iterator it =
+      service_owner_changed_listener_map_.find(service_name);
+  if (it == service_owner_changed_listener_map_.end()) {
+    // First listener for this service: add a match rule so the daemon routes
+    // its NameOwnerChanged signals to this connection.
+    const std::string name_owner_changed_match_rule =
+        base::StringPrintf(kServiceNameOwnerChangeMatchRule,
+                           service_name.c_str());
+    ScopedDBusError error;
+    AddMatch(name_owner_changed_match_rule, error.get());
+    if (error.is_set()) {
+      // NOTE(review): on this failure path the filter installed above stays
+      // registered while the map may remain empty — presumably
+      // AddFilterFunction() tolerates the duplicate add on a retry; verify.
+      LOG(ERROR) << "Failed to add match rule for " << service_name
+                 << ". Got " << error.name() << ": " << error.message();
+      return;
+    }
+
+    service_owner_changed_listener_map_[service_name].push_back(callback);
+    return;
+  }
+
+  // Service already tracked: append |callback| unless it was already added.
+  std::vector<GetServiceOwnerCallback>& callbacks = it->second;
+  for (size_t i = 0; i < callbacks.size(); ++i) {
+    if (callbacks[i].Equals(callback))
+      return;
+  }
+  callbacks.push_back(callback);
+}
+
+// Deregisters |callback| for |service_name| owner changes; the actual
+// bookkeeping runs on the D-Bus thread.
+void Bus::UnlistenForServiceOwnerChange(
+    const std::string& service_name,
+    const GetServiceOwnerCallback& callback) {
+  AssertOnOriginThread();
+  DCHECK(!service_name.empty());
+  DCHECK(!callback.is_null());
+
+  base::Closure task = base::Bind(&Bus::UnlistenForServiceOwnerChangeInternal,
+                                  this, service_name, callback);
+  GetDBusTaskRunner()->PostTask(FROM_HERE, task);
+}
+
+// D-Bus-thread half of UnlistenForServiceOwnerChange(): removes |callback|
+// for |service_name| and tears down the per-service match rule (and the
+// global filter) once nothing needs them anymore.
+void Bus::UnlistenForServiceOwnerChangeInternal(
+    const std::string& service_name,
+    const GetServiceOwnerCallback& callback) {
+  AssertOnDBusThread();
+  DCHECK(!service_name.empty());
+  DCHECK(!callback.is_null());
+
+  ServiceOwnerChangedListenerMap::iterator it =
+      service_owner_changed_listener_map_.find(service_name);
+  if (it == service_owner_changed_listener_map_.end())
+    return;
+
+  std::vector<GetServiceOwnerCallback>& callbacks = it->second;
+  for (size_t i = 0; i < callbacks.size(); ++i) {
+    if (callbacks[i].Equals(callback)) {
+      callbacks.erase(callbacks.begin() + i);
+      break;  // There can be only one.
+    }
+  }
+  if (!callbacks.empty())
+    return;
+
+  // Last callback for |service_name| has been removed, remove match rule.
+  const std::string name_owner_changed_match_rule =
+      base::StringPrintf(kServiceNameOwnerChangeMatchRule,
+                         service_name.c_str());
+  ScopedDBusError error;
+  // NOTE(review): a RemoveMatch() failure is silently ignored here; |error|
+  // is captured but never inspected.
+  RemoveMatch(name_owner_changed_match_rule, error.get());
+  // And remove |service_owner_changed_listener_map_| entry.
+  service_owner_changed_listener_map_.erase(it);
+
+  // Nobody is listening anymore: uninstall the filter added by
+  // ListenForServiceOwnerChangeInternal().
+  if (service_owner_changed_listener_map_.empty())
+    RemoveFilterFunction(Bus::OnServiceOwnerChangedFilter, this);
+}
+
+// Returns this connection's unique bus name, or "" when not connected.
+std::string Bus::GetConnectionName() {
+  return connection_ ? dbus_bus_get_unique_name(connection_) : "";
+}
+
+// libdbus asked us to monitor a file descriptor. The Watch wrapper lives
+// until OnRemoveWatch() deletes it via |raw_watch|'s data slot.
+dbus_bool_t Bus::OnAddWatch(DBusWatch* raw_watch) {
+  AssertOnDBusThread();
+
+  Watch* const watch = new Watch(raw_watch);
+  if (watch->IsReadyToBeWatched())
+    watch->StartWatching();
+  ++num_pending_watches_;
+  return true;
+}
+
+// Reclaims the Watch wrapper allocated in OnAddWatch().
+void Bus::OnRemoveWatch(DBusWatch* raw_watch) {
+  AssertOnDBusThread();
+
+  delete static_cast<Watch*>(dbus_watch_get_data(raw_watch));
+  --num_pending_watches_;
+}
+
+// libdbus toggled the "enabled" flag of |raw_watch|; start or stop
+// monitoring accordingly.
+void Bus::OnToggleWatch(DBusWatch* raw_watch) {
+  AssertOnDBusThread();
+
+  Watch* const watch = static_cast<Watch*>(dbus_watch_get_data(raw_watch));
+  if (!watch->IsReadyToBeWatched()) {
+    // It's safe to call this if StartWatching() wasn't called, per
+    // message_pump_libevent.h.
+    watch->StopWatching();
+    return;
+  }
+  watch->StartWatching();
+}
+
+// libdbus asked us to track a method-call timeout. The Timeout wrapper is
+// released when OnRemoveTimeout() runs for |raw_timeout|.
+dbus_bool_t Bus::OnAddTimeout(DBusTimeout* raw_timeout) {
+  AssertOnDBusThread();
+
+  Timeout* const timeout = new Timeout(raw_timeout);
+  if (timeout->IsReadyToBeMonitored())
+    timeout->StartMonitoring(this);
+  ++num_pending_timeouts_;
+  return true;
+}
+
+// Finishes the Timeout created in OnAddTimeout() for |raw_timeout|.
+void Bus::OnRemoveTimeout(DBusTimeout* raw_timeout) {
+  AssertOnDBusThread();
+
+  static_cast<Timeout*>(dbus_timeout_get_data(raw_timeout))->Complete();
+  --num_pending_timeouts_;
+}
+
+// libdbus toggled the "enabled" flag of |raw_timeout|; start or stop
+// monitoring accordingly.
+void Bus::OnToggleTimeout(DBusTimeout* raw_timeout) {
+  AssertOnDBusThread();
+
+  Timeout* const timeout =
+      static_cast<Timeout*>(dbus_timeout_get_data(raw_timeout));
+  if (!timeout->IsReadyToBeMonitored()) {
+    timeout->StopMonitoring();
+    return;
+  }
+  timeout->StartMonitoring(this);
+}
+
+// Invoked by libdbus when incoming data becomes available for dispatch.
+void Bus::OnDispatchStatusChanged(DBusConnection* connection,
+                                  DBusDispatchStatus /*status*/) {
+  DCHECK_EQ(connection, connection_);
+  AssertOnDBusThread();
+
+  // Calling dbus_connection_dispatch() from inside a
+  // DBusDispatchStatusFunction is prohibited by the D-Bus library (see
+  // dbus_connection_set_dispatch_status_function()), so rather than invoking
+  // ProcessAllIncomingDataIfAny() directly we defer it to a posted task.
+  GetDBusTaskRunner()->PostTask(
+      FROM_HERE, base::Bind(&Bus::ProcessAllIncomingDataIfAny, this));
+}
+
+// Runs on the D-Bus thread when a NameOwnerChanged signal passes the filter.
+// Validates the signal, extracts (service, old owner, new owner), and posts
+// each registered callback for that service to the origin thread with the
+// new owner name (empty when the service went away).
+void Bus::OnServiceOwnerChanged(DBusMessage* message) {
+  DCHECK(message);
+  AssertOnDBusThread();
+
+  // |message| will be unrefed on exit of the function. Increment the
+  // reference so we can use it in Signal::FromRawMessage() below.
+  dbus_message_ref(message);
+  std::unique_ptr<Signal> signal(Signal::FromRawMessage(message));
+
+  // Confirm the validity of the NameOwnerChanged signal: correct member, the
+  // org.freedesktop.DBus interface, and sent by the bus daemon itself.
+  if (signal->GetMember() != kNameOwnerChangedSignal ||
+      signal->GetInterface() != DBUS_INTERFACE_DBUS ||
+      signal->GetSender() != DBUS_SERVICE_DBUS) {
+    return;
+  }
+
+  // The signal payload is three strings; silently drop malformed signals.
+  MessageReader reader(signal.get());
+  std::string service_name;
+  std::string old_owner;
+  std::string new_owner;
+  if (!reader.PopString(&service_name) ||
+      !reader.PopString(&old_owner) ||
+      !reader.PopString(&new_owner)) {
+    return;
+  }
+
+  ServiceOwnerChangedListenerMap::const_iterator it =
+      service_owner_changed_listener_map_.find(service_name);
+  if (it == service_owner_changed_listener_map_.end())
+    return;
+
+  // Callbacks must run on the origin thread; only |new_owner| is forwarded.
+  const std::vector<GetServiceOwnerCallback>& callbacks = it->second;
+  for (size_t i = 0; i < callbacks.size(); ++i) {
+    GetOriginTaskRunner()->PostTask(FROM_HERE,
+                                    base::Bind(callbacks[i], new_owner));
+  }
+}
+
+// static
+dbus_bool_t Bus::OnAddWatchThunk(DBusWatch* raw_watch, void* data) {
+  // |data| is the Bus* registered with libdbus alongside this callback.
+  return static_cast<Bus*>(data)->OnAddWatch(raw_watch);
+}
+
+// static
+void Bus::OnRemoveWatchThunk(DBusWatch* raw_watch, void* data) {
+  // |data| is the Bus* registered with libdbus alongside this callback.
+  static_cast<Bus*>(data)->OnRemoveWatch(raw_watch);
+}
+
+// static
+void Bus::OnToggleWatchThunk(DBusWatch* raw_watch, void* data) {
+  // |data| is the Bus* registered with libdbus alongside this callback.
+  static_cast<Bus*>(data)->OnToggleWatch(raw_watch);
+}
+
+// static
+dbus_bool_t Bus::OnAddTimeoutThunk(DBusTimeout* raw_timeout, void* data) {
+  // |data| is the Bus* registered with libdbus alongside this callback.
+  return static_cast<Bus*>(data)->OnAddTimeout(raw_timeout);
+}
+
+// static
+void Bus::OnRemoveTimeoutThunk(DBusTimeout* raw_timeout, void* data) {
+  // |data| is the Bus* registered with libdbus alongside this callback.
+  static_cast<Bus*>(data)->OnRemoveTimeout(raw_timeout);
+}
+
+// static
+void Bus::OnToggleTimeoutThunk(DBusTimeout* raw_timeout, void* data) {
+  // |data| is the Bus* registered with libdbus alongside this callback.
+  static_cast<Bus*>(data)->OnToggleTimeout(raw_timeout);
+}
+
+// static
+void Bus::OnDispatchStatusChangedThunk(DBusConnection* connection,
+                                       DBusDispatchStatus status,
+                                       void* data) {
+  // |data| is the Bus* registered with libdbus alongside this callback.
+  static_cast<Bus*>(data)->OnDispatchStatusChanged(connection, status);
+}
+
+// static
+DBusHandlerResult Bus::OnServiceOwnerChangedFilter(
+    DBusConnection* /*connection*/,
+    DBusMessage* message,
+    void* data) {
+  const bool is_name_owner_changed =
+      dbus_message_is_signal(message,
+                             DBUS_INTERFACE_DBUS,
+                             kNameOwnerChangedSignal);
+  if (is_name_owner_changed)
+    static_cast<Bus*>(data)->OnServiceOwnerChanged(message);
+  // Always return unhandled to let others, e.g. ObjectProxies, handle the same
+  // signal.
+  return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
+}
+
+} // namespace dbus
diff --git a/libchrome/dbus/bus.h b/libchrome/dbus/bus.h
new file mode 100644
index 0000000..7d39159
--- /dev/null
+++ b/libchrome/dbus/bus.h
@@ -0,0 +1,766 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_BUS_H_
+#define DBUS_BUS_H_
+
+#include <dbus/dbus.h>
+#include <stdint.h>
+
+#include <map>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "dbus/dbus_export.h"
+#include "dbus/object_path.h"
+
+namespace base {
+class SequencedTaskRunner;
+class SingleThreadTaskRunner;
+class TaskRunner;
+}
+
+namespace tracked_objects {
+class Location;
+}
+
+namespace dbus {
+
+class ExportedObject;
+class ObjectManager;
+class ObjectProxy;
+
+// Bus is used to establish a connection with D-Bus, create object
+// proxies, and export objects.
+//
+// For asynchronous operations such as an asynchronous method call, the
+// bus object will use a task runner to monitor the underlying file
+// descriptor used for D-Bus communication. By default, the bus will use
+// the current thread's task runner. If |dbus_task_runner| option is
+// specified, the bus will use that task runner instead.
+//
+// THREADING
+//
+// In the D-Bus library, we use the two threads:
+//
+// - The origin thread: the thread that created the Bus object.
+// - The D-Bus thread: the thread servicing |dbus_task_runner|.
+//
+// The origin thread is usually Chrome's UI thread. The D-Bus thread is
+// usually a dedicated thread for the D-Bus library.
+//
+// BLOCKING CALLS
+//
+// Functions that issue blocking calls are marked "BLOCKING CALL" and
+// these functions should be called in the D-Bus thread (if
+// supplied). AssertOnDBusThread() is placed in these functions.
+//
+// Note that it's hard to tell if a libdbus function is actually blocking
+// or not (ex. dbus_bus_request_name() internally calls
+// dbus_connection_send_with_reply_and_block(), which is a blocking
+// call). To err on the safe side, we consider all libdbus functions that
+// deal with the connection to dbus-daemon to be blocking.
+//
+// SHUTDOWN
+//
+// The Bus object must be shut down manually by ShutdownAndBlock() and
+// friends. We require the manual shutdown to make the operation explicit
+// rather than doing it silently in the destructor.
+//
+// EXAMPLE USAGE:
+//
+// Synchronous method call:
+//
+// dbus::Bus::Options options;
+// // Set up the bus options here.
+// ...
+// dbus::Bus bus(options);
+//
+// dbus::ObjectProxy* object_proxy =
+// bus.GetObjectProxy(service_name, object_path);
+//
+// dbus::MethodCall method_call(interface_name, method_name);
+// std::unique_ptr<dbus::Response> response(
+// object_proxy.CallMethodAndBlock(&method_call, timeout_ms));
+// if (response.get() != NULL) { // Success.
+// ...
+// }
+//
+// Asynchronous method call:
+//
+// void OnResponse(dbus::Response* response) {
+// // response is NULL if the method call failed.
+// if (!response)
+// return;
+// }
+//
+// ...
+// object_proxy.CallMethod(&method_call, timeout_ms,
+// base::Bind(&OnResponse));
+//
+// Exporting a method:
+//
+// void Echo(dbus::MethodCall* method_call,
+// dbus::ExportedObject::ResponseSender response_sender) {
+// // Do something with method_call.
+// Response* response = Response::FromMethodCall(method_call);
+// // Build response here.
+// // Can send an immediate response here to implement a synchronous service
+// // or store the response_sender and send a response later to implement an
+// // asynchronous service.
+// response_sender.Run(response);
+// }
+//
+// void OnExported(const std::string& interface_name,
+// const ObjectPath& object_path,
+// bool success) {
+// // success is true if the method was exported successfully.
+// }
+//
+// ...
+// dbus::ExportedObject* exported_object =
+// bus.GetExportedObject(service_name, object_path);
+// exported_object.ExportMethod(interface_name, method_name,
+// base::Bind(&Echo),
+// base::Bind(&OnExported));
+//
+// WHY IS THIS A REF COUNTED OBJECT?
+//
+// Bus is a ref counted object, to ensure that |this| of the object is
+// alive when callbacks referencing |this| are called. However, after the
+// bus is shut down, |connection_| can be NULL. Hence, callbacks should
+// not rely on that |connection_| is alive.
+class CHROME_DBUS_EXPORT Bus : public base::RefCountedThreadSafe<Bus> {
+ public:
+ // Specifies the bus type. SESSION is used to communicate with per-user
+ // services like GNOME applications. SYSTEM is used to communicate with
+ // system-wide services like NetworkManager. CUSTOM_ADDRESS is used to
+  // communicate with a user-specified address.
+ enum BusType {
+ SESSION = DBUS_BUS_SESSION,
+ SYSTEM = DBUS_BUS_SYSTEM,
+ CUSTOM_ADDRESS,
+ };
+
+ // Specifies the connection type. PRIVATE should usually be used unless
+ // you are sure that SHARED is safe for you, which is unlikely the case
+ // in Chrome.
+ //
+ // PRIVATE gives you a private connection, that won't be shared with
+ // other Bus objects.
+ //
+ // SHARED gives you a connection shared among other Bus objects, which
+ // is unsafe if the connection is shared with multiple threads.
+ enum ConnectionType {
+ PRIVATE,
+ SHARED,
+ };
+
+ // Specifies whether the GetServiceOwnerAndBlock call should report or
+ // suppress errors.
+ enum GetServiceOwnerOption {
+ REPORT_ERRORS,
+ SUPPRESS_ERRORS,
+ };
+
+ // Specifies service ownership options.
+ //
+ // REQUIRE_PRIMARY indicates that you require primary ownership of the
+ // service name.
+ //
+ // ALLOW_REPLACEMENT indicates that you'll allow another connection to
+ // steal ownership of this service name from you.
+ //
+ // REQUIRE_PRIMARY_ALLOW_REPLACEMENT does the obvious.
+ enum ServiceOwnershipOptions {
+ REQUIRE_PRIMARY = (DBUS_NAME_FLAG_DO_NOT_QUEUE |
+ DBUS_NAME_FLAG_REPLACE_EXISTING),
+ REQUIRE_PRIMARY_ALLOW_REPLACEMENT = (REQUIRE_PRIMARY |
+ DBUS_NAME_FLAG_ALLOW_REPLACEMENT),
+ };
+
+ // Options used to create a Bus object.
+ struct CHROME_DBUS_EXPORT Options {
+ Options();
+ ~Options();
+
+ BusType bus_type; // SESSION by default.
+ ConnectionType connection_type; // PRIVATE by default.
+ // If dbus_task_runner is set, the bus object will use that
+ // task runner to process asynchronous operations.
+ //
+ // The thread servicing the task runner should meet the following
+ // requirements:
+ // 1) Already running.
+ // 2) Has a MessageLoopForIO.
+ scoped_refptr<base::SequencedTaskRunner> dbus_task_runner;
+
+ // Specifies the server addresses to be connected. If you want to
+ // communicate with non dbus-daemon such as ibus-daemon, set |bus_type| to
+ // CUSTOM_ADDRESS, and |address| to the D-Bus server address you want to
+ // connect to. The format of this address value is the dbus address style
+ // which is described in
+ // http://dbus.freedesktop.org/doc/dbus-specification.html#addresses
+ //
+ // EXAMPLE USAGE:
+ // dbus::Bus::Options options;
+ // options.bus_type = CUSTOM_ADDRESS;
+ // options.address.assign("unix:path=/tmp/dbus-XXXXXXX");
+ // // Set up other options
+ // dbus::Bus bus(options);
+ //
+ // // Do something.
+ //
+ std::string address;
+ };
+
+ // Creates a Bus object. The actual connection will be established when
+ // Connect() is called.
+ explicit Bus(const Options& options);
+
+ // Called when an ownership request is complete.
+ // Parameters:
+ // - the requested service name.
+ // - whether ownership has been obtained or not.
+ typedef base::Callback<void (const std::string&, bool)> OnOwnershipCallback;
+
+ // Called when GetServiceOwner() completes.
+ // |service_owner| is the return value from GetServiceOwnerAndBlock().
+ typedef base::Callback<void (const std::string& service_owner)>
+ GetServiceOwnerCallback;
+
+ // TODO(satorux): Remove the service name parameter as the caller of
+ // RequestOwnership() knows the service name.
+
+ // Gets the object proxy for the given service name and the object path.
+ // The caller must not delete the returned object.
+ //
+ // Returns an existing object proxy if the bus object already owns the
+ // object proxy for the given service name and the object path.
+ // Never returns NULL.
+ //
+ // The bus will own all object proxies created by the bus, to ensure
+ // that the object proxies are detached from remote objects at the
+ // shutdown time of the bus.
+ //
+ // The object proxy is used to call methods of remote objects, and
+ // receive signals from them.
+ //
+ // |service_name| looks like "org.freedesktop.NetworkManager", and
+ // |object_path| looks like "/org/freedesktop/NetworkManager/Devices/0".
+ //
+ // Must be called in the origin thread.
+ virtual ObjectProxy* GetObjectProxy(const std::string& service_name,
+ const ObjectPath& object_path);
+
+ // Same as above, but also takes a bitfield of ObjectProxy::Options.
+ // See object_proxy.h for available options.
+ virtual ObjectProxy* GetObjectProxyWithOptions(
+ const std::string& service_name,
+ const ObjectPath& object_path,
+ int options);
+
+ // Removes the previously created object proxy for the given service
+ // name and the object path and releases its memory.
+ //
+  // If an object proxy for the given service name and object path was
+ // created with GetObjectProxy, this function removes it from the
+ // bus object and detaches the ObjectProxy, invalidating any pointer
+ // previously acquired for it with GetObjectProxy. A subsequent call
+ // to GetObjectProxy will return a new object.
+ //
+ // All the object proxies are detached from remote objects at the
+ // shutdown time of the bus, but they can be detached early to reduce
+ // memory footprint and used match rules for the bus connection.
+ //
+ // |service_name| looks like "org.freedesktop.NetworkManager", and
+ // |object_path| looks like "/org/freedesktop/NetworkManager/Devices/0".
+ // |callback| is called when the object proxy is successfully removed and
+ // detached.
+ //
+ // The function returns true when there is an object proxy matching the
+ // |service_name| and |object_path| to remove, and calls |callback| when it
+ // is removed. Otherwise, it returns false and the |callback| function is
+ // never called. The |callback| argument must not be null.
+ //
+ // Must be called in the origin thread.
+ virtual bool RemoveObjectProxy(const std::string& service_name,
+ const ObjectPath& object_path,
+ const base::Closure& callback);
+
+ // Same as above, but also takes a bitfield of ObjectProxy::Options.
+ // See object_proxy.h for available options.
+ virtual bool RemoveObjectProxyWithOptions(
+ const std::string& service_name,
+ const ObjectPath& object_path,
+ int options,
+ const base::Closure& callback);
+
+ // Gets the exported object for the given object path.
+ // The caller must not delete the returned object.
+ //
+ // Returns an existing exported object if the bus object already owns
+ // the exported object for the given object path. Never returns NULL.
+ //
+ // The bus will own all exported objects created by the bus, to ensure
+ // that the exported objects are unregistered at the shutdown time of
+ // the bus.
+ //
+ // The exported object is used to export methods of local objects, and
+ // send signal from them.
+ //
+ // Must be called in the origin thread.
+ virtual ExportedObject* GetExportedObject(const ObjectPath& object_path);
+
+ // Unregisters the exported object for the given object path |object_path|.
+ //
+ // Getting an exported object for the same object path after this call
+ // will return a new object, method calls on any remaining copies of the
+ // previous object will not be called.
+ //
+ // Must be called in the origin thread.
+ virtual void UnregisterExportedObject(const ObjectPath& object_path);
+
+
+ // Gets an object manager for the given remote object path |object_path|
+ // exported by the service |service_name|.
+ //
+ // Returns an existing object manager if the bus object already owns a
+ // matching object manager, never returns NULL.
+ //
+ // The caller must not delete the returned object, the bus retains ownership
+ // of all object managers.
+ //
+ // Must be called in the origin thread.
+ virtual ObjectManager* GetObjectManager(const std::string& service_name,
+ const ObjectPath& object_path);
+
+ // Unregisters the object manager for the given remote object path
+  // |object_path| exported by the service |service_name|.
+ //
+ // Getting an object manager for the same remote object after this call
+ // will return a new object, method calls on any remaining copies of the
+ // previous object are not permitted.
+ //
+ // This method will asynchronously clean up any match rules that have been
+ // added for the object manager and invoke |callback| when the operation is
+ // complete. If this method returns false, then |callback| is never called.
+ // The |callback| argument must not be null.
+ //
+ // Must be called in the origin thread.
+ virtual bool RemoveObjectManager(const std::string& service_name,
+ const ObjectPath& object_path,
+ const base::Closure& callback);
+
+ // Instructs all registered object managers to retrieve their set of managed
+ // objects from their respective remote objects. There is no need to call this
+ // manually, this is called automatically by the D-Bus thread manager once
+ // implementation classes are registered.
+ virtual void GetManagedObjects();
+
+ // Shuts down the bus and blocks until it's done. More specifically, this
+ // function does the following:
+ //
+ // - Unregisters the object paths
+ // - Releases the service names
+ // - Closes the connection to dbus-daemon.
+ //
+ // This function can be called multiple times and it is no-op for the 2nd time
+ // calling.
+ //
+ // BLOCKING CALL.
+ virtual void ShutdownAndBlock();
+
+ // Similar to ShutdownAndBlock(), but this function is used to
+ // synchronously shut down the bus that uses the D-Bus thread. This
+ // function is intended to be used at the very end of the browser
+ // shutdown, where it makes more sense to shut down the bus
+ // synchronously, than trying to make it asynchronous.
+ //
+ // BLOCKING CALL, but must be called in the origin thread.
+ virtual void ShutdownOnDBusThreadAndBlock();
+
+ // Returns true if the shutdown has been completed.
+ bool shutdown_completed() { return shutdown_completed_; }
+
+ //
+ // The public functions below are not intended to be used in client
+ // code. These are used to implement ObjectProxy and ExportedObject.
+ //
+
+ // Connects the bus to the dbus-daemon.
+ // Returns true on success, or the bus is already connected.
+ //
+ // BLOCKING CALL.
+ virtual bool Connect();
+
+ // Disconnects the bus from the dbus-daemon.
+ // Safe to call multiple times and no operation after the first call.
+ // Do not call for shared connection it will be released by libdbus.
+ //
+ // BLOCKING CALL.
+ virtual void ClosePrivateConnection();
+
+ // Requests the ownership of the service name given by |service_name|.
+ // See also RequestOwnershipAndBlock().
+ //
+ // |on_ownership_callback| is called when the service name is obtained
+ // or failed to be obtained, in the origin thread.
+ //
+ // Must be called in the origin thread.
+ virtual void RequestOwnership(const std::string& service_name,
+ ServiceOwnershipOptions options,
+ OnOwnershipCallback on_ownership_callback);
+
+ // Requests the ownership of the given service name.
+  // Returns true on success, or the service name is already obtained.
+ //
+ // Note that it's important to expose methods before requesting a service
+ // name with this method. See also ExportedObject::ExportMethodAndBlock()
+ // for details.
+ //
+ // BLOCKING CALL.
+ virtual bool RequestOwnershipAndBlock(const std::string& service_name,
+ ServiceOwnershipOptions options);
+
+ // Releases the ownership of the given service name.
+ // Returns true on success.
+ //
+ // BLOCKING CALL.
+ virtual bool ReleaseOwnership(const std::string& service_name);
+
+ // Sets up async operations.
+ // Returns true on success, or it's already set up.
+ // This function needs to be called before starting async operations.
+ //
+ // BLOCKING CALL.
+ virtual bool SetUpAsyncOperations();
+
+ // Sends a message to the bus and blocks until the response is
+ // received. Used to implement synchronous method calls.
+ //
+ // BLOCKING CALL.
+ virtual DBusMessage* SendWithReplyAndBlock(DBusMessage* request,
+ int timeout_ms,
+ DBusError* error);
+
+ // Requests to send a message to the bus. The reply is handled with
+ // |pending_call| at a later time.
+ //
+ // BLOCKING CALL.
+ virtual void SendWithReply(DBusMessage* request,
+ DBusPendingCall** pending_call,
+ int timeout_ms);
+
+ // Requests to send a message to the bus. The message serial number will
+ // be stored in |serial|.
+ //
+ // BLOCKING CALL.
+ virtual void Send(DBusMessage* request, uint32_t* serial);
+
+ // Adds the message filter function. |filter_function| will be called
+ // when incoming messages are received.
+ //
+ // When a new incoming message arrives, filter functions are called in
+  // the order that they were added until the incoming message is
+ // handled by a filter function.
+ //
+ // The same filter function associated with the same user data cannot be
+ // added more than once.
+ //
+ // BLOCKING CALL.
+ virtual void AddFilterFunction(DBusHandleMessageFunction filter_function,
+ void* user_data);
+
+ // Removes the message filter previously added by AddFilterFunction().
+ //
+ // BLOCKING CALL.
+ virtual void RemoveFilterFunction(DBusHandleMessageFunction filter_function,
+ void* user_data);
+
+ // Adds the match rule. Messages that match the rule will be processed
+ // by the filter functions added by AddFilterFunction().
+ //
+ // You cannot specify which filter function to use for a match rule.
+ // Instead, you should check if an incoming message is what you are
+ // interested in, in the filter functions.
+ //
+ // The same match rule can be added more than once and should be removed
+ // as many times as it was added.
+ //
+ // The match rule looks like:
+ // "type='signal', interface='org.chromium.SomeInterface'".
+ //
+ // See "Message Bus Message Routing" section in the D-Bus specification
+ // for details about match rules:
+ // http://dbus.freedesktop.org/doc/dbus-specification.html#message-bus-routing
+ //
+ // BLOCKING CALL.
+ virtual void AddMatch(const std::string& match_rule, DBusError* error);
+
+ // Removes the match rule previously added by AddMatch().
+ // Returns false if the requested match rule is unknown or has already been
+ // removed. Otherwise, returns true and sets |error| accordingly.
+ //
+ // BLOCKING CALL.
+ virtual bool RemoveMatch(const std::string& match_rule, DBusError* error);
+
+ // Tries to register the object path. Returns true on success.
+ // Returns false if the object path is already registered.
+ //
+  // |message_function| in |vtable| will be called every time a new
+  // message sent to the object path arrives.
+ //
+ // The same object path must not be added more than once.
+ //
+ // See also documentation of |dbus_connection_try_register_object_path| at
+ // http://dbus.freedesktop.org/doc/api/html/group__DBusConnection.html
+ //
+ // BLOCKING CALL.
+ virtual bool TryRegisterObjectPath(const ObjectPath& object_path,
+ const DBusObjectPathVTable* vtable,
+ void* user_data,
+ DBusError* error);
+
+ // Unregister the object path.
+ //
+ // BLOCKING CALL.
+ virtual void UnregisterObjectPath(const ObjectPath& object_path);
+
+ // Returns the task runner of the D-Bus thread.
+ virtual base::TaskRunner* GetDBusTaskRunner();
+
+ // Returns the task runner of the thread that created the bus.
+ virtual base::TaskRunner* GetOriginTaskRunner();
+
+ // Returns true if the bus has the D-Bus thread.
+ virtual bool HasDBusThread();
+
+ // Check whether the current thread is on the origin thread (the thread
+ // that created the bus). If not, DCHECK will fail.
+ virtual void AssertOnOriginThread();
+
+ // Check whether the current thread is on the D-Bus thread. If not,
+ // DCHECK will fail. If the D-Bus thread is not supplied, it calls
+ // AssertOnOriginThread().
+ virtual void AssertOnDBusThread();
+
+ // Gets the owner for |service_name| via org.freedesktop.DBus.GetNameOwner.
+ // Returns the owner name, if any, or an empty string on failure.
+  // |options| specifies whether to print error messages or not.
+ //
+ // BLOCKING CALL.
+ virtual std::string GetServiceOwnerAndBlock(const std::string& service_name,
+ GetServiceOwnerOption options);
+
+ // A non-blocking version of GetServiceOwnerAndBlock().
+ // Must be called in the origin thread.
+ virtual void GetServiceOwner(const std::string& service_name,
+ const GetServiceOwnerCallback& callback);
+
+ // Whenever the owner for |service_name| changes, run |callback| with the
+ // name of the new owner. If the owner goes away, then |callback| receives
+ // an empty string.
+ //
+  // Any unique (service_name, callback) can be used. Duplicates are ignored.
+ // |service_name| must not be empty and |callback| must not be null.
+ //
+ // Must be called in the origin thread.
+ virtual void ListenForServiceOwnerChange(
+ const std::string& service_name,
+ const GetServiceOwnerCallback& callback);
+
+ // Stop listening for |service_name| owner changes for |callback|.
+ // Any unique (service_name, callback) can be used. Non-registered callbacks
+ // for a given service name are ignored.
+ // |service_name| must not be empty and |callback| must not be null.
+ //
+ // Must be called in the origin thread.
+ virtual void UnlistenForServiceOwnerChange(
+ const std::string& service_name,
+ const GetServiceOwnerCallback& callback);
+
+  // Returns the unique name of the bus connection if it is connected to
+  // D-Bus. Otherwise, returns an empty string.
+ std::string GetConnectionName();
+
+ // Returns true if the bus is connected to D-Bus.
+ bool is_connected() { return connection_ != NULL; }
+
+ protected:
+ // This is protected, so we can define sub classes.
+ virtual ~Bus();
+
+ private:
+ friend class base::RefCountedThreadSafe<Bus>;
+
+ // Helper function used for RemoveObjectProxy().
+ void RemoveObjectProxyInternal(scoped_refptr<dbus::ObjectProxy> object_proxy,
+ const base::Closure& callback);
+
+ // Helper functions used for RemoveObjectManager().
+ void RemoveObjectManagerInternal(
+ scoped_refptr<dbus::ObjectManager> object_manager,
+ const base::Closure& callback);
+ void RemoveObjectManagerInternalHelper(
+ scoped_refptr<dbus::ObjectManager> object_manager,
+ const base::Closure& callback);
+
+ // Helper function used for UnregisterExportedObject().
+ void UnregisterExportedObjectInternal(
+ scoped_refptr<dbus::ExportedObject> exported_object);
+
+ // Helper function used for ShutdownOnDBusThreadAndBlock().
+ void ShutdownOnDBusThreadAndBlockInternal();
+
+ // Helper function used for RequestOwnership().
+ void RequestOwnershipInternal(const std::string& service_name,
+ ServiceOwnershipOptions options,
+ OnOwnershipCallback on_ownership_callback);
+
+ // Helper function used for GetServiceOwner().
+ void GetServiceOwnerInternal(const std::string& service_name,
+ const GetServiceOwnerCallback& callback);
+
+ // Helper function used for ListenForServiceOwnerChange().
+ void ListenForServiceOwnerChangeInternal(
+ const std::string& service_name,
+ const GetServiceOwnerCallback& callback);
+
+ // Helper function used for UnlistenForServiceOwnerChange().
+ void UnlistenForServiceOwnerChangeInternal(
+ const std::string& service_name,
+ const GetServiceOwnerCallback& callback);
+
+ // Processes the all incoming data to the connection, if any.
+ //
+ // BLOCKING CALL.
+ void ProcessAllIncomingDataIfAny();
+
+ // Called when a watch object is added. Used to start monitoring the
+ // file descriptor used for D-Bus communication.
+ dbus_bool_t OnAddWatch(DBusWatch* raw_watch);
+
+ // Called when a watch object is removed.
+ void OnRemoveWatch(DBusWatch* raw_watch);
+
+ // Called when the "enabled" status of |raw_watch| is toggled.
+ void OnToggleWatch(DBusWatch* raw_watch);
+
+ // Called when a timeout object is added. Used to start monitoring
+ // timeout for method calls.
+ dbus_bool_t OnAddTimeout(DBusTimeout* raw_timeout);
+
+ // Called when a timeout object is removed.
+ void OnRemoveTimeout(DBusTimeout* raw_timeout);
+
+ // Called when the "enabled" status of |raw_timeout| is toggled.
+ void OnToggleTimeout(DBusTimeout* raw_timeout);
+
+ // Called when the dispatch status (i.e. if any incoming data is
+ // available) is changed.
+ void OnDispatchStatusChanged(DBusConnection* connection,
+ DBusDispatchStatus status);
+
+ // Called when a service owner change occurs.
+ void OnServiceOwnerChanged(DBusMessage* message);
+
+ // Callback helper functions. Redirects to the corresponding member function.
+ static dbus_bool_t OnAddWatchThunk(DBusWatch* raw_watch, void* data);
+ static void OnRemoveWatchThunk(DBusWatch* raw_watch, void* data);
+ static void OnToggleWatchThunk(DBusWatch* raw_watch, void* data);
+ static dbus_bool_t OnAddTimeoutThunk(DBusTimeout* raw_timeout, void* data);
+ static void OnRemoveTimeoutThunk(DBusTimeout* raw_timeout, void* data);
+ static void OnToggleTimeoutThunk(DBusTimeout* raw_timeout, void* data);
+ static void OnDispatchStatusChangedThunk(DBusConnection* connection,
+ DBusDispatchStatus status,
+ void* data);
+
+ // Calls OnConnectionDisconnected if the Disconnected signal is received.
+ static DBusHandlerResult OnConnectionDisconnectedFilter(
+ DBusConnection* connection,
+ DBusMessage* message,
+ void* user_data);
+
+ // Calls OnServiceOwnerChanged for a NameOwnerChanged signal.
+ static DBusHandlerResult OnServiceOwnerChangedFilter(
+ DBusConnection* connection,
+ DBusMessage* message,
+ void* user_data);
+
+ const BusType bus_type_;
+ const ConnectionType connection_type_;
+ scoped_refptr<base::SequencedTaskRunner> dbus_task_runner_;
+ base::WaitableEvent on_shutdown_;
+ DBusConnection* connection_;
+
+ scoped_refptr<base::SingleThreadTaskRunner> origin_task_runner_;
+ base::PlatformThreadId origin_thread_id_;
+
+ std::set<std::string> owned_service_names_;
+ // The following sets are used to check if rules/object_paths/filters
+ // are properly cleaned up before destruction of the bus object.
+ // Since it's not an error to add the same match rule twice, the repeated
+ // match rules are counted in a map.
+ std::map<std::string, int> match_rules_added_;
+ std::set<ObjectPath> registered_object_paths_;
+ std::set<std::pair<DBusHandleMessageFunction, void*> >
+ filter_functions_added_;
+
+ // ObjectProxyTable is used to hold the object proxies created by the
+ // bus object. Key is a pair; the first part is a concatenated string of
+ // service name + object path, like
+ // "org.chromium.TestService/org/chromium/TestObject".
+ // The second part is the ObjectProxy::Options for the proxy.
+ typedef std::map<std::pair<std::string, int>,
+ scoped_refptr<dbus::ObjectProxy> > ObjectProxyTable;
+ ObjectProxyTable object_proxy_table_;
+
+ // ExportedObjectTable is used to hold the exported objects created by
+ // the bus object. Key is a concatenated string of service name +
+ // object path, like "org.chromium.TestService/org/chromium/TestObject".
+ typedef std::map<const dbus::ObjectPath,
+ scoped_refptr<dbus::ExportedObject> > ExportedObjectTable;
+ ExportedObjectTable exported_object_table_;
+
+ // ObjectManagerTable is used to hold the object managers created by the
+ // bus object. Key is a concatenated string of service name + object path,
+ // like "org.chromium.TestService/org/chromium/TestObject".
+ typedef std::map<std::string,
+ scoped_refptr<dbus::ObjectManager> > ObjectManagerTable;
+ ObjectManagerTable object_manager_table_;
+
+ // A map of NameOwnerChanged signals to listen for and the callbacks to run
+ // on the origin thread when the owner changes.
+ // Only accessed on the DBus thread.
+ // Key: Service name
+ // Value: Vector of callbacks. Unique and expected to be small. Not using
+ // std::set here because base::Callbacks don't have a '<' operator.
+ typedef std::map<std::string, std::vector<GetServiceOwnerCallback> >
+ ServiceOwnerChangedListenerMap;
+ ServiceOwnerChangedListenerMap service_owner_changed_listener_map_;
+
+ bool async_operations_set_up_;
+ bool shutdown_completed_;
+
+ // Counters to make sure that OnAddWatch()/OnRemoveWatch() and
+ // OnAddTimeout()/OnRemoveTimeout() are balanced.
+ int num_pending_watches_;
+ int num_pending_timeouts_;
+
+ std::string address_;
+
+ DISALLOW_COPY_AND_ASSIGN(Bus);
+};
+
+} // namespace dbus
+
+#endif // DBUS_BUS_H_
diff --git a/libchrome/dbus/dbus.gyp b/libchrome/dbus/dbus.gyp
new file mode 100644
index 0000000..264383e
--- /dev/null
+++ b/libchrome/dbus/dbus.gyp
@@ -0,0 +1,141 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ },
+ 'targets': [
+ {
+ 'target_name': 'dbus',
+ 'type': '<(component)',
+ 'dependencies': [
+ '../base/base.gyp:base',
+ '../build/linux/system.gyp:dbus',
+ '../third_party/protobuf/protobuf.gyp:protobuf_lite',
+ ],
+ 'export_dependent_settings': [
+ '../base/base.gyp:base',
+ ],
+ 'defines': [
+ 'DBUS_IMPLEMENTATION',
+ ],
+ 'sources': [
+ 'bus.cc',
+ 'bus.h',
+ 'dbus_export.h',
+ 'dbus_statistics.cc',
+ 'dbus_statistics.h',
+ 'exported_object.cc',
+ 'exported_object.h',
+ 'file_descriptor.cc',
+ 'file_descriptor.h',
+ 'message.cc',
+ 'message.h',
+ 'object_manager.cc',
+ 'object_manager.h',
+ 'object_path.cc',
+ 'object_path.h',
+ 'object_proxy.cc',
+ 'object_proxy.h',
+ 'property.cc',
+ 'property.h',
+ 'scoped_dbus_error.cc',
+ 'scoped_dbus_error.h',
+ 'string_util.cc',
+ 'string_util.h',
+ 'util.cc',
+ 'util.h',
+ 'values_util.cc',
+ 'values_util.h',
+ ],
+ },
+ {
+ # Protobuf compiler / generator test protocol buffer
+ 'target_name': 'dbus_test_proto',
+ 'type': 'static_library',
+ 'sources': [ 'test_proto.proto' ],
+ 'variables': {
+ 'proto_out_dir': 'dbus',
+ },
+ 'includes': [ '../build/protoc.gypi' ],
+ },
+ {
+ # This target contains mocks that can be used to write unit tests
+ # without issuing actual D-Bus calls.
+ 'target_name': 'dbus_test_support',
+ 'type': 'static_library',
+ 'dependencies': [
+ '../build/linux/system.gyp:dbus',
+ '../testing/gmock.gyp:gmock',
+ 'dbus',
+ ],
+ 'sources': [
+ 'mock_bus.cc',
+ 'mock_bus.h',
+ 'mock_exported_object.cc',
+ 'mock_exported_object.h',
+ 'mock_object_manager.cc',
+ 'mock_object_manager.h',
+ 'mock_object_proxy.cc',
+ 'mock_object_proxy.h',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ },
+ {
+ 'target_name': 'dbus_unittests',
+ 'type': 'executable',
+ 'dependencies': [
+ '../base/base.gyp:run_all_unittests',
+ '../base/base.gyp:test_support_base',
+ '../build/linux/system.gyp:dbus',
+ '../testing/gmock.gyp:gmock',
+ '../testing/gtest.gyp:gtest',
+ 'dbus',
+ 'dbus_test_proto',
+ 'dbus_test_support',
+ ],
+ 'sources': [
+ 'bus_unittest.cc',
+ 'dbus_statistics_unittest.cc',
+ 'end_to_end_async_unittest.cc',
+ 'end_to_end_sync_unittest.cc',
+ 'message_unittest.cc',
+ 'mock_unittest.cc',
+ 'object_manager_unittest.cc',
+ 'object_proxy_unittest.cc',
+ 'property_unittest.cc',
+ 'signal_sender_verification_unittest.cc',
+ 'string_util_unittest.cc',
+ 'test_service.cc',
+ 'test_service.h',
+ 'util_unittest.cc',
+ 'values_util_unittest.cc',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ },
+ {
+ 'target_name': 'dbus_test_server',
+ 'type': 'executable',
+ 'dependencies': [
+ '../base/base.gyp:test_support_base',
+ '../base/base.gyp:base',
+ '../build/linux/system.gyp:dbus',
+ 'dbus',
+ ],
+ 'sources': [
+ 'test_server.cc',
+ 'test_service.cc',
+ 'test_service.h',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ },
+ ],
+}
diff --git a/libchrome/dbus/dbus_export.h b/libchrome/dbus/dbus_export.h
new file mode 100644
index 0000000..7c96d17
--- /dev/null
+++ b/libchrome/dbus/dbus_export.h
@@ -0,0 +1,31 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_DBUS_EXPORT_H_
+#define DBUS_DBUS_EXPORT_H_
+
+// Defines CHROME_DBUS_EXPORT so that functionality implemented by the dbus
+// library can be exported to consumers.
+// NOTE: We haven't used DBUS_EXPORT because it would conflict with the version
+// from /usr/include/dbus-1.0/dbus/dbus-macros.h.
+
+#if defined(WIN32)
+#error dbus support is not currently expected to work on windows
+#endif // defined(WIN32)
+
+#if defined(COMPONENT_BUILD)
+
+#if defined(DBUS_IMPLEMENTATION)
+#define CHROME_DBUS_EXPORT __attribute__((visibility("default")))
+#else
+#define CHROME_DBUS_EXPORT
+#endif
+
+#else // !defined(COMPONENT_BUILD)
+
+#define CHROME_DBUS_EXPORT
+
+#endif // defined(COMPONENT_BUILD)
+
+#endif // DBUS_DBUS_EXPORT_H_
diff --git a/libchrome/dbus/dbus_statistics.cc b/libchrome/dbus/dbus_statistics.cc
new file mode 100644
index 0000000..e1e0973
--- /dev/null
+++ b/libchrome/dbus/dbus_statistics.cc
@@ -0,0 +1,284 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dbus/dbus_statistics.h"
+
+#include <memory>
+#include <set>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/stl_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+
+namespace dbus {
+
+namespace {
+
+// Used to store dbus statistics sorted alphabetically by service, interface,
+// then method (using std::string <).
+struct Stat {
+ Stat(const std::string& service,
+ const std::string& interface,
+ const std::string& method)
+ : service(service),
+ interface(interface),
+ method(method),
+ sent_method_calls(0),
+ received_signals(0),
+ sent_blocking_method_calls(0) {
+ }
+ std::string service;
+ std::string interface;
+ std::string method;
+ int sent_method_calls;
+ int received_signals;
+ int sent_blocking_method_calls;
+
+ bool Compare(const Stat& other) const {
+ if (service != other.service)
+ return service < other.service;
+ if (interface != other.interface)
+ return interface < other.interface;
+ return method < other.method;
+ }
+
+ struct PtrCompare {
+ bool operator()(Stat* lhs, Stat* rhs) const {
+ DCHECK(lhs && rhs);
+ return lhs->Compare(*rhs);
+ }
+ };
+};
+
+typedef std::set<Stat*, Stat::PtrCompare> StatSet;
+
+//------------------------------------------------------------------------------
+// DBusStatistics
+
+// Simple class for gathering DBus usage statistics.
+class DBusStatistics {
+ public:
+ DBusStatistics()
+ : start_time_(base::Time::Now()),
+ origin_thread_id_(base::PlatformThread::CurrentId()) {
+ }
+
+ ~DBusStatistics() {
+ DCHECK_EQ(origin_thread_id_, base::PlatformThread::CurrentId());
+ STLDeleteContainerPointers(stats_.begin(), stats_.end());
+ }
+
+ // Enum to specify which field in Stat to increment in AddStat
+ enum StatType {
+ TYPE_SENT_METHOD_CALLS,
+ TYPE_RECEIVED_SIGNALS,
+ TYPE_SENT_BLOCKING_METHOD_CALLS
+ };
+
+ // Add a call to |method| for |interface|. See also MethodCall in message.h.
+ void AddStat(const std::string& service,
+ const std::string& interface,
+ const std::string& method,
+ StatType type) {
+ if (base::PlatformThread::CurrentId() != origin_thread_id_) {
+ DVLOG(1) << "Ignoring DBusStatistics::AddStat call from thread: "
+ << base::PlatformThread::CurrentId();
+ return;
+ }
+ Stat* stat = GetStat(service, interface, method, true);
+ DCHECK(stat);
+ if (type == TYPE_SENT_METHOD_CALLS)
+ ++stat->sent_method_calls;
+ else if (type == TYPE_RECEIVED_SIGNALS)
+ ++stat->received_signals;
+ else if (type == TYPE_SENT_BLOCKING_METHOD_CALLS)
+ ++stat->sent_blocking_method_calls;
+ else
+ NOTREACHED();
+ }
+
+ // Look up the Stat entry in |stats_|. If |add_stat| is true, add a new entry
+ // if one does not already exist.
+ Stat* GetStat(const std::string& service,
+ const std::string& interface,
+ const std::string& method,
+ bool add_stat) {
+ DCHECK_EQ(origin_thread_id_, base::PlatformThread::CurrentId());
+ std::unique_ptr<Stat> stat(new Stat(service, interface, method));
+ StatSet::iterator found = stats_.find(stat.get());
+ if (found != stats_.end())
+ return *found;
+ if (!add_stat)
+ return NULL;
+ found = stats_.insert(stat.release()).first;
+ return *found;
+ }
+
+ StatSet& stats() { return stats_; }
+ base::Time start_time() { return start_time_; }
+
+ private:
+ StatSet stats_;
+ base::Time start_time_;
+ base::PlatformThreadId origin_thread_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(DBusStatistics);
+};
+
+DBusStatistics* g_dbus_statistics = NULL;
+
+} // namespace
+
+//------------------------------------------------------------------------------
+
+namespace statistics {
+
+void Initialize() {
+ if (g_dbus_statistics)
+ delete g_dbus_statistics; // reset statistics
+ g_dbus_statistics = new DBusStatistics();
+}
+
+void Shutdown() {
+ delete g_dbus_statistics;
+ g_dbus_statistics = NULL;
+}
+
+void AddSentMethodCall(const std::string& service,
+ const std::string& interface,
+ const std::string& method) {
+ if (!g_dbus_statistics)
+ return;
+ g_dbus_statistics->AddStat(
+ service, interface, method, DBusStatistics::TYPE_SENT_METHOD_CALLS);
+}
+
+void AddReceivedSignal(const std::string& service,
+ const std::string& interface,
+ const std::string& method) {
+ if (!g_dbus_statistics)
+ return;
+ g_dbus_statistics->AddStat(
+ service, interface, method, DBusStatistics::TYPE_RECEIVED_SIGNALS);
+}
+
+void AddBlockingSentMethodCall(const std::string& service,
+ const std::string& interface,
+ const std::string& method) {
+ if (!g_dbus_statistics)
+ return;
+ g_dbus_statistics->AddStat(
+ service, interface, method,
+ DBusStatistics::TYPE_SENT_BLOCKING_METHOD_CALLS);
+}
+
+// NOTE: If the output format is changed, be certain to change the test
+// expectations as well.
+std::string GetAsString(ShowInString show, FormatString format) {
+ if (!g_dbus_statistics)
+ return "DBusStatistics not initialized.";
+
+ const StatSet& stats = g_dbus_statistics->stats();
+ if (stats.empty())
+ return "No DBus calls.";
+
+ base::TimeDelta dtime = base::Time::Now() - g_dbus_statistics->start_time();
+ int dminutes = dtime.InMinutes();
+ dminutes = std::max(dminutes, 1);
+
+ std::string result;
+ int sent = 0, received = 0, sent_blocking = 0;
+ // Stats are stored in order by service, then interface, then method.
+ for (StatSet::const_iterator iter = stats.begin(); iter != stats.end(); ) {
+ StatSet::const_iterator cur_iter = iter;
+ StatSet::const_iterator next_iter = ++iter;
+ const Stat* stat = *cur_iter;
+ sent += stat->sent_method_calls;
+ received += stat->received_signals;
+ sent_blocking += stat->sent_blocking_method_calls;
+ // If this is not the last stat, and if the next stat matches the current
+ // stat, continue.
+ if (next_iter != stats.end() &&
+ (*next_iter)->service == stat->service &&
+ (show < SHOW_INTERFACE || (*next_iter)->interface == stat->interface) &&
+ (show < SHOW_METHOD || (*next_iter)->method == stat->method))
+ continue;
+
+ if (!sent && !received && !sent_blocking)
+ continue; // No stats collected for this line, skip it and continue.
+
+ // Add a line to the result and clear the counts.
+ std::string line;
+ if (show == SHOW_SERVICE) {
+ line += stat->service;
+ } else {
+ // The interface usually includes the service so don't show both.
+ line += stat->interface;
+ if (show >= SHOW_METHOD)
+ line += "." + stat->method;
+ }
+ line += base::StringPrintf(":");
+ if (sent_blocking) {
+ line += base::StringPrintf(" Sent (BLOCKING):");
+ if (format == FORMAT_TOTALS)
+ line += base::StringPrintf(" %d", sent_blocking);
+ else if (format == FORMAT_PER_MINUTE)
+ line += base::StringPrintf(" %d/min", sent_blocking / dminutes);
+ else if (format == FORMAT_ALL)
+ line += base::StringPrintf(" %d (%d/min)",
+ sent_blocking, sent_blocking / dminutes);
+ }
+ if (sent) {
+ line += base::StringPrintf(" Sent:");
+ if (format == FORMAT_TOTALS)
+ line += base::StringPrintf(" %d", sent);
+ else if (format == FORMAT_PER_MINUTE)
+ line += base::StringPrintf(" %d/min", sent / dminutes);
+ else if (format == FORMAT_ALL)
+ line += base::StringPrintf(" %d (%d/min)", sent, sent / dminutes);
+ }
+ if (received) {
+ line += base::StringPrintf(" Received:");
+ if (format == FORMAT_TOTALS)
+ line += base::StringPrintf(" %d", received);
+ else if (format == FORMAT_PER_MINUTE)
+ line += base::StringPrintf(" %d/min", received / dminutes);
+ else if (format == FORMAT_ALL)
+ line += base::StringPrintf(
+ " %d (%d/min)", received, received / dminutes);
+ }
+ result += line + "\n";
+ sent = 0;
+ sent_blocking = 0;
+ received = 0;
+ }
+ return result;
+}
+
+namespace testing {
+
+bool GetCalls(const std::string& service,
+ const std::string& interface,
+ const std::string& method,
+ int* sent,
+ int* received,
+ int* blocking) {
+ if (!g_dbus_statistics)
+ return false;
+ Stat* stat = g_dbus_statistics->GetStat(service, interface, method, false);
+ if (!stat)
+ return false;
+ *sent = stat->sent_method_calls;
+ *received = stat->received_signals;
+ *blocking = stat->sent_blocking_method_calls;
+ return true;
+}
+
+} // namespace testing
+
+} // namespace statistics
+} // namespace dbus
diff --git a/libchrome/dbus/dbus_statistics.h b/libchrome/dbus/dbus_statistics.h
new file mode 100644
index 0000000..e035558
--- /dev/null
+++ b/libchrome/dbus/dbus_statistics.h
@@ -0,0 +1,79 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_DBUS_STATISTICS_H_
+#define DBUS_DBUS_STATISTICS_H_
+
+#include <string>
+
+#include "dbus/dbus_export.h"
+
+// The functions defined here are used to gather DBus statistics, and
+// provide them in a format convenient for debugging. These functions are only
+// valid when called from the main thread (the thread which Initialize() was
+// called from). Calls from other threads will be ignored.
+
+namespace dbus {
+namespace statistics {
+
+// Enum to specify what level of detail to show in GetAsString
+enum ShowInString {
+ SHOW_SERVICE = 0, // Service totals only
+ SHOW_INTERFACE = 1, // Service + interface totals
+ SHOW_METHOD = 2, // Service + interface + method totals
+};
+
+// Enum to specify how to format the display in GetAsString
+enum FormatString {
+ FORMAT_TOTALS = 0, // Raw totals only
+ FORMAT_PER_MINUTE = 1, // Per-minute only
+ FORMAT_ALL = 2 // Include all format details
+};
+
+// Initializes / shuts down dbus statistics gathering. Calling Initialize
+// more than once will reset the statistics.
+CHROME_DBUS_EXPORT void Initialize();
+CHROME_DBUS_EXPORT void Shutdown();
+
+// Add sent/received calls to the statistics gathering class. These methods
+// do nothing unless Initialize() was called.
+CHROME_DBUS_EXPORT void AddSentMethodCall(const std::string& service,
+ const std::string& interface,
+ const std::string& method);
+CHROME_DBUS_EXPORT void AddReceivedSignal(const std::string& service,
+ const std::string& interface,
+ const std::string& method);
+// Track synchronous calls independently since we want to highlight
+// (and remove) these.
+CHROME_DBUS_EXPORT void AddBlockingSentMethodCall(const std::string& service,
+ const std::string& interface,
+ const std::string& method);
+
+// Output the calls into a formatted string. |show| determines what level
+// of detail to show: one line per service, per interface, or per method.
+// If |show_per_minute| is true include per minute stats.
+// Example output for SHOW_METHOD, FORMAT_TOTALS:
+// org.chromium.Mtpd.EnumerateStorage: Sent: 100
+// org.chromium.Mtpd.MTPStorageSignal: Received: 20
+// Example output for SHOW_INTERFACE, FORMAT_ALL:
+// org.chromium.Mtpd: Sent: 100 (10/min) Received: 20 (2/min)
+CHROME_DBUS_EXPORT std::string GetAsString(ShowInString show,
+ FormatString format);
+
+namespace testing {
+// Sets |sent| to the number of sent calls, |received| to the number of
+// received calls, and |blocking| to the number of sent blocking calls for
+// service+interface+method. Used in unittests.
+CHROME_DBUS_EXPORT bool GetCalls(const std::string& service,
+ const std::string& interface,
+ const std::string& method,
+ int* sent,
+ int* received,
+ int* blocking);
+} // namespace testing
+
+} // namespace statistics
+} // namespace dbus
+
+#endif // DBUS_DBUS_STATISTICS_H_
diff --git a/libchrome/dbus/exported_object.cc b/libchrome/dbus/exported_object.cc
new file mode 100644
index 0000000..b156308
--- /dev/null
+++ b/libchrome/dbus/exported_object.cc
@@ -0,0 +1,319 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dbus/exported_object.h"
+
+#include <stdint.h>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/histogram.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "dbus/bus.h"
+#include "dbus/message.h"
+#include "dbus/object_path.h"
+#include "dbus/scoped_dbus_error.h"
+#include "dbus/util.h"
+
+namespace dbus {
+
+namespace {
+
+// Used for success ratio histograms. 1 for success, 0 for failure.
+const int kSuccessRatioHistogramMaxValue = 2;
+
+} // namespace
+
+ExportedObject::ExportedObject(Bus* bus,
+ const ObjectPath& object_path)
+ : bus_(bus),
+ object_path_(object_path),
+ object_is_registered_(false) {
+}
+
+ExportedObject::~ExportedObject() {
+ DCHECK(!object_is_registered_);
+}
+
+bool ExportedObject::ExportMethodAndBlock(
+ const std::string& interface_name,
+ const std::string& method_name,
+ MethodCallCallback method_call_callback) {
+ bus_->AssertOnDBusThread();
+
+ // Check if the method is already exported.
+ const std::string absolute_method_name =
+ GetAbsoluteMemberName(interface_name, method_name);
+ if (method_table_.find(absolute_method_name) != method_table_.end()) {
+ LOG(ERROR) << absolute_method_name << " is already exported";
+ return false;
+ }
+
+ if (!bus_->Connect())
+ return false;
+ if (!bus_->SetUpAsyncOperations())
+ return false;
+ if (!Register())
+ return false;
+
+ // Add the method callback to the method table.
+ method_table_[absolute_method_name] = method_call_callback;
+
+ return true;
+}
+
+void ExportedObject::ExportMethod(const std::string& interface_name,
+ const std::string& method_name,
+ MethodCallCallback method_call_callback,
+ OnExportedCallback on_exported_calback) {
+ bus_->AssertOnOriginThread();
+
+ base::Closure task = base::Bind(&ExportedObject::ExportMethodInternal,
+ this,
+ interface_name,
+ method_name,
+ method_call_callback,
+ on_exported_calback);
+ bus_->GetDBusTaskRunner()->PostTask(FROM_HERE, task);
+}
+
+void ExportedObject::SendSignal(Signal* signal) {
+ // For signals, the object path should be set to the path to the sender
+ // object, which is this exported object here.
+ CHECK(signal->SetPath(object_path_));
+
+ // Increment the reference count so we can safely reference the
+ // underlying signal message until the signal sending is complete. This
+ // will be unref'ed in SendSignalInternal().
+ DBusMessage* signal_message = signal->raw_message();
+ dbus_message_ref(signal_message);
+
+ const base::TimeTicks start_time = base::TimeTicks::Now();
+ if (bus_->GetDBusTaskRunner()->RunsTasksOnCurrentThread()) {
+ // The Chrome OS power manager doesn't use a dedicated TaskRunner for
+ // sending DBus messages. Sending signals asynchronously can cause an
+ // inversion in the message order if the power manager calls
+ // ObjectProxy::CallMethodAndBlock() before going back to the top level of
+ // the MessageLoop: crbug.com/472361.
+ SendSignalInternal(start_time, signal_message);
+ } else {
+ bus_->GetDBusTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&ExportedObject::SendSignalInternal,
+ this,
+ start_time,
+ signal_message));
+ }
+}
+
+void ExportedObject::Unregister() {
+ bus_->AssertOnDBusThread();
+
+ if (!object_is_registered_)
+ return;
+
+ bus_->UnregisterObjectPath(object_path_);
+ object_is_registered_ = false;
+}
+
+void ExportedObject::ExportMethodInternal(
+ const std::string& interface_name,
+ const std::string& method_name,
+ MethodCallCallback method_call_callback,
+ OnExportedCallback on_exported_calback) {
+ bus_->AssertOnDBusThread();
+
+ const bool success = ExportMethodAndBlock(interface_name,
+ method_name,
+ method_call_callback);
+ bus_->GetOriginTaskRunner()->PostTask(FROM_HERE,
+ base::Bind(&ExportedObject::OnExported,
+ this,
+ on_exported_calback,
+ interface_name,
+ method_name,
+ success));
+}
+
+void ExportedObject::OnExported(OnExportedCallback on_exported_callback,
+ const std::string& interface_name,
+ const std::string& method_name,
+ bool success) {
+ bus_->AssertOnOriginThread();
+
+ on_exported_callback.Run(interface_name, method_name, success);
+}
+
+void ExportedObject::SendSignalInternal(base::TimeTicks start_time,
+ DBusMessage* signal_message) {
+ uint32_t serial = 0;
+ bus_->Send(signal_message, &serial);
+ dbus_message_unref(signal_message);
+ // Record time spent to send the signal. This is not accurate as the
+ // signal will actually be sent from the next run of the message loop,
+ // but we can at least tell the number of signals sent.
+ UMA_HISTOGRAM_TIMES("DBus.SignalSendTime",
+ base::TimeTicks::Now() - start_time);
+}
+
+bool ExportedObject::Register() {
+ bus_->AssertOnDBusThread();
+
+ if (object_is_registered_)
+ return true;
+
+ ScopedDBusError error;
+
+ DBusObjectPathVTable vtable = {};
+ vtable.message_function = &ExportedObject::HandleMessageThunk;
+ vtable.unregister_function = &ExportedObject::OnUnregisteredThunk;
+ const bool success = bus_->TryRegisterObjectPath(object_path_,
+ &vtable,
+ this,
+ error.get());
+ if (!success) {
+ LOG(ERROR) << "Failed to register the object: " << object_path_.value()
+ << ": " << (error.is_set() ? error.message() : "");
+ return false;
+ }
+
+ object_is_registered_ = true;
+ return true;
+}
+
+DBusHandlerResult ExportedObject::HandleMessage(DBusConnection*,
+ DBusMessage* raw_message) {
+ bus_->AssertOnDBusThread();
+ DCHECK_EQ(DBUS_MESSAGE_TYPE_METHOD_CALL, dbus_message_get_type(raw_message));
+
+ // raw_message will be unrefed on exit of the function. Increment the
+ // reference so we can use it in MethodCall.
+ dbus_message_ref(raw_message);
+ std::unique_ptr<MethodCall> method_call(
+ MethodCall::FromRawMessage(raw_message));
+ const std::string interface = method_call->GetInterface();
+ const std::string member = method_call->GetMember();
+
+ if (interface.empty()) {
+ // We don't support method calls without interface.
+ LOG(WARNING) << "Interface is missing: " << method_call->ToString();
+ return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
+ }
+
+ // Check if we know about the method.
+ const std::string absolute_method_name = GetAbsoluteMemberName(
+ interface, member);
+ MethodTable::const_iterator iter = method_table_.find(absolute_method_name);
+ if (iter == method_table_.end()) {
+ // Don't know about the method.
+ LOG(WARNING) << "Unknown method: " << method_call->ToString();
+ return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
+ }
+
+ const base::TimeTicks start_time = base::TimeTicks::Now();
+ if (bus_->HasDBusThread()) {
+ // Post a task to run the method in the origin thread.
+ bus_->GetOriginTaskRunner()->PostTask(FROM_HERE,
+ base::Bind(&ExportedObject::RunMethod,
+ this,
+ iter->second,
+ base::Passed(&method_call),
+ start_time));
+ } else {
+ // If the D-Bus thread is not used, just call the method directly.
+ MethodCall* method = method_call.get();
+ iter->second.Run(method,
+ base::Bind(&ExportedObject::SendResponse,
+ this,
+ start_time,
+ base::Passed(&method_call)));
+ }
+
+ // It's valid to say HANDLED here, and send a method response at a later
+ // time from OnMethodCompleted() asynchronously.
+ return DBUS_HANDLER_RESULT_HANDLED;
+}
+
+void ExportedObject::RunMethod(MethodCallCallback method_call_callback,
+ std::unique_ptr<MethodCall> method_call,
+ base::TimeTicks start_time) {
+ bus_->AssertOnOriginThread();
+ MethodCall* method = method_call.get();
+ method_call_callback.Run(method,
+ base::Bind(&ExportedObject::SendResponse,
+ this,
+ start_time,
+ base::Passed(&method_call)));
+}
+
+void ExportedObject::SendResponse(base::TimeTicks start_time,
+ std::unique_ptr<MethodCall> method_call,
+ std::unique_ptr<Response> response) {
+ DCHECK(method_call);
+ if (bus_->HasDBusThread()) {
+ bus_->GetDBusTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&ExportedObject::OnMethodCompleted,
+ this,
+ base::Passed(&method_call),
+ base::Passed(&response),
+ start_time));
+ } else {
+ OnMethodCompleted(std::move(method_call), std::move(response), start_time);
+ }
+}
+
+void ExportedObject::OnMethodCompleted(std::unique_ptr<MethodCall> method_call,
+ std::unique_ptr<Response> response,
+ base::TimeTicks start_time) {
+ bus_->AssertOnDBusThread();
+
+ // Record if the method call is successful, or not. 1 if successful.
+ UMA_HISTOGRAM_ENUMERATION("DBus.ExportedMethodHandleSuccess",
+ response ? 1 : 0,
+ kSuccessRatioHistogramMaxValue);
+
+ // Check if the bus is still connected. If the method takes long to
+ // complete, the bus may be shut down meanwhile.
+ if (!bus_->is_connected())
+ return;
+
+ if (!response) {
+ // Something bad happened in the method call.
+ std::unique_ptr<ErrorResponse> error_response(ErrorResponse::FromMethodCall(
+ method_call.get(), DBUS_ERROR_FAILED,
+ "error occurred in " + method_call->GetMember()));
+ bus_->Send(error_response->raw_message(), NULL);
+ return;
+ }
+
+ // The method call was successful.
+ bus_->Send(response->raw_message(), NULL);
+
+ // Record time spent to handle the method call. Don't include failures.
+ UMA_HISTOGRAM_TIMES("DBus.ExportedMethodHandleTime",
+ base::TimeTicks::Now() - start_time);
+}
+
+void ExportedObject::OnUnregistered(DBusConnection*) {}
+
+DBusHandlerResult ExportedObject::HandleMessageThunk(
+ DBusConnection* connection,
+ DBusMessage* raw_message,
+ void* user_data) {
+ ExportedObject* self = reinterpret_cast<ExportedObject*>(user_data);
+ return self->HandleMessage(connection, raw_message);
+}
+
+void ExportedObject::OnUnregisteredThunk(DBusConnection *connection,
+ void* user_data) {
+ ExportedObject* self = reinterpret_cast<ExportedObject*>(user_data);
+ return self->OnUnregistered(connection);
+}
+
+} // namespace dbus
diff --git a/libchrome/dbus/exported_object.h b/libchrome/dbus/exported_object.h
new file mode 100644
index 0000000..69a63a5
--- /dev/null
+++ b/libchrome/dbus/exported_object.h
@@ -0,0 +1,183 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_EXPORTED_OBJECT_H_
+#define DBUS_EXPORTED_OBJECT_H_
+
+#include <dbus/dbus.h>
+
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "dbus/dbus_export.h"
+#include "dbus/object_path.h"
+
+namespace dbus {
+
+class Bus;
+class MethodCall;
+class Response;
+class Signal;
+
+// ExportedObject is used to export objects and methods to other D-Bus
+// clients.
+//
+// ExportedObject is a ref counted object, to ensure that |this| of the
+// object is alive when callbacks referencing |this| are called.
+class CHROME_DBUS_EXPORT ExportedObject
+ : public base::RefCountedThreadSafe<ExportedObject> {
+ public:
+ // Client code should use Bus::GetExportedObject() instead of this
+ // constructor.
+ ExportedObject(Bus* bus, const ObjectPath& object_path);
+
+ // Called to send a response from an exported method. |response| is the
+ // response message. Callers should pass NULL in the event of an error that
+ // prevents the sending of a response.
+ typedef base::Callback<void(std::unique_ptr<Response> response)>
+ ResponseSender;
+
+ // Called when an exported method is called. |method_call| is the request
+ // message. |sender| is the callback that's used to send a response.
+ //
+ // |method_call| is owned by ExportedObject, hence client code should not
+ // delete |method_call|.
+ typedef base::Callback<void (MethodCall* method_call, ResponseSender sender)>
+ MethodCallCallback;
+
+ // Called when method exporting is done.
+ // |success| indicates whether exporting was successful or not.
+ typedef base::Callback<void (const std::string& interface_name,
+ const std::string& method_name,
+ bool success)>
+ OnExportedCallback;
+
+ // Exports the method specified by |interface_name| and |method_name|,
+ // and blocks until exporting is done. Returns true on success.
+ //
+ // |method_call_callback| will be called in the origin thread, when the
+ // exported method is called. As it's called in the origin thread,
+ // |method_callback| can safely reference objects in the origin thread
+ // (i.e. UI thread in most cases).
+ //
+ // IMPORTANT NOTE: You should export all methods before requesting a
+ // service name by Bus::RequestOwnership/AndBlock(). If you do it in the
+ // wrong order (i.e. request a service name then export methods), there
+ // will be a short time period where your service is unable to respond to
+ // method calls because these methods aren't yet exposed. This race is a
+ // real problem as clients may start calling methods of your service as
+ // soon as you acquire a service name, by watching the name owner change.
+ //
+ // BLOCKING CALL.
+ virtual bool ExportMethodAndBlock(const std::string& interface_name,
+ const std::string& method_name,
+ MethodCallCallback method_call_callback);
+
+ // Requests to export the method specified by |interface_name| and
+  // |method_name|. See also ExportMethodAndBlock().
+ //
+ // |on_exported_callback| is called when the method is exported or
+ // failed to be exported, in the origin thread.
+ //
+ // Must be called in the origin thread.
+ virtual void ExportMethod(const std::string& interface_name,
+ const std::string& method_name,
+ MethodCallCallback method_call_callback,
+ OnExportedCallback on_exported_callback);
+
+ // Requests to send the signal from this object. The signal will be sent
+ // synchronously if this method is called from the message loop in the D-Bus
+ // thread and asynchronously otherwise.
+ virtual void SendSignal(Signal* signal);
+
+ // Unregisters the object from the bus. The Bus object will take care of
+ // unregistering so you don't have to do this manually.
+ //
+ // BLOCKING CALL.
+ virtual void Unregister();
+
+ protected:
+ // This is protected, so we can define sub classes.
+ virtual ~ExportedObject();
+
+ private:
+ friend class base::RefCountedThreadSafe<ExportedObject>;
+
+ // Helper function for ExportMethod().
+ void ExportMethodInternal(const std::string& interface_name,
+ const std::string& method_name,
+ MethodCallCallback method_call_callback,
+ OnExportedCallback exported_callback);
+
+ // Called when the object is exported.
+ void OnExported(OnExportedCallback on_exported_callback,
+ const std::string& interface_name,
+ const std::string& method_name,
+ bool success);
+
+ // Helper function for SendSignal().
+ void SendSignalInternal(base::TimeTicks start_time,
+ DBusMessage* signal_message);
+
+ // Registers this object to the bus.
+  // Returns true on success, or if the object is already registered.
+ //
+ // BLOCKING CALL.
+ bool Register();
+
+ // Handles the incoming request messages and dispatches to the exported
+ // methods.
+ DBusHandlerResult HandleMessage(DBusConnection* connection,
+ DBusMessage* raw_message);
+
+ // Runs the method. Helper function for HandleMessage().
+ void RunMethod(MethodCallCallback method_call_callback,
+ std::unique_ptr<MethodCall> method_call,
+ base::TimeTicks start_time);
+
+ // Callback invoked by service provider to send a response to a method call.
+ // Can be called immediately from a MethodCallCallback to implement a
+ // synchronous service or called later to implement an asynchronous service.
+ void SendResponse(base::TimeTicks start_time,
+ std::unique_ptr<MethodCall> method_call,
+ std::unique_ptr<Response> response);
+
+ // Called on completion of the method run from SendResponse().
+ // Takes ownership of |method_call| and |response|.
+ void OnMethodCompleted(std::unique_ptr<MethodCall> method_call,
+ std::unique_ptr<Response> response,
+ base::TimeTicks start_time);
+
+ // Called when the object is unregistered.
+ void OnUnregistered(DBusConnection* connection);
+
+ // Redirects the function call to HandleMessage().
+ static DBusHandlerResult HandleMessageThunk(DBusConnection* connection,
+ DBusMessage* raw_message,
+ void* user_data);
+
+ // Redirects the function call to OnUnregistered().
+ static void OnUnregisteredThunk(DBusConnection* connection,
+ void* user_data);
+
+ scoped_refptr<Bus> bus_;
+ ObjectPath object_path_;
+ bool object_is_registered_;
+
+ // The method table where keys are absolute method names (i.e. interface
+ // name + method name), and values are the corresponding callbacks.
+ typedef std::map<std::string, MethodCallCallback> MethodTable;
+ MethodTable method_table_;
+};
+
+} // namespace dbus
+
+#endif // DBUS_EXPORTED_OBJECT_H_
diff --git a/libchrome/dbus/file_descriptor.cc b/libchrome/dbus/file_descriptor.cc
new file mode 100644
index 0000000..b690881
--- /dev/null
+++ b/libchrome/dbus/file_descriptor.cc
@@ -0,0 +1,68 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/files/file.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/threading/worker_pool.h"
+#include "dbus/file_descriptor.h"
+
+using std::swap;
+
+namespace dbus {
+
+void CHROME_DBUS_EXPORT FileDescriptor::Deleter::operator()(
+ FileDescriptor* fd) {
+ base::WorkerPool::PostTask(
+ FROM_HERE, base::Bind(&base::DeletePointer<FileDescriptor>, fd), false);
+}
+
+FileDescriptor::FileDescriptor(FileDescriptor&& other) : FileDescriptor() {
+ Swap(&other);
+}
+
+FileDescriptor::~FileDescriptor() {
+ if (owner_)
+ base::File auto_closer(value_);
+}
+
+FileDescriptor& FileDescriptor::operator=(FileDescriptor&& other) {
+ Swap(&other);
+ return *this;
+}
+
+int FileDescriptor::value() const {
+ CHECK(valid_);
+ return value_;
+}
+
+int FileDescriptor::TakeValue() {
+ CHECK(valid_); // NB: check first so owner_ is unchanged if this triggers
+ owner_ = false;
+ return value_;
+}
+
+void FileDescriptor::CheckValidity() {
+ base::File file(value_);
+ if (!file.IsValid()) {
+ valid_ = false;
+ return;
+ }
+
+ base::File::Info info;
+ bool ok = file.GetInfo(&info);
+ file.TakePlatformFile(); // Prevent |value_| from being closed by |file|.
+ valid_ = (ok && !info.is_directory);
+}
+
+void FileDescriptor::Swap(FileDescriptor* other) {
+ swap(value_, other->value_);
+ swap(owner_, other->owner_);
+ swap(valid_, other->valid_);
+}
+
+} // namespace dbus
diff --git a/libchrome/dbus/file_descriptor.h b/libchrome/dbus/file_descriptor.h
new file mode 100644
index 0000000..f8e8677
--- /dev/null
+++ b/libchrome/dbus/file_descriptor.h
@@ -0,0 +1,92 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_FILE_DESCRIPTOR_H_
+#define DBUS_FILE_DESCRIPTOR_H_
+
+#include <memory>
+
+#include "base/macros.h"
+#include "dbus/dbus_export.h"
+
+namespace dbus {
+
+// FileDescriptor is a type used to encapsulate D-Bus file descriptors
+// and to follow the RAII idiom appropriate for use with message operations
+// where the descriptor might be easily leaked. To guard against this the
+// descriptor is closed when an instance is destroyed if it is owned.
+// Ownership is asserted only when PutValue is used and TakeValue can be
+// used to take ownership.
+//
+// For example, in the following
+// FileDescriptor fd;
+// if (!reader->PopString(&name) ||
+// !reader->PopFileDescriptor(&fd) ||
+// !reader->PopUint32(&flags)) {
+// the descriptor in fd will be closed if the PopUint32 fails. But
+// writer.AppendFileDescriptor(dbus::FileDescriptor(1));
+// will not automatically close "1" because it is not owned.
+//
+// Descriptors must be validated before marshalling in a D-Bus message
+// or using them after unmarshalling. We disallow descriptors to a
+// directory to reduce the security risks. Splitting out validation
+// also allows the caller to do this work on the File thread to conform
+// with i/o restrictions.
+class CHROME_DBUS_EXPORT FileDescriptor {
+ public:
+ // This provides a simple way to pass around file descriptors since they must
+ // be closed on a thread that is allowed to perform I/O.
+ struct Deleter {
+ void CHROME_DBUS_EXPORT operator()(FileDescriptor* fd);
+ };
+
+ // Permits initialization without a value for passing to
+ // dbus::MessageReader::PopFileDescriptor to fill in and from int values.
+ FileDescriptor() : value_(-1), owner_(false), valid_(false) {}
+ explicit FileDescriptor(int value) : value_(value), owner_(false),
+ valid_(false) {}
+
+ FileDescriptor(FileDescriptor&& other);
+
+ virtual ~FileDescriptor();
+
+ FileDescriptor& operator=(FileDescriptor&& other);
+
+ // Retrieves value as an int without affecting ownership.
+ int value() const;
+
+ // Retrieves whether or not the descriptor is ok to send/receive.
+ int is_valid() const { return valid_; }
+
+ // Sets the value and assign ownership.
+ void PutValue(int value) {
+ value_ = value;
+ owner_ = true;
+ valid_ = false;
+ }
+
+ // Takes the value and ownership.
+ int TakeValue();
+
+ // Checks (and records) validity of the file descriptor.
+ // We disallow directories to avoid potential sandbox escapes.
+ // Note this call must be made on a thread where file i/o is allowed.
+ void CheckValidity();
+
+ private:
+ void Swap(FileDescriptor* other);
+
+ int value_;
+ bool owner_;
+ bool valid_;
+
+ DISALLOW_COPY_AND_ASSIGN(FileDescriptor);
+};
+
+using ScopedFileDescriptor =
+ std::unique_ptr<FileDescriptor, FileDescriptor::Deleter>;
+
+} // namespace dbus
+
+#endif // DBUS_FILE_DESCRIPTOR_H_
diff --git a/libchrome/dbus/message.cc b/libchrome/dbus/message.cc
new file mode 100644
index 0000000..aca9055
--- /dev/null
+++ b/libchrome/dbus/message.cc
@@ -0,0 +1,997 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dbus/message.h"
+
+#include <string>
+
+#include "base/format_macros.h"
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "dbus/object_path.h"
+
+namespace {
+
+// Appends the header name and the value to |output|, if the value is
+// not empty.
+void AppendStringHeader(const std::string& header_name,
+ const std::string& header_value,
+ std::string* output) {
+ if (!header_value.empty()) {
+ *output += header_name + ": " + header_value + "\n";
+ }
+}
+
+// Appends the header name and the value to |output|, if the value is
+// nonzero.
+void AppendUint32Header(const std::string& header_name,
+ uint32_t header_value,
+ std::string* output) {
+ if (header_value != 0) {
+ *output += (header_name + ": " + base::UintToString(header_value) + "\n");
+ }
+}
+
+} // namespace
+
+namespace dbus {
+
+bool IsDBusTypeUnixFdSupported() {
+ int major = 0, minor = 0, micro = 0;
+  dbus_get_version(&major, &minor, &micro);
+ return major >= 1 && minor >= 4;
+}
+
+Message::Message()
+ : raw_message_(NULL) {
+}
+
+Message::~Message() {
+ if (raw_message_)
+ dbus_message_unref(raw_message_);
+}
+
+void Message::Init(DBusMessage* raw_message) {
+ DCHECK(!raw_message_);
+ raw_message_ = raw_message;
+}
+
+Message::MessageType Message::GetMessageType() {
+ if (!raw_message_)
+ return MESSAGE_INVALID;
+ const int type = dbus_message_get_type(raw_message_);
+ return static_cast<Message::MessageType>(type);
+}
+
+std::string Message::GetMessageTypeAsString() {
+ switch (GetMessageType()) {
+ case MESSAGE_INVALID:
+ return "MESSAGE_INVALID";
+ case MESSAGE_METHOD_CALL:
+ return "MESSAGE_METHOD_CALL";
+ case MESSAGE_METHOD_RETURN:
+ return "MESSAGE_METHOD_RETURN";
+ case MESSAGE_SIGNAL:
+ return "MESSAGE_SIGNAL";
+ case MESSAGE_ERROR:
+ return "MESSAGE_ERROR";
+ }
+ NOTREACHED();
+ return std::string();
+}
+
+std::string Message::ToStringInternal(const std::string& indent,
+ MessageReader* reader) {
+ const char* kBrokenMessage = "[broken message]";
+ std::string output;
+ while (reader->HasMoreData()) {
+ const DataType type = reader->GetDataType();
+ switch (type) {
+ case BYTE: {
+ uint8_t value = 0;
+ if (!reader->PopByte(&value))
+ return kBrokenMessage;
+ output += indent + "byte " + base::UintToString(value) + "\n";
+ break;
+ }
+ case BOOL: {
+ bool value = false;
+ if (!reader->PopBool(&value))
+ return kBrokenMessage;
+ output += indent + "bool " + (value ? "true" : "false") + "\n";
+ break;
+ }
+ case INT16: {
+ int16_t value = 0;
+ if (!reader->PopInt16(&value))
+ return kBrokenMessage;
+ output += indent + "int16_t " + base::IntToString(value) + "\n";
+ break;
+ }
+ case UINT16: {
+ uint16_t value = 0;
+ if (!reader->PopUint16(&value))
+ return kBrokenMessage;
+ output += indent + "uint16_t " + base::UintToString(value) + "\n";
+ break;
+ }
+ case INT32: {
+ int32_t value = 0;
+ if (!reader->PopInt32(&value))
+ return kBrokenMessage;
+ output += indent + "int32_t " + base::IntToString(value) + "\n";
+ break;
+ }
+ case UINT32: {
+ uint32_t value = 0;
+ if (!reader->PopUint32(&value))
+ return kBrokenMessage;
+ output += indent + "uint32_t " + base::UintToString(value) + "\n";
+ break;
+ }
+ case INT64: {
+ int64_t value = 0;
+ if (!reader->PopInt64(&value))
+ return kBrokenMessage;
+ output += (indent + "int64_t " + base::Int64ToString(value) + "\n");
+ break;
+ }
+ case UINT64: {
+ uint64_t value = 0;
+ if (!reader->PopUint64(&value))
+ return kBrokenMessage;
+ output += (indent + "uint64_t " + base::Uint64ToString(value) + "\n");
+ break;
+ }
+ case DOUBLE: {
+ double value = 0;
+ if (!reader->PopDouble(&value))
+ return kBrokenMessage;
+ output += indent + "double " + base::DoubleToString(value) + "\n";
+ break;
+ }
+ case STRING: {
+ std::string value;
+ if (!reader->PopString(&value))
+ return kBrokenMessage;
+ // Truncate if the string is longer than the limit.
+ const size_t kTruncateLength = 100;
+ if (value.size() < kTruncateLength) {
+ output += indent + "string \"" + value + "\"\n";
+ } else {
+ std::string truncated;
+ base::TruncateUTF8ToByteSize(value, kTruncateLength, &truncated);
+ base::StringAppendF(&truncated, "... (%" PRIuS " bytes in total)",
+ value.size());
+ output += indent + "string \"" + truncated + "\"\n";
+ }
+ break;
+ }
+ case OBJECT_PATH: {
+ ObjectPath value;
+ if (!reader->PopObjectPath(&value))
+ return kBrokenMessage;
+ output += indent + "object_path \"" + value.value() + "\"\n";
+ break;
+ }
+ case ARRAY: {
+ MessageReader sub_reader(this);
+ if (!reader->PopArray(&sub_reader))
+ return kBrokenMessage;
+ output += indent + "array [\n";
+ output += ToStringInternal(indent + " ", &sub_reader);
+ output += indent + "]\n";
+ break;
+ }
+ case STRUCT: {
+ MessageReader sub_reader(this);
+ if (!reader->PopStruct(&sub_reader))
+ return kBrokenMessage;
+ output += indent + "struct {\n";
+ output += ToStringInternal(indent + " ", &sub_reader);
+ output += indent + "}\n";
+ break;
+ }
+ case DICT_ENTRY: {
+ MessageReader sub_reader(this);
+ if (!reader->PopDictEntry(&sub_reader))
+ return kBrokenMessage;
+ output += indent + "dict entry {\n";
+ output += ToStringInternal(indent + " ", &sub_reader);
+ output += indent + "}\n";
+ break;
+ }
+ case VARIANT: {
+ MessageReader sub_reader(this);
+ if (!reader->PopVariant(&sub_reader))
+ return kBrokenMessage;
+ output += indent + "variant ";
+ output += ToStringInternal(indent + " ", &sub_reader);
+ break;
+ }
+ case UNIX_FD: {
+ CHECK(IsDBusTypeUnixFdSupported());
+
+ FileDescriptor file_descriptor;
+ if (!reader->PopFileDescriptor(&file_descriptor))
+ return kBrokenMessage;
+ output += indent + "fd#" +
+ base::IntToString(file_descriptor.value()) + "\n";
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unknown type: " << type;
+ }
+ }
+ return output;
+}
+
+// The returned string consists of message headers such as
+// destination if any, followed by a blank line, and the message
+// payload. For example, a MethodCall's ToString() will look like:
+//
+// destination: com.example.Service
+// path: /com/example/Object
+// interface: com.example.Interface
+// member: SomeMethod
+//
+// string \"payload\"
+// ...
+std::string Message::ToString() {
+ if (!raw_message_)
+ return std::string();
+
+ // Generate headers first.
+ std::string headers;
+ AppendStringHeader("message_type", GetMessageTypeAsString(), &headers);
+ AppendStringHeader("destination", GetDestination(), &headers);
+ AppendStringHeader("path", GetPath().value(), &headers);
+ AppendStringHeader("interface", GetInterface(), &headers);
+ AppendStringHeader("member", GetMember(), &headers);
+ AppendStringHeader("error_name", GetErrorName(), &headers);
+ AppendStringHeader("sender", GetSender(), &headers);
+ AppendStringHeader("signature", GetSignature(), &headers);
+ AppendUint32Header("serial", GetSerial(), &headers);
+ AppendUint32Header("reply_serial", GetReplySerial(), &headers);
+
+ // Generate the payload.
+ MessageReader reader(this);
+ return headers + "\n" + ToStringInternal(std::string(), &reader);
+}
+
+bool Message::SetDestination(const std::string& destination) {
+ return dbus_message_set_destination(raw_message_, destination.c_str());
+}
+
+bool Message::SetPath(const ObjectPath& path) {
+ return dbus_message_set_path(raw_message_, path.value().c_str());
+}
+
+bool Message::SetInterface(const std::string& interface) {
+ return dbus_message_set_interface(raw_message_, interface.c_str());
+}
+
+bool Message::SetMember(const std::string& member) {
+ return dbus_message_set_member(raw_message_, member.c_str());
+}
+
+bool Message::SetErrorName(const std::string& error_name) {
+ return dbus_message_set_error_name(raw_message_, error_name.c_str());
+}
+
+bool Message::SetSender(const std::string& sender) {
+ return dbus_message_set_sender(raw_message_, sender.c_str());
+}
+
+void Message::SetSerial(uint32_t serial) {
+ dbus_message_set_serial(raw_message_, serial);
+}
+
+void Message::SetReplySerial(uint32_t reply_serial) {
+ dbus_message_set_reply_serial(raw_message_, reply_serial);
+}
+
+std::string Message::GetDestination() {
+ const char* destination = dbus_message_get_destination(raw_message_);
+ return destination ? destination : "";
+}
+
+ObjectPath Message::GetPath() {
+ const char* path = dbus_message_get_path(raw_message_);
+ return ObjectPath(path ? path : "");
+}
+
+std::string Message::GetInterface() {
+ const char* interface = dbus_message_get_interface(raw_message_);
+ return interface ? interface : "";
+}
+
+std::string Message::GetMember() {
+ const char* member = dbus_message_get_member(raw_message_);
+ return member ? member : "";
+}
+
+std::string Message::GetErrorName() {
+ const char* error_name = dbus_message_get_error_name(raw_message_);
+ return error_name ? error_name : "";
+}
+
+std::string Message::GetSender() {
+ const char* sender = dbus_message_get_sender(raw_message_);
+ return sender ? sender : "";
+}
+
+std::string Message::GetSignature() {
+ const char* signature = dbus_message_get_signature(raw_message_);
+ return signature ? signature : "";
+}
+
+uint32_t Message::GetSerial() {
+ return dbus_message_get_serial(raw_message_);
+}
+
+uint32_t Message::GetReplySerial() {
+ return dbus_message_get_reply_serial(raw_message_);
+}
+
+//
+// MethodCall implementation.
+//
+
+MethodCall::MethodCall(const std::string& interface_name,
+ const std::string& method_name)
+ : Message() {
+ Init(dbus_message_new(DBUS_MESSAGE_TYPE_METHOD_CALL));
+
+ CHECK(SetInterface(interface_name));
+ CHECK(SetMember(method_name));
+}
+
+MethodCall::MethodCall() : Message() {
+}
+
+MethodCall* MethodCall::FromRawMessage(DBusMessage* raw_message) {
+ DCHECK_EQ(DBUS_MESSAGE_TYPE_METHOD_CALL, dbus_message_get_type(raw_message));
+
+ MethodCall* method_call = new MethodCall;
+ method_call->Init(raw_message);
+ return method_call;
+}
+
+//
+// Signal implementation.
+//
+Signal::Signal(const std::string& interface_name,
+ const std::string& method_name)
+ : Message() {
+ Init(dbus_message_new(DBUS_MESSAGE_TYPE_SIGNAL));
+
+ CHECK(SetInterface(interface_name));
+ CHECK(SetMember(method_name));
+}
+
+Signal::Signal() : Message() {
+}
+
+Signal* Signal::FromRawMessage(DBusMessage* raw_message) {
+ DCHECK_EQ(DBUS_MESSAGE_TYPE_SIGNAL, dbus_message_get_type(raw_message));
+
+ Signal* signal = new Signal;
+ signal->Init(raw_message);
+ return signal;
+}
+
+//
+// Response implementation.
+//
+
+Response::Response() : Message() {
+}
+
+std::unique_ptr<Response> Response::FromRawMessage(DBusMessage* raw_message) {
+ DCHECK_EQ(DBUS_MESSAGE_TYPE_METHOD_RETURN,
+ dbus_message_get_type(raw_message));
+
+ std::unique_ptr<Response> response(new Response);
+ response->Init(raw_message);
+ return response;
+}
+
+std::unique_ptr<Response> Response::FromMethodCall(MethodCall* method_call) {
+ std::unique_ptr<Response> response(new Response);
+ response->Init(dbus_message_new_method_return(method_call->raw_message()));
+ return response;
+}
+
+std::unique_ptr<Response> Response::CreateEmpty() {
+ std::unique_ptr<Response> response(new Response);
+ response->Init(dbus_message_new(DBUS_MESSAGE_TYPE_METHOD_RETURN));
+ return response;
+}
+
+//
+// ErrorResponse implementation.
+//
+
+ErrorResponse::ErrorResponse() : Response() {
+}
+
+std::unique_ptr<ErrorResponse> ErrorResponse::FromRawMessage(
+ DBusMessage* raw_message) {
+ DCHECK_EQ(DBUS_MESSAGE_TYPE_ERROR, dbus_message_get_type(raw_message));
+
+ std::unique_ptr<ErrorResponse> response(new ErrorResponse);
+ response->Init(raw_message);
+ return response;
+}
+
+std::unique_ptr<ErrorResponse> ErrorResponse::FromMethodCall(
+ MethodCall* method_call,
+ const std::string& error_name,
+ const std::string& error_message) {
+ std::unique_ptr<ErrorResponse> response(new ErrorResponse);
+ response->Init(dbus_message_new_error(method_call->raw_message(),
+ error_name.c_str(),
+ error_message.c_str()));
+ return response;
+}
+
+//
+// MessageWriter implementation.
+//
+
+MessageWriter::MessageWriter(Message* message)
+ : message_(message),
+ container_is_open_(false) {
+ memset(&raw_message_iter_, 0, sizeof(raw_message_iter_));
+ if (message)
+ dbus_message_iter_init_append(message_->raw_message(), &raw_message_iter_);
+}
+
+MessageWriter::~MessageWriter() {
+}
+
+void MessageWriter::AppendByte(uint8_t value) {
+ AppendBasic(DBUS_TYPE_BYTE, &value);
+}
+
+void MessageWriter::AppendBool(bool value) {
+ // The size of dbus_bool_t and the size of bool are different. The
+ // former is always 4 per dbus-types.h, whereas the latter is usually 1.
+ // dbus_message_iter_append_basic() used in AppendBasic() expects four
+ // bytes for DBUS_TYPE_BOOLEAN, so we must pass a dbus_bool_t, instead
+ // of a bool, to AppendBasic().
+ dbus_bool_t dbus_value = value;
+ AppendBasic(DBUS_TYPE_BOOLEAN, &dbus_value);
+}
+
+void MessageWriter::AppendInt16(int16_t value) {
+ AppendBasic(DBUS_TYPE_INT16, &value);
+}
+
+void MessageWriter::AppendUint16(uint16_t value) {
+ AppendBasic(DBUS_TYPE_UINT16, &value);
+}
+
+void MessageWriter::AppendInt32(int32_t value) {
+ AppendBasic(DBUS_TYPE_INT32, &value);
+}
+
+void MessageWriter::AppendUint32(uint32_t value) {
+ AppendBasic(DBUS_TYPE_UINT32, &value);
+}
+
+void MessageWriter::AppendInt64(int64_t value) {
+ AppendBasic(DBUS_TYPE_INT64, &value);
+}
+
+void MessageWriter::AppendUint64(uint64_t value) {
+ AppendBasic(DBUS_TYPE_UINT64, &value);
+}
+
+void MessageWriter::AppendDouble(double value) {
+ AppendBasic(DBUS_TYPE_DOUBLE, &value);
+}
+
+void MessageWriter::AppendString(const std::string& value) {
+ // D-Bus Specification (0.19) says a string "must be valid UTF-8".
+ CHECK(base::IsStringUTF8(value));
+ const char* pointer = value.c_str();
+ AppendBasic(DBUS_TYPE_STRING, &pointer);
+ // TODO(satorux): It may make sense to return an error here, as the
+ // input string can be large. If needed, we could add something like
+ // bool AppendStringWithErrorChecking().
+}
+
+void MessageWriter::AppendObjectPath(const ObjectPath& value) {
+ CHECK(value.IsValid());
+ const char* pointer = value.value().c_str();
+ AppendBasic(DBUS_TYPE_OBJECT_PATH, &pointer);
+}
+
+// Ideally, client shouldn't need to supply the signature string, but
+// the underlying D-Bus library requires us to supply this before
+// appending contents to array and variant. It's technically possible
+// for us to design API that doesn't require the signature but it will
+// complicate the implementation so we decided to have the signature
+// parameter. Hopefully, variants are less used in request messages from
+// client side than response message from server side, so this should
+// not be a big issue.
+void MessageWriter::OpenArray(const std::string& signature,
+ MessageWriter* writer) {
+ DCHECK(!container_is_open_);
+
+ const bool success = dbus_message_iter_open_container(
+ &raw_message_iter_,
+ DBUS_TYPE_ARRAY,
+ signature.c_str(),
+ &writer->raw_message_iter_);
+ CHECK(success) << "Unable to allocate memory";
+ container_is_open_ = true;
+}
+
+void MessageWriter::OpenVariant(const std::string& signature,
+ MessageWriter* writer) {
+ DCHECK(!container_is_open_);
+
+ const bool success = dbus_message_iter_open_container(
+ &raw_message_iter_,
+ DBUS_TYPE_VARIANT,
+ signature.c_str(),
+ &writer->raw_message_iter_);
+ CHECK(success) << "Unable to allocate memory";
+ container_is_open_ = true;
+}
+
+void MessageWriter::OpenStruct(MessageWriter* writer) {
+ DCHECK(!container_is_open_);
+
+ const bool success = dbus_message_iter_open_container(
+ &raw_message_iter_,
+ DBUS_TYPE_STRUCT,
+ NULL, // Signature should be NULL.
+ &writer->raw_message_iter_);
+ CHECK(success) << "Unable to allocate memory";
+ container_is_open_ = true;
+}
+
+void MessageWriter::OpenDictEntry(MessageWriter* writer) {
+ DCHECK(!container_is_open_);
+
+ const bool success = dbus_message_iter_open_container(
+ &raw_message_iter_,
+ DBUS_TYPE_DICT_ENTRY,
+ NULL, // Signature should be NULL.
+ &writer->raw_message_iter_);
+ CHECK(success) << "Unable to allocate memory";
+ container_is_open_ = true;
+}
+
+void MessageWriter::CloseContainer(MessageWriter* writer) {
+ DCHECK(container_is_open_);
+
+ const bool success = dbus_message_iter_close_container(
+ &raw_message_iter_, &writer->raw_message_iter_);
+ CHECK(success) << "Unable to allocate memory";
+ container_is_open_ = false;
+}
+
+void MessageWriter::AppendArrayOfBytes(const uint8_t* values, size_t length) {
+ DCHECK(!container_is_open_);
+ MessageWriter array_writer(message_);
+ OpenArray("y", &array_writer);
+ const bool success = dbus_message_iter_append_fixed_array(
+ &(array_writer.raw_message_iter_),
+ DBUS_TYPE_BYTE,
+ &values,
+ static_cast<int>(length));
+ CHECK(success) << "Unable to allocate memory";
+ CloseContainer(&array_writer);
+}
+
+void MessageWriter::AppendArrayOfDoubles(const double* values, size_t length) {
+ DCHECK(!container_is_open_);
+ MessageWriter array_writer(message_);
+ OpenArray("d", &array_writer);
+ const bool success = dbus_message_iter_append_fixed_array(
+ &(array_writer.raw_message_iter_),
+ DBUS_TYPE_DOUBLE,
+ &values,
+ static_cast<int>(length));
+ CHECK(success) << "Unable to allocate memory";
+ CloseContainer(&array_writer);
+}
+
+void MessageWriter::AppendArrayOfStrings(
+ const std::vector<std::string>& strings) {
+ DCHECK(!container_is_open_);
+ MessageWriter array_writer(message_);
+ OpenArray("s", &array_writer);
+ for (size_t i = 0; i < strings.size(); ++i) {
+ array_writer.AppendString(strings[i]);
+ }
+ CloseContainer(&array_writer);
+}
+
+void MessageWriter::AppendArrayOfObjectPaths(
+ const std::vector<ObjectPath>& object_paths) {
+ DCHECK(!container_is_open_);
+ MessageWriter array_writer(message_);
+ OpenArray("o", &array_writer);
+ for (size_t i = 0; i < object_paths.size(); ++i) {
+ array_writer.AppendObjectPath(object_paths[i]);
+ }
+ CloseContainer(&array_writer);
+}
+
+void MessageWriter::AppendVariantOfByte(uint8_t value) {
+ AppendVariantOfBasic(DBUS_TYPE_BYTE, &value);
+}
+
+void MessageWriter::AppendVariantOfBool(bool value) {
+ // See the comment at MessageWriter::AppendBool().
+ dbus_bool_t dbus_value = value;
+ AppendVariantOfBasic(DBUS_TYPE_BOOLEAN, &dbus_value);
+}
+
+void MessageWriter::AppendVariantOfInt16(int16_t value) {
+ AppendVariantOfBasic(DBUS_TYPE_INT16, &value);
+}
+
+void MessageWriter::AppendVariantOfUint16(uint16_t value) {
+ AppendVariantOfBasic(DBUS_TYPE_UINT16, &value);
+}
+
+void MessageWriter::AppendVariantOfInt32(int32_t value) {
+ AppendVariantOfBasic(DBUS_TYPE_INT32, &value);
+}
+
+void MessageWriter::AppendVariantOfUint32(uint32_t value) {
+ AppendVariantOfBasic(DBUS_TYPE_UINT32, &value);
+}
+
+void MessageWriter::AppendVariantOfInt64(int64_t value) {
+ AppendVariantOfBasic(DBUS_TYPE_INT64, &value);
+}
+
+void MessageWriter::AppendVariantOfUint64(uint64_t value) {
+ AppendVariantOfBasic(DBUS_TYPE_UINT64, &value);
+}
+
+void MessageWriter::AppendVariantOfDouble(double value) {
+ AppendVariantOfBasic(DBUS_TYPE_DOUBLE, &value);
+}
+
+void MessageWriter::AppendVariantOfString(const std::string& value) {
+ const char* pointer = value.c_str();
+ AppendVariantOfBasic(DBUS_TYPE_STRING, &pointer);
+}
+
+void MessageWriter::AppendVariantOfObjectPath(const ObjectPath& value) {
+ const char* pointer = value.value().c_str();
+ AppendVariantOfBasic(DBUS_TYPE_OBJECT_PATH, &pointer);
+}
+
+void MessageWriter::AppendBasic(int dbus_type, const void* value) {
+ DCHECK(!container_is_open_);
+
+ const bool success = dbus_message_iter_append_basic(
+ &raw_message_iter_, dbus_type, value);
+ // dbus_message_iter_append_basic() fails only when there is not enough
+ // memory. We don't return this error as there is nothing we can do when
+ // it fails to allocate memory for a byte etc.
+ CHECK(success) << "Unable to allocate memory";
+}
+
+// Wraps a single basic value in a variant container: the variant's
+// signature is the one-character type code of |dbus_type|.
+void MessageWriter::AppendVariantOfBasic(int dbus_type, const void* value) {
+  // D-Bus basic type codes are printable ASCII characters (e.g. 'i',
+  // 's'), so the cast to char is safe; checked_cast guards it.
+  const std::string signature(1u,  // length
+                              base::checked_cast<char>(dbus_type));
+  MessageWriter variant_writer(message_);
+  OpenVariant(signature, &variant_writer);
+  variant_writer.AppendBasic(dbus_type, value);
+  CloseContainer(&variant_writer);
+}
+
+// Appends |value| as a DBUS_TYPE_UNIX_FD entry. Aborts the process if
+// the runtime libdbus lacks FD passing or the descriptor is not valid.
+void MessageWriter::AppendFileDescriptor(const FileDescriptor& value) {
+  // FD passing needs D-Bus >= 1.4; checked at runtime since the runtime
+  // library may differ from the compile-time one (see message.h).
+  CHECK(IsDBusTypeUnixFdSupported());
+
+  if (!value.is_valid()) {
+    // Refuse to send a descriptor that failed validation. NOTE(review):
+    // per the original comment, sending an unvetted descriptor (e.g. a
+    // directory fd) potentially enables a sandbox escape, hence the
+    // hard LOG(FATAL) rather than a soft failure.
+    LOG(FATAL) << "Attempt to pass invalid file descriptor";
+  }
+  int fd = value.value();
+  AppendBasic(DBUS_TYPE_UNIX_FD, &fd);
+}
+
+//
+// MessageReader implementation.
+//
+
+// Binds the reader's iterator to |message|. |message| may be NULL, in
+// which case the zeroed iterator is later filled in by PopContainer()
+// when this object is used as a sub-reader.
+MessageReader::MessageReader(Message* message)
+    : message_(message) {
+  // Zero the iterator so a sub-reader starts from a known state.
+  memset(&raw_message_iter_, 0, sizeof(raw_message_iter_));
+  if (message)
+    dbus_message_iter_init(message_->raw_message(), &raw_message_iter_);
+}
+
+
+MessageReader::~MessageReader() {
+}
+
+// Returns true while the iterator has not reached the end of the
+// message (i.e. the current argument type is not DBUS_TYPE_INVALID).
+bool MessageReader::HasMoreData() {
+  const int dbus_type = dbus_message_iter_get_arg_type(&raw_message_iter_);
+  return dbus_type != DBUS_TYPE_INVALID;
+}
+
+// Basic-type readers. Each delegates to PopBasic(), which verifies the
+// D-Bus type at the current iterator position and advances the iterator
+// on success.
+bool MessageReader::PopByte(uint8_t* value) {
+  return PopBasic(DBUS_TYPE_BYTE, value);
+}
+
+bool MessageReader::PopBool(bool* value) {
+  // Like MessageWriter::AppendBool(), we should copy |value| to
+  // dbus_bool_t, as dbus_message_iter_get_basic() used in PopBasic()
+  // expects four bytes for DBUS_TYPE_BOOLEAN.
+  dbus_bool_t dbus_value = FALSE;
+  const bool success = PopBasic(DBUS_TYPE_BOOLEAN, &dbus_value);
+  *value = static_cast<bool>(dbus_value);
+  return success;
+}
+
+bool MessageReader::PopInt16(int16_t* value) {
+  return PopBasic(DBUS_TYPE_INT16, value);
+}
+
+bool MessageReader::PopUint16(uint16_t* value) {
+  return PopBasic(DBUS_TYPE_UINT16, value);
+}
+
+bool MessageReader::PopInt32(int32_t* value) {
+  return PopBasic(DBUS_TYPE_INT32, value);
+}
+
+bool MessageReader::PopUint32(uint32_t* value) {
+  return PopBasic(DBUS_TYPE_UINT32, value);
+}
+
+bool MessageReader::PopInt64(int64_t* value) {
+  return PopBasic(DBUS_TYPE_INT64, value);
+}
+
+bool MessageReader::PopUint64(uint64_t* value) {
+  return PopBasic(DBUS_TYPE_UINT64, value);
+}
+
+bool MessageReader::PopDouble(double* value) {
+  return PopBasic(DBUS_TYPE_DOUBLE, value);
+}
+
+bool MessageReader::PopString(std::string* value) {
+  // The returned pointer is not freed here (it is presumably owned by
+  // the message), so the data is copied into |value|.
+  char* tmp_value = NULL;
+  const bool success = PopBasic(DBUS_TYPE_STRING, &tmp_value);
+  if (success)
+    value->assign(tmp_value);
+  return success;
+}
+
+bool MessageReader::PopObjectPath(ObjectPath* value) {
+  char* tmp_value = NULL;
+  const bool success = PopBasic(DBUS_TYPE_OBJECT_PATH, &tmp_value);
+  if (success)
+    *value = ObjectPath(tmp_value);
+  return success;
+}
+
+// Container readers. Each positions |sub_reader| inside the container
+// at the current iterator position via PopContainer(), which also
+// advances this reader past the container.
+bool MessageReader::PopArray(MessageReader* sub_reader) {
+  return PopContainer(DBUS_TYPE_ARRAY, sub_reader);
+}
+
+bool MessageReader::PopStruct(MessageReader* sub_reader) {
+  return PopContainer(DBUS_TYPE_STRUCT, sub_reader);
+}
+
+bool MessageReader::PopDictEntry(MessageReader* sub_reader) {
+  return PopContainer(DBUS_TYPE_DICT_ENTRY, sub_reader);
+}
+
+bool MessageReader::PopVariant(MessageReader* sub_reader) {
+  return PopContainer(DBUS_TYPE_VARIANT, sub_reader);
+}
+
+// Reads a fixed-type byte array in one shot. On success |*bytes| points
+// into memory owned by the message (see the header comment) and
+// |*length| holds the element count; an empty array yields nullptr/0.
+bool MessageReader::PopArrayOfBytes(const uint8_t** bytes, size_t* length) {
+  MessageReader array_reader(message_);
+  if (!PopArray(&array_reader))
+    return false;
+  // An empty array is allowed.
+  if (!array_reader.HasMoreData()) {
+    *length = 0;
+    *bytes = nullptr;  // nullptr (not NULL) for consistency with
+                       // PopArrayOfDoubles() below.
+    return true;
+  }
+  if (!array_reader.CheckDataType(DBUS_TYPE_BYTE))
+    return false;
+  int int_length = 0;
+  dbus_message_iter_get_fixed_array(&array_reader.raw_message_iter_,
+                                    bytes,
+                                    &int_length);
+  *length = static_cast<size_t>(int_length);
+  return true;
+}
+
+// Same as PopArrayOfBytes() but for a fixed array of doubles.
+bool MessageReader::PopArrayOfDoubles(const double** doubles, size_t* length) {
+  MessageReader array_reader(message_);
+  if (!PopArray(&array_reader))
+    return false;
+  // An empty array is allowed.
+  if (!array_reader.HasMoreData()) {
+    *length = 0;
+    *doubles = nullptr;
+    return true;
+  }
+  if (!array_reader.CheckDataType(DBUS_TYPE_DOUBLE))
+    return false;
+  int int_length = 0;
+  dbus_message_iter_get_fixed_array(&array_reader.raw_message_iter_,
+                                    doubles,
+                                    &int_length);
+  *length = static_cast<size_t>(int_length);
+  return true;
+}
+
+// Reads an array of strings into |strings|, which is cleared first.
+// Returns false (leaving |strings| partially filled) if any element is
+// not a string.
+bool MessageReader::PopArrayOfStrings(
+    std::vector<std::string> *strings) {
+  strings->clear();
+  MessageReader array_reader(message_);
+  if (!PopArray(&array_reader))
+    return false;
+  while (array_reader.HasMoreData()) {
+    std::string string;
+    if (!array_reader.PopString(&string))
+      return false;
+    strings->push_back(string);
+  }
+  return true;
+}
+
+// Reads an array of object paths into |object_paths|, which is cleared
+// first. Mirrors PopArrayOfStrings().
+bool MessageReader::PopArrayOfObjectPaths(
+    std::vector<ObjectPath> *object_paths) {
+  object_paths->clear();
+  MessageReader array_reader(message_);
+  if (!PopArray(&array_reader))
+    return false;
+  while (array_reader.HasMoreData()) {
+    ObjectPath object_path;
+    if (!array_reader.PopObjectPath(&object_path))
+      return false;
+    object_paths->push_back(object_path);
+  }
+  return true;
+}
+
+// Variant readers for basic types. Each unwraps a variant at the
+// current iterator position via PopVariantOfBasic() and reads the
+// contained basic value.
+bool MessageReader::PopVariantOfByte(uint8_t* value) {
+  return PopVariantOfBasic(DBUS_TYPE_BYTE, value);
+}
+
+bool MessageReader::PopVariantOfBool(bool* value) {
+  // See the comment at MessageReader::PopBool().
+  dbus_bool_t dbus_value = FALSE;
+  const bool success = PopVariantOfBasic(DBUS_TYPE_BOOLEAN, &dbus_value);
+  *value = static_cast<bool>(dbus_value);
+  return success;
+}
+
+bool MessageReader::PopVariantOfInt16(int16_t* value) {
+  return PopVariantOfBasic(DBUS_TYPE_INT16, value);
+}
+
+bool MessageReader::PopVariantOfUint16(uint16_t* value) {
+  return PopVariantOfBasic(DBUS_TYPE_UINT16, value);
+}
+
+bool MessageReader::PopVariantOfInt32(int32_t* value) {
+  return PopVariantOfBasic(DBUS_TYPE_INT32, value);
+}
+
+bool MessageReader::PopVariantOfUint32(uint32_t* value) {
+  return PopVariantOfBasic(DBUS_TYPE_UINT32, value);
+}
+
+bool MessageReader::PopVariantOfInt64(int64_t* value) {
+  return PopVariantOfBasic(DBUS_TYPE_INT64, value);
+}
+
+bool MessageReader::PopVariantOfUint64(uint64_t* value) {
+  return PopVariantOfBasic(DBUS_TYPE_UINT64, value);
+}
+
+bool MessageReader::PopVariantOfDouble(double* value) {
+  return PopVariantOfBasic(DBUS_TYPE_DOUBLE, value);
+}
+
+bool MessageReader::PopVariantOfString(std::string* value) {
+  // See the ownership note at MessageReader::PopString().
+  char* tmp_value = NULL;
+  const bool success = PopVariantOfBasic(DBUS_TYPE_STRING, &tmp_value);
+  if (success)
+    value->assign(tmp_value);
+  return success;
+}
+
+bool MessageReader::PopVariantOfObjectPath(ObjectPath* value) {
+  char* tmp_value = NULL;
+  const bool success = PopVariantOfBasic(DBUS_TYPE_OBJECT_PATH, &tmp_value);
+  if (success)
+    *value = ObjectPath(tmp_value);
+  return success;
+}
+
+// Returns the D-Bus type at the current iterator position, mapped onto
+// the Message::DataType enum (whose values equal the raw DBUS_TYPE_*
+// codes, making the cast safe). INVALID_DATA marks the end of data.
+Message::DataType MessageReader::GetDataType() {
+  const int dbus_type = dbus_message_iter_get_arg_type(&raw_message_iter_);
+  return static_cast<Message::DataType>(dbus_type);
+}
+
+// Returns the type signature at the current iterator position, or an
+// empty string when libdbus returns no signature.
+std::string MessageReader::GetDataSignature() {
+  std::string signature;
+  char* raw_signature = dbus_message_iter_get_signature(&raw_message_iter_);
+  if (raw_signature) {
+    signature = raw_signature;
+    // The raw string is allocated by libdbus and must be released with
+    // dbus_free() after copying.
+    dbus_free(raw_signature);
+  }
+  return signature;
+}
+
+// Returns true if the argument at the current iterator position has
+// the expected |dbus_type|; logs (verbosely) and returns false
+// otherwise. Does not advance the iterator.
+bool MessageReader::CheckDataType(int dbus_type) {
+  const int actual_type = dbus_message_iter_get_arg_type(&raw_message_iter_);
+  if (actual_type != dbus_type) {
+    VLOG(1) << "Type " << dbus_type << " is expected but got "
+            << actual_type;
+    return false;
+  }
+  return true;
+}
+
+// Reads one basic value of |dbus_type| into |value| and advances the
+// iterator. Returns false (without advancing) on a type mismatch.
+bool MessageReader::PopBasic(int dbus_type, void* value) {
+  // Check the output pointer up front: the original placed this DCHECK
+  // after dbus_message_iter_get_basic() had already written through
+  // |value|, where it could no longer catch a null pointer in time.
+  DCHECK(value);
+  if (!CheckDataType(dbus_type))
+    return false;
+  // dbus_message_iter_get_basic() here should always work, as we have
+  // already checked the next item's data type in CheckDataType(). Note
+  // that dbus_message_iter_get_basic() is a void function.
+  dbus_message_iter_get_basic(&raw_message_iter_, value);
+  dbus_message_iter_next(&raw_message_iter_);
+  return true;
+}
+
+// Recurses |sub_reader| into the container of |dbus_type| at the
+// current position and advances this reader past it. |sub_reader| must
+// be a different object, as both iterators are mutated.
+bool MessageReader::PopContainer(int dbus_type, MessageReader* sub_reader) {
+  DCHECK_NE(this, sub_reader);
+
+  if (!CheckDataType(dbus_type))
+    return false;
+  dbus_message_iter_recurse(&raw_message_iter_,
+                            &sub_reader->raw_message_iter_);
+  dbus_message_iter_next(&raw_message_iter_);
+  return true;
+}
+
+// Unwraps a variant then reads the single basic value it contains.
+bool MessageReader::PopVariantOfBasic(int dbus_type, void* value) {
+  MessageReader variant_reader(message_);
+  if (!PopVariant(&variant_reader))
+    return false;
+  return variant_reader.PopBasic(dbus_type, value);
+}
+
+// Reads a DBUS_TYPE_UNIX_FD entry into |value|. Aborts if the runtime
+// libdbus lacks FD-passing support (see IsDBusTypeUnixFdSupported()).
+bool MessageReader::PopFileDescriptor(FileDescriptor* value) {
+  CHECK(IsDBusTypeUnixFdSupported());
+
+  int fd = -1;
+  const bool success = PopBasic(DBUS_TYPE_UNIX_FD, &fd);
+  if (!success)
+    return false;
+
+  value->PutValue(fd);
+  // NB: the caller must check validity before using the value.
+  return true;
+}
+
+} // namespace dbus
diff --git a/libchrome/dbus/message.h b/libchrome/dbus/message.h
new file mode 100644
index 0000000..e7b6be0
--- /dev/null
+++ b/libchrome/dbus/message.h
@@ -0,0 +1,487 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_MESSAGE_H_
+#define DBUS_MESSAGE_H_
+
+#include <dbus/dbus.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "dbus/dbus_export.h"
+#include "dbus/file_descriptor.h"
+#include "dbus/object_path.h"
+
+namespace google {
+namespace protobuf {
+
+class MessageLite;
+
+} // namespace protobuf
+} // namespace google
+
+
+namespace dbus {
+
+class MessageWriter;
+class MessageReader;
+
+// DBUS_TYPE_UNIX_FD was added in D-Bus version 1.4
+#if !defined(DBUS_TYPE_UNIX_FD)
+#define DBUS_TYPE_UNIX_FD ((int) 'h')
+#endif
+
+// Returns true if Unix FD passing is supported in libdbus.
+// The check is done runtime rather than compile time as the libdbus
+// version used at runtime may be different from the one used at compile time.
+CHROME_DBUS_EXPORT bool IsDBusTypeUnixFdSupported();
+
+// Message is the base class of D-Bus message types. Client code must use
+// sub classes such as MethodCall and Response instead.
+//
+// The class name Message is very generic, but there should be no problem
+// as the class is inside 'dbus' namespace. We chose to name this way, as
+// libdbus defines lots of types starting with DBus, such as
+// DBusMessage. We should avoid confusion and conflict with these types.
+class CHROME_DBUS_EXPORT Message {
+ public:
+  // The message type used in D-Bus. Redefined here so client code
+  // doesn't need to use raw D-Bus macros. DBUS_MESSAGE_TYPE_INVALID
+  // etc. are #define macros. Having an enum type here makes code a bit
+  // more type-safe.
+  enum MessageType {
+    MESSAGE_INVALID = DBUS_MESSAGE_TYPE_INVALID,
+    MESSAGE_METHOD_CALL = DBUS_MESSAGE_TYPE_METHOD_CALL,
+    MESSAGE_METHOD_RETURN = DBUS_MESSAGE_TYPE_METHOD_RETURN,
+    MESSAGE_SIGNAL = DBUS_MESSAGE_TYPE_SIGNAL,
+    MESSAGE_ERROR = DBUS_MESSAGE_TYPE_ERROR,
+  };
+
+  // The data type used in the D-Bus type system. See the comment at
+  // MessageType for why we are redefining data types here.
+  enum DataType {
+    INVALID_DATA = DBUS_TYPE_INVALID,
+    BYTE = DBUS_TYPE_BYTE,
+    BOOL = DBUS_TYPE_BOOLEAN,
+    INT16 = DBUS_TYPE_INT16,
+    UINT16 = DBUS_TYPE_UINT16,
+    INT32 = DBUS_TYPE_INT32,
+    UINT32 = DBUS_TYPE_UINT32,
+    INT64 = DBUS_TYPE_INT64,
+    UINT64 = DBUS_TYPE_UINT64,
+    DOUBLE = DBUS_TYPE_DOUBLE,
+    STRING = DBUS_TYPE_STRING,
+    OBJECT_PATH = DBUS_TYPE_OBJECT_PATH,
+    ARRAY = DBUS_TYPE_ARRAY,
+    STRUCT = DBUS_TYPE_STRUCT,
+    DICT_ENTRY = DBUS_TYPE_DICT_ENTRY,
+    VARIANT = DBUS_TYPE_VARIANT,
+    UNIX_FD = DBUS_TYPE_UNIX_FD,
+  };
+
+  // Returns the type of the message. Returns MESSAGE_INVALID if
+  // raw_message_ is NULL.
+  MessageType GetMessageType();
+
+  // Returns the type of the message as string like "MESSAGE_METHOD_CALL"
+  // for instance.
+  std::string GetMessageTypeAsString();
+
+  // Returns the underlying libdbus message. May be NULL if the message
+  // has not yet been initialized (see Init() and GetMessageType()).
+  DBusMessage* raw_message() { return raw_message_; }
+
+  // Sets the destination, the path, the interface, the member, etc.
+  bool SetDestination(const std::string& destination);
+  bool SetPath(const ObjectPath& path);
+  bool SetInterface(const std::string& interface);
+  bool SetMember(const std::string& member);
+  bool SetErrorName(const std::string& error_name);
+  bool SetSender(const std::string& sender);
+  void SetSerial(uint32_t serial);
+  void SetReplySerial(uint32_t reply_serial);
+  // SetSignature() does not exist as we cannot do it.
+
+  // Gets the destination, the path, the interface, the member, etc.
+  // If not set, an empty string is returned.
+  std::string GetDestination();
+  ObjectPath GetPath();
+  std::string GetInterface();
+  std::string GetMember();
+  std::string GetErrorName();
+  std::string GetSender();
+  std::string GetSignature();
+  // Gets the serial and reply serial numbers. Returns 0 if not set.
+  uint32_t GetSerial();
+  uint32_t GetReplySerial();
+
+  // Returns the string representation of this message. Useful for
+  // debugging. The output is truncated as needed (ex. strings are truncated
+  // if longer than a certain limit defined in the .cc file).
+  std::string ToString();
+
+ protected:
+  // This class cannot be instantiated. Use sub classes instead.
+  Message();
+  virtual ~Message();
+
+  // Initializes the message with the given raw message.
+  void Init(DBusMessage* raw_message);
+
+ private:
+  // Helper function used in ToString().
+  std::string ToStringInternal(const std::string& indent,
+                               MessageReader* reader);
+
+  // The underlying libdbus message; NULL until Init() is called.
+  DBusMessage* raw_message_;
+  DISALLOW_COPY_AND_ASSIGN(Message);
+};
+
+// MethodCall is a type of message used for calling a method via D-Bus.
+class CHROME_DBUS_EXPORT MethodCall : public Message {
+ public:
+  // Creates a method call message for the specified interface name and
+  // the method name.
+  //
+  // For instance, to call "Get" method of DBUS_INTERFACE_INTROSPECTABLE
+  // interface ("org.freedesktop.DBus.Introspectable"), create a method
+  // call like this:
+  //
+  //   MethodCall method_call(DBUS_INTERFACE_INTROSPECTABLE, "Get");
+  //
+  // The constructor creates the internal raw message.
+  MethodCall(const std::string& interface_name,
+             const std::string& method_name);
+
+  // Returns a newly created MethodCall from the given raw message of the
+  // type DBUS_MESSAGE_TYPE_METHOD_CALL. The caller must delete the
+  // returned object. Takes the ownership of |raw_message|.
+  static MethodCall* FromRawMessage(DBusMessage* raw_message);
+
+ private:
+  // Creates a method call message. The internal raw message is NULL.
+  // Only used internally (by FromRawMessage()).
+  MethodCall();
+
+  DISALLOW_COPY_AND_ASSIGN(MethodCall);
+};
+
+// Signal is a type of message used to send a signal.
+class CHROME_DBUS_EXPORT Signal : public Message {
+ public:
+  // Creates a signal message for the specified interface name and the
+  // method name.
+  //
+  // For instance, to send "PropertiesChanged" signal of
+  // DBUS_INTERFACE_INTROSPECTABLE interface
+  // ("org.freedesktop.DBus.Introspectable"), create a signal like this:
+  //
+  //   Signal signal(DBUS_INTERFACE_INTROSPECTABLE, "PropertiesChanged");
+  //
+  // The constructor creates the internal raw_message_.
+  Signal(const std::string& interface_name,
+         const std::string& method_name);
+
+  // Returns a newly created Signal from the given raw message of the type
+  // DBUS_MESSAGE_TYPE_SIGNAL. The caller must delete the returned
+  // object. Takes the ownership of |raw_message|.
+  static Signal* FromRawMessage(DBusMessage* raw_message);
+
+ private:
+  // Creates a signal message. The internal raw message is NULL.
+  // Only used internally (by FromRawMessage()).
+  Signal();
+
+  DISALLOW_COPY_AND_ASSIGN(Signal);
+};
+
+// Response is a type of message used for receiving a response from a
+// method via D-Bus.
+class CHROME_DBUS_EXPORT Response : public Message {
+ public:
+  // Returns a newly created Response from the given raw message of the
+  // type DBUS_MESSAGE_TYPE_METHOD_RETURN. Takes the ownership of |raw_message|.
+  static std::unique_ptr<Response> FromRawMessage(DBusMessage* raw_message);
+
+  // Returns a newly created Response from the given method call.
+  // Used for implementing exported methods. Does NOT take the ownership of
+  // |method_call|.
+  static std::unique_ptr<Response> FromMethodCall(MethodCall* method_call);
+
+  // Returns a newly created Response with an empty payload.
+  // Useful for testing.
+  static std::unique_ptr<Response> CreateEmpty();
+
+ protected:
+  // Creates a Response message. The internal raw message is NULL.
+  // Protected so subclasses (e.g. ErrorResponse) can use it.
+  Response();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Response);
+};
+
+// ErrorResponse is a type of message used to return an error to the
+// caller of a method.
+class CHROME_DBUS_EXPORT ErrorResponse: public Response {
+ public:
+  // Returns a newly created ErrorResponse from the given raw message of
+  // the type DBUS_MESSAGE_TYPE_ERROR. Takes the ownership of
+  // |raw_message|.
+  static std::unique_ptr<ErrorResponse> FromRawMessage(
+      DBusMessage* raw_message);
+
+  // Returns a newly created ErrorResponse from the given method call, the
+  // error name, and the error message. The error name looks like
+  // "org.freedesktop.DBus.Error.Failed". Used for returning an error to a
+  // failed method call. Does NOT take the ownership of |method_call|.
+  static std::unique_ptr<ErrorResponse> FromMethodCall(
+      MethodCall* method_call,
+      const std::string& error_name,
+      const std::string& error_message);
+
+ private:
+  // Creates an ErrorResponse message. The internal raw message is NULL.
+  ErrorResponse();
+
+  DISALLOW_COPY_AND_ASSIGN(ErrorResponse);
+};
+
+// MessageWriter is used to write outgoing messages for calling methods
+// and sending signals.
+//
+// The main design goal of MessageReader and MessageWriter classes is to
+// provide a type safe API. In the past, there was a Chrome OS blocker
+// bug, that took days to fix, that would have been prevented if the API
+// was type-safe.
+//
+// For instance, instead of doing something like:
+//
+// // We shouldn't add '&' to str here, but it compiles with '&' added.
+// dbus_g_proxy_call(..., G_TYPE_STRING, str, G_TYPE_INVALID, ...)
+//
+// We want to do something like:
+//
+// writer.AppendString(str);
+//
+class CHROME_DBUS_EXPORT MessageWriter {
+ public:
+  // Data added with Append* will be written to |message|, which may be NULL
+  // to create a sub-writer for passing to OpenArray, etc.
+  explicit MessageWriter(Message* message);
+  ~MessageWriter();
+
+  // Appends a basic-typed value to the message.
+  void AppendByte(uint8_t value);
+  void AppendBool(bool value);
+  void AppendInt16(int16_t value);
+  void AppendUint16(uint16_t value);
+  void AppendInt32(int32_t value);
+  void AppendUint32(uint32_t value);
+  void AppendInt64(int64_t value);
+  void AppendUint64(uint64_t value);
+  void AppendDouble(double value);
+  void AppendString(const std::string& value);
+  void AppendObjectPath(const ObjectPath& value);
+  void AppendFileDescriptor(const FileDescriptor& value);
+
+  // Opens an array. The array contents can be added to the array with
+  // |sub_writer|. The client code must close the array with
+  // CloseContainer(), once all contents are added.
+  //
+  // |signature| parameter is used to supply the D-Bus type signature of
+  // the array contents. For instance, if you want an array of strings,
+  // then you pass "s" as the signature.
+  //
+  // See the spec for details about the type signatures.
+  //   http://dbus.freedesktop.org/doc/dbus-specification.html
+  //   #message-protocol-signatures
+  //
+  void OpenArray(const std::string& signature, MessageWriter* sub_writer);
+  // Do the same for a variant.
+  void OpenVariant(const std::string& signature, MessageWriter* sub_writer);
+  // Do the same for Struct and dict entry. They don't need the signature.
+  void OpenStruct(MessageWriter* sub_writer);
+  void OpenDictEntry(MessageWriter* sub_writer);
+
+  // Close the container for a array/variant/struct/dict entry.
+  void CloseContainer(MessageWriter* sub_writer);
+
+  // Appends the array of bytes. Arrays of bytes are often used for
+  // exchanging binary blobs hence it's worth having a specialized
+  // function.
+  void AppendArrayOfBytes(const uint8_t* values, size_t length);
+
+  // Appends the array of doubles. Used for audio mixer matrix doubles.
+  void AppendArrayOfDoubles(const double* values, size_t length);
+
+  // Appends the array of strings. Arrays of strings are often used for
+  // exchanging lists of names hence it's worth having a specialized
+  // function.
+  void AppendArrayOfStrings(const std::vector<std::string>& strings);
+
+  // Appends the array of object paths. Arrays of object paths are often
+  // used when exchanging object paths, hence it's worth having a
+  // specialized function.
+  void AppendArrayOfObjectPaths(const std::vector<ObjectPath>& object_paths);
+
+  // Appends the byte wrapped in a variant data container. Variants are
+  // widely used in D-Bus services so it's worth having a specialized
+  // function. For instance, The third parameter of
+  // "org.freedesktop.DBus.Properties.Set" is a variant.
+  void AppendVariantOfByte(uint8_t value);
+  void AppendVariantOfBool(bool value);
+  void AppendVariantOfInt16(int16_t value);
+  void AppendVariantOfUint16(uint16_t value);
+  void AppendVariantOfInt32(int32_t value);
+  void AppendVariantOfUint32(uint32_t value);
+  void AppendVariantOfInt64(int64_t value);
+  void AppendVariantOfUint64(uint64_t value);
+  void AppendVariantOfDouble(double value);
+  void AppendVariantOfString(const std::string& value);
+  void AppendVariantOfObjectPath(const ObjectPath& value);
+
+ private:
+  // Helper function used to implement AppendByte etc.
+  void AppendBasic(int dbus_type, const void* value);
+
+  // Helper function used to implement AppendVariantOfByte() etc.
+  void AppendVariantOfBasic(int dbus_type, const void* value);
+
+  // The message being written; may be NULL for sub-writers (see ctor).
+  Message* message_;
+  DBusMessageIter raw_message_iter_;
+  // True while a sub-container opened via Open*() has not been closed;
+  // AppendBasic() DCHECKs against appending in that state.
+  bool container_is_open_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessageWriter);
+};
+
+// MessageReader is used to read incoming messages such as responses for
+// method calls.
+//
+// MessageReader manages an internal iterator to read data. All functions
+// starting with Pop advance the iterator on success.
+class CHROME_DBUS_EXPORT MessageReader {
+ public:
+  // The data will be read from the given |message|, which may be NULL to
+  // create a sub-reader for passing to PopArray, etc.
+  explicit MessageReader(Message* message);
+  ~MessageReader();
+
+  // Returns true if the reader has more data to read. The function is
+  // used to iterate contents in a container like:
+  //
+  //   while (reader.HasMoreData())
+  //     reader.PopString(&value);
+  bool HasMoreData();
+
+  // Gets the byte at the current iterator position.
+  // Returns true and advances the iterator on success.
+  // Returns false if the data type is not a byte.
+  // The same contract applies to the other Pop*() overloads below.
+  bool PopByte(uint8_t* value);
+  bool PopBool(bool* value);
+  bool PopInt16(int16_t* value);
+  bool PopUint16(uint16_t* value);
+  bool PopInt32(int32_t* value);
+  bool PopUint32(uint32_t* value);
+  bool PopInt64(int64_t* value);
+  bool PopUint64(uint64_t* value);
+  bool PopDouble(double* value);
+  bool PopString(std::string* value);
+  bool PopObjectPath(ObjectPath* value);
+  bool PopFileDescriptor(FileDescriptor* value);
+
+  // Sets up the given message reader to read an array at the current
+  // iterator position.
+  // Returns true and advances the iterator on success.
+  // Returns false if the data type is not an array.
+  bool PopArray(MessageReader* sub_reader);
+  bool PopStruct(MessageReader* sub_reader);
+  bool PopDictEntry(MessageReader* sub_reader);
+  bool PopVariant(MessageReader* sub_reader);
+
+  // Gets the array of bytes at the current iterator position.
+  // Returns true and advances the iterator on success.
+  //
+  // Arrays of bytes are often used for exchanging binary blobs hence it's
+  // worth having a specialized function.
+  //
+  // Ownership of the memory pointed to by |bytes| remains with the
+  // MessageReader; |bytes| must be copied if the contents will be referenced
+  // after the MessageReader is destroyed.
+  bool PopArrayOfBytes(const uint8_t** bytes, size_t* length);
+
+  // Gets the array of doubles at the current iterator position.
+  // Same ownership rules as PopArrayOfBytes().
+  bool PopArrayOfDoubles(const double** doubles, size_t* length);
+
+  // Gets the array of strings at the current iterator position. |strings| is
+  // cleared before being modified. Returns true and advances the iterator on
+  // success.
+  //
+  // Arrays of strings are often used to communicate with D-Bus
+  // services like KWallet, hence it's worth having a specialized
+  // function.
+  bool PopArrayOfStrings(std::vector<std::string>* strings);
+
+  // Gets the array of object paths at the current iterator position.
+  // |object_paths| is cleared before being modified. Returns true and advances
+  // the iterator on success.
+  //
+  // Arrays of object paths are often used to communicate with D-Bus
+  // services like NetworkManager, hence it's worth having a specialized
+  // function.
+  bool PopArrayOfObjectPaths(std::vector<ObjectPath>* object_paths);
+
+  // Gets the byte from the variant data container at the current iterator
+  // position.
+  // Returns true and advances the iterator on success.
+  //
+  // Variants are widely used in D-Bus services so it's worth having a
+  // specialized function. For instance, The return value type of
+  // "org.freedesktop.DBus.Properties.Get" is a variant.
+  bool PopVariantOfByte(uint8_t* value);
+  bool PopVariantOfBool(bool* value);
+  bool PopVariantOfInt16(int16_t* value);
+  bool PopVariantOfUint16(uint16_t* value);
+  bool PopVariantOfInt32(int32_t* value);
+  bool PopVariantOfUint32(uint32_t* value);
+  bool PopVariantOfInt64(int64_t* value);
+  bool PopVariantOfUint64(uint64_t* value);
+  bool PopVariantOfDouble(double* value);
+  bool PopVariantOfString(std::string* value);
+  bool PopVariantOfObjectPath(ObjectPath* value);
+
+  // Get the data type of the value at the current iterator
+  // position. INVALID_DATA will be returned if the iterator points to the
+  // end of the message.
+  Message::DataType GetDataType();
+
+  // Get the DBus signature of the value at the current iterator position.
+  // An empty string will be returned if the iterator points to the end of
+  // the message.
+  std::string GetDataSignature();
+
+ private:
+  // Returns true if the data type at the current iterator position
+  // matches the given D-Bus type, such as DBUS_TYPE_BYTE.
+  bool CheckDataType(int dbus_type);
+
+  // Helper function used to implement PopByte() etc.
+  bool PopBasic(int dbus_type, void *value);
+
+  // Helper function used to implement PopArray() etc.
+  bool PopContainer(int dbus_type, MessageReader* sub_reader);
+
+  // Helper function used to implement PopVariantOfByte() etc.
+  bool PopVariantOfBasic(int dbus_type, void* value);
+
+  // The message being read; may be NULL for sub-readers (see ctor).
+  Message* message_;
+  DBusMessageIter raw_message_iter_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessageReader);
+};
+
+} // namespace dbus
+
+#endif // DBUS_MESSAGE_H_
diff --git a/libchrome/dbus/mock_bus.cc b/libchrome/dbus/mock_bus.cc
new file mode 100644
index 0000000..9e76454
--- /dev/null
+++ b/libchrome/dbus/mock_bus.cc
@@ -0,0 +1,17 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dbus/mock_bus.h"
+
+#include "base/location.h"
+
+namespace dbus {
+
+// The mock forwards construction to the real Bus so gmock-mocked
+// methods operate on a properly constructed base object.
+MockBus::MockBus(const Bus::Options& options) : Bus(options) {
+}
+
+MockBus::~MockBus() {
+}
+
+} // namespace dbus
diff --git a/libchrome/dbus/mock_bus.h b/libchrome/dbus/mock_bus.h
new file mode 100644
index 0000000..b50f230
--- /dev/null
+++ b/libchrome/dbus/mock_bus.h
@@ -0,0 +1,85 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_MOCK_BUS_H_
+#define DBUS_MOCK_BUS_H_
+
+#include <stdint.h>
+
+#include "dbus/bus.h"
+#include "dbus/object_path.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace dbus {
+
+// Mock for Bus class. Along with MockObjectProxy and MockExportedObject,
+// the mock classes can be used to write unit tests without issuing real
+// D-Bus calls.
+class MockBus : public Bus {
+ public:
+  MockBus(const Bus::Options& options);
+
+  // gmock stubs mirroring the virtual Bus interface; see dbus/bus.h for
+  // the semantics of each method.
+  MOCK_METHOD2(GetObjectProxy, ObjectProxy*(const std::string& service_name,
+                                            const ObjectPath& object_path));
+  MOCK_METHOD3(GetObjectProxyWithOptions,
+               ObjectProxy*(const std::string& service_name,
+                            const ObjectPath& object_path,
+                            int options));
+  MOCK_METHOD3(RemoveObjectProxy, bool(
+      const std::string& service_name,
+      const ObjectPath& object_path,
+      const base::Closure& callback));
+  MOCK_METHOD4(RemoveObjectProxyWithOptions, bool(
+      const std::string& service_name,
+      const ObjectPath& object_path,
+      int options,
+      const base::Closure& callback));
+  MOCK_METHOD1(GetExportedObject, ExportedObject*(
+      const ObjectPath& object_path));
+  MOCK_METHOD2(GetObjectManager, ObjectManager*(const std::string&,
+                                                const ObjectPath&));
+  MOCK_METHOD0(ShutdownAndBlock, void());
+  MOCK_METHOD0(ShutdownOnDBusThreadAndBlock, void());
+  MOCK_METHOD0(Connect, bool());
+  MOCK_METHOD3(RequestOwnership, void(
+      const std::string& service_name,
+      ServiceOwnershipOptions options,
+      OnOwnershipCallback on_ownership_callback));
+  MOCK_METHOD2(RequestOwnershipAndBlock, bool(const std::string& service_name,
+                                              ServiceOwnershipOptions options));
+  MOCK_METHOD1(ReleaseOwnership, bool(const std::string& service_name));
+  MOCK_METHOD0(SetUpAsyncOperations, bool());
+  MOCK_METHOD3(SendWithReplyAndBlock, DBusMessage*(DBusMessage* request,
+                                                   int timeout_ms,
+                                                   DBusError* error));
+  MOCK_METHOD3(SendWithReply, void(DBusMessage* request,
+                                   DBusPendingCall** pending_call,
+                                   int timeout_ms));
+  MOCK_METHOD2(Send, void(DBusMessage* request, uint32_t* serial));
+  MOCK_METHOD2(AddFilter, void(DBusHandleMessageFunction handle_message,
+                               void* user_data));
+  MOCK_METHOD2(RemoveFilter, void(DBusHandleMessageFunction handle_message,
+                                  void* user_data));
+  MOCK_METHOD2(AddMatch, void(const std::string& match_rule,
+                              DBusError* error));
+  MOCK_METHOD2(RemoveMatch, bool(const std::string& match_rule,
+                                 DBusError* error));
+  MOCK_METHOD4(TryRegisterObjectPath, bool(const ObjectPath& object_path,
+                                           const DBusObjectPathVTable* vtable,
+                                           void* user_data,
+                                           DBusError* error));
+  MOCK_METHOD1(UnregisterObjectPath, void(const ObjectPath& object_path));
+  MOCK_METHOD0(GetDBusTaskRunner, base::TaskRunner*());
+  MOCK_METHOD0(GetOriginTaskRunner, base::TaskRunner*());
+  MOCK_METHOD0(HasDBusThread, bool());
+  MOCK_METHOD0(AssertOnOriginThread, void());
+  MOCK_METHOD0(AssertOnDBusThread, void());
+
+ protected:
+  // Protected like the base class destructor — presumably Bus instances
+  // are reference-counted and destroyed via Release; see dbus/bus.h to
+  // confirm.
+  virtual ~MockBus();
+};
+
+} // namespace dbus
+
+#endif // DBUS_MOCK_BUS_H_
diff --git a/libchrome/dbus/mock_exported_object.cc b/libchrome/dbus/mock_exported_object.cc
new file mode 100644
index 0000000..ff507dd
--- /dev/null
+++ b/libchrome/dbus/mock_exported_object.cc
@@ -0,0 +1,17 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dbus/mock_exported_object.h"
+
+namespace dbus {
+
+// The mock forwards construction to the real ExportedObject so the
+// gmock-stubbed methods operate on a properly constructed base.
+MockExportedObject::MockExportedObject(Bus* bus,
+                                       const ObjectPath& object_path)
+    : ExportedObject(bus, object_path) {
+}
+
+MockExportedObject::~MockExportedObject() {
+}
+
+} // namespace dbus
diff --git a/libchrome/dbus/mock_exported_object.h b/libchrome/dbus/mock_exported_object.h
new file mode 100644
index 0000000..80ca951
--- /dev/null
+++ b/libchrome/dbus/mock_exported_object.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_MOCK_EXPORTED_OBJECT_H_
+#define DBUS_MOCK_EXPORTED_OBJECT_H_
+
+#include <string>
+
+#include "dbus/exported_object.h"
+#include "dbus/object_path.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace dbus {
+
+// Mock for ExportedObject.
+class MockExportedObject : public ExportedObject {
+ public:
+ MockExportedObject(Bus* bus,
+ const ObjectPath& object_path);
+
+ MOCK_METHOD3(ExportMethodAndBlock,
+ bool(const std::string& interface_name,
+ const std::string& method_name,
+ MethodCallCallback method_call_callback));
+ MOCK_METHOD4(ExportMethod,
+ void(const std::string& interface_name,
+ const std::string& method_name,
+ MethodCallCallback method_call_callback,
+ OnExportedCallback on_exported_callback));
+ MOCK_METHOD1(SendSignal, void(Signal* signal));
+ MOCK_METHOD0(Unregister, void());
+
+ protected:
+ virtual ~MockExportedObject();
+};
+
+} // namespace dbus
+
+#endif // DBUS_MOCK_EXPORTED_OBJECT_H_
diff --git a/libchrome/dbus/mock_object_manager.cc b/libchrome/dbus/mock_object_manager.cc
new file mode 100644
index 0000000..dcba78e
--- /dev/null
+++ b/libchrome/dbus/mock_object_manager.cc
@@ -0,0 +1,18 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dbus/mock_object_manager.h"
+
+namespace dbus {
+
+MockObjectManager::MockObjectManager(Bus* bus,
+ const std::string& service_name,
+ const ObjectPath& object_path)
+ : ObjectManager(bus, service_name, object_path) {
+}
+
+MockObjectManager::~MockObjectManager() {
+}
+
+} // namespace dbus
diff --git a/libchrome/dbus/mock_object_manager.h b/libchrome/dbus/mock_object_manager.h
new file mode 100644
index 0000000..e4c76ba
--- /dev/null
+++ b/libchrome/dbus/mock_object_manager.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_MOCK_OBJECT_MANAGER_H_
+#define DBUS_MOCK_OBJECT_MANAGER_H_
+
+#include <string>
+
+#include "dbus/message.h"
+#include "dbus/object_manager.h"
+#include "dbus/object_path.h"
+#include "dbus/object_proxy.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace dbus {
+
+// Mock for ObjectManager.
+class MockObjectManager : public ObjectManager {
+ public:
+ MockObjectManager(Bus* bus,
+ const std::string& service_name,
+ const ObjectPath& object_path);
+
+ MOCK_METHOD2(RegisterInterface, void(const std::string&,
+ Interface*));
+ MOCK_METHOD1(UnregisterInterface, void(const std::string&));
+ MOCK_METHOD0(GetObjects, std::vector<ObjectPath>());
+ MOCK_METHOD1(GetObjectsWithInterface,
+ std::vector<ObjectPath>(const std::string&));
+ MOCK_METHOD1(GetObjectProxy, ObjectProxy*(const ObjectPath&));
+ MOCK_METHOD2(GetProperties, PropertySet*(const ObjectPath&,
+ const std::string&));
+ MOCK_METHOD0(GetManagedObjects, void());
+
+ protected:
+ virtual ~MockObjectManager();
+};
+
+} // namespace dbus
+
+#endif // DBUS_MOCK_OBJECT_MANAGER_H_
diff --git a/libchrome/dbus/mock_object_proxy.cc b/libchrome/dbus/mock_object_proxy.cc
new file mode 100644
index 0000000..7e26f01
--- /dev/null
+++ b/libchrome/dbus/mock_object_proxy.cc
@@ -0,0 +1,18 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dbus/mock_object_proxy.h"
+
+namespace dbus {
+
+MockObjectProxy::MockObjectProxy(Bus* bus,
+ const std::string& service_name,
+ const ObjectPath& object_path)
+ : ObjectProxy(bus, service_name, object_path, DEFAULT_OPTIONS) {
+}
+
+MockObjectProxy::~MockObjectProxy() {
+}
+
+} // namespace dbus
diff --git a/libchrome/dbus/mock_object_proxy.h b/libchrome/dbus/mock_object_proxy.h
new file mode 100644
index 0000000..f27f6f6
--- /dev/null
+++ b/libchrome/dbus/mock_object_proxy.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_MOCK_OBJECT_PROXY_H_
+#define DBUS_MOCK_OBJECT_PROXY_H_
+
+#include <string>
+
+#include "dbus/message.h"
+#include "dbus/object_path.h"
+#include "dbus/object_proxy.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace dbus {
+
+// Mock for ObjectProxy.
+class MockObjectProxy : public ObjectProxy {
+ public:
+ MockObjectProxy(Bus* bus,
+ const std::string& service_name,
+ const ObjectPath& object_path);
+
+ // GMock doesn't support the return type of std::unique_ptr<> because
+ // std::unique_ptr is uncopyable. This is a workaround which defines
+ // |MockCallMethodAndBlock| as a mock method and makes
+ // |CallMethodAndBlock| call the mocked method. Use |MockCallMethodAndBlock|
+ // for setting/testing expectations.
+ MOCK_METHOD3(MockCallMethodAndBlockWithErrorDetails,
+ Response*(MethodCall* method_call,
+ int timeout_ms,
+ ScopedDBusError* error));
+ std::unique_ptr<Response> CallMethodAndBlockWithErrorDetails(
+ MethodCall* method_call,
+ int timeout_ms,
+ ScopedDBusError* error) override {
+ return std::unique_ptr<Response>(
+ MockCallMethodAndBlockWithErrorDetails(method_call, timeout_ms, error));
+ }
+ MOCK_METHOD2(MockCallMethodAndBlock, Response*(MethodCall* method_call,
+ int timeout_ms));
+ std::unique_ptr<Response> CallMethodAndBlock(MethodCall* method_call,
+ int timeout_ms) override {
+ return std::unique_ptr<Response>(
+ MockCallMethodAndBlock(method_call, timeout_ms));
+ }
+ MOCK_METHOD3(CallMethod, void(MethodCall* method_call,
+ int timeout_ms,
+ ResponseCallback callback));
+ MOCK_METHOD4(CallMethodWithErrorCallback, void(MethodCall* method_call,
+ int timeout_ms,
+ ResponseCallback callback,
+ ErrorCallback error_callback));
+ MOCK_METHOD4(ConnectToSignal,
+ void(const std::string& interface_name,
+ const std::string& signal_name,
+ SignalCallback signal_callback,
+ OnConnectedCallback on_connected_callback));
+ MOCK_METHOD0(Detach, void());
+
+ protected:
+ ~MockObjectProxy() override;
+};
+
+} // namespace dbus
+
+#endif // DBUS_MOCK_OBJECT_PROXY_H_
diff --git a/libchrome/dbus/object_manager.cc b/libchrome/dbus/object_manager.cc
new file mode 100644
index 0000000..178bb5f
--- /dev/null
+++ b/libchrome/dbus/object_manager.cc
@@ -0,0 +1,536 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dbus/object_manager.h"
+
+#include <stddef.h>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "base/strings/stringprintf.h"
+#include "base/task_runner_util.h"
+#include "dbus/bus.h"
+#include "dbus/dbus_statistics.h"
+#include "dbus/message.h"
+#include "dbus/object_proxy.h"
+#include "dbus/property.h"
+#include "dbus/scoped_dbus_error.h"
+#include "dbus/util.h"
+
+namespace dbus {
+
+ObjectManager::Object::Object()
+ : object_proxy(NULL) {
+}
+
+ObjectManager::Object::~Object() {
+}
+
+ObjectManager::ObjectManager(Bus* bus,
+ const std::string& service_name,
+ const ObjectPath& object_path)
+ : bus_(bus),
+ service_name_(service_name),
+ object_path_(object_path),
+ setup_success_(false),
+ cleanup_called_(false),
+ weak_ptr_factory_(this) {
+ DVLOG(1) << "Creating ObjectManager for " << service_name_
+ << " " << object_path_.value();
+ DCHECK(bus_);
+ bus_->AssertOnOriginThread();
+ object_proxy_ = bus_->GetObjectProxy(service_name_, object_path_);
+ object_proxy_->SetNameOwnerChangedCallback(
+ base::Bind(&ObjectManager::NameOwnerChanged,
+ weak_ptr_factory_.GetWeakPtr()));
+
+ // Set up a match rule and a filter function to handle PropertiesChanged
+ // signals from the service. This is important to avoid any race conditions
+ // that might cause us to miss PropertiesChanged signals once all objects are
+ // initialized via GetManagedObjects.
+ base::PostTaskAndReplyWithResult(
+ bus_->GetDBusTaskRunner(),
+ FROM_HERE,
+ base::Bind(&ObjectManager::SetupMatchRuleAndFilter, this),
+ base::Bind(&ObjectManager::OnSetupMatchRuleAndFilterComplete, this));
+}
+
+ObjectManager::~ObjectManager() {
+ // Clean up Object structures
+ for (ObjectMap::iterator iter = object_map_.begin();
+ iter != object_map_.end(); ++iter) {
+ Object* object = iter->second;
+
+ for (Object::PropertiesMap::iterator piter = object->properties_map.begin();
+ piter != object->properties_map.end(); ++piter) {
+ PropertySet* properties = piter->second;
+ delete properties;
+ }
+
+ delete object;
+ }
+}
+
+void ObjectManager::RegisterInterface(const std::string& interface_name,
+ Interface* interface) {
+ interface_map_[interface_name] = interface;
+}
+
+void ObjectManager::UnregisterInterface(const std::string& interface_name) {
+ InterfaceMap::iterator iter = interface_map_.find(interface_name);
+ if (iter != interface_map_.end())
+ interface_map_.erase(iter);
+}
+
+std::vector<ObjectPath> ObjectManager::GetObjects() {
+ std::vector<ObjectPath> object_paths;
+
+ for (ObjectMap::iterator iter = object_map_.begin();
+ iter != object_map_.end(); ++iter)
+ object_paths.push_back(iter->first);
+
+ return object_paths;
+}
+
+std::vector<ObjectPath> ObjectManager::GetObjectsWithInterface(
+ const std::string& interface_name) {
+ std::vector<ObjectPath> object_paths;
+
+ for (ObjectMap::iterator oiter = object_map_.begin();
+ oiter != object_map_.end(); ++oiter) {
+ Object* object = oiter->second;
+
+ Object::PropertiesMap::iterator piter =
+ object->properties_map.find(interface_name);
+ if (piter != object->properties_map.end())
+ object_paths.push_back(oiter->first);
+ }
+
+ return object_paths;
+}
+
+ObjectProxy* ObjectManager::GetObjectProxy(const ObjectPath& object_path) {
+ ObjectMap::iterator iter = object_map_.find(object_path);
+ if (iter == object_map_.end())
+ return NULL;
+
+ Object* object = iter->second;
+ return object->object_proxy;
+}
+
+PropertySet* ObjectManager::GetProperties(const ObjectPath& object_path,
+ const std::string& interface_name) {
+ ObjectMap::iterator iter = object_map_.find(object_path);
+ if (iter == object_map_.end())
+ return NULL;
+
+ Object* object = iter->second;
+ Object::PropertiesMap::iterator piter =
+ object->properties_map.find(interface_name);
+ if (piter == object->properties_map.end())
+ return NULL;
+
+ return piter->second;
+}
+
+void ObjectManager::GetManagedObjects() {
+ MethodCall method_call(kObjectManagerInterface,
+ kObjectManagerGetManagedObjects);
+
+ object_proxy_->CallMethod(
+ &method_call,
+ ObjectProxy::TIMEOUT_USE_DEFAULT,
+ base::Bind(&ObjectManager::OnGetManagedObjects,
+ weak_ptr_factory_.GetWeakPtr()));
+}
+
+void ObjectManager::CleanUp() {
+ DCHECK(bus_);
+ bus_->AssertOnDBusThread();
+ DCHECK(!cleanup_called_);
+
+ cleanup_called_ = true;
+
+ if (!setup_success_)
+ return;
+
+ bus_->RemoveFilterFunction(&ObjectManager::HandleMessageThunk, this);
+
+ ScopedDBusError error;
+ bus_->RemoveMatch(match_rule_, error.get());
+ if (error.is_set())
+ LOG(ERROR) << "Failed to remove match rule: " << match_rule_;
+
+ match_rule_.clear();
+}
+
+void ObjectManager::InitializeObjects() {
+ DCHECK(bus_);
+ DCHECK(object_proxy_);
+ DCHECK(setup_success_);
+
+ // |object_proxy_| is no longer valid if the Bus was shut down before this
+ // call. Don't initiate any other action from the origin thread.
+ if (cleanup_called_)
+ return;
+
+ object_proxy_->ConnectToSignal(
+ kObjectManagerInterface,
+ kObjectManagerInterfacesAdded,
+ base::Bind(&ObjectManager::InterfacesAddedReceived,
+ weak_ptr_factory_.GetWeakPtr()),
+ base::Bind(&ObjectManager::InterfacesAddedConnected,
+ weak_ptr_factory_.GetWeakPtr()));
+
+ object_proxy_->ConnectToSignal(
+ kObjectManagerInterface,
+ kObjectManagerInterfacesRemoved,
+ base::Bind(&ObjectManager::InterfacesRemovedReceived,
+ weak_ptr_factory_.GetWeakPtr()),
+ base::Bind(&ObjectManager::InterfacesRemovedConnected,
+ weak_ptr_factory_.GetWeakPtr()));
+
+ GetManagedObjects();
+}
+
+bool ObjectManager::SetupMatchRuleAndFilter() {
+ DCHECK(bus_);
+ DCHECK(!setup_success_);
+ bus_->AssertOnDBusThread();
+
+ if (cleanup_called_)
+ return false;
+
+ if (!bus_->Connect() || !bus_->SetUpAsyncOperations())
+ return false;
+
+ service_name_owner_ =
+ bus_->GetServiceOwnerAndBlock(service_name_, Bus::SUPPRESS_ERRORS);
+
+ const std::string match_rule =
+ base::StringPrintf(
+ "type='signal', sender='%s', interface='%s', member='%s'",
+ service_name_.c_str(),
+ kPropertiesInterface,
+ kPropertiesChanged);
+
+ bus_->AddFilterFunction(&ObjectManager::HandleMessageThunk, this);
+
+ ScopedDBusError error;
+ bus_->AddMatch(match_rule, error.get());
+ if (error.is_set()) {
+ LOG(ERROR) << "ObjectManager failed to add match rule \"" << match_rule
+ << "\". Got " << error.name() << ": " << error.message();
+ bus_->RemoveFilterFunction(&ObjectManager::HandleMessageThunk, this);
+ return false;
+ }
+
+ match_rule_ = match_rule;
+ setup_success_ = true;
+
+ return true;
+}
+
+void ObjectManager::OnSetupMatchRuleAndFilterComplete(bool success) {
+ LOG_IF(WARNING, !success) << service_name_ << " " << object_path_.value()
+ << ": Failed to set up match rule.";
+ if (success)
+ InitializeObjects();
+}
+
+// static
+DBusHandlerResult ObjectManager::HandleMessageThunk(DBusConnection* connection,
+ DBusMessage* raw_message,
+ void* user_data) {
+ ObjectManager* self = reinterpret_cast<ObjectManager*>(user_data);
+ return self->HandleMessage(connection, raw_message);
+}
+
+DBusHandlerResult ObjectManager::HandleMessage(DBusConnection*,
+ DBusMessage* raw_message) {
+ DCHECK(bus_);
+ bus_->AssertOnDBusThread();
+
+ // Handle the message only if it is a signal.
+ // Note that the match rule in SetupMatchRuleAndFilter() is configured to
+ // only accept signals, but we check here just in case.
+ if (dbus_message_get_type(raw_message) != DBUS_MESSAGE_TYPE_SIGNAL)
+ return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
+
+ // raw_message will be unrefed on exit of the function. Increment the
+ // reference so we can use it in Signal.
+ dbus_message_ref(raw_message);
+ std::unique_ptr<Signal> signal(Signal::FromRawMessage(raw_message));
+
+ const std::string interface = signal->GetInterface();
+ const std::string member = signal->GetMember();
+
+ statistics::AddReceivedSignal(service_name_, interface, member);
+
+ // Handle the signal only if it is PropertiesChanged.
+ // Note that the match rule in SetupMatchRuleAndFilter() is configured to
+ // only accept PropertiesChanged signals, but we check here just in case.
+ const std::string absolute_signal_name =
+ GetAbsoluteMemberName(interface, member);
+ const std::string properties_changed_signal_name =
+ GetAbsoluteMemberName(kPropertiesInterface, kPropertiesChanged);
+ if (absolute_signal_name != properties_changed_signal_name)
+ return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
+
+ VLOG(1) << "Signal received: " << signal->ToString();
+
+ // Handle the signal only if it is from the service that the ObjectManager
+ // instance is interested in.
+ // Note that the match rule in SetupMatchRuleAndFilter() is configured to
+ // only accept messages from the service name of our interest. However, the
+ // service='...' filter does not work as intended. See crbug.com/507206#14
+ // and #15 for details, hence it's necessary to check the sender here.
+ std::string sender = signal->GetSender();
+ if (service_name_owner_ != sender)
+ return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
+
+ const ObjectPath path = signal->GetPath();
+
+ if (bus_->HasDBusThread()) {
+ // Post a task to run the method in the origin thread. Transfer ownership of
+ // |signal| to NotifyPropertiesChanged, which will handle the clean up.
+ Signal* released_signal = signal.release();
+ bus_->GetOriginTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&ObjectManager::NotifyPropertiesChanged,
+ this, path,
+ released_signal));
+ } else {
+ // If the D-Bus thread is not used, just call the callback on the
+ // current thread. Transfer the ownership of |signal| to
+ // NotifyPropertiesChanged.
+ NotifyPropertiesChanged(path, signal.release());
+ }
+
+ // We don't return DBUS_HANDLER_RESULT_HANDLED for signals because other
+ // objects may be interested in them. (e.g. Signals from org.freedesktop.DBus)
+ return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
+}
+
+void ObjectManager::NotifyPropertiesChanged(
+ const dbus::ObjectPath object_path,
+ Signal* signal) {
+ DCHECK(bus_);
+ bus_->AssertOnOriginThread();
+
+ NotifyPropertiesChangedHelper(object_path, signal);
+
+ // Delete the message on the D-Bus thread. See comments in HandleMessage.
+ bus_->GetDBusTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&base::DeletePointer<Signal>, signal));
+}
+
+void ObjectManager::NotifyPropertiesChangedHelper(
+ const dbus::ObjectPath object_path,
+ Signal* signal) {
+ DCHECK(bus_);
+ bus_->AssertOnOriginThread();
+
+ MessageReader reader(signal);
+ std::string interface;
+ if (!reader.PopString(&interface)) {
+ LOG(WARNING) << "Property changed signal has wrong parameters: "
+ << "expected interface name: " << signal->ToString();
+ return;
+ }
+
+ PropertySet* properties = GetProperties(object_path, interface);
+ if (properties)
+ properties->ChangedReceived(signal);
+}
+
+void ObjectManager::OnGetManagedObjects(Response* response) {
+ if (response != NULL) {
+ MessageReader reader(response);
+ MessageReader array_reader(NULL);
+ if (!reader.PopArray(&array_reader))
+ return;
+
+ while (array_reader.HasMoreData()) {
+ MessageReader dict_entry_reader(NULL);
+ ObjectPath object_path;
+ if (!array_reader.PopDictEntry(&dict_entry_reader) ||
+ !dict_entry_reader.PopObjectPath(&object_path))
+ continue;
+
+ UpdateObject(object_path, &dict_entry_reader);
+ }
+
+ } else {
+ LOG(WARNING) << service_name_ << " " << object_path_.value()
+ << ": Failed to get managed objects";
+ }
+}
+
+void ObjectManager::InterfacesAddedReceived(Signal* signal) {
+ DCHECK(signal);
+ MessageReader reader(signal);
+ ObjectPath object_path;
+ if (!reader.PopObjectPath(&object_path)) {
+ LOG(WARNING) << service_name_ << " " << object_path_.value()
+ << ": InterfacesAdded signal has incorrect parameters: "
+ << signal->ToString();
+ return;
+ }
+
+ UpdateObject(object_path, &reader);
+}
+
+void ObjectManager::InterfacesAddedConnected(
+ const std::string& /*interface_name*/,
+ const std::string& /*signal_name*/,
+ bool success) {
+ LOG_IF(WARNING, !success) << service_name_ << " " << object_path_.value()
+ << ": Failed to connect to InterfacesAdded signal.";
+}
+
+void ObjectManager::InterfacesRemovedReceived(Signal* signal) {
+ DCHECK(signal);
+ MessageReader reader(signal);
+ ObjectPath object_path;
+ std::vector<std::string> interface_names;
+ if (!reader.PopObjectPath(&object_path) ||
+ !reader.PopArrayOfStrings(&interface_names)) {
+ LOG(WARNING) << service_name_ << " " << object_path_.value()
+ << ": InterfacesRemoved signal has incorrect parameters: "
+ << signal->ToString();
+ return;
+ }
+
+ for (size_t i = 0; i < interface_names.size(); ++i)
+ RemoveInterface(object_path, interface_names[i]);
+}
+
+void ObjectManager::InterfacesRemovedConnected(
+ const std::string& /*interface_name*/,
+ const std::string& /*signal_name*/,
+ bool success) {
+ LOG_IF(WARNING, !success) << service_name_ << " " << object_path_.value()
+ << ": Failed to connect to "
+ << "InterfacesRemoved signal.";
+}
+
+void ObjectManager::UpdateObject(const ObjectPath& object_path,
+ MessageReader* reader) {
+ DCHECK(reader);
+ MessageReader array_reader(NULL);
+ if (!reader->PopArray(&array_reader))
+ return;
+
+ while (array_reader.HasMoreData()) {
+ MessageReader dict_entry_reader(NULL);
+ std::string interface_name;
+ if (!array_reader.PopDictEntry(&dict_entry_reader) ||
+ !dict_entry_reader.PopString(&interface_name))
+ continue;
+
+ AddInterface(object_path, interface_name, &dict_entry_reader);
+ }
+}
+
+
+void ObjectManager::AddInterface(const ObjectPath& object_path,
+ const std::string& interface_name,
+ MessageReader* reader) {
+ InterfaceMap::iterator iiter = interface_map_.find(interface_name);
+ if (iiter == interface_map_.end())
+ return;
+ Interface* interface = iiter->second;
+
+ ObjectMap::iterator oiter = object_map_.find(object_path);
+ Object* object;
+ if (oiter == object_map_.end()) {
+ object = object_map_[object_path] = new Object;
+ object->object_proxy = bus_->GetObjectProxy(service_name_, object_path);
+ } else
+ object = oiter->second;
+
+ Object::PropertiesMap::iterator piter =
+ object->properties_map.find(interface_name);
+ PropertySet* property_set;
+ const bool interface_added = (piter == object->properties_map.end());
+ if (interface_added) {
+ property_set = object->properties_map[interface_name] =
+ interface->CreateProperties(object->object_proxy,
+ object_path, interface_name);
+ } else
+ property_set = piter->second;
+
+ property_set->UpdatePropertiesFromReader(reader);
+
+ if (interface_added)
+ interface->ObjectAdded(object_path, interface_name);
+}
+
+void ObjectManager::RemoveInterface(const ObjectPath& object_path,
+ const std::string& interface_name) {
+ ObjectMap::iterator oiter = object_map_.find(object_path);
+ if (oiter == object_map_.end())
+ return;
+ Object* object = oiter->second;
+
+ Object::PropertiesMap::iterator piter =
+ object->properties_map.find(interface_name);
+ if (piter == object->properties_map.end())
+ return;
+
+ // Inform the interface before removing the properties structure or object
+ // in case it needs details from them to make its own decisions.
+ InterfaceMap::iterator iiter = interface_map_.find(interface_name);
+ if (iiter != interface_map_.end()) {
+ Interface* interface = iiter->second;
+ interface->ObjectRemoved(object_path, interface_name);
+ }
+
+ delete piter->second;
+ object->properties_map.erase(piter);
+
+ if (object->properties_map.empty()) {
+ object_map_.erase(oiter);
+ delete object;
+ }
+}
+
+void ObjectManager::NameOwnerChanged(const std::string& old_owner,
+ const std::string& new_owner) {
+ service_name_owner_ = new_owner;
+
+ if (!old_owner.empty()) {
+ ObjectMap::iterator iter = object_map_.begin();
+ while (iter != object_map_.end()) {
+ ObjectMap::iterator tmp = iter;
+ ++iter;
+
+ // PropertiesMap is mutated by RemoveInterface, and also Object is
+ // destroyed; easier to collect the object path and interface names
+ // and remove them safely.
+ const dbus::ObjectPath object_path = tmp->first;
+ Object* object = tmp->second;
+ std::vector<std::string> interfaces;
+
+ for (Object::PropertiesMap::iterator piter =
+ object->properties_map.begin();
+ piter != object->properties_map.end(); ++piter)
+ interfaces.push_back(piter->first);
+
+ for (std::vector<std::string>::iterator iiter = interfaces.begin();
+ iiter != interfaces.end(); ++iiter)
+ RemoveInterface(object_path, *iiter);
+ }
+
+ }
+
+ if (!new_owner.empty())
+ GetManagedObjects();
+}
+
+} // namespace dbus
diff --git a/libchrome/dbus/object_manager.h b/libchrome/dbus/object_manager.h
new file mode 100644
index 0000000..a97495e
--- /dev/null
+++ b/libchrome/dbus/object_manager.h
@@ -0,0 +1,364 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_OBJECT_MANAGER_H_
+#define DBUS_OBJECT_MANAGER_H_
+
+#include <stdint.h>
+
+#include <map>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "dbus/object_path.h"
+#include "dbus/property.h"
+
+// Newer D-Bus services implement the Object Manager interface to inform other
+// clients about the objects they export, the properties of those objects, and
+// notification of changes in the set of available objects:
+// http://dbus.freedesktop.org/doc/dbus-specification.html
+// #standard-interfaces-objectmanager
+//
+// This interface is very closely tied to the Properties interface, and uses
+// even more levels of nested dictionaries and variants. In addition to
+// simplifying implementation, since there tends to be a single object manager
+// per service, spanning the complete set of objects and interfaces available,
+// the classes implemented here make dealing with this interface simpler.
+//
+// Except where noted, use of this class replaces the need for the code
+// documented in dbus/property.h
+//
+// Client implementation classes should begin by deriving from the
+// dbus::ObjectManager::Interface class, and defining a Properties structure as
+// documented in dbus/property.h.
+//
+// Example:
+// class ExampleClient : public dbus::ObjectManager::Interface {
+// public:
+// struct Properties : public dbus::PropertySet {
+// dbus::Property<std::string> name;
+// dbus::Property<uint16_t> version;
+// dbus::Property<dbus::ObjectPath> parent;
+// dbus::Property<std::vector<std::string> > children;
+//
+// Properties(dbus::ObjectProxy* object_proxy,
+// const PropertyChangedCallback callback)
+// : dbus::PropertySet(object_proxy, kExampleInterface, callback) {
+// RegisterProperty("Name", &name);
+// RegisterProperty("Version", &version);
+// RegisterProperty("Parent", &parent);
+// RegisterProperty("Children", &children);
+// }
+// virtual ~Properties() {}
+// };
+//
+// The link between the implementation class and the object manager is set up
+// in the constructor and removed in the destructor; the class should maintain
+// a pointer to its object manager for use in other methods and establish
+// itself as the implementation class for its interface.
+//
+// Example:
+// explicit ExampleClient::ExampleClient(dbus::Bus* bus)
+// : bus_(bus),
+// weak_ptr_factory_(this) {
+// object_manager_ = bus_->GetObjectManager(kServiceName, kManagerPath);
+// object_manager_->RegisterInterface(kInterface, this);
+// }
+//
+// virtual ExampleClient::~ExampleClient() {
+// object_manager_->UnregisterInterface(kInterface);
+// }
+//
+// The D-Bus thread manager takes care of issuing the necessary call to
+// GetManagedObjects() after the implementation classes have been set up.
+//
+// The object manager interface class has one abstract method that must be
+// implemented by the class to create Properties structures on demand. As well
+// as implementing this, you will want to implement a public GetProperties()
+// method.
+//
+// Example:
+// dbus::PropertySet* CreateProperties(dbus::ObjectProxy* object_proxy,
+// const std::string& interface_name)
+// override {
+// Properties* properties = new Properties(
+// object_proxy, interface_name,
+// base::Bind(&PropertyChanged,
+// weak_ptr_factory_.GetWeakPtr(),
+// object_path));
+// return static_cast<dbus::PropertySet*>(properties);
+// }
+//
+// Properties* GetProperties(const dbus::ObjectPath& object_path) {
+// return static_cast<Properties*>(
+// object_manager_->GetProperties(object_path, kInterface));
+// }
+//
+// Note that unlike classes that only use dbus/property.h there is no need
+// to connect signals or obtain the initial values of properties. The object
+// manager class handles that for you.
+//
+// PropertyChanged is a method of your own to notify your observers of a change
+// in your properties, either as a result of a signal from the Properties
+// interface or from the Object Manager interface. You may also wish to
+// implement the optional ObjectAdded and ObjectRemoved methods of the class
+// to likewise notify observers.
+//
+// When your class needs an object proxy for a given object path, it may
+// obtain it from the object manager. Unlike the equivalent method on the bus
+// this will return NULL if the object is not known.
+//
+// object_proxy = object_manager_->GetObjectProxy(object_path);
+// if (object_proxy) {
+// ...
+// }
+//
+// There is no need for code using your implementation class to be aware of the
+// use of object manager behind the scenes, the rules for updating properties
+// documented in dbus/property.h still apply.
+
+namespace dbus {
+
+const char kObjectManagerInterface[] = "org.freedesktop.DBus.ObjectManager";
+const char kObjectManagerGetManagedObjects[] = "GetManagedObjects";
+const char kObjectManagerInterfacesAdded[] = "InterfacesAdded";
+const char kObjectManagerInterfacesRemoved[] = "InterfacesRemoved";
+
+class Bus;
+class MessageReader;
+class ObjectProxy;
+class Response;
+class Signal;
+
+// ObjectManager implements both the D-Bus client components of the D-Bus
+// Object Manager interface, as internal methods, and a public API for
+// client classes to utilize.
+class CHROME_DBUS_EXPORT ObjectManager
+ : public base::RefCountedThreadSafe<ObjectManager> {
+public:
+ // ObjectManager::Interface must be implemented by any class wishing to have
+ // its remote objects managed by an ObjectManager.
+ class Interface {
+ public:
+ virtual ~Interface() {}
+
+ // Called by ObjectManager to create a Properties structure for the remote
+ // D-Bus object identified by |object_path| and accessible through
+ // |object_proxy|. The D-Bus interface name |interface_name| is that passed
+ // to RegisterInterface() by the implementation class.
+ //
+ // The implementation class should create and return an instance of its own
+ // subclass of dbus::PropertySet; ObjectManager will then connect signals
+ // and update the properties from its own internal message reader.
+ virtual PropertySet* CreateProperties(
+ ObjectProxy *object_proxy,
+ const dbus::ObjectPath& object_path,
+ const std::string& interface_name) = 0;
+
+ // Called by ObjectManager to inform the implementation class that an
+ // object has been added with the path |object_path|. The D-Bus interface
+ // name |interface_name| is that passed to RegisterInterface() by the
+ // implementation class.
+ //
+ // If a new object implements multiple interfaces, this method will be
+ // called on each interface implementation with differing values of
+ // |interface_name| as appropriate. An implementation class will only
+ // receive multiple calls if it has registered for multiple interfaces.
+ virtual void ObjectAdded(const ObjectPath& /*object_path*/,
+ const std::string& /*interface_name*/) {}
+
+ // Called by ObjectManager to inform the implementation class that an
+ // object with the path |object_path| has been removed. The D-Bus interface
+ // name |interface_name| is that passed to RegisterInterface() by the
+ // implementation class. Multiple interfaces are handled as with
+ // ObjectAdded().
+ //
+ // This method will be called before the Properties structure and the
+ // ObjectProxy object for the given interface are cleaned up, it is safe
+ // to retrieve them during removal to vary processing.
+ virtual void ObjectRemoved(const ObjectPath& /*object_path*/,
+ const std::string& /*interface_name*/) {}
+ };
+
+ // Client code should use Bus::GetObjectManager() instead of this constructor.
+ ObjectManager(Bus* bus,
+ const std::string& service_name,
+ const ObjectPath& object_path);
+
+ // Register a client implementation class |interface| for the given D-Bus
+ // interface named in |interface_name|. That object's CreateProperties()
+ // method will be used to create instances of dbus::PropertySet* when
+ // required.
+ void RegisterInterface(const std::string& interface_name,
+ Interface* interface);
+
+ // Unregister the implementation class for the D-Bus interface named in
+ // |interface_name|, objects and properties of this interface will be
+ // ignored.
+ void UnregisterInterface(const std::string& interface_name);
+
+ // Returns a list of object paths, in an undefined order, of objects known
+ // to this manager.
+ std::vector<ObjectPath> GetObjects();
+
+ // Returns the list of object paths, in an undefined order, of objects
+ // implementing the interface named in |interface_name| known to this manager.
+ std::vector<ObjectPath> GetObjectsWithInterface(
+ const std::string& interface_name);
+
+ // Returns a ObjectProxy pointer for the given |object_path|. Unlike
+ // the equivalent method on Bus this will return NULL if the object
+ // manager has not been informed of that object's existence.
+ ObjectProxy* GetObjectProxy(const ObjectPath& object_path);
+
+ // Returns a PropertySet* pointer for the given |object_path| and
+ // |interface_name|, or NULL if the object manager has not been informed of
+ // that object's existence or the interface's properties. The caller should
+ // cast the returned pointer to the appropriate type, e.g.:
+ // static_cast<Properties*>(GetProperties(object_path, my_interface));
+ PropertySet* GetProperties(const ObjectPath& object_path,
+ const std::string& interface_name);
+
+ // Instructs the object manager to refresh its list of managed objects;
+ // automatically called by the D-Bus thread manager, there should never be
+ // a need to call this manually.
+ void GetManagedObjects();
+
+ // Cleans up any match rules and filter functions added by this ObjectManager.
+ // The Bus object will take care of this so you don't have to do it manually.
+ //
+ // BLOCKING CALL.
+ void CleanUp();
+
+ protected:
+ virtual ~ObjectManager();
+
+ private:
+ friend class base::RefCountedThreadSafe<ObjectManager>;
+
+ // Connects the InterfacesAdded and InterfacesRemoved signals and calls
+ // GetManagedObjects. Called from OnSetupMatchRuleAndFilterComplete.
+ void InitializeObjects();
+
+ // Called from the constructor to add a match rule for PropertiesChanged
+ // signals on the DBus thread and set up a corresponding filter function.
+ bool SetupMatchRuleAndFilter();
+
+ // Called on the origin thread once the match rule and filter have been set
+ // up. |success| is false, if an error occurred during set up; it's true
+ // otherwise.
+ void OnSetupMatchRuleAndFilterComplete(bool success);
+
+ // Called by dbus:: when a message is received. This is used to filter
+ // PropertiesChanged signals from the correct sender and relay the event to
+ // the correct PropertySet.
+ static DBusHandlerResult HandleMessageThunk(DBusConnection* connection,
+ DBusMessage* raw_message,
+ void* user_data);
+ DBusHandlerResult HandleMessage(DBusConnection* connection,
+ DBusMessage* raw_message);
+
+ // Called when a PropertiesChanged signal is received from the sender.
+ // This method notifies the relevant PropertySet that it should update its
+ // properties based on the received signal. Called from HandleMessage.
+ void NotifyPropertiesChanged(const dbus::ObjectPath object_path,
+ Signal* signal);
+ void NotifyPropertiesChangedHelper(const dbus::ObjectPath object_path,
+ Signal* signal);
+
+ // Called by dbus:: in response to the GetManagedObjects() method call.
+ void OnGetManagedObjects(Response* response);
+
+ // Called by dbus:: when an InterfacesAdded signal is received and initially
+ // connected.
+ void InterfacesAddedReceived(Signal* signal);
+ void InterfacesAddedConnected(const std::string& interface_name,
+ const std::string& signal_name,
+ bool success);
+
+ // Called by dbus:: when an InterfacesRemoved signal is received and
+ // initially connected.
+ void InterfacesRemovedReceived(Signal* signal);
+ void InterfacesRemovedConnected(const std::string& interface_name,
+ const std::string& signal_name,
+ bool success);
+
+ // Updates the map entry for the object with path |object_path| using the
+ // D-Bus message in |reader|, which should consist of a dictionary mapping
+ // interface names to properties dictionaries as received by both the
+ // GetManagedObjects() method return and the InterfacesAdded() signal.
+ void UpdateObject(const ObjectPath& object_path, MessageReader* reader);
+
+ // Updates the properties structure of the object with path |object_path|
+ // for the interface named |interface_name| using the D-Bus message in
+ // |reader| which should consist of the properties dictionary for that
+ // interface.
+ //
+ // Called by UpdateObject() for each interface in the dictionary; this
+ // method takes care of both creating the entry in the ObjectMap and
+ // ObjectProxy if required, as well as the PropertySet instance for that
+ // interface if necessary.
+ void AddInterface(const ObjectPath& object_path,
+ const std::string& interface_name,
+ MessageReader* reader);
+
+ // Removes the properties structure of the object with path |object_path|
+ // for the interfaces named |interface_name|.
+ //
+ // If no further interfaces remain, the entry in the ObjectMap is discarded.
+ void RemoveInterface(const ObjectPath& object_path,
+ const std::string& interface_name);
+
+ // Removes all objects and interfaces from the object manager when
+ // |old_owner| is not the empty string and/or re-requests the set of managed
+ // objects when |new_owner| is not the empty string.
+ void NameOwnerChanged(const std::string& old_owner,
+ const std::string& new_owner);
+
+ Bus* bus_;
+ std::string service_name_;
+ std::string service_name_owner_;
+ std::string match_rule_;
+ ObjectPath object_path_;
+ ObjectProxy* object_proxy_;
+ bool setup_success_;
+ bool cleanup_called_;
+
+ // Maps the name of an interface to the implementation class used for
+ // instantiating PropertySet structures for that interface's properties.
+ typedef std::map<std::string, Interface*> InterfaceMap;
+ InterfaceMap interface_map_;
+
+ // Each managed object consists of a ObjectProxy used to make calls
+ // against that object and a collection of D-Bus interface names and their
+ // associated PropertySet structures.
+ struct Object {
+ Object();
+ ~Object();
+
+ ObjectProxy* object_proxy;
+
+ // Maps the name of an interface to the specific PropertySet structure
+ // of that interface's properties.
+ typedef std::map<const std::string, PropertySet*> PropertiesMap;
+ PropertiesMap properties_map;
+ };
+
+ // Maps the object path of an object to the Object structure.
+ typedef std::map<const ObjectPath, Object*> ObjectMap;
+ ObjectMap object_map_;
+
+ // Weak pointer factory for generating 'this' pointers that might live longer
+ // than we do.
+ // Note: This should remain the last member so it'll be destroyed and
+ // invalidate its weak pointers before any other members are destroyed.
+ base::WeakPtrFactory<ObjectManager> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(ObjectManager);
+};
+
+} // namespace dbus
+
+#endif // DBUS_OBJECT_MANAGER_H_
diff --git a/libchrome/dbus/object_path.cc b/libchrome/dbus/object_path.cc
new file mode 100644
index 0000000..8606b6b
--- /dev/null
+++ b/libchrome/dbus/object_path.cc
@@ -0,0 +1,33 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dbus/object_path.h"
+
+#include <ostream>
+
+#include "dbus/string_util.h"
+
+namespace dbus {
+
+bool ObjectPath::IsValid() const {
+ return IsValidObjectPath(value_);
+}
+
+bool ObjectPath::operator<(const ObjectPath& that) const {
+ return value_ < that.value_;
+}
+
+bool ObjectPath::operator==(const ObjectPath& that) const {
+ return value_ == that.value_;
+}
+
+bool ObjectPath::operator!=(const ObjectPath& that) const {
+ return value_ != that.value_;
+}
+
+void PrintTo(const ObjectPath& path, std::ostream* out) {
+ *out << path.value();
+}
+
+} // namespace dbus
diff --git a/libchrome/dbus/object_path.h b/libchrome/dbus/object_path.h
new file mode 100644
index 0000000..072c23d
--- /dev/null
+++ b/libchrome/dbus/object_path.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_OBJECT_PATH_H_
+#define DBUS_OBJECT_PATH_H_
+
+#include <iosfwd>
+#include <string>
+
+#include "dbus/dbus_export.h"
+
+namespace dbus {
+
+// ObjectPath is a type used to distinguish D-Bus object paths from simple
+// strings, especially since normal practice is that these should be only
+// initialized from static constants or obtained from remote objects and no
+// assumptions about their value made.
+class CHROME_DBUS_EXPORT ObjectPath {
+ public:
+ // Permit initialization without a value for passing to
+ // dbus::MessageReader::PopObjectPath to fill in and from std::string
+ // objects.
+ //
+ // The compiler synthesised copy constructor and assignment operator are
+ // sufficient for our needs, as is implicit initialization of a std::string
+ // from a string constant.
+ ObjectPath() {}
+ explicit ObjectPath(const std::string& value) : value_(value) {}
+
+ // Retrieves value as a std::string.
+ const std::string& value() const { return value_; }
+
+ // Returns true if the value is a valid object path.
+ bool IsValid() const;
+
+ // Permit sufficient comparison to allow an ObjectPath to be used as a
+ // key in a std::map.
+ bool operator<(const ObjectPath&) const;
+
+ // Permit testing for equality, required for mocks to work and useful for
+ // observers.
+ bool operator==(const ObjectPath&) const;
+ bool operator!=(const ObjectPath&) const;
+
+ private:
+ std::string value_;
+};
+
+// This is required by gtest to print a readable output on test failures.
+CHROME_DBUS_EXPORT void PrintTo(const ObjectPath& path, std::ostream* out);
+
+} // namespace dbus
+
+#endif // DBUS_OBJECT_PATH_H_
diff --git a/libchrome/dbus/object_proxy.cc b/libchrome/dbus/object_proxy.cc
new file mode 100644
index 0000000..ce02551
--- /dev/null
+++ b/libchrome/dbus/object_proxy.cc
@@ -0,0 +1,716 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dbus/object_proxy.h"
+
+#include <stddef.h>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/histogram.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/stringprintf.h"
+#include "base/task_runner_util.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_restrictions.h"
+#include "dbus/bus.h"
+#include "dbus/dbus_statistics.h"
+#include "dbus/message.h"
+#include "dbus/object_path.h"
+#include "dbus/scoped_dbus_error.h"
+#include "dbus/util.h"
+
+namespace dbus {
+
+namespace {
+
+const char kErrorServiceUnknown[] = "org.freedesktop.DBus.Error.ServiceUnknown";
+const char kErrorObjectUnknown[] = "org.freedesktop.DBus.Error.UnknownObject";
+
+// Used for success ratio histograms. 1 for success, 0 for failure.
+const int kSuccessRatioHistogramMaxValue = 2;
+
+// The path of D-Bus Object sending NameOwnerChanged signal.
+const char kDBusSystemObjectPath[] = "/org/freedesktop/DBus";
+
+// The D-Bus Object interface.
+const char kDBusSystemObjectInterface[] = "org.freedesktop.DBus";
+
+// The D-Bus Object address.
+const char kDBusSystemObjectAddress[] = "org.freedesktop.DBus";
+
+// The NameOwnerChanged member in |kDBusSystemObjectInterface|.
+const char kNameOwnerChangedMember[] = "NameOwnerChanged";
+
+// An empty function used for ObjectProxy::EmptyResponseCallback().
+void EmptyResponseCallbackBody(Response* /*response*/) {
+}
+
+} // namespace
+
+ObjectProxy::ObjectProxy(Bus* bus,
+ const std::string& service_name,
+ const ObjectPath& object_path,
+ int options)
+ : bus_(bus),
+ service_name_(service_name),
+ object_path_(object_path),
+ ignore_service_unknown_errors_(
+ options & IGNORE_SERVICE_UNKNOWN_ERRORS) {
+}
+
+ObjectProxy::~ObjectProxy() {
+ DCHECK(pending_calls_.empty());
+}
+
+// Originally we tried to make |method_call| a const reference, but we
+// gave up as dbus_connection_send_with_reply_and_block() takes a
+// non-const pointer of DBusMessage as the second parameter.
+std::unique_ptr<Response> ObjectProxy::CallMethodAndBlockWithErrorDetails(
+ MethodCall* method_call,
+ int timeout_ms,
+ ScopedDBusError* error) {
+ bus_->AssertOnDBusThread();
+
+ if (!bus_->Connect() ||
+ !method_call->SetDestination(service_name_) ||
+ !method_call->SetPath(object_path_))
+ return std::unique_ptr<Response>();
+
+ DBusMessage* request_message = method_call->raw_message();
+
+ // Send the message synchronously.
+ const base::TimeTicks start_time = base::TimeTicks::Now();
+ DBusMessage* response_message =
+ bus_->SendWithReplyAndBlock(request_message, timeout_ms, error->get());
+ // Record if the method call is successful, or not. 1 if successful.
+ UMA_HISTOGRAM_ENUMERATION("DBus.SyncMethodCallSuccess",
+ response_message ? 1 : 0,
+ kSuccessRatioHistogramMaxValue);
+ statistics::AddBlockingSentMethodCall(service_name_,
+ method_call->GetInterface(),
+ method_call->GetMember());
+
+ if (!response_message) {
+ LogMethodCallFailure(method_call->GetInterface(),
+ method_call->GetMember(),
+ error->is_set() ? error->name() : "unknown error type",
+ error->is_set() ? error->message() : "");
+ return std::unique_ptr<Response>();
+ }
+ // Record time spent for the method call. Don't include failures.
+ UMA_HISTOGRAM_TIMES("DBus.SyncMethodCallTime",
+ base::TimeTicks::Now() - start_time);
+
+ return Response::FromRawMessage(response_message);
+}
+
+std::unique_ptr<Response> ObjectProxy::CallMethodAndBlock(
+ MethodCall* method_call,
+ int timeout_ms) {
+ ScopedDBusError error;
+ return CallMethodAndBlockWithErrorDetails(method_call, timeout_ms, &error);
+}
+
+void ObjectProxy::CallMethod(MethodCall* method_call,
+ int timeout_ms,
+ ResponseCallback callback) {
+ CallMethodWithErrorCallback(method_call, timeout_ms, callback,
+ base::Bind(&ObjectProxy::OnCallMethodError,
+ this,
+ method_call->GetInterface(),
+ method_call->GetMember(),
+ callback));
+}
+
+void ObjectProxy::CallMethodWithErrorCallback(MethodCall* method_call,
+ int timeout_ms,
+ ResponseCallback callback,
+ ErrorCallback error_callback) {
+ bus_->AssertOnOriginThread();
+
+ const base::TimeTicks start_time = base::TimeTicks::Now();
+
+ if (!method_call->SetDestination(service_name_) ||
+ !method_call->SetPath(object_path_)) {
+ // In case of a failure, run the error callback with NULL.
+ DBusMessage* response_message = NULL;
+ base::Closure task = base::Bind(&ObjectProxy::RunResponseCallback,
+ this,
+ callback,
+ error_callback,
+ start_time,
+ response_message);
+ bus_->GetOriginTaskRunner()->PostTask(FROM_HERE, task);
+ return;
+ }
+
+ // Increment the reference count so we can safely reference the
+ // underlying request message until the method call is complete. This
+ // will be unref'ed in StartAsyncMethodCall().
+ DBusMessage* request_message = method_call->raw_message();
+ dbus_message_ref(request_message);
+
+ base::Closure task = base::Bind(&ObjectProxy::StartAsyncMethodCall,
+ this,
+ timeout_ms,
+ request_message,
+ callback,
+ error_callback,
+ start_time);
+ statistics::AddSentMethodCall(service_name_,
+ method_call->GetInterface(),
+ method_call->GetMember());
+
+ // Wait for the response in the D-Bus thread.
+ bus_->GetDBusTaskRunner()->PostTask(FROM_HERE, task);
+}
+
+void ObjectProxy::ConnectToSignal(const std::string& interface_name,
+ const std::string& signal_name,
+ SignalCallback signal_callback,
+ OnConnectedCallback on_connected_callback) {
+ bus_->AssertOnOriginThread();
+
+ if (bus_->HasDBusThread()) {
+ base::PostTaskAndReplyWithResult(
+ bus_->GetDBusTaskRunner(), FROM_HERE,
+ base::Bind(&ObjectProxy::ConnectToSignalInternal, this, interface_name,
+ signal_name, signal_callback),
+ base::Bind(on_connected_callback, interface_name, signal_name));
+ } else {
+ // If the bus doesn't have a dedicated dbus thread we need to call
+ // ConnectToSignalInternal directly otherwise we might miss a signal
+ // that is currently queued if we do a PostTask.
+ const bool success =
+ ConnectToSignalInternal(interface_name, signal_name, signal_callback);
+ on_connected_callback.Run(interface_name, signal_name, success);
+ }
+}
+
+void ObjectProxy::SetNameOwnerChangedCallback(
+ NameOwnerChangedCallback callback) {
+ bus_->AssertOnOriginThread();
+
+ name_owner_changed_callback_ = callback;
+}
+
+void ObjectProxy::WaitForServiceToBeAvailable(
+ WaitForServiceToBeAvailableCallback callback) {
+ bus_->AssertOnOriginThread();
+
+ wait_for_service_to_be_available_callbacks_.push_back(callback);
+ bus_->GetDBusTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&ObjectProxy::WaitForServiceToBeAvailableInternal, this));
+}
+
+void ObjectProxy::Detach() {
+ bus_->AssertOnDBusThread();
+
+ if (bus_->is_connected())
+ bus_->RemoveFilterFunction(&ObjectProxy::HandleMessageThunk, this);
+
+ for (const auto& match_rule : match_rules_) {
+ ScopedDBusError error;
+ bus_->RemoveMatch(match_rule, error.get());
+ if (error.is_set()) {
+ // There is nothing we can do to recover, so just print the error.
+ LOG(ERROR) << "Failed to remove match rule: " << match_rule;
+ }
+ }
+ match_rules_.clear();
+
+ for (auto* pending_call : pending_calls_) {
+ dbus_pending_call_cancel(pending_call);
+ dbus_pending_call_unref(pending_call);
+ }
+ pending_calls_.clear();
+}
+
+// static
+ObjectProxy::ResponseCallback ObjectProxy::EmptyResponseCallback() {
+ return base::Bind(&EmptyResponseCallbackBody);
+}
+
+ObjectProxy::OnPendingCallIsCompleteData::OnPendingCallIsCompleteData(
+ ObjectProxy* in_object_proxy,
+ ResponseCallback in_response_callback,
+ ErrorCallback in_error_callback,
+ base::TimeTicks in_start_time)
+ : object_proxy(in_object_proxy),
+ response_callback(in_response_callback),
+ error_callback(in_error_callback),
+ start_time(in_start_time) {
+}
+
+ObjectProxy::OnPendingCallIsCompleteData::~OnPendingCallIsCompleteData() {
+}
+
+void ObjectProxy::StartAsyncMethodCall(int timeout_ms,
+ DBusMessage* request_message,
+ ResponseCallback response_callback,
+ ErrorCallback error_callback,
+ base::TimeTicks start_time) {
+ bus_->AssertOnDBusThread();
+
+ if (!bus_->Connect() || !bus_->SetUpAsyncOperations()) {
+ // In case of a failure, run the error callback with NULL.
+ DBusMessage* response_message = NULL;
+ base::Closure task = base::Bind(&ObjectProxy::RunResponseCallback,
+ this,
+ response_callback,
+ error_callback,
+ start_time,
+ response_message);
+ bus_->GetOriginTaskRunner()->PostTask(FROM_HERE, task);
+
+ dbus_message_unref(request_message);
+ return;
+ }
+
+ DBusPendingCall* pending_call = NULL;
+
+ bus_->SendWithReply(request_message, &pending_call, timeout_ms);
+
+ // Prepare the data we'll be passing to OnPendingCallIsCompleteThunk().
+ // The data will be deleted in OnPendingCallIsCompleteThunk().
+ OnPendingCallIsCompleteData* data =
+ new OnPendingCallIsCompleteData(this, response_callback, error_callback,
+ start_time);
+
+ // This returns false only when unable to allocate memory.
+ const bool success = dbus_pending_call_set_notify(
+ pending_call,
+ &ObjectProxy::OnPendingCallIsCompleteThunk,
+ data,
+ &DeleteVoidPointer<OnPendingCallIsCompleteData>);
+ CHECK(success) << "Unable to allocate memory";
+ pending_calls_.insert(pending_call);
+
+ // It's now safe to unref the request message.
+ dbus_message_unref(request_message);
+}
+
+void ObjectProxy::OnPendingCallIsComplete(DBusPendingCall* pending_call,
+ ResponseCallback response_callback,
+ ErrorCallback error_callback,
+ base::TimeTicks start_time) {
+ bus_->AssertOnDBusThread();
+
+ DBusMessage* response_message = dbus_pending_call_steal_reply(pending_call);
+ base::Closure task = base::Bind(&ObjectProxy::RunResponseCallback,
+ this,
+ response_callback,
+ error_callback,
+ start_time,
+ response_message);
+ bus_->GetOriginTaskRunner()->PostTask(FROM_HERE, task);
+
+ // Remove the pending call from the set.
+ pending_calls_.erase(pending_call);
+ dbus_pending_call_unref(pending_call);
+}
+
+void ObjectProxy::RunResponseCallback(ResponseCallback response_callback,
+ ErrorCallback error_callback,
+ base::TimeTicks start_time,
+ DBusMessage* response_message) {
+ bus_->AssertOnOriginThread();
+
+ bool method_call_successful = false;
+ if (!response_message) {
+ // The response is not received.
+ error_callback.Run(NULL);
+ } else if (dbus_message_get_type(response_message) ==
+ DBUS_MESSAGE_TYPE_ERROR) {
+ // This will take |response_message| and release (unref) it.
+ std::unique_ptr<ErrorResponse> error_response(
+ ErrorResponse::FromRawMessage(response_message));
+ error_callback.Run(error_response.get());
+ // Delete the message on the D-Bus thread. See below for why.
+ bus_->GetDBusTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&base::DeletePointer<ErrorResponse>,
+ error_response.release()));
+ } else {
+ // This will take |response_message| and release (unref) it.
+ std::unique_ptr<Response> response(
+ Response::FromRawMessage(response_message));
+ // The response is successfully received.
+ response_callback.Run(response.get());
+ // The message should be deleted on the D-Bus thread for a complicated
+ // reason:
+ //
+ // libdbus keeps track of the number of bytes in the incoming message
+ // queue to ensure that the data size in the queue is manageable. The
+ // bookkeeping is partly done via dbus_message_unref(), and immediately
+ // asks the client code (Chrome) to stop monitoring the underlying
+ // socket, if the number of bytes exceeds a certain number, which is set
+ // to 63MB, per dbus-transport.cc:
+ //
+ // /* Try to default to something that won't totally hose the system,
+ // * but doesn't impose too much of a limitation.
+ // */
+ // transport->max_live_messages_size = _DBUS_ONE_MEGABYTE * 63;
+ //
+ // The monitoring of the socket is done on the D-Bus thread (see Watch
+ // class in bus.cc), hence we should stop the monitoring from D-Bus
+ // thread, not from the current thread here, which is likely UI thread.
+ bus_->GetDBusTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&base::DeletePointer<Response>, response.release()));
+
+ method_call_successful = true;
+ // Record time spent for the method call. Don't include failures.
+ UMA_HISTOGRAM_TIMES("DBus.AsyncMethodCallTime",
+ base::TimeTicks::Now() - start_time);
+ }
+ // Record if the method call is successful, or not. 1 if successful.
+ UMA_HISTOGRAM_ENUMERATION("DBus.AsyncMethodCallSuccess",
+ method_call_successful,
+ kSuccessRatioHistogramMaxValue);
+}
+
+void ObjectProxy::OnPendingCallIsCompleteThunk(DBusPendingCall* pending_call,
+ void* user_data) {
+ OnPendingCallIsCompleteData* data =
+ reinterpret_cast<OnPendingCallIsCompleteData*>(user_data);
+ ObjectProxy* self = data->object_proxy;
+ self->OnPendingCallIsComplete(pending_call,
+ data->response_callback,
+ data->error_callback,
+ data->start_time);
+}
+
+bool ObjectProxy::ConnectToNameOwnerChangedSignal() {
+ bus_->AssertOnDBusThread();
+
+ if (!bus_->Connect() || !bus_->SetUpAsyncOperations())
+ return false;
+
+ bus_->AddFilterFunction(&ObjectProxy::HandleMessageThunk, this);
+
+ // Add a match_rule listening NameOwnerChanged for the well-known name
+ // |service_name_|.
+ const std::string name_owner_changed_match_rule =
+ base::StringPrintf(
+ "type='signal',interface='org.freedesktop.DBus',"
+ "member='NameOwnerChanged',path='/org/freedesktop/DBus',"
+ "sender='org.freedesktop.DBus',arg0='%s'",
+ service_name_.c_str());
+
+ const bool success =
+ AddMatchRuleWithoutCallback(name_owner_changed_match_rule,
+ "org.freedesktop.DBus.NameOwnerChanged");
+
+ // Try getting the current name owner. It's not guaranteed that we can get
+ // the name owner at this moment, as the service may not yet be started. If
+ // that's the case, we'll get the name owner via NameOwnerChanged signal,
+ // as soon as the service is started.
+ UpdateNameOwnerAndBlock();
+
+ return success;
+}
+
+bool ObjectProxy::ConnectToSignalInternal(const std::string& interface_name,
+ const std::string& signal_name,
+ SignalCallback signal_callback) {
+ bus_->AssertOnDBusThread();
+
+ if (!ConnectToNameOwnerChangedSignal())
+ return false;
+
+ const std::string absolute_signal_name =
+ GetAbsoluteMemberName(interface_name, signal_name);
+
+ // Add a match rule so the signal goes through HandleMessage().
+ const std::string match_rule =
+ base::StringPrintf("type='signal', interface='%s', path='%s'",
+ interface_name.c_str(),
+ object_path_.value().c_str());
+ return AddMatchRuleWithCallback(match_rule,
+ absolute_signal_name,
+ signal_callback);
+}
+
+void ObjectProxy::WaitForServiceToBeAvailableInternal() {
+ bus_->AssertOnDBusThread();
+
+ if (!ConnectToNameOwnerChangedSignal()) { // Failed to connect to the signal.
+ const bool service_is_ready = false;
+ bus_->GetOriginTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&ObjectProxy::RunWaitForServiceToBeAvailableCallbacks,
+ this, service_is_ready));
+ return;
+ }
+
+ const bool service_is_available = !service_name_owner_.empty();
+ if (service_is_available) { // Service is already available.
+ bus_->GetOriginTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&ObjectProxy::RunWaitForServiceToBeAvailableCallbacks,
+ this, service_is_available));
+ return;
+ }
+}
+
+DBusHandlerResult ObjectProxy::HandleMessage(DBusConnection*,
+ DBusMessage* raw_message) {
+ bus_->AssertOnDBusThread();
+
+ if (dbus_message_get_type(raw_message) != DBUS_MESSAGE_TYPE_SIGNAL)
+ return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
+
+ // raw_message will be unrefed on exit of the function. Increment the
+ // reference so we can use it in Signal.
+ dbus_message_ref(raw_message);
+ std::unique_ptr<Signal> signal(Signal::FromRawMessage(raw_message));
+
+ // Verify the signal comes from the object we're proxying for, this is
+ // our last chance to return DBUS_HANDLER_RESULT_NOT_YET_HANDLED and
+ // allow other object proxies to handle instead.
+ const ObjectPath path = signal->GetPath();
+ if (path != object_path_) {
+ if (path.value() == kDBusSystemObjectPath &&
+ signal->GetMember() == kNameOwnerChangedMember) {
+ // Handle NameOwnerChanged separately
+ return HandleNameOwnerChanged(std::move(signal));
+ }
+ return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
+ }
+
+ const std::string interface = signal->GetInterface();
+ const std::string member = signal->GetMember();
+
+ statistics::AddReceivedSignal(service_name_, interface, member);
+
+ // Check if we know about the signal.
+ const std::string absolute_signal_name = GetAbsoluteMemberName(
+ interface, member);
+ MethodTable::const_iterator iter = method_table_.find(absolute_signal_name);
+ if (iter == method_table_.end()) {
+ // Don't know about the signal.
+ return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
+ }
+ VLOG(1) << "Signal received: " << signal->ToString();
+
+ std::string sender = signal->GetSender();
+ if (service_name_owner_ != sender) {
+ LOG(ERROR) << "Rejecting a message from a wrong sender.";
+ UMA_HISTOGRAM_COUNTS("DBus.RejectedSignalCount", 1);
+ return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
+ }
+
+ const base::TimeTicks start_time = base::TimeTicks::Now();
+ if (bus_->HasDBusThread()) {
+ // Post a task to run the method in the origin thread.
+ // Transfer the ownership of |signal| to RunMethod().
+ // |released_signal| will be deleted in RunMethod().
+ Signal* released_signal = signal.release();
+ bus_->GetOriginTaskRunner()->PostTask(FROM_HERE,
+ base::Bind(&ObjectProxy::RunMethod,
+ this,
+ start_time,
+ iter->second,
+ released_signal));
+ } else {
+ const base::TimeTicks start_time = base::TimeTicks::Now();
+ // If the D-Bus thread is not used, just call the callback on the
+ // current thread. Transfer the ownership of |signal| to RunMethod().
+ Signal* released_signal = signal.release();
+ RunMethod(start_time, iter->second, released_signal);
+ }
+
+ // We don't return DBUS_HANDLER_RESULT_HANDLED for signals because other
+ // objects may be interested in them. (e.g. Signals from org.freedesktop.DBus)
+ return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
+}
+
+void ObjectProxy::RunMethod(base::TimeTicks start_time,
+ std::vector<SignalCallback> signal_callbacks,
+ Signal* signal) {
+ bus_->AssertOnOriginThread();
+
+ for (std::vector<SignalCallback>::iterator iter = signal_callbacks.begin();
+ iter != signal_callbacks.end(); ++iter)
+ iter->Run(signal);
+
+ // Delete the message on the D-Bus thread. See comments in
+ // RunResponseCallback().
+ bus_->GetDBusTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&base::DeletePointer<Signal>, signal));
+
+ // Record time spent for handling the signal.
+ UMA_HISTOGRAM_TIMES("DBus.SignalHandleTime",
+ base::TimeTicks::Now() - start_time);
+}
+
+DBusHandlerResult ObjectProxy::HandleMessageThunk(
+ DBusConnection* connection,
+ DBusMessage* raw_message,
+ void* user_data) {
+ ObjectProxy* self = reinterpret_cast<ObjectProxy*>(user_data);
+ return self->HandleMessage(connection, raw_message);
+}
+
+void ObjectProxy::LogMethodCallFailure(
+ const base::StringPiece& interface_name,
+ const base::StringPiece& method_name,
+ const base::StringPiece& error_name,
+ const base::StringPiece& error_message) const {
+ if (ignore_service_unknown_errors_ &&
+ (error_name == kErrorServiceUnknown || error_name == kErrorObjectUnknown))
+ return;
+
+ std::ostringstream msg;
+ msg << "Failed to call method: " << interface_name << "." << method_name
+ << ": object_path= " << object_path_.value()
+ << ": " << error_name << ": " << error_message;
+
+ // "UnknownObject" indicates that an object or service is no longer available,
+ // e.g. a Shill network service has gone out of range. Treat these as warnings
+ // not errors.
+ if (error_name == kErrorObjectUnknown)
+ LOG(WARNING) << msg.str();
+ else
+ LOG(ERROR) << msg.str();
+}
+
+void ObjectProxy::OnCallMethodError(const std::string& interface_name,
+ const std::string& method_name,
+ ResponseCallback response_callback,
+ ErrorResponse* error_response) {
+ if (error_response) {
+ // Error message may contain the error message as string.
+ MessageReader reader(error_response);
+ std::string error_message;
+ reader.PopString(&error_message);
+ LogMethodCallFailure(interface_name,
+ method_name,
+ error_response->GetErrorName(),
+ error_message);
+ }
+ response_callback.Run(NULL);
+}
+
+bool ObjectProxy::AddMatchRuleWithCallback(
+ const std::string& match_rule,
+ const std::string& absolute_signal_name,
+ SignalCallback signal_callback) {
+ DCHECK(!match_rule.empty());
+ DCHECK(!absolute_signal_name.empty());
+ bus_->AssertOnDBusThread();
+
+ if (match_rules_.find(match_rule) == match_rules_.end()) {
+ ScopedDBusError error;
+ bus_->AddMatch(match_rule, error.get());
+ if (error.is_set()) {
+ LOG(ERROR) << "Failed to add match rule \"" << match_rule << "\". Got "
+ << error.name() << ": " << error.message();
+ return false;
+ } else {
+ // Store the match rule, so that we can remove this in Detach().
+ match_rules_.insert(match_rule);
+ // Add the signal callback to the method table.
+ method_table_[absolute_signal_name].push_back(signal_callback);
+ return true;
+ }
+ } else {
+ // We already have the match rule.
+ method_table_[absolute_signal_name].push_back(signal_callback);
+ return true;
+ }
+}
+
+bool ObjectProxy::AddMatchRuleWithoutCallback(
+ const std::string& match_rule,
+ const std::string& absolute_signal_name) {
+ DCHECK(!match_rule.empty());
+ DCHECK(!absolute_signal_name.empty());
+ bus_->AssertOnDBusThread();
+
+ if (match_rules_.find(match_rule) != match_rules_.end())
+ return true;
+
+ ScopedDBusError error;
+ bus_->AddMatch(match_rule, error.get());
+ if (error.is_set()) {
+ LOG(ERROR) << "Failed to add match rule \"" << match_rule << "\". Got "
+ << error.name() << ": " << error.message();
+ return false;
+ }
+ // Store the match rule, so that we can remove this in Detach().
+ match_rules_.insert(match_rule);
+ return true;
+}
+
+void ObjectProxy::UpdateNameOwnerAndBlock() {
+ bus_->AssertOnDBusThread();
+ // Errors should be suppressed here, as the service may not be yet running
+ // when connecting to signals of the service, which is just fine.
+ // The ObjectProxy will be notified when the service is launched via
+ // NameOwnerChanged signal. See also comments in ConnectToSignalInternal().
+ service_name_owner_ =
+ bus_->GetServiceOwnerAndBlock(service_name_, Bus::SUPPRESS_ERRORS);
+}
+
+DBusHandlerResult ObjectProxy::HandleNameOwnerChanged(
+ std::unique_ptr<Signal> signal) {
+ DCHECK(signal);
+ bus_->AssertOnDBusThread();
+
+ // Confirm the validity of the NameOwnerChanged signal.
+ if (signal->GetMember() == kNameOwnerChangedMember &&
+ signal->GetInterface() == kDBusSystemObjectInterface &&
+ signal->GetSender() == kDBusSystemObjectAddress) {
+ MessageReader reader(signal.get());
+ std::string name, old_owner, new_owner;
+ if (reader.PopString(&name) &&
+ reader.PopString(&old_owner) &&
+ reader.PopString(&new_owner) &&
+ name == service_name_) {
+ service_name_owner_ = new_owner;
+ bus_->GetOriginTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&ObjectProxy::RunNameOwnerChangedCallback,
+ this, old_owner, new_owner));
+
+ const bool service_is_available = !service_name_owner_.empty();
+ if (service_is_available) {
+ bus_->GetOriginTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&ObjectProxy::RunWaitForServiceToBeAvailableCallbacks,
+ this, service_is_available));
+ }
+ }
+ }
+
+ // Always return unhandled to let other object proxies handle the same
+ // signal.
+ return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
+}
+
+void ObjectProxy::RunNameOwnerChangedCallback(const std::string& old_owner,
+ const std::string& new_owner) {
+ bus_->AssertOnOriginThread();
+ if (!name_owner_changed_callback_.is_null())
+ name_owner_changed_callback_.Run(old_owner, new_owner);
+}
+
+void ObjectProxy::RunWaitForServiceToBeAvailableCallbacks(
+ bool service_is_available) {
+ bus_->AssertOnOriginThread();
+
+ std::vector<WaitForServiceToBeAvailableCallback> callbacks;
+ callbacks.swap(wait_for_service_to_be_available_callbacks_);
+ for (size_t i = 0; i < callbacks.size(); ++i)
+ callbacks[i].Run(service_is_available);
+}
+
+} // namespace dbus
diff --git a/libchrome/dbus/object_proxy.h b/libchrome/dbus/object_proxy.h
new file mode 100644
index 0000000..033e886
--- /dev/null
+++ b/libchrome/dbus/object_proxy.h
@@ -0,0 +1,333 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_OBJECT_PROXY_H_
+#define DBUS_OBJECT_PROXY_H_
+
+#include <dbus/dbus.h>
+
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/strings/string_piece.h"
+#include "base/time/time.h"
+#include "dbus/dbus_export.h"
+#include "dbus/object_path.h"
+
+namespace dbus {
+
+class Bus;
+class ErrorResponse;
+class MethodCall;
+class Response;
+class ScopedDBusError;
+class Signal;
+
+// ObjectProxy is used to communicate with remote objects, mainly for
+// calling methods of these objects.
+//
+// ObjectProxy is a ref counted object, to ensure that |this| of the
+// object is alive when callbacks referencing |this| are called; the
+// bus always holds at least one of those references so object proxies
+// always last as long as the bus that created them.
+class CHROME_DBUS_EXPORT ObjectProxy
+    : public base::RefCountedThreadSafe<ObjectProxy> {
+ public:
+  // Client code should use Bus::GetObjectProxy() or
+  // Bus::GetObjectProxyWithOptions() instead of this constructor.
+  ObjectProxy(Bus* bus,
+              const std::string& service_name,
+              const ObjectPath& object_path,
+              int options);
+
+  // Options to be OR-ed together when calling Bus::GetObjectProxyWithOptions().
+  // Set the IGNORE_SERVICE_UNKNOWN_ERRORS option to silence logging of
+  // org.freedesktop.DBus.Error.ServiceUnknown errors and
+  // org.freedesktop.DBus.Error.ObjectUnknown errors.
+  enum Options {
+    DEFAULT_OPTIONS = 0,
+    IGNORE_SERVICE_UNKNOWN_ERRORS = 1 << 0
+  };
+
+  // Special timeout constants.
+  //
+  // The constants correspond to DBUS_TIMEOUT_USE_DEFAULT and
+  // DBUS_TIMEOUT_INFINITE. Here we use literal numbers instead of these
+  // macros as these aren't defined with D-Bus earlier than 1.4.12.
+  enum {
+    TIMEOUT_USE_DEFAULT = -1,
+    TIMEOUT_INFINITE = 0x7fffffff,
+  };
+
+  // Called when an error response is returned or no response is returned.
+  // Used for CallMethodWithErrorCallback().
+  typedef base::Callback<void(ErrorResponse*)> ErrorCallback;
+
+  // Called when the response is returned. Used for CallMethod().
+  typedef base::Callback<void(Response*)> ResponseCallback;
+
+  // Called when a signal is received. Signal* is the incoming signal.
+  typedef base::Callback<void (Signal*)> SignalCallback;
+
+  // Called when NameOwnerChanged signal is received.
+  typedef base::Callback<void(
+      const std::string& old_owner,
+      const std::string& new_owner)> NameOwnerChangedCallback;
+
+  // Called when the service becomes available.
+  typedef base::Callback<void(
+      bool service_is_available)> WaitForServiceToBeAvailableCallback;
+
+  // Called when the object proxy is connected to the signal.
+  // Parameters:
+  // - the interface name.
+  // - the signal name.
+  // - whether it was successful or not.
+  typedef base::Callback<void (const std::string&, const std::string&, bool)>
+      OnConnectedCallback;
+
+  // Calls the method of the remote object and blocks until the response
+  // is returned. Returns NULL on error with the error details specified
+  // in the |error| object.
+  //
+  // BLOCKING CALL.
+  virtual std::unique_ptr<Response> CallMethodAndBlockWithErrorDetails(
+      MethodCall* method_call,
+      int timeout_ms,
+      ScopedDBusError* error);
+
+  // Calls the method of the remote object and blocks until the response
+  // is returned. Returns NULL on error.
+  //
+  // BLOCKING CALL.
+  virtual std::unique_ptr<Response> CallMethodAndBlock(MethodCall* method_call,
+                                                       int timeout_ms);
+
+  // Requests to call the method of the remote object.
+  //
+  // |callback| will be called in the origin thread, once the method call
+  // is complete. As it's called in the origin thread, |callback| can
+  // safely reference objects in the origin thread (i.e. UI thread in most
+  // cases). If the caller is not interested in the response from the
+  // method (i.e. calling a method that does not return a value),
+  // EmptyResponseCallback() can be passed to the |callback| parameter.
+  //
+  // If the method call is successful, a pointer to Response object will
+  // be passed to the callback. If unsuccessful, NULL will be passed to
+  // the callback.
+  //
+  // Must be called in the origin thread.
+  virtual void CallMethod(MethodCall* method_call,
+                          int timeout_ms,
+                          ResponseCallback callback);
+
+  // Requests to call the method of the remote object.
+  //
+  // |callback| and |error_callback| will be called in the origin thread, once
+  // the method call is complete. As it's called in the origin thread,
+  // |callback| can safely reference objects in the origin thread (i.e.
+  // UI thread in most cases). If the caller is not interested in the response
+  // from the method (i.e. calling a method that does not return a value),
+  // EmptyResponseCallback() can be passed to the |callback| parameter.
+  //
+  // If the method call is successful, a pointer to Response object will
+  // be passed to the callback. If unsuccessful, the error callback will be
+  // called and a pointer to ErrorResponse object will be passed to the error
+  // callback if available, otherwise NULL will be passed.
+  //
+  // Must be called in the origin thread.
+  virtual void CallMethodWithErrorCallback(MethodCall* method_call,
+                                           int timeout_ms,
+                                           ResponseCallback callback,
+                                           ErrorCallback error_callback);
+
+  // Requests to connect to the signal from the remote object.
+  //
+  // |signal_callback| will be called in the origin thread, when the
+  // signal is received from the remote object. As it's called in the
+  // origin thread, |signal_callback| can safely reference objects in the
+  // origin thread (i.e. UI thread in most cases).
+  //
+  // |on_connected_callback| is called when the object proxy is connected
+  // to the signal, or failed to be connected, in the origin thread.
+  //
+  // If a SignalCallback has already been registered for the given
+  // |interface_name| and |signal_name|, |signal_callback| will be
+  // added to the list of callbacks for |interface_name| and
+  // |signal_name|.
+  //
+  // Must be called in the origin thread.
+  virtual void ConnectToSignal(const std::string& interface_name,
+                               const std::string& signal_name,
+                               SignalCallback signal_callback,
+                               OnConnectedCallback on_connected_callback);
+
+  // Sets a callback for "NameOwnerChanged" signal. The callback is called on
+  // the origin thread when D-Bus system sends "NameOwnerChanged" for the name
+  // represented by |service_name_|.
+  virtual void SetNameOwnerChangedCallback(NameOwnerChangedCallback callback);
+
+  // Runs the callback as soon as the service becomes available.
+  virtual void WaitForServiceToBeAvailable(
+      WaitForServiceToBeAvailableCallback callback);
+
+  // Detaches from the remote object. The Bus object will take care of
+  // detaching so you don't have to do this manually.
+  //
+  // BLOCKING CALL.
+  virtual void Detach();
+
+  // Returns the object path this proxy is bound to.
+  const ObjectPath& object_path() const { return object_path_; }
+
+  // Returns an empty callback that does nothing. Can be used for
+  // CallMethod().
+  static ResponseCallback EmptyResponseCallback();
+
+ protected:
+  // This is protected, so we can define sub classes.
+  virtual ~ObjectProxy();
+
+ private:
+  friend class base::RefCountedThreadSafe<ObjectProxy>;
+
+  // Struct of data we'll be passing from StartAsyncMethodCall() to
+  // OnPendingCallIsCompleteThunk().
+  struct OnPendingCallIsCompleteData {
+    OnPendingCallIsCompleteData(ObjectProxy* in_object_proxy,
+                                ResponseCallback in_response_callback,
+                                ErrorCallback error_callback,
+                                base::TimeTicks start_time);
+    ~OnPendingCallIsCompleteData();
+
+    ObjectProxy* object_proxy;           // Not owned.
+    ResponseCallback response_callback;  // Success callback.
+    ErrorCallback error_callback;        // Failure callback.
+    base::TimeTicks start_time;          // For latency reporting.
+  };
+
+  // Starts the async method call. This is a helper function to implement
+  // CallMethod().
+  void StartAsyncMethodCall(int timeout_ms,
+                            DBusMessage* request_message,
+                            ResponseCallback response_callback,
+                            ErrorCallback error_callback,
+                            base::TimeTicks start_time);
+
+  // Called when the pending call is complete.
+  void OnPendingCallIsComplete(DBusPendingCall* pending_call,
+                               ResponseCallback response_callback,
+                               ErrorCallback error_callback,
+                               base::TimeTicks start_time);
+
+  // Runs the response callback with the given response object.
+  void RunResponseCallback(ResponseCallback response_callback,
+                           ErrorCallback error_callback,
+                           base::TimeTicks start_time,
+                           DBusMessage* response_message);
+
+  // Redirects the function call to OnPendingCallIsComplete().
+  static void OnPendingCallIsCompleteThunk(DBusPendingCall* pending_call,
+                                           void* user_data);
+
+  // Connects to NameOwnerChanged signal.
+  bool ConnectToNameOwnerChangedSignal();
+
+  // Helper function for ConnectToSignal().
+  bool ConnectToSignalInternal(const std::string& interface_name,
+                               const std::string& signal_name,
+                               SignalCallback signal_callback);
+
+  // Helper function for WaitForServiceToBeAvailable().
+  void WaitForServiceToBeAvailableInternal();
+
+  // Handles the incoming request messages and dispatches to the signal
+  // callbacks.
+  DBusHandlerResult HandleMessage(DBusConnection* connection,
+                                  DBusMessage* raw_message);
+
+  // Runs the method. Helper function for HandleMessage().
+  void RunMethod(base::TimeTicks start_time,
+                 std::vector<SignalCallback> signal_callbacks,
+                 Signal* signal);
+
+  // Redirects the function call to HandleMessage().
+  static DBusHandlerResult HandleMessageThunk(DBusConnection* connection,
+                                              DBusMessage* raw_message,
+                                              void* user_data);
+
+  // Helper method for logging response errors appropriately.
+  void LogMethodCallFailure(const base::StringPiece& interface_name,
+                            const base::StringPiece& method_name,
+                            const base::StringPiece& error_name,
+                            const base::StringPiece& error_message) const;
+
+  // Used as ErrorCallback by CallMethod().
+  void OnCallMethodError(const std::string& interface_name,
+                         const std::string& method_name,
+                         ResponseCallback response_callback,
+                         ErrorResponse* error_response);
+
+  // Adds the match rule to the bus and associate the callback with the signal.
+  bool AddMatchRuleWithCallback(const std::string& match_rule,
+                                const std::string& absolute_signal_name,
+                                SignalCallback signal_callback);
+
+  // Adds the match rule to the bus so that HandleMessage can see the signal.
+  bool AddMatchRuleWithoutCallback(const std::string& match_rule,
+                                   const std::string& absolute_signal_name);
+
+  // Calls D-Bus's GetNameOwner method synchronously to update
+  // |service_name_owner_| with the current owner of |service_name_|.
+  //
+  // BLOCKING CALL.
+  void UpdateNameOwnerAndBlock();
+
+  // Handles NameOwnerChanged signal from D-Bus's special message bus.
+  DBusHandlerResult HandleNameOwnerChanged(
+      std::unique_ptr<dbus::Signal> signal);
+
+  // Runs |name_owner_changed_callback_|.
+  void RunNameOwnerChangedCallback(const std::string& old_owner,
+                                   const std::string& new_owner);
+
+  // Runs |wait_for_service_to_be_available_callbacks_|.
+  void RunWaitForServiceToBeAvailableCallbacks(bool service_is_available);
+
+  // The bus this proxy lives on; keeps the Bus alive while the proxy exists.
+  scoped_refptr<Bus> bus_;
+  // Well-known (or unique) bus name of the remote service.
+  std::string service_name_;
+  // Path of the remote object within |service_name_|.
+  ObjectPath object_path_;
+
+  // The method table where keys are absolute signal names (i.e. interface
+  // name + signal name), and values are lists of the corresponding callbacks.
+  typedef std::map<std::string, std::vector<SignalCallback> > MethodTable;
+  MethodTable method_table_;
+
+  // The callback called when NameOwnerChanged signal is received.
+  NameOwnerChangedCallback name_owner_changed_callback_;
+
+  // Called when the service becomes available.
+  std::vector<WaitForServiceToBeAvailableCallback>
+      wait_for_service_to_be_available_callbacks_;
+
+  // Match rules already added to the bus, so Detach() can remove them.
+  std::set<std::string> match_rules_;
+
+  // See Options::IGNORE_SERVICE_UNKNOWN_ERRORS.
+  const bool ignore_service_unknown_errors_;
+
+  // Known name owner of the well-known bus name represented by |service_name_|.
+  std::string service_name_owner_;
+
+  // Outstanding async calls, cancelled on Detach().
+  std::set<DBusPendingCall*> pending_calls_;
+
+  DISALLOW_COPY_AND_ASSIGN(ObjectProxy);
+};
+
+} // namespace dbus
+
+#endif // DBUS_OBJECT_PROXY_H_
diff --git a/libchrome/dbus/property.cc b/libchrome/dbus/property.cc
new file mode 100644
index 0000000..aa58436
--- /dev/null
+++ b/libchrome/dbus/property.cc
@@ -0,0 +1,679 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dbus/property.h"
+
+#include <stddef.h>
+
+#include "base/bind.h"
+#include "base/logging.h"
+
+#include "dbus/message.h"
+#include "dbus/object_path.h"
+#include "dbus/object_proxy.h"
+
+namespace dbus {
+
+//
+// PropertyBase implementation.
+//
+
+// A PropertyBase starts detached (no owning PropertySet) and invalid until a
+// value is successfully fetched.
+PropertyBase::PropertyBase() : property_set_(nullptr), is_valid_(false) {}
+
+PropertyBase::~PropertyBase() {}
+
+// Attaches this property to |property_set| under |name|.  Called exactly once,
+// from PropertySet::RegisterProperty(); re-initialization is a logic error.
+void PropertyBase::Init(PropertySet* property_set, const std::string& name) {
+  DCHECK(!property_set_);
+  property_set_ = property_set;
+  is_valid_ = false;
+  name_ = name;
+}
+
+//
+// PropertySet implementation.
+//
+
+// Binds a set of properties to |object_proxy| for D-Bus interface
+// |interface|.  |property_changed_callback| is invoked (with the property
+// name) each time a property's cached value changes or is invalidated.
+PropertySet::PropertySet(
+    ObjectProxy* object_proxy,
+    const std::string& interface,
+    const PropertyChangedCallback& property_changed_callback)
+    : object_proxy_(object_proxy),
+      interface_(interface),
+      property_changed_callback_(property_changed_callback),
+      weak_ptr_factory_(this) {}
+
+PropertySet::~PropertySet() {
+}
+
+// Associates |property| with |name| so signals and GetAll() responses can be
+// routed to it.  |property| is not owned; it must outlive this PropertySet.
+void PropertySet::RegisterProperty(const std::string& name,
+                                   PropertyBase* property) {
+  property->Init(this, name);
+  properties_map_[name] = property;
+}
+
+// Subscribes to org.freedesktop.DBus.Properties.PropertiesChanged so cached
+// values track the remote object.  Weak pointers keep the callbacks safe if
+// this PropertySet is destroyed before the signal fires.
+void PropertySet::ConnectSignals() {
+  DCHECK(object_proxy_);
+  object_proxy_->ConnectToSignal(
+      kPropertiesInterface,
+      kPropertiesChanged,
+      base::Bind(&PropertySet::ChangedReceived,
+                 weak_ptr_factory_.GetWeakPtr()),
+      base::Bind(&PropertySet::ChangedConnected,
+                 weak_ptr_factory_.GetWeakPtr()));
+}
+
+
+// Handles a PropertiesChanged signal.  Expected payload is
+// (interface_name, changed_properties dict, invalidated_properties array);
+// signals for other interfaces are silently ignored.
+void PropertySet::ChangedReceived(Signal* signal) {
+  DCHECK(signal);
+  MessageReader reader(signal);
+
+  std::string interface;
+  if (!reader.PopString(&interface)) {
+    LOG(WARNING) << "Property changed signal has wrong parameters: "
+                 << "expected interface name: " << signal->ToString();
+    return;
+  }
+
+  if (interface != this->interface())
+    return;
+
+  // Note: parsing continues to the invalidated array even if the dictionary
+  // was malformed; each failure is logged independently.
+  if (!UpdatePropertiesFromReader(&reader)) {
+    LOG(WARNING) << "Property changed signal has wrong parameters: "
+                 << "expected dictionary: " << signal->ToString();
+  }
+
+  if (!InvalidatePropertiesFromReader(&reader)) {
+    LOG(WARNING) << "Property changed signal has wrong parameters: "
+                 << "expected array to invalidate: " << signal->ToString();
+  }
+}
+
+// Completion callback for the PropertiesChanged subscription made in
+// ConnectSignals().  A failure is logged but otherwise non-fatal: properties
+// can still be fetched explicitly via Get()/GetAll().
+void PropertySet::ChangedConnected(const std::string& /*interface_name*/,
+                                   const std::string& signal_name,
+                                   bool success) {
+  // Fixed: the message previously lacked a space before "signal.", producing
+  // e.g. "Failed to connect to PropertiesChangedsignal.".
+  LOG_IF(WARNING, !success) << "Failed to connect to " << signal_name
+                            << " signal.";
+}
+
+
+// Asynchronously fetches |property| via org.freedesktop.DBus.Properties.Get.
+// |callback| (may be null) runs on the origin thread after the cached value
+// and validity flag have been updated in OnGet().
+void PropertySet::Get(PropertyBase* property, GetCallback callback) {
+  MethodCall method_call(kPropertiesInterface, kPropertiesGet);
+  MessageWriter writer(&method_call);
+  writer.AppendString(interface());
+  writer.AppendString(property->name());
+
+  DCHECK(object_proxy_);
+  object_proxy_->CallMethod(&method_call,
+                            ObjectProxy::TIMEOUT_USE_DEFAULT,
+                            base::Bind(&PropertySet::OnGet,
+                                       GetWeakPtr(),
+                                       property,
+                                       callback));
+}
+
+// Response handler for Get().  Updates |property|'s cached value/validity and
+// notifies observers on any state change; |callback| is only invoked when a
+// response (even an unparsable one) arrived.
+void PropertySet::OnGet(PropertyBase* property, GetCallback callback,
+                        Response* response) {
+  if (!response) {
+    // No response at all: leave the cached state untouched and skip the
+    // user callback.
+    LOG(WARNING) << property->name() << ": Get: failed.";
+    return;
+  }
+
+  MessageReader reader(response);
+  if (property->PopValueFromReader(&reader)) {
+    property->set_valid(true);
+    NotifyPropertyChanged(property->name());
+  } else {
+    // Unparsable value: mark the property invalid, notifying only on the
+    // valid -> invalid transition.
+    if (property->is_valid()) {
+      property->set_valid(false);
+      NotifyPropertyChanged(property->name());
+    }
+  }
+
+  if (!callback.is_null())
+    callback.Run(response);
+}
+
+// Synchronous variant of Get().  Returns true iff a response was received;
+// note true does not imply the value parsed — the property may have been
+// marked invalid.
+//
+// BLOCKING CALL.
+bool PropertySet::GetAndBlock(PropertyBase* property) {
+  MethodCall method_call(kPropertiesInterface, kPropertiesGet);
+  MessageWriter writer(&method_call);
+  writer.AppendString(interface());
+  writer.AppendString(property->name());
+
+  DCHECK(object_proxy_);
+  std::unique_ptr<dbus::Response> response(object_proxy_->CallMethodAndBlock(
+      &method_call, ObjectProxy::TIMEOUT_USE_DEFAULT));
+
+  if (!response.get()) {
+    LOG(WARNING) << property->name() << ": GetAndBlock: failed.";
+    return false;
+  }
+
+  // Same cache-update logic as OnGet(): valid on parse success, invalidated
+  // (with notification) on parse failure.
+  MessageReader reader(response.get());
+  if (property->PopValueFromReader(&reader)) {
+    property->set_valid(true);
+    NotifyPropertyChanged(property->name());
+  } else {
+    if (property->is_valid()) {
+      property->set_valid(false);
+      NotifyPropertyChanged(property->name());
+    }
+  }
+  return true;
+}
+
+// Asynchronously fetches every property of interface() via
+// org.freedesktop.DBus.Properties.GetAll; results land in OnGetAll().
+void PropertySet::GetAll() {
+  MethodCall method_call(kPropertiesInterface, kPropertiesGetAll);
+  MessageWriter writer(&method_call);
+  writer.AppendString(interface());
+
+  DCHECK(object_proxy_);
+  object_proxy_->CallMethod(&method_call,
+                            ObjectProxy::TIMEOUT_USE_DEFAULT,
+                            base::Bind(&PropertySet::OnGetAll,
+                                       weak_ptr_factory_.GetWeakPtr()));
+}
+
+// Response handler for GetAll().  The payload is a dict of name -> variant;
+// each recognized entry updates the matching registered property.
+void PropertySet::OnGetAll(Response* response) {
+  if (!response) {
+    LOG(WARNING) << "GetAll request failed for: " << interface_;
+    return;
+  }
+
+  MessageReader reader(response);
+  if (!UpdatePropertiesFromReader(&reader)) {
+    LOG(WARNING) << "GetAll response has wrong parameters: "
+                 << "expected dictionary: " << response->ToString();
+  }
+}
+
+// Asynchronously writes |property|'s staged set-value via
+// org.freedesktop.DBus.Properties.Set.  |callback| (may be null) runs on the
+// origin thread when the call completes.
+void PropertySet::Set(PropertyBase* property, SetCallback callback) {
+  MethodCall method_call(kPropertiesInterface, kPropertiesSet);
+  MessageWriter writer(&method_call);
+  writer.AppendString(interface());
+  writer.AppendString(property->name());
+  property->AppendSetValueToWriter(&writer);
+
+  DCHECK(object_proxy_);
+  object_proxy_->CallMethod(&method_call,
+                            ObjectProxy::TIMEOUT_USE_DEFAULT,
+                            base::Bind(&PropertySet::OnSet,
+                                       GetWeakPtr(),
+                                       property,
+                                       callback));
+}
+
+// Synchronous variant of Set().  Writes |property|'s staged set-value via
+// org.freedesktop.DBus.Properties.Set and returns true iff a (success)
+// response was received.
+//
+// BLOCKING CALL.
+bool PropertySet::SetAndBlock(PropertyBase* property) {
+  MethodCall method_call(kPropertiesInterface, kPropertiesSet);
+  MessageWriter writer(&method_call);
+  writer.AppendString(interface());
+  writer.AppendString(property->name());
+  property->AppendSetValueToWriter(&writer);
+
+  DCHECK(object_proxy_);
+  std::unique_ptr<dbus::Response> response(object_proxy_->CallMethodAndBlock(
+      &method_call, ObjectProxy::TIMEOUT_USE_DEFAULT));
+  // Collapsed the redundant "if (response.get()) return true; return false;"
+  // into a single expression.
+  return response != nullptr;
+}
+
+// Response handler for Set().  A null |response| means the write failed; the
+// cached value is not touched here — it updates only via a subsequent
+// PropertiesChanged signal or Get/GetAll.
+void PropertySet::OnSet(PropertyBase* property,
+                        SetCallback callback,
+                        Response* response) {
+  LOG_IF(WARNING, !response) << property->name() << ": Set: failed.";
+  if (!callback.is_null())
+    callback.Run(response);
+}
+
+// Pops a dict of name -> variant from |reader| and updates each recognized
+// property.  Returns false only when the outer array itself is missing;
+// individual malformed or unknown entries are skipped.
+bool PropertySet::UpdatePropertiesFromReader(MessageReader* reader) {
+  DCHECK(reader);
+  MessageReader array_reader(NULL);
+  if (!reader->PopArray(&array_reader))
+    return false;
+
+  while (array_reader.HasMoreData()) {
+    MessageReader dict_entry_reader(NULL);
+    if (array_reader.PopDictEntry(&dict_entry_reader))
+      UpdatePropertyFromReader(&dict_entry_reader);
+  }
+
+  return true;
+}
+
+// Pops one (name, variant) dict entry from |reader| and applies it to the
+// matching registered property.  Returns true iff the entry named a known
+// property and its value parsed.
+bool PropertySet::UpdatePropertyFromReader(MessageReader* reader) {
+  DCHECK(reader);
+
+  std::string name;
+  if (!reader->PopString(&name))
+    return false;
+
+  // Properties the client did not register are ignored.
+  PropertiesMap::iterator it = properties_map_.find(name);
+  if (it == properties_map_.end())
+    return false;
+
+  PropertyBase* property = it->second;
+  if (property->PopValueFromReader(reader)) {
+    property->set_valid(true);
+    NotifyPropertyChanged(name);
+    return true;
+  } else {
+    // Parse failure invalidates the cache, notifying only on the
+    // valid -> invalid transition.
+    if (property->is_valid()) {
+      property->set_valid(false);
+      NotifyPropertyChanged(property->name());
+    }
+    return false;
+  }
+}
+
+// Pops the invalidated-properties string array from |reader| and marks each
+// named, registered property invalid (with change notification).  Returns
+// false if the array or any element is malformed.
+bool PropertySet::InvalidatePropertiesFromReader(MessageReader* reader) {
+  DCHECK(reader);
+  MessageReader array_reader(NULL);
+  if (!reader->PopArray(&array_reader))
+    return false;
+
+  while (array_reader.HasMoreData()) {
+    std::string name;
+    if (!array_reader.PopString(&name))
+      return false;
+
+    // Unknown names are ignored, not errors.
+    PropertiesMap::iterator it = properties_map_.find(name);
+    if (it == properties_map_.end())
+      continue;
+
+    PropertyBase* property = it->second;
+    if (property->is_valid()) {
+      property->set_valid(false);
+      NotifyPropertyChanged(property->name());
+    }
+  }
+
+  return true;
+}
+
+// Forwards a property-changed notification for |name| to the client's
+// callback, if one was supplied at construction.
+void PropertySet::NotifyPropertyChanged(const std::string& name) {
+  if (!property_changed_callback_.is_null())
+    property_changed_callback_.Run(name);
+}
+
+//
+// Property<Byte> specialization.
+//
+
+// Byte ("y") property.  Both the cached value and the staged set-value are
+// zero-initialized; previously |set_value_| was left indeterminate until
+// Set() staged a value (the int64_t specialization already initialized both).
+template <>
+Property<uint8_t>::Property()
+    : value_(0), set_value_(0) {}
+
+template <>
+bool Property<uint8_t>::PopValueFromReader(MessageReader* reader) {
+  return reader->PopVariantOfByte(&value_);
+}
+
+template <>
+void Property<uint8_t>::AppendSetValueToWriter(MessageWriter* writer) {
+  writer->AppendVariantOfByte(set_value_);
+}
+
+//
+// Property<bool> specialization.
+//
+
+// Boolean ("b") property.  Both the cached value and the staged set-value are
+// initialized; previously |set_value_| was left indeterminate until Set()
+// staged a value (the int64_t specialization already initialized both).
+template <>
+Property<bool>::Property() : value_(false), set_value_(false) {
+}
+
+template <>
+bool Property<bool>::PopValueFromReader(MessageReader* reader) {
+  return reader->PopVariantOfBool(&value_);
+}
+
+template <>
+void Property<bool>::AppendSetValueToWriter(MessageWriter* writer) {
+  writer->AppendVariantOfBool(set_value_);
+}
+
+//
+// Property<int16_t> specialization.
+//
+
+// Int16 ("n") property.  Zero-initializes both members; previously
+// |set_value_| was left indeterminate until Set() staged a value.
+template <>
+Property<int16_t>::Property()
+    : value_(0), set_value_(0) {}
+
+template <>
+bool Property<int16_t>::PopValueFromReader(MessageReader* reader) {
+  return reader->PopVariantOfInt16(&value_);
+}
+
+template <>
+void Property<int16_t>::AppendSetValueToWriter(MessageWriter* writer) {
+  writer->AppendVariantOfInt16(set_value_);
+}
+
+//
+// Property<uint16_t> specialization.
+//
+
+// Uint16 ("q") property.  Zero-initializes both members; previously
+// |set_value_| was left indeterminate until Set() staged a value.
+template <>
+Property<uint16_t>::Property()
+    : value_(0), set_value_(0) {}
+
+template <>
+bool Property<uint16_t>::PopValueFromReader(MessageReader* reader) {
+  return reader->PopVariantOfUint16(&value_);
+}
+
+template <>
+void Property<uint16_t>::AppendSetValueToWriter(MessageWriter* writer) {
+  writer->AppendVariantOfUint16(set_value_);
+}
+
+//
+// Property<int32_t> specialization.
+//
+
+// Int32 ("i") property.  Zero-initializes both members; previously
+// |set_value_| was left indeterminate until Set() staged a value.
+template <>
+Property<int32_t>::Property()
+    : value_(0), set_value_(0) {}
+
+template <>
+bool Property<int32_t>::PopValueFromReader(MessageReader* reader) {
+  return reader->PopVariantOfInt32(&value_);
+}
+
+template <>
+void Property<int32_t>::AppendSetValueToWriter(MessageWriter* writer) {
+  writer->AppendVariantOfInt32(set_value_);
+}
+
+//
+// Property<uint32_t> specialization.
+//
+
+// Uint32 ("u") property.  Zero-initializes both members; previously
+// |set_value_| was left indeterminate until Set() staged a value.
+template <>
+Property<uint32_t>::Property()
+    : value_(0), set_value_(0) {}
+
+template <>
+bool Property<uint32_t>::PopValueFromReader(MessageReader* reader) {
+  return reader->PopVariantOfUint32(&value_);
+}
+
+template <>
+void Property<uint32_t>::AppendSetValueToWriter(MessageWriter* writer) {
+  writer->AppendVariantOfUint32(set_value_);
+}
+
+//
+// Property<int64_t> specialization.
+//
+
+// Int64 ("x") property.  Note this is the one integral specialization that
+// already zero-initializes both the cached value and the staged set-value.
+template <>
+Property<int64_t>::Property()
+    : value_(0), set_value_(0) {}
+
+template <>
+bool Property<int64_t>::PopValueFromReader(MessageReader* reader) {
+  return reader->PopVariantOfInt64(&value_);
+}
+
+template <>
+void Property<int64_t>::AppendSetValueToWriter(MessageWriter* writer) {
+  writer->AppendVariantOfInt64(set_value_);
+}
+
+//
+// Property<uint64_t> specialization.
+//
+
+// Uint64 ("t") property.  Zero-initializes both members; previously
+// |set_value_| was left indeterminate until Set() staged a value.
+template <>
+Property<uint64_t>::Property()
+    : value_(0), set_value_(0) {}
+
+template <>
+bool Property<uint64_t>::PopValueFromReader(MessageReader* reader) {
+  return reader->PopVariantOfUint64(&value_);
+}
+
+template <>
+void Property<uint64_t>::AppendSetValueToWriter(MessageWriter* writer) {
+  writer->AppendVariantOfUint64(set_value_);
+}
+
+//
+// Property<double> specialization.
+//
+
+// Double ("d") property.  Zero-initializes both members; previously
+// |set_value_| was left indeterminate until Set() staged a value.
+template <>
+Property<double>::Property() : value_(0.0), set_value_(0.0) {
+}
+
+template <>
+bool Property<double>::PopValueFromReader(MessageReader* reader) {
+  return reader->PopVariantOfDouble(&value_);
+}
+
+template <>
+void Property<double>::AppendSetValueToWriter(MessageWriter* writer) {
+  writer->AppendVariantOfDouble(set_value_);
+}
+
+//
+// Property<std::string> specialization.
+//
+
+// String ("s") property.  No custom constructor: std::string default-
+// constructs to empty for both members.
+template <>
+bool Property<std::string>::PopValueFromReader(MessageReader* reader) {
+  return reader->PopVariantOfString(&value_);
+}
+
+template <>
+void Property<std::string>::AppendSetValueToWriter(MessageWriter* writer) {
+  writer->AppendVariantOfString(set_value_);
+}
+
+//
+// Property<ObjectPath> specialization.
+//
+
+// Object-path ("o") property.  ObjectPath default-constructs to empty, so no
+// custom constructor is needed.
+template <>
+bool Property<ObjectPath>::PopValueFromReader(MessageReader* reader) {
+  return reader->PopVariantOfObjectPath(&value_);
+}
+
+template <>
+void Property<ObjectPath>::AppendSetValueToWriter(MessageWriter* writer) {
+  writer->AppendVariantOfObjectPath(set_value_);
+}
+
+//
+// Property<std::vector<std::string> > specialization.
+//
+
+// String-array ("as") property, transported as a variant-wrapped array.
+template <>
+bool Property<std::vector<std::string> >::PopValueFromReader(
+    MessageReader* reader) {
+  MessageReader variant_reader(NULL);
+  if (!reader->PopVariant(&variant_reader))
+    return false;
+
+  // Clear first so a successful pop fully replaces, not appends to, the
+  // previous cached value.
+  value_.clear();
+  return variant_reader.PopArrayOfStrings(&value_);
+}
+
+template <>
+void Property<std::vector<std::string> >::AppendSetValueToWriter(
+    MessageWriter* writer) {
+  MessageWriter variant_writer(NULL);
+  writer->OpenVariant("as", &variant_writer);
+  variant_writer.AppendArrayOfStrings(set_value_);
+  writer->CloseContainer(&variant_writer);
+}
+
+//
+// Property<std::vector<ObjectPath> > specialization.
+//
+
+// Object-path-array ("ao") property, transported as a variant-wrapped array.
+template <>
+bool Property<std::vector<ObjectPath> >::PopValueFromReader(
+    MessageReader* reader) {
+  MessageReader variant_reader(NULL);
+  if (!reader->PopVariant(&variant_reader))
+    return false;
+
+  // Clear first so a successful pop fully replaces the previous cached value.
+  value_.clear();
+  return variant_reader.PopArrayOfObjectPaths(&value_);
+}
+
+template <>
+void Property<std::vector<ObjectPath> >::AppendSetValueToWriter(
+    MessageWriter* writer) {
+  MessageWriter variant_writer(NULL);
+  writer->OpenVariant("ao", &variant_writer);
+  variant_writer.AppendArrayOfObjectPaths(set_value_);
+  writer->CloseContainer(&variant_writer);
+}
+
+//
+// Property<std::vector<uint8_t> > specialization.
+//
+
+// Byte-array ("ay") property, transported as a variant-wrapped byte array.
+template <>
+bool Property<std::vector<uint8_t>>::PopValueFromReader(MessageReader* reader) {
+  MessageReader variant_reader(NULL);
+  if (!reader->PopVariant(&variant_reader))
+    return false;
+
+  value_.clear();
+  // PopArrayOfBytes yields a pointer into the message buffer; copy it into
+  // |value_| before the reader goes away.
+  const uint8_t* bytes = NULL;
+  size_t length = 0;
+  if (!variant_reader.PopArrayOfBytes(&bytes, &length))
+    return false;
+  value_.assign(bytes, bytes + length);
+  return true;
+}
+
+template <>
+void Property<std::vector<uint8_t>>::AppendSetValueToWriter(
+    MessageWriter* writer) {
+  MessageWriter variant_writer(NULL);
+  writer->OpenVariant("ay", &variant_writer);
+  variant_writer.AppendArrayOfBytes(set_value_.data(), set_value_.size());
+  writer->CloseContainer(&variant_writer);
+}
+
+//
+// Property<std::map<std::string, std::string>> specialization.
+//
+
+// String-to-string dictionary ("a{ss}") property, transported as a
+// variant-wrapped array of dict entries.
+template <>
+bool Property<std::map<std::string, std::string>>::PopValueFromReader(
+    MessageReader* reader) {
+  MessageReader variant_reader(NULL);
+  MessageReader array_reader(NULL);
+  if (!reader->PopVariant(&variant_reader) ||
+      !variant_reader.PopArray(&array_reader))
+    return false;
+  // Clear first so the pop fully replaces the previous cached map.  Note a
+  // mid-array parse failure leaves |value_| partially populated.
+  value_.clear();
+  while (array_reader.HasMoreData()) {
+    dbus::MessageReader dict_entry_reader(NULL);
+    if (!array_reader.PopDictEntry(&dict_entry_reader))
+      return false;
+    std::string key;
+    std::string value;
+    if (!dict_entry_reader.PopString(&key) ||
+        !dict_entry_reader.PopString(&value))
+      return false;
+    value_[key] = value;
+  }
+  return true;
+}
+
+template <>
+void Property<std::map<std::string, std::string>>::AppendSetValueToWriter(
+    MessageWriter* writer) {
+  MessageWriter variant_writer(NULL);
+  MessageWriter dict_writer(NULL);
+  writer->OpenVariant("a{ss}", &variant_writer);
+  variant_writer.OpenArray("{ss}", &dict_writer);
+  for (const auto& pair : set_value_) {
+    dbus::MessageWriter entry_writer(NULL);
+    dict_writer.OpenDictEntry(&entry_writer);
+    entry_writer.AppendString(pair.first);
+    entry_writer.AppendString(pair.second);
+    dict_writer.CloseContainer(&entry_writer);
+  }
+  variant_writer.CloseContainer(&dict_writer);
+  writer->CloseContainer(&variant_writer);
+}
+
+//
+// Property<std::vector<std::pair<std::vector<uint8_t>, uint16_t>>>
+// specialization.
+//
+
+// Array-of-struct ("a(ayq)") property: each element is a (byte array,
+// uint16) pair, transported as a variant-wrapped array of structs.
+template <>
+bool Property<std::vector<std::pair<std::vector<uint8_t>, uint16_t>>>::
+    PopValueFromReader(MessageReader* reader) {
+  MessageReader variant_reader(NULL);
+  MessageReader array_reader(NULL);
+  if (!reader->PopVariant(&variant_reader) ||
+      !variant_reader.PopArray(&array_reader))
+    return false;
+
+  // Clear first so the pop fully replaces the previous cached value.  A
+  // mid-array parse failure leaves |value_| partially populated.
+  value_.clear();
+  while (array_reader.HasMoreData()) {
+    dbus::MessageReader struct_reader(NULL);
+    if (!array_reader.PopStruct(&struct_reader))
+      return false;
+
+    std::pair<std::vector<uint8_t>, uint16_t> entry;
+    // Bytes point into the message buffer; copy before the reader goes away.
+    const uint8_t* bytes = NULL;
+    size_t length = 0;
+    if (!struct_reader.PopArrayOfBytes(&bytes, &length))
+      return false;
+    entry.first.assign(bytes, bytes + length);
+    if (!struct_reader.PopUint16(&entry.second))
+      return false;
+    value_.push_back(entry);
+  }
+  return true;
+}
+
+template <>
+void Property<std::vector<std::pair<std::vector<uint8_t>, uint16_t>>>::
+    AppendSetValueToWriter(MessageWriter* writer) {
+  MessageWriter variant_writer(NULL);
+  MessageWriter array_writer(NULL);
+  writer->OpenVariant("a(ayq)", &variant_writer);
+  variant_writer.OpenArray("(ayq)", &array_writer);
+  for (const auto& pair : set_value_) {
+    dbus::MessageWriter struct_writer(nullptr);
+    array_writer.OpenStruct(&struct_writer);
+    struct_writer.AppendArrayOfBytes(std::get<0>(pair).data(),
+                                     std::get<0>(pair).size());
+    struct_writer.AppendUint16(std::get<1>(pair));
+    array_writer.CloseContainer(&struct_writer);
+  }
+  variant_writer.CloseContainer(&array_writer);
+  writer->CloseContainer(&variant_writer);
+}
+
+// Explicit instantiations: every Property<T> used by clients is emitted in
+// this translation unit so the template body need not live in the header.
+template class Property<uint8_t>;
+template class Property<bool>;
+template class Property<int16_t>;
+template class Property<uint16_t>;
+template class Property<int32_t>;
+template class Property<uint32_t>;
+template class Property<int64_t>;
+template class Property<uint64_t>;
+template class Property<double>;
+template class Property<std::string>;
+template class Property<ObjectPath>;
+template class Property<std::vector<std::string> >;
+template class Property<std::vector<ObjectPath> >;
+template class Property<std::vector<uint8_t>>;
+template class Property<std::map<std::string, std::string>>;
+template class Property<std::vector<std::pair<std::vector<uint8_t>, uint16_t>>>;
+
+} // namespace dbus
diff --git a/libchrome/dbus/property.h b/libchrome/dbus/property.h
new file mode 100644
index 0000000..efbad22
--- /dev/null
+++ b/libchrome/dbus/property.h
@@ -0,0 +1,617 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_PROPERTY_H_
+#define DBUS_PROPERTY_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "dbus/dbus_export.h"
+#include "dbus/message.h"
+#include "dbus/object_proxy.h"
+
+// D-Bus objects frequently provide sets of properties accessed via a
+// standard interface of method calls and signals to obtain the current value,
+// set a new value and be notified of changes to the value. Unfortunately this
+// interface makes heavy use of variants and dictionaries of variants. The
+// classes defined here make dealing with properties in a type-safe manner
+// possible.
+//
+// Client implementation classes should define a Properties structure, deriving
+// from the PropertySet class defined here. This structure should contain a
+// member for each property defined as an instance of the Property<> class,
+// specifying the type to the template. Finally the structure should chain up
+// to the PropertySet constructor, and then call RegisterProperty() for each
+// property defined to associate them with their string name.
+//
+// Example:
+// class ExampleClient {
+// public:
+// struct Properties : public dbus::PropertySet {
+// dbus::Property<std::string> name;
+// dbus::Property<uint16_t> version;
+// dbus::Property<dbus::ObjectPath> parent;
+// dbus::Property<std::vector<std::string> > children;
+//
+// Properties(dbus::ObjectProxy* object_proxy,
+// const PropertyChangedCallback callback)
+// : dbus::PropertySet(object_proxy, "com.example.DBus", callback) {
+// RegisterProperty("Name", &name);
+// RegisterProperty("Version", &version);
+// RegisterProperty("Parent", &parent);
+// RegisterProperty("Children", &children);
+// }
+// virtual ~Properties() {}
+// };
+//
+// The Properties structure requires a pointer to the object proxy of the
+// actual object to track, and after construction should have signals
+// connected to that object and initial values set by calling ConnectSignals()
+// and GetAll(). The structure should not outlive the object proxy, so it
+// is recommended that the lifecycle of both be managed together.
+//
+// Example (continued):
+//
+//   typedef std::pair<dbus::ObjectProxy*, Properties*> Object;
+// typedef std::map<dbus::ObjectPath, Object> ObjectMap;
+// ObjectMap object_map_;
+//
+// dbus::ObjectProxy* GetObjectProxy(const dbus::ObjectPath& object_path) {
+// return GetObject(object_path).first;
+// }
+//
+// Properties* GetProperties(const dbus::ObjectPath& object_path) {
+// return GetObject(object_path).second;
+// }
+//
+// Object GetObject(const dbus::ObjectPath& object_path) {
+// ObjectMap::iterator it = object_map_.find(object_path);
+// if (it != object_map_.end())
+// return it->second;
+//
+// dbus::ObjectProxy* object_proxy = bus->GetObjectProxy(...);
+// // connect signals, etc.
+//
+// Properties* properties = new Properties(
+// object_proxy,
+// base::Bind(&PropertyChanged,
+// weak_ptr_factory_.GetWeakPtr(),
+// object_path));
+// properties->ConnectSignals();
+// properties->GetAll();
+//
+// Object object = std::make_pair(object_proxy, properties);
+// object_map_[object_path] = object;
+// return object;
+// }
+// };
+//
+// This now allows code using the client implementation to access properties
+// in a type-safe manner, and assuming the PropertyChanged callback is
+// propagated up to observers, be notified of changes. A typical access of
+// the current value of the name property would be:
+//
+// ExampleClient::Properties* p = example_client->GetProperties(object_path);
+// std::string name = p->name.value();
+//
+// Normally these values are updated from signals emitted by the remote object,
+// in case an explicit round-trip is needed to obtain the current value, the
+// Get() method can be used and indicates whether or not the value update was
+// successful. The updated value can be obtained in the callback using the
+// value() method.
+//
+// p->children.Get(base::Bind(&OnGetChildren));
+//
+// A new value can be set using the Set() method, the callback indicates
+// success only; it is up to the remote object when (and indeed if) it updates
+// the property value, and whether it emits a signal or a Get() call is
+// required to obtain it.
+//
+// p->version.Set(20, base::Bind(&OnSetVersion))
+
+namespace dbus {
+
+// D-Bus Properties interface constants, declared here rather than
+// in property.cc because template methods use them.
+const char kPropertiesInterface[] = "org.freedesktop.DBus.Properties";
+const char kPropertiesGetAll[] = "GetAll";
+const char kPropertiesGet[] = "Get";
+const char kPropertiesSet[] = "Set";
+const char kPropertiesChanged[] = "PropertiesChanged";
+
+class PropertySet;
+
+// PropertyBase is an abstract base-class consisting of the parts of
+// the Property<> template that are not type-specific, such as the
+// associated PropertySet, property name, and the type-unsafe parts
+// used by PropertySet.
+class CHROME_DBUS_EXPORT PropertyBase {
+ public:
+ PropertyBase();
+ virtual ~PropertyBase();
+
+ // Initializes the |property_set| and property |name| so that method
+ // calls may be made from this class. This method is called by
+ // PropertySet::RegisterProperty() passing |this| for |property_set| so
+ // there should be no need to call it directly. If you do beware that
+ // no ownership or reference to |property_set| is taken so that object
+ // must outlive this one.
+ void Init(PropertySet* property_set, const std::string& name);
+
+ // Retrieves the name of this property, this may be useful in observers
+  // to avoid specifying the name in more than one place, e.g.
+ //
+ // void Client::PropertyChanged(const dbus::ObjectPath& object_path,
+ // const std::string &property_name) {
+ // Properties& properties = GetProperties(object_path);
+ // if (property_name == properties.version.name()) {
+ // // Handle version property changing
+ // }
+ // }
+ const std::string& name() const { return name_; }
+
+ // Returns true if property is valid, false otherwise.
+ bool is_valid() const { return is_valid_; }
+
+ // Allows to mark Property as valid or invalid.
+ void set_valid(bool is_valid) { is_valid_ = is_valid; }
+
+ // Method used by PropertySet to retrieve the value from a MessageReader,
+ // no knowledge of the contained type is required, this method returns
+ // true if its expected type was found, false if not.
+ // Implementation provided by specialization.
+ virtual bool PopValueFromReader(MessageReader* reader) = 0;
+
+ // Method used by PropertySet to append the set value to a MessageWriter,
+ // no knowledge of the contained type is required.
+ // Implementation provided by specialization.
+ virtual void AppendSetValueToWriter(MessageWriter* writer) = 0;
+
+ // Method used by test and stub implementations of dbus::PropertySet::Set
+ // to replace the property value with the set value without using a
+ // dbus::MessageReader.
+ virtual void ReplaceValueWithSetValue() = 0;
+
+ protected:
+ // Retrieves the associated property set.
+ PropertySet* property_set() { return property_set_; }
+
+ private:
+ // Pointer to the PropertySet instance that this instance is a member of,
+ // no ownership is taken and |property_set_| must outlive this class.
+ PropertySet* property_set_;
+
+ bool is_valid_;
+
+ // Name of the property.
+ std::string name_;
+
+ DISALLOW_COPY_AND_ASSIGN(PropertyBase);
+};
+
+// PropertySet groups a collection of properties for a remote object
+// together into a single structure, fixing their types and name such
+// that calls made through it are type-safe.
+//
+// Clients always sub-class this to add the properties, and should always
+// provide a constructor that chains up to this and then calls
+// RegisterProperty() for each property defined.
+//
+// After creation, client code should call ConnectSignals() and most likely
+// GetAll() to seed initial values and update as changes occur.
+class CHROME_DBUS_EXPORT PropertySet {
+ public:
+ // Callback for changes to cached values of properties, either notified
+ // via signal, or as a result of calls to Get() and GetAll(). The |name|
+ // argument specifies the name of the property changed.
+ typedef base::Callback<void(const std::string& name)> PropertyChangedCallback;
+
+ // Constructs a property set, where |object_proxy| specifies the proxy for
+  // the remote object that these properties are for, care should be taken to
+ // ensure that this object does not outlive the lifetime of the proxy;
+ // |interface| specifies the D-Bus interface of these properties, and
+ // |property_changed_callback| specifies the callback for when properties
+ // are changed, this may be a NULL callback.
+ PropertySet(ObjectProxy* object_proxy, const std::string& interface,
+ const PropertyChangedCallback& property_changed_callback);
+
+ // Destructor; we don't hold on to any references or memory that needs
+ // explicit clean-up, but clang thinks we might.
+ virtual ~PropertySet();
+
+ // Registers a property, generally called from the subclass constructor;
+ // pass the |name| of the property as used in method calls and signals,
+ // and the pointer to the |property| member of the structure. This will
+ // call the PropertyBase::Init method.
+ void RegisterProperty(const std::string& name, PropertyBase* property);
+
+ // Connects property change notification signals to the object, generally
+ // called immediately after the object is created and before calls to other
+ // methods. Sub-classes may override to use different D-Bus signals.
+ virtual void ConnectSignals();
+
+ // Methods connected by ConnectSignals() and called by dbus:: when
+ // a property is changed. Sub-classes may override if the property
+ // changed signal provides different arguments.
+ virtual void ChangedReceived(Signal* signal);
+ virtual void ChangedConnected(const std::string& interface_name,
+ const std::string& signal_name,
+ bool success);
+
+ // Callback for Get() method, |success| indicates whether or not the
+  // value could be retrieved, if true the new value can be obtained by
+ // calling value() on the property.
+ typedef base::Callback<void(bool success)> GetCallback;
+
+ // Requests an updated value from the remote object for |property|
+ // incurring a round-trip. |callback| will be called when the new
+ // value is available. This may not be implemented by some interfaces,
+  // and may be overridden by sub-classes if interfaces use different
+ // method calls.
+ virtual void Get(PropertyBase* property, GetCallback callback);
+ virtual void OnGet(PropertyBase* property, GetCallback callback,
+ Response* response);
+
+ // The synchronous version of Get().
+ // This should never be used on an interactive thread.
+ virtual bool GetAndBlock(PropertyBase* property);
+
+ // Queries the remote object for values of all properties and updates
+ // initial values. Sub-classes may override to use a different D-Bus
+ // method, or if the remote object does not support retrieving all
+ // properties, either ignore or obtain each property value individually.
+ virtual void GetAll();
+ virtual void OnGetAll(Response* response);
+
+ // Callback for Set() method, |success| indicates whether or not the
+ // new property value was accepted by the remote object.
+ typedef base::Callback<void(bool success)> SetCallback;
+
+ // Requests that the remote object for |property| change the property to
+ // its new value. |callback| will be called to indicate the success or
+ // failure of the request, however the new value may not be available
+ // depending on the remote object. This method may be overridden by
+ // sub-classes if interfaces use different method calls.
+ virtual void Set(PropertyBase* property, SetCallback callback);
+ virtual void OnSet(PropertyBase* property, SetCallback callback,
+ Response* response);
+
+ // The synchronous version of Set().
+ // This should never be used on an interactive thread.
+ virtual bool SetAndBlock(PropertyBase* property);
+
+ // Update properties by reading an array of dictionary entries, each
+ // containing a string with the name and a variant with the value, from
+ // |message_reader|. Returns false if message is in incorrect format.
+ bool UpdatePropertiesFromReader(MessageReader* reader);
+
+ // Updates a single property by reading a string with the name and a
+ // variant with the value from |message_reader|. Returns false if message
+ // is in incorrect format, or property type doesn't match.
+ bool UpdatePropertyFromReader(MessageReader* reader);
+
+ // Calls the property changed callback passed to the constructor, used
+ // by sub-classes that do not call UpdatePropertiesFromReader() or
+ // UpdatePropertyFromReader(). Takes the |name| of the changed property.
+ void NotifyPropertyChanged(const std::string& name);
+
+ // Retrieves the object proxy this property set was initialized with,
+ // provided for sub-classes overriding methods that make D-Bus calls
+ // and for Property<>. Not permitted with const references to this class.
+ ObjectProxy* object_proxy() { return object_proxy_; }
+
+ // Retrieves the interface of this property set.
+ const std::string& interface() const { return interface_; }
+
+ protected:
+ // Get a weak pointer to this property set, provided so that sub-classes
+ // overriding methods that make D-Bus calls may use the existing (or
+ // override) callbacks without providing their own weak pointer factory.
+ base::WeakPtr<PropertySet> GetWeakPtr() {
+ return weak_ptr_factory_.GetWeakPtr();
+ }
+
+ private:
+ // Invalidates properties by reading an array of names, from
+ // |message_reader|. Returns false if message is in incorrect format.
+ bool InvalidatePropertiesFromReader(MessageReader* reader);
+
+ // Pointer to object proxy for making method calls, no ownership is taken
+ // so this must outlive this class.
+ ObjectProxy* object_proxy_;
+
+ // Interface of property, e.g. "org.chromium.ExampleService", this is
+ // distinct from the interface of the method call itself which is the
+ // general D-Bus Properties interface "org.freedesktop.DBus.Properties".
+ std::string interface_;
+
+ // Callback for property changes.
+ PropertyChangedCallback property_changed_callback_;
+
+ // Map of properties (as PropertyBase*) defined in the structure to
+ // names as used in D-Bus method calls and signals. The base pointer
+ // restricts property access via this map to type-unsafe and non-specific
+ // actions only.
+ typedef std::map<const std::string, PropertyBase*> PropertiesMap;
+ PropertiesMap properties_map_;
+
+ // Weak pointer factory as D-Bus callbacks may last longer than these
+ // objects.
+ base::WeakPtrFactory<PropertySet> weak_ptr_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(PropertySet);
+};
+
+// Property template, this defines the type-specific and type-safe methods
+// of properties that can be accessed as members of a PropertySet structure.
+//
+// Properties provide a cached value that has an initial sensible default
+// until the reply to PropertySet::GetAll() is retrieved and is updated by
+// all calls to that method, PropertySet::Get() and property changed signals
+// also handled by PropertySet. It can be obtained by calling value() on the
+// property.
+//
+// It is recommended that this cached value be used where necessary, with
+// code using PropertySet::PropertyChangedCallback to be notified of changes,
+// rather than incurring a round-trip to the remote object for each property
+// access.
+//
+// Where a round-trip is necessary, the Get() method is provided. And to
+// update the remote object value, the Set() method is also provided; these
+// both simply call methods on PropertySet.
+//
+// Handling of particular D-Bus types is performed via specialization,
+// typically the PopValueFromReader() and AppendSetValueToWriter() methods
+// will need to be provided, and in rare cases a constructor to provide a
+// default value. Specializations for basic D-Bus types, strings, object
+// paths and arrays are provided for you.
+template <class T>
+class CHROME_DBUS_EXPORT Property : public PropertyBase {
+ public:
+ Property() {}
+ ~Property() override {}
+
+ // Retrieves the cached value.
+ const T& value() const { return value_; }
+
+ // Requests an updated value from the remote object incurring a
+ // round-trip. |callback| will be called when the new value is available.
+ // This may not be implemented by some interfaces.
+ virtual void Get(dbus::PropertySet::GetCallback callback) {
+ property_set()->Get(this, callback);
+ }
+
+ // The synchronous version of Get().
+ // This should never be used on an interactive thread.
+ virtual bool GetAndBlock() {
+ return property_set()->GetAndBlock(this);
+ }
+
+ // Requests that the remote object change the property value to |value|,
+ // |callback| will be called to indicate the success or failure of the
+ // request, however the new value may not be available depending on the
+ // remote object.
+ virtual void Set(const T& value, dbus::PropertySet::SetCallback callback) {
+ set_value_ = value;
+ property_set()->Set(this, callback);
+ }
+
+ // The synchronous version of Set().
+ // This should never be used on an interactive thread.
+ virtual bool SetAndBlock(const T& value) {
+ set_value_ = value;
+ return property_set()->SetAndBlock(this);
+ }
+
+ // Method used by PropertySet to retrieve the value from a MessageReader,
+ // no knowledge of the contained type is required, this method returns
+ // true if its expected type was found, false if not.
+ bool PopValueFromReader(MessageReader* reader) override;
+
+ // Method used by PropertySet to append the set value to a MessageWriter,
+ // no knowledge of the contained type is required.
+ // Implementation provided by specialization.
+ void AppendSetValueToWriter(MessageWriter* writer) override;
+
+ // Method used by test and stub implementations of dbus::PropertySet::Set
+ // to replace the property value with the set value without using a
+ // dbus::MessageReader.
+ void ReplaceValueWithSetValue() override {
+ value_ = set_value_;
+ property_set()->NotifyPropertyChanged(name());
+ }
+
+ // Method used by test and stub implementations to directly set the
+ // value of a property.
+ void ReplaceValue(const T& value) {
+ value_ = value;
+ property_set()->NotifyPropertyChanged(name());
+ }
+
+ // Method used by test and stub implementations to directly set the
+ // |set_value_| of a property.
+ void ReplaceSetValueForTesting(const T& value) { set_value_ = value; }
+
+ private:
+ // Current cached value of the property.
+ T value_;
+
+ // Replacement value of the property.
+ T set_value_;
+};
+
+// Clang and GCC don't agree on how attributes should work for explicitly
+// instantiated templates. GCC ignores attributes on explicit instantiations
+// (and emits a warning) while Clang requires the visibility attribute on the
+// explicit instantiations for them to be visible to other compilation units.
+// Hopefully clang and GCC agree one day, and this can be cleaned up:
+// https://llvm.org/bugs/show_bug.cgi?id=24815
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wattributes"
+
+template <>
+CHROME_DBUS_EXPORT Property<uint8_t>::Property();
+template <>
+CHROME_DBUS_EXPORT bool Property<uint8_t>::PopValueFromReader(
+ MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void Property<uint8_t>::AppendSetValueToWriter(
+ MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT Property<uint8_t>;
+
+template <>
+CHROME_DBUS_EXPORT Property<bool>::Property();
+template <>
+CHROME_DBUS_EXPORT bool Property<bool>::PopValueFromReader(
+ MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void Property<bool>::AppendSetValueToWriter(
+ MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT Property<bool>;
+
+template <>
+CHROME_DBUS_EXPORT Property<int16_t>::Property();
+template <>
+CHROME_DBUS_EXPORT bool Property<int16_t>::PopValueFromReader(
+ MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void Property<int16_t>::AppendSetValueToWriter(
+ MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT Property<int16_t>;
+
+template <>
+CHROME_DBUS_EXPORT Property<uint16_t>::Property();
+template <>
+CHROME_DBUS_EXPORT bool Property<uint16_t>::PopValueFromReader(
+ MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void Property<uint16_t>::AppendSetValueToWriter(
+ MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT Property<uint16_t>;
+
+template <>
+CHROME_DBUS_EXPORT Property<int32_t>::Property();
+template <>
+CHROME_DBUS_EXPORT bool Property<int32_t>::PopValueFromReader(
+ MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void Property<int32_t>::AppendSetValueToWriter(
+ MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT Property<int32_t>;
+
+template <>
+CHROME_DBUS_EXPORT Property<uint32_t>::Property();
+template <>
+CHROME_DBUS_EXPORT bool Property<uint32_t>::PopValueFromReader(
+ MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void Property<uint32_t>::AppendSetValueToWriter(
+ MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT Property<uint32_t>;
+
+template <>
+CHROME_DBUS_EXPORT Property<int64_t>::Property();
+template <>
+CHROME_DBUS_EXPORT bool Property<int64_t>::PopValueFromReader(
+ MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void Property<int64_t>::AppendSetValueToWriter(
+ MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT Property<int64_t>;
+
+template <>
+CHROME_DBUS_EXPORT Property<uint64_t>::Property();
+template <>
+CHROME_DBUS_EXPORT bool Property<uint64_t>::PopValueFromReader(
+ MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void Property<uint64_t>::AppendSetValueToWriter(
+ MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT Property<uint64_t>;
+
+template <>
+CHROME_DBUS_EXPORT Property<double>::Property();
+template <>
+CHROME_DBUS_EXPORT bool Property<double>::PopValueFromReader(
+ MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void Property<double>::AppendSetValueToWriter(
+ MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT Property<double>;
+
+template <>
+CHROME_DBUS_EXPORT bool Property<std::string>::PopValueFromReader(
+ MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void Property<std::string>::AppendSetValueToWriter(
+ MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT Property<std::string>;
+
+template <>
+CHROME_DBUS_EXPORT bool Property<ObjectPath>::PopValueFromReader(
+ MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void Property<ObjectPath>::AppendSetValueToWriter(
+ MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT Property<ObjectPath>;
+
+template <>
+CHROME_DBUS_EXPORT bool Property<std::vector<std::string>>::PopValueFromReader(
+ MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void Property<
+ std::vector<std::string>>::AppendSetValueToWriter(MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT Property<std::vector<std::string>>;
+
+template <>
+CHROME_DBUS_EXPORT bool Property<std::vector<ObjectPath>>::PopValueFromReader(
+ MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void Property<
+ std::vector<ObjectPath>>::AppendSetValueToWriter(MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT Property<std::vector<ObjectPath>>;
+
+template <>
+CHROME_DBUS_EXPORT bool Property<std::vector<uint8_t>>::PopValueFromReader(
+ MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void Property<std::vector<uint8_t>>::AppendSetValueToWriter(
+ MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT Property<std::vector<uint8_t>>;
+
+template <>
+CHROME_DBUS_EXPORT bool
+Property<std::map<std::string, std::string>>::PopValueFromReader(
+ MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void
+Property<std::map<std::string, std::string>>::AppendSetValueToWriter(
+ MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT
+ Property<std::map<std::string, std::string>>;
+
+template <>
+CHROME_DBUS_EXPORT bool
+Property<std::vector<std::pair<std::vector<uint8_t>, uint16_t>>>::
+ PopValueFromReader(MessageReader* reader);
+template <>
+CHROME_DBUS_EXPORT void
+Property<std::vector<std::pair<std::vector<uint8_t>, uint16_t>>>::
+ AppendSetValueToWriter(MessageWriter* writer);
+extern template class CHROME_DBUS_EXPORT
+ Property<std::vector<std::pair<std::vector<uint8_t>, uint16_t>>>;
+
+#pragma GCC diagnostic pop
+
+} // namespace dbus
+
+#endif // DBUS_PROPERTY_H_
diff --git a/libchrome/dbus/scoped_dbus_error.cc b/libchrome/dbus/scoped_dbus_error.cc
new file mode 100644
index 0000000..fa04971
--- /dev/null
+++ b/libchrome/dbus/scoped_dbus_error.cc
@@ -0,0 +1,21 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dbus/scoped_dbus_error.h"
+
+namespace dbus {
+
+ScopedDBusError::ScopedDBusError() {
+ dbus_error_init(&error_);
+}
+
+ScopedDBusError::~ScopedDBusError() {
+ dbus_error_free(&error_);
+}
+
+bool ScopedDBusError::is_set() const {
+ return dbus_error_is_set(&error_);
+}
+
+} // namespace dbus
diff --git a/libchrome/dbus/scoped_dbus_error.h b/libchrome/dbus/scoped_dbus_error.h
new file mode 100644
index 0000000..1484dbb
--- /dev/null
+++ b/libchrome/dbus/scoped_dbus_error.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_SCOPED_DBUS_ERROR_H_
+#define DBUS_SCOPED_DBUS_ERROR_H_
+
+#include <dbus/dbus.h>
+
+#include "dbus/dbus_export.h"
+
+namespace dbus {
+
+// Utility class to ensure that DBusError is freed.
+class CHROME_DBUS_EXPORT ScopedDBusError {
+ public:
+ // Do not inline methods that call dbus_error_xxx() functions.
+ // See http://crbug.com/416628
+ ScopedDBusError();
+ ~ScopedDBusError();
+
+ DBusError* get() { return &error_; }
+ bool is_set() const;
+ const char* name() { return error_.name; }
+ const char* message() { return error_.message; }
+
+ private:
+ DBusError error_;
+};
+
+} // namespace dbus
+
+#endif // DBUS_SCOPED_DBUS_ERROR_H_
diff --git a/libchrome/dbus/string_util.cc b/libchrome/dbus/string_util.cc
new file mode 100644
index 0000000..7f71015
--- /dev/null
+++ b/libchrome/dbus/string_util.cc
@@ -0,0 +1,48 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dbus/string_util.h"
+
+#include <stddef.h>
+
+#include "base/strings/string_util.h"
+
+namespace dbus {
+
+// This implementation is based upon D-Bus Specification Version 0.19.
+bool IsValidObjectPath(const std::string& value) {
+ // A valid object path begins with '/'.
+ if (!base::StartsWith(value, "/", base::CompareCase::SENSITIVE))
+ return false;
+
+ // Elements are pieces delimited by '/'. For instance, "org", "chromium",
+ // "Foo" are elements of "/org/chromium/Foo".
+ int element_length = 0;
+ for (size_t i = 1; i < value.size(); ++i) {
+ const char c = value[i];
+ if (c == '/') {
+ // No element may be the empty string.
+ if (element_length == 0)
+ return false;
+ element_length = 0;
+ } else {
+ // Each element must only contain "[A-Z][a-z][0-9]_".
+ const bool is_valid_character =
+ ('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z') ||
+ ('0' <= c && c <= '9') || c == '_';
+ if (!is_valid_character)
+ return false;
+ element_length++;
+ }
+ }
+
+ // A trailing '/' character is not allowed unless the path is the root path.
+ if (value.size() > 1 &&
+ base::EndsWith(value, "/", base::CompareCase::SENSITIVE))
+ return false;
+
+ return true;
+}
+
+} // namespace dbus
diff --git a/libchrome/dbus/string_util.h b/libchrome/dbus/string_util.h
new file mode 100644
index 0000000..60c02d1
--- /dev/null
+++ b/libchrome/dbus/string_util.h
@@ -0,0 +1,19 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_STRING_UTIL_H_
+#define DBUS_STRING_UTIL_H_
+
+#include <string>
+
+#include "dbus/dbus_export.h"
+
+namespace dbus {
+
+// Returns true if the specified string is a valid object path.
+CHROME_DBUS_EXPORT bool IsValidObjectPath(const std::string& value);
+
+} // namespace dbus
+
+#endif // DBUS_STRING_UTIL_H_
diff --git a/libchrome/dbus/test_proto.proto b/libchrome/dbus/test_proto.proto
new file mode 100644
index 0000000..1ec128b
--- /dev/null
+++ b/libchrome/dbus/test_proto.proto
@@ -0,0 +1,15 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+syntax = "proto2";
+
+option optimize_for = LITE_RUNTIME;
+
+// This is a simple dummy protocol buffer that is used for testing handling of
+// protocol buffers in MessageReader and MessageWriter.
+
+message TestProto {
+ optional string text = 1;
+ optional int32 number = 2;
+}
diff --git a/libchrome/dbus/util.cc b/libchrome/dbus/util.cc
new file mode 100644
index 0000000..26e5c71
--- /dev/null
+++ b/libchrome/dbus/util.cc
@@ -0,0 +1,14 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dbus/util.h"
+
+namespace dbus {
+
+std::string GetAbsoluteMemberName(const std::string& interface_name,
+ const std::string& member_name) {
+ return interface_name + "." + member_name;
+}
+
+} // namespace dbus
diff --git a/libchrome/dbus/util.h b/libchrome/dbus/util.h
new file mode 100644
index 0000000..b05834d
--- /dev/null
+++ b/libchrome/dbus/util.h
@@ -0,0 +1,35 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_UTIL_H_
+#define DBUS_UTIL_H_
+
+#include <string>
+
+#include "dbus/dbus_export.h"
+
+namespace dbus {
+
+// Returns the absolute name of a member by concatenating |interface_name| and
+// |member_name|. e.g.:
+// GetAbsoluteMemberName(
+// "org.freedesktop.DBus.Properties",
+// "PropertiesChanged")
+//
+// => "org.freedesktop.DBus.Properties.PropertiesChanged"
+//
+CHROME_DBUS_EXPORT std::string GetAbsoluteMemberName(
+ const std::string& interface_name,
+ const std::string& member_name);
+
+// Similar to base::DeletePointer, but takes void* as an argument.
+// Used as DBusFreeFunction.
+template<typename T>
+void DeleteVoidPointer(void* memory) {
+ delete static_cast<T*>(memory);
+}
+
+} // namespace dbus
+
+#endif // DBUS_UTIL_H_
diff --git a/libchrome/dbus/values_util.cc b/libchrome/dbus/values_util.cc
new file mode 100644
index 0000000..bea7bea
--- /dev/null
+++ b/libchrome/dbus/values_util.cc
@@ -0,0 +1,311 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "dbus/values_util.h"
+
+#include <utility>
+
+#include "base/json/json_writer.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/values.h"
+#include "dbus/message.h"
+
+namespace dbus {
+
+namespace {
+
+// Returns whether |value| is exactly representable by double or not.
+template<typename T>
+bool IsExactlyRepresentableByDouble(T value) {
+ return value == static_cast<T>(static_cast<double>(value));
+}
+
+// Pops values from |reader| and appends them to |list_value|.
+bool PopListElements(MessageReader* reader, base::ListValue* list_value) {
+ while (reader->HasMoreData()) {
+ std::unique_ptr<base::Value> element_value = PopDataAsValue(reader);
+ if (!element_value)
+ return false;
+ list_value->Append(std::move(element_value));
+ }
+ return true;
+}
+
+// Pops dict-entries from |reader| and inserts them into |dictionary_value|.
+bool PopDictionaryEntries(MessageReader* reader,
+ base::DictionaryValue* dictionary_value) {
+ while (reader->HasMoreData()) {
+ DCHECK_EQ(Message::DICT_ENTRY, reader->GetDataType());
+ MessageReader entry_reader(NULL);
+ if (!reader->PopDictEntry(&entry_reader))
+ return false;
+ // Get key as a string.
+ std::string key_string;
+ if (entry_reader.GetDataType() == Message::STRING) {
+ // If the type of keys is STRING, pop it directly.
+ if (!entry_reader.PopString(&key_string))
+ return false;
+ } else {
+ // If the type of keys is not STRING, convert it to string.
+ std::unique_ptr<base::Value> key(PopDataAsValue(&entry_reader));
+ if (!key)
+ return false;
+ // Use JSONWriter to convert an arbitrary value to a string.
+ base::JSONWriter::Write(*key, &key_string);
+ }
+ // Get the value and set the key-value pair.
+ std::unique_ptr<base::Value> value = PopDataAsValue(&entry_reader);
+ if (!value)
+ return false;
+ dictionary_value->SetWithoutPathExpansion(key_string, std::move(value));
+ }
+ return true;
+}
+
+// Gets the D-Bus type signature for the value.
+std::string GetTypeSignature(const base::Value& value) {
+ switch (value.GetType()) {
+ case base::Value::TYPE_BOOLEAN:
+ return "b";
+ case base::Value::TYPE_INTEGER:
+ return "i";
+ case base::Value::TYPE_DOUBLE:
+ return "d";
+ case base::Value::TYPE_STRING:
+ return "s";
+ case base::Value::TYPE_BINARY:
+ return "ay";
+ case base::Value::TYPE_DICTIONARY:
+ return "a{sv}";
+ case base::Value::TYPE_LIST:
+ return "av";
+ default:
+ DLOG(ERROR) << "Unexpected type " << value.GetType();
+ return std::string();
+ }
+}
+
+} // namespace
+
+std::unique_ptr<base::Value> PopDataAsValue(MessageReader* reader) {
+ std::unique_ptr<base::Value> result;
+ switch (reader->GetDataType()) {
+ case Message::INVALID_DATA:
+ // Do nothing.
+ break;
+ case Message::BYTE: {
+ uint8_t value = 0;
+ if (reader->PopByte(&value))
+ result = base::MakeUnique<base::FundamentalValue>(value);
+ break;
+ }
+ case Message::BOOL: {
+ bool value = false;
+ if (reader->PopBool(&value))
+ result = base::MakeUnique<base::FundamentalValue>(value);
+ break;
+ }
+ case Message::INT16: {
+ int16_t value = 0;
+ if (reader->PopInt16(&value))
+ result = base::MakeUnique<base::FundamentalValue>(value);
+ break;
+ }
+ case Message::UINT16: {
+ uint16_t value = 0;
+ if (reader->PopUint16(&value))
+ result = base::MakeUnique<base::FundamentalValue>(value);
+ break;
+ }
+ case Message::INT32: {
+ int32_t value = 0;
+ if (reader->PopInt32(&value))
+ result = base::MakeUnique<base::FundamentalValue>(value);
+ break;
+ }
+ case Message::UINT32: {
+ uint32_t value = 0;
+ if (reader->PopUint32(&value)) {
+ result = base::MakeUnique<base::FundamentalValue>(
+ static_cast<double>(value));
+ }
+ break;
+ }
+ case Message::INT64: {
+ int64_t value = 0;
+ if (reader->PopInt64(&value)) {
+ DLOG_IF(WARNING, !IsExactlyRepresentableByDouble(value)) <<
+ value << " is not exactly representable by double";
+ result = base::MakeUnique<base::FundamentalValue>(
+ static_cast<double>(value));
+ }
+ break;
+ }
+ case Message::UINT64: {
+ uint64_t value = 0;
+ if (reader->PopUint64(&value)) {
+ DLOG_IF(WARNING, !IsExactlyRepresentableByDouble(value)) <<
+ value << " is not exactly representable by double";
+ result = base::MakeUnique<base::FundamentalValue>(
+ static_cast<double>(value));
+ }
+ break;
+ }
+ case Message::DOUBLE: {
+ double value = 0;
+ if (reader->PopDouble(&value))
+ result = base::MakeUnique<base::FundamentalValue>(value);
+ break;
+ }
+ case Message::STRING: {
+ std::string value;
+ if (reader->PopString(&value))
+ result = base::MakeUnique<base::StringValue>(value);
+ break;
+ }
+ case Message::OBJECT_PATH: {
+ ObjectPath value;
+ if (reader->PopObjectPath(&value))
+ result = base::MakeUnique<base::StringValue>(value.value());
+ break;
+ }
+ case Message::UNIX_FD: {
+ // Cannot distinguish a file descriptor from an int
+ NOTREACHED();
+ break;
+ }
+ case Message::ARRAY: {
+ MessageReader sub_reader(NULL);
+ if (reader->PopArray(&sub_reader)) {
+ // If the type of the array's element is DICT_ENTRY, create a
+ // DictionaryValue, otherwise create a ListValue.
+ if (sub_reader.GetDataType() == Message::DICT_ENTRY) {
+ std::unique_ptr<base::DictionaryValue> dictionary_value(
+ new base::DictionaryValue);
+ if (PopDictionaryEntries(&sub_reader, dictionary_value.get()))
+ result = std::move(dictionary_value);
+ } else {
+ std::unique_ptr<base::ListValue> list_value(new base::ListValue);
+ if (PopListElements(&sub_reader, list_value.get()))
+ result = std::move(list_value);
+ }
+ }
+ break;
+ }
+ case Message::STRUCT: {
+ MessageReader sub_reader(NULL);
+ if (reader->PopStruct(&sub_reader)) {
+ std::unique_ptr<base::ListValue> list_value(new base::ListValue);
+ if (PopListElements(&sub_reader, list_value.get()))
+ result = std::move(list_value);
+ }
+ break;
+ }
+ case Message::DICT_ENTRY:
+ // DICT_ENTRY must be popped as an element of an array.
+ NOTREACHED();
+ break;
+ case Message::VARIANT: {
+ MessageReader sub_reader(NULL);
+ if (reader->PopVariant(&sub_reader))
+ result = PopDataAsValue(&sub_reader);
+ break;
+ }
+ }
+ return result;
+}
+
+void AppendBasicTypeValueData(MessageWriter* writer, const base::Value& value) {
+ switch (value.GetType()) {
+ case base::Value::TYPE_BOOLEAN: {
+ bool bool_value = false;
+ bool success = value.GetAsBoolean(&bool_value);
+ DCHECK(success);
+ writer->AppendBool(bool_value);
+ break;
+ }
+ case base::Value::TYPE_INTEGER: {
+ int int_value = 0;
+ bool success = value.GetAsInteger(&int_value);
+ DCHECK(success);
+ writer->AppendInt32(int_value);
+ break;
+ }
+ case base::Value::TYPE_DOUBLE: {
+ double double_value = 0;
+ bool success = value.GetAsDouble(&double_value);
+ DCHECK(success);
+ writer->AppendDouble(double_value);
+ break;
+ }
+ case base::Value::TYPE_STRING: {
+ std::string string_value;
+ bool success = value.GetAsString(&string_value);
+ DCHECK(success);
+ writer->AppendString(string_value);
+ break;
+ }
+ default:
+ DLOG(ERROR) << "Unexpected type " << value.GetType();
+ break;
+ }
+}
+
+void AppendBasicTypeValueDataAsVariant(MessageWriter* writer,
+ const base::Value& value) {
+ MessageWriter sub_writer(NULL);
+ writer->OpenVariant(GetTypeSignature(value), &sub_writer);
+ AppendBasicTypeValueData(&sub_writer, value);
+ writer->CloseContainer(&sub_writer);
+}
+
+void AppendValueData(MessageWriter* writer, const base::Value& value) {
+ switch (value.GetType()) {
+ case base::Value::TYPE_DICTIONARY: {
+ const base::DictionaryValue* dictionary = NULL;
+ value.GetAsDictionary(&dictionary);
+ dbus::MessageWriter array_writer(NULL);
+ writer->OpenArray("{sv}", &array_writer);
+ for (base::DictionaryValue::Iterator iter(*dictionary);
+ !iter.IsAtEnd(); iter.Advance()) {
+ dbus::MessageWriter dict_entry_writer(NULL);
+ array_writer.OpenDictEntry(&dict_entry_writer);
+ dict_entry_writer.AppendString(iter.key());
+ AppendValueDataAsVariant(&dict_entry_writer, iter.value());
+ array_writer.CloseContainer(&dict_entry_writer);
+ }
+ writer->CloseContainer(&array_writer);
+ break;
+ }
+ case base::Value::TYPE_LIST: {
+ const base::ListValue* list = NULL;
+ value.GetAsList(&list);
+ dbus::MessageWriter array_writer(NULL);
+ writer->OpenArray("v", &array_writer);
+ for (const auto& value : *list) {
+ AppendValueDataAsVariant(&array_writer, *value);
+ }
+ writer->CloseContainer(&array_writer);
+ break;
+ }
+ case base::Value::TYPE_BOOLEAN:
+ case base::Value::TYPE_INTEGER:
+ case base::Value::TYPE_DOUBLE:
+ case base::Value::TYPE_STRING:
+ AppendBasicTypeValueData(writer, value);
+ break;
+ default:
+ DLOG(ERROR) << "Unexpected type: " << value.GetType();
+ }
+}
+
+void AppendValueDataAsVariant(MessageWriter* writer, const base::Value& value) {
+ MessageWriter variant_writer(NULL);
+ writer->OpenVariant(GetTypeSignature(value), &variant_writer);
+ AppendValueData(&variant_writer, value);
+ writer->CloseContainer(&variant_writer);
+}
+
+} // namespace dbus
diff --git a/libchrome/dbus/values_util.h b/libchrome/dbus/values_util.h
new file mode 100644
index 0000000..81b839b
--- /dev/null
+++ b/libchrome/dbus/values_util.h
@@ -0,0 +1,63 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_VALUES_UTIL_H_
+#define DBUS_VALUES_UTIL_H_
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "dbus/dbus_export.h"
+
+namespace base {
+class Value;
+}
+
+namespace dbus {
+
+class MessageReader;
+class MessageWriter;
+
+// Pops a value from |reader| as a base::Value.
+// Returns NULL if an error occurs.
+// Note: Integer values larger than int32_t (including uint32_t) are converted
+// to double. Non-string dictionary keys are converted to strings.
+CHROME_DBUS_EXPORT std::unique_ptr<base::Value> PopDataAsValue(
+ MessageReader* reader);
+
+// Appends a basic type value to |writer|. Basic types are BOOLEAN, INTEGER,
+// DOUBLE, and STRING. Use this function for values that are known to be basic
+// types and to handle basic type members of collections that should not
+// have type "a{sv}" or "av". Otherwise, use AppendValueData.
+CHROME_DBUS_EXPORT void AppendBasicTypeValueData(MessageWriter* writer,
+ const base::Value& value);
+
+// Appends a basic type value to |writer| as a variant. Basic types are BOOLEAN,
+// INTEGER, DOUBLE, and STRING. Use this function for values that are known to
+// be basic types and to handle basic type members of collections that should
+// not have type "a{sv}" or "av". Otherwise, use AppendValueDataAsVariant.
+CHROME_DBUS_EXPORT void AppendBasicTypeValueDataAsVariant(
+ MessageWriter* writer,
+ const base::Value& value);
+
+// Appends a value to |writer|. Value can be a basic type, as well as a
+// collection type, such as dictionary or list. Collections will be recursively
+// written as variant containers, i.e. dictionaries will be written with type
+// a{sv} and lists with type av. Any sub-dictionaries or sub-lists will also
+// have these types.
+CHROME_DBUS_EXPORT void AppendValueData(MessageWriter* writer,
+ const base::Value& value);
+
+// Appends a value to |writer| as a variant. Value can be a basic type, as well
+// as a collection type, such as dictionary or list. Collections will be
+// recursively written as variant containers, i.e. dictionaries will be written
+// with type a{sv} and lists with type av. Any sub-dictionaries or sub-lists
+// will also have these types.
+CHROME_DBUS_EXPORT void AppendValueDataAsVariant(MessageWriter* writer,
+ const base::Value& value);
+
+} // namespace dbus
+
+#endif // DBUS_VALUES_UTIL_H_
diff --git a/libchrome/sandbox/BUILD.gn b/libchrome/sandbox/BUILD.gn
new file mode 100644
index 0000000..8ca3574
--- /dev/null
+++ b/libchrome/sandbox/BUILD.gn
@@ -0,0 +1,21 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Meta-target that forwards to the proper platform one.
+group("sandbox") {
+ if (is_win) {
+ public_deps = [
+ "//sandbox/win:sandbox",
+ ]
+ } else if (is_mac) {
+ public_deps = [
+ "//sandbox/mac:sandbox",
+ "//sandbox/mac:seatbelt",
+ ]
+ } else if (is_linux || is_android) {
+ public_deps = [
+ "//sandbox/linux:sandbox",
+ ]
+ }
+}
diff --git a/libchrome/sandbox/OWNERS b/libchrome/sandbox/OWNERS
new file mode 100644
index 0000000..5d3f6ff
--- /dev/null
+++ b/libchrome/sandbox/OWNERS
@@ -0,0 +1,3 @@
+cpu@chromium.org
+jln@chromium.org
+jschuh@chromium.org
diff --git a/libchrome/sandbox/linux/BUILD.gn b/libchrome/sandbox/linux/BUILD.gn
new file mode 100644
index 0000000..a5c041f
--- /dev/null
+++ b/libchrome/sandbox/linux/BUILD.gn
@@ -0,0 +1,461 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/features.gni")
+import("//build/config/nacl/config.gni")
+import("//testing/test.gni")
+
+if (is_android) {
+ import("//build/config/android/rules.gni")
+}
+
+declare_args() {
+ compile_suid_client = is_linux
+
+ compile_credentials = is_linux
+
+ # On Android, use plain GTest.
+ use_base_test_suite = is_linux
+}
+
+if (is_nacl_nonsfi) {
+ config("nacl_nonsfi_warnings") {
+    # There are a number of platform-specific functions in
+ # seccomp-bpf syscall helpers, which are not being used.
+ cflags = [ "-Wno-unused-function" ]
+ }
+}
+
+# We have two principal targets: sandbox and sandbox_linux_unittests
+# All other targets are listed as dependencies.
+# There is one notable exception: for historical reasons, chrome_sandbox is
+# the setuid sandbox and is its own target.
+
+group("sandbox") {
+ public_deps = [
+ ":sandbox_services",
+ ]
+
+ if (compile_suid_client || is_nacl_nonsfi) {
+ public_deps += [ ":suid_sandbox_client" ]
+ }
+ if (use_seccomp_bpf || is_nacl_nonsfi) {
+ public_deps += [
+ ":seccomp_bpf",
+ ":seccomp_bpf_helpers",
+ ]
+ }
+}
+
+source_set("sandbox_linux_test_utils") {
+ testonly = true
+ sources = [
+ "tests/sandbox_test_runner.cc",
+ "tests/sandbox_test_runner.h",
+ "tests/sandbox_test_runner_function_pointer.cc",
+ "tests/sandbox_test_runner_function_pointer.h",
+ "tests/unit_tests.cc",
+ "tests/unit_tests.h",
+ ]
+
+ deps = [
+ "//testing/gtest",
+ ]
+
+ if (!is_nacl_nonsfi) {
+ sources += [
+ "tests/test_utils.cc",
+ "tests/test_utils.h",
+ ]
+ }
+
+ if (use_seccomp_bpf || is_nacl_nonsfi) {
+ sources += [
+ "seccomp-bpf/bpf_tester_compatibility_delegate.h",
+ "seccomp-bpf/bpf_tests.h",
+ "seccomp-bpf/sandbox_bpf_test_runner.cc",
+ "seccomp-bpf/sandbox_bpf_test_runner.h",
+ ]
+ deps += [ ":seccomp_bpf" ]
+ }
+
+ if (use_base_test_suite) {
+ deps += [ "//base/test:test_support" ]
+ defines = [ "SANDBOX_USES_BASE_TEST_SUITE" ]
+ }
+}
+
+# Sources for sandbox_linux_unittests.
+source_set("sandbox_linux_unittests_sources") {
+ testonly = true
+
+ sources = [
+ "services/proc_util_unittest.cc",
+ "services/resource_limits_unittests.cc",
+ "services/scoped_process_unittest.cc",
+ "services/syscall_wrappers_unittest.cc",
+ "services/thread_helpers_unittests.cc",
+ "services/yama_unittests.cc",
+ "syscall_broker/broker_file_permission_unittest.cc",
+ "syscall_broker/broker_process_unittest.cc",
+ "tests/main.cc",
+ "tests/scoped_temporary_file.cc",
+ "tests/scoped_temporary_file.h",
+ "tests/scoped_temporary_file_unittest.cc",
+ "tests/test_utils_unittest.cc",
+ "tests/unit_tests_unittest.cc",
+ ]
+
+ deps = [
+ ":sandbox",
+ ":sandbox_linux_test_utils",
+ "//base",
+ "//testing/gtest",
+ ]
+
+ if (use_base_test_suite) {
+ deps += [ "//base/test:test_support" ]
+ defines = [ "SANDBOX_USES_BASE_TEST_SUITE" ]
+ }
+
+ if (compile_suid_client) {
+ sources += [
+ "suid/client/setuid_sandbox_client_unittest.cc",
+ "suid/client/setuid_sandbox_host_unittest.cc",
+ ]
+ }
+ if (use_seccomp_bpf) {
+ sources += [
+ "bpf_dsl/bpf_dsl_unittest.cc",
+ "bpf_dsl/codegen_unittest.cc",
+ "bpf_dsl/cons_unittest.cc",
+ "bpf_dsl/dump_bpf.cc",
+ "bpf_dsl/dump_bpf.h",
+ "bpf_dsl/syscall_set_unittest.cc",
+ "bpf_dsl/test_trap_registry.cc",
+ "bpf_dsl/test_trap_registry.h",
+ "bpf_dsl/test_trap_registry_unittest.cc",
+ "bpf_dsl/verifier.cc",
+ "bpf_dsl/verifier.h",
+ "integration_tests/bpf_dsl_seccomp_unittest.cc",
+ "integration_tests/seccomp_broker_process_unittest.cc",
+ "seccomp-bpf-helpers/baseline_policy_unittest.cc",
+ "seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc",
+ "seccomp-bpf/bpf_tests_unittest.cc",
+ "seccomp-bpf/sandbox_bpf_unittest.cc",
+ "seccomp-bpf/syscall_unittest.cc",
+ "seccomp-bpf/trap_unittest.cc",
+ ]
+ deps += [ ":bpf_dsl_golden" ]
+ }
+ if (compile_credentials) {
+ sources += [
+ "integration_tests/namespace_unix_domain_socket_unittest.cc",
+ "services/credentials_unittest.cc",
+ "services/namespace_utils_unittest.cc",
+ ]
+
+ if (use_base_test_suite) {
+ # Tests that use advanced features not available in stock GTest.
+ sources += [ "services/namespace_sandbox_unittest.cc" ]
+ }
+
+ # For credentials_unittest.cc
+ configs += [ "//build/config/linux:libcap" ]
+ }
+}
+
+action("bpf_dsl_golden") {
+ script = "bpf_dsl/golden/generate.py"
+ inputs = [
+ "bpf_dsl/golden/i386/ArgSizePolicy.txt",
+ "bpf_dsl/golden/i386/BasicPolicy.txt",
+ "bpf_dsl/golden/i386/ElseIfPolicy.txt",
+ "bpf_dsl/golden/i386/MaskingPolicy.txt",
+ "bpf_dsl/golden/i386/MoreBooleanLogicPolicy.txt",
+ "bpf_dsl/golden/i386/NegativeConstantsPolicy.txt",
+ "bpf_dsl/golden/i386/SwitchPolicy.txt",
+ "bpf_dsl/golden/x86-64/ArgSizePolicy.txt",
+ "bpf_dsl/golden/x86-64/BasicPolicy.txt",
+ "bpf_dsl/golden/x86-64/BooleanLogicPolicy.txt",
+ "bpf_dsl/golden/x86-64/ElseIfPolicy.txt",
+ "bpf_dsl/golden/x86-64/MaskingPolicy.txt",
+ "bpf_dsl/golden/x86-64/MoreBooleanLogicPolicy.txt",
+ "bpf_dsl/golden/x86-64/NegativeConstantsPolicy.txt",
+ "bpf_dsl/golden/x86-64/SwitchPolicy.txt",
+ ]
+ outputs = [
+ "$target_gen_dir/bpf_dsl/golden/golden_files.h",
+ ]
+ args =
+ rebase_path(outputs, root_build_dir) + rebase_path(inputs, root_build_dir)
+}
+
+
+test("sandbox_linux_unittests") {
+ deps = [
+ ":sandbox_linux_unittests_sources",
+ "//build/config/sanitizers:deps",
+ ]
+ if (is_android) {
+ use_raw_android_executable = true
+ }
+}
+
+component("seccomp_bpf") {
+ sources = [
+ "bpf_dsl/bpf_dsl.cc",
+ "bpf_dsl/bpf_dsl.h",
+ "bpf_dsl/bpf_dsl_forward.h",
+ "bpf_dsl/bpf_dsl_impl.h",
+ "bpf_dsl/codegen.cc",
+ "bpf_dsl/codegen.h",
+ "bpf_dsl/cons.h",
+ "bpf_dsl/errorcode.h",
+ "bpf_dsl/linux_syscall_ranges.h",
+ "bpf_dsl/policy.cc",
+ "bpf_dsl/policy.h",
+ "bpf_dsl/policy_compiler.cc",
+ "bpf_dsl/policy_compiler.h",
+ "bpf_dsl/seccomp_macros.h",
+ "bpf_dsl/syscall_set.cc",
+ "bpf_dsl/syscall_set.h",
+ "bpf_dsl/trap_registry.h",
+ "seccomp-bpf/die.cc",
+ "seccomp-bpf/die.h",
+ "seccomp-bpf/sandbox_bpf.cc",
+ "seccomp-bpf/sandbox_bpf.h",
+ "seccomp-bpf/syscall.cc",
+ "seccomp-bpf/syscall.h",
+ "seccomp-bpf/trap.cc",
+ "seccomp-bpf/trap.h",
+ ]
+ defines = [ "SANDBOX_IMPLEMENTATION" ]
+
+ public_deps = [
+ ":sandbox_services_headers",
+ ]
+ deps = [
+ ":sandbox_services",
+ "//base",
+ ]
+
+ if (is_nacl_nonsfi) {
+ cflags = [ "-fgnu-inline-asm" ]
+ sources -= [
+ "bpf_dsl/bpf_dsl_forward.h",
+ "bpf_dsl/bpf_dsl_impl.h",
+ "bpf_dsl/cons.h",
+ "bpf_dsl/errorcode.h",
+ "bpf_dsl/linux_syscall_ranges.h",
+ "bpf_dsl/seccomp_macros.h",
+ "bpf_dsl/trap_registry.h",
+ ]
+ }
+}
+
+component("seccomp_bpf_helpers") {
+ sources = [
+ "seccomp-bpf-helpers/baseline_policy.cc",
+ "seccomp-bpf-helpers/baseline_policy.h",
+ "seccomp-bpf-helpers/sigsys_handlers.cc",
+ "seccomp-bpf-helpers/sigsys_handlers.h",
+ "seccomp-bpf-helpers/syscall_parameters_restrictions.cc",
+ "seccomp-bpf-helpers/syscall_parameters_restrictions.h",
+ "seccomp-bpf-helpers/syscall_sets.cc",
+ "seccomp-bpf-helpers/syscall_sets.h",
+ ]
+ defines = [ "SANDBOX_IMPLEMENTATION" ]
+
+ deps = [
+ ":sandbox_services",
+ ":seccomp_bpf",
+ "//base",
+ ]
+
+ if (is_nacl_nonsfi) {
+ sources -= [
+ "seccomp-bpf-helpers/baseline_policy.cc",
+ "seccomp-bpf-helpers/baseline_policy.h",
+ "seccomp-bpf-helpers/syscall_sets.cc",
+ "seccomp-bpf-helpers/syscall_sets.h",
+ ]
+ configs += [ ":nacl_nonsfi_warnings" ]
+ }
+}
+
+if (is_linux) {
+ # The setuid sandbox for Linux.
+ executable("chrome_sandbox") {
+ sources = [
+ "suid/common/sandbox.h",
+ "suid/common/suid_unsafe_environment_variables.h",
+ "suid/process_util.h",
+ "suid/process_util_linux.c",
+ "suid/sandbox.c",
+ ]
+
+ cflags = [
+ # For ULLONG_MAX
+ "-std=gnu99",
+
+ # These files have a suspicious comparison.
+ # TODO fix this and re-enable this warning.
+ "-Wno-sign-compare",
+ ]
+
+ import("//build/config/compiler/compiler.gni")
+ import("//build/config/sanitizers/sanitizers.gni")
+ if (is_component_build || using_sanitizer) {
+ # WARNING! We remove this config so that we don't accidentally
+ # pick up the //build/config:rpath_for_built_shared_libraries
+ # sub-config. However, this means that we need to duplicate any
+ # other flags that executable_config might have.
+ configs -= [ "//build/config:executable_config" ]
+ if (!use_gold) {
+ ldflags = [ "-Wl,--disable-new-dtags" ]
+ }
+ }
+
+ # We also do not want to pick up any of the other sanitizer
+ # flags (i.e. we do not want to build w/ the sanitizers at all).
+ # This is safe to delete unconditionally, because it is part of the
+ # default configs and empty when not using the sanitizers.
+ configs -= [ "//build/config/sanitizers:default_sanitizer_flags" ]
+ }
+}
+
+component("sandbox_services") {
+ sources = [
+ "services/init_process_reaper.cc",
+ "services/init_process_reaper.h",
+ "services/proc_util.cc",
+ "services/proc_util.h",
+ "services/resource_limits.cc",
+ "services/resource_limits.h",
+ "services/scoped_process.cc",
+ "services/scoped_process.h",
+ "services/syscall_wrappers.cc",
+ "services/syscall_wrappers.h",
+ "services/thread_helpers.cc",
+ "services/thread_helpers.h",
+ "services/yama.cc",
+ "services/yama.h",
+ "syscall_broker/broker_channel.cc",
+ "syscall_broker/broker_channel.h",
+ "syscall_broker/broker_client.cc",
+ "syscall_broker/broker_client.h",
+ "syscall_broker/broker_common.h",
+ "syscall_broker/broker_file_permission.cc",
+ "syscall_broker/broker_file_permission.h",
+ "syscall_broker/broker_host.cc",
+ "syscall_broker/broker_host.h",
+ "syscall_broker/broker_policy.cc",
+ "syscall_broker/broker_policy.h",
+ "syscall_broker/broker_process.cc",
+ "syscall_broker/broker_process.h",
+ ]
+
+ defines = [ "SANDBOX_IMPLEMENTATION" ]
+
+ public_deps = []
+ deps = [
+ "//base",
+ ]
+
+ if (compile_credentials || is_nacl_nonsfi) {
+ sources += [
+ "services/credentials.cc",
+ "services/credentials.h",
+ "services/namespace_sandbox.cc",
+ "services/namespace_sandbox.h",
+ "services/namespace_utils.cc",
+ "services/namespace_utils.h",
+ ]
+
+ public_deps += [ ":sandbox_services_headers" ]
+ }
+
+ if (is_nacl_nonsfi) {
+ cflags = [ "-fgnu-inline-asm" ]
+
+ sources -= [
+ "services/init_process_reaper.cc",
+ "services/init_process_reaper.h",
+ "services/scoped_process.cc",
+ "services/scoped_process.h",
+ "services/yama.cc",
+ "services/yama.h",
+ "syscall_broker/broker_channel.cc",
+ "syscall_broker/broker_channel.h",
+ "syscall_broker/broker_client.cc",
+ "syscall_broker/broker_client.h",
+ "syscall_broker/broker_common.h",
+ "syscall_broker/broker_file_permission.cc",
+ "syscall_broker/broker_file_permission.h",
+ "syscall_broker/broker_host.cc",
+ "syscall_broker/broker_host.h",
+ "syscall_broker/broker_policy.cc",
+ "syscall_broker/broker_policy.h",
+ "syscall_broker/broker_process.cc",
+ "syscall_broker/broker_process.h",
+ ]
+ }
+}
+
+source_set("sandbox_services_headers") {
+ sources = [
+ "system_headers/arm64_linux_syscalls.h",
+ "system_headers/arm64_linux_ucontext.h",
+ "system_headers/arm_linux_syscalls.h",
+ "system_headers/arm_linux_ucontext.h",
+ "system_headers/i386_linux_ucontext.h",
+ "system_headers/linux_futex.h",
+ "system_headers/linux_seccomp.h",
+ "system_headers/linux_signal.h",
+ "system_headers/linux_syscalls.h",
+ "system_headers/linux_time.h",
+ "system_headers/linux_ucontext.h",
+ "system_headers/x86_32_linux_syscalls.h",
+ "system_headers/x86_64_linux_syscalls.h",
+ ]
+}
+
+if (compile_suid_client || is_nacl_nonsfi) {
+ component("suid_sandbox_client") {
+ sources = [
+ "suid/client/setuid_sandbox_client.cc",
+ "suid/client/setuid_sandbox_client.h",
+ "suid/client/setuid_sandbox_host.cc",
+ "suid/client/setuid_sandbox_host.h",
+ "suid/common/sandbox.h",
+ "suid/common/suid_unsafe_environment_variables.h",
+ ]
+ defines = [ "SANDBOX_IMPLEMENTATION" ]
+
+ deps = [
+ ":sandbox_services",
+ "//base",
+ ]
+
+ if (is_nacl_nonsfi) {
+ sources -= [
+ "suid/client/setuid_sandbox_host.cc",
+ "suid/client/setuid_sandbox_host.h",
+ "suid/common/sandbox.h",
+ "suid/common/suid_unsafe_environment_variables.h",
+ ]
+ }
+ }
+}
+
+if (is_android) {
+ # TODO(GYP_GONE) Delete this after we've converted everything to GN.
+ group("sandbox_linux_unittests_deps") {
+ testonly = true
+ deps = [
+ ":sandbox_linux_unittests",
+ ]
+ }
+}
diff --git a/libchrome/sandbox/linux/DEPS b/libchrome/sandbox/linux/DEPS
new file mode 100644
index 0000000..3912859
--- /dev/null
+++ b/libchrome/sandbox/linux/DEPS
@@ -0,0 +1,25 @@
+include_rules = [
+ # First, exclude everything.
+ # Exclude a few dependencies that are included in the root DEPS and that we
+ # don't need.
+ # Sadly, there is no way to exclude all root DEPS since the root has no name.
+ "-ipc",
+ "-library_loaders",
+ "-third_party",
+ "-url",
+ # Make sure that each subdirectory has to declare its dependencies in
+ # sandbox/ explicitly.
+ "-sandbox/linux",
+
+ # Second, add what we want to allow.
+ # Anything included from sandbox/linux must be declared after this line or in
+ # a more specific DEPS file.
+ # base/, build/ and testing/ are already included in the global DEPS file,
+ # but be explicit.
+ "+base",
+ "+build",
+ "+testing",
+ "+sandbox/sandbox_export.h",
+ # Everyone can use tests/
+ "+sandbox/linux/tests",
+]
diff --git a/libchrome/sandbox/linux/OWNERS b/libchrome/sandbox/linux/OWNERS
new file mode 100644
index 0000000..99ef1bd
--- /dev/null
+++ b/libchrome/sandbox/linux/OWNERS
@@ -0,0 +1,4 @@
+jln@chromium.org
+jorgelo@chromium.org
+mdempsky@chromium.org
+rickyz@chromium.org
diff --git a/libchrome/sandbox/linux/bpf_dsl/DEPS b/libchrome/sandbox/linux/bpf_dsl/DEPS
new file mode 100644
index 0000000..70d9b18
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+sandbox/linux/system_headers",
+]
diff --git a/libchrome/sandbox/linux/bpf_dsl/bpf_dsl.cc b/libchrome/sandbox/linux/bpf_dsl/bpf_dsl.cc
new file mode 100644
index 0000000..fed6368
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/bpf_dsl.cc
@@ -0,0 +1,343 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/bpf_dsl/bpf_dsl.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "sandbox/linux/bpf_dsl/bpf_dsl_impl.h"
+#include "sandbox/linux/bpf_dsl/errorcode.h"
+#include "sandbox/linux/bpf_dsl/policy_compiler.h"
+#include "sandbox/linux/system_headers/linux_seccomp.h"
+
+namespace sandbox {
+namespace bpf_dsl {
+namespace {
+
+class ReturnResultExprImpl : public internal::ResultExprImpl {
+ public:
+ explicit ReturnResultExprImpl(uint32_t ret) : ret_(ret) {}
+ ~ReturnResultExprImpl() override {}
+
+ CodeGen::Node Compile(PolicyCompiler* pc) const override {
+ return pc->Return(ret_);
+ }
+
+ bool IsAllow() const override { return IsAction(SECCOMP_RET_ALLOW); }
+
+ bool IsDeny() const override {
+ return IsAction(SECCOMP_RET_ERRNO) || IsAction(SECCOMP_RET_KILL);
+ }
+
+ private:
+ bool IsAction(uint32_t action) const {
+ return (ret_ & SECCOMP_RET_ACTION) == action;
+ }
+
+ uint32_t ret_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReturnResultExprImpl);
+};
+
+class TrapResultExprImpl : public internal::ResultExprImpl {
+ public:
+ TrapResultExprImpl(TrapRegistry::TrapFnc func, const void* arg, bool safe)
+ : func_(func), arg_(arg), safe_(safe) {
+ DCHECK(func_);
+ }
+ ~TrapResultExprImpl() override {}
+
+ CodeGen::Node Compile(PolicyCompiler* pc) const override {
+ return pc->Trap(func_, arg_, safe_);
+ }
+
+ bool HasUnsafeTraps() const override { return safe_ == false; }
+
+ bool IsDeny() const override { return true; }
+
+ private:
+ TrapRegistry::TrapFnc func_;
+ const void* arg_;
+ bool safe_;
+
+ DISALLOW_COPY_AND_ASSIGN(TrapResultExprImpl);
+};
+
+class IfThenResultExprImpl : public internal::ResultExprImpl {
+ public:
+ IfThenResultExprImpl(BoolExpr cond,
+ ResultExpr then_result,
+ ResultExpr else_result)
+ : cond_(std::move(cond)),
+ then_result_(std::move(then_result)),
+ else_result_(std::move(else_result)) {}
+ ~IfThenResultExprImpl() override {}
+
+ CodeGen::Node Compile(PolicyCompiler* pc) const override {
+ // We compile the "then" and "else" expressions in separate statements so
+ // they have a defined sequencing. See https://crbug.com/529480.
+ CodeGen::Node then_node = then_result_->Compile(pc);
+ CodeGen::Node else_node = else_result_->Compile(pc);
+ return cond_->Compile(pc, then_node, else_node);
+ }
+
+ bool HasUnsafeTraps() const override {
+ return then_result_->HasUnsafeTraps() || else_result_->HasUnsafeTraps();
+ }
+
+ private:
+ BoolExpr cond_;
+ ResultExpr then_result_;
+ ResultExpr else_result_;
+
+ DISALLOW_COPY_AND_ASSIGN(IfThenResultExprImpl);
+};
+
+class ConstBoolExprImpl : public internal::BoolExprImpl {
+ public:
+ ConstBoolExprImpl(bool value) : value_(value) {}
+ ~ConstBoolExprImpl() override {}
+
+ CodeGen::Node Compile(PolicyCompiler* pc,
+ CodeGen::Node then_node,
+ CodeGen::Node else_node) const override {
+ return value_ ? then_node : else_node;
+ }
+
+ private:
+ bool value_;
+
+ DISALLOW_COPY_AND_ASSIGN(ConstBoolExprImpl);
+};
+
+class MaskedEqualBoolExprImpl : public internal::BoolExprImpl {
+ public:
+ MaskedEqualBoolExprImpl(int argno,
+ size_t width,
+ uint64_t mask,
+ uint64_t value)
+ : argno_(argno), width_(width), mask_(mask), value_(value) {}
+ ~MaskedEqualBoolExprImpl() override {}
+
+ CodeGen::Node Compile(PolicyCompiler* pc,
+ CodeGen::Node then_node,
+ CodeGen::Node else_node) const override {
+ return pc->MaskedEqual(argno_, width_, mask_, value_, then_node, else_node);
+ }
+
+ private:
+ int argno_;
+ size_t width_;
+ uint64_t mask_;
+ uint64_t value_;
+
+ DISALLOW_COPY_AND_ASSIGN(MaskedEqualBoolExprImpl);
+};
+
+class NegateBoolExprImpl : public internal::BoolExprImpl {
+ public:
+ explicit NegateBoolExprImpl(BoolExpr cond) : cond_(std::move(cond)) {}
+ ~NegateBoolExprImpl() override {}
+
+ CodeGen::Node Compile(PolicyCompiler* pc,
+ CodeGen::Node then_node,
+ CodeGen::Node else_node) const override {
+ return cond_->Compile(pc, else_node, then_node);
+ }
+
+ private:
+ BoolExpr cond_;
+
+ DISALLOW_COPY_AND_ASSIGN(NegateBoolExprImpl);
+};
+
+class AndBoolExprImpl : public internal::BoolExprImpl {
+ public:
+ AndBoolExprImpl(BoolExpr lhs, BoolExpr rhs)
+ : lhs_(std::move(lhs)), rhs_(std::move(rhs)) {}
+ ~AndBoolExprImpl() override {}
+
+ CodeGen::Node Compile(PolicyCompiler* pc,
+ CodeGen::Node then_node,
+ CodeGen::Node else_node) const override {
+ return lhs_->Compile(pc, rhs_->Compile(pc, then_node, else_node),
+ else_node);
+ }
+
+ private:
+ BoolExpr lhs_;
+ BoolExpr rhs_;
+
+ DISALLOW_COPY_AND_ASSIGN(AndBoolExprImpl);
+};
+
+// BoolExprImpl implementing short-circuit logical OR: |rhs_| is only
+// compiled as a continuation reached when |lhs_| evaluates to false.
+class OrBoolExprImpl : public internal::BoolExprImpl {
+ public:
+ OrBoolExprImpl(BoolExpr lhs, BoolExpr rhs)
+ : lhs_(std::move(lhs)), rhs_(std::move(rhs)) {}
+ ~OrBoolExprImpl() override {}
+
+ CodeGen::Node Compile(PolicyCompiler* pc,
+ CodeGen::Node then_node,
+ CodeGen::Node else_node) const override {
+ // lhs true -> then_node; lhs false -> try rhs.
+ return lhs_->Compile(pc, then_node,
+ rhs_->Compile(pc, then_node, else_node));
+ }
+
+ private:
+ BoolExpr lhs_;
+ BoolExpr rhs_;
+
+ DISALLOW_COPY_AND_ASSIGN(OrBoolExprImpl);
+};
+
+} // namespace
+
+namespace internal {
+
+// Conservative default implementations for the ResultExprImpl predicates:
+// a generic result expression contains no unsafe traps and is neither a
+// plain "allow" nor a plain "deny". Subclasses override as appropriate.
+bool ResultExprImpl::HasUnsafeTraps() const {
+ return false;
+}
+
+bool ResultExprImpl::IsAllow() const {
+ return false;
+}
+
+bool ResultExprImpl::IsDeny() const {
+ return false;
+}
+
+// Returns an all-ones bitmask matching the width (in bytes) of a system
+// call argument. Only 32-bit (4) and 64-bit (8) arguments are supported;
+// any other size is a programming error.
+uint64_t DefaultMask(size_t size) {
+ switch (size) {
+ case 4:
+ return std::numeric_limits<uint32_t>::max();
+ case 8:
+ return std::numeric_limits<uint64_t>::max();
+ default:
+ CHECK(false) << "Unimplemented DefaultMask case";
+ return 0;
+ }
+}
+
+// Builds the masked-equality primitive used by Arg<T>'s comparison
+// operators: "(arg[num] & mask) == val" for a |size|-byte argument.
+BoolExpr ArgEq(int num, size_t size, uint64_t mask, uint64_t val) {
+ // If this is changed, update Arg<T>::EqualTo's static_cast rules
+ // accordingly.
+ CHECK(size == 4 || size == 8);
+
+ return std::make_shared<MaskedEqualBoolExprImpl>(num, size, mask, val);
+}
+
+} // namespace internal
+
+// Public factory functions for the DSL's result and boolean expressions.
+// Each simply wraps the corresponding (file-local) *Impl class.
+ResultExpr Allow() {
+ return std::make_shared<ReturnResultExprImpl>(SECCOMP_RET_ALLOW);
+}
+
+ResultExpr Error(int err) {
+ // Errno values are encoded in the low bits of the seccomp return value,
+ // so they must lie within the representable range.
+ CHECK(err >= ErrorCode::ERR_MIN_ERRNO && err <= ErrorCode::ERR_MAX_ERRNO);
+ return std::make_shared<ReturnResultExprImpl>(SECCOMP_RET_ERRNO + err);
+}
+
+ResultExpr Kill() {
+ return std::make_shared<ReturnResultExprImpl>(SECCOMP_RET_KILL);
+}
+
+ResultExpr Trace(uint16_t aux) {
+ // |aux| is delivered to the tracer via PTRACE_GETEVENTMSG.
+ return std::make_shared<ReturnResultExprImpl>(SECCOMP_RET_TRACE + aux);
+}
+
+ResultExpr Trap(TrapRegistry::TrapFnc trap_func, const void* aux) {
+ return std::make_shared<TrapResultExprImpl>(trap_func, aux, true /* safe */);
+}
+
+ResultExpr UnsafeTrap(TrapRegistry::TrapFnc trap_func, const void* aux) {
+ return std::make_shared<TrapResultExprImpl>(trap_func, aux,
+ false /* unsafe */);
+}
+
+BoolExpr BoolConst(bool value) {
+ return std::make_shared<ConstBoolExprImpl>(value);
+}
+
+BoolExpr Not(BoolExpr cond) {
+ return std::make_shared<NegateBoolExprImpl>(std::move(cond));
+}
+
+// Zero-argument AllOf() is the identity of conjunction: true.
+BoolExpr AllOf() {
+ return BoolConst(true);
+}
+
+BoolExpr AllOf(BoolExpr lhs, BoolExpr rhs) {
+ return std::make_shared<AndBoolExprImpl>(std::move(lhs), std::move(rhs));
+}
+
+// Zero-argument AnyOf() is the identity of disjunction: false.
+BoolExpr AnyOf() {
+ return BoolConst(false);
+}
+
+BoolExpr AnyOf(BoolExpr lhs, BoolExpr rhs) {
+ return std::make_shared<OrBoolExprImpl>(std::move(lhs), std::move(rhs));
+}
+
+// If() starts a chain with an empty clause list, then immediately adds the
+// first (cond, result) pair via ElseIf().
+Elser If(BoolExpr cond, ResultExpr then_result) {
+ return Elser(nullptr).ElseIf(std::move(cond), std::move(then_result));
+}
+
+Elser::Elser(cons::List<Clause> clause_list) : clause_list_(clause_list) {
+}
+
+Elser::Elser(const Elser& elser) : clause_list_(elser.clause_list_) {
+}
+
+Elser::~Elser() {
+}
+
+// Prepends the new clause onto the immutable cons list; as a result the
+// list holds clauses in reverse order of priority (see Else() below).
+Elser Elser::ElseIf(BoolExpr cond, ResultExpr then_result) const {
+ return Elser(Cons(std::make_pair(std::move(cond), std::move(then_result)),
+ clause_list_));
+}
+
+ResultExpr Elser::Else(ResultExpr else_result) const {
+ // We finally have the default result expression for this
+ // if/then/else sequence. Also, we've already accumulated all
+ // if/then pairs into a list of reverse order (i.e., lower priority
+ // conditions are listed before higher priority ones). E.g., an
+ // expression like
+ //
+ // If(b1, e1).ElseIf(b2, e2).ElseIf(b3, e3).Else(e4)
+ //
+ // will have built up a list like
+ //
+ // [(b3, e3), (b2, e2), (b1, e1)].
+ //
+ // Now that we have e4, we can walk the list and create a ResultExpr
+ // tree like:
+ //
+ // expr = e4
+ // expr = (b3 ? e3 : expr) = (b3 ? e3 : e4)
+ // expr = (b2 ? e2 : expr) = (b2 ? e2 : (b3 ? e3 : e4))
+ // expr = (b1 ? e1 : expr) = (b1 ? e1 : (b2 ? e2 : (b3 ? e3 : e4)))
+ //
+ // and end up with an appropriately chained tree.
+
+ // Fold the clauses over the default, innermost-first, so the
+ // first-written condition ends up outermost (highest priority).
+ ResultExpr expr = std::move(else_result);
+ for (const Clause& clause : clause_list_) {
+ expr = std::make_shared<IfThenResultExprImpl>(clause.first, clause.second,
+ std::move(expr));
+ }
+ return expr;
+}
+
+} // namespace bpf_dsl
+} // namespace sandbox
+
+namespace std {
+// Explicit instantiations of the shared_ptr specializations declared
+// "extern template" in bpf_dsl_forward.h, anchoring their code here.
+template class shared_ptr<const sandbox::bpf_dsl::internal::BoolExprImpl>;
+template class shared_ptr<const sandbox::bpf_dsl::internal::ResultExprImpl>;
+} // namespace std
diff --git a/libchrome/sandbox/linux/bpf_dsl/bpf_dsl.h b/libchrome/sandbox/linux/bpf_dsl/bpf_dsl.h
new file mode 100644
index 0000000..7f81344
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/bpf_dsl.h
@@ -0,0 +1,335 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_BPF_DSL_BPF_DSL_H_
+#define SANDBOX_LINUX_BPF_DSL_BPF_DSL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/macros.h"
+#include "sandbox/linux/bpf_dsl/bpf_dsl_forward.h"
+#include "sandbox/linux/bpf_dsl/cons.h"
+#include "sandbox/linux/bpf_dsl/trap_registry.h"
+#include "sandbox/sandbox_export.h"
+
+// The sandbox::bpf_dsl namespace provides a domain-specific language
+// to make writing BPF policies more expressive. In general, the
+// object types all have value semantics (i.e., they can be copied
+// around, returned from or passed to function calls, etc. without any
+// surprising side effects), though not all support assignment.
+//
+// An idiomatic and demonstrative (albeit silly) example of this API
+// would be:
+//
+// #include "sandbox/linux/bpf_dsl/bpf_dsl.h"
+//
+// using namespace sandbox::bpf_dsl;
+//
+// class SillyPolicy : public Policy {
+// public:
+// SillyPolicy() {}
+// ~SillyPolicy() override {}
+// ResultExpr EvaluateSyscall(int sysno) const override {
+// if (sysno == __NR_fcntl) {
+// Arg<int> fd(0), cmd(1);
+// Arg<unsigned long> flags(2);
+// const uint64_t kGoodFlags = O_ACCMODE | O_NONBLOCK;
+// return If(AllOf(fd == 0,
+// cmd == F_SETFL,
+// (flags & ~kGoodFlags) == 0),
+// Allow())
+// .ElseIf(AnyOf(cmd == F_DUPFD, cmd == F_DUPFD_CLOEXEC),
+// Error(EMFILE))
+// .Else(Trap(SetFlagHandler, NULL));
+// } else {
+// return Allow();
+// }
+// }
+//
+// private:
+// DISALLOW_COPY_AND_ASSIGN(SillyPolicy);
+// };
+//
+// More generally, the DSL currently supports the following grammar:
+//
+// result = Allow() | Error(errno) | Kill() | Trace(aux)
+// | Trap(trap_func, aux) | UnsafeTrap(trap_func, aux)
+// | If(bool, result)[.ElseIf(bool, result)].Else(result)
+// | Switch(arg)[.Case(val, result)].Default(result)
+// bool = BoolConst(boolean) | Not(bool) | AllOf(bool...) | AnyOf(bool...)
+// | arg == val | arg != val
+// arg = Arg<T>(num) | arg & mask
+//
+// The semantics of each function and operator are intended to be
+// intuitive, but are described in more detail below.
+//
+// (Credit to Sean Parent's "Inheritance is the Base Class of Evil"
+// talk at Going Native 2013 for promoting value semantics via shared
+// pointers to immutable state.)
+
+namespace sandbox {
+namespace bpf_dsl {
+
+// ResultExpr is an opaque reference to an immutable result expression tree.
+using ResultExpr = std::shared_ptr<const internal::ResultExprImpl>;
+
+// BoolExpr is an opaque reference to an immutable boolean expression tree.
+using BoolExpr = std::shared_ptr<const internal::BoolExprImpl>;
+
+// Allow specifies a result that the system call should be allowed to
+// execute normally.
+SANDBOX_EXPORT ResultExpr Allow();
+
+// Error specifies a result that the system call should fail with
+// error number |err|. As a special case, Error(0) will result in the
+// system call appearing to have succeeded, but without having any
+// side effects.
+SANDBOX_EXPORT ResultExpr Error(int err);
+
+// Kill specifies a result to kill the process (task) immediately.
+SANDBOX_EXPORT ResultExpr Kill();
+
+// Trace specifies a result to notify a tracing process via the
+// PTRACE_EVENT_SECCOMP event and allow it to change or skip the system call.
+// The value of |aux| will be available to the tracer via PTRACE_GETEVENTMSG.
+SANDBOX_EXPORT ResultExpr Trace(uint16_t aux);
+
+// Trap specifies a result that the system call should be handled by
+// trapping back into userspace and invoking |trap_func|, passing
+// |aux| as the second parameter.
+SANDBOX_EXPORT ResultExpr
+ Trap(TrapRegistry::TrapFnc trap_func, const void* aux);
+
+// UnsafeTrap is like Trap, except the policy is marked as "unsafe"
+// and allowed to use SandboxSyscall to invoke any system call.
+//
+// NOTE: This feature, by definition, disables all security features of
+// the sandbox. It should never be used in production, but it can be
+// very useful to diagnose code that is incompatible with the sandbox.
+// If even a single system call returns "UnsafeTrap", the security of the
+// entire sandbox should be considered compromised.
+SANDBOX_EXPORT ResultExpr
+ UnsafeTrap(TrapRegistry::TrapFnc trap_func, const void* aux);
+
+// BoolConst converts a bool value into a BoolExpr.
+SANDBOX_EXPORT BoolExpr BoolConst(bool value);
+
+// Not returns a BoolExpr representing the logical negation of |cond|.
+SANDBOX_EXPORT BoolExpr Not(BoolExpr cond);
+
+// AllOf returns a BoolExpr representing the logical conjunction ("and")
+// of zero or more BoolExprs.
+SANDBOX_EXPORT BoolExpr AllOf();
+SANDBOX_EXPORT BoolExpr AllOf(BoolExpr lhs, BoolExpr rhs);
+template <typename... Rest>
+SANDBOX_EXPORT BoolExpr AllOf(BoolExpr first, Rest&&... rest);
+
+// AnyOf returns a BoolExpr representing the logical disjunction ("or")
+// of zero or more BoolExprs.
+SANDBOX_EXPORT BoolExpr AnyOf();
+SANDBOX_EXPORT BoolExpr AnyOf(BoolExpr lhs, BoolExpr rhs);
+template <typename... Rest>
+SANDBOX_EXPORT BoolExpr AnyOf(BoolExpr first, Rest&&... rest);
+
+// Arg<T> represents the value of a system call argument of type |T|,
+// optionally combined with a bitmask via operator&. Comparisons against
+// constants yield BoolExprs usable in If()/Switch() policies.
+template <typename T>
+class SANDBOX_EXPORT Arg {
+ public:
+ // Initializes the Arg to represent the |num|th system call
+ // argument (indexed from 0), which is of type |T|.
+ explicit Arg(int num);
+
+ Arg(const Arg& arg) : num_(arg.num_), mask_(arg.mask_) {}
+
+ // Returns an Arg representing the current argument, but after
+ // bitwise-and'ing it with |rhs|.
+ friend Arg operator&(const Arg& lhs, uint64_t rhs) {
+ return Arg(lhs.num_, lhs.mask_ & rhs);
+ }
+
+ // Returns a boolean expression comparing whether the system call argument
+ // (after applying any bitmasks, if appropriate) equals |rhs|.
+ friend BoolExpr operator==(const Arg& lhs, T rhs) { return lhs.EqualTo(rhs); }
+
+ // Returns a boolean expression comparing whether the system call argument
+ // (after applying any bitmasks, if appropriate) does not equal |rhs|.
+ friend BoolExpr operator!=(const Arg& lhs, T rhs) { return Not(lhs == rhs); }
+
+ private:
+ Arg(int num, uint64_t mask) : num_(num), mask_(mask) {}
+
+ BoolExpr EqualTo(T val) const;
+
+ int num_; // Which system call argument (0-based).
+ uint64_t mask_; // Bitmask accumulated via operator&; defaults to all ones.
+
+ DISALLOW_ASSIGN(Arg);
+};
+
+// If begins a conditional result expression predicated on the
+// specified boolean expression.
+SANDBOX_EXPORT Elser If(BoolExpr cond, ResultExpr then_result);
+
+// Elser accumulates the (condition, result) clauses of an
+// If().ElseIf()...Else() chain. Each ElseIf() returns a new Elser value;
+// Else() terminates the chain and produces the final ResultExpr.
+class SANDBOX_EXPORT Elser {
+ public:
+ Elser(const Elser& elser);
+ ~Elser();
+
+ // ElseIf extends the conditional result expression with another
+ // "if then" clause, predicated on the specified boolean expression.
+ Elser ElseIf(BoolExpr cond, ResultExpr then_result) const;
+
+ // Else terminates a conditional result expression using |else_result| as
+ // the default fallback result expression.
+ ResultExpr Else(ResultExpr else_result) const;
+
+ private:
+ using Clause = std::pair<BoolExpr, ResultExpr>;
+
+ explicit Elser(cons::List<Clause> clause_list);
+
+ // Clauses in reverse priority order (most recently added first).
+ cons::List<Clause> clause_list_;
+
+ // Only If() and Switch() may construct an Elser from scratch.
+ friend Elser If(BoolExpr, ResultExpr);
+ template <typename T>
+ friend Caser<T> Switch(const Arg<T>&);
+ DISALLOW_ASSIGN(Elser);
+};
+
+// Switch begins a switch expression dispatched according to the
+// specified argument value.
+template <typename T>
+SANDBOX_EXPORT Caser<T> Switch(const Arg<T>& arg);
+
+// Caser<T> accumulates the Case()/Cases() clauses of a Switch() expression
+// over argument |arg_|; Default() terminates the switch. Internally each
+// case is translated into an ElseIf clause on |elser_|.
+template <typename T>
+class SANDBOX_EXPORT Caser {
+ public:
+ Caser(const Caser<T>& caser) : arg_(caser.arg_), elser_(caser.elser_) {}
+ ~Caser() {}
+
+ // Case adds a single-value "case" clause to the switch.
+ Caser<T> Case(T value, ResultExpr result) const;
+
+ // Cases adds a multiple-value "case" clause to the switch.
+ // See also the SANDBOX_BPF_DSL_CASES macro below for a more idiomatic way
+ // of using this function.
+ template <typename... Values>
+ Caser<T> CasesImpl(ResultExpr result, const Values&... values) const;
+
+ // Terminate the switch with a "default" clause.
+ ResultExpr Default(ResultExpr result) const;
+
+ private:
+ Caser(const Arg<T>& arg, Elser elser) : arg_(arg), elser_(elser) {}
+
+ Arg<T> arg_; // The argument being switched on.
+ Elser elser_; // The if/else chain built up so far.
+
+ // Only Switch() may construct a Caser from scratch.
+ template <typename U>
+ friend Caser<U> Switch(const Arg<U>&);
+ DISALLOW_ASSIGN(Caser);
+};
+
+// Recommended usage is to put
+// #define CASES SANDBOX_BPF_DSL_CASES
+// near the top of the .cc file (e.g., nearby any "using" statements), then
+// use like:
+// Switch(arg).CASES((3, 5, 7), result)...;
+#define SANDBOX_BPF_DSL_CASES(values, result) \
+ CasesImpl(result, SANDBOX_BPF_DSL_CASES_HELPER values)
+
+// Helper macro to strip parentheses.
+#define SANDBOX_BPF_DSL_CASES_HELPER(...) __VA_ARGS__
+
+// =====================================================================
+// Official API ends here.
+// =====================================================================
+
+namespace internal {
+
+// Make argument-dependent lookup work. This is necessary because although
+// BoolExpr is defined in bpf_dsl, since it's merely a typedef for
+// std::shared_ptr<const internal::BoolExprImpl>, argument-dependent lookup only
+// searches the "internal" nested namespace.
+using bpf_dsl::Not;
+using bpf_dsl::AllOf;
+using bpf_dsl::AnyOf;
+
+// Returns a boolean expression that represents whether system call
+// argument |num| of size |size| is equal to |val|, when masked
+// according to |mask|. Users should use the Arg template class below
+// instead of using this API directly.
+SANDBOX_EXPORT BoolExpr
+ ArgEq(int num, size_t size, uint64_t mask, uint64_t val);
+
+// Returns the default mask for a system call argument of the specified size.
+SANDBOX_EXPORT uint64_t DefaultMask(size_t size);
+
+} // namespace internal
+
+// An Arg starts out with the all-ones mask for its width, i.e. the whole
+// argument value is compared until operator& narrows it.
+template <typename T>
+Arg<T>::Arg(int num)
+ : num_(num), mask_(internal::DefaultMask(sizeof(T))) {
+}
+
+// Definition requires ArgEq to have been declared. Moved out-of-line
+// to minimize how much internal clutter users have to ignore while
+// reading the header documentation.
+//
+// Additionally, we use this helper member function to avoid linker errors
+// caused by defining operator== out-of-line. For a more detailed explanation,
+// see http://www.parashift.com/c++-faq-lite/template-friends.html.
+template <typename T>
+BoolExpr Arg<T>::EqualTo(T val) const {
+ if (sizeof(T) == 4) {
+ // Prevent sign-extension of negative int32_t values.
+ return internal::ArgEq(num_, sizeof(T), mask_, static_cast<uint32_t>(val));
+ }
+ return internal::ArgEq(num_, sizeof(T), mask_, static_cast<uint64_t>(val));
+}
+
+// Switch() starts a Caser chain with an empty underlying Elser.
+template <typename T>
+SANDBOX_EXPORT Caser<T> Switch(const Arg<T>& arg) {
+ return Caser<T>(arg, Elser(nullptr));
+}
+
+// A single-value case is just a one-element Cases clause.
+template <typename T>
+Caser<T> Caser<T>::Case(T value, ResultExpr result) const {
+ return SANDBOX_BPF_DSL_CASES((value), std::move(result));
+}
+
+template <typename T>
+template <typename... Values>
+Caser<T> Caser<T>::CasesImpl(ResultExpr result, const Values&... values) const {
+ // Theoretically we could evaluate arg_ just once and emit a more efficient
+ // dispatch table, but for now we simply translate into an equivalent
+ // If/ElseIf/Else chain.
+
+ return Caser<T>(arg_,
+ elser_.ElseIf(AnyOf((arg_ == values)...), std::move(result)));
+}
+
+template <typename T>
+ResultExpr Caser<T>::Default(ResultExpr result) const {
+ return elser_.Else(std::move(result));
+}
+
+// Variadic AllOf/AnyOf fold right into the binary overloads declared above,
+// terminating at the two-argument (or zero-argument) base case.
+template <typename... Rest>
+BoolExpr AllOf(BoolExpr first, Rest&&... rest) {
+ return AllOf(std::move(first), AllOf(std::forward<Rest>(rest)...));
+}
+
+template <typename... Rest>
+BoolExpr AnyOf(BoolExpr first, Rest&&... rest) {
+ return AnyOf(std::move(first), AnyOf(std::forward<Rest>(rest)...));
+}
+
+} // namespace bpf_dsl
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_BPF_DSL_BPF_DSL_H_
diff --git a/libchrome/sandbox/linux/bpf_dsl/bpf_dsl_forward.h b/libchrome/sandbox/linux/bpf_dsl/bpf_dsl_forward.h
new file mode 100644
index 0000000..10477c9
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/bpf_dsl_forward.h
@@ -0,0 +1,45 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_BPF_DSL_BPF_DSL_FORWARD_H_
+#define SANDBOX_LINUX_BPF_DSL_BPF_DSL_FORWARD_H_
+
+#include <memory>
+
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+namespace bpf_dsl {
+
+// The bpf_dsl_forward.h header provides forward declarations for the
+// types defined in bpf_dsl.h. It's intended for use in user headers
+// that need to reference bpf_dsl types, but don't require definitions.
+
+namespace internal {
+class ResultExprImpl;
+class BoolExprImpl;
+}
+
+using ResultExpr = std::shared_ptr<const internal::ResultExprImpl>;
+using BoolExpr = std::shared_ptr<const internal::BoolExprImpl>;
+
+template <typename T>
+class Arg;
+
+class Elser;
+
+template <typename T>
+class Caser;
+
+} // namespace bpf_dsl
+} // namespace sandbox
+
+namespace std {
+extern template class SANDBOX_EXPORT
+ shared_ptr<const sandbox::bpf_dsl::internal::BoolExprImpl>;
+extern template class SANDBOX_EXPORT
+ shared_ptr<const sandbox::bpf_dsl::internal::ResultExprImpl>;
+} // namespace std
+
+#endif // SANDBOX_LINUX_BPF_DSL_BPF_DSL_FORWARD_H_
diff --git a/libchrome/sandbox/linux/bpf_dsl/bpf_dsl_impl.h b/libchrome/sandbox/linux/bpf_dsl/bpf_dsl_impl.h
new file mode 100644
index 0000000..35ff64f
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/bpf_dsl_impl.h
@@ -0,0 +1,68 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_BPF_DSL_BPF_DSL_IMPL_H_
+#define SANDBOX_LINUX_BPF_DSL_BPF_DSL_IMPL_H_
+
+#include <memory>
+
+#include "base/macros.h"
+#include "sandbox/linux/bpf_dsl/codegen.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+namespace bpf_dsl {
+class ErrorCode;
+class PolicyCompiler;
+
+namespace internal {
+
+// Internal interface implemented by BoolExpr implementations.
+class BoolExprImpl {
+ public:
+ // Compile uses |pc| to emit a CodeGen::Node that conditionally continues
+ // to either |then_node| or |else_node|, depending on whether the represented
+ // boolean expression is true or false.
+ virtual CodeGen::Node Compile(PolicyCompiler* pc,
+ CodeGen::Node then_node,
+ CodeGen::Node else_node) const = 0;
+
+ protected:
+ BoolExprImpl() {}
+ // Non-virtual is fine here: instances are only destroyed through
+ // shared_ptr, whose deleter is captured at construction time.
+ virtual ~BoolExprImpl() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BoolExprImpl);
+};
+
+// Internal interface implemented by ResultExpr implementations.
+class ResultExprImpl {
+ public:
+ // Compile uses |pc| to emit a CodeGen::Node that executes the
+ // represented result expression.
+ virtual CodeGen::Node Compile(PolicyCompiler* pc) const = 0;
+
+ // HasUnsafeTraps returns whether the result expression is or recursively
+ // contains an unsafe trap expression.
+ virtual bool HasUnsafeTraps() const;
+
+ // IsAllow returns whether the result expression is an "allow" result.
+ virtual bool IsAllow() const;
+
+ // IsDeny returns whether the result expression is a "deny" result.
+ virtual bool IsDeny() const;
+
+ protected:
+ ResultExprImpl() {}
+ virtual ~ResultExprImpl() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ResultExprImpl);
+};
+
+} // namespace internal
+} // namespace bpf_dsl
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_BPF_DSL_BPF_DSL_IMPL_H_
diff --git a/libchrome/sandbox/linux/bpf_dsl/bpf_dsl_unittest.cc b/libchrome/sandbox/linux/bpf_dsl/bpf_dsl_unittest.cc
new file mode 100644
index 0000000..801deee
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/bpf_dsl_unittest.cc
@@ -0,0 +1,478 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/bpf_dsl/bpf_dsl.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <netinet/in.h>
+#include <stdint.h>
+#include <sys/socket.h>
+#include <sys/syscall.h>
+#include <sys/utsname.h>
+#include <unistd.h>
+
+#include <map>
+#include <utility>
+
+#include "base/files/scoped_file.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "sandbox/linux/bpf_dsl/bpf_dsl_impl.h"
+#include "sandbox/linux/bpf_dsl/codegen.h"
+#include "sandbox/linux/bpf_dsl/dump_bpf.h"
+#include "sandbox/linux/bpf_dsl/golden/golden_files.h"
+#include "sandbox/linux/bpf_dsl/policy.h"
+#include "sandbox/linux/bpf_dsl/policy_compiler.h"
+#include "sandbox/linux/bpf_dsl/seccomp_macros.h"
+#include "sandbox/linux/bpf_dsl/test_trap_registry.h"
+#include "sandbox/linux/bpf_dsl/verifier.h"
+#include "sandbox/linux/system_headers/linux_filter.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#define CASES SANDBOX_BPF_DSL_CASES
+
+namespace sandbox {
+namespace bpf_dsl {
+namespace {
+
+// Helper function to construct fake arch_seccomp_data objects.
+// Helper function to construct fake arch_seccomp_data objects.
+// |nr| is the syscall number; |p0|..|p5| are the six syscall arguments.
+struct arch_seccomp_data FakeSyscall(int nr,
+ uintptr_t p0 = 0,
+ uintptr_t p1 = 0,
+ uintptr_t p2 = 0,
+ uintptr_t p3 = 0,
+ uintptr_t p4 = 0,
+ uintptr_t p5 = 0) {
+ // Made up program counter for syscall address.
+ const uint64_t kFakePC = 0x543210;
+
+ struct arch_seccomp_data data = {
+ nr,
+ SECCOMP_ARCH,
+ kFakePC,
+ {
+ p0, p1, p2, p3, p4, p5,
+ },
+ };
+
+ return data;
+}
+
+// Compiles |policy| to a BPF program, checks its textual dump against the
+// golden file data (where available for this architecture), and lets tests
+// emulate the program against fake syscall records.
+class PolicyEmulator {
+ public:
+ PolicyEmulator(const golden::Golden& golden, const Policy& policy)
+ : program_() {
+ TestTrapRegistry traps;
+ program_ = PolicyCompiler(&policy, &traps).Compile();
+
+ // TODO(mdempsky): Generalize to more arches.
+ const char* expected = nullptr;
+#if defined(ARCH_CPU_X86)
+ expected = golden.i386_dump;
+#elif defined(ARCH_CPU_X86_64)
+ expected = golden.x86_64_dump;
+#endif
+
+ if (expected != nullptr) {
+ const std::string actual = DumpBPF::StringPrintProgram(program_);
+ EXPECT_EQ(expected, actual);
+ } else {
+ // Soft failure only: other arches simply skip the golden comparison.
+ LOG(WARNING) << "Missing golden file data entry";
+ }
+ }
+
+ ~PolicyEmulator() {}
+
+ void ExpectAllow(const struct arch_seccomp_data& data) const {
+ EXPECT_EQ(SECCOMP_RET_ALLOW, Emulate(data));
+ }
+
+ void ExpectErrno(uint16_t err, const struct arch_seccomp_data& data) const {
+ EXPECT_EQ(SECCOMP_RET_ERRNO | err, Emulate(data));
+ }
+
+ void ExpectKill(const struct arch_seccomp_data& data) const {
+ EXPECT_EQ(SECCOMP_RET_KILL, Emulate(data));
+ }
+
+ private:
+ // Runs the compiled program against |data| in the BPF verifier/emulator
+ // and returns the seccomp return value (0 plus a test failure on error).
+ uint32_t Emulate(const struct arch_seccomp_data& data) const {
+ const char* err = nullptr;
+ uint32_t res = Verifier::EvaluateBPF(program_, data, &err);
+ if (err) {
+ ADD_FAILURE() << err;
+ return 0;
+ }
+ return res;
+ }
+
+ CodeGen::Program program_;
+
+ DISALLOW_COPY_AND_ASSIGN(PolicyEmulator);
+};
+
+// Minimal policy exercising If/Else with == and != argument comparisons.
+class BasicPolicy : public Policy {
+ public:
+ BasicPolicy() {}
+ ~BasicPolicy() override {}
+ ResultExpr EvaluateSyscall(int sysno) const override {
+ if (sysno == __NR_getpgid) {
+ const Arg<pid_t> pid(0);
+ return If(pid == 0, Error(EPERM)).Else(Error(EINVAL));
+ }
+ if (sysno == __NR_setuid) {
+ const Arg<uid_t> uid(0);
+ return If(uid != 42, Kill()).Else(Allow());
+ }
+ return Allow();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BasicPolicy);
+};
+
+// Verifies BasicPolicy's If/Else results for both branches of each rule.
+TEST(BPFDSL, Basic) {
+ PolicyEmulator emulator(golden::kBasicPolicy, BasicPolicy());
+
+ emulator.ExpectErrno(EPERM, FakeSyscall(__NR_getpgid, 0));
+ emulator.ExpectErrno(EINVAL, FakeSyscall(__NR_getpgid, 1));
+
+ emulator.ExpectAllow(FakeSyscall(__NR_setuid, 42));
+ emulator.ExpectKill(FakeSyscall(__NR_setuid, 43));
+}
+
+/* On IA-32, socketpair() is implemented via socketcall(). :-( */
+#if !defined(ARCH_CPU_X86)
+// Policy exercising nested AllOf/AnyOf combinations on socketpair().
+class BooleanLogicPolicy : public Policy {
+ public:
+ BooleanLogicPolicy() {}
+ ~BooleanLogicPolicy() override {}
+ ResultExpr EvaluateSyscall(int sysno) const override {
+ if (sysno == __NR_socketpair) {
+ const Arg<int> domain(0), type(1), protocol(2);
+ // EPERM iff: AF_UNIX && (SOCK_STREAM || SOCK_DGRAM) && protocol == 0.
+ return If(AllOf(domain == AF_UNIX,
+ AnyOf(type == SOCK_STREAM, type == SOCK_DGRAM),
+ protocol == 0),
+ Error(EPERM))
+ .Else(Error(EINVAL));
+ }
+ return Allow();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BooleanLogicPolicy);
+};
+
+// Checks every combination class for BooleanLogicPolicy: fully matching
+// argument tuples, tuples failing exactly one conjunct, and tuples failing
+// several conjuncts at once.
+TEST(BPFDSL, BooleanLogic) {
+ PolicyEmulator emulator(golden::kBooleanLogicPolicy, BooleanLogicPolicy());
+
+ const intptr_t kFakeSV = 0x12345;
+
+ // Acceptable combinations that should return EPERM.
+ emulator.ExpectErrno(
+ EPERM, FakeSyscall(__NR_socketpair, AF_UNIX, SOCK_STREAM, 0, kFakeSV));
+ emulator.ExpectErrno(
+ EPERM, FakeSyscall(__NR_socketpair, AF_UNIX, SOCK_DGRAM, 0, kFakeSV));
+
+ // Combinations that are invalid for only one reason; should return EINVAL.
+ emulator.ExpectErrno(
+ EINVAL, FakeSyscall(__NR_socketpair, AF_INET, SOCK_STREAM, 0, kFakeSV));
+ emulator.ExpectErrno(EINVAL, FakeSyscall(__NR_socketpair, AF_UNIX,
+ SOCK_SEQPACKET, 0, kFakeSV));
+ emulator.ExpectErrno(EINVAL, FakeSyscall(__NR_socketpair, AF_UNIX,
+ SOCK_STREAM, IPPROTO_TCP, kFakeSV));
+
+ // Completely unacceptable combination; should also return EINVAL.
+ emulator.ExpectErrno(
+ EINVAL, FakeSyscall(__NR_socketpair, AF_INET, SOCK_SEQPACKET, IPPROTO_UDP,
+ kFakeSV));
+}
+#endif // !ARCH_CPU_X86
+
+// Policy exercising AnyOf and AllOf across three arguments with ElseIf.
+class MoreBooleanLogicPolicy : public Policy {
+ public:
+ MoreBooleanLogicPolicy() {}
+ ~MoreBooleanLogicPolicy() override {}
+ ResultExpr EvaluateSyscall(int sysno) const override {
+ if (sysno == __NR_setresuid) {
+ const Arg<uid_t> ruid(0), euid(1), suid(2);
+ // EPERM if any uid is 0, EAGAIN if all are 1, EINVAL otherwise.
+ return If(AnyOf(ruid == 0, euid == 0, suid == 0), Error(EPERM))
+ .ElseIf(AllOf(ruid == 1, euid == 1, suid == 1), Error(EAGAIN))
+ .Else(Error(EINVAL));
+ }
+ return Allow();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MoreBooleanLogicPolicy);
+};
+
+// Exercises MoreBooleanLogicPolicy across all three result branches.
+TEST(BPFDSL, MoreBooleanLogic) {
+ PolicyEmulator emulator(golden::kMoreBooleanLogicPolicy,
+ MoreBooleanLogicPolicy());
+
+ // Expect EPERM if any set to 0.
+ emulator.ExpectErrno(EPERM, FakeSyscall(__NR_setresuid, 0, 5, 5));
+ emulator.ExpectErrno(EPERM, FakeSyscall(__NR_setresuid, 5, 0, 5));
+ emulator.ExpectErrno(EPERM, FakeSyscall(__NR_setresuid, 5, 5, 0));
+
+ // Expect EAGAIN if all set to 1.
+ emulator.ExpectErrno(EAGAIN, FakeSyscall(__NR_setresuid, 1, 1, 1));
+
+ // Expect EINVAL for anything else.
+ emulator.ExpectErrno(EINVAL, FakeSyscall(__NR_setresuid, 5, 1, 1));
+ emulator.ExpectErrno(EINVAL, FakeSyscall(__NR_setresuid, 1, 5, 1));
+ emulator.ExpectErrno(EINVAL, FakeSyscall(__NR_setresuid, 1, 1, 5));
+ emulator.ExpectErrno(EINVAL, FakeSyscall(__NR_setresuid, 3, 4, 5));
+}
+
+// A 64-bit pattern truncated to uintptr_t; on 32-bit platforms only the
+// low half survives, which is exactly what the argument-width test probes.
+static const uintptr_t kDeadBeefAddr =
+ static_cast<uintptr_t>(0xdeadbeefdeadbeefULL);
+
+// Policy exercising pointer-sized (uintptr_t) argument comparisons.
+class ArgSizePolicy : public Policy {
+ public:
+ ArgSizePolicy() {}
+ ~ArgSizePolicy() override {}
+ ResultExpr EvaluateSyscall(int sysno) const override {
+ if (sysno == __NR_uname) {
+ const Arg<uintptr_t> addr(0);
+ return If(addr == kDeadBeefAddr, Error(EPERM)).Else(Allow());
+ }
+ return Allow();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ArgSizePolicy);
+};
+
+// Verifies that a pointer-width argument comparison matches exactly.
+TEST(BPFDSL, ArgSizeTest) {
+ PolicyEmulator emulator(golden::kArgSizePolicy, ArgSizePolicy());
+
+ emulator.ExpectAllow(FakeSyscall(__NR_uname, 0));
+ emulator.ExpectErrno(EPERM, FakeSyscall(__NR_uname, kDeadBeefAddr));
+}
+
+// Policy exercising comparison against a negative constant (checks the
+// sign-extension handling in Arg<T>::EqualTo for 32-bit signed types).
+class NegativeConstantsPolicy : public Policy {
+ public:
+ NegativeConstantsPolicy() {}
+ ~NegativeConstantsPolicy() override {}
+ ResultExpr EvaluateSyscall(int sysno) const override {
+ if (sysno == __NR_fcntl) {
+ const Arg<int> fd(0);
+ return If(fd == -314, Error(EPERM)).Else(Allow());
+ }
+ return Allow();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(NegativeConstantsPolicy);
+};
+
+// Only the exact negative value -314 should match; other negatives don't.
+TEST(BPFDSL, NegativeConstantsTest) {
+ PolicyEmulator emulator(golden::kNegativeConstantsPolicy,
+ NegativeConstantsPolicy());
+
+ emulator.ExpectAllow(FakeSyscall(__NR_fcntl, -5, F_DUPFD));
+ emulator.ExpectAllow(FakeSyscall(__NR_fcntl, 20, F_DUPFD));
+ emulator.ExpectErrno(EPERM, FakeSyscall(__NR_fcntl, -314, F_DUPFD));
+}
+
+#if 0
+// TODO(mdempsky): This is really an integration test.
+// NOTE(review): compiled out — BPF_TEST_C needs the real seccomp test
+// harness, which this unit-test target does not link against.
+
+class TrappingPolicy : public Policy {
+ public:
+ TrappingPolicy() {}
+ ~TrappingPolicy() override {}
+ ResultExpr EvaluateSyscall(int sysno) const override {
+ if (sysno == __NR_uname) {
+ return Trap(UnameTrap, &count_);
+ }
+ return Allow();
+ }
+
+ private:
+ static intptr_t count_;
+
+ static intptr_t UnameTrap(const struct arch_seccomp_data& data, void* aux) {
+ BPF_ASSERT_EQ(&count_, aux);
+ return ++count_;
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(TrappingPolicy);
+};
+
+intptr_t TrappingPolicy::count_;
+
+BPF_TEST_C(BPFDSL, TrapTest, TrappingPolicy) {
+ ASSERT_SYSCALL_RESULT(1, uname, NULL);
+ ASSERT_SYSCALL_RESULT(2, uname, NULL);
+ ASSERT_SYSCALL_RESULT(3, uname, NULL);
+}
+#endif
+
+// Policy exercising operator& (argument bitmasks) with different masks and
+// comparison values on three syscalls.
+class MaskingPolicy : public Policy {
+ public:
+ MaskingPolicy() {}
+ ~MaskingPolicy() override {}
+ ResultExpr EvaluateSyscall(int sysno) const override {
+ if (sysno == __NR_setuid) {
+ const Arg<uid_t> uid(0);
+ return If((uid & 0xf) == 0, Error(EINVAL)).Else(Error(EACCES));
+ }
+ if (sysno == __NR_setgid) {
+ const Arg<gid_t> gid(0);
+ return If((gid & 0xf0) == 0xf0, Error(EINVAL)).Else(Error(EACCES));
+ }
+ if (sysno == __NR_setpgid) {
+ const Arg<pid_t> pid(0);
+ return If((pid & 0xa5) == 0xa0, Error(EINVAL)).Else(Error(EACCES));
+ }
+ return Allow();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MaskingPolicy);
+};
+
+// Exhaustively checks each masked comparison over the 0..0xff value range,
+// recomputing the expected errno with plain C bit arithmetic.
+TEST(BPFDSL, MaskTest) {
+ PolicyEmulator emulator(golden::kMaskingPolicy, MaskingPolicy());
+
+ for (uid_t uid = 0; uid < 0x100; ++uid) {
+ const int expect_errno = (uid & 0xf) == 0 ? EINVAL : EACCES;
+ emulator.ExpectErrno(expect_errno, FakeSyscall(__NR_setuid, uid));
+ }
+
+ for (gid_t gid = 0; gid < 0x100; ++gid) {
+ const int expect_errno = (gid & 0xf0) == 0xf0 ? EINVAL : EACCES;
+ emulator.ExpectErrno(expect_errno, FakeSyscall(__NR_setgid, gid));
+ }
+
+ for (pid_t pid = 0; pid < 0x100; ++pid) {
+ const int expect_errno = (pid & 0xa5) == 0xa0 ? EINVAL : EACCES;
+ emulator.ExpectErrno(expect_errno, FakeSyscall(__NR_setpgid, pid, 0));
+ }
+}
+
+// Policy exercising a multi-clause If/ElseIf/ElseIf/Else chain with
+// progressively narrower masks (0xfff ⊃ 0xff0 ⊃ 0xf00).
+class ElseIfPolicy : public Policy {
+ public:
+ ElseIfPolicy() {}
+ ~ElseIfPolicy() override {}
+ ResultExpr EvaluateSyscall(int sysno) const override {
+ if (sysno == __NR_setuid) {
+ const Arg<uid_t> uid(0);
+ return If((uid & 0xfff) == 0, Error(0))
+ .ElseIf((uid & 0xff0) == 0, Error(EINVAL))
+ .ElseIf((uid & 0xf00) == 0, Error(EEXIST))
+ .Else(Error(EACCES));
+ }
+ return Allow();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ElseIfPolicy);
+};
+
+// Checks that earlier ElseIf clauses take priority over later ones:
+// each pair of inputs lands in exactly one clause of the chain.
+TEST(BPFDSL, ElseIfTest) {
+ PolicyEmulator emulator(golden::kElseIfPolicy, ElseIfPolicy());
+
+ emulator.ExpectErrno(0, FakeSyscall(__NR_setuid, 0));
+
+ emulator.ExpectErrno(EINVAL, FakeSyscall(__NR_setuid, 0x0001));
+ emulator.ExpectErrno(EINVAL, FakeSyscall(__NR_setuid, 0x0002));
+
+ emulator.ExpectErrno(EEXIST, FakeSyscall(__NR_setuid, 0x0011));
+ emulator.ExpectErrno(EEXIST, FakeSyscall(__NR_setuid, 0x0022));
+
+ emulator.ExpectErrno(EACCES, FakeSyscall(__NR_setuid, 0x0111));
+ emulator.ExpectErrno(EACCES, FakeSyscall(__NR_setuid, 0x0222));
+}
+
+// Policy exercising Switch()/Case()/CASES()/Default(), including a nested
+// If/Else inside one case.
+class SwitchPolicy : public Policy {
+ public:
+ SwitchPolicy() {}
+ ~SwitchPolicy() override {}
+ ResultExpr EvaluateSyscall(int sysno) const override {
+ if (sysno == __NR_fcntl) {
+ const Arg<int> cmd(1);
+ const Arg<unsigned long> long_arg(2);
+ return Switch(cmd)
+ .CASES((F_GETFL, F_GETFD), Error(ENOENT))
+ .Case(F_SETFD, If(long_arg == O_CLOEXEC, Allow()).Else(Error(EINVAL)))
+ .Case(F_SETFL, Error(EPERM))
+ .Default(Error(EACCES));
+ }
+ return Allow();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SwitchPolicy);
+};
+
+// Hits every branch of SwitchPolicy: the multi-value CASES clause, both
+// outcomes of the nested If, a single-value Case, and the Default.
+TEST(BPFDSL, SwitchTest) {
+ PolicyEmulator emulator(golden::kSwitchPolicy, SwitchPolicy());
+
+ const int kFakeSockFD = 42;
+
+ emulator.ExpectErrno(ENOENT, FakeSyscall(__NR_fcntl, kFakeSockFD, F_GETFD));
+ emulator.ExpectErrno(ENOENT, FakeSyscall(__NR_fcntl, kFakeSockFD, F_GETFL));
+
+ emulator.ExpectAllow(
+ FakeSyscall(__NR_fcntl, kFakeSockFD, F_SETFD, O_CLOEXEC));
+ emulator.ExpectErrno(EINVAL,
+ FakeSyscall(__NR_fcntl, kFakeSockFD, F_SETFD, 0));
+
+ emulator.ExpectErrno(EPERM,
+ FakeSyscall(__NR_fcntl, kFakeSockFD, F_SETFL, O_RDONLY));
+
+ emulator.ExpectErrno(EACCES,
+ FakeSyscall(__NR_fcntl, kFakeSockFD, F_DUPFD, 0));
+}
+
+// No-op trap handler used only so Trap()/UnsafeTrap() expressions can be
+// constructed in the predicate tests below; never actually invoked.
+static intptr_t DummyTrap(const struct arch_seccomp_data& data, void* aux) {
+ return 0;
+}
+
+// Checks the IsAllow()/IsDeny() classification of each result kind; note
+// that conditional results classify as neither.
+TEST(BPFDSL, IsAllowDeny) {
+ ResultExpr allow = Allow();
+ EXPECT_TRUE(allow->IsAllow());
+ EXPECT_FALSE(allow->IsDeny());
+
+ ResultExpr error = Error(ENOENT);
+ EXPECT_FALSE(error->IsAllow());
+ EXPECT_TRUE(error->IsDeny());
+
+ ResultExpr trace = Trace(42);
+ EXPECT_FALSE(trace->IsAllow());
+ EXPECT_FALSE(trace->IsDeny());
+
+ ResultExpr trap = Trap(DummyTrap, nullptr);
+ EXPECT_FALSE(trap->IsAllow());
+ EXPECT_TRUE(trap->IsDeny());
+
+ const Arg<int> arg(0);
+ ResultExpr maybe = If(arg == 0, Allow()).Else(Error(EPERM));
+ EXPECT_FALSE(maybe->IsAllow());
+ EXPECT_FALSE(maybe->IsDeny());
+}
+
+// Checks that HasUnsafeTraps() is true only for UnsafeTrap results, and
+// that it propagates recursively through conditional expressions.
+TEST(BPFDSL, HasUnsafeTraps) {
+ ResultExpr allow = Allow();
+ EXPECT_FALSE(allow->HasUnsafeTraps());
+
+ ResultExpr safe = Trap(DummyTrap, nullptr);
+ EXPECT_FALSE(safe->HasUnsafeTraps());
+
+ ResultExpr unsafe = UnsafeTrap(DummyTrap, nullptr);
+ EXPECT_TRUE(unsafe->HasUnsafeTraps());
+
+ const Arg<int> arg(0);
+ ResultExpr maybe = If(arg == 0, allow).Else(unsafe);
+ EXPECT_TRUE(maybe->HasUnsafeTraps());
+}
+
+} // namespace
+} // namespace bpf_dsl
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/bpf_dsl/codegen.cc b/libchrome/sandbox/linux/bpf_dsl/codegen.cc
new file mode 100644
index 0000000..d88bd53
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/codegen.cc
@@ -0,0 +1,147 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/bpf_dsl/codegen.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
+#include <utility>
+
+#include "base/logging.h"
+#include "sandbox/linux/system_headers/linux_filter.h"
+
+// This CodeGen implementation strives for simplicity while still
+// generating acceptable BPF programs under typical usage patterns
+// (e.g., by PolicyCompiler).
+//
+// The key to its simplicity is that BPF programs only support forward
+// jumps/branches, which allows constraining the DAG construction API
+// to make instruction nodes immutable. Immutable nodes admits a
+// simple greedy approach of emitting new instructions as needed and
+// then reusing existing ones that have already been emitted. This
+// cleanly avoids any need to compute basic blocks or apply
+// topological sorting because the API effectively sorts instructions
+// for us (e.g., before MakeInstruction() can be called to emit a
+// branch instruction, it must have already been called for each
+// branch path).
+//
+// This greedy algorithm is not without (theoretical) weakness though:
+//
+// 1. In the general case, we don't eliminate dead code. If needed,
+// we could trace back through the program in Compile() and elide
+// any unneeded instructions, but in practice we only emit live
+// instructions anyway.
+//
+// 2. By not dividing instructions into basic blocks and sorting, we
+// lose an opportunity to move non-branch/non-return instructions
+// adjacent to their successor instructions, which means we might
+// need to emit additional jumps. But in practice, they'll
+// already be nearby as long as callers don't go out of their way
+// to interleave MakeInstruction() calls for unrelated code
+// sequences.
+
+namespace sandbox {
+
+// kBranchRange is the maximum value that can be stored in
+// sock_filter's 8-bit jt and jf fields.
+const size_t kBranchRange = std::numeric_limits<uint8_t>::max();
+
+const CodeGen::Node CodeGen::kNullNode;
+
+CodeGen::CodeGen() : program_(), equivalent_(), memos_() {
+}
+
+CodeGen::~CodeGen() {
+}
+
+CodeGen::Program CodeGen::Compile(CodeGen::Node head) {
+ return Program(program_.rbegin() + Offset(head), program_.rend());
+}
+
+CodeGen::Node CodeGen::MakeInstruction(uint16_t code,
+ uint32_t k,
+ Node jt,
+ Node jf) {
+ // To avoid generating redundant code sequences, we memoize the
+ // results from AppendInstruction().
+ auto res = memos_.insert(std::make_pair(MemoKey(code, k, jt, jf), kNullNode));
+ CodeGen::Node* node = &res.first->second;
+ if (res.second) { // Newly inserted memo entry.
+ *node = AppendInstruction(code, k, jt, jf);
+ }
+ return *node;
+}
+
+CodeGen::Node CodeGen::AppendInstruction(uint16_t code,
+ uint32_t k,
+ Node jt,
+ Node jf) {
+ if (BPF_CLASS(code) == BPF_JMP) {
+ CHECK_NE(BPF_JA, BPF_OP(code)) << "CodeGen inserts JAs as needed";
+
+ // Optimally adding jumps is rather tricky, so we use a quick
+ // approximation: by artificially reducing |jt|'s range, |jt| will
+ // stay within its true range even if we add a jump for |jf|.
+ jt = WithinRange(jt, kBranchRange - 1);
+ jf = WithinRange(jf, kBranchRange);
+ return Append(code, k, Offset(jt), Offset(jf));
+ }
+
+ CHECK_EQ(kNullNode, jf) << "Non-branch instructions shouldn't provide jf";
+ if (BPF_CLASS(code) == BPF_RET) {
+ CHECK_EQ(kNullNode, jt) << "Return instructions shouldn't provide jt";
+ } else {
+ // For non-branch/non-return instructions, execution always
+ // proceeds to the next instruction; so we need to arrange for
+ // that to be |jt|.
+ jt = WithinRange(jt, 0);
+ CHECK_EQ(0U, Offset(jt)) << "ICE: Failed to setup next instruction";
+ }
+ return Append(code, k, 0, 0);
+}
+
+CodeGen::Node CodeGen::WithinRange(Node target, size_t range) {
+ // Just use |target| if it's already within range.
+ if (Offset(target) <= range) {
+ return target;
+ }
+
+ // Alternatively, look for an equivalent instruction within range.
+ if (Offset(equivalent_.at(target)) <= range) {
+ return equivalent_.at(target);
+ }
+
+ // Otherwise, fall back to emitting a jump instruction.
+ Node jump = Append(BPF_JMP | BPF_JA, Offset(target), 0, 0);
+ equivalent_.at(target) = jump;
+ return jump;
+}
+
+CodeGen::Node CodeGen::Append(uint16_t code, uint32_t k, size_t jt, size_t jf) {
+ if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_JA) {
+ CHECK_LE(jt, kBranchRange);
+ CHECK_LE(jf, kBranchRange);
+ } else {
+ CHECK_EQ(0U, jt);
+ CHECK_EQ(0U, jf);
+ }
+
+ CHECK_LT(program_.size(), static_cast<size_t>(BPF_MAXINSNS));
+ CHECK_EQ(program_.size(), equivalent_.size());
+
+ Node res = program_.size();
+ program_.push_back(sock_filter{
+ code, static_cast<uint8_t>(jt), static_cast<uint8_t>(jf), k});
+ equivalent_.push_back(res);
+ return res;
+}
+
+size_t CodeGen::Offset(Node target) const {
+ CHECK_LT(target, program_.size()) << "Bogus offset target node";
+ return (program_.size() - 1) - target;
+}
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/bpf_dsl/codegen.h b/libchrome/sandbox/linux/bpf_dsl/codegen.h
new file mode 100644
index 0000000..3fc3f35
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/codegen.h
@@ -0,0 +1,119 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_BPF_DSL_CODEGEN_H__
+#define SANDBOX_LINUX_BPF_DSL_CODEGEN_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <tuple>
+#include <vector>
+
+#include "base/macros.h"
+#include "sandbox/sandbox_export.h"
+
+struct sock_filter;
+
+namespace sandbox {
+
+// The code generator implements a basic assembler that can convert a
+// graph of BPF instructions into a well-formed array of BPF
+// instructions. Most notably, it ensures that jumps are always
+// forward and don't exceed the limit of 255 instructions imposed by
+// the instruction set.
+//
+// Callers would typically create a new CodeGen object and then use it
+// to build a DAG of instruction nodes. They'll eventually call
+// Compile() to convert this DAG to a Program.
+//
+// CodeGen gen;
+// CodeGen::Node allow, branch, dag;
+//
+// allow =
+// gen.MakeInstruction(BPF_RET+BPF_K,
+// ErrorCode(ErrorCode::ERR_ALLOWED).err()));
+// branch =
+// gen.MakeInstruction(BPF_JMP+BPF_EQ+BPF_K, __NR_getpid,
+// Trap(GetPidHandler, NULL), allow);
+// dag =
+// gen.MakeInstruction(BPF_LD+BPF_W+BPF_ABS,
+// offsetof(struct arch_seccomp_data, nr), branch);
+//
+// // Simplified code follows; in practice, it is important to avoid calling
+// // any C++ destructors after starting the sandbox.
+// CodeGen::Program program = gen.Compile(dag);
+// const struct sock_fprog prog = {
+// static_cast<unsigned short>(program.size()), &program[0] };
+// prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
+//
+class SANDBOX_EXPORT CodeGen {
+ public:
+ // A vector of BPF instructions that need to be installed as a filter
+ // program in the kernel.
+ typedef std::vector<struct sock_filter> Program;
+
+ // Node represents a node within the instruction DAG being compiled.
+ using Node = Program::size_type;
+
+ // kNullNode represents the "null" node; i.e., the reserved node
+ // value guaranteed to not equal any actual nodes.
+ static const Node kNullNode = -1;
+
+ CodeGen();
+ ~CodeGen();
+
+ // MakeInstruction creates a node representing the specified
+  // instruction, or returns an existing equivalent node if one
+ // exists. For details on the possible parameters refer to
+ // https://www.kernel.org/doc/Documentation/networking/filter.txt.
+ // TODO(mdempsky): Reconsider using default arguments here.
+ Node MakeInstruction(uint16_t code,
+ uint32_t k,
+ Node jt = kNullNode,
+ Node jf = kNullNode);
+
+ // Compile linearizes the instruction DAG rooted at |head| into a
+ // program that can be executed by a BPF virtual machine.
+ Program Compile(Node head);
+
+ private:
+ using MemoKey = std::tuple<uint16_t, uint32_t, Node, Node>;
+
+ // AppendInstruction adds a new instruction, ensuring that |jt| and
+ // |jf| are within range as necessary for |code|.
+ Node AppendInstruction(uint16_t code, uint32_t k, Node jt, Node jf);
+
+ // WithinRange returns a node equivalent to |next| that is at most
+ // |range| instructions away from the (logical) beginning of the
+ // program.
+ Node WithinRange(Node next, size_t range);
+
+ // Append appends a new instruction to the physical end (i.e.,
+ // logical beginning) of |program_|.
+ Node Append(uint16_t code, uint32_t k, size_t jt, size_t jf);
+
+ // Offset returns how many instructions exist in |program_| after |target|.
+ size_t Offset(Node target) const;
+
+ // NOTE: program_ is the compiled program in *reverse*, so that
+ // indices remain stable as we add instructions.
+ Program program_;
+
+ // equivalent_ stores the most recent semantically-equivalent node for each
+ // instruction in program_. A node is defined as semantically-equivalent to N
+ // if it has the same instruction code and constant as N and its successor
+ // nodes (if any) are semantically-equivalent to N's successor nodes, or
+ // if it's an unconditional jump to a node semantically-equivalent to N.
+ std::vector<Node> equivalent_;
+
+ std::map<MemoKey, Node> memos_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeGen);
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_BPF_DSL_CODEGEN_H__
diff --git a/libchrome/sandbox/linux/bpf_dsl/codegen_unittest.cc b/libchrome/sandbox/linux/bpf_dsl/codegen_unittest.cc
new file mode 100644
index 0000000..56a0dd2
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/codegen_unittest.cc
@@ -0,0 +1,404 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/bpf_dsl/codegen.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+#include <utility>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/md5.h"
+#include "base/strings/string_piece.h"
+#include "sandbox/linux/system_headers/linux_filter.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace sandbox {
+namespace {
+
+// Hash provides an abstraction for building "hash trees" from BPF
+// control flow graphs, and efficiently identifying equivalent graphs.
+//
+// For simplicity, we use MD5, because base happens to provide a
+// convenient API for its use. However, any collision-resistant hash
+// should suffice.
+class Hash {
+ public:
+ static const Hash kZero;
+
+ Hash() : digest_() {}
+
+ Hash(uint16_t code,
+ uint32_t k,
+ const Hash& jt = kZero,
+ const Hash& jf = kZero)
+ : digest_() {
+ base::MD5Context ctx;
+ base::MD5Init(&ctx);
+ HashValue(&ctx, code);
+ HashValue(&ctx, k);
+ HashValue(&ctx, jt);
+ HashValue(&ctx, jf);
+ base::MD5Final(&digest_, &ctx);
+ }
+
+ Hash(const Hash& hash) = default;
+ Hash& operator=(const Hash& rhs) = default;
+
+ friend bool operator==(const Hash& lhs, const Hash& rhs) {
+ return lhs.Base16() == rhs.Base16();
+ }
+ friend bool operator!=(const Hash& lhs, const Hash& rhs) {
+ return !(lhs == rhs);
+ }
+
+ private:
+ template <typename T>
+ void HashValue(base::MD5Context* ctx, const T& value) {
+ base::MD5Update(ctx,
+ base::StringPiece(reinterpret_cast<const char*>(&value),
+ sizeof(value)));
+ }
+
+ std::string Base16() const {
+ return base::MD5DigestToBase16(digest_);
+ }
+
+ base::MD5Digest digest_;
+};
+
+const Hash Hash::kZero;
+
+// Sanity check that equality and inequality work on Hash as required.
+TEST(CodeGen, HashSanity) {
+ std::vector<Hash> hashes;
+
+ // Push a bunch of logically distinct hashes.
+ hashes.push_back(Hash::kZero);
+ for (int i = 0; i < 4; ++i) {
+ hashes.push_back(Hash(i & 1, i & 2));
+ }
+ for (int i = 0; i < 16; ++i) {
+ hashes.push_back(Hash(i & 1, i & 2, Hash(i & 4, i & 8)));
+ }
+ for (int i = 0; i < 64; ++i) {
+ hashes.push_back(
+ Hash(i & 1, i & 2, Hash(i & 4, i & 8), Hash(i & 16, i & 32)));
+ }
+
+ for (const Hash& a : hashes) {
+ for (const Hash& b : hashes) {
+ // Hashes should equal themselves, but not equal all others.
+ if (&a == &b) {
+ EXPECT_EQ(a, b);
+ } else {
+ EXPECT_NE(a, b);
+ }
+ }
+ }
+}
+
+// ProgramTest provides a fixture for compiling sample
+// programs with CodeGen and verifying the linearized output matches
+// the input DAG.
+class ProgramTest : public ::testing::Test {
+ protected:
+ ProgramTest() : gen_(), node_hashes_() {}
+
+  // MakeInstruction calls CodeGen::MakeInstruction() and associates
+ // the returned address with a hash of the instruction.
+ CodeGen::Node MakeInstruction(uint16_t code,
+ uint32_t k,
+ CodeGen::Node jt = CodeGen::kNullNode,
+ CodeGen::Node jf = CodeGen::kNullNode) {
+ CodeGen::Node res = gen_.MakeInstruction(code, k, jt, jf);
+ EXPECT_NE(CodeGen::kNullNode, res);
+
+ Hash digest(code, k, Lookup(jt), Lookup(jf));
+ auto it = node_hashes_.insert(std::make_pair(res, digest));
+ EXPECT_EQ(digest, it.first->second);
+
+ return res;
+ }
+
+ // RunTest compiles the program and verifies that the output matches
+ // what is expected. It should be called at the end of each program
+ // test case.
+ void RunTest(CodeGen::Node head) {
+ // Compile the program
+ CodeGen::Program program = gen_.Compile(head);
+
+ // Walk the program backwards, and compute the hash for each instruction.
+ std::vector<Hash> prog_hashes(program.size());
+ for (size_t i = program.size(); i > 0; --i) {
+ const sock_filter& insn = program.at(i - 1);
+ Hash& hash = prog_hashes.at(i - 1);
+
+ if (BPF_CLASS(insn.code) == BPF_JMP) {
+ if (BPF_OP(insn.code) == BPF_JA) {
+ // The compiler adds JA instructions as needed, so skip them.
+ hash = prog_hashes.at(i + insn.k);
+ } else {
+ hash = Hash(insn.code, insn.k, prog_hashes.at(i + insn.jt),
+ prog_hashes.at(i + insn.jf));
+ }
+ } else if (BPF_CLASS(insn.code) == BPF_RET) {
+ hash = Hash(insn.code, insn.k);
+ } else {
+ hash = Hash(insn.code, insn.k, prog_hashes.at(i));
+ }
+ }
+
+ EXPECT_EQ(Lookup(head), prog_hashes.at(0));
+ }
+
+ private:
+ const Hash& Lookup(CodeGen::Node next) const {
+ if (next == CodeGen::kNullNode) {
+ return Hash::kZero;
+ }
+ auto it = node_hashes_.find(next);
+ if (it == node_hashes_.end()) {
+ ADD_FAILURE() << "No hash found for node " << next;
+ return Hash::kZero;
+ }
+ return it->second;
+ }
+
+ CodeGen gen_;
+ std::map<CodeGen::Node, Hash> node_hashes_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProgramTest);
+};
+
+TEST_F(ProgramTest, OneInstruction) {
+ // Create the most basic valid BPF program:
+ // RET 0
+ CodeGen::Node head = MakeInstruction(BPF_RET + BPF_K, 0);
+ RunTest(head);
+}
+
+TEST_F(ProgramTest, SimpleBranch) {
+ // Create a program with a single branch:
+ // JUMP if eq 42 then $0 else $1
+ // 0: RET 1
+ // 1: RET 0
+ CodeGen::Node head = MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 42,
+ MakeInstruction(BPF_RET + BPF_K, 1),
+ MakeInstruction(BPF_RET + BPF_K, 0));
+ RunTest(head);
+}
+
+TEST_F(ProgramTest, AtypicalBranch) {
+ // Create a program with a single branch:
+ // JUMP if eq 42 then $0 else $0
+ // 0: RET 0
+
+ CodeGen::Node ret = MakeInstruction(BPF_RET + BPF_K, 0);
+ CodeGen::Node head = MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 42, ret, ret);
+
+ // N.B.: As the instructions in both sides of the branch are already
+ // the same object, we do not actually have any "mergeable" branches.
+ // This needs to be reflected in our choice of "flags".
+ RunTest(head);
+}
+
+TEST_F(ProgramTest, Complex) {
+ // Creates a basic BPF program that we'll use to test some of the code:
+  //    JUMP if eq 42 then $0 else $1     (insn6)
+ // 0: LD 23 (insn5)
+ // 1: JUMP if eq 42 then $2 else $4 (insn4)
+ // 2: JUMP to $3 (insn2)
+ // 3: LD 42 (insn1)
+ // RET 42 (insn0)
+ // 4: LD 42 (insn3)
+ // RET 42 (insn3+)
+ CodeGen::Node insn0 = MakeInstruction(BPF_RET + BPF_K, 42);
+ CodeGen::Node insn1 = MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 42, insn0);
+ CodeGen::Node insn2 = insn1; // Implicit JUMP
+
+ // We explicitly duplicate instructions to test that they're merged.
+ CodeGen::Node insn3 = MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 42,
+ MakeInstruction(BPF_RET + BPF_K, 42));
+ EXPECT_EQ(insn2, insn3);
+
+ CodeGen::Node insn4 =
+ MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 42, insn2, insn3);
+ CodeGen::Node insn5 = MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 23, insn4);
+
+ // Force a basic block that ends in neither a jump instruction nor a return
+ // instruction. It only contains "insn5". This exercises one of the less
+ // common code paths in the topo-sort algorithm.
+ // This also gives us a diamond-shaped pattern in our graph, which stresses
+ // another aspect of the topo-sort algorithm (namely, the ability to
+ // correctly count the incoming branches for subtrees that are not disjunct).
+ CodeGen::Node insn6 =
+ MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 42, insn5, insn4);
+
+ RunTest(insn6);
+}
+
+TEST_F(ProgramTest, ConfusingTails) {
+ // This simple program demonstrates https://crbug.com/351103/
+ // The two "LOAD 0" instructions are blocks of their own. MergeTails() could
+ // be tempted to merge them since they are the same. However, they are
+ // not mergeable because they fall-through to non semantically equivalent
+ // blocks.
+ // Without the fix for this bug, this program should trigger the check in
+ // CompileAndCompare: the serialized graphs from the program and its compiled
+ // version will differ.
+ //
+ // 0) LOAD 1 // ???
+ // 1) if A == 0x1; then JMP 2 else JMP 3
+ // 2) LOAD 0 // System call number
+ // 3) if A == 0x2; then JMP 4 else JMP 5
+ // 4) LOAD 0 // System call number
+ // 5) if A == 0x1; then JMP 6 else JMP 7
+ // 6) RET 0
+ // 7) RET 1
+
+ CodeGen::Node i7 = MakeInstruction(BPF_RET + BPF_K, 1);
+ CodeGen::Node i6 = MakeInstruction(BPF_RET + BPF_K, 0);
+ CodeGen::Node i5 = MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 1, i6, i7);
+ CodeGen::Node i4 = MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 0, i5);
+ CodeGen::Node i3 = MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 2, i4, i5);
+ CodeGen::Node i2 = MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 0, i3);
+ CodeGen::Node i1 = MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 1, i2, i3);
+ CodeGen::Node i0 = MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 1, i1);
+
+ RunTest(i0);
+}
+
+TEST_F(ProgramTest, ConfusingTailsBasic) {
+ // Without the fix for https://crbug.com/351103/, (see
+ // SampleProgramConfusingTails()), this would generate a cyclic graph and
+ // crash as the two "LOAD 0" instructions would get merged.
+ //
+ // 0) LOAD 1 // ???
+ // 1) if A == 0x1; then JMP 2 else JMP 3
+ // 2) LOAD 0 // System call number
+ // 3) if A == 0x2; then JMP 4 else JMP 5
+ // 4) LOAD 0 // System call number
+ // 5) RET 1
+
+ CodeGen::Node i5 = MakeInstruction(BPF_RET + BPF_K, 1);
+ CodeGen::Node i4 = MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 0, i5);
+ CodeGen::Node i3 = MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 2, i4, i5);
+ CodeGen::Node i2 = MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 0, i3);
+ CodeGen::Node i1 = MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 1, i2, i3);
+ CodeGen::Node i0 = MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 1, i1);
+
+ RunTest(i0);
+}
+
+TEST_F(ProgramTest, ConfusingTailsMergeable) {
+ // This is similar to SampleProgramConfusingTails(), except that
+ // instructions 2 and 4 are now RET instructions.
+ // In PointerCompare(), this exercises the path where two blocks are of the
+ // same length and identical and the last instruction is a JMP or RET, so the
+ // following blocks don't need to be looked at and the blocks are mergeable.
+ //
+ // 0) LOAD 1 // ???
+ // 1) if A == 0x1; then JMP 2 else JMP 3
+ // 2) RET 42
+ // 3) if A == 0x2; then JMP 4 else JMP 5
+ // 4) RET 42
+ // 5) if A == 0x1; then JMP 6 else JMP 7
+ // 6) RET 0
+ // 7) RET 1
+
+ CodeGen::Node i7 = MakeInstruction(BPF_RET + BPF_K, 1);
+ CodeGen::Node i6 = MakeInstruction(BPF_RET + BPF_K, 0);
+ CodeGen::Node i5 = MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 1, i6, i7);
+ CodeGen::Node i4 = MakeInstruction(BPF_RET + BPF_K, 42);
+ CodeGen::Node i3 = MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 2, i4, i5);
+ CodeGen::Node i2 = MakeInstruction(BPF_RET + BPF_K, 42);
+ CodeGen::Node i1 = MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 1, i2, i3);
+ CodeGen::Node i0 = MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 1, i1);
+
+ RunTest(i0);
+}
+
+TEST_F(ProgramTest, InstructionFolding) {
+ // Check that simple instructions are folded as expected.
+ CodeGen::Node a = MakeInstruction(BPF_RET + BPF_K, 0);
+ EXPECT_EQ(a, MakeInstruction(BPF_RET + BPF_K, 0));
+ CodeGen::Node b = MakeInstruction(BPF_RET + BPF_K, 1);
+ EXPECT_EQ(a, MakeInstruction(BPF_RET + BPF_K, 0));
+ EXPECT_EQ(b, MakeInstruction(BPF_RET + BPF_K, 1));
+ EXPECT_EQ(b, MakeInstruction(BPF_RET + BPF_K, 1));
+
+ // Check that complex sequences are folded too.
+ CodeGen::Node c =
+ MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 0,
+ MakeInstruction(BPF_JMP + BPF_JSET + BPF_K, 0x100, a, b));
+ EXPECT_EQ(c, MakeInstruction(
+ BPF_LD + BPF_W + BPF_ABS, 0,
+ MakeInstruction(BPF_JMP + BPF_JSET + BPF_K, 0x100, a, b)));
+
+ RunTest(c);
+}
+
+TEST_F(ProgramTest, FarBranches) {
+ // BPF instructions use 8-bit fields for branch offsets, which means
+ // branch targets must be within 255 instructions of the branch
+ // instruction. CodeGen abstracts away this detail by inserting jump
+ // instructions as needed, which we test here by generating programs
+ // that should trigger any interesting boundary conditions.
+
+ // Populate with 260 initial instruction nodes.
+ std::vector<CodeGen::Node> nodes;
+ nodes.push_back(MakeInstruction(BPF_RET + BPF_K, 0));
+ for (size_t i = 1; i < 260; ++i) {
+ nodes.push_back(
+ MakeInstruction(BPF_ALU + BPF_ADD + BPF_K, i, nodes.back()));
+ }
+
+ // Exhaustively test branch offsets near BPF's limits.
+ for (size_t jt = 250; jt < 260; ++jt) {
+ for (size_t jf = 250; jf < 260; ++jf) {
+ nodes.push_back(MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 0,
+ nodes.rbegin()[jt], nodes.rbegin()[jf]));
+ RunTest(nodes.back());
+ }
+ }
+}
+
+TEST_F(ProgramTest, JumpReuse) {
+ // As a code size optimization, we try to reuse jumps when possible
+ // instead of emitting new ones. Here we make sure that optimization
+ // is working as intended.
+ //
+ // NOTE: To simplify testing, we rely on implementation details
+ // about what CodeGen::Node values indicate (i.e., vector indices),
+ // but CodeGen users should treat them as opaque values.
+
+ // Populate with 260 initial instruction nodes.
+ std::vector<CodeGen::Node> nodes;
+ nodes.push_back(MakeInstruction(BPF_RET + BPF_K, 0));
+ for (size_t i = 1; i < 260; ++i) {
+ nodes.push_back(
+ MakeInstruction(BPF_ALU + BPF_ADD + BPF_K, i, nodes.back()));
+ }
+
+ // Branching to nodes[0] and nodes[1] should require 3 new
+ // instructions: two far jumps plus the branch itself.
+ CodeGen::Node one =
+ MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 0, nodes[0], nodes[1]);
+ EXPECT_EQ(nodes.back() + 3, one); // XXX: Implementation detail!
+ RunTest(one);
+
+ // Branching again to the same target nodes should require only one
+ // new instruction, as we can reuse the previous branch's jumps.
+ CodeGen::Node two =
+ MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 1, nodes[0], nodes[1]);
+ EXPECT_EQ(one + 1, two); // XXX: Implementation detail!
+ RunTest(two);
+}
+
+} // namespace
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/bpf_dsl/cons.h b/libchrome/sandbox/linux/bpf_dsl/cons.h
new file mode 100644
index 0000000..07ac3df
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/cons.h
@@ -0,0 +1,137 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_BPF_DSL_CONS_H_
+#define SANDBOX_LINUX_BPF_DSL_CONS_H_
+
+#include <memory>
+
+#include "base/macros.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+namespace cons {
+
+// Namespace cons provides an abstraction for immutable "cons list"
+// data structures as commonly provided in functional programming
+// languages like Lisp or Haskell.
+//
+// A cons list is a linked list consisting of "cells", each of which
+// have a "head" and a "tail" element. A cell's head element contains
+// a user specified value, while the tail element contains a (possibly
+// null) pointer to another cell.
+//
+// An empty list (idiomatically referred to as "nil") can be
+// constructed as "cons::List<Foo>()" or simply as "nullptr" if Foo
+// can be inferred from context (e.g., calling a function that has a
+// "cons::List<Foo>" parameter).
+//
+// Existing lists (including empty lists) can be extended by
+// prepending new values to the front using the "Cons(head, tail)"
+// function, which will allocate a new cons cell. Notably, cons lists
+// support creating multiple lists that share a common tail sequence.
+//
+// Lastly, lists support iteration via C++11's range-based for loop
+// construct.
+//
+// Examples:
+//
+// // basic construction
+// const cons::List<char> kNil = nullptr;
+// cons::List<char> ba = Cons('b', Cons('a', kNil));
+//
+// // common tail sequence
+// cons::List<char> cba = Cons('c', ba);
+// cons::List<char> dba = Cons('d', ba);
+//
+// // iteration
+// for (const char& ch : cba) {
+// // iterates 'c', 'b', 'a'
+// }
+// for (const char& ch : dba) {
+// // iterates 'd', 'b', 'a'
+// }
+
+// Forward declarations.
+template <typename T>
+class Cell;
+template <typename T>
+class ListIterator;
+
+// List represents a (possibly null) pointer to a cons cell.
+template <typename T>
+using List = std::shared_ptr<const Cell<T>>;
+
+// Cons extends a cons list by prepending a new value to the front.
+template <typename T>
+List<T> Cons(const T& head, List<T> tail) {
+ return std::make_shared<Cell<T>>(head, std::move(tail));
+}
+
+// Cell represents an individual "cons cell" within a cons list.
+template <typename T>
+class Cell {
+ public:
+ Cell(const T& head, List<T> tail) : head_(head), tail_(std::move(tail)) {}
+
+ // Head returns this cell's head element.
+ const T& head() const { return head_; }
+
+ // Tail returns this cell's tail element.
+ const List<T>& tail() const { return tail_; }
+
+ private:
+ T head_;
+ List<T> tail_;
+
+ DISALLOW_COPY_AND_ASSIGN(Cell);
+};
+
+// Begin returns a list iterator pointing to the first element of the
+// cons list. It's provided to support range-based for loops.
+template <typename T>
+ListIterator<T> begin(const List<T>& list) {
+ return ListIterator<T>(list);
+}
+
+// End returns a list iterator pointing to the "past-the-end" element
+// of the cons list (i.e., nil). It's provided to support range-based
+// for loops.
+template <typename T>
+ListIterator<T> end(const List<T>& list) {
+ return ListIterator<T>();
+}
+
+// ListIterator provides C++ forward iterator semantics for traversing
+// a cons list.
+template <typename T>
+class ListIterator {
+ public:
+ ListIterator() : list_() {}
+ explicit ListIterator(const List<T>& list) : list_(list) {}
+
+ const T& operator*() const { return list_->head(); }
+
+ ListIterator& operator++() {
+ list_ = list_->tail();
+ return *this;
+ }
+
+ friend bool operator==(const ListIterator& lhs, const ListIterator& rhs) {
+ return lhs.list_ == rhs.list_;
+ }
+
+ private:
+ List<T> list_;
+};
+
+template <typename T>
+bool operator!=(const ListIterator<T>& lhs, const ListIterator<T>& rhs) {
+ return !(lhs == rhs);
+}
+
+} // namespace cons
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_BPF_DSL_CONS_H_
diff --git a/libchrome/sandbox/linux/bpf_dsl/dump_bpf.cc b/libchrome/sandbox/linux/bpf_dsl/dump_bpf.cc
new file mode 100644
index 0000000..2edf592
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/dump_bpf.cc
@@ -0,0 +1,159 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/bpf_dsl/dump_bpf.h"
+
+#include <inttypes.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <string>
+
+#include "base/strings/stringprintf.h"
+#include "sandbox/linux/bpf_dsl/codegen.h"
+#include "sandbox/linux/bpf_dsl/seccomp_macros.h"
+#include "sandbox/linux/bpf_dsl/trap_registry.h"
+#include "sandbox/linux/system_headers/linux_filter.h"
+#include "sandbox/linux/system_headers/linux_seccomp.h"
+
+namespace sandbox {
+namespace bpf_dsl {
+
+namespace {
+
+const char* AluOpToken(uint32_t code) {
+ switch (BPF_OP(code)) {
+ case BPF_ADD:
+ return "+";
+ case BPF_SUB:
+ return "-";
+ case BPF_MUL:
+ return "*";
+ case BPF_DIV:
+ return "/";
+ case BPF_MOD:
+ return "%";
+ case BPF_OR:
+ return "|";
+ case BPF_XOR:
+ return "^";
+ case BPF_AND:
+ return "&";
+ case BPF_LSH:
+ return "<<";
+ case BPF_RSH:
+ return ">>";
+ default:
+ return "???";
+ }
+}
+
+const char* JmpOpToken(uint32_t code) {
+ switch (BPF_OP(code)) {
+ case BPF_JSET:
+ return "&";
+ case BPF_JEQ:
+ return "==";
+ case BPF_JGE:
+ return ">=";
+ default:
+ return "???";
+ }
+}
+
+const char* DataOffsetName(size_t off) {
+ switch (off) {
+ case SECCOMP_NR_IDX:
+ return "System call number";
+ case SECCOMP_ARCH_IDX:
+ return "Architecture";
+ case SECCOMP_IP_LSB_IDX:
+ return "Instruction pointer (LSB)";
+ case SECCOMP_IP_MSB_IDX:
+ return "Instruction pointer (MSB)";
+ default:
+ return "???";
+ }
+}
+
+void AppendInstruction(std::string* dst, size_t pc, const sock_filter& insn) {
+ base::StringAppendF(dst, "%3zu) ", pc);
+ switch (BPF_CLASS(insn.code)) {
+ case BPF_LD:
+ if (insn.code == BPF_LD + BPF_W + BPF_ABS) {
+ base::StringAppendF(dst, "LOAD %" PRIu32 " // ", insn.k);
+ size_t maybe_argno =
+ (insn.k - offsetof(struct arch_seccomp_data, args)) /
+ sizeof(uint64_t);
+ if (maybe_argno < 6 && insn.k == SECCOMP_ARG_LSB_IDX(maybe_argno)) {
+ base::StringAppendF(dst, "Argument %zu (LSB)\n", maybe_argno);
+ } else if (maybe_argno < 6 &&
+ insn.k == SECCOMP_ARG_MSB_IDX(maybe_argno)) {
+ base::StringAppendF(dst, "Argument %zu (MSB)\n", maybe_argno);
+ } else {
+ base::StringAppendF(dst, "%s\n", DataOffsetName(insn.k));
+ }
+ } else {
+ base::StringAppendF(dst, "Load ???\n");
+ }
+ break;
+ case BPF_JMP:
+ if (BPF_OP(insn.code) == BPF_JA) {
+ base::StringAppendF(dst, "JMP %zu\n", pc + insn.k + 1);
+ } else {
+ base::StringAppendF(
+ dst, "if A %s 0x%" PRIx32 "; then JMP %zu else JMP %zu\n",
+ JmpOpToken(insn.code), insn.k, pc + insn.jt + 1, pc + insn.jf + 1);
+ }
+ break;
+ case BPF_RET:
+ base::StringAppendF(dst, "RET 0x%" PRIx32 " // ", insn.k);
+ if ((insn.k & SECCOMP_RET_ACTION) == SECCOMP_RET_TRAP) {
+ base::StringAppendF(dst, "Trap #%" PRIu32 "\n",
+ insn.k & SECCOMP_RET_DATA);
+ } else if ((insn.k & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) {
+ base::StringAppendF(dst, "errno = %" PRIu32 "\n",
+ insn.k & SECCOMP_RET_DATA);
+ } else if ((insn.k & SECCOMP_RET_ACTION) == SECCOMP_RET_TRACE) {
+ base::StringAppendF(dst, "Trace #%" PRIu32 "\n",
+ insn.k & SECCOMP_RET_DATA);
+ } else if (insn.k == SECCOMP_RET_ALLOW) {
+ base::StringAppendF(dst, "Allowed\n");
+ } else if (insn.k == SECCOMP_RET_KILL) {
+ base::StringAppendF(dst, "Kill\n");
+ } else {
+ base::StringAppendF(dst, "???\n");
+ }
+ break;
+ case BPF_ALU:
+ if (BPF_OP(insn.code) == BPF_NEG) {
+ base::StringAppendF(dst, "A := -A\n");
+ } else {
+ base::StringAppendF(dst, "A := A %s 0x%" PRIx32 "\n",
+ AluOpToken(insn.code), insn.k);
+ }
+ break;
+ default:
+ base::StringAppendF(dst, "???\n");
+ break;
+ }
+}
+
+} // namespace
+
+void DumpBPF::PrintProgram(const CodeGen::Program& program) {
+ fputs(StringPrintProgram(program).c_str(), stderr);
+}
+
+std::string DumpBPF::StringPrintProgram(const CodeGen::Program& program) {
+ std::string res;
+ for (size_t i = 0; i < program.size(); i++) {
+ AppendInstruction(&res, i + 1, program[i]);
+ }
+ return res;
+}
+
+} // namespace bpf_dsl
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/bpf_dsl/dump_bpf.h b/libchrome/sandbox/linux/bpf_dsl/dump_bpf.h
new file mode 100644
index 0000000..a7db589
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/dump_bpf.h
@@ -0,0 +1,24 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "sandbox/linux/bpf_dsl/codegen.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+namespace bpf_dsl {
+
+class SANDBOX_EXPORT DumpBPF {
+ public:
+ // PrintProgram writes |program| in a human-readable format to stderr.
+ static void PrintProgram(const CodeGen::Program& program);
+
+ // StringPrintProgram writes |program| in a human-readable format to
+ // a std::string.
+ static std::string StringPrintProgram(const CodeGen::Program& program);
+};
+
+} // namespace bpf_dsl
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/bpf_dsl/errorcode.h b/libchrome/sandbox/linux/bpf_dsl/errorcode.h
new file mode 100644
index 0000000..611c27d
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/errorcode.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_BPF_DSL_ERRORCODE_H__
+#define SANDBOX_LINUX_BPF_DSL_ERRORCODE_H__
+
+#include "base/macros.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+namespace bpf_dsl {
+
+// TODO(mdempsky): Find a proper home for ERR_{MIN,MAX}_ERRNO and
+// remove this header.
+class SANDBOX_EXPORT ErrorCode {
+ public:
+  // Bounds on the errno values that a sandbox policy may return.
+  enum {
+    ERR_MIN_ERRNO = 0,
+#if defined(__mips__)
+    // MIPS only supports errno up to 1133
+    ERR_MAX_ERRNO = 1133,
+#else
+    // TODO(markus): Android only supports errno up to 255
+    // (crbug.com/181647).
+    ERR_MAX_ERRNO = 4095,
+#endif
+  };
+
+ private:
+  // Constants-only holder; never meant to be instantiated.
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ErrorCode);
+};
+
+} // namespace bpf_dsl
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_BPF_DSL_ERRORCODE_H__
diff --git a/libchrome/sandbox/linux/bpf_dsl/linux_syscall_ranges.h b/libchrome/sandbox/linux/bpf_dsl/linux_syscall_ranges.h
new file mode 100644
index 0000000..a747770
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/linux_syscall_ranges.h
@@ -0,0 +1,57 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_BPF_DSL_LINUX_SYSCALL_RANGES_H_
+#define SANDBOX_LINUX_BPF_DSL_LINUX_SYSCALL_RANGES_H_
+
+#if defined(__x86_64__)
+
+// NOTE(review): 1024u appears to be a generous upper bound used for
+// enumeration, not the exact syscall count — confirm against the kernel.
+#define MIN_SYSCALL 0u
+#define MAX_PUBLIC_SYSCALL 1024u
+#define MAX_SYSCALL MAX_PUBLIC_SYSCALL
+
+#elif defined(__i386__)
+
+// NOTE(review): same generous 1024u upper bound as x86_64 above.
+#define MIN_SYSCALL 0u
+#define MAX_PUBLIC_SYSCALL 1024u
+#define MAX_SYSCALL MAX_PUBLIC_SYSCALL
+
+#elif defined(__arm__) && (defined(__thumb__) || defined(__ARM_EABI__))
+
+// ARM EABI includes "ARM private" system calls starting at |__ARM_NR_BASE|,
+// and a "ghost syscall private to the kernel", cmpxchg,
+// at |__ARM_NR_BASE+0x00fff0|.
+// See </arch/arm/include/asm/unistd.h> in the Linux kernel.
+
+// __NR_SYSCALL_BASE is 0 in thumb and ARM EABI.
+#define MIN_SYSCALL 0u
+#define MAX_PUBLIC_SYSCALL (MIN_SYSCALL + 1024u)
+// __ARM_NR_BASE is __NR_SYSCALL_BASE + 0xf0000u
+#define MIN_PRIVATE_SYSCALL 0xf0000u
+#define MAX_PRIVATE_SYSCALL (MIN_PRIVATE_SYSCALL + 16u)
+#define MIN_GHOST_SYSCALL (MIN_PRIVATE_SYSCALL + 0xfff0u)
+#define MAX_SYSCALL (MIN_GHOST_SYSCALL + 4u)
+
+#elif defined(__mips__) && (_MIPS_SIM == _ABIO32)
+
+#include <asm/unistd.h> // for __NR_O32_Linux and __NR_Linux_syscalls
+#define MIN_SYSCALL __NR_O32_Linux
+#define MAX_PUBLIC_SYSCALL (MIN_SYSCALL + __NR_Linux_syscalls)
+#define MAX_SYSCALL MAX_PUBLIC_SYSCALL
+
+#elif defined(__mips__) && (_MIPS_SIM == _ABI64)
+
+#error "Add support to header file"
+
+#elif defined(__aarch64__)
+
+#define MIN_SYSCALL 0u
+#define MAX_PUBLIC_SYSCALL 279u
+#define MAX_SYSCALL MAX_PUBLIC_SYSCALL
+
+#else
+#error "Unsupported architecture"
+#endif
+
+#endif // SANDBOX_LINUX_BPF_DSL_LINUX_SYSCALL_RANGES_H_
diff --git a/libchrome/sandbox/linux/bpf_dsl/policy.cc b/libchrome/sandbox/linux/bpf_dsl/policy.cc
new file mode 100644
index 0000000..c20edc6
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/policy.cc
@@ -0,0 +1,19 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/bpf_dsl/policy.h"
+
+#include <errno.h>
+
+#include "sandbox/linux/bpf_dsl/bpf_dsl.h"
+
+namespace sandbox {
+namespace bpf_dsl {
+
+// Default handling for invalid system call numbers: fail with ENOSYS,
+// the same error an unimplemented system call would produce.
+ResultExpr Policy::InvalidSyscall() const {
+  return Error(ENOSYS);
+}
+
+} // namespace bpf_dsl
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/bpf_dsl/policy.h b/libchrome/sandbox/linux/bpf_dsl/policy.h
new file mode 100644
index 0000000..6c67589
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/policy.h
@@ -0,0 +1,37 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_BPF_DSL_POLICY_H_
+#define SANDBOX_LINUX_BPF_DSL_POLICY_H_
+
+#include "base/macros.h"
+#include "sandbox/linux/bpf_dsl/bpf_dsl_forward.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+namespace bpf_dsl {
+
+// Interface to implement to define a BPF sandbox policy.
+class SANDBOX_EXPORT Policy {
+ public:
+ Policy() {}
+ virtual ~Policy() {}
+
+ // User extension point for writing custom sandbox policies.
+ // The returned ResultExpr will control how the kernel responds to the
+ // specified system call number.
+ virtual ResultExpr EvaluateSyscall(int sysno) const = 0;
+
+ // Optional overload for specifying alternate behavior for invalid
+ // system calls. The default is to return ENOSYS.
+ virtual ResultExpr InvalidSyscall() const;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Policy);
+};
+
+} // namespace bpf_dsl
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_BPF_DSL_POLICY_H_
diff --git a/libchrome/sandbox/linux/bpf_dsl/policy_compiler.cc b/libchrome/sandbox/linux/bpf_dsl/policy_compiler.cc
new file mode 100644
index 0000000..7ce517a
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/policy_compiler.cc
@@ -0,0 +1,466 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/bpf_dsl/policy_compiler.h"
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/syscall.h>
+
+#include <limits>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "sandbox/linux/bpf_dsl/bpf_dsl.h"
+#include "sandbox/linux/bpf_dsl/bpf_dsl_impl.h"
+#include "sandbox/linux/bpf_dsl/codegen.h"
+#include "sandbox/linux/bpf_dsl/policy.h"
+#include "sandbox/linux/bpf_dsl/seccomp_macros.h"
+#include "sandbox/linux/bpf_dsl/syscall_set.h"
+#include "sandbox/linux/system_headers/linux_filter.h"
+#include "sandbox/linux/system_headers/linux_seccomp.h"
+#include "sandbox/linux/system_headers/linux_syscalls.h"
+
+namespace sandbox {
+namespace bpf_dsl {
+
+namespace {
+
+// True on the x86 family, where extra syscall-number validation applies
+// (see CheckSyscallNumber).
+#if defined(__i386__) || defined(__x86_64__)
+const bool kIsIntel = true;
+#else
+const bool kIsIntel = false;
+#endif
+// True when building for the x32 ABI (x86-64 with 32-bit pointers).
+#if defined(__x86_64__) && defined(__ILP32__)
+const bool kIsX32 = true;
+#else
+const bool kIsX32 = false;
+#endif
+
+// System calls that a policy must unconditionally allow before it may use
+// UnsafeTrap(); enforced in PolicyCompiler::Compile().
+const int kSyscallsRequiredForUnsafeTraps[] = {
+    __NR_rt_sigprocmask,
+    __NR_rt_sigreturn,
+#if defined(__NR_sigprocmask)
+    __NR_sigprocmask,
+#endif
+#if defined(__NR_sigreturn)
+    __NR_sigreturn,
+#endif
+};
+
+// Returns true iff exactly one bit of |x| is set (i.e., x is a nonzero
+// power of two).
+bool HasExactlyOneBit(uint64_t x) {
+  // Common trick; e.g., see http://stackoverflow.com/a/108329.
+  return x != 0 && (x & (x - 1)) == 0;
+}
+
+// Default PanicFunc: ignores |error| and terminates the process.
+ResultExpr DefaultPanic(const char* error) {
+  return Kill();
+}
+
+// A Trap() handler that returns an "errno" value. The value is encoded
+// in the "aux" parameter.
+intptr_t ReturnErrno(const struct arch_seccomp_data&, void* aux) {
+  // TrapFnc functions report error by following the native kernel convention
+  // of returning an exit code in the range of -1..-4096. They do not try to
+  // set errno themselves. The glibc wrapper that triggered the SIGSYS will
+  // ultimately do so for us.
+  int err = reinterpret_cast<intptr_t>(aux) & SECCOMP_RET_DATA;
+  return -err;
+}
+
+// Returns true if any result expression of |policy| — including the one
+// for invalid system calls — contains an UnsafeTrap().
+bool HasUnsafeTraps(const Policy* policy) {
+  DCHECK(policy);
+  for (uint32_t sysnum : SyscallSet::ValidOnly()) {
+    if (policy->EvaluateSyscall(sysnum)->HasUnsafeTraps()) {
+      return true;
+    }
+  }
+  return policy->InvalidSyscall()->HasUnsafeTraps();
+}
+
+} // namespace
+
+// One entry of the jump table: all system call numbers from |from| up to
+// (but excluding) the next Range's |from| compile to |node|. Ranges are
+// produced by FindRanges() in ascending order of |from|, with no gaps.
+struct PolicyCompiler::Range {
+  uint32_t from;
+  CodeGen::Node node;
+};
+
+// The constructor pre-scans |policy| for unsafe traps (this evaluates every
+// valid system call once via HasUnsafeTraps) so that Compile() knows
+// whether to emit the escape-hatch prologue.
+PolicyCompiler::PolicyCompiler(const Policy* policy, TrapRegistry* registry)
+    : policy_(policy),
+      registry_(registry),
+      escapepc_(0),
+      panic_func_(DefaultPanic),
+      gen_(),
+      has_unsafe_traps_(HasUnsafeTraps(policy_)) {
+  DCHECK(policy);
+}
+
+PolicyCompiler::~PolicyCompiler() {
+}
+
+// Compile() validates policy invariants (invalid syscalls must be denied;
+// unsafe-trap policies must allow the required signal syscalls and have an
+// escape PC set), then assembles and returns the BPF filter program.
+CodeGen::Program PolicyCompiler::Compile() {
+  CHECK(policy_->InvalidSyscall()->IsDeny())
+      << "Policies should deny invalid system calls";
+
+  // If our BPF program has unsafe traps, enable support for them.
+  if (has_unsafe_traps_) {
+    CHECK_NE(0U, escapepc_) << "UnsafeTrap() requires a valid escape PC";
+
+    for (int sysnum : kSyscallsRequiredForUnsafeTraps) {
+      CHECK(policy_->EvaluateSyscall(sysnum)->IsAllow())
+          << "Policies that use UnsafeTrap() must unconditionally allow all "
+             "required system calls";
+    }
+
+    CHECK(registry_->EnableUnsafeTraps())
+        << "We'd rather die than enable unsafe traps";
+  }
+
+  // Assemble the BPF filter program.
+  return gen_.Compile(AssemblePolicy());
+}
+
+// Records the 64-bit instruction pointer that MaybeAddEscapeHatch() will
+// exempt from filtering. Must be set to a non-zero value before Compile()
+// when the policy uses UnsafeTrap().
+void PolicyCompiler::DangerousSetEscapePC(uint64_t escapepc) {
+  escapepc_ = escapepc;
+}
+
+// Replaces the handler invoked for ABI violations; the default
+// (DefaultPanic) kills the process.
+void PolicyCompiler::SetPanicFunc(PanicFunc panic_func) {
+  panic_func_ = panic_func;
+}
+
+CodeGen::Node PolicyCompiler::AssemblePolicy() {
+  // A compiled policy consists of three logical parts:
+  //   1. Check that the "arch" field matches the expected architecture.
+  //   2. If the policy involves unsafe traps, check if the syscall was
+  //      invoked by Syscall::Call, and then allow it unconditionally.
+  //   3. Check the system call number and jump to the appropriate compiled
+  //      system call policy number.
+  // Note the nesting: CheckArch runs first, then the escape hatch, then
+  // the syscall dispatch.
+  return CheckArch(MaybeAddEscapeHatch(DispatchSyscall()));
+}
+
+// Emits code that loads the seccomp "arch" field and compares it against
+// SECCOMP_ARCH; on match control continues at |passed|, otherwise the
+// compiled result of the panic handler runs.
+CodeGen::Node PolicyCompiler::CheckArch(CodeGen::Node passed) {
+  // If the architecture doesn't match SECCOMP_ARCH, disallow the
+  // system call.
+  return gen_.MakeInstruction(
+      BPF_LD + BPF_W + BPF_ABS, SECCOMP_ARCH_IDX,
+      gen_.MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, SECCOMP_ARCH, passed,
+                           CompileResult(panic_func_(
+                               "Invalid audit architecture in BPF filter"))));
+}
+
+CodeGen::Node PolicyCompiler::MaybeAddEscapeHatch(CodeGen::Node rest) {
+  // If no unsafe traps, then simply return |rest|.
+  if (!has_unsafe_traps_) {
+    return rest;
+  }
+
+  // We already enabled unsafe traps in Compile, but enable them again to give
+  // the trap registry a second chance to complain before we add the backdoor.
+  CHECK(registry_->EnableUnsafeTraps());
+
+  // Allow system calls, if they originate from our magic return address.
+  const uint32_t lopc = static_cast<uint32_t>(escapepc_);
+  const uint32_t hipc = static_cast<uint32_t>(escapepc_ >> 32);
+
+  // BPF cannot do native 64-bit comparisons, so we have to compare
+  // both 32-bit halves of the instruction pointer. If they match what
+  // we expect, we return ERR_ALLOWED. If either or both don't match,
+  // we continue evaluating the rest of the sandbox policy.
+  //
+  // For simplicity, we check the full 64-bit instruction pointer even
+  // on 32-bit architectures.
+  return gen_.MakeInstruction(
+      BPF_LD + BPF_W + BPF_ABS, SECCOMP_IP_LSB_IDX,
+      gen_.MakeInstruction(
+          BPF_JMP + BPF_JEQ + BPF_K, lopc,
+          gen_.MakeInstruction(
+              BPF_LD + BPF_W + BPF_ABS, SECCOMP_IP_MSB_IDX,
+              gen_.MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, hipc,
+                                   CompileResult(Allow()), rest)),
+          rest));
+}
+
+// Builds the body of the filter: a binary-search jump table over the
+// per-syscall ranges, entered after loading and validating the syscall
+// number.
+CodeGen::Node PolicyCompiler::DispatchSyscall() {
+  // Evaluate all possible system calls and group their Nodes into
+  // ranges of identical codes.
+  Ranges ranges;
+  FindRanges(&ranges);
+
+  // Compile the system call ranges to an optimized BPF jumptable
+  CodeGen::Node jumptable = AssembleJumpTable(ranges.begin(), ranges.end());
+
+  // Grab the system call number, so that we can check it and then
+  // execute the jump table.
+  return gen_.MakeInstruction(
+      BPF_LD + BPF_W + BPF_ABS, SECCOMP_NR_IDX, CheckSyscallNumber(jumptable));
+}
+
+// Validates the syscall number already loaded in register A. On Intel,
+// bit 30 distinguishes the x32 ABI from i386/x86-64; mixing ABIs is
+// treated as an ABI violation and routed to the panic handler.
+CodeGen::Node PolicyCompiler::CheckSyscallNumber(CodeGen::Node passed) {
+  if (kIsIntel) {
+    // On Intel architectures, verify that system call numbers are in the
+    // expected number range.
+    CodeGen::Node invalidX32 =
+        CompileResult(panic_func_("Illegal mixing of system call ABIs"));
+    if (kIsX32) {
+      // The newer x32 API always sets bit 30.
+      return gen_.MakeInstruction(
+          BPF_JMP + BPF_JSET + BPF_K, 0x40000000, passed, invalidX32);
+    } else {
+      // The older i386 and x86-64 APIs clear bit 30 on all system calls.
+      return gen_.MakeInstruction(
+          BPF_JMP + BPF_JSET + BPF_K, 0x40000000, invalidX32, passed);
+    }
+  }
+
+  // TODO(mdempsky): Similar validation for other architectures?
+  return passed;
+}
+
+// Compiles every syscall number's policy and coalesces consecutive
+// numbers that compile to the same node into Range entries.
+void PolicyCompiler::FindRanges(Ranges* ranges) {
+  // Please note that "struct seccomp_data" defines system calls as a signed
+  // int32_t, but BPF instructions always operate on unsigned quantities. We
+  // deal with this disparity by enumerating from MIN_SYSCALL to MAX_SYSCALL,
+  // and then verifying that the rest of the number range (both positive and
+  // negative) all return the same Node.
+  const CodeGen::Node invalid_node = CompileResult(policy_->InvalidSyscall());
+  uint32_t old_sysnum = 0;
+  CodeGen::Node old_node =
+      SyscallSet::IsValid(old_sysnum)
+          ? CompileResult(policy_->EvaluateSyscall(old_sysnum))
+          : invalid_node;
+
+  for (uint32_t sysnum : SyscallSet::All()) {
+    CodeGen::Node node =
+        SyscallSet::IsValid(sysnum)
+            ? CompileResult(policy_->EvaluateSyscall(static_cast<int>(sysnum)))
+            : invalid_node;
+    // N.B., here we rely on CodeGen folding (i.e., returning the same
+    // node value for) identical code sequences, otherwise our jump
+    // table will blow up in size.
+    if (node != old_node) {
+      ranges->push_back(Range{old_sysnum, old_node});
+      old_sysnum = sysnum;
+      old_node = node;
+    }
+  }
+  ranges->push_back(Range{old_sysnum, old_node});
+}
+
+CodeGen::Node PolicyCompiler::AssembleJumpTable(Ranges::const_iterator start,
+                                                Ranges::const_iterator stop) {
+  // We convert the list of system call ranges into a jump table that performs
+  // a binary search over the ranges.
+  // As a sanity check, we need at least one range for us
+  // to be able to build a jump table.
+  CHECK(start < stop) << "Invalid iterator range";
+  const auto n = stop - start;
+  if (n == 1) {
+    // If we have narrowed things down to a single range object, we can
+    // return from the BPF filter program.
+    return start->node;
+  }
+
+  // Pick the range object that is located at the mid point of our list.
+  // We compare our system call number against the lowest valid system call
+  // number in this range object. If our number is lower, it is outside of
+  // this range object. If it is greater or equal, it might be inside.
+  Ranges::const_iterator mid = start + n / 2;
+
+  // Sub-divide the list of ranges and continue recursively.
+  // JGE mid->from: greater-or-equal takes the upper half (jt), less
+  // takes the lower half (jf).
+  CodeGen::Node jf = AssembleJumpTable(start, mid);
+  CodeGen::Node jt = AssembleJumpTable(mid, stop);
+  return gen_.MakeInstruction(BPF_JMP + BPF_JGE + BPF_K, mid->from, jt, jf);
+}
+
+// CompileResult lowers a single result expression into a CodeGen node by
+// delegating to the expression's own Compile() implementation.
+CodeGen::Node PolicyCompiler::CompileResult(const ResultExpr& res) {
+  return res->Compile(this);
+}
+
+// Emits a conditional branch on "(arg & mask) == value" for the |argno|-th
+// system call argument. |argno| is zero-based (0..5), matching the CHECK
+// below and the seccomp_data args array.
+CodeGen::Node PolicyCompiler::MaskedEqual(int argno,
+                                          size_t width,
+                                          uint64_t mask,
+                                          uint64_t value,
+                                          CodeGen::Node passed,
+                                          CodeGen::Node failed) {
+  // Sanity check that arguments make sense.
+  CHECK(argno >= 0 && argno < 6) << "Invalid argument number " << argno;
+  CHECK(width == 4 || width == 8) << "Invalid argument width " << width;
+  CHECK_NE(0U, mask) << "Zero mask is invalid";
+  CHECK_EQ(value, value & mask) << "Value contains masked out bits";
+  if (sizeof(void*) == 4) {
+    CHECK_EQ(4U, width) << "Invalid width on 32-bit platform";
+  }
+  if (width == 4) {
+    CHECK_EQ(0U, mask >> 32) << "Mask exceeds argument size";
+    CHECK_EQ(0U, value >> 32) << "Value exceeds argument size";
+  }
+
+  // We want to emit code to check "(arg & mask) == value" where arg, mask, and
+  // value are 64-bit values, but the BPF machine is only 32-bit. We implement
+  // this by independently testing the upper and lower 32-bits and continuing to
+  // |passed| if both evaluate true, or to |failed| if either evaluate false.
+  return MaskedEqualHalf(argno, width, mask, value, ArgHalf::UPPER,
+                         MaskedEqualHalf(argno, width, mask, value,
+                                         ArgHalf::LOWER, passed, failed),
+                         failed);
+}
+
+// Emits the instructions for one 32-bit half of the 64-bit comparison
+// "(arg & mask) == value"; called only from MaskedEqual(). For 32-bit
+// arguments the UPPER half instead validates that no 64-bit value was
+// smuggled into the argument.
+CodeGen::Node PolicyCompiler::MaskedEqualHalf(int argno,
+                                              size_t width,
+                                              uint64_t full_mask,
+                                              uint64_t full_value,
+                                              ArgHalf half,
+                                              CodeGen::Node passed,
+                                              CodeGen::Node failed) {
+  if (width == 4 && half == ArgHalf::UPPER) {
+    // Special logic for sanity checking the upper 32-bits of 32-bit system
+    // call arguments.
+
+    // TODO(mdempsky): Compile Unexpected64bitArgument() just per program.
+    CodeGen::Node invalid_64bit = Unexpected64bitArgument();
+
+    const uint32_t upper = SECCOMP_ARG_MSB_IDX(argno);
+    const uint32_t lower = SECCOMP_ARG_LSB_IDX(argno);
+
+    if (sizeof(void*) == 4) {
+      // On 32-bit platforms, the upper 32-bits should always be 0:
+      //   LDW [upper]
+      //   JEQ 0, passed, invalid
+      return gen_.MakeInstruction(
+          BPF_LD + BPF_W + BPF_ABS,
+          upper,
+          gen_.MakeInstruction(
+              BPF_JMP + BPF_JEQ + BPF_K, 0, passed, invalid_64bit));
+    }
+
+    // On 64-bit platforms, the upper 32-bits may be 0 or ~0; but we only allow
+    // ~0 if the sign bit of the lower 32-bits is set too:
+    //   LDW [upper]
+    //   JEQ 0, passed, (next)
+    //   JEQ ~0, (next), invalid
+    //   LDW [lower]
+    //   JSET (1<<31), passed, invalid
+    //
+    // TODO(mdempsky): The JSET instruction could perhaps jump to passed->next
+    // instead, as the first instruction of passed should be "LDW [lower]".
+    return gen_.MakeInstruction(
+        BPF_LD + BPF_W + BPF_ABS,
+        upper,
+        gen_.MakeInstruction(
+            BPF_JMP + BPF_JEQ + BPF_K,
+            0,
+            passed,
+            gen_.MakeInstruction(
+                BPF_JMP + BPF_JEQ + BPF_K,
+                std::numeric_limits<uint32_t>::max(),
+                gen_.MakeInstruction(
+                    BPF_LD + BPF_W + BPF_ABS,
+                    lower,
+                    gen_.MakeInstruction(BPF_JMP + BPF_JSET + BPF_K,
+                                         1U << 31,
+                                         passed,
+                                         invalid_64bit)),
+                invalid_64bit)));
+  }
+
+  // Select the 32-bit word of the argument, and the matching halves of
+  // mask and value, for the half being tested.
+  const uint32_t idx = (half == ArgHalf::UPPER) ? SECCOMP_ARG_MSB_IDX(argno)
+                                                : SECCOMP_ARG_LSB_IDX(argno);
+  const uint32_t mask = (half == ArgHalf::UPPER) ? full_mask >> 32 : full_mask;
+  const uint32_t value =
+      (half == ArgHalf::UPPER) ? full_value >> 32 : full_value;
+
+  // Emit a suitable instruction sequence for (arg & mask) == value.
+
+  // For (arg & 0) == 0, just return passed.
+  if (mask == 0) {
+    CHECK_EQ(0U, value);
+    return passed;
+  }
+
+  // For (arg & ~0) == value, emit:
+  //   LDW [idx]
+  //   JEQ value, passed, failed
+  if (mask == std::numeric_limits<uint32_t>::max()) {
+    return gen_.MakeInstruction(
+        BPF_LD + BPF_W + BPF_ABS,
+        idx,
+        gen_.MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, value, passed, failed));
+  }
+
+  // For (arg & mask) == 0, emit:
+  //   LDW [idx]
+  //   JSET mask, failed, passed
+  // (Note: failed and passed are intentionally swapped.)
+  if (value == 0) {
+    return gen_.MakeInstruction(
+        BPF_LD + BPF_W + BPF_ABS,
+        idx,
+        gen_.MakeInstruction(BPF_JMP + BPF_JSET + BPF_K, mask, failed, passed));
+  }
+
+  // For (arg & x) == x where x is a single-bit value, emit:
+  //   LDW [idx]
+  //   JSET mask, passed, failed
+  if (mask == value && HasExactlyOneBit(mask)) {
+    return gen_.MakeInstruction(
+        BPF_LD + BPF_W + BPF_ABS,
+        idx,
+        gen_.MakeInstruction(BPF_JMP + BPF_JSET + BPF_K, mask, passed, failed));
+  }
+
+  // Generic fallback:
+  //   LDW [idx]
+  //   AND mask
+  //   JEQ value, passed, failed
+  return gen_.MakeInstruction(
+      BPF_LD + BPF_W + BPF_ABS,
+      idx,
+      gen_.MakeInstruction(
+          BPF_ALU + BPF_AND + BPF_K,
+          mask,
+          gen_.MakeInstruction(
+              BPF_JMP + BPF_JEQ + BPF_K, value, passed, failed)));
+}
+
+// Returns the node executed when a 64-bit value is found in an argument
+// the policy declared as 32 bits wide (see MaskedEqualHalf).
+CodeGen::Node PolicyCompiler::Unexpected64bitArgument() {
+  return CompileResult(panic_func_("Unexpected 64bit argument detected"));
+}
+
+// Emits a BPF return of seccomp value |ret|. When unsafe traps are in use,
+// errno-style returns are rerouted through a trap handler (see below).
+CodeGen::Node PolicyCompiler::Return(uint32_t ret) {
+  if (has_unsafe_traps_ && (ret & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) {
+    // When inside an UnsafeTrap() callback, we want to allow all system calls.
+    // This means, we must conditionally disable the sandbox -- and that's not
+    // something that kernel-side BPF filters can do, as they cannot inspect
+    // any state other than the syscall arguments.
+    // But if we redirect all error handlers to user-space, then we can easily
+    // make this decision.
+    // The performance penalty for this extra round-trip to user-space is not
+    // actually that bad, as we only ever pay it for denied system calls; and a
+    // typical program has very few of these.
+    return Trap(ReturnErrno, reinterpret_cast<void*>(ret & SECCOMP_RET_DATA),
+                true);
+  }
+
+  return gen_.MakeInstruction(BPF_RET + BPF_K, ret);
+}
+
+// Registers |fnc|/|aux| with the trap registry and emits a return of
+// SECCOMP_RET_TRAP tagged with the assigned trap id in the data field.
+CodeGen::Node PolicyCompiler::Trap(TrapRegistry::TrapFnc fnc,
+                                   const void* aux,
+                                   bool safe) {
+  uint16_t trap_id = registry_->Add(fnc, aux, safe);
+  return gen_.MakeInstruction(BPF_RET + BPF_K, SECCOMP_RET_TRAP + trap_id);
+}
+
+// Returns true for the signal-handling system calls that any policy using
+// UnsafeTrap() must allow unconditionally (kSyscallsRequiredForUnsafeTraps).
+bool PolicyCompiler::IsRequiredForUnsafeTrap(int sysno) {
+  for (size_t i = 0; i < arraysize(kSyscallsRequiredForUnsafeTraps); ++i) {
+    if (sysno == kSyscallsRequiredForUnsafeTraps[i]) {
+      return true;
+    }
+  }
+  return false;
+}
+
+} // namespace bpf_dsl
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/bpf_dsl/policy_compiler.h b/libchrome/sandbox/linux/bpf_dsl/policy_compiler.h
new file mode 100644
index 0000000..48b1d78
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/policy_compiler.h
@@ -0,0 +1,153 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_BPF_DSL_POLICY_COMPILER_H_
+#define SANDBOX_LINUX_BPF_DSL_POLICY_COMPILER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/macros.h"
+#include "sandbox/linux/bpf_dsl/bpf_dsl_forward.h"
+#include "sandbox/linux/bpf_dsl/codegen.h"
+#include "sandbox/linux/bpf_dsl/trap_registry.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+namespace bpf_dsl {
+class Policy;
+
+// PolicyCompiler implements the bpf_dsl compiler, allowing users to
+// transform bpf_dsl policies into BPF programs to be executed by the
+// Linux kernel.
+class SANDBOX_EXPORT PolicyCompiler {
+ public:
+  // Callback used to report faulty conditions (e.g. ABI violations); it
+  // returns the result expression compiled in place of the faulty case.
+  using PanicFunc = bpf_dsl::ResultExpr (*)(const char* error);
+
+  PolicyCompiler(const Policy* policy, TrapRegistry* registry);
+  ~PolicyCompiler();
+
+  // Compile registers any trap handlers needed by the policy and
+  // compiles the policy to a BPF program, which it returns.
+  CodeGen::Program Compile();
+
+  // DangerousSetEscapePC sets the "escape PC" that is allowed to issue any
+  // system calls, regardless of policy.
+  void DangerousSetEscapePC(uint64_t escapepc);
+
+  // SetPanicFunc sets the callback function used for handling faulty
+  // system call conditions. The default behavior is to immediately kill
+  // the process.
+  // TODO(mdempsky): Move this into Policy?
+  void SetPanicFunc(PanicFunc panic_func);
+
+  // UnsafeTraps require some syscalls to always be allowed.
+  // This helper function returns true for these calls.
+  static bool IsRequiredForUnsafeTrap(int sysno);
+
+  // Functions below are meant for use within bpf_dsl itself.
+
+  // Return returns a CodeGen::Node that returns the specified seccomp
+  // return value.
+  CodeGen::Node Return(uint32_t ret);
+
+  // Trap returns a CodeGen::Node to indicate the system call should
+  // instead invoke a trap handler.
+  CodeGen::Node Trap(TrapRegistry::TrapFnc fnc, const void* aux, bool safe);
+
+  // MaskedEqual returns a CodeGen::Node that represents a conditional branch.
+  // Argument "argno" (0..5) will be bitwise-AND'd with "mask" and compared
+  // to "value"; if equal, then "passed" will be executed, otherwise "failed".
+  // If "width" is 4, the argument value must fit in 32 bits
+  // (0..0xffffffff). If it is outside this range, the sandbox treats the
+  // system call just the same as any other ABI violation (i.e., it panics).
+  CodeGen::Node MaskedEqual(int argno,
+                            size_t width,
+                            uint64_t mask,
+                            uint64_t value,
+                            CodeGen::Node passed,
+                            CodeGen::Node failed);
+
+ private:
+  struct Range;
+  typedef std::vector<Range> Ranges;
+
+  // Used by MaskedEqualHalf to track which half of the argument it's
+  // emitting instructions for.
+  enum class ArgHalf {
+    LOWER,
+    UPPER,
+  };
+
+  // Compile the configured policy into a complete instruction sequence.
+  CodeGen::Node AssemblePolicy();
+
+  // Return an instruction sequence that checks the
+  // arch_seccomp_data's "arch" field is valid, and then passes
+  // control to |passed| if so.
+  CodeGen::Node CheckArch(CodeGen::Node passed);
+
+  // If |has_unsafe_traps_| is true, returns an instruction sequence
+  // that allows all system calls from |escapepc_|, and otherwise
+  // passes control to |rest|. Otherwise, simply returns |rest|.
+  CodeGen::Node MaybeAddEscapeHatch(CodeGen::Node rest);
+
+  // Return an instruction sequence that loads and checks the system
+  // call number, performs a binary search, and then dispatches to an
+  // appropriate instruction sequence compiled from the current
+  // policy.
+  CodeGen::Node DispatchSyscall();
+
+  // Return an instruction sequence that checks the system call number
+  // (expected to be loaded in register A) and if valid, passes
+  // control to |passed| (with register A still valid).
+  CodeGen::Node CheckSyscallNumber(CodeGen::Node passed);
+
+  // Finds all the ranges of system calls that need to be handled. Ranges
+  // are sorted in ascending order of system call numbers. There are no
+  // gaps in the ranges. System calls with identical CodeGen::Nodes are
+  // coalesced into a single range.
+  void FindRanges(Ranges* ranges);
+
+  // Returns a BPF program snippet that implements a jump table for the
+  // given range of system call numbers. This function runs recursively.
+  CodeGen::Node AssembleJumpTable(Ranges::const_iterator start,
+                                  Ranges::const_iterator stop);
+
+  // CompileResult compiles an individual result expression into a
+  // CodeGen node.
+  CodeGen::Node CompileResult(const ResultExpr& res);
+
+  // Returns a BPF program that evaluates half of a conditional expression;
+  // it should only ever be called from MaskedEqual().
+  CodeGen::Node MaskedEqualHalf(int argno,
+                                size_t width,
+                                uint64_t full_mask,
+                                uint64_t full_value,
+                                ArgHalf half,
+                                CodeGen::Node passed,
+                                CodeGen::Node failed);
+
+  // Returns the fatal CodeGen::Node that is used to indicate that somebody
+  // attempted to pass a 64bit value in a 32bit system call argument.
+  CodeGen::Node Unexpected64bitArgument();
+
+  const Policy* policy_;
+  TrapRegistry* registry_;
+  uint64_t escapepc_;  // "Escape PC" exempt from filtering; 0 = unset.
+  PanicFunc panic_func_;
+
+  CodeGen gen_;
+  bool has_unsafe_traps_;
+
+  DISALLOW_COPY_AND_ASSIGN(PolicyCompiler);
+};
+
+} // namespace bpf_dsl
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_BPF_DSL_POLICY_COMPILER_H_
diff --git a/libchrome/sandbox/linux/bpf_dsl/seccomp_macros.h b/libchrome/sandbox/linux/bpf_dsl/seccomp_macros.h
new file mode 100644
index 0000000..af70f21
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/seccomp_macros.h
@@ -0,0 +1,294 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_BPF_DSL_SECCOMP_MACROS_H_
+#define SANDBOX_LINUX_BPF_DSL_SECCOMP_MACROS_H_
+
+#include <sys/types.h> // For __BIONIC__.
+// Old Bionic versions do not have sys/user.h. The #if below can be removed
+// once we no longer need to support these old Bionic versions.
+// All x86_64 builds use a new enough bionic to have sys/user.h.
+#if !defined(__BIONIC__) || defined(__x86_64__)
+#if !defined(__native_client_nonsfi__)
+#include <sys/user.h>
+#endif
+#if defined(__mips__)
+// sys/user.h in eglibc misses size_t definition
+#include <stddef.h>
+#endif
+#endif
+
+#include "sandbox/linux/system_headers/linux_seccomp.h" // For AUDIT_ARCH_*
+
+// Impose some reasonable maximum BPF program size. Realistically, the
+// kernel probably has much lower limits. But by limiting to less than
+// 30 bits, we can ease requirements on some of our data types.
+#define SECCOMP_MAX_PROGRAM_SIZE (1<<30)
+
+#if defined(__i386__)
+#define SECCOMP_ARCH AUDIT_ARCH_I386
+
+#define SECCOMP_REG(_ctx, _reg) ((_ctx)->uc_mcontext.gregs[(_reg)])
+#define SECCOMP_RESULT(_ctx) SECCOMP_REG(_ctx, REG_EAX)
+#define SECCOMP_SYSCALL(_ctx) SECCOMP_REG(_ctx, REG_EAX)
+#define SECCOMP_IP(_ctx) SECCOMP_REG(_ctx, REG_EIP)
+#define SECCOMP_PARM1(_ctx) SECCOMP_REG(_ctx, REG_EBX)
+#define SECCOMP_PARM2(_ctx) SECCOMP_REG(_ctx, REG_ECX)
+#define SECCOMP_PARM3(_ctx) SECCOMP_REG(_ctx, REG_EDX)
+#define SECCOMP_PARM4(_ctx) SECCOMP_REG(_ctx, REG_ESI)
+#define SECCOMP_PARM5(_ctx) SECCOMP_REG(_ctx, REG_EDI)
+#define SECCOMP_PARM6(_ctx) SECCOMP_REG(_ctx, REG_EBP)
+#define SECCOMP_NR_IDX (offsetof(struct arch_seccomp_data, nr))
+#define SECCOMP_ARCH_IDX (offsetof(struct arch_seccomp_data, arch))
+#define SECCOMP_IP_MSB_IDX (offsetof(struct arch_seccomp_data, \
+ instruction_pointer) + 4)
+#define SECCOMP_IP_LSB_IDX (offsetof(struct arch_seccomp_data, \
+ instruction_pointer) + 0)
+#define SECCOMP_ARG_MSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) + \
+ 8*(nr) + 4)
+#define SECCOMP_ARG_LSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) + \
+ 8*(nr) + 0)
+
+
+#if defined(__BIONIC__) || defined(__native_client_nonsfi__)
+// Old Bionic versions and PNaCl toolchain don't have sys/user.h, so we just
+// define regs_struct directly. This can be removed once we no longer need to
+// support these old Bionic versions and PNaCl toolchain.
+struct regs_struct {
+ long int ebx;
+ long int ecx;
+ long int edx;
+ long int esi;
+ long int edi;
+ long int ebp;
+ long int eax;
+ long int xds;
+ long int xes;
+ long int xfs;
+ long int xgs;
+ long int orig_eax;
+ long int eip;
+ long int xcs;
+ long int eflags;
+ long int esp;
+ long int xss;
+};
+#else
+typedef user_regs_struct regs_struct;
+#endif
+
+#define SECCOMP_PT_RESULT(_regs) (_regs).eax
+#define SECCOMP_PT_SYSCALL(_regs) (_regs).orig_eax
+#define SECCOMP_PT_IP(_regs) (_regs).eip
+#define SECCOMP_PT_PARM1(_regs) (_regs).ebx
+#define SECCOMP_PT_PARM2(_regs) (_regs).ecx
+#define SECCOMP_PT_PARM3(_regs) (_regs).edx
+#define SECCOMP_PT_PARM4(_regs) (_regs).esi
+#define SECCOMP_PT_PARM5(_regs) (_regs).edi
+#define SECCOMP_PT_PARM6(_regs) (_regs).ebp
+
+#elif defined(__x86_64__)
+#define SECCOMP_ARCH AUDIT_ARCH_X86_64
+
+#define SECCOMP_REG(_ctx, _reg) ((_ctx)->uc_mcontext.gregs[(_reg)])
+#define SECCOMP_RESULT(_ctx) SECCOMP_REG(_ctx, REG_RAX)
+#define SECCOMP_SYSCALL(_ctx) SECCOMP_REG(_ctx, REG_RAX)
+#define SECCOMP_IP(_ctx) SECCOMP_REG(_ctx, REG_RIP)
+#define SECCOMP_PARM1(_ctx) SECCOMP_REG(_ctx, REG_RDI)
+#define SECCOMP_PARM2(_ctx) SECCOMP_REG(_ctx, REG_RSI)
+#define SECCOMP_PARM3(_ctx) SECCOMP_REG(_ctx, REG_RDX)
+#define SECCOMP_PARM4(_ctx) SECCOMP_REG(_ctx, REG_R10)
+#define SECCOMP_PARM5(_ctx) SECCOMP_REG(_ctx, REG_R8)
+#define SECCOMP_PARM6(_ctx) SECCOMP_REG(_ctx, REG_R9)
+#define SECCOMP_NR_IDX (offsetof(struct arch_seccomp_data, nr))
+#define SECCOMP_ARCH_IDX (offsetof(struct arch_seccomp_data, arch))
+#define SECCOMP_IP_MSB_IDX (offsetof(struct arch_seccomp_data, \
+ instruction_pointer) + 4)
+#define SECCOMP_IP_LSB_IDX (offsetof(struct arch_seccomp_data, \
+ instruction_pointer) + 0)
+#define SECCOMP_ARG_MSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) + \
+ 8*(nr) + 4)
+#define SECCOMP_ARG_LSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) + \
+ 8*(nr) + 0)
+
+typedef user_regs_struct regs_struct;
+#define SECCOMP_PT_RESULT(_regs) (_regs).rax
+#define SECCOMP_PT_SYSCALL(_regs) (_regs).orig_rax
+#define SECCOMP_PT_IP(_regs) (_regs).rip
+#define SECCOMP_PT_PARM1(_regs) (_regs).rdi
+#define SECCOMP_PT_PARM2(_regs) (_regs).rsi
+#define SECCOMP_PT_PARM3(_regs) (_regs).rdx
+#define SECCOMP_PT_PARM4(_regs) (_regs).r10
+#define SECCOMP_PT_PARM5(_regs) (_regs).r8
+#define SECCOMP_PT_PARM6(_regs) (_regs).r9
+
+#elif defined(__arm__) && (defined(__thumb__) || defined(__ARM_EABI__))
+#define SECCOMP_ARCH AUDIT_ARCH_ARM
+
+// ARM sigcontext_t is different from i386/x86_64.
+// See </arch/arm/include/asm/sigcontext.h> in the Linux kernel.
+#define SECCOMP_REG(_ctx, _reg) ((_ctx)->uc_mcontext.arm_##_reg)
+// ARM EABI syscall convention.
+#define SECCOMP_RESULT(_ctx) SECCOMP_REG(_ctx, r0)
+#define SECCOMP_SYSCALL(_ctx) SECCOMP_REG(_ctx, r7)
+#define SECCOMP_IP(_ctx) SECCOMP_REG(_ctx, pc)
+#define SECCOMP_PARM1(_ctx) SECCOMP_REG(_ctx, r0)
+#define SECCOMP_PARM2(_ctx) SECCOMP_REG(_ctx, r1)
+#define SECCOMP_PARM3(_ctx) SECCOMP_REG(_ctx, r2)
+#define SECCOMP_PARM4(_ctx) SECCOMP_REG(_ctx, r3)
+#define SECCOMP_PARM5(_ctx) SECCOMP_REG(_ctx, r4)
+#define SECCOMP_PARM6(_ctx) SECCOMP_REG(_ctx, r5)
+#define SECCOMP_NR_IDX (offsetof(struct arch_seccomp_data, nr))
+#define SECCOMP_ARCH_IDX (offsetof(struct arch_seccomp_data, arch))
+#define SECCOMP_IP_MSB_IDX (offsetof(struct arch_seccomp_data, \
+ instruction_pointer) + 4)
+#define SECCOMP_IP_LSB_IDX (offsetof(struct arch_seccomp_data, \
+ instruction_pointer) + 0)
+#define SECCOMP_ARG_MSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) + \
+ 8*(nr) + 4)
+#define SECCOMP_ARG_LSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) + \
+ 8*(nr) + 0)
+
+#if defined(__BIONIC__) || defined(__native_client_nonsfi__)
+// Old Bionic versions and PNaCl toolchain don't have sys/user.h, so we just
+// define regs_struct directly. This can be removed once we no longer need to
+// support these old Bionic versions and PNaCl toolchain.
+struct regs_struct {
+ unsigned long uregs[18];
+};
+#else
+typedef user_regs regs_struct;
+#endif
+
+#define REG_cpsr uregs[16]
+#define REG_pc uregs[15]
+#define REG_lr uregs[14]
+#define REG_sp uregs[13]
+#define REG_ip uregs[12]
+#define REG_fp uregs[11]
+#define REG_r10 uregs[10]
+#define REG_r9 uregs[9]
+#define REG_r8 uregs[8]
+#define REG_r7 uregs[7]
+#define REG_r6 uregs[6]
+#define REG_r5 uregs[5]
+#define REG_r4 uregs[4]
+#define REG_r3 uregs[3]
+#define REG_r2 uregs[2]
+#define REG_r1 uregs[1]
+#define REG_r0 uregs[0]
+#define REG_ORIG_r0 uregs[17]
+
+#define SECCOMP_PT_RESULT(_regs) (_regs).REG_r0
+#define SECCOMP_PT_SYSCALL(_regs) (_regs).REG_r7
+#define SECCOMP_PT_IP(_regs) (_regs).REG_pc
+#define SECCOMP_PT_PARM1(_regs) (_regs).REG_r0
+#define SECCOMP_PT_PARM2(_regs) (_regs).REG_r1
+#define SECCOMP_PT_PARM3(_regs) (_regs).REG_r2
+#define SECCOMP_PT_PARM4(_regs) (_regs).REG_r3
+#define SECCOMP_PT_PARM5(_regs) (_regs).REG_r4
+#define SECCOMP_PT_PARM6(_regs) (_regs).REG_r5
+
+#elif defined(__mips__) && (_MIPS_SIM == _MIPS_SIM_ABI32)
+#define SECCOMP_ARCH AUDIT_ARCH_MIPSEL
+#define SYSCALL_EIGHT_ARGS
+// MIPS sigcontext_t is different from i386/x86_64 and ARM.
+// See </arch/mips/include/uapi/asm/sigcontext.h> in the Linux kernel.
+#define SECCOMP_REG(_ctx, _reg) ((_ctx)->uc_mcontext.gregs[_reg])
+// Based on MIPS o32 ABI syscall convention.
+// On MIPS, when indirect syscall is being made (syscall(__NR_foo)),
+// the real identifier (__NR_foo) is not in v0, but in a0
+#define SECCOMP_RESULT(_ctx) SECCOMP_REG(_ctx, 2)
+#define SECCOMP_SYSCALL(_ctx) SECCOMP_REG(_ctx, 2)
+#define SECCOMP_IP(_ctx) (_ctx)->uc_mcontext.pc
+#define SECCOMP_PARM1(_ctx) SECCOMP_REG(_ctx, 4)
+#define SECCOMP_PARM2(_ctx) SECCOMP_REG(_ctx, 5)
+#define SECCOMP_PARM3(_ctx) SECCOMP_REG(_ctx, 6)
+#define SECCOMP_PARM4(_ctx) SECCOMP_REG(_ctx, 7)
+// Only the first 4 arguments of syscall are in registers.
+// The rest are on the stack.
+#define SECCOMP_STACKPARM(_ctx, n) (((long *)SECCOMP_REG(_ctx, 29))[(n)])
+#define SECCOMP_PARM5(_ctx) SECCOMP_STACKPARM(_ctx, 4)
+#define SECCOMP_PARM6(_ctx) SECCOMP_STACKPARM(_ctx, 5)
+#define SECCOMP_PARM7(_ctx) SECCOMP_STACKPARM(_ctx, 6)
+#define SECCOMP_PARM8(_ctx) SECCOMP_STACKPARM(_ctx, 7)
+#define SECCOMP_NR_IDX (offsetof(struct arch_seccomp_data, nr))
+#define SECCOMP_ARCH_IDX (offsetof(struct arch_seccomp_data, arch))
+#define SECCOMP_IP_MSB_IDX (offsetof(struct arch_seccomp_data, \
+ instruction_pointer) + 4)
+#define SECCOMP_IP_LSB_IDX (offsetof(struct arch_seccomp_data, \
+ instruction_pointer) + 0)
+#define SECCOMP_ARG_MSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) + \
+ 8*(nr) + 4)
+#define SECCOMP_ARG_LSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) + \
+ 8*(nr) + 0)
+
+// On Mips we don't have structures like user_regs or user_regs_struct in
+// sys/user.h that we could use, so we just define regs_struct directly.
+struct regs_struct {
+ unsigned long long regs[32];
+};
+
+#define REG_a3 regs[7]
+#define REG_a2 regs[6]
+#define REG_a1 regs[5]
+#define REG_a0 regs[4]
+#define REG_v1 regs[3]
+#define REG_v0 regs[2]
+
+#define SECCOMP_PT_RESULT(_regs) (_regs).REG_v0
+#define SECCOMP_PT_SYSCALL(_regs) (_regs).REG_v0
+#define SECCOMP_PT_PARM1(_regs) (_regs).REG_a0
+#define SECCOMP_PT_PARM2(_regs) (_regs).REG_a1
+#define SECCOMP_PT_PARM3(_regs) (_regs).REG_a2
+#define SECCOMP_PT_PARM4(_regs) (_regs).REG_a3
+
+#elif defined(__aarch64__)
+struct regs_struct {
+ unsigned long long regs[31];
+ unsigned long long sp;
+ unsigned long long pc;
+ unsigned long long pstate;
+};
+
+#define SECCOMP_ARCH AUDIT_ARCH_AARCH64
+
+#define SECCOMP_REG(_ctx, _reg) ((_ctx)->uc_mcontext.regs[_reg])
+
+#define SECCOMP_RESULT(_ctx) SECCOMP_REG(_ctx, 0)
+#define SECCOMP_SYSCALL(_ctx) SECCOMP_REG(_ctx, 8)
+#define SECCOMP_IP(_ctx) (_ctx)->uc_mcontext.pc
+#define SECCOMP_PARM1(_ctx) SECCOMP_REG(_ctx, 0)
+#define SECCOMP_PARM2(_ctx) SECCOMP_REG(_ctx, 1)
+#define SECCOMP_PARM3(_ctx) SECCOMP_REG(_ctx, 2)
+#define SECCOMP_PARM4(_ctx) SECCOMP_REG(_ctx, 3)
+#define SECCOMP_PARM5(_ctx) SECCOMP_REG(_ctx, 4)
+#define SECCOMP_PARM6(_ctx) SECCOMP_REG(_ctx, 5)
+
+#define SECCOMP_NR_IDX (offsetof(struct arch_seccomp_data, nr))
+#define SECCOMP_ARCH_IDX (offsetof(struct arch_seccomp_data, arch))
+#define SECCOMP_IP_MSB_IDX \
+ (offsetof(struct arch_seccomp_data, instruction_pointer) + 4)
+#define SECCOMP_IP_LSB_IDX \
+ (offsetof(struct arch_seccomp_data, instruction_pointer) + 0)
+#define SECCOMP_ARG_MSB_IDX(nr) \
+ (offsetof(struct arch_seccomp_data, args) + 8 * (nr) + 4)
+#define SECCOMP_ARG_LSB_IDX(nr) \
+ (offsetof(struct arch_seccomp_data, args) + 8 * (nr) + 0)
+
+#define SECCOMP_PT_RESULT(_regs) (_regs).regs[0]
+#define SECCOMP_PT_SYSCALL(_regs) (_regs).regs[8]
+#define SECCOMP_PT_IP(_regs) (_regs).pc
+#define SECCOMP_PT_PARM1(_regs) (_regs).regs[0]
+#define SECCOMP_PT_PARM2(_regs) (_regs).regs[1]
+#define SECCOMP_PT_PARM3(_regs) (_regs).regs[2]
+#define SECCOMP_PT_PARM4(_regs) (_regs).regs[3]
+#define SECCOMP_PT_PARM5(_regs) (_regs).regs[4]
+#define SECCOMP_PT_PARM6(_regs) (_regs).regs[5]
+#else
+#error Unsupported target platform
+
+#endif
+
+#endif // SANDBOX_LINUX_BPF_DSL_SECCOMP_MACROS_H_
diff --git a/libchrome/sandbox/linux/bpf_dsl/syscall_set.cc b/libchrome/sandbox/linux/bpf_dsl/syscall_set.cc
new file mode 100644
index 0000000..3d61fa3
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/syscall_set.cc
@@ -0,0 +1,146 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/bpf_dsl/syscall_set.h"
+
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "sandbox/linux/bpf_dsl/linux_syscall_ranges.h"
+
+namespace sandbox {
+
+namespace {
+
+#if defined(__mips__) && (_MIPS_SIM == _MIPS_SIM_ABI32)
+// This is true for Mips O32 ABI.
+static_assert(MIN_SYSCALL == __NR_Linux, "min syscall number should be 4000");
+#else
+// This is true for supported architectures (Intel and ARM EABI).
+static_assert(MIN_SYSCALL == 0u,
+ "min syscall should always be zero");
+#endif
+
+// SyscallRange represents an inclusive range of system call numbers.
+struct SyscallRange {
+ uint32_t first;
+ uint32_t last;
+};
+
+const SyscallRange kValidSyscallRanges[] = {
+ // First we iterate up to MAX_PUBLIC_SYSCALL, which is equal to MAX_SYSCALL
+ // on Intel architectures, but leaves room for private syscalls on ARM.
+ {MIN_SYSCALL, MAX_PUBLIC_SYSCALL},
+#if defined(__arm__)
+ // ARM EABI includes "ARM private" system calls starting at
+ // MIN_PRIVATE_SYSCALL, and a "ghost syscall private to the kernel" at
+ // MIN_GHOST_SYSCALL.
+ {MIN_PRIVATE_SYSCALL, MAX_PRIVATE_SYSCALL},
+ {MIN_GHOST_SYSCALL, MAX_SYSCALL},
+#endif
+};
+
+} // namespace
+
+SyscallSet::Iterator SyscallSet::begin() const {
+ return Iterator(set_, false);
+}
+
+SyscallSet::Iterator SyscallSet::end() const {
+ return Iterator(set_, true);
+}
+
+bool SyscallSet::IsValid(uint32_t num) {
+ for (const SyscallRange& range : kValidSyscallRanges) {
+ if (num >= range.first && num <= range.last) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool operator==(const SyscallSet& lhs, const SyscallSet& rhs) {
+ return (lhs.set_ == rhs.set_);
+}
+
+SyscallSet::Iterator::Iterator(Set set, bool done)
+ : set_(set), done_(done), num_(0) {
+ // If the set doesn't contain 0, we need to skip to the next element.
+ if (!done && set_ == (IsValid(num_) ? Set::INVALID_ONLY : Set::VALID_ONLY)) {
+ ++*this;
+ }
+}
+
+uint32_t SyscallSet::Iterator::operator*() const {
+ DCHECK(!done_);
+ return num_;
+}
+
+SyscallSet::Iterator& SyscallSet::Iterator::operator++() {
+ DCHECK(!done_);
+
+ num_ = NextSyscall();
+ if (num_ == 0) {
+ done_ = true;
+ }
+
+ return *this;
+}
+
+// NextSyscall returns the next system call in the iterated system
+// call set after |num_|, or 0 if no such system call exists.
+uint32_t SyscallSet::Iterator::NextSyscall() const {
+ const bool want_valid = (set_ != Set::INVALID_ONLY);
+ const bool want_invalid = (set_ != Set::VALID_ONLY);
+
+ for (const SyscallRange& range : kValidSyscallRanges) {
+ if (want_invalid && range.first > 0 && num_ < range.first - 1) {
+ // Even when iterating invalid syscalls, we only include the end points;
+ // so skip directly to just before the next (valid) range.
+ return range.first - 1;
+ }
+ if (want_valid && num_ < range.first) {
+ return range.first;
+ }
+ if (want_valid && num_ < range.last) {
+ return num_ + 1;
+ }
+ if (want_invalid && num_ <= range.last) {
+ return range.last + 1;
+ }
+ }
+
+ if (want_invalid) {
+ // BPF programs only ever operate on unsigned quantities. So,
+ // that's how we iterate; we return values from
+ // 0..0xFFFFFFFFu. But there are places, where the kernel might
+ // interpret system call numbers as signed quantities, so the
+ // boundaries between signed and unsigned values are potential
+ // problem cases. We want to explicitly return these values from
+ // our iterator.
+ if (num_ < 0x7FFFFFFFu)
+ return 0x7FFFFFFFu;
+ if (num_ < 0x80000000u)
+ return 0x80000000u;
+
+ if (num_ < 0xFFFFFFFFu)
+ return 0xFFFFFFFFu;
+ }
+
+ return 0;
+}
+
+bool operator==(const SyscallSet::Iterator& lhs,
+ const SyscallSet::Iterator& rhs) {
+ DCHECK(lhs.set_ == rhs.set_);
+ return (lhs.done_ == rhs.done_) && (lhs.num_ == rhs.num_);
+}
+
+bool operator!=(const SyscallSet::Iterator& lhs,
+ const SyscallSet::Iterator& rhs) {
+ return !(lhs == rhs);
+}
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/bpf_dsl/syscall_set.h b/libchrome/sandbox/linux/bpf_dsl/syscall_set.h
new file mode 100644
index 0000000..b9f076d
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/syscall_set.h
@@ -0,0 +1,103 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_BPF_DSL_SYSCALL_SET_H__
+#define SANDBOX_LINUX_BPF_DSL_SYSCALL_SET_H__
+
+#include <stdint.h>
+
+#include <iterator>
+
+#include "base/macros.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+
+// Iterates over the entire system call range from 0..0xFFFFFFFFu. This
+// iterator is aware of how system calls look like and will skip quickly
+// over ranges that can't contain system calls. It iterates more slowly
+// whenever it reaches a range that is potentially problematic, returning
+// the last invalid value before a valid range of system calls, and the
+// first invalid value after a valid range of syscalls. It iterates over
+// individual values whenever it is in the normal range for system calls
+// (typically MIN_SYSCALL..MAX_SYSCALL).
+//
+// Example usage:
+// for (uint32_t sysnum : SyscallSet::All()) {
+// // Do something with sysnum.
+// }
+class SANDBOX_EXPORT SyscallSet {
+ public:
+ class Iterator;
+
+ SyscallSet(const SyscallSet& ss) : set_(ss.set_) {}
+ ~SyscallSet() {}
+
+ Iterator begin() const;
+ Iterator end() const;
+
+ // All returns a SyscallSet that contains both valid and invalid
+ // system call numbers.
+ static SyscallSet All() { return SyscallSet(Set::ALL); }
+
+ // ValidOnly returns a SyscallSet that contains only valid system
+ // call numbers.
+ static SyscallSet ValidOnly() { return SyscallSet(Set::VALID_ONLY); }
+
+ // InvalidOnly returns a SyscallSet that contains only invalid
+ // system call numbers, but still omits numbers in the middle of a
+ // range of invalid system call numbers.
+ static SyscallSet InvalidOnly() { return SyscallSet(Set::INVALID_ONLY); }
+
+ // IsValid returns whether |num| specifies a valid system call
+ // number.
+ static bool IsValid(uint32_t num);
+
+ private:
+ enum class Set { ALL, VALID_ONLY, INVALID_ONLY };
+
+ explicit SyscallSet(Set set) : set_(set) {}
+
+ Set set_;
+
+ friend bool operator==(const SyscallSet&, const SyscallSet&);
+ DISALLOW_ASSIGN(SyscallSet);
+};
+
+SANDBOX_EXPORT bool operator==(const SyscallSet& lhs, const SyscallSet& rhs);
+
+// Iterator provides C++ input iterator semantics for traversing a
+// SyscallSet.
+class SyscallSet::Iterator
+ : public std::iterator<std::input_iterator_tag, uint32_t> {
+ public:
+ Iterator(const Iterator& it)
+ : set_(it.set_), done_(it.done_), num_(it.num_) {}
+ ~Iterator() {}
+
+ uint32_t operator*() const;
+ Iterator& operator++();
+
+ private:
+ Iterator(Set set, bool done);
+
+ uint32_t NextSyscall() const;
+
+ Set set_;
+ bool done_;
+ uint32_t num_;
+
+ friend SyscallSet;
+ friend bool operator==(const Iterator&, const Iterator&);
+ DISALLOW_ASSIGN(Iterator);
+};
+
+SANDBOX_EXPORT bool operator==(const SyscallSet::Iterator& lhs,
+ const SyscallSet::Iterator& rhs);
+SANDBOX_EXPORT bool operator!=(const SyscallSet::Iterator& lhs,
+ const SyscallSet::Iterator& rhs);
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_BPF_DSL_SYSCALL_SET_H__
diff --git a/libchrome/sandbox/linux/bpf_dsl/syscall_set_unittest.cc b/libchrome/sandbox/linux/bpf_dsl/syscall_set_unittest.cc
new file mode 100644
index 0000000..5069e8e
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/syscall_set_unittest.cc
@@ -0,0 +1,126 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/bpf_dsl/syscall_set.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/macros.h"
+#include "sandbox/linux/bpf_dsl/linux_syscall_ranges.h"
+#include "sandbox/linux/tests/unit_tests.h"
+
+namespace sandbox {
+
+namespace {
+
+const SyscallSet kSyscallSets[] = {
+ SyscallSet::All(),
+ SyscallSet::InvalidOnly(),
+};
+
+SANDBOX_TEST(SyscallSet, Monotonous) {
+ for (const SyscallSet& set : kSyscallSets) {
+ uint32_t prev = 0;
+ bool have_prev = false;
+ for (uint32_t sysnum : set) {
+ if (have_prev) {
+ SANDBOX_ASSERT(sysnum > prev);
+ } else if (set == SyscallSet::All()) {
+ // The iterator should start at 0.
+ SANDBOX_ASSERT(sysnum == 0);
+ }
+
+ prev = sysnum;
+ have_prev = true;
+ }
+
+ // The iterator should always return 0xFFFFFFFFu as the last value.
+ SANDBOX_ASSERT(have_prev);
+ SANDBOX_ASSERT(prev == 0xFFFFFFFFu);
+ }
+}
+
+// AssertRange checks that SyscallIterator produces all system call
+// numbers in the inclusive range [min, max].
+void AssertRange(uint32_t min, uint32_t max) {
+ SANDBOX_ASSERT(min < max);
+ uint32_t prev = min - 1;
+ for (uint32_t sysnum : SyscallSet::All()) {
+ if (sysnum >= min && sysnum <= max) {
+ SANDBOX_ASSERT(prev == sysnum - 1);
+ prev = sysnum;
+ }
+ }
+ SANDBOX_ASSERT(prev == max);
+}
+
+SANDBOX_TEST(SyscallSet, ValidSyscallRanges) {
+ AssertRange(MIN_SYSCALL, MAX_PUBLIC_SYSCALL);
+#if defined(__arm__)
+ AssertRange(MIN_PRIVATE_SYSCALL, MAX_PRIVATE_SYSCALL);
+ AssertRange(MIN_GHOST_SYSCALL, MAX_SYSCALL);
+#endif
+}
+
+SANDBOX_TEST(SyscallSet, InvalidSyscalls) {
+ static const uint32_t kExpected[] = {
+#if defined(__mips__)
+ 0,
+ MIN_SYSCALL - 1,
+#endif
+ MAX_PUBLIC_SYSCALL + 1,
+#if defined(__arm__)
+ MIN_PRIVATE_SYSCALL - 1,
+ MAX_PRIVATE_SYSCALL + 1,
+ MIN_GHOST_SYSCALL - 1,
+ MAX_SYSCALL + 1,
+#endif
+ 0x7FFFFFFFu,
+ 0x80000000u,
+ 0xFFFFFFFFu,
+ };
+
+ for (const SyscallSet& set : kSyscallSets) {
+ size_t i = 0;
+ for (uint32_t sysnum : set) {
+ if (!SyscallSet::IsValid(sysnum)) {
+ SANDBOX_ASSERT(i < arraysize(kExpected));
+ SANDBOX_ASSERT(kExpected[i] == sysnum);
+ ++i;
+ }
+ }
+ SANDBOX_ASSERT(i == arraysize(kExpected));
+ }
+}
+
+SANDBOX_TEST(SyscallSet, ValidOnlyIsOnlyValid) {
+ for (uint32_t sysnum : SyscallSet::ValidOnly()) {
+ SANDBOX_ASSERT(SyscallSet::IsValid(sysnum));
+ }
+}
+
+SANDBOX_TEST(SyscallSet, InvalidOnlyIsOnlyInvalid) {
+ for (uint32_t sysnum : SyscallSet::InvalidOnly()) {
+ SANDBOX_ASSERT(!SyscallSet::IsValid(sysnum));
+ }
+}
+
+SANDBOX_TEST(SyscallSet, AllIsValidOnlyPlusInvalidOnly) {
+ std::vector<uint32_t> merged;
+ const SyscallSet valid_only = SyscallSet::ValidOnly();
+ const SyscallSet invalid_only = SyscallSet::InvalidOnly();
+ std::merge(valid_only.begin(),
+ valid_only.end(),
+ invalid_only.begin(),
+ invalid_only.end(),
+ std::back_inserter(merged));
+
+ const SyscallSet all = SyscallSet::All();
+ SANDBOX_ASSERT(merged == std::vector<uint32_t>(all.begin(), all.end()));
+}
+
+} // namespace
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/bpf_dsl/trap_registry.h b/libchrome/sandbox/linux/bpf_dsl/trap_registry.h
new file mode 100644
index 0000000..0a5d2f1
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/trap_registry.h
@@ -0,0 +1,73 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_BPF_DSL_TRAP_REGISTRY_H_
+#define SANDBOX_LINUX_BPF_DSL_TRAP_REGISTRY_H_
+
+#include <stdint.h>
+
+#include "base/macros.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+
+// This must match the kernel's seccomp_data structure.
+struct arch_seccomp_data {
+ int nr;
+ uint32_t arch;
+ uint64_t instruction_pointer;
+ uint64_t args[6];
+};
+
+namespace bpf_dsl {
+
+// TrapRegistry provides an interface for registering "trap handlers"
+// by associating them with non-zero 16-bit trap IDs. Trap IDs should
+// remain valid for the lifetime of the trap registry.
+class SANDBOX_EXPORT TrapRegistry {
+ public:
+ // TrapFnc is a pointer to a function that fulfills the trap handler
+ // function signature.
+ //
+ // Trap handlers follow the calling convention of native system
+ // calls; e.g., to report an error, they return an exit code in the
+ // range -1..-4096 instead of directly modifying errno. However,
+ // modifying errno is harmless, as the original value will be
+ // restored afterwards.
+ //
+ // Trap handlers are executed from signal context and possibly an
+ // async-signal context, so they must be async-signal safe:
+ // http://pubs.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html
+ typedef intptr_t (*TrapFnc)(const struct arch_seccomp_data& args, void* aux);
+
+ // Add registers the specified trap handler tuple and returns a
+ // non-zero trap ID that uniquely identifies the tuple for the
+ // lifetime of the trap registry. If the same tuple is registered
+ // multiple times, the same value will be returned each time.
+ virtual uint16_t Add(TrapFnc fnc, const void* aux, bool safe) = 0;
+
+ // EnableUnsafeTraps tries to enable unsafe traps and returns
+ // whether it was successful. This is a one-way operation.
+ //
+ // CAUTION: Enabling unsafe traps effectively defeats the security
+ // guarantees provided by the sandbox policy. TrapRegistry
+ // implementations should ensure unsafe traps are only enabled
+ // during testing.
+ virtual bool EnableUnsafeTraps() = 0;
+
+ protected:
+ TrapRegistry() {}
+
+ // TrapRegistry's destructor is intentionally non-virtual so that
+ // implementations can omit their destructor. Instead we protect against
+ // misuse by marking it protected.
+ ~TrapRegistry() {}
+
+ DISALLOW_COPY_AND_ASSIGN(TrapRegistry);
+};
+
+} // namespace bpf_dsl
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_BPF_DSL_TRAP_REGISTRY_H_
diff --git a/libchrome/sandbox/linux/bpf_dsl/verifier.cc b/libchrome/sandbox/linux/bpf_dsl/verifier.cc
new file mode 100644
index 0000000..b5383e5
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/verifier.cc
@@ -0,0 +1,225 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/bpf_dsl/verifier.h"
+
+#include <stdint.h>
+#include <string.h>
+
+#include "base/macros.h"
+#include "sandbox/linux/bpf_dsl/seccomp_macros.h"
+#include "sandbox/linux/bpf_dsl/trap_registry.h"
+#include "sandbox/linux/system_headers/linux_filter.h"
+#include "sandbox/linux/system_headers/linux_seccomp.h"
+
+namespace sandbox {
+namespace bpf_dsl {
+
+namespace {
+
+struct State {
+ State(const std::vector<struct sock_filter>& p,
+ const struct arch_seccomp_data& d)
+ : program(p), data(d), ip(0), accumulator(0), acc_is_valid(false) {}
+ const std::vector<struct sock_filter>& program;
+ const struct arch_seccomp_data& data;
+ unsigned int ip;
+ uint32_t accumulator;
+ bool acc_is_valid;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(State);
+};
+
+void Ld(State* state, const struct sock_filter& insn, const char** err) {
+ if (BPF_SIZE(insn.code) != BPF_W || BPF_MODE(insn.code) != BPF_ABS ||
+ insn.jt != 0 || insn.jf != 0) {
+ *err = "Invalid BPF_LD instruction";
+ return;
+ }
+ if (insn.k < sizeof(struct arch_seccomp_data) && (insn.k & 3) == 0) {
+ // We only allow loading of properly aligned 32bit quantities.
+ memcpy(&state->accumulator,
+ reinterpret_cast<const char*>(&state->data) + insn.k, 4);
+ } else {
+ *err = "Invalid operand in BPF_LD instruction";
+ return;
+ }
+ state->acc_is_valid = true;
+ return;
+}
+
+void Jmp(State* state, const struct sock_filter& insn, const char** err) {
+ if (BPF_OP(insn.code) == BPF_JA) {
+ if (state->ip + insn.k + 1 >= state->program.size() ||
+ state->ip + insn.k + 1 <= state->ip) {
+ compilation_failure:
+ *err = "Invalid BPF_JMP instruction";
+ return;
+ }
+ state->ip += insn.k;
+ } else {
+ if (BPF_SRC(insn.code) != BPF_K || !state->acc_is_valid ||
+ state->ip + insn.jt + 1 >= state->program.size() ||
+ state->ip + insn.jf + 1 >= state->program.size()) {
+ goto compilation_failure;
+ }
+ switch (BPF_OP(insn.code)) {
+ case BPF_JEQ:
+ if (state->accumulator == insn.k) {
+ state->ip += insn.jt;
+ } else {
+ state->ip += insn.jf;
+ }
+ break;
+ case BPF_JGT:
+ if (state->accumulator > insn.k) {
+ state->ip += insn.jt;
+ } else {
+ state->ip += insn.jf;
+ }
+ break;
+ case BPF_JGE:
+ if (state->accumulator >= insn.k) {
+ state->ip += insn.jt;
+ } else {
+ state->ip += insn.jf;
+ }
+ break;
+ case BPF_JSET:
+ if (state->accumulator & insn.k) {
+ state->ip += insn.jt;
+ } else {
+ state->ip += insn.jf;
+ }
+ break;
+ default:
+ goto compilation_failure;
+ }
+ }
+}
+
+uint32_t Ret(State*, const struct sock_filter& insn, const char** err) {
+ if (BPF_SRC(insn.code) != BPF_K) {
+ *err = "Invalid BPF_RET instruction";
+ return 0;
+ }
+ return insn.k;
+}
+
+void Alu(State* state, const struct sock_filter& insn, const char** err) {
+ if (BPF_OP(insn.code) == BPF_NEG) {
+ state->accumulator = -state->accumulator;
+ return;
+ } else {
+ if (BPF_SRC(insn.code) != BPF_K) {
+ *err = "Unexpected source operand in arithmetic operation";
+ return;
+ }
+ switch (BPF_OP(insn.code)) {
+ case BPF_ADD:
+ state->accumulator += insn.k;
+ break;
+ case BPF_SUB:
+ state->accumulator -= insn.k;
+ break;
+ case BPF_MUL:
+ state->accumulator *= insn.k;
+ break;
+ case BPF_DIV:
+ if (!insn.k) {
+ *err = "Illegal division by zero";
+ break;
+ }
+ state->accumulator /= insn.k;
+ break;
+ case BPF_MOD:
+ if (!insn.k) {
+ *err = "Illegal division by zero";
+ break;
+ }
+ state->accumulator %= insn.k;
+ break;
+ case BPF_OR:
+ state->accumulator |= insn.k;
+ break;
+ case BPF_XOR:
+ state->accumulator ^= insn.k;
+ break;
+ case BPF_AND:
+ state->accumulator &= insn.k;
+ break;
+ case BPF_LSH:
+ if (insn.k > 32) {
+ *err = "Illegal shift operation";
+ break;
+ }
+ state->accumulator <<= insn.k;
+ break;
+ case BPF_RSH:
+ if (insn.k > 32) {
+ *err = "Illegal shift operation";
+ break;
+ }
+ state->accumulator >>= insn.k;
+ break;
+ default:
+ *err = "Invalid operator in arithmetic operation";
+ break;
+ }
+ }
+}
+
+} // namespace
+
+uint32_t Verifier::EvaluateBPF(const std::vector<struct sock_filter>& program,
+ const struct arch_seccomp_data& data,
+ const char** err) {
+ *err = NULL;
+ if (program.size() < 1 || program.size() >= SECCOMP_MAX_PROGRAM_SIZE) {
+ *err = "Invalid program length";
+ return 0;
+ }
+ for (State state(program, data); !*err; ++state.ip) {
+ if (state.ip >= program.size()) {
+ *err = "Invalid instruction pointer in BPF program";
+ break;
+ }
+ const struct sock_filter& insn = program[state.ip];
+ switch (BPF_CLASS(insn.code)) {
+ case BPF_LD:
+ Ld(&state, insn, err);
+ break;
+ case BPF_JMP:
+ Jmp(&state, insn, err);
+ break;
+ case BPF_RET: {
+ uint32_t r = Ret(&state, insn, err);
+ switch (r & SECCOMP_RET_ACTION) {
+ case SECCOMP_RET_ALLOW:
+ case SECCOMP_RET_ERRNO:
+ case SECCOMP_RET_KILL:
+ case SECCOMP_RET_TRACE:
+ case SECCOMP_RET_TRAP:
+ break;
+ case SECCOMP_RET_INVALID: // Should never show up in BPF program
+ default:
+ *err = "Unexpected return code found in BPF program";
+ return 0;
+ }
+ return r;
+ }
+ case BPF_ALU:
+ Alu(&state, insn, err);
+ break;
+ default:
+ *err = "Unexpected instruction in BPF program";
+ break;
+ }
+ }
+ return 0;
+}
+
+} // namespace bpf_dsl
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/bpf_dsl/verifier.h b/libchrome/sandbox/linux/bpf_dsl/verifier.h
new file mode 100644
index 0000000..9b25ab1
--- /dev/null
+++ b/libchrome/sandbox/linux/bpf_dsl/verifier.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_BPF_DSL_VERIFIER_H__
+#define SANDBOX_LINUX_BPF_DSL_VERIFIER_H__
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/macros.h"
+#include "sandbox/sandbox_export.h"
+
+struct sock_filter;
+
+namespace sandbox {
+struct arch_seccomp_data;
+
+namespace bpf_dsl {
+
+// TODO(mdempsky): This class doesn't perform any verification any more, so it
+// deserves a new name.
+class SANDBOX_EXPORT Verifier {
+ public:
+  // Evaluate a given BPF program for a particular set of system call
+  // parameters. If evaluation failed for any reason, "err" will be set to
+  // a non-NULL error string. Otherwise, the BPF program's result will be
+  // returned by the function and "err" is NULL.
+  // We do not actually implement the full BPF state machine, but only the
+  // parts that can actually be generated by our BPF compiler. If this code
+  // is used for purposes other than verifying the output of the sandbox's
+  // BPF compiler, we might have to extend this BPF interpreter.
+  static uint32_t EvaluateBPF(const std::vector<struct sock_filter>& program,
+                              const struct arch_seccomp_data& data,
+                              const char** err);
+
+ private:
+  // Static-only interface; instances cannot be created.
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Verifier);
+};
+
+} // namespace bpf_dsl
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_BPF_DSL_VERIFIER_H__
diff --git a/libchrome/sandbox/linux/integration_tests/DEPS b/libchrome/sandbox/linux/integration_tests/DEPS
new file mode 100644
index 0000000..d50729c
--- /dev/null
+++ b/libchrome/sandbox/linux/integration_tests/DEPS
@@ -0,0 +1,7 @@
+include_rules = [
+ "+sandbox/linux/bpf_dsl",
+ "+sandbox/linux/seccomp-bpf",
+ "+sandbox/linux/services",
+ "+sandbox/linux/syscall_broker",
+ "+sandbox/linux/system_headers",
+]
diff --git a/libchrome/sandbox/linux/sandbox_linux.gypi b/libchrome/sandbox/linux/sandbox_linux.gypi
new file mode 100644
index 0000000..e96ae9e
--- /dev/null
+++ b/libchrome/sandbox/linux/sandbox_linux.gypi
@@ -0,0 +1,434 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'conditions': [
+ ['OS=="linux"', {
+ 'compile_suid_client': 1,
+ 'compile_credentials': 1,
+ 'use_base_test_suite': 1,
+ }, {
+ 'compile_suid_client': 0,
+ 'compile_credentials': 0,
+ 'use_base_test_suite': 0,
+ }],
+ ['OS=="linux" and (target_arch=="ia32" or target_arch=="x64" or '
+ 'target_arch=="mipsel")', {
+ 'compile_seccomp_bpf_demo': 1,
+ }, {
+ 'compile_seccomp_bpf_demo': 0,
+ }],
+ ],
+ },
+ 'target_defaults': {
+ 'target_conditions': [
+ # All linux/ files will automatically be excluded on Android
+ # so make sure we re-include them explicitly.
+ ['OS == "android"', {
+ 'sources/': [
+ ['include', '^linux/'],
+ ],
+ }],
+ ],
+ },
+ 'targets': [
+ # We have two principal targets: sandbox and sandbox_linux_unittests
+ # All other targets are listed as dependencies.
+ # There is one notable exception: for historical reasons, chrome_sandbox is
+ # the setuid sandbox and is its own target.
+ {
+ 'target_name': 'sandbox',
+ 'type': 'none',
+ 'dependencies': [
+ 'sandbox_services',
+ ],
+ 'conditions': [
+ [ 'compile_suid_client==1', {
+ 'dependencies': [
+ 'suid_sandbox_client',
+ ],
+ }],
+ # Compile seccomp BPF when we support it.
+ [ 'use_seccomp_bpf==1', {
+ 'dependencies': [
+ 'seccomp_bpf',
+ 'seccomp_bpf_helpers',
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'sandbox_linux_test_utils',
+ 'type': 'static_library',
+ 'dependencies': [
+ '../testing/gtest.gyp:gtest',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'tests/sandbox_test_runner.cc',
+ 'tests/sandbox_test_runner.h',
+ 'tests/sandbox_test_runner_function_pointer.cc',
+ 'tests/sandbox_test_runner_function_pointer.h',
+ 'tests/test_utils.cc',
+ 'tests/test_utils.h',
+ 'tests/unit_tests.cc',
+ 'tests/unit_tests.h',
+ ],
+ 'conditions': [
+ [ 'use_seccomp_bpf==1', {
+ 'sources': [
+ 'seccomp-bpf/bpf_tester_compatibility_delegate.h',
+ 'seccomp-bpf/bpf_tests.h',
+ 'seccomp-bpf/sandbox_bpf_test_runner.cc',
+ 'seccomp-bpf/sandbox_bpf_test_runner.h',
+ ],
+ 'dependencies': [
+ 'seccomp_bpf',
+ ]
+ }],
+ [ 'use_base_test_suite==1', {
+ 'dependencies': [
+ '../base/base.gyp:test_support_base',
+ ],
+ 'defines': [
+ 'SANDBOX_USES_BASE_TEST_SUITE',
+ ],
+ }],
+ ],
+ },
+ {
+ # The main sandboxing test target.
+ 'target_name': 'sandbox_linux_unittests',
+ 'includes': [
+ 'sandbox_linux_test_sources.gypi',
+ ],
+ 'type': 'executable',
+ 'conditions': [
+ [ 'OS == "android"', {
+ 'variables': {
+ 'test_type': 'gtest',
+ 'test_suite_name': '<(_target_name)',
+ },
+ 'includes': [
+ '../../build/android/test_runner.gypi',
+ ],
+ }]
+ ]
+ },
+ {
+ 'target_name': 'seccomp_bpf',
+ 'type': '<(component)',
+ 'sources': [
+ 'bpf_dsl/bpf_dsl.cc',
+ 'bpf_dsl/bpf_dsl.h',
+ 'bpf_dsl/bpf_dsl_forward.h',
+ 'bpf_dsl/bpf_dsl_impl.h',
+ 'bpf_dsl/codegen.cc',
+ 'bpf_dsl/codegen.h',
+ 'bpf_dsl/cons.h',
+ 'bpf_dsl/errorcode.h',
+ 'bpf_dsl/linux_syscall_ranges.h',
+ 'bpf_dsl/policy.cc',
+ 'bpf_dsl/policy.h',
+ 'bpf_dsl/policy_compiler.cc',
+ 'bpf_dsl/policy_compiler.h',
+ 'bpf_dsl/seccomp_macros.h',
+        # (duplicate 'bpf_dsl/seccomp_macros.h' entry removed; listed above)
+ 'bpf_dsl/syscall_set.cc',
+ 'bpf_dsl/syscall_set.h',
+ 'bpf_dsl/trap_registry.h',
+ 'seccomp-bpf/die.cc',
+ 'seccomp-bpf/die.h',
+ 'seccomp-bpf/sandbox_bpf.cc',
+ 'seccomp-bpf/sandbox_bpf.h',
+ 'seccomp-bpf/syscall.cc',
+ 'seccomp-bpf/syscall.h',
+ 'seccomp-bpf/trap.cc',
+ 'seccomp-bpf/trap.h',
+ ],
+ 'dependencies': [
+ '../base/base.gyp:base',
+ 'sandbox_services',
+ 'sandbox_services_headers',
+ ],
+ 'defines': [
+ 'SANDBOX_IMPLEMENTATION',
+ ],
+ 'includes': [
+ # Disable LTO due to compiler bug
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=57703
+ '../../build/android/disable_gcc_lto.gypi',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ },
+ {
+ 'target_name': 'seccomp_bpf_helpers',
+ 'type': '<(component)',
+ 'sources': [
+ 'seccomp-bpf-helpers/baseline_policy.cc',
+ 'seccomp-bpf-helpers/baseline_policy.h',
+ 'seccomp-bpf-helpers/sigsys_handlers.cc',
+ 'seccomp-bpf-helpers/sigsys_handlers.h',
+ 'seccomp-bpf-helpers/syscall_parameters_restrictions.cc',
+ 'seccomp-bpf-helpers/syscall_parameters_restrictions.h',
+ 'seccomp-bpf-helpers/syscall_sets.cc',
+ 'seccomp-bpf-helpers/syscall_sets.h',
+ ],
+ 'dependencies': [
+ '../base/base.gyp:base',
+ 'sandbox_services',
+ 'seccomp_bpf',
+ ],
+ 'defines': [
+ 'SANDBOX_IMPLEMENTATION',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ },
+ {
+ # The setuid sandbox, for Linux
+ 'target_name': 'chrome_sandbox',
+ 'type': 'executable',
+ 'sources': [
+ 'suid/common/sandbox.h',
+ 'suid/common/suid_unsafe_environment_variables.h',
+ 'suid/process_util.h',
+ 'suid/process_util_linux.c',
+ 'suid/sandbox.c',
+ ],
+ 'cflags': [
+ # For ULLONG_MAX
+ '-std=gnu99',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ # Do not use any sanitizer tools with this binary. http://crbug.com/382766
+ 'cflags/': [
+ ['exclude', '-fsanitize'],
+ ],
+ 'ldflags/': [
+ ['exclude', '-fsanitize'],
+ ],
+ },
+ { 'target_name': 'sandbox_services',
+ 'type': '<(component)',
+ 'sources': [
+ 'services/init_process_reaper.cc',
+ 'services/init_process_reaper.h',
+ 'services/proc_util.cc',
+ 'services/proc_util.h',
+ 'services/resource_limits.cc',
+ 'services/resource_limits.h',
+ 'services/scoped_process.cc',
+ 'services/scoped_process.h',
+ 'services/syscall_wrappers.cc',
+ 'services/syscall_wrappers.h',
+ 'services/thread_helpers.cc',
+ 'services/thread_helpers.h',
+ 'services/yama.cc',
+ 'services/yama.h',
+ 'syscall_broker/broker_channel.cc',
+ 'syscall_broker/broker_channel.h',
+ 'syscall_broker/broker_client.cc',
+ 'syscall_broker/broker_client.h',
+ 'syscall_broker/broker_common.h',
+ 'syscall_broker/broker_file_permission.cc',
+ 'syscall_broker/broker_file_permission.h',
+ 'syscall_broker/broker_host.cc',
+ 'syscall_broker/broker_host.h',
+ 'syscall_broker/broker_policy.cc',
+ 'syscall_broker/broker_policy.h',
+ 'syscall_broker/broker_process.cc',
+ 'syscall_broker/broker_process.h',
+ ],
+ 'dependencies': [
+ '../base/base.gyp:base',
+ ],
+ 'defines': [
+ 'SANDBOX_IMPLEMENTATION',
+ ],
+ 'conditions': [
+ ['compile_credentials==1', {
+ 'sources': [
+ 'services/credentials.cc',
+ 'services/credentials.h',
+ 'services/namespace_sandbox.cc',
+ 'services/namespace_sandbox.h',
+ 'services/namespace_utils.cc',
+ 'services/namespace_utils.h',
+ ],
+ 'dependencies': [
+ # for capability.h.
+ 'sandbox_services_headers',
+ ],
+ }],
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ },
+ { 'target_name': 'sandbox_services_headers',
+ 'type': 'none',
+ 'sources': [
+ 'system_headers/arm64_linux_syscalls.h',
+ 'system_headers/arm64_linux_ucontext.h',
+ 'system_headers/arm_linux_syscalls.h',
+ 'system_headers/arm_linux_ucontext.h',
+ 'system_headers/capability.h',
+ 'system_headers/i386_linux_ucontext.h',
+ 'system_headers/linux_futex.h',
+ 'system_headers/linux_seccomp.h',
+ 'system_headers/linux_syscalls.h',
+ 'system_headers/linux_time.h',
+ 'system_headers/linux_ucontext.h',
+ 'system_headers/mips_linux_syscalls.h',
+ 'system_headers/mips_linux_ucontext.h',
+ 'system_headers/x86_32_linux_syscalls.h',
+ 'system_headers/x86_64_linux_syscalls.h',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ },
+ {
+ 'target_name': 'suid_sandbox_client',
+ 'type': '<(component)',
+ 'sources': [
+ 'suid/common/sandbox.h',
+ 'suid/common/suid_unsafe_environment_variables.h',
+ 'suid/client/setuid_sandbox_client.cc',
+ 'suid/client/setuid_sandbox_client.h',
+ 'suid/client/setuid_sandbox_host.cc',
+ 'suid/client/setuid_sandbox_host.h',
+ ],
+ 'defines': [
+ 'SANDBOX_IMPLEMENTATION',
+ ],
+ 'dependencies': [
+ '../base/base.gyp:base',
+ 'sandbox_services',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ },
+ {
+ 'target_name': 'bpf_dsl_golden',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'generate',
+ 'inputs': [
+ 'bpf_dsl/golden/generate.py',
+ 'bpf_dsl/golden/i386/ArgSizePolicy.txt',
+ 'bpf_dsl/golden/i386/BasicPolicy.txt',
+ 'bpf_dsl/golden/i386/ElseIfPolicy.txt',
+ 'bpf_dsl/golden/i386/MaskingPolicy.txt',
+ 'bpf_dsl/golden/i386/MoreBooleanLogicPolicy.txt',
+ 'bpf_dsl/golden/i386/NegativeConstantsPolicy.txt',
+ 'bpf_dsl/golden/i386/SwitchPolicy.txt',
+ 'bpf_dsl/golden/x86-64/ArgSizePolicy.txt',
+ 'bpf_dsl/golden/x86-64/BasicPolicy.txt',
+ 'bpf_dsl/golden/x86-64/BooleanLogicPolicy.txt',
+ 'bpf_dsl/golden/x86-64/ElseIfPolicy.txt',
+ 'bpf_dsl/golden/x86-64/MaskingPolicy.txt',
+ 'bpf_dsl/golden/x86-64/MoreBooleanLogicPolicy.txt',
+ 'bpf_dsl/golden/x86-64/NegativeConstantsPolicy.txt',
+ 'bpf_dsl/golden/x86-64/SwitchPolicy.txt',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/sandbox/linux/bpf_dsl/golden/golden_files.h',
+ ],
+ 'action': [
+ 'python',
+ 'linux/bpf_dsl/golden/generate.py',
+ '<(SHARED_INTERMEDIATE_DIR)/sandbox/linux/bpf_dsl/golden/golden_files.h',
+ 'linux/bpf_dsl/golden/i386/ArgSizePolicy.txt',
+ 'linux/bpf_dsl/golden/i386/BasicPolicy.txt',
+ 'linux/bpf_dsl/golden/i386/ElseIfPolicy.txt',
+ 'linux/bpf_dsl/golden/i386/MaskingPolicy.txt',
+ 'linux/bpf_dsl/golden/i386/MoreBooleanLogicPolicy.txt',
+ 'linux/bpf_dsl/golden/i386/NegativeConstantsPolicy.txt',
+ 'linux/bpf_dsl/golden/i386/SwitchPolicy.txt',
+ 'linux/bpf_dsl/golden/x86-64/ArgSizePolicy.txt',
+ 'linux/bpf_dsl/golden/x86-64/BasicPolicy.txt',
+ 'linux/bpf_dsl/golden/x86-64/BooleanLogicPolicy.txt',
+ 'linux/bpf_dsl/golden/x86-64/ElseIfPolicy.txt',
+ 'linux/bpf_dsl/golden/x86-64/MaskingPolicy.txt',
+ 'linux/bpf_dsl/golden/x86-64/MoreBooleanLogicPolicy.txt',
+ 'linux/bpf_dsl/golden/x86-64/NegativeConstantsPolicy.txt',
+ 'linux/bpf_dsl/golden/x86-64/SwitchPolicy.txt',
+ ],
+ 'message': 'Generating header from golden files ...',
+ },
+ ],
+ },
+ ],
+ 'conditions': [
+ [ 'OS=="android"', {
+ 'targets': [
+ {
+ 'target_name': 'sandbox_linux_unittests_deps',
+ 'type': 'none',
+ 'dependencies': [
+ 'sandbox_linux_unittests',
+ ],
+ 'variables': {
+ 'output_dir': '<(PRODUCT_DIR)/sandbox_linux_unittests__dist/',
+ 'native_binary': '<(PRODUCT_DIR)/sandbox_linux_unittests',
+ 'include_main_binary': 1,
+ },
+ 'includes': [
+ '../../build/android/native_app_dependencies.gypi'
+ ],
+ }],
+ }],
+ [ 'OS=="android"', {
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'sandbox_linux_unittests_android_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'sandbox_linux_unittests',
+ ],
+ 'includes': [
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ '../sandbox_linux_unittests_android.isolate',
+ ],
+ },
+ ],
+ },
+ ],
+ ],
+ }],
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'sandbox_linux_unittests_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'sandbox_linux_unittests',
+ ],
+ 'includes': [
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ '../sandbox_linux_unittests.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/libchrome/sandbox/linux/sandbox_linux_nacl_nonsfi.gyp b/libchrome/sandbox/linux/sandbox_linux_nacl_nonsfi.gyp
new file mode 100644
index 0000000..50e637c
--- /dev/null
+++ b/libchrome/sandbox/linux/sandbox_linux_nacl_nonsfi.gyp
@@ -0,0 +1,87 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ },
+ 'includes': [
+ '../../build/common_untrusted.gypi',
+ ],
+ 'conditions': [
+ ['disable_nacl==0 and disable_nacl_untrusted==0', {
+ 'targets': [
+ {
+ 'target_name': 'sandbox_linux_nacl_nonsfi',
+ 'type': 'none',
+ 'variables': {
+ 'nacl_untrusted_build': 1,
+ 'nlib_target': 'libsandbox_linux_nacl_nonsfi.a',
+ 'build_glibc': 0,
+ 'build_newlib': 0,
+ 'build_irt': 0,
+ 'build_pnacl_newlib': 0,
+ 'build_nonsfi_helper': 1,
+ 'compile_flags': [
+ '-fgnu-inline-asm',
+ ],
+ 'sources': [
+ # This is the subset of linux build target, needed for
+ # nacl_helper_nonsfi's sandbox implementation.
+ 'bpf_dsl/bpf_dsl.cc',
+ 'bpf_dsl/codegen.cc',
+ 'bpf_dsl/policy.cc',
+ 'bpf_dsl/policy_compiler.cc',
+ 'bpf_dsl/syscall_set.cc',
+ 'seccomp-bpf-helpers/sigsys_handlers.cc',
+ 'seccomp-bpf-helpers/syscall_parameters_restrictions.cc',
+ 'seccomp-bpf/die.cc',
+ 'seccomp-bpf/sandbox_bpf.cc',
+ 'seccomp-bpf/syscall.cc',
+ 'seccomp-bpf/trap.cc',
+ 'services/credentials.cc',
+ 'services/namespace_sandbox.cc',
+ 'services/namespace_utils.cc',
+ 'services/proc_util.cc',
+ 'services/resource_limits.cc',
+ 'services/syscall_wrappers.cc',
+ 'services/thread_helpers.cc',
+ 'suid/client/setuid_sandbox_client.cc',
+ ],
+ },
+ 'dependencies': [
+ '../../base/base_nacl.gyp:base_nacl_nonsfi',
+ ],
+ },
+ ],
+ }],
+
+ ['disable_nacl==0 and disable_nacl_untrusted==0 and enable_nacl_nonsfi_test==1', {
+ 'targets': [
+ {
+ 'target_name': 'sandbox_linux_test_utils_nacl_nonsfi',
+ 'type': 'none',
+ 'variables': {
+ 'nacl_untrusted_build': 1,
+ 'nlib_target': 'libsandbox_linux_test_utils_nacl_nonsfi.a',
+ 'build_glibc': 0,
+ 'build_newlib': 0,
+ 'build_irt': 0,
+ 'build_pnacl_newlib': 0,
+ 'build_nonsfi_helper': 1,
+
+ 'sources': [
+ 'seccomp-bpf/sandbox_bpf_test_runner.cc',
+ 'tests/sandbox_test_runner.cc',
+ 'tests/unit_tests.cc',
+ ],
+ },
+ 'dependencies': [
+ '../../testing/gtest_nacl.gyp:gtest_nacl',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/libchrome/sandbox/linux/sandbox_linux_test_sources.gypi b/libchrome/sandbox/linux/sandbox_linux_test_sources.gypi
new file mode 100644
index 0000000..612814e
--- /dev/null
+++ b/libchrome/sandbox/linux/sandbox_linux_test_sources.gypi
@@ -0,0 +1,93 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Tests need to be compiled in the same link unit, so we have to list them
+# in a separate .gypi file.
+{
+ 'dependencies': [
+ 'sandbox',
+ 'sandbox_linux_test_utils',
+ 'sandbox_services',
+ '../base/base.gyp:base',
+ '../testing/gtest.gyp:gtest',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'services/proc_util_unittest.cc',
+ 'services/scoped_process_unittest.cc',
+ 'services/resource_limits_unittests.cc',
+ 'services/syscall_wrappers_unittest.cc',
+ 'services/thread_helpers_unittests.cc',
+ 'services/yama_unittests.cc',
+ 'syscall_broker/broker_file_permission_unittest.cc',
+ 'syscall_broker/broker_process_unittest.cc',
+ 'tests/main.cc',
+ 'tests/scoped_temporary_file.cc',
+ 'tests/scoped_temporary_file.h',
+ 'tests/scoped_temporary_file_unittest.cc',
+ 'tests/test_utils_unittest.cc',
+ 'tests/unit_tests_unittest.cc',
+ ],
+ 'conditions': [
+ [ 'compile_suid_client==1', {
+ 'sources': [
+ 'suid/client/setuid_sandbox_client_unittest.cc',
+ 'suid/client/setuid_sandbox_host_unittest.cc',
+ ],
+ }],
+ [ 'use_seccomp_bpf==1', {
+ 'sources': [
+ 'bpf_dsl/bpf_dsl_unittest.cc',
+ 'bpf_dsl/codegen_unittest.cc',
+ 'bpf_dsl/cons_unittest.cc',
+ 'bpf_dsl/dump_bpf.cc',
+ 'bpf_dsl/dump_bpf.h',
+ 'bpf_dsl/syscall_set_unittest.cc',
+ 'bpf_dsl/test_trap_registry.cc',
+ 'bpf_dsl/test_trap_registry.h',
+ 'bpf_dsl/test_trap_registry_unittest.cc',
+ 'bpf_dsl/verifier.cc',
+ 'bpf_dsl/verifier.h',
+ 'integration_tests/bpf_dsl_seccomp_unittest.cc',
+ 'integration_tests/seccomp_broker_process_unittest.cc',
+ 'seccomp-bpf-helpers/baseline_policy_unittest.cc',
+ 'seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc',
+ 'seccomp-bpf/bpf_tests_unittest.cc',
+ 'seccomp-bpf/sandbox_bpf_unittest.cc',
+ 'seccomp-bpf/syscall_unittest.cc',
+ 'seccomp-bpf/trap_unittest.cc',
+ ],
+ 'dependencies': [
+ 'bpf_dsl_golden',
+ ],
+ }],
+ [ 'compile_credentials==1', {
+ 'sources': [
+ 'integration_tests/namespace_unix_domain_socket_unittest.cc',
+ 'services/credentials_unittest.cc',
+ 'services/namespace_utils_unittest.cc',
+ ],
+ 'dependencies': [
+ '../build/linux/system.gyp:libcap'
+ ],
+ 'conditions': [
+ [ 'use_base_test_suite==1', {
+ 'sources': [
+ 'services/namespace_sandbox_unittest.cc',
+ ]
+ }]
+ ],
+ }],
+ [ 'use_base_test_suite==1', {
+ 'dependencies': [
+ '../base/base.gyp:test_support_base',
+ ],
+ 'defines': [
+ 'SANDBOX_USES_BASE_TEST_SUITE',
+ ],
+ }],
+ ],
+}
diff --git a/libchrome/sandbox/linux/seccomp-bpf-helpers/DEPS b/libchrome/sandbox/linux/seccomp-bpf-helpers/DEPS
new file mode 100644
index 0000000..4419fd1
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf-helpers/DEPS
@@ -0,0 +1,7 @@
+include_rules = [
+ "+sandbox/linux/bpf_dsl",
+ "+sandbox/linux/seccomp-bpf",
+ "+sandbox/linux/services",
+ "+sandbox/linux/system_headers",
+ "+third_party/lss/linux_syscall_support.h",
+]
diff --git a/libchrome/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc b/libchrome/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc
new file mode 100644
index 0000000..2bf572c
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc
@@ -0,0 +1,284 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf-helpers/baseline_policy.h"
+
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "sandbox/linux/bpf_dsl/bpf_dsl.h"
+#include "sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.h"
+#include "sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h"
+#include "sandbox/linux/seccomp-bpf-helpers/syscall_sets.h"
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+#include "sandbox/linux/services/syscall_wrappers.h"
+#include "sandbox/linux/system_headers/linux_syscalls.h"
+
+#if !defined(SO_PEEK_OFF)
+#define SO_PEEK_OFF 42
+#endif
+
+// Changing this implementation will have an effect on *all* policies.
+// Currently this means: Renderer/Worker, GPU, Flash and NaCl.
+
+using sandbox::bpf_dsl::Allow;
+using sandbox::bpf_dsl::Arg;
+using sandbox::bpf_dsl::Error;
+using sandbox::bpf_dsl::If;
+using sandbox::bpf_dsl::ResultExpr;
+
+namespace sandbox {
+
+namespace {
+
+// System calls that the baseline policy allows unconditionally, expressed as
+// the union of the "allowed" SyscallSets (plus per-architecture private sets).
+bool IsBaselinePolicyAllowed(int sysno) {
+  return SyscallSets::IsAllowedAddressSpaceAccess(sysno) ||
+         SyscallSets::IsAllowedBasicScheduler(sysno) ||
+         SyscallSets::IsAllowedEpoll(sysno) ||
+         SyscallSets::IsAllowedFileSystemAccessViaFd(sysno) ||
+         SyscallSets::IsAllowedFutex(sysno) ||
+         SyscallSets::IsAllowedGeneralIo(sysno) ||
+         SyscallSets::IsAllowedGetOrModifySocket(sysno) ||
+         SyscallSets::IsAllowedGettime(sysno) ||
+         SyscallSets::IsAllowedProcessStartOrDeath(sysno) ||
+         SyscallSets::IsAllowedSignalHandling(sysno) ||
+         SyscallSets::IsGetSimpleId(sysno) ||
+         SyscallSets::IsKernelInternalApi(sysno) ||
+#if defined(__arm__)
+         SyscallSets::IsArmPrivate(sysno) ||
+#endif
+#if defined(__mips__)
+         SyscallSets::IsMipsPrivate(sysno) ||
+#endif
+         SyscallSets::IsAllowedOperationOnFd(sysno);
+}
+
+// System calls that will trigger the crashing SIGSYS handler.
+// (A watched syscall that reaches the end of EvaluateSyscallImpl() results in
+// CrashSIGSYS().)
+bool IsBaselinePolicyWatched(int sysno) {
+  return SyscallSets::IsAdminOperation(sysno) ||
+         SyscallSets::IsAdvancedScheduler(sysno) ||
+         SyscallSets::IsAdvancedTimer(sysno) ||
+         SyscallSets::IsAsyncIo(sysno) ||
+         SyscallSets::IsDebug(sysno) ||
+         SyscallSets::IsEventFd(sysno) ||
+         SyscallSets::IsExtendedAttributes(sysno) ||
+         SyscallSets::IsFaNotify(sysno) ||
+         SyscallSets::IsFsControl(sysno) ||
+         SyscallSets::IsGlobalFSViewChange(sysno) ||
+         SyscallSets::IsGlobalProcessEnvironment(sysno) ||
+         SyscallSets::IsGlobalSystemStatus(sysno) ||
+         SyscallSets::IsInotify(sysno) ||
+         SyscallSets::IsKernelModule(sysno) ||
+         SyscallSets::IsKeyManagement(sysno) ||
+         SyscallSets::IsKill(sysno) ||
+         SyscallSets::IsMessageQueue(sysno) ||
+         SyscallSets::IsMisc(sysno) ||
+#if defined(__x86_64__)
+         SyscallSets::IsNetworkSocketInformation(sysno) ||
+#endif
+         SyscallSets::IsNuma(sysno) ||
+         SyscallSets::IsPrctl(sysno) ||
+         SyscallSets::IsProcessGroupOrSession(sysno) ||
+#if defined(__i386__) || defined(__mips__)
+         SyscallSets::IsSocketCall(sysno) ||
+#endif
+#if defined(__arm__)
+         SyscallSets::IsArmPciConfig(sysno) ||
+#endif
+#if defined(__mips__)
+         SyscallSets::IsMipsMisc(sysno) ||
+#endif
+         SyscallSets::IsTimer(sysno);
+}
+
+// |fs_denied_errno| is the errno return for denied filesystem access.
+// |current_pid| is the pid of the process the policy was instantiated for.
+// Returns the policy decision (a bpf_dsl::ResultExpr) for syscall |sysno|.
+// NOTE: the checks below are order-sensitive; earlier matches win.
+ResultExpr EvaluateSyscallImpl(int fs_denied_errno,
+                               pid_t current_pid,
+                               int sysno) {
+#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || \
+    defined(MEMORY_SANITIZER)
+  // TCGETS is required by the sanitizers on failure.
+  if (sysno == __NR_ioctl) {
+    return RestrictIoctl();
+  }
+
+  if (sysno == __NR_sched_getaffinity) {
+    return Allow();
+  }
+
+  // Used when RSS limiting is enabled in sanitizers.
+  if (sysno == __NR_getrusage) {
+    return RestrictGetrusage();
+  }
+
+  if (sysno == __NR_sigaltstack) {
+    // Required for better stack overflow detection in ASan. Disallowed in
+    // non-ASan builds.
+    return Allow();
+  }
+#endif  // defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) ||
+        // defined(MEMORY_SANITIZER)
+
+  if (IsBaselinePolicyAllowed(sysno)) {
+    return Allow();
+  }
+
+#if defined(OS_ANDROID)
+  // Needed for thread creation.
+  if (sysno == __NR_sigaltstack)
+    return Allow();
+#endif
+
+  if (sysno == __NR_clock_gettime) {
+    return RestrictClockID();
+  }
+
+  if (sysno == __NR_clone) {
+    return RestrictCloneToThreadsAndEPERMFork();
+  }
+
+  if (sysno == __NR_fcntl)
+    return RestrictFcntlCommands();
+
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+  if (sysno == __NR_fcntl64)
+    return RestrictFcntlCommands();
+#endif
+
+#if !defined(__aarch64__)
+  // fork() is never used as a system call (clone() is used instead), but we
+  // have seen it in fallback code on Android.
+  if (sysno == __NR_fork) {
+    return Error(EPERM);
+  }
+#endif
+
+  if (sysno == __NR_futex)
+    return RestrictFutex();
+
+  if (sysno == __NR_set_robust_list)
+    return Error(EPERM);
+
+  if (sysno == __NR_getpriority || sysno ==__NR_setpriority)
+    return RestrictGetSetpriority(current_pid);
+
+  if (sysno == __NR_madvise) {
+    // Only allow MADV_DONTNEED (aka MADV_FREE).
+    const Arg<int> advice(2);
+    return If(advice == MADV_DONTNEED, Allow()).Else(Error(EPERM));
+  }
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \
+    defined(__aarch64__)
+  if (sysno == __NR_mmap)
+    return RestrictMmapFlags();
+#endif
+
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+  if (sysno == __NR_mmap2)
+    return RestrictMmapFlags();
+#endif
+
+  if (sysno == __NR_mprotect)
+    return RestrictMprotectFlags();
+
+  if (sysno == __NR_prctl)
+    return RestrictPrctl();
+
+#if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \
+    defined(__aarch64__)
+  if (sysno == __NR_socketpair) {
+    // Only allow AF_UNIX, PF_UNIX. Crash if anything else is seen.
+    static_assert(AF_UNIX == PF_UNIX,
+                  "af_unix and pf_unix should not be different");
+    const Arg<int> domain(0);
+    return If(domain == AF_UNIX, Allow()).Else(CrashSIGSYS());
+  }
+#endif
+
+  if (SyscallSets::IsKill(sysno)) {
+    return RestrictKillTarget(current_pid, sysno);
+  }
+
+  // Filesystem access is denied gracefully with the caller-chosen errno.
+  if (SyscallSets::IsFileSystem(sysno) ||
+      SyscallSets::IsCurrentDirectory(sysno)) {
+    return Error(fs_denied_errno);
+  }
+
+  if (SyscallSets::IsSeccomp(sysno))
+    return Error(EPERM);
+
+  if (SyscallSets::IsAnySystemV(sysno)) {
+    return Error(EPERM);
+  }
+
+  if (SyscallSets::IsUmask(sysno) ||
+      SyscallSets::IsDeniedFileSystemAccessViaFd(sysno) ||
+      SyscallSets::IsDeniedGetOrModifySocket(sysno) ||
+      SyscallSets::IsProcessPrivilegeChange(sysno)) {
+    return Error(EPERM);
+  }
+
+#if defined(__i386__) || defined(__mips__)
+  if (SyscallSets::IsSocketCall(sysno))
+    return RestrictSocketcallCommand();
+#endif
+
+#if !defined(__i386__)
+  if (sysno == __NR_getsockopt || sysno ==__NR_setsockopt) {
+    // Used by Mojo EDK to catch a message pipe being sent over itself.
+    const Arg<int> level(1);
+    const Arg<int> optname(2);
+    return If(AllOf(level == SOL_SOCKET, optname == SO_PEEK_OFF), Allow())
+        .Else(CrashSIGSYS());
+  }
+#endif
+
+  if (IsBaselinePolicyWatched(sysno)) {
+    // Previously unseen syscalls. TODO(jln): some of these should
+    // be denied gracefully right away.
+    return CrashSIGSYS();
+  }
+
+  // In any other case crash the program with our SIGSYS handler.
+  return CrashSIGSYS();
+}
+
+} // namespace.
+
+// Delegate to the errno-taking constructor, denying filesystem access with
+// EPERM by default. (The old "C++03 doesn't allow delegated constructors"
+// note was stale: this IS a C++11 delegating constructor.)
+BaselinePolicy::BaselinePolicy() : BaselinePolicy(EPERM) {}
+
+// Record the pid at construction time so use from another process (e.g.
+// after fork()) can be caught in debug builds.
+BaselinePolicy::BaselinePolicy(int fs_denied_errno)
+    : fs_denied_errno_(fs_denied_errno), policy_pid_(sys_getpid()) {
+}
+
+BaselinePolicy::~BaselinePolicy() {
+  // Make sure that this policy is created, used and destroyed by a single
+  // process.
+  DCHECK_EQ(sys_getpid(), policy_pid_);
+}
+
+ResultExpr BaselinePolicy::EvaluateSyscall(int sysno) const {
+  // Sanity check that we're only called with valid syscall numbers.
+  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
+  // Make sure that this policy is used in the creating process. The check is
+  // gated on a single, arbitrary syscall number — presumably to avoid a
+  // sys_getpid() call per evaluated syscall (TODO confirm).
+  if (1 == sysno) {
+    DCHECK_EQ(sys_getpid(), policy_pid_);
+  }
+  return EvaluateSyscallImpl(fs_denied_errno_, policy_pid_, sysno);
+}
+
+// Syscalls outside the valid range crash via the SIGSYS handler.
+ResultExpr BaselinePolicy::InvalidSyscall() const {
+  return CrashSIGSYS();
+}
+
+} // namespace sandbox.
diff --git a/libchrome/sandbox/linux/seccomp-bpf-helpers/baseline_policy.h b/libchrome/sandbox/linux/seccomp-bpf-helpers/baseline_policy.h
new file mode 100644
index 0000000..fa40e72
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf-helpers/baseline_policy.h
@@ -0,0 +1,51 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_HELPERS_BASELINE_POLICY_H_
+#define SANDBOX_LINUX_SECCOMP_BPF_HELPERS_BASELINE_POLICY_H_
+
+#include <sys/types.h>
+
+#include "base/macros.h"
+#include "sandbox/linux/bpf_dsl/bpf_dsl_forward.h"
+#include "sandbox/linux/bpf_dsl/policy.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+
+// This is a helper to build seccomp-bpf policies, i.e. policies for a sandbox
+// that reduces the Linux kernel's attack surface. Given its nature, it doesn't
+// have a clear semantics and is mostly "implementation-defined".
+//
+// This class implements the Policy interface with a "baseline"
+// policy for use within Chromium.
+// The "baseline" policy is somewhat arbitrary. All Chromium policies are an
+// alteration of it, and it represents a reasonable common ground to run most
+// code in a sandboxed environment.
+// A baseline policy is only valid for the process for which this object was
+// instantiated (so do not fork() and use it in a child).
+class SANDBOX_EXPORT BaselinePolicy : public bpf_dsl::Policy {
+ public:
+  // Equivalent to BaselinePolicy(EPERM).
+  BaselinePolicy();
+  // |fs_denied_errno| is the errno returned when a filesystem access system
+  // call is denied.
+  explicit BaselinePolicy(int fs_denied_errno);
+  ~BaselinePolicy() override;
+
+  bpf_dsl::ResultExpr EvaluateSyscall(int system_call_number) const override;
+  bpf_dsl::ResultExpr InvalidSyscall() const override;
+  pid_t policy_pid() const { return policy_pid_; }
+
+ private:
+  // The errno returned for denied filesystem access system calls.
+  int fs_denied_errno_;
+
+  // The PID that the policy applies to (should be equal to the current pid).
+  pid_t policy_pid_;
+
+  DISALLOW_COPY_AND_ASSIGN(BaselinePolicy);
+};
+
+} // namespace sandbox.
+
+#endif // SANDBOX_LINUX_SECCOMP_BPF_HELPERS_BASELINE_POLICY_H_
diff --git a/libchrome/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc b/libchrome/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc
new file mode 100644
index 0000000..f0392b1
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc
@@ -0,0 +1,399 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf-helpers/baseline_policy.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <netinet/in.h>
+#include <sched.h>
+#include <signal.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "base/files/scoped_file.h"
+#include "base/macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/threading/thread.h"
+#include "build/build_config.h"
+#include "sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.h"
+#include "sandbox/linux/seccomp-bpf/bpf_tests.h"
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+#include "sandbox/linux/seccomp-bpf/syscall.h"
+#include "sandbox/linux/services/syscall_wrappers.h"
+#include "sandbox/linux/services/thread_helpers.h"
+#include "sandbox/linux/system_headers/linux_futex.h"
+#include "sandbox/linux/system_headers/linux_syscalls.h"
+#include "sandbox/linux/tests/test_utils.h"
+#include "sandbox/linux/tests/unit_tests.h"
+
+#if !defined(SO_PEEK_OFF)
+#define SO_PEEK_OFF 42
+#endif
+
+namespace sandbox {
+
+namespace {
+
+// This also tests that read(), write() and fstat() are allowed.
+void TestPipeOrSocketPair(base::ScopedFD read_end, base::ScopedFD write_end) {
+  // Takes ownership of both descriptors; ScopedFD closes them on return.
+  BPF_ASSERT_LE(0, read_end.get());
+  BPF_ASSERT_LE(0, write_end.get());
+  // fstat() must succeed and report either a FIFO (pipe) or a socket.
+  struct stat stat_buf;
+  int sys_ret = fstat(read_end.get(), &stat_buf);
+  BPF_ASSERT_EQ(0, sys_ret);
+  BPF_ASSERT(S_ISFIFO(stat_buf.st_mode) || S_ISSOCK(stat_buf.st_mode));
+
+  // Round-trip a short payload to prove write() and read() are allowed.
+  const ssize_t kTestTransferSize = 4;
+  static const char kTestString[kTestTransferSize] = {'T', 'E', 'S', 'T'};
+  ssize_t transfered = 0;
+
+  transfered =
+      HANDLE_EINTR(write(write_end.get(), kTestString, kTestTransferSize));
+  BPF_ASSERT_EQ(kTestTransferSize, transfered);
+  char read_buf[kTestTransferSize + 1] = {0};
+  transfered = HANDLE_EINTR(read(read_end.get(), read_buf, sizeof(read_buf)));
+  BPF_ASSERT_EQ(kTestTransferSize, transfered);
+  BPF_ASSERT_EQ(0, memcmp(kTestString, read_buf, kTestTransferSize));
+}
+
+// Test that a few easy-to-test system calls are allowed.
+BPF_TEST_C(BaselinePolicy, BaselinePolicyBasicAllowed, BaselinePolicy) {
+  BPF_ASSERT_EQ(0, sched_yield());
+
+  int pipefd[2];
+  int sys_ret = pipe(pipefd);
+  BPF_ASSERT_EQ(0, sys_ret);
+  TestPipeOrSocketPair(base::ScopedFD(pipefd[0]), base::ScopedFD(pipefd[1]));
+
+  BPF_ASSERT_LE(1, getpid());
+  BPF_ASSERT_LE(0, getuid());
+}
+
+// fchmod() is a filesystem-access call: it fails with EPERM even on a bad
+// descriptor.
+BPF_TEST_C(BaselinePolicy, FchmodErrno, BaselinePolicy) {
+  int ret = fchmod(-1, 07777);
+  BPF_ASSERT_EQ(-1, ret);
+  // Without the sandbox, this would EBADF instead.
+  BPF_ASSERT_EQ(EPERM, errno);
+}
+
+BPF_TEST_C(BaselinePolicy, ForkErrno, BaselinePolicy) {
+  errno = 0;
+  pid_t pid = fork();
+  const int fork_errno = errno;
+  TestUtils::HandlePostForkReturn(pid);
+
+  BPF_ASSERT_EQ(-1, pid);
+  BPF_ASSERT_EQ(EPERM, fork_errno);
+}
+
+// Issues a clone() with CLONE_PARENT_SETTID | SIGCHLD, the flag combination
+// glibc's fork() uses on x86.
+pid_t ForkX86Glibc() {
+  static pid_t ptid;
+  return sys_clone(CLONE_PARENT_SETTID | SIGCHLD, nullptr, &ptid, nullptr,
+                   nullptr);
+}
+
+BPF_TEST_C(BaselinePolicy, ForkX86Eperm, BaselinePolicy) {
+  errno = 0;
+  pid_t pid = ForkX86Glibc();
+  const int fork_errno = errno;
+  TestUtils::HandlePostForkReturn(pid);
+
+  BPF_ASSERT_EQ(-1, pid);
+  BPF_ASSERT_EQ(EPERM, fork_errno);
+}
+
+// Issues a clone() with the child-tid flag combination glibc's fork() uses
+// on ARM.
+pid_t ForkARMGlibc() {
+  static pid_t ctid;
+  return sys_clone(CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID | SIGCHLD, nullptr,
+                   nullptr, &ctid, nullptr);
+}
+
+BPF_TEST_C(BaselinePolicy, ForkArmEperm, BaselinePolicy) {
+  errno = 0;
+  pid_t pid = ForkARMGlibc();
+  const int fork_errno = errno;
+  TestUtils::HandlePostForkReturn(pid);
+
+  BPF_ASSERT_EQ(-1, pid);
+  BPF_ASSERT_EQ(EPERM, fork_errno);
+}
+
+BPF_TEST_C(BaselinePolicy, CreateThread, BaselinePolicy) {
+  base::Thread thread("sandbox_tests");
+  BPF_ASSERT(thread.Start());
+}
+
+// CLONE_THREAD without the rest of the pthread flag set is not an allowed
+// combination: the process must crash with the clone() SIGSYS message.
+BPF_DEATH_TEST_C(BaselinePolicy,
+                 DisallowedCloneFlagCrashes,
+                 DEATH_SEGV_MESSAGE(GetCloneErrorMessageContentForTests()),
+                 BaselinePolicy) {
+  pid_t pid = sys_clone(CLONE_THREAD | SIGCHLD);
+  TestUtils::HandlePostForkReturn(pid);
+}
+
+// kill() aimed at another process must crash with the (tg)kill SIGSYS
+// message; signalling our own pid (CanKillSelf below) is allowed.
+BPF_DEATH_TEST_C(BaselinePolicy,
+                 DisallowedKillCrashes,
+                 DEATH_SEGV_MESSAGE(GetKillErrorMessageContentForTests()),
+                 BaselinePolicy) {
+  BPF_ASSERT_NE(1, getpid());
+  kill(1, 0);
+  _exit(0);
+}
+
+BPF_TEST_C(BaselinePolicy, CanKillSelf, BaselinePolicy) {
+  int sys_ret = kill(getpid(), 0);
+  BPF_ASSERT_EQ(0, sys_ret);
+}
+
+BPF_TEST_C(BaselinePolicy, Socketpair, BaselinePolicy) {
+  int sv[2];
+  int sys_ret = socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
+  BPF_ASSERT_EQ(0, sys_ret);
+  TestPipeOrSocketPair(base::ScopedFD(sv[0]), base::ScopedFD(sv[1]));
+
+  sys_ret = socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sv);
+  BPF_ASSERT_EQ(0, sys_ret);
+  TestPipeOrSocketPair(base::ScopedFD(sv[0]), base::ScopedFD(sv[1]));
+}
+
+// Not all architectures can restrict the domain for socketpair().
+#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
+BPF_DEATH_TEST_C(BaselinePolicy,
+                 SocketpairWrongDomain,
+                 DEATH_SEGV_MESSAGE(GetErrorMessageContentForTests()),
+                 BaselinePolicy) {
+  int sv[2];
+  ignore_result(socketpair(AF_INET, SOCK_STREAM, 0, sv));
+  _exit(1);
+}
+#endif // defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
+
+// Filesystem access system calls fail with EPERM under the default policy.
+BPF_TEST_C(BaselinePolicy, EPERM_open, BaselinePolicy) {
+  errno = 0;
+  int sys_ret = open("/proc/cpuinfo", O_RDONLY);
+  BPF_ASSERT_EQ(-1, sys_ret);
+  BPF_ASSERT_EQ(EPERM, errno);
+}
+
+BPF_TEST_C(BaselinePolicy, EPERM_access, BaselinePolicy) {
+  errno = 0;
+  int sys_ret = access("/proc/cpuinfo", R_OK);
+  BPF_ASSERT_EQ(-1, sys_ret);
+  BPF_ASSERT_EQ(EPERM, errno);
+}
+
+BPF_TEST_C(BaselinePolicy, EPERM_getcwd, BaselinePolicy) {
+  errno = 0;
+  char buf[1024];
+  char* cwd = getcwd(buf, sizeof(buf));
+  BPF_ASSERT_EQ(NULL, cwd);
+  BPF_ASSERT_EQ(EPERM, errno);
+}
+
+// An invalid system call number must trigger the generic SIGSYS crash.
+BPF_DEATH_TEST_C(BaselinePolicy,
+                 SIGSYS_InvalidSyscall,
+                 DEATH_SEGV_MESSAGE(GetErrorMessageContentForTests()),
+                 BaselinePolicy) {
+  Syscall::InvalidCall();
+}
+
+// A failing test using this macro could be problematic since we perform
+// system calls by passing "0" as every argument.
+// The kernel could SIGSEGV the process or the system call itself could reboot
+// the machine. Some thoughts have been given when hand-picking the system
+// calls below to limit any potential side effects outside of the current
+// process.
+#define TEST_BASELINE_SIGSYS(sysno) \
+ BPF_DEATH_TEST_C(BaselinePolicy, \
+ SIGSYS_##sysno, \
+ DEATH_SEGV_MESSAGE(GetErrorMessageContentForTests()), \
+ BaselinePolicy) { \
+ syscall(sysno, 0, 0, 0, 0, 0, 0); \
+ _exit(1); \
+ }
+
+// Each invocation below defines a death test expecting the generic SIGSYS
+// crash message when the syscall is issued with all-zero arguments.
+TEST_BASELINE_SIGSYS(__NR_acct);
+TEST_BASELINE_SIGSYS(__NR_chroot);
+TEST_BASELINE_SIGSYS(__NR_fanotify_init);
+TEST_BASELINE_SIGSYS(__NR_fgetxattr);
+TEST_BASELINE_SIGSYS(__NR_getcpu);
+TEST_BASELINE_SIGSYS(__NR_getitimer);
+TEST_BASELINE_SIGSYS(__NR_init_module);
+TEST_BASELINE_SIGSYS(__NR_io_cancel);
+TEST_BASELINE_SIGSYS(__NR_keyctl);
+TEST_BASELINE_SIGSYS(__NR_mq_open);
+TEST_BASELINE_SIGSYS(__NR_ptrace);
+TEST_BASELINE_SIGSYS(__NR_sched_setaffinity);
+TEST_BASELINE_SIGSYS(__NR_setpgid);
+TEST_BASELINE_SIGSYS(__NR_swapon);
+TEST_BASELINE_SIGSYS(__NR_sysinfo);
+TEST_BASELINE_SIGSYS(__NR_syslog);
+TEST_BASELINE_SIGSYS(__NR_timer_create);
+
+// These legacy syscall numbers do not exist on aarch64.
+#if !defined(__aarch64__)
+TEST_BASELINE_SIGSYS(__NR_eventfd);
+TEST_BASELINE_SIGSYS(__NR_inotify_init);
+TEST_BASELINE_SIGSYS(__NR_vserver);
+#endif
+
+#if defined(LIBC_GLIBC) && !defined(OS_CHROMEOS)
+// On desktop glibc builds the requeue-PI/unlock-PI futex ops must fail with
+// EINVAL instead of crashing (workaround for glibc's racy sem_post, see
+// IsBuggyGlibcSemPost in syscall_parameters_restrictions.cc); on all other
+// builds they must crash with the futex SIGSYS message.
+BPF_TEST_C(BaselinePolicy, FutexEINVAL, BaselinePolicy) {
+  int ops[] = {
+      FUTEX_CMP_REQUEUE_PI, FUTEX_CMP_REQUEUE_PI_PRIVATE,
+      FUTEX_UNLOCK_PI_PRIVATE,
+  };
+
+  for (int op : ops) {
+    BPF_ASSERT_EQ(-1, syscall(__NR_futex, NULL, op, 0, NULL, NULL, 0));
+    BPF_ASSERT_EQ(EINVAL, errno);
+  }
+}
+#else
+BPF_DEATH_TEST_C(BaselinePolicy,
+                 FutexWithRequeuePriorityInheritence,
+                 DEATH_SEGV_MESSAGE(GetFutexErrorMessageContentForTests()),
+                 BaselinePolicy) {
+  syscall(__NR_futex, NULL, FUTEX_CMP_REQUEUE_PI, 0, NULL, NULL, 0);
+  _exit(1);
+}
+
+BPF_DEATH_TEST_C(BaselinePolicy,
+                 FutexWithRequeuePriorityInheritencePrivate,
+                 DEATH_SEGV_MESSAGE(GetFutexErrorMessageContentForTests()),
+                 BaselinePolicy) {
+  syscall(__NR_futex, NULL, FUTEX_CMP_REQUEUE_PI_PRIVATE, 0, NULL, NULL, 0);
+  _exit(1);
+}
+
+BPF_DEATH_TEST_C(BaselinePolicy,
+                 FutexWithUnlockPIPrivate,
+                 DEATH_SEGV_MESSAGE(GetFutexErrorMessageContentForTests()),
+                 BaselinePolicy) {
+  syscall(__NR_futex, NULL, FUTEX_UNLOCK_PI_PRIVATE, 0, NULL, NULL, 0);
+  _exit(1);
+}
+#endif // defined(LIBC_GLIBC) && !defined(OS_CHROMEOS)
+
+BPF_TEST_C(BaselinePolicy, PrctlDumpable, BaselinePolicy) {
+  const int is_dumpable = prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
+  BPF_ASSERT(is_dumpable == 1 || is_dumpable == 0);
+  const int prctl_ret = prctl(PR_SET_DUMPABLE, is_dumpable, 0, 0, 0, 0);
+  BPF_ASSERT_EQ(0, prctl_ret);
+}
+
+// Workaround incomplete Android headers.
+#if !defined(PR_CAPBSET_READ)
+#define PR_CAPBSET_READ 23
+#endif
+
+// PR_CAPBSET_READ is not on the prctl() whitelist, so it must crash with
+// the prctl() SIGSYS message.
+BPF_DEATH_TEST_C(BaselinePolicy,
+                 PrctlSigsys,
+                 DEATH_SEGV_MESSAGE(GetPrctlErrorMessageContentForTests()),
+                 BaselinePolicy) {
+  prctl(PR_CAPBSET_READ, 0, 0, 0, 0);
+  _exit(1);
+}
+
+BPF_TEST_C(BaselinePolicy, GetOrSetPriority, BaselinePolicy) {
+  errno = 0;
+  const int original_prio = getpriority(PRIO_PROCESS, 0);
+  // Check errno instead of the return value since this system call can return
+  // -1 as a valid value.
+  BPF_ASSERT_EQ(0, errno);
+
+  errno = 0;
+  int rc = getpriority(PRIO_PROCESS, getpid());
+  BPF_ASSERT_EQ(0, errno);
+
+  // Targeting any pid other than our own is denied with EPERM.
+  rc = getpriority(PRIO_PROCESS, getpid() + 1);
+  BPF_ASSERT_EQ(-1, rc);
+  BPF_ASSERT_EQ(EPERM, errno);
+
+  rc = setpriority(PRIO_PROCESS, 0, original_prio);
+  BPF_ASSERT_EQ(0, rc);
+
+  rc = setpriority(PRIO_PROCESS, getpid(), original_prio);
+  BPF_ASSERT_EQ(0, rc);
+
+  errno = 0;
+  rc = setpriority(PRIO_PROCESS, getpid() + 1, original_prio);
+  BPF_ASSERT_EQ(-1, rc);
+  BPF_ASSERT_EQ(EPERM, errno);
+}
+
+// A "which" other than PRIO_PROCESS must trigger the generic SIGSYS crash.
+BPF_DEATH_TEST_C(BaselinePolicy,
+                 GetPrioritySigsys,
+                 DEATH_SEGV_MESSAGE(GetErrorMessageContentForTests()),
+                 BaselinePolicy) {
+  getpriority(PRIO_USER, 0);
+  _exit(1);
+}
+
+// CLOCK_MONOTONIC_RAW is not an allowed clock id, so clock_gettime() must
+// crash with the generic SIGSYS message.
+BPF_DEATH_TEST_C(BaselinePolicy,
+                 ClockGettimeWithDisallowedClockCrashes,
+                 DEATH_SEGV_MESSAGE(sandbox::GetErrorMessageContentForTests()),
+                 BaselinePolicy) {
+  struct timespec ts;
+  clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
+}
+
+#if !defined(__i386__)
+// NOTE(review): excluded on i386, where socket operations presumably arrive
+// via the multiplexed socketcall() entry point instead — confirm.
+// Only (SOL_SOCKET, SO_PEEK_OFF) style combinations are allowed; a wrong
+// level or a wrong option name must trigger the generic SIGSYS crash.
+BPF_DEATH_TEST_C(BaselinePolicy,
+                 GetSockOptWrongLevelSigsys,
+                 DEATH_SEGV_MESSAGE(sandbox::GetErrorMessageContentForTests()),
+                 BaselinePolicy) {
+  int fds[2];
+  PCHECK(socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0);
+  int id;
+  socklen_t peek_off_size = sizeof(id);
+  getsockopt(fds[0], IPPROTO_TCP, SO_PEEK_OFF, &id, &peek_off_size);
+}
+
+BPF_DEATH_TEST_C(BaselinePolicy,
+                 GetSockOptWrongOptionSigsys,
+                 DEATH_SEGV_MESSAGE(sandbox::GetErrorMessageContentForTests()),
+                 BaselinePolicy) {
+  int fds[2];
+  PCHECK(socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0);
+  int id;
+  socklen_t peek_off_size = sizeof(id);
+  getsockopt(fds[0], SOL_SOCKET, SO_DEBUG, &id, &peek_off_size);
+}
+
+BPF_DEATH_TEST_C(BaselinePolicy,
+                 SetSockOptWrongLevelSigsys,
+                 DEATH_SEGV_MESSAGE(sandbox::GetErrorMessageContentForTests()),
+                 BaselinePolicy) {
+  int fds[2];
+  PCHECK(socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0);
+  int id;
+  setsockopt(fds[0], IPPROTO_TCP, SO_PEEK_OFF, &id, sizeof(id));
+}
+
+
+BPF_DEATH_TEST_C(BaselinePolicy,
+                 SetSockOptWrongOptionSigsys,
+                 DEATH_SEGV_MESSAGE(sandbox::GetErrorMessageContentForTests()),
+                 BaselinePolicy) {
+  int fds[2];
+  PCHECK(socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0);
+  int id;
+  setsockopt(fds[0], SOL_SOCKET, SO_DEBUG, &id, sizeof(id));
+}
+#endif
+
+} // namespace
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.cc b/libchrome/sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.cc
new file mode 100644
index 0000000..077bc61
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.cc
@@ -0,0 +1,299 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Note: any code in this file MUST be async-signal safe.
+
+#include "sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "build/build_config.h"
+#include "sandbox/linux/bpf_dsl/bpf_dsl.h"
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+#include "sandbox/linux/seccomp-bpf/syscall.h"
+#include "sandbox/linux/services/syscall_wrappers.h"
+#include "sandbox/linux/system_headers/linux_syscalls.h"
+
+#if defined(__mips__)
+// __NR_Linux, is defined in <asm/unistd.h>.
+#include <asm/unistd.h>
+#endif
+
+#define SECCOMP_MESSAGE_COMMON_CONTENT "seccomp-bpf failure"
+#define SECCOMP_MESSAGE_CLONE_CONTENT "clone() failure"
+#define SECCOMP_MESSAGE_PRCTL_CONTENT "prctl() failure"
+#define SECCOMP_MESSAGE_IOCTL_CONTENT "ioctl() failure"
+#define SECCOMP_MESSAGE_KILL_CONTENT "(tg)kill() failure"
+#define SECCOMP_MESSAGE_FUTEX_CONTENT "futex() failure"
+
+namespace {
+
+// Compile-time check for x86-64; decides how many bits of the crashing
+// address are usable for encoding (see SIGSYSCloneFailure).
+inline bool IsArchitectureX86_64() {
+#if defined(__x86_64__)
+  return true;
+#else
+  return false;
+#endif
+}
+
+// Write |error_message| to stderr. Similar to RawLog(), but a bit more careful
+// about async-signal safety. |size| is the size to write and should typically
+// not include a terminating \0.
+void WriteToStdErr(const char* error_message, size_t size) {
+  while (size > 0) {
+    // TODO(jln): query the current policy to check if send() is available and
+    // use it to perform a non-blocking write.
+    const int ret = HANDLE_EINTR(write(STDERR_FILENO, error_message, size));
+    // We can't handle any type of error here.
+    if (ret <= 0 || static_cast<size_t>(ret) > size) break;
+    size -= ret;
+    error_message += ret;
+  }
+}
+
+// Invalid syscall values are truncated to zero.
+// On architectures where base value is zero (Intel and Arm),
+// syscall number is the same as offset from base.
+// This function returns values between 0 and 1023 on all architectures.
+// On architectures where base value is different than zero (currently only
+// Mips), we are truncating valid syscall values to offset from base.
+uint32_t SyscallNumberToOffsetFromBase(uint32_t sysno) {
+#if defined(__mips__)
+  // On MIPS syscall numbers are in different range than on x86 and ARM.
+  // Valid MIPS O32 ABI syscall __NR_syscall will be truncated to zero for
+  // simplicity.
+  sysno = sysno - __NR_Linux;
+#endif
+
+  if (sysno >= 1024)
+    sysno = 0;
+
+  return sysno;
+}
+
+// Print a seccomp-bpf failure to handle |sysno| to stderr in an
+// async-signal safe way.
+void PrintSyscallError(uint32_t sysno) {
+  if (sysno >= 1024)
+    sysno = 0;
+  // TODO(markus): replace with async-signal safe snprintf when available.
+  const size_t kNumDigits = 4;
+  // Render |sysno| as exactly four base-10 digits. The buffer is not
+  // NUL-terminated; WriteToStdErr() is given the explicit size.
+  char sysno_base10[kNumDigits];
+  uint32_t rem = sysno;
+  uint32_t mod = 0;
+  for (int i = kNumDigits - 1; i >= 0; i--) {
+    mod = rem % 10;
+    rem /= 10;
+    sysno_base10[i] = '0' + mod;
+  }
+#if defined(__mips__) && (_MIPS_SIM == _MIPS_SIM_ABI32)
+  static const char kSeccompErrorPrefix[] = __FILE__
+      ":**CRASHING**:" SECCOMP_MESSAGE_COMMON_CONTENT " in syscall 4000 + ";
+#else
+  static const char kSeccompErrorPrefix[] =
+      __FILE__":**CRASHING**:" SECCOMP_MESSAGE_COMMON_CONTENT " in syscall ";
+#endif
+  static const char kSeccompErrorPostfix[] = "\n";
+  WriteToStdErr(kSeccompErrorPrefix, sizeof(kSeccompErrorPrefix) - 1);
+  WriteToStdErr(sysno_base10, sizeof(sysno_base10));
+  WriteToStdErr(kSeccompErrorPostfix, sizeof(kSeccompErrorPostfix) - 1);
+}
+
+} // namespace.
+
+namespace sandbox {
+
+intptr_t CrashSIGSYS_Handler(const struct arch_seccomp_data& args, void* aux) {
+  // Prints the offending syscall number to stderr, then faults on an address
+  // that encodes the syscall and its first two arguments so they are visible
+  // in crash dumps. Never returns.
+  uint32_t syscall = SyscallNumberToOffsetFromBase(args.nr);
+
+  PrintSyscallError(syscall);
+
+  // Encode 8-bits of the 1st two arguments too, so we can discern which socket
+  // type, which fcntl, ... etc., without being likely to hit a mapped
+  // address.
+  // Do not encode more bits here without thinking about increasing the
+  // likelihood of collision with mapped pages.
+  syscall |= ((args.args[0] & 0xffUL) << 12);
+  syscall |= ((args.args[1] & 0xffUL) << 20);
+  // Purposefully dereference the syscall as an address so it'll show up very
+  // clearly and easily in crash dumps.
+  volatile char* addr = reinterpret_cast<volatile char*>(syscall);
+  *addr = '\0';
+  // In case we hit a mapped address, hit the null page with just the syscall,
+  // for paranoia.
+  syscall &= 0xfffUL;
+  addr = reinterpret_cast<volatile char*>(syscall);
+  *addr = '\0';
+  for (;;)
+    _exit(1);
+}
+
+// TODO(jln): refactor the reporting functions.
+
+intptr_t SIGSYSCloneFailure(const struct arch_seccomp_data& args, void* aux) {
+  static const char kSeccompCloneError[] =
+      __FILE__":**CRASHING**:" SECCOMP_MESSAGE_CLONE_CONTENT "\n";
+  WriteToStdErr(kSeccompCloneError, sizeof(kSeccompCloneError) - 1);
+  // "flags" is the first argument in the kernel's clone().
+  // Mark as volatile to be able to find the value on the stack in a minidump.
+  volatile uint64_t clone_flags = args.args[0];
+  volatile char* addr;
+  if (IsArchitectureX86_64()) {
+    addr = reinterpret_cast<volatile char*>(clone_flags & 0xFFFFFF);
+    *addr = '\0';
+  }
+  // Hit the NULL page if this fails to fault.
+  addr = reinterpret_cast<volatile char*>(clone_flags & 0xFFF);
+  *addr = '\0';
+  for (;;)
+    _exit(1);
+}
+
+intptr_t SIGSYSPrctlFailure(const struct arch_seccomp_data& args,
+                            void* /* aux */) {
+  static const char kSeccompPrctlError[] =
+      __FILE__":**CRASHING**:" SECCOMP_MESSAGE_PRCTL_CONTENT "\n";
+  WriteToStdErr(kSeccompPrctlError, sizeof(kSeccompPrctlError) - 1);
+  // Mark as volatile to be able to find the value on the stack in a minidump.
+  volatile uint64_t option = args.args[0];
+  volatile char* addr =
+      reinterpret_cast<volatile char*>(option & 0xFFF);
+  *addr = '\0';
+  for (;;)
+    _exit(1);
+}
+
+intptr_t SIGSYSIoctlFailure(const struct arch_seccomp_data& args,
+                            void* /* aux */) {
+  static const char kSeccompIoctlError[] =
+      __FILE__":**CRASHING**:" SECCOMP_MESSAGE_IOCTL_CONTENT "\n";
+  WriteToStdErr(kSeccompIoctlError, sizeof(kSeccompIoctlError) - 1);
+  // Make "request" volatile so that we can see it on the stack in a minidump.
+  volatile uint64_t request = args.args[1];
+  volatile char* addr = reinterpret_cast<volatile char*>(request & 0xFFFF);
+  *addr = '\0';
+  // Hit the NULL page if this fails.
+  addr = reinterpret_cast<volatile char*>(request & 0xFFF);
+  *addr = '\0';
+  for (;;)
+    _exit(1);
+}
+
+intptr_t SIGSYSKillFailure(const struct arch_seccomp_data& args,
+                           void* /* aux */) {
+  static const char kSeccompKillError[] =
+      __FILE__":**CRASHING**:" SECCOMP_MESSAGE_KILL_CONTENT "\n";
+  WriteToStdErr(kSeccompKillError, sizeof(kSeccompKillError) - 1);
+  // Make "pid" volatile so that we can see it on the stack in a minidump.
+  // NOTE(review): the encoded value is our own pid from sys_getpid(), not
+  // the syscall's pid argument.
+  volatile uint64_t my_pid = sys_getpid();
+  volatile char* addr = reinterpret_cast<volatile char*>(my_pid & 0xFFF);
+  *addr = '\0';
+  for (;;)
+    _exit(1);
+}
+
+intptr_t SIGSYSFutexFailure(const struct arch_seccomp_data& args,
+                            void* /* aux */) {
+  static const char kSeccompFutexError[] =
+      __FILE__ ":**CRASHING**:" SECCOMP_MESSAGE_FUTEX_CONTENT "\n";
+  WriteToStdErr(kSeccompFutexError, sizeof(kSeccompFutexError) - 1);
+  // The crashing address encodes the futex op (second syscall argument).
+  volatile int futex_op = args.args[1];
+  volatile char* addr = reinterpret_cast<volatile char*>(futex_op & 0xFFF);
+  *addr = '\0';
+  for (;;)
+    _exit(1);
+}
+
+intptr_t SIGSYSSchedHandler(const struct arch_seccomp_data& args,
+                            void* aux) {
+  // Rewrites sched_* syscalls that target the current thread to use pid 0
+  // (equivalent and policy-allowed); any other target crashes via
+  // CrashSIGSYS_Handler.
+  switch (args.nr) {
+    case __NR_sched_getaffinity:
+    case __NR_sched_getattr:
+    case __NR_sched_getparam:
+    case __NR_sched_getscheduler:
+    case __NR_sched_rr_get_interval:
+    case __NR_sched_setaffinity:
+    case __NR_sched_setattr:
+    case __NR_sched_setparam:
+    case __NR_sched_setscheduler:
+      const pid_t tid = sys_gettid();
+      // The first argument is the pid. If is our thread id, then replace it
+      // with 0, which is equivalent and allowed by the policy.
+      if (args.args[0] == static_cast<uint64_t>(tid)) {
+        return Syscall::Call(args.nr,
+                             0,
+                             static_cast<intptr_t>(args.args[1]),
+                             static_cast<intptr_t>(args.args[2]),
+                             static_cast<intptr_t>(args.args[3]),
+                             static_cast<intptr_t>(args.args[4]),
+                             static_cast<intptr_t>(args.args[5]));
+      }
+      break;
+  }
+
+  CrashSIGSYS_Handler(args, aux);
+
+  // Should never be reached.
+  RAW_CHECK(false);
+  return -ENOSYS;
+}
+
+// bpf_dsl convenience wrappers: each returns a Trap() expression bound to
+// the corresponding handler above, with no auxiliary data.
+bpf_dsl::ResultExpr CrashSIGSYS() {
+  return bpf_dsl::Trap(CrashSIGSYS_Handler, NULL);
+}
+
+bpf_dsl::ResultExpr CrashSIGSYSClone() {
+  return bpf_dsl::Trap(SIGSYSCloneFailure, NULL);
+}
+
+bpf_dsl::ResultExpr CrashSIGSYSPrctl() {
+  return bpf_dsl::Trap(SIGSYSPrctlFailure, NULL);
+}
+
+bpf_dsl::ResultExpr CrashSIGSYSIoctl() {
+  return bpf_dsl::Trap(SIGSYSIoctlFailure, NULL);
+}
+
+bpf_dsl::ResultExpr CrashSIGSYSKill() {
+  return bpf_dsl::Trap(SIGSYSKillFailure, NULL);
+}
+
+bpf_dsl::ResultExpr CrashSIGSYSFutex() {
+  return bpf_dsl::Trap(SIGSYSFutexFailure, NULL);
+}
+
+bpf_dsl::ResultExpr RewriteSchedSIGSYS() {
+  return bpf_dsl::Trap(SIGSYSSchedHandler, NULL);
+}
+
+// Expose the crash-message substrings so death tests can match stderr.
+const char* GetErrorMessageContentForTests() {
+  return SECCOMP_MESSAGE_COMMON_CONTENT;
+}
+
+const char* GetCloneErrorMessageContentForTests() {
+  return SECCOMP_MESSAGE_CLONE_CONTENT;
+}
+
+const char* GetPrctlErrorMessageContentForTests() {
+  return SECCOMP_MESSAGE_PRCTL_CONTENT;
+}
+
+const char* GetIoctlErrorMessageContentForTests() {
+  return SECCOMP_MESSAGE_IOCTL_CONTENT;
+}
+
+const char* GetKillErrorMessageContentForTests() {
+  return SECCOMP_MESSAGE_KILL_CONTENT;
+}
+
+const char* GetFutexErrorMessageContentForTests() {
+  return SECCOMP_MESSAGE_FUTEX_CONTENT;
+}
+
+} // namespace sandbox.
diff --git a/libchrome/sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.h b/libchrome/sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.h
new file mode 100644
index 0000000..c64e994
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.h
@@ -0,0 +1,82 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_HELPERS_SIGSYS_HANDLERS_H_
+#define SANDBOX_LINUX_SECCOMP_BPF_HELPERS_SIGSYS_HANDLERS_H_
+
+#include <stdint.h>
+
+#include "build/build_config.h"
+#include "sandbox/linux/bpf_dsl/bpf_dsl_forward.h"
+#include "sandbox/sandbox_export.h"
+
+// The handlers are suitable for use in Trap() error codes. They are
+// guaranteed to be async-signal safe.
+// See sandbox/linux/seccomp-bpf/trap.h to see how they work.
+
+namespace sandbox {
+
+struct arch_seccomp_data;
+
+// This handler will crash the currently running process. The crashing address
+// will be the number of the current system call, extracted from |args|.
+// This handler will also print to stderr the number of the crashing syscall.
+SANDBOX_EXPORT intptr_t
+ CrashSIGSYS_Handler(const struct arch_seccomp_data& args, void* aux);
+
+// The following three handlers are suitable to report failures with the
+// clone(), prctl() and ioctl() system calls respectively.
+
+// The crashing address will be (clone_flags & 0xFFFFFF), where clone_flags is
+// the clone(2) argument, extracted from |args|.
+SANDBOX_EXPORT intptr_t
+ SIGSYSCloneFailure(const struct arch_seccomp_data& args, void* aux);
+// The crashing address will be (option & 0xFFF), where option is the prctl(2)
+// argument.
+SANDBOX_EXPORT intptr_t
+ SIGSYSPrctlFailure(const struct arch_seccomp_data& args, void* aux);
+// The crashing address will be request & 0xFFFF, where request is the ioctl(2)
+// argument.
+SANDBOX_EXPORT intptr_t
+ SIGSYSIoctlFailure(const struct arch_seccomp_data& args, void* aux);
+// The crashing address will be (pid & 0xFFF), where pid is the first
+// argument (and can be a tid).
+SANDBOX_EXPORT intptr_t
+ SIGSYSKillFailure(const struct arch_seccomp_data& args, void* aux);
+// The crashing address will be (op & 0xFFF), where op is the second
+// argument.
+SANDBOX_EXPORT intptr_t
+ SIGSYSFutexFailure(const struct arch_seccomp_data& args, void* aux);
+// If the syscall is not being called on the current tid, crashes in the same
+// way as CrashSIGSYS_Handler. Otherwise, returns the result of calling the
+// syscall with the pid argument set to 0 (which for these calls means the
+// current thread). The following syscalls are supported:
+//
+// sched_getaffinity(), sched_getattr(), sched_getparam(), sched_getscheduler(),
+// sched_rr_get_interval(), sched_setaffinity(), sched_setattr(),
+// sched_setparam(), sched_setscheduler()
+SANDBOX_EXPORT intptr_t
+ SIGSYSSchedHandler(const struct arch_seccomp_data& args, void* aux);
+
+// Variants of the above functions for use with bpf_dsl.
+SANDBOX_EXPORT bpf_dsl::ResultExpr CrashSIGSYS();
+SANDBOX_EXPORT bpf_dsl::ResultExpr CrashSIGSYSClone();
+SANDBOX_EXPORT bpf_dsl::ResultExpr CrashSIGSYSPrctl();
+SANDBOX_EXPORT bpf_dsl::ResultExpr CrashSIGSYSIoctl();
+SANDBOX_EXPORT bpf_dsl::ResultExpr CrashSIGSYSKill();
+SANDBOX_EXPORT bpf_dsl::ResultExpr CrashSIGSYSFutex();
+SANDBOX_EXPORT bpf_dsl::ResultExpr RewriteSchedSIGSYS();
+
+// Following four functions return substrings of error messages used
+// in the above four functions. They are useful in death tests.
+SANDBOX_EXPORT const char* GetErrorMessageContentForTests();
+SANDBOX_EXPORT const char* GetCloneErrorMessageContentForTests();
+SANDBOX_EXPORT const char* GetPrctlErrorMessageContentForTests();
+SANDBOX_EXPORT const char* GetIoctlErrorMessageContentForTests();
+SANDBOX_EXPORT const char* GetKillErrorMessageContentForTests();
+SANDBOX_EXPORT const char* GetFutexErrorMessageContentForTests();
+
+} // namespace sandbox.
+
+#endif // SANDBOX_LINUX_SECCOMP_BPF_HELPERS_SIGSYS_HANDLERS_H_
diff --git a/libchrome/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc b/libchrome/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
new file mode 100644
index 0000000..56c4cb3
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
@@ -0,0 +1,354 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <fcntl.h>
+#include <linux/net.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "sandbox/linux/bpf_dsl/bpf_dsl.h"
+#include "sandbox/linux/bpf_dsl/seccomp_macros.h"
+#include "sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.h"
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+#include "sandbox/linux/system_headers/linux_futex.h"
+#include "sandbox/linux/system_headers/linux_syscalls.h"
+#include "sandbox/linux/system_headers/linux_time.h"
+
+// PNaCl toolchain does not provide sys/ioctl.h header.
+#if !defined(OS_NACL_NONSFI)
+#include <sys/ioctl.h>
+#endif
+
+#if defined(OS_ANDROID)
+
+#if !defined(F_DUPFD_CLOEXEC)
+#define F_DUPFD_CLOEXEC (F_LINUX_SPECIFIC_BASE + 6)
+#endif
+
+// https://android.googlesource.com/platform/bionic/+/lollipop-release/libc/private/bionic_prctl.h
+#if !defined(PR_SET_VMA)
+#define PR_SET_VMA 0x53564d41
+#endif
+
+#ifndef PR_SET_PTRACER
+#define PR_SET_PTRACER 0x59616d61
+#endif
+
+#endif // defined(OS_ANDROID)
+
+#if defined(__arm__) && !defined(MAP_STACK)
+#define MAP_STACK 0x20000 // Daisy build environment has old headers.
+#endif
+
+#if defined(__mips__) && !defined(MAP_STACK)
+#define MAP_STACK 0x40000
+#endif
+namespace {
+
+// Compile-time platform predicates used below to specialize the parameter
+// restrictions.
+inline bool IsArchitectureX86_64() {
+#if defined(__x86_64__)
+  return true;
+#else
+  return false;
+#endif
+}
+
+inline bool IsArchitectureI386() {
+#if defined(__i386__)
+  return true;
+#else
+  return false;
+#endif
+}
+
+inline bool IsAndroid() {
+#if defined(OS_ANDROID)
+  return true;
+#else
+  return false;
+#endif
+}
+
+inline bool IsArchitectureMips() {
+#if defined(__mips__)
+  return true;
+#else
+  return false;
+#endif
+}
+
+// Ubuntu's version of glibc has a race condition in sem_post that can cause
+// it to call futex(2) with bogus op arguments. To workaround this, we need
+// to allow those futex(2) calls to fail with EINVAL, instead of crashing the
+// process. See crbug.com/598471.
+inline bool IsBuggyGlibcSemPost() {
+#if defined(LIBC_GLIBC) && !defined(OS_CHROMEOS)
+  return true;
+#else
+  return false;
+#endif
+}
+
+} // namespace.
+
+#define CASES SANDBOX_BPF_DSL_CASES
+
+using sandbox::bpf_dsl::Allow;
+using sandbox::bpf_dsl::Arg;
+using sandbox::bpf_dsl::BoolExpr;
+using sandbox::bpf_dsl::Error;
+using sandbox::bpf_dsl::If;
+using sandbox::bpf_dsl::ResultExpr;
+
+namespace sandbox {
+
+#if !defined(OS_NACL_NONSFI)
+// Allow Glibc's and Android pthread creation flags, crash on any other
+// thread creation attempts and EPERM attempts to use neither
+// CLONE_VM, nor CLONE_THREAD, which includes all fork() implementations.
+ResultExpr RestrictCloneToThreadsAndEPERMFork() {
+  const Arg<unsigned long> flags(0);
+
+  // TODO(mdempsky): Extend DSL to support (flags & ~mask1) == mask2.
+  const uint64_t kAndroidCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES |
+                                     CLONE_SIGHAND | CLONE_THREAD |
+                                     CLONE_SYSVSEM;
+  const uint64_t kObsoleteAndroidCloneMask = kAndroidCloneMask | CLONE_DETACHED;
+
+  const uint64_t kGlibcPthreadFlags =
+      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
+      CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
+  const BoolExpr glibc_test = flags == kGlibcPthreadFlags;
+
+  // NOTE(review): android_test also accepts the glibc flag set — presumably
+  // so glibc-style binaries run on Android builds; confirm.
+  const BoolExpr android_test =
+      AnyOf(flags == kAndroidCloneMask, flags == kObsoleteAndroidCloneMask,
+            flags == kGlibcPthreadFlags);
+
+  // Exact matches of the pthread flag sets are allowed; a call with neither
+  // CLONE_VM nor CLONE_THREAD (i.e. fork()-like) fails with EPERM; anything
+  // else crashes with the clone() SIGSYS report.
+  return If(IsAndroid() ? android_test : glibc_test, Allow())
+      .ElseIf((flags & (CLONE_VM | CLONE_THREAD)) == 0, Error(EPERM))
+      .Else(CrashSIGSYSClone());
+}
+
+ResultExpr RestrictPrctl() {
+  // Whitelist of prctl() options; any option not listed crashes via
+  // CrashSIGSYSPrctl().
+  // Will need to add seccomp compositing in the future. PR_SET_PTRACER is
+  // used by breakpad but not needed anymore.
+  const Arg<int> option(0);
+  return Switch(option)
+      .CASES((PR_GET_NAME, PR_SET_NAME, PR_GET_DUMPABLE, PR_SET_DUMPABLE
+#if defined(OS_ANDROID)
+              , PR_SET_VMA, PR_SET_PTRACER
+
+// Enable PR_SET_TIMERSLACK_PID, an Android custom prctl which is used in:
+// https://android.googlesource.com/platform/system/core/+/lollipop-release/libcutils/sched_policy.c.
+// Depending on the Android kernel version, this prctl may have different
+// values. Since we don't know the correct value for the running kernel, we must
+// allow them all.
+//
+// The effect is:
+// On 3.14 kernels, this allows PR_SET_TIMERSLACK_PID and 43 and 127 (invalid
+// prctls which will return EINVAL)
+// On 3.18 kernels, this allows PR_SET_TIMERSLACK_PID, PR_SET_THP_DISABLE, and
+// 127 (invalid).
+// On 4.1 kernels and up, this allows PR_SET_TIMERSLACK_PID, PR_SET_THP_DISABLE,
+// and PR_MPX_ENABLE_MANAGEMENT.
+
+// https://android.googlesource.com/kernel/common/+/android-3.14/include/uapi/linux/prctl.h
+#define PR_SET_TIMERSLACK_PID_1 41
+
+// https://android.googlesource.com/kernel/common/+/android-3.18/include/uapi/linux/prctl.h
+#define PR_SET_TIMERSLACK_PID_2 43
+
+// https://android.googlesource.com/kernel/common/+/android-4.1/include/uapi/linux/prctl.h and up
+#define PR_SET_TIMERSLACK_PID_3 127
+
+              , PR_SET_TIMERSLACK_PID_1
+              , PR_SET_TIMERSLACK_PID_2
+              , PR_SET_TIMERSLACK_PID_3
+#endif // defined(OS_ANDROID)
+             ),
+             Allow())
+      .Default(CrashSIGSYSPrctl());
+}
+
+// Restricts ioctl(2) requests (arg 1) to TCGETS and FIONREAD; any other
+// request crashes via SIGSYS (CrashSIGSYSIoctl).
+ResultExpr RestrictIoctl() {
+  const Arg<int> request(1);
+  return Switch(request).CASES((TCGETS, FIONREAD), Allow()).Default(
+      CrashSIGSYSIoctl());
+}
+
+// Restricts the flags argument (arg 3) of mmap(2)/mmap2(2): any flag outside
+// kAllowedMask crashes via SIGSYS.
+ResultExpr RestrictMmapFlags() {
+  // The flags you see are actually the allowed ones, and the variable is a
+  // "denied" mask because of the negation operator.
+  // Significantly, we don't permit MAP_HUGETLB, or the newer flags such as
+  // MAP_POPULATE.
+  // TODO(davidung), remove MAP_DENYWRITE with updated Tegra libraries.
+  const uint64_t kAllowedMask = MAP_SHARED | MAP_PRIVATE | MAP_ANONYMOUS |
+                                MAP_STACK | MAP_NORESERVE | MAP_FIXED |
+                                MAP_DENYWRITE;
+  const Arg<int> flags(3);
+  return If((flags & ~kAllowedMask) == 0, Allow()).Else(CrashSIGSYS());
+}
+
+// Restricts the prot argument (arg 2) of mprotect(2) to
+// PROT_READ | PROT_WRITE | PROT_EXEC; anything else crashes via SIGSYS.
+ResultExpr RestrictMprotectFlags() {
+  // The flags you see are actually the allowed ones, and the variable is a
+  // "denied" mask because of the negation operator.
+  // Significantly, we don't permit weird undocumented flags such as
+  // PROT_GROWSDOWN.
+  const uint64_t kAllowedMask = PROT_READ | PROT_WRITE | PROT_EXEC;
+  const Arg<int> prot(2);
+  return If((prot & ~kAllowedMask) == 0, Allow()).Else(CrashSIGSYS());
+}
+
+// Restricts fcntl(2): only a fixed set of cmd values (arg 1) is allowed, and
+// for F_SETFL the flag argument (arg 2) is additionally checked against an
+// allow-list. Everything else crashes via SIGSYS.
+ResultExpr RestrictFcntlCommands() {
+  // We also restrict the flags in F_SETFL. We don't want to permit flags with
+  // a history of trouble such as O_DIRECT. The flags you see are actually the
+  // allowed ones, and the variable is a "denied" mask because of the negation
+  // operator.
+  // Glibc overrides the kernel's O_LARGEFILE value. Account for this.
+  uint64_t kOLargeFileFlag = O_LARGEFILE;
+  if (IsArchitectureX86_64() || IsArchitectureI386() || IsArchitectureMips())
+    kOLargeFileFlag = 0100000;  // The kernel's value, not glibc's.
+
+  const Arg<int> cmd(1);
+  const Arg<long> long_arg(2);
+
+  const uint64_t kAllowedMask = O_ACCMODE | O_APPEND | O_NONBLOCK | O_SYNC |
+                                kOLargeFileFlag | O_CLOEXEC | O_NOATIME;
+  return Switch(cmd)
+      .CASES((F_GETFL,
+              F_GETFD,
+              F_SETFD,
+              F_SETLK,
+              F_SETLKW,
+              F_GETLK,
+              F_DUPFD,
+              F_DUPFD_CLOEXEC),
+             Allow())
+      .Case(F_SETFL,
+            If((long_arg & ~kAllowedMask) == 0, Allow()).Else(CrashSIGSYS()))
+      .Default(CrashSIGSYS());
+}
+
+#if defined(__i386__) || defined(__mips__)
+// Restricts the socketcall(2) multiplexer (arg 0) to a small set of benign
+// sub-calls; everything else (notably socket(2)) fails with EPERM.
+ResultExpr RestrictSocketcallCommand() {
+  // Unfortunately, we are unable to restrict the first parameter to
+  // socketpair(2). Whilst initially sounding bad, it's noteworthy that very
+  // few protocols actually support socketpair(2). The scary call that we're
+  // worried about, socket(2), remains blocked.
+  const Arg<int> call(0);
+  return Switch(call)
+      .CASES((SYS_SOCKETPAIR,
+              SYS_SHUTDOWN,
+              SYS_RECV,
+              SYS_SEND,
+              SYS_RECVFROM,
+              SYS_SENDTO,
+              SYS_RECVMSG,
+              SYS_SENDMSG),
+             Allow())
+      .Default(Error(EPERM));
+}
+#endif
+
+// For kill/tgkill, allows signalling only |target_pid| (crash otherwise);
+// tkill always crashes. |sysno| must be one of kill, tgkill or tkill.
+ResultExpr RestrictKillTarget(pid_t target_pid, int sysno) {
+  switch (sysno) {
+    case __NR_kill:
+    case __NR_tgkill: {
+      const Arg<pid_t> pid(0);
+      return If(pid == target_pid, Allow()).Else(CrashSIGSYSKill());
+    }
+    case __NR_tkill:
+      return CrashSIGSYSKill();  // tkill is deprecated; never allowed.
+    default:
+      NOTREACHED();
+      return CrashSIGSYS();
+  }
+}
+
+// Restricts futex(2) operations (arg 1, with the PRIVATE/CLOCK_REALTIME
+// modifier bits masked off) to the basic WAIT/WAKE/REQUEUE family. Anything
+// else crashes via SIGSYS, except on systems with a buggy glibc sem_post,
+// where EINVAL is returned instead so the bug stays non-fatal.
+ResultExpr RestrictFutex() {
+  const uint64_t kAllowedFutexFlags = FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME;
+  const Arg<int> op(1);
+  return Switch(op & ~kAllowedFutexFlags)
+      .CASES((FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE, FUTEX_CMP_REQUEUE,
+              FUTEX_WAKE_OP, FUTEX_WAIT_BITSET, FUTEX_WAKE_BITSET),
+             Allow())
+      .Default(IsBuggyGlibcSemPost() ? Error(EINVAL) : CrashSIGSYSFutex());
+}
+
+// For getpriority/setpriority: crash unless |which| is PRIO_PROCESS; within
+// PRIO_PROCESS, allow only |who| == 0 (self) or |target_pid|, else EPERM.
+ResultExpr RestrictGetSetpriority(pid_t target_pid) {
+  const Arg<int> which(0);
+  const Arg<int> who(1);
+  return If(which == PRIO_PROCESS,
+            Switch(who).CASES((0, target_pid), Allow()).Default(Error(EPERM)))
+      .Else(CrashSIGSYS());
+}
+
+// For the sched_* syscalls that take a pid as arg 0: allow pid 0 (current
+// thread) or |target_pid|; other pids go through RewriteSchedSIGSYS(), whose
+// SIGSYS handler emulates/adjusts the call rather than crashing outright.
+ResultExpr RestrictSchedTarget(pid_t target_pid, int sysno) {
+  switch (sysno) {
+    case __NR_sched_getaffinity:
+    case __NR_sched_getattr:
+    case __NR_sched_getparam:
+    case __NR_sched_getscheduler:
+    case __NR_sched_rr_get_interval:
+    case __NR_sched_setaffinity:
+    case __NR_sched_setattr:
+    case __NR_sched_setparam:
+    case __NR_sched_setscheduler: {
+      const Arg<pid_t> pid(0);
+      return Switch(pid)
+          .CASES((0, target_pid), Allow())
+          .Default(RewriteSchedSIGSYS());
+    }
+    default:
+      NOTREACHED();
+      return CrashSIGSYS();
+  }
+}
+
+// Restricts the pid argument (arg 0) of prlimit64(2) to 0 (self) or
+// |target_pid|; anything else crashes via SIGSYS.
+ResultExpr RestrictPrlimit64(pid_t target_pid) {
+  const Arg<pid_t> pid(0);
+  return Switch(pid).CASES((0, target_pid), Allow()).Default(CrashSIGSYS());
+}
+
+// Restricts getrusage(2) (arg 0) to RUSAGE_SELF; anything else (e.g.
+// RUSAGE_CHILDREN) crashes via SIGSYS.
+ResultExpr RestrictGetrusage() {
+  const Arg<int> who(0);
+  return If(who == RUSAGE_SELF, Allow()).Else(CrashSIGSYS());
+}
+#endif // !defined(OS_NACL_NONSFI)
+
+// Restricts the clockid argument (arg 0) of the clock_* syscalls to a fixed
+// allow-list of well-known clocks; any other clockid (in particular dynamic
+// per-process/per-thread CPU-time clock IDs) crashes via SIGSYS.
+ResultExpr RestrictClockID() {
+  // The BPF comparison below inspects a 32-bit argument.
+  static_assert(4 == sizeof(clockid_t), "clockid_t is not 32bit");
+  const Arg<clockid_t> clockid(0);
+  return Switch(clockid)
+      .CASES((
+#if defined(OS_ANDROID)
+              CLOCK_BOOTTIME,
+#endif
+              CLOCK_MONOTONIC,
+              CLOCK_MONOTONIC_COARSE,
+              CLOCK_PROCESS_CPUTIME_ID,
+              CLOCK_REALTIME,
+              CLOCK_REALTIME_COARSE,
+              CLOCK_THREAD_CPUTIME_ID),
+             Allow())
+      .Default(CrashSIGSYS());
+}
+
+} // namespace sandbox.
diff --git a/libchrome/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h b/libchrome/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h
new file mode 100644
index 0000000..b96fe20
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h
@@ -0,0 +1,99 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_HELPERS_SYSCALL_PARAMETERS_RESTRICTIONS_H_
+#define SANDBOX_LINUX_SECCOMP_BPF_HELPERS_SYSCALL_PARAMETERS_RESTRICTIONS_H_
+
+#include <unistd.h>
+
+#include "build/build_config.h"
+#include "sandbox/linux/bpf_dsl/bpf_dsl_forward.h"
+#include "sandbox/sandbox_export.h"
+
+// These are helpers to build seccomp-bpf policies, i.e. policies for a
+// sandbox that reduces the Linux kernel's attack surface. They return a
+// bpf_dsl::ResultExpr suitable to restrict certain system call parameters.
+
+namespace sandbox {
+
+// Allow clone(2) for threads.
+// Reject fork(2) attempts with EPERM.
+// Don't restrict on ASAN.
+// Crash if anything else is attempted.
+SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictCloneToThreadsAndEPERMFork();
+
+// Allow PR_GET_NAME, PR_SET_NAME, PR_GET_DUMPABLE and PR_SET_DUMPABLE
+// (plus a few Android-specific options when OS_ANDROID is defined).
+SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictPrctl();
+
+// Allow TCGETS and FIONREAD.
+// Crash if anything else is attempted.
+SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictIoctl();
+
+// Restrict the flags argument in mmap(2).
+// Only allow: MAP_SHARED | MAP_PRIVATE | MAP_ANONYMOUS |
+// MAP_STACK | MAP_NORESERVE | MAP_FIXED | MAP_DENYWRITE.
+// Crash if any other flag is used.
+SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictMmapFlags();
+
+// Restrict the prot argument in mprotect(2).
+// Only allow: PROT_READ | PROT_WRITE | PROT_EXEC.
+SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictMprotectFlags();
+
+// Restrict fcntl(2) cmd argument to:
+// We allow F_GETFL, F_SETFL, F_GETFD, F_SETFD, F_DUPFD, F_DUPFD_CLOEXEC,
+// F_SETLK, F_SETLKW and F_GETLK.
+// Also, in F_SETFL, restrict the allowed flags to: O_ACCMODE | O_APPEND |
+// O_NONBLOCK | O_SYNC | O_LARGEFILE | O_CLOEXEC | O_NOATIME.
+SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictFcntlCommands();
+
+#if defined(__i386__) || defined(__mips__)
+// Restrict socketcall(2) to only allow socketpair(2), send(2), recv(2),
+// sendto(2), recvfrom(2), shutdown(2), sendmsg(2) and recvmsg(2).
+SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictSocketcallCommand();
+#endif
+
+// Restrict |sysno| (which must be kill, tkill or tgkill) by allowing tgkill or
+// kill iff the first parameter is |target_pid|, crashing otherwise or if
+// |sysno| is tkill.
+SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictKillTarget(pid_t target_pid,
+ int sysno);
+
+// Restrict the second argument of futex(2) to an allow-list of basic
+// WAIT/WAKE/REQUEUE operations; crash on anything else (e.g.
+// FUTEX_CMP_REQUEUE_PI).
+SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictFutex();
+
+// Crash if |which| is not PRIO_PROCESS. EPERM if |who| is not 0, neither
+// |target_pid| while calling setpriority(2) / getpriority(2).
+SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictGetSetpriority(pid_t target_pid);
+
+// Restricts |pid| for sched_* syscalls which take a pid as the first argument.
+// We only allow calling these syscalls if the pid argument is equal to the pid
+// of the sandboxed process or 0 (indicating the current thread). The following
+// syscalls are supported:
+//
+// sched_getaffinity(), sched_getattr(), sched_getparam(), sched_getscheduler(),
+// sched_rr_get_interval(), sched_setaffinity(), sched_setattr(),
+// sched_setparam(), sched_setscheduler()
+SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictSchedTarget(pid_t target_pid,
+ int sysno);
+
+// Restricts the |pid| argument of prlimit64 to 0 (meaning the calling process)
+// or target_pid.
+SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictPrlimit64(pid_t target_pid);
+
+// Restricts the |who| argument of getrusage to RUSAGE_SELF (meaning the calling
+// process).
+SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictGetrusage();
+
+// Restrict |clk_id| for clock_getres(), clock_gettime() and clock_settime().
+// We allow accessing only CLOCK_MONOTONIC, CLOCK_MONOTONIC_COARSE,
+// CLOCK_PROCESS_CPUTIME_ID, CLOCK_REALTIME, CLOCK_REALTIME_COARSE,
+// CLOCK_THREAD_CPUTIME_ID and, on Android, CLOCK_BOOTTIME. In particular,
+// this disallows access to arbitrary per-{process,thread} CPU-time clock IDs
+// (such as those returned by {clock,pthread}_getcpuclockid), which can leak
+// information about the state of the host OS.
+SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictClockID();
+
+} // namespace sandbox.
+
+#endif // SANDBOX_LINUX_SECCOMP_BPF_HELPERS_SYSCALL_PARAMETERS_RESTRICTIONS_H_
diff --git a/libchrome/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc b/libchrome/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
new file mode 100644
index 0000000..804a8fe
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
@@ -0,0 +1,241 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h"
+
+#include <errno.h>
+#include <sched.h>
+#include <sys/resource.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "base/bind.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/sys_info.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "sandbox/linux/bpf_dsl/bpf_dsl.h"
+#include "sandbox/linux/bpf_dsl/policy.h"
+#include "sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.h"
+#include "sandbox/linux/seccomp-bpf/bpf_tests.h"
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+#include "sandbox/linux/seccomp-bpf/syscall.h"
+#include "sandbox/linux/services/syscall_wrappers.h"
+#include "sandbox/linux/system_headers/linux_syscalls.h"
+#include "sandbox/linux/system_headers/linux_time.h"
+#include "sandbox/linux/tests/unit_tests.h"
+
+#if !defined(OS_ANDROID)
+#include "third_party/lss/linux_syscall_support.h" // for MAKE_PROCESS_CPUCLOCK
+#endif
+
+namespace sandbox {
+
+namespace {
+
+// NOTE: most of the parameter restrictions are tested in
+// baseline_policy_unittest.cc as a more end-to-end test.
+
+using sandbox::bpf_dsl::Allow;
+using sandbox::bpf_dsl::ResultExpr;
+
+// Test policy that applies RestrictClockID() to clock_gettime/clock_getres
+// and allows every other syscall.
+class RestrictClockIdPolicy : public bpf_dsl::Policy {
+ public:
+  RestrictClockIdPolicy() {}
+  ~RestrictClockIdPolicy() override {}
+
+  ResultExpr EvaluateSyscall(int sysno) const override {
+    switch (sysno) {
+      case __NR_clock_gettime:
+      case __NR_clock_getres:
+        return RestrictClockID();
+      default:
+        return Allow();
+    }
+  }
+};
+
+// Asserts that clock_getres() and clock_gettime() both succeed for |clockid|
+// and write plausible values. The fields are pre-poisoned with -1 so the
+// assertions also prove the kernel actually wrote to the struct.
+void CheckClock(clockid_t clockid) {
+  struct timespec ts;
+  ts.tv_sec = -1;
+  ts.tv_nsec = -1;
+  BPF_ASSERT_EQ(0, clock_getres(clockid, &ts));
+  BPF_ASSERT_EQ(0, ts.tv_sec);
+  BPF_ASSERT_LE(0, ts.tv_nsec);
+  ts.tv_sec = -1;
+  ts.tv_nsec = -1;
+  BPF_ASSERT_EQ(0, clock_gettime(clockid, &ts));
+  BPF_ASSERT_LE(0, ts.tv_sec);
+  BPF_ASSERT_LE(0, ts.tv_nsec);
+}
+
+// Every clock on RestrictClockID()'s allow-list must work under the policy.
+BPF_TEST_C(ParameterRestrictions,
+           clock_gettime_allowed,
+           RestrictClockIdPolicy) {
+  CheckClock(CLOCK_MONOTONIC);
+  CheckClock(CLOCK_MONOTONIC_COARSE);
+  CheckClock(CLOCK_PROCESS_CPUTIME_ID);
+#if defined(OS_ANDROID)
+  CheckClock(CLOCK_BOOTTIME);
+#endif
+  CheckClock(CLOCK_REALTIME);
+  CheckClock(CLOCK_REALTIME_COARSE);
+  CheckClock(CLOCK_THREAD_CPUTIME_ID);
+}
+
+// CLOCK_MONOTONIC_RAW is not on the allow-list, so it must crash via SIGSYS.
+BPF_DEATH_TEST_C(ParameterRestrictions,
+                 clock_gettime_crash_monotonic_raw,
+                 DEATH_SEGV_MESSAGE(sandbox::GetErrorMessageContentForTests()),
+                 RestrictClockIdPolicy) {
+  struct timespec ts;
+  clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
+}
+
+#if !defined(OS_ANDROID)
+// A dynamically-constructed per-process CPU clock (here: init's) is not on
+// the allow-list and must crash via SIGSYS.
+BPF_DEATH_TEST_C(ParameterRestrictions,
+                 clock_gettime_crash_cpu_clock,
+                 DEATH_SEGV_MESSAGE(sandbox::GetErrorMessageContentForTests()),
+                 RestrictClockIdPolicy) {
+  // We can't use clock_getcpuclockid() because it's not implemented in newlib,
+  // and it might not work inside the sandbox anyway.
+  const pid_t kInitPID = 1;
+  const clockid_t kInitCPUClockID =
+      MAKE_PROCESS_CPUCLOCK(kInitPID, CPUCLOCK_SCHED);
+
+  struct timespec ts;
+  clock_gettime(kInitCPUClockID, &ts);
+}
+#endif  // !defined(OS_ANDROID)
+
+// Test policy that applies RestrictSchedTarget() to sched_getparam only and
+// allows every other syscall.
+class RestrictSchedPolicy : public bpf_dsl::Policy {
+ public:
+  RestrictSchedPolicy() {}
+  ~RestrictSchedPolicy() override {}
+
+  ResultExpr EvaluateSyscall(int sysno) const override {
+    switch (sysno) {
+      case __NR_sched_getparam:
+        return RestrictSchedTarget(getpid(), sysno);
+      default:
+        return Allow();
+    }
+  }
+};
+
+// Asserts that sched_getparam() succeeds for |pid|, filling |param|.
+void CheckSchedGetParam(pid_t pid, struct sched_param* param) {
+  BPF_ASSERT_EQ(0, sched_getparam(pid, param));
+}
+
+// Body of the sched_getparam test, run on a secondary thread so that the
+// current tid differs from the process pid. Verifies that the policy accepts
+// the process pid, 0 (current thread) and the raw tid as targets, and that a
+// denied call fails with errno set (EINVAL) by the SIGSYS handler.
+void SchedGetParamThread(base::WaitableEvent* thread_run) {
+  const pid_t pid = getpid();
+  const pid_t tid = sys_gettid();
+  BPF_ASSERT_NE(pid, tid);
+
+  struct sched_param current_pid_param;
+  // Fixed mojibake: "&current_pid_param" had been corrupted to
+  // "\u00a4t_pid_param" by an HTML-entity decode (&curren -> U+00A4).
+  CheckSchedGetParam(pid, &current_pid_param);
+
+  struct sched_param zero_param;
+  CheckSchedGetParam(0, &zero_param);
+
+  struct sched_param tid_param;
+  CheckSchedGetParam(tid, &tid_param);
+
+  BPF_ASSERT_EQ(zero_param.sched_priority, tid_param.sched_priority);
+
+  // Verify that the SIGSYS handler sets errno properly.
+  errno = 0;
+  BPF_ASSERT_EQ(-1, sched_getparam(tid, NULL));
+  BPF_ASSERT_EQ(EINVAL, errno);
+
+  thread_run->Signal();
+}
+
+// Drives SchedGetParamThread() on a helper thread and waits (with a timeout)
+// for it to signal completion.
+BPF_TEST_C(ParameterRestrictions,
+           sched_getparam_allowed,
+           RestrictSchedPolicy) {
+  base::WaitableEvent thread_run(
+      base::WaitableEvent::ResetPolicy::MANUAL,
+      base::WaitableEvent::InitialState::NOT_SIGNALED);
+  // Run the actual test in a new thread so that the current pid and tid are
+  // different.
+  base::Thread getparam_thread("sched_getparam_thread");
+  BPF_ASSERT(getparam_thread.Start());
+  getparam_thread.message_loop()->PostTask(
+      FROM_HERE, base::Bind(&SchedGetParamThread, &thread_run));
+  BPF_ASSERT(thread_run.TimedWait(base::TimeDelta::FromMilliseconds(5000)));
+  getparam_thread.Stop();
+}
+
+// sched_getparam() targeting a foreign pid (init) must crash via SIGSYS.
+BPF_DEATH_TEST_C(ParameterRestrictions,
+                 sched_getparam_crash_non_zero,
+                 DEATH_SEGV_MESSAGE(sandbox::GetErrorMessageContentForTests()),
+                 RestrictSchedPolicy) {
+  const pid_t kInitPID = 1;
+  struct sched_param param;
+  // Fixed mojibake: "&param" had been corrupted to "\u00b6m" by an
+  // HTML-entity decode (&para -> U+00B6).
+  sched_getparam(kInitPID, &param);
+}
+
+// Test policy that applies RestrictPrlimit64() to prlimit64 and allows every
+// other syscall.
+class RestrictPrlimit64Policy : public bpf_dsl::Policy {
+ public:
+  RestrictPrlimit64Policy() {}
+  ~RestrictPrlimit64Policy() override {}
+
+  ResultExpr EvaluateSyscall(int sysno) const override {
+    switch (sysno) {
+      case __NR_prlimit64:
+        return RestrictPrlimit64(getpid());
+      default:
+        return Allow();
+    }
+  }
+};
+
+// prlimit64() with pid 0 (self) or the actual pid must both be allowed.
+BPF_TEST_C(ParameterRestrictions, prlimit64_allowed, RestrictPrlimit64Policy) {
+  BPF_ASSERT_EQ(0, sys_prlimit64(0, RLIMIT_AS, NULL, NULL));
+  BPF_ASSERT_EQ(0, sys_prlimit64(getpid(), RLIMIT_AS, NULL, NULL));
+}
+
+// prlimit64() targeting a foreign pid (init) must crash via SIGSYS.
+BPF_DEATH_TEST_C(ParameterRestrictions,
+                 prlimit64_crash_not_self,
+                 DEATH_SEGV_MESSAGE(sandbox::GetErrorMessageContentForTests()),
+                 RestrictPrlimit64Policy) {
+  const pid_t kInitPID = 1;
+  BPF_ASSERT_NE(kInitPID, getpid());
+  sys_prlimit64(kInitPID, RLIMIT_AS, NULL, NULL);
+}
+
+// Test policy that applies RestrictGetrusage() to getrusage and allows every
+// other syscall.
+class RestrictGetrusagePolicy : public bpf_dsl::Policy {
+ public:
+  RestrictGetrusagePolicy() {}
+  ~RestrictGetrusagePolicy() override {}
+
+  ResultExpr EvaluateSyscall(int sysno) const override {
+    switch (sysno) {
+      case __NR_getrusage:
+        return RestrictGetrusage();
+      default:
+        return Allow();
+    }
+  }
+};
+
+// getrusage(RUSAGE_SELF) is on the allow-list and must succeed.
+BPF_TEST_C(ParameterRestrictions, getrusage_allowed, RestrictGetrusagePolicy) {
+  struct rusage usage;
+  BPF_ASSERT_EQ(0, getrusage(RUSAGE_SELF, &usage));
+}
+
+// getrusage(RUSAGE_CHILDREN) is not RUSAGE_SELF and must crash via SIGSYS.
+BPF_DEATH_TEST_C(ParameterRestrictions,
+                 getrusage_crash_not_self,
+                 DEATH_SEGV_MESSAGE(sandbox::GetErrorMessageContentForTests()),
+                 RestrictGetrusagePolicy) {
+  struct rusage usage;
+  getrusage(RUSAGE_CHILDREN, &usage);
+}
+
+} // namespace
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc b/libchrome/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc
new file mode 100644
index 0000000..c217d47
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc
@@ -0,0 +1,1060 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf-helpers/syscall_sets.h"
+
+#include "build/build_config.h"
+#include "sandbox/linux/system_headers/linux_syscalls.h"
+
+namespace sandbox {
+
+// The functions below cover all existing i386, x86_64, and ARM system calls;
+// excluding syscalls made obsolete in ARM EABI.
+// The implicitly defined sets form a partition of the sets of
+// system calls.
+
+// Signal-sending syscalls that need per-target restriction (see
+// RestrictKillTarget()).
+bool SyscallSets::IsKill(int sysno) {
+  switch (sysno) {
+    case __NR_kill:
+    case __NR_tgkill:
+    case __NR_tkill:  // Deprecated.
+      return true;
+    default:
+      return false;
+  }
+}
+
+// Time-querying syscalls that are safe to allow. The case labels listed
+// after the "return true" deliberately fall through to the default
+// "return false": they enumerate related (denied) syscalls so the partition
+// of all time syscalls stays explicit.
+bool SyscallSets::IsAllowedGettime(int sysno) {
+  switch (sysno) {
+    case __NR_gettimeofday:
+#if defined(__i386__) || defined(__x86_64__) || defined(__mips__)
+    case __NR_time:
+#endif
+      return true;
+    case __NR_adjtimex:         // Privileged.
+    case __NR_clock_adjtime:    // Privileged.
+    case __NR_clock_getres:     // Could be allowed.
+    case __NR_clock_gettime:
+    case __NR_clock_nanosleep:  // Could be allowed.
+    case __NR_clock_settime:    // Privileged.
+#if defined(__i386__) || defined(__mips__)
+    case __NR_ftime:  // Obsolete.
+#endif
+    case __NR_settimeofday:  // Privileged.
+#if defined(__i386__) || defined(__mips__)
+    case __NR_stime:
+#endif
+    default:
+      return false;
+  }
+}
+
+// Syscalls that read or change the current working directory.
+bool SyscallSets::IsCurrentDirectory(int sysno) {
+  switch (sysno) {
+    case __NR_getcwd:
+    case __NR_chdir:
+    case __NR_fchdir:
+      return true;
+    default:
+      return false;
+  }
+}
+
+// The umask(2) syscall, by itself, as its own set.
+bool SyscallSets::IsUmask(int sysno) {
+  switch (sysno) {
+    case __NR_umask:
+      return true;
+    default:
+      return false;
+  }
+}
+
+// System calls that directly access the file system. They might acquire
+// a new file descriptor or otherwise perform an operation directly
+// via a path.
+// Both EPERM and ENOENT are valid errno unless otherwise noted in comment.
+bool SyscallSets::IsFileSystem(int sysno) {
+  switch (sysno) {
+#if !defined(__aarch64__)
+    case __NR_access:  // EPERM not a valid errno.
+    case __NR_chmod:
+    case __NR_chown:
+#if defined(__i386__) || defined(__arm__)
+    case __NR_chown32:
+#endif
+    case __NR_creat:
+    case __NR_futimesat:  // Should be called utimesat ?
+    case __NR_lchown:
+    case __NR_link:
+    case __NR_lstat:  // EPERM not a valid errno.
+    case __NR_mkdir:
+    case __NR_mknod:
+    case __NR_open:
+    case __NR_readlink:  // EPERM not a valid errno.
+    case __NR_rename:
+    case __NR_rmdir:
+    case __NR_stat:  // EPERM not a valid errno.
+    case __NR_symlink:
+    case __NR_unlink:
+    case __NR_uselib:  // Neither EPERM, nor ENOENT are valid errno.
+    case __NR_ustat:   // Same as above. Deprecated.
+    case __NR_utimes:
+#endif  // !defined(__aarch64__)
+
+    case __NR_execve:
+    case __NR_faccessat:  // EPERM not a valid errno.
+    case __NR_fchmodat:
+    case __NR_fchownat:  // Should be called chownat ?
+#if defined(__x86_64__) || defined(__aarch64__)
+    case __NR_newfstatat:  // fstatat(). EPERM not a valid errno.
+#elif defined(__i386__) || defined(__arm__) || defined(__mips__)
+    case __NR_fstatat64:
+#endif
+#if defined(__i386__) || defined(__arm__)
+    case __NR_lchown32:
+#endif
+    case __NR_linkat:
+    case __NR_lookup_dcookie:  // ENOENT not a valid errno.
+
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+    case __NR_lstat64:
+#endif
+#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
+    case __NR_memfd_create:
+#endif
+    case __NR_mkdirat:
+    case __NR_mknodat:
+#if defined(__i386__)
+    case __NR_oldlstat:
+    case __NR_oldstat:
+#endif
+    case __NR_openat:
+    case __NR_readlinkat:
+    case __NR_renameat:
+    case __NR_renameat2:
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+    case __NR_stat64:
+#endif
+    case __NR_statfs:  // EPERM not a valid errno.
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+    case __NR_statfs64:
+#endif
+    case __NR_symlinkat:
+    case __NR_truncate:
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+    case __NR_truncate64:
+#endif
+    case __NR_unlinkat:
+#if defined(__i386__) || defined(__x86_64__) || defined(__mips__)
+    case __NR_utime:
+#endif
+    case __NR_utimensat:  // New.
+      return true;
+    default:
+      return false;
+  }
+}
+
+// Fd-based file-system access that is safe to allow (only fstat/fstat64).
+// The case labels listed after the "return true" deliberately fall through
+// to the default "return false": they enumerate fd-based syscalls that are
+// denied (and should eventually be denied gracefully).
+bool SyscallSets::IsAllowedFileSystemAccessViaFd(int sysno) {
+  switch (sysno) {
+    case __NR_fstat:
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+    case __NR_fstat64:
+#endif
+      return true;
+// TODO(jln): these should be denied gracefully as well (moved below).
+#if defined(__i386__) || defined(__x86_64__) || defined(__mips__)
+    case __NR_fadvise64:  // EPERM not a valid errno.
+#endif
+#if defined(__i386__)
+    case __NR_fadvise64_64:
+#endif
+#if defined(__arm__)
+    case __NR_arm_fadvise64_64:
+#endif
+    case __NR_fdatasync:  // EPERM not a valid errno.
+    case __NR_flock:      // EPERM not a valid errno.
+    case __NR_fstatfs:    // Give information about the whole filesystem.
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+    case __NR_fstatfs64:
+#endif
+    case __NR_fsync:  // EPERM not a valid errno.
+#if defined(__i386__)
+    case __NR_oldfstat:
+#endif
+#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \
+    defined(__aarch64__)
+    case __NR_sync_file_range:  // EPERM not a valid errno.
+#elif defined(__arm__)
+    case __NR_arm_sync_file_range:  // EPERM not a valid errno.
+#endif
+    default:
+      return false;
+  }
+}
+
+// Fd-based file-system access that policies should deny.
+// EPERM is a good errno for any of these.
+bool SyscallSets::IsDeniedFileSystemAccessViaFd(int sysno) {
+  switch (sysno) {
+    case __NR_fallocate:
+    case __NR_fchmod:
+    case __NR_fchown:
+    case __NR_ftruncate:
+#if defined(__i386__) || defined(__arm__)
+    case __NR_fchown32:
+#endif
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+    case __NR_ftruncate64:
+#endif
+#if !defined(__aarch64__)
+    case __NR_getdents:  // EPERM not a valid errno.
+#endif
+    case __NR_getdents64:  // EPERM not a valid errno.
+#if defined(__i386__) || defined(__mips__)
+    case __NR_readdir:
+#endif
+      return true;
+    default:
+      return false;
+  }
+}
+
+// Read-only identity queries (pids, uids, gids, capabilities) that reveal
+// little and are generally safe to allow.
+bool SyscallSets::IsGetSimpleId(int sysno) {
+  switch (sysno) {
+    case __NR_capget:
+    case __NR_getegid:
+    case __NR_geteuid:
+    case __NR_getgid:
+    case __NR_getgroups:
+    case __NR_getpid:
+    case __NR_getppid:
+    case __NR_getresgid:
+    case __NR_getsid:
+    case __NR_gettid:
+    case __NR_getuid:
+    case __NR_getresuid:
+#if defined(__i386__) || defined(__arm__)
+    case __NR_getegid32:
+    case __NR_geteuid32:
+    case __NR_getgid32:
+    case __NR_getgroups32:
+    case __NR_getresgid32:
+    case __NR_getresuid32:
+    case __NR_getuid32:
+#endif
+      return true;
+    default:
+      return false;
+  }
+}
+
+// Syscalls that change process credentials or privileges; policies should
+// deny these.
+bool SyscallSets::IsProcessPrivilegeChange(int sysno) {
+  switch (sysno) {
+    case __NR_capset:
+#if defined(__i386__) || defined(__x86_64__)
+    case __NR_ioperm:  // Intel privilege.
+    case __NR_iopl:    // Intel privilege.
+#endif
+    case __NR_setfsgid:
+    case __NR_setfsuid:
+    case __NR_setgid:
+    case __NR_setgroups:
+    case __NR_setregid:
+    case __NR_setresgid:
+    case __NR_setresuid:
+    case __NR_setreuid:
+    case __NR_setuid:
+#if defined(__i386__) || defined(__arm__)
+    case __NR_setfsgid32:
+    case __NR_setfsuid32:
+    case __NR_setgid32:
+    case __NR_setgroups32:
+    case __NR_setregid32:
+    case __NR_setresgid32:
+    case __NR_setresuid32:
+    case __NR_setreuid32:
+    case __NR_setuid32:
+#endif
+      return true;
+    default:
+      return false;
+  }
+}
+
+// Syscalls manipulating process groups and sessions.
+bool SyscallSets::IsProcessGroupOrSession(int sysno) {
+  switch (sysno) {
+    case __NR_setpgid:
+#if !defined(__aarch64__)
+    case __NR_getpgrp:
+#endif
+    case __NR_setsid:
+    case __NR_getpgid:
+      return true;
+    default:
+      return false;
+  }
+}
+
+// Signal-handling syscalls that are safe to allow (install handlers, mask
+// signals, return from a handler). The case labels listed after the
+// "return true" deliberately fall through to the default "return false":
+// they enumerate the signal syscalls that are denied.
+bool SyscallSets::IsAllowedSignalHandling(int sysno) {
+  switch (sysno) {
+    case __NR_rt_sigaction:
+    case __NR_rt_sigprocmask:
+    case __NR_rt_sigreturn:
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+    case __NR_sigaction:
+    case __NR_sigprocmask:
+    case __NR_sigreturn:
+#endif
+      return true;
+    case __NR_rt_sigpending:
+    case __NR_rt_sigqueueinfo:
+    case __NR_rt_sigsuspend:
+    case __NR_rt_sigtimedwait:
+    case __NR_rt_tgsigqueueinfo:
+    case __NR_sigaltstack:
+#if !defined(__aarch64__)
+    case __NR_signalfd:
+#endif
+    case __NR_signalfd4:
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+    case __NR_sigpending:
+    case __NR_sigsuspend:
+#endif
+#if defined(__i386__) || defined(__mips__)
+    case __NR_signal:
+    case __NR_sgetmask:  // Obsolete.
+    case __NR_ssetmask:
+#endif
+    default:
+      return false;
+  }
+}
+
+// Generic fd operations that are safe to allow (close/dup/shutdown).
+// fcntl/fcntl64 fall through to "return false": they need parameter
+// restriction (see RestrictFcntlCommands()) rather than a blanket allow.
+bool SyscallSets::IsAllowedOperationOnFd(int sysno) {
+  switch (sysno) {
+    case __NR_close:
+    case __NR_dup:
+#if !defined(__aarch64__)
+    case __NR_dup2:
+#endif
+    case __NR_dup3:
+#if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \
+    defined(__aarch64__)
+    case __NR_shutdown:
+#endif
+      return true;
+    case __NR_fcntl:
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+    case __NR_fcntl64:
+#endif
+    default:
+      return false;
+  }
+}
+
+// Kernel-internal entry points that userland must be able to reach
+// (restart_syscall after interruption; the ARM cmpxchg kuser helper).
+bool SyscallSets::IsKernelInternalApi(int sysno) {
+  switch (sysno) {
+    case __NR_restart_syscall:
+#if defined(__arm__)
+    case __ARM_NR_cmpxchg:
+#endif
+      return true;
+    default:
+      return false;
+  }
+}
+
+// Process lifetime syscalls that are safe to allow (exit and wait).
+// This should be thought through in conjunction with IsFutex().
+// The case labels listed after the "return true" deliberately fall through
+// to the default "return false": clone/fork/vfork and TLS setup need
+// restriction or denial, not a blanket allow.
+bool SyscallSets::IsAllowedProcessStartOrDeath(int sysno) {
+  switch (sysno) {
+    case __NR_exit:
+    case __NR_exit_group:
+    case __NR_wait4:
+    case __NR_waitid:
+#if defined(__i386__)
+    case __NR_waitpid:
+#endif
+      return true;
+    case __NR_clone:  // Should be parameter-restricted.
+    case __NR_setns:  // Privileged.
+#if !defined(__aarch64__)
+    case __NR_fork:
+#endif
+#if defined(__i386__) || defined(__x86_64__)
+    case __NR_get_thread_area:
+#endif
+#if defined(__i386__) || defined(__x86_64__) || defined(__mips__)
+    case __NR_set_thread_area:
+#endif
+    case __NR_set_tid_address:
+    case __NR_unshare:
+#if !defined(__mips__) && !defined(__aarch64__)
+    case __NR_vfork:
+#endif
+    default:
+      return false;
+  }
+}
+
+// It's difficult to restrict those, but there is attack surface here.
+// Note: nothing in this set is currently allowed — all three listed
+// futex-related syscalls fall through to the default "return false".
+bool SyscallSets::IsAllowedFutex(int sysno) {
+  switch (sysno) {
+    case __NR_get_robust_list:
+    case __NR_set_robust_list:
+    case __NR_futex:
+    default:
+      return false;
+  }
+}
+
+// epoll syscalls that are safe to allow. The case labels placed after
+// "default:" are legal C++ and deliberately share its "return false":
+// they document the epoll variants that are denied.
+bool SyscallSets::IsAllowedEpoll(int sysno) {
+  switch (sysno) {
+#if !defined(__aarch64__)
+    case __NR_epoll_create:
+    case __NR_epoll_wait:
+#endif
+    case __NR_epoll_create1:
+    case __NR_epoll_ctl:
+      return true;
+    default:
+#if defined(__x86_64__)
+    case __NR_epoll_ctl_old:
+#endif
+    case __NR_epoll_pwait:
+#if defined(__x86_64__)
+    case __NR_epoll_wait_old:
+#endif
+      return false;
+  }
+}
+
+// Socket/pipe creation that is safe to allow (pipes only). socketpair falls
+// through to "return false": its domain argument should be inspected first.
+bool SyscallSets::IsAllowedGetOrModifySocket(int sysno) {
+  switch (sysno) {
+#if !defined(__aarch64__)
+    case __NR_pipe:
+#endif
+    case __NR_pipe2:
+      return true;
+    default:
+#if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \
+    defined(__aarch64__)
+    case __NR_socketpair:  // We will want to inspect its argument.
+#endif
+      return false;
+  }
+}
+
+// Network socket creation/connection syscalls; policies should deny these.
+bool SyscallSets::IsDeniedGetOrModifySocket(int sysno) {
+  switch (sysno) {
+#if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \
+    defined(__aarch64__)
+    case __NR_accept:
+    case __NR_accept4:
+    case __NR_bind:
+    case __NR_connect:
+    case __NR_socket:
+    case __NR_listen:
+      return true;
+#endif
+    default:
+      return false;
+  }
+}
+
+#if defined(__i386__) || defined(__mips__)
+// Big multiplexing system call for sockets.
+bool SyscallSets::IsSocketCall(int sysno) {
+  switch (sysno) {
+    case __NR_socketcall:
+      return true;
+    default:
+      return false;
+  }
+}
+#endif
+
+#if defined(__x86_64__) || defined(__arm__) || defined(__mips__)
+// Syscalls that query or set per-socket information.
+bool SyscallSets::IsNetworkSocketInformation(int sysno) {
+  switch (sysno) {
+    case __NR_getpeername:
+    case __NR_getsockname:
+    case __NR_getsockopt:
+    case __NR_setsockopt:
+      return true;
+    default:
+      return false;
+  }
+}
+#endif
+
+// Address-space syscalls that are safe to allow. The case labels listed
+// after the "return true" deliberately fall through to the default
+// "return false": mmap/mprotect etc. need parameter restriction
+// (see RestrictMmapFlags()/RestrictMprotectFlags()) or outright denial.
+bool SyscallSets::IsAllowedAddressSpaceAccess(int sysno) {
+  switch (sysno) {
+    case __NR_brk:
+    case __NR_mlock:
+    case __NR_munlock:
+    case __NR_munmap:
+      return true;
+    case __NR_madvise:
+    case __NR_mincore:
+    case __NR_mlockall:
+#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \
+    defined(__aarch64__)
+    case __NR_mmap:
+#endif
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+    case __NR_mmap2:
+#endif
+#if defined(__i386__) || defined(__x86_64__) || defined(__mips__)
+    case __NR_modify_ldt:
+#endif
+    case __NR_mprotect:
+    case __NR_mremap:
+    case __NR_msync:
+    case __NR_munlockall:
+    case __NR_readahead:
+    case __NR_remap_file_pages:
+#if defined(__i386__)
+    case __NR_vm86:
+    case __NR_vm86old:
+#endif
+    default:
+      return false;
+  }
+}
+
+// Basic I/O syscalls that are safe to allow (read/write/seek/poll and
+// fd-bound socket I/O). The case labels listed after the "return true"
+// deliberately fall through to the default "return false": they enumerate
+// the more powerful I/O syscalls that stay denied (ioctl, splice, etc.).
+bool SyscallSets::IsAllowedGeneralIo(int sysno) {
+  switch (sysno) {
+    case __NR_lseek:
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+    case __NR__llseek:
+#endif
+#if !defined(__aarch64__)
+    case __NR_poll:
+#endif
+    case __NR_ppoll:
+    case __NR_pselect6:
+    case __NR_read:
+    case __NR_readv:
+#if defined(__arm__) || defined(__mips__)
+    case __NR_recv:
+#endif
+#if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \
+    defined(__aarch64__)
+    case __NR_recvfrom:  // Could specify source.
+    case __NR_recvmsg:   // Could specify source.
+#endif
+#if defined(__i386__) || defined(__x86_64__)
+    case __NR_select:
+#endif
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+    case __NR__newselect:
+#endif
+#if defined(__arm__)
+    case __NR_send:
+#endif
+#if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \
+    defined(__aarch64__)
+    case __NR_sendmsg:  // Could specify destination.
+    case __NR_sendto:   // Could specify destination.
+#endif
+    case __NR_write:
+    case __NR_writev:
+      return true;
+    case __NR_ioctl:  // Can be very powerful.
+    case __NR_pread64:
+    case __NR_preadv:
+    case __NR_pwrite64:
+    case __NR_pwritev:
+    case __NR_recvmmsg:  // Could specify source.
+    case __NR_sendfile:
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+    case __NR_sendfile64:
+#endif
+    case __NR_sendmmsg:  // Could specify destination.
+    case __NR_splice:
+    case __NR_tee:
+    case __NR_vmsplice:
+    default:
+      return false;
+  }
+}
+
+// The prctl family (including x86-64's arch_prctl); see RestrictPrctl() for
+// the matching parameter restriction.
+bool SyscallSets::IsPrctl(int sysno) {
+  switch (sysno) {
+#if defined(__x86_64__)
+    case __NR_arch_prctl:
+#endif
+    case __NR_prctl:
+      return true;
+    default:
+      return false;
+  }
+}
+
+// The seccomp(2) syscall itself.
+bool SyscallSets::IsSeccomp(int sysno) {
+  switch (sysno) {
+    case __NR_seccomp:
+      return true;
+    default:
+      return false;
+  }
+}
+
+// Basic scheduling syscalls that are safe to allow (yield/pause/nanosleep).
+// The priority syscalls listed after "return true" deliberately fall
+// through to "return false": they need restriction
+// (see RestrictGetSetpriority()).
+bool SyscallSets::IsAllowedBasicScheduler(int sysno) {
+  switch (sysno) {
+    case __NR_sched_yield:
+#if !defined(__aarch64__)
+    case __NR_pause:
+#endif
+    case __NR_nanosleep:
+      return true;
+    case __NR_getpriority:
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+    case __NR_nice:
+#endif
+    case __NR_setpriority:
+    default:
+      return false;
+  }
+}
+
+// Machine-administration syscalls; policies should deny these.
+bool SyscallSets::IsAdminOperation(int sysno) {
+  switch (sysno) {
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+    case __NR_bdflush:
+#endif
+    case __NR_kexec_load:
+    case __NR_reboot:
+    case __NR_setdomainname:
+    case __NR_sethostname:
+    case __NR_syslog:
+      return true;
+    default:
+      return false;
+  }
+}
+
+// Kernel-module loading/unloading and related legacy syscalls.
+bool SyscallSets::IsKernelModule(int sysno) {
+ switch (sysno) {
+#if defined(__i386__) || defined(__x86_64__) || defined(__mips__)
+ case __NR_create_module:
+ case __NR_get_kernel_syms: // Should ENOSYS.
+ case __NR_query_module:
+#endif
+ case __NR_delete_module:
+ case __NR_init_module:
+ case __NR_finit_module:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Syscalls that alter (or flush) the global file-system view of the process.
+bool SyscallSets::IsGlobalFSViewChange(int sysno) {
+ switch (sysno) {
+ case __NR_pivot_root:
+ case __NR_chroot:
+ case __NR_sync:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// File-system control operations: mounting, quotas, swap, NFS control.
+bool SyscallSets::IsFsControl(int sysno) {
+ switch (sysno) {
+ case __NR_mount:
+ case __NR_nfsservctl:
+ case __NR_quotactl:
+ case __NR_swapoff:
+ case __NR_swapon:
+#if defined(__i386__) || defined(__mips__)
+ case __NR_umount:
+#endif
+ case __NR_umount2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// NUMA memory-policy and page-migration syscalls.
+bool SyscallSets::IsNuma(int sysno) {
+ switch (sysno) {
+ case __NR_get_mempolicy:
+ case __NR_getcpu:
+ case __NR_mbind:
+#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \
+ defined(__aarch64__)
+ case __NR_migrate_pages:
+#endif
+ case __NR_move_pages:
+ case __NR_set_mempolicy:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// POSIX message-queue (mq_*) syscalls.
+bool SyscallSets::IsMessageQueue(int sysno) {
+ switch (sysno) {
+ case __NR_mq_getsetattr:
+ case __NR_mq_notify:
+ case __NR_mq_open:
+ case __NR_mq_timedreceive:
+ case __NR_mq_timedsend:
+ case __NR_mq_unlink:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Syscalls that read or change process-wide environment: resource limits,
+// accounting, personality, CPU-time statistics.
+bool SyscallSets::IsGlobalProcessEnvironment(int sysno) {
+ switch (sysno) {
+ case __NR_acct: // Privileged.
+#if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \
+ defined(__aarch64__)
+ case __NR_getrlimit:
+#endif
+#if defined(__i386__) || defined(__arm__)
+ case __NR_ugetrlimit:
+#endif
+#if defined(__i386__) || defined(__mips__)
+ case __NR_ulimit:
+#endif
+ case __NR_getrusage:
+ case __NR_personality: // Can change its personality as well.
+ case __NR_prlimit64: // Like setrlimit / getrlimit.
+ case __NR_setrlimit:
+ case __NR_times:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Debugging and cross-process inspection syscalls (ptrace and friends).
+bool SyscallSets::IsDebug(int sysno) {
+ switch (sysno) {
+ case __NR_ptrace:
+ case __NR_process_vm_readv:
+ case __NR_process_vm_writev:
+ case __NR_kcmp:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Syscalls that expose global system status (uname, sysinfo, sysctl).
+bool SyscallSets::IsGlobalSystemStatus(int sysno) {
+ switch (sysno) {
+#if !defined(__aarch64__)
+ case __NR__sysctl:
+ case __NR_sysfs:
+#endif
+ case __NR_sysinfo:
+ case __NR_uname:
+#if defined(__i386__)
+ case __NR_olduname:
+ case __NR_oldolduname:
+#endif
+ return true;
+ default:
+ return false;
+ }
+}
+
+// eventfd creation syscalls (legacy eventfd plus eventfd2).
+bool SyscallSets::IsEventFd(int sysno) {
+ switch (sysno) {
+#if !defined(__aarch64__)
+ case __NR_eventfd:
+#endif
+ case __NR_eventfd2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Asynchronous I/O API (Linux native AIO: io_* syscalls).
+bool SyscallSets::IsAsyncIo(int sysno) {
+ switch (sysno) {
+ case __NR_io_cancel:
+ case __NR_io_destroy:
+ case __NR_io_getevents:
+ case __NR_io_setup:
+ case __NR_io_submit:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Kernel key-retention service syscalls (add_key/keyctl/request_key).
+bool SyscallSets::IsKeyManagement(int sysno) {
+ switch (sysno) {
+ case __NR_add_key:
+ case __NR_keyctl:
+ case __NR_request_key:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// System V semaphores. Only defined on arches that have dedicated sem*
+// syscalls; i386/mips multiplex these through __NR_ipc (see IsSystemVIpc).
+#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
+bool SyscallSets::IsSystemVSemaphores(int sysno) {
+ switch (sysno) {
+ case __NR_semctl:
+ case __NR_semget:
+ case __NR_semop:
+ case __NR_semtimedop:
+ return true;
+ default:
+ return false;
+ }
+}
+#endif
+
+#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
+// System V shared memory (shm*).
+// These give a lot of ambient authority and bypass the setuid sandbox.
+bool SyscallSets::IsSystemVSharedMemory(int sysno) {
+ switch (sysno) {
+ case __NR_shmat:
+ case __NR_shmctl:
+ case __NR_shmdt:
+ case __NR_shmget:
+ return true;
+ default:
+ return false;
+ }
+}
+#endif
+
+// System V message queues (msg*) on arches with dedicated syscalls.
+#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
+bool SyscallSets::IsSystemVMessageQueue(int sysno) {
+ switch (sysno) {
+ case __NR_msgctl:
+ case __NR_msgget:
+ case __NR_msgrcv:
+ case __NR_msgsnd:
+ return true;
+ default:
+ return false;
+ }
+}
+#endif
+
+#if defined(__i386__) || defined(__mips__)
+// Big system V multiplexing system call: on these arches all SysV IPC
+// operations (sem/shm/msg) funnel through __NR_ipc.
+bool SyscallSets::IsSystemVIpc(int sysno) {
+ switch (sysno) {
+ case __NR_ipc:
+ return true;
+ default:
+ return false;
+ }
+}
+#endif
+
+// Union of all System V IPC predicates, dispatched per architecture.
+// NOTE(review): on an architecture matching neither #if branch this function
+// would fall off the end without returning — presumably all supported arches
+// are covered; confirm if a new architecture is added.
+bool SyscallSets::IsAnySystemV(int sysno) {
+#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
+ return IsSystemVMessageQueue(sysno) || IsSystemVSemaphores(sysno) ||
+ IsSystemVSharedMemory(sysno);
+#elif defined(__i386__) || defined(__mips__)
+ return IsSystemVIpc(sysno);
+#endif
+}
+
+// Fine-grained CPU and I/O scheduler control (sched_* and ioprio_*).
+bool SyscallSets::IsAdvancedScheduler(int sysno) {
+ switch (sysno) {
+ case __NR_ioprio_get: // IO scheduler.
+ case __NR_ioprio_set:
+ case __NR_sched_get_priority_max:
+ case __NR_sched_get_priority_min:
+ case __NR_sched_getaffinity:
+ case __NR_sched_getattr:
+ case __NR_sched_getparam:
+ case __NR_sched_getscheduler:
+ case __NR_sched_rr_get_interval:
+ case __NR_sched_setaffinity:
+ case __NR_sched_setattr:
+ case __NR_sched_setparam:
+ case __NR_sched_setscheduler:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// inotify file-watching syscalls.
+bool SyscallSets::IsInotify(int sysno) {
+ switch (sysno) {
+ case __NR_inotify_add_watch:
+#if !defined(__aarch64__)
+ case __NR_inotify_init:
+#endif
+ case __NR_inotify_init1:
+ case __NR_inotify_rm_watch:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// fanotify file-access notification syscalls.
+bool SyscallSets::IsFaNotify(int sysno) {
+ switch (sysno) {
+ case __NR_fanotify_init:
+ case __NR_fanotify_mark:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Classic interval-timer syscalls (getitimer/setitimer/alarm).
+bool SyscallSets::IsTimer(int sysno) {
+ switch (sysno) {
+ case __NR_getitimer:
+#if defined(__i386__) || defined(__x86_64__) || defined(__mips__)
+ case __NR_alarm:
+#endif
+ case __NR_setitimer:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// POSIX per-process timers (timer_*) and timerfd syscalls.
+bool SyscallSets::IsAdvancedTimer(int sysno) {
+ switch (sysno) {
+ case __NR_timer_create:
+ case __NR_timer_delete:
+ case __NR_timer_getoverrun:
+ case __NR_timer_gettime:
+ case __NR_timer_settime:
+ case __NR_timerfd_create:
+ case __NR_timerfd_gettime:
+ case __NR_timerfd_settime:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Extended file-attribute (xattr) syscalls, in all path/fd/symlink variants.
+bool SyscallSets::IsExtendedAttributes(int sysno) {
+ switch (sysno) {
+ case __NR_fgetxattr:
+ case __NR_flistxattr:
+ case __NR_fremovexattr:
+ case __NR_fsetxattr:
+ case __NR_getxattr:
+ case __NR_lgetxattr:
+ case __NR_listxattr:
+ case __NR_llistxattr:
+ case __NR_lremovexattr:
+ case __NR_lsetxattr:
+ case __NR_removexattr:
+ case __NR_setxattr:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Various system calls that need to be researched.
+// TODO(jln): classify this better.
+bool SyscallSets::IsMisc(int sysno) {
+ switch (sysno) {
+#if !defined(__mips__)
+ case __NR_getrandom:
+#endif
+ case __NR_name_to_handle_at:
+ case __NR_open_by_handle_at:
+ case __NR_perf_event_open:
+ case __NR_syncfs:
+ case __NR_vhangup:
+// The system calls below are not implemented (reserved/removed numbers that
+// should return ENOSYS).
+#if defined(__i386__) || defined(__x86_64__) || defined(__mips__)
+ case __NR_afs_syscall:
+#endif
+#if defined(__i386__) || defined(__mips__)
+ case __NR_break:
+#endif
+#if defined(__i386__) || defined(__x86_64__) || defined(__mips__)
+ case __NR_getpmsg:
+#endif
+#if defined(__i386__) || defined(__mips__)
+ case __NR_gtty:
+ case __NR_idle:
+ case __NR_lock:
+ case __NR_mpx:
+ case __NR_prof:
+ case __NR_profil:
+#endif
+#if defined(__i386__) || defined(__x86_64__) || defined(__mips__)
+ case __NR_putpmsg:
+#endif
+#if defined(__x86_64__)
+ case __NR_security:
+#endif
+#if defined(__i386__) || defined(__mips__)
+ case __NR_stty:
+#endif
+#if defined(__x86_64__)
+ case __NR_tuxcall:
+#endif
+#if !defined(__aarch64__)
+ case __NR_vserver:
+#endif
+ return true;
+ default:
+ return false;
+ }
+}
+
+#if defined(__arm__)
+// ARM-only PCI configuration-space access syscalls.
+bool SyscallSets::IsArmPciConfig(int sysno) {
+ switch (sysno) {
+ case __NR_pciconfig_iobase:
+ case __NR_pciconfig_read:
+ case __NR_pciconfig_write:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// ARM private syscalls (__ARM_NR_* range: cache flush, TLS setup, etc.).
+bool SyscallSets::IsArmPrivate(int sysno) {
+ switch (sysno) {
+ case __ARM_NR_breakpoint:
+ case __ARM_NR_cacheflush:
+ case __ARM_NR_set_tls:
+ case __ARM_NR_usr26:
+ case __ARM_NR_usr32:
+ return true;
+ default:
+ return false;
+ }
+}
+#endif // defined(__arm__)
+
+#if defined(__mips__)
+// MIPS-only cache-maintenance syscalls.
+bool SyscallSets::IsMipsPrivate(int sysno) {
+ switch (sysno) {
+ case __NR_cacheflush:
+ case __NR_cachectl:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Miscellaneous MIPS-only syscalls (sysmips and a reserved number).
+bool SyscallSets::IsMipsMisc(int sysno) {
+ switch (sysno) {
+ case __NR_sysmips:
+ case __NR_unused150:
+ return true;
+ default:
+ return false;
+ }
+}
+#endif // defined(__mips__)
+} // namespace sandbox.
diff --git a/libchrome/sandbox/linux/seccomp-bpf-helpers/syscall_sets.h b/libchrome/sandbox/linux/seccomp-bpf-helpers/syscall_sets.h
new file mode 100644
index 0000000..5ba6335
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf-helpers/syscall_sets.h
@@ -0,0 +1,112 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_HELPERS_SYSCALL_SETS_H_
+#define SANDBOX_LINUX_SECCOMP_BPF_HELPERS_SYSCALL_SETS_H_
+
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "sandbox/sandbox_export.h"
+
+// These are helpers to build seccomp-bpf policies, i.e. policies for a
+// sandbox that reduces the Linux kernel's attack surface. Given their
+// nature, they don't have any clear semantics and are completely
+// "implementation-defined".
+
+namespace sandbox {
+
+// Stateless collection of predicates that classify a raw syscall number into
+// named sets. Each predicate returns true iff |sysno| belongs to that set on
+// the architecture being compiled for.
+class SANDBOX_EXPORT SyscallSets {
+ public:
+ static bool IsKill(int sysno);
+ static bool IsAllowedGettime(int sysno);
+ static bool IsCurrentDirectory(int sysno);
+ static bool IsUmask(int sysno);
+ // System calls that directly access the file system. They might acquire
+ // a new file descriptor or otherwise perform an operation directly
+ // via a path.
+ static bool IsFileSystem(int sysno);
+ static bool IsAllowedFileSystemAccessViaFd(int sysno);
+ static bool IsDeniedFileSystemAccessViaFd(int sysno);
+ static bool IsGetSimpleId(int sysno);
+ static bool IsProcessPrivilegeChange(int sysno);
+ static bool IsProcessGroupOrSession(int sysno);
+ static bool IsAllowedSignalHandling(int sysno);
+ static bool IsAllowedOperationOnFd(int sysno);
+ static bool IsKernelInternalApi(int sysno);
+ // This should be thought through in conjunction with IsFutex().
+ static bool IsAllowedProcessStartOrDeath(int sysno);
+ // It's difficult to restrict those, but there is attack surface here.
+ static bool IsAllowedFutex(int sysno);
+ static bool IsAllowedEpoll(int sysno);
+ static bool IsAllowedGetOrModifySocket(int sysno);
+ static bool IsDeniedGetOrModifySocket(int sysno);
+
+#if defined(__i386__) || defined(__mips__)
+ // Big multiplexing system call for sockets.
+ static bool IsSocketCall(int sysno);
+#endif
+
+#if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \
+ defined(__aarch64__)
+ static bool IsNetworkSocketInformation(int sysno);
+#endif
+
+ static bool IsAllowedAddressSpaceAccess(int sysno);
+ static bool IsAllowedGeneralIo(int sysno);
+ static bool IsPrctl(int sysno);
+ static bool IsSeccomp(int sysno);
+ static bool IsAllowedBasicScheduler(int sysno);
+ static bool IsAdminOperation(int sysno);
+ static bool IsKernelModule(int sysno);
+ static bool IsGlobalFSViewChange(int sysno);
+ static bool IsFsControl(int sysno);
+ static bool IsNuma(int sysno);
+ static bool IsMessageQueue(int sysno);
+ static bool IsGlobalProcessEnvironment(int sysno);
+ static bool IsDebug(int sysno);
+ static bool IsGlobalSystemStatus(int sysno);
+ static bool IsEventFd(int sysno);
+ // Asynchronous I/O API.
+ static bool IsAsyncIo(int sysno);
+ static bool IsKeyManagement(int sysno);
+#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
+ static bool IsSystemVSemaphores(int sysno);
+#endif
+#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
+ // These give a lot of ambient authority and bypass the setuid sandbox.
+ static bool IsSystemVSharedMemory(int sysno);
+#endif
+
+#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
+ static bool IsSystemVMessageQueue(int sysno);
+#endif
+
+#if defined(__i386__) || defined(__mips__)
+ // Big system V multiplexing system call.
+ static bool IsSystemVIpc(int sysno);
+#endif
+
+ static bool IsAnySystemV(int sysno);
+ static bool IsAdvancedScheduler(int sysno);
+ static bool IsInotify(int sysno);
+ static bool IsFaNotify(int sysno);
+ static bool IsTimer(int sysno);
+ static bool IsAdvancedTimer(int sysno);
+ static bool IsExtendedAttributes(int sysno);
+ static bool IsMisc(int sysno);
+#if defined(__arm__)
+ static bool IsArmPciConfig(int sysno);
+ static bool IsArmPrivate(int sysno);
+#endif // defined(__arm__)
+#if defined(__mips__)
+ static bool IsMipsPrivate(int sysno);
+ static bool IsMipsMisc(int sysno);
+#endif // defined(__mips__)
+ private:
+ // Pure predicate holder: never instantiated, copied, or assigned.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SyscallSets);
+};
+
+} // namespace sandbox.
+
+#endif // SANDBOX_LINUX_SECCOMP_BPF_HELPERS_SYSCALL_SETS_H_
diff --git a/libchrome/sandbox/linux/seccomp-bpf/DEPS b/libchrome/sandbox/linux/seccomp-bpf/DEPS
new file mode 100644
index 0000000..149c463
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf/DEPS
@@ -0,0 +1,5 @@
+include_rules = [
+ "+sandbox/linux/bpf_dsl",
+ "+sandbox/linux/services",
+ "+sandbox/linux/system_headers",
+]
diff --git a/libchrome/sandbox/linux/seccomp-bpf/bpf_tester_compatibility_delegate.h b/libchrome/sandbox/linux/seccomp-bpf/bpf_tester_compatibility_delegate.h
new file mode 100644
index 0000000..a4315ba
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf/bpf_tester_compatibility_delegate.h
@@ -0,0 +1,56 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTER_COMPATIBILITY_DELEGATE_H_
+#define SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTER_COMPATIBILITY_DELEGATE_H_
+
+#include <memory>
+
+#include "base/macros.h"
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf_test_runner.h"
+
+namespace sandbox {
+
+// This templated class allows building a BPFTesterDelegate from a
+// deprecated-style BPF policy (that is a SyscallEvaluator function pointer,
+// instead of a SandboxBPFPolicy class), specified in |policy_function| and a
+// function pointer to a test in |test_function|.
+// This allows both the policy and the test function to take a pointer to an
+// object of type "Aux" as a parameter. This is used to implement the BPF_TEST
+// macro and should generally not be used directly.
+template <class Policy, class Aux>
+class BPFTesterCompatibilityDelegate : public BPFTesterDelegate {
+ public:
+ // Signature of the test body: receives a pointer to the shared Aux object.
+ typedef void (*TestFunction)(Aux*);
+
+ explicit BPFTesterCompatibilityDelegate(TestFunction test_function)
+ : aux_(), test_function_(test_function) {}
+
+ ~BPFTesterCompatibilityDelegate() override {}
+
+ std::unique_ptr<bpf_dsl::Policy> GetSandboxBPFPolicy() override {
+ // The current method is guaranteed to only run in the child process
+ // running the test. In this process, the current object is guaranteed
+ // to live forever. So it's ok to pass aux_pointer_for_policy_ to
+ // the policy, which could in turn pass it to the kernel via Trap().
+ return std::unique_ptr<bpf_dsl::Policy>(new Policy(&aux_));
+ }
+
+ void RunTestFunction() override {
+ // Run the actual test.
+ // The current object is guaranteed to live forever in the child process
+ // where this will run.
+ test_function_(&aux_);
+ }
+
+ private:
+ // Shared between the policy (via its constructor) and the test function.
+ Aux aux_;
+ TestFunction test_function_;
+
+ DISALLOW_COPY_AND_ASSIGN(BPFTesterCompatibilityDelegate);
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTER_COMPATIBILITY_DELEGATE_H_
diff --git a/libchrome/sandbox/linux/seccomp-bpf/bpf_tests.h b/libchrome/sandbox/linux/seccomp-bpf/bpf_tests.h
new file mode 100644
index 0000000..8b2b12a
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf/bpf_tests.h
@@ -0,0 +1,124 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTS_H__
+#define SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTS_H__
+
+#include <memory>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "sandbox/linux/seccomp-bpf/bpf_tester_compatibility_delegate.h"
+#include "sandbox/linux/tests/unit_tests.h"
+
+namespace sandbox {
+
+// BPF_TEST_C() is a special version of SANDBOX_TEST(). It runs a test function
+// in a sub-process, under a seccomp-bpf policy specified in
+// |bpf_policy_class_name| without failing on configurations that are allowed
+// to not support seccomp-bpf in their kernels.
+// This is the preferred format for new BPF tests. |bpf_policy_class_name| is a
+// class name (which will be default-constructed) that implements the
+// Policy interface.
+// The test function's body can simply follow. Test functions should use
+// the BPF_ASSERT macros defined below, not GTEST's macros. The use of
+// CHECK* macros is supported but less robust.
+#define BPF_TEST_C(test_case_name, test_name, bpf_policy_class_name) \
+ BPF_DEATH_TEST_C( \
+ test_case_name, test_name, DEATH_SUCCESS(), bpf_policy_class_name)
+
+// Identical to BPF_TEST_C but allows specifying the nature of death.
+#define BPF_DEATH_TEST_C( \
+ test_case_name, test_name, death, bpf_policy_class_name) \
+ void BPF_TEST_C_##test_name(); \
+ TEST(test_case_name, DISABLE_ON_TSAN(test_name)) { \
+ sandbox::SandboxBPFTestRunner bpf_test_runner( \
+ new sandbox::BPFTesterSimpleDelegate<bpf_policy_class_name>( \
+ BPF_TEST_C_##test_name)); \
+ sandbox::UnitTests::RunTestInProcess(&bpf_test_runner, death); \
+ } \
+ void BPF_TEST_C_##test_name()
+
+// This form of BPF_TEST is a little verbose and should be reserved for complex
+// tests where a lot of control is required.
+// |bpf_tester_delegate_class| must be a classname implementing the
+// BPFTesterDelegate interface.
+#define BPF_TEST_D(test_case_name, test_name, bpf_tester_delegate_class) \
+ BPF_DEATH_TEST_D( \
+ test_case_name, test_name, DEATH_SUCCESS(), bpf_tester_delegate_class)
+
+// Identical to BPF_TEST_D but allows specifying the nature of death.
+#define BPF_DEATH_TEST_D( \
+ test_case_name, test_name, death, bpf_tester_delegate_class) \
+ TEST(test_case_name, DISABLE_ON_TSAN(test_name)) { \
+ sandbox::SandboxBPFTestRunner bpf_test_runner( \
+ new bpf_tester_delegate_class()); \
+ sandbox::UnitTests::RunTestInProcess(&bpf_test_runner, death); \
+ }
+
+// Assertions are handled exactly the same as with a normal SANDBOX_TEST()
+#define BPF_ASSERT SANDBOX_ASSERT
+#define BPF_ASSERT_EQ(x, y) BPF_ASSERT((x) == (y))
+#define BPF_ASSERT_NE(x, y) BPF_ASSERT((x) != (y))
+#define BPF_ASSERT_LT(x, y) BPF_ASSERT((x) < (y))
+#define BPF_ASSERT_GT(x, y) BPF_ASSERT((x) > (y))
+#define BPF_ASSERT_LE(x, y) BPF_ASSERT((x) <= (y))
+#define BPF_ASSERT_GE(x, y) BPF_ASSERT((x) >= (y))
+
+// This form of BPF_TEST is now discouraged (but still allowed) in favor of
+// BPF_TEST_D and BPF_TEST_C.
+// The |policy| parameter should be a Policy subclass.
+// BPF_TEST() takes a C++ data type as a fourth parameter. A variable
+// of this type will be allocated and a pointer to it will be
+// available within the test function as "BPF_AUX". The pointer will
+// also be passed as an argument to the policy's constructor. Policies
+// would typically use it as an argument to SandboxBPF::Trap(), if
+// they want to communicate data between the BPF_TEST() and a Trap()
+// function. The life-time of this object is the same as the life-time
+// of the process running under the seccomp-bpf policy.
+// |aux| must not be void.
+#define BPF_TEST(test_case_name, test_name, policy, aux) \
+ BPF_DEATH_TEST(test_case_name, test_name, DEATH_SUCCESS(), policy, aux)
+
+// A BPF_DEATH_TEST is just the same as a BPF_TEST, but it assumes that the
+// test will fail with a particular known error condition. Use the DEATH_XXX()
+// macros from unit_tests.h to specify the expected error condition.
+#define BPF_DEATH_TEST(test_case_name, test_name, death, policy, aux) \
+ void BPF_TEST_##test_name(aux* BPF_AUX); \
+ TEST(test_case_name, DISABLE_ON_TSAN(test_name)) { \
+ sandbox::SandboxBPFTestRunner bpf_test_runner( \
+ new sandbox::BPFTesterCompatibilityDelegate<policy, aux>( \
+ BPF_TEST_##test_name)); \
+ sandbox::UnitTests::RunTestInProcess(&bpf_test_runner, death); \
+ } \
+ void BPF_TEST_##test_name(aux* BPF_AUX)
+
+// This class takes a simple function pointer as a constructor parameter and a
+// class name as a template parameter to implement the BPFTesterDelegate
+// interface which can be used to build BPF unittests with
+// the SandboxBPFTestRunner class.
+template <class PolicyClass>
+class BPFTesterSimpleDelegate : public BPFTesterDelegate {
+ public:
+ explicit BPFTesterSimpleDelegate(void (*test_function)(void))
+ : test_function_(test_function) {}
+ ~BPFTesterSimpleDelegate() override {}
+
+ // Default-constructs the policy; PolicyClass must implement bpf_dsl::Policy.
+ std::unique_ptr<bpf_dsl::Policy> GetSandboxBPFPolicy() override {
+ return std::unique_ptr<bpf_dsl::Policy>(new PolicyClass());
+ }
+ void RunTestFunction() override {
+ DCHECK(test_function_);
+ test_function_();
+ }
+
+ private:
+ void (*test_function_)(void);
+ DISALLOW_COPY_AND_ASSIGN(BPFTesterSimpleDelegate);
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTS_H__
diff --git a/libchrome/sandbox/linux/seccomp-bpf/bpf_tests_unittest.cc b/libchrome/sandbox/linux/seccomp-bpf/bpf_tests_unittest.cc
new file mode 100644
index 0000000..c16cd72
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf/bpf_tests_unittest.cc
@@ -0,0 +1,155 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf/bpf_tests.h"
+
+#include <errno.h>
+#include <sys/ptrace.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <memory>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "sandbox/linux/bpf_dsl/bpf_dsl.h"
+#include "sandbox/linux/bpf_dsl/policy.h"
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+#include "sandbox/linux/services/syscall_wrappers.h"
+#include "sandbox/linux/system_headers/linux_syscalls.h"
+#include "sandbox/linux/tests/unit_tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using sandbox::bpf_dsl::Allow;
+using sandbox::bpf_dsl::Error;
+using sandbox::bpf_dsl::ResultExpr;
+
+namespace sandbox {
+
+namespace {
+
+// Trivial fixture type used as the BPF_AUX object in the tests below; holds
+// a known magic value so tests can verify the pointer they receive is live.
+class FourtyTwo {
+ public:
+ static const int kMagicValue = 42;
+ FourtyTwo() : value_(kMagicValue) {}
+ int value() { return value_; }
+
+ private:
+ int value_;
+ DISALLOW_COPY_AND_ASSIGN(FourtyTwo);
+};
+
+class EmptyClassTakingPolicy : public bpf_dsl::Policy {
+ public:
+ explicit EmptyClassTakingPolicy(FourtyTwo* fourty_two) {
+ BPF_ASSERT(fourty_two);
+ BPF_ASSERT(FourtyTwo::kMagicValue == fourty_two->value());
+ }
+ ~EmptyClassTakingPolicy() override {}
+
+ ResultExpr EvaluateSyscall(int sysno) const override {
+ DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
+ return Allow();
+ }
+};
+
+// Verifies that BPF_AUX points at a live, initialized aux object inside the
+// sandboxed test body.
+BPF_TEST(BPFTest,
+ BPFAUXPointsToClass,
+ EmptyClassTakingPolicy,
+ FourtyTwo /* *BPF_AUX */) {
+ // BPF_AUX should point to an instance of FourtyTwo.
+ BPF_ASSERT(BPF_AUX);
+ BPF_ASSERT(FourtyTwo::kMagicValue == BPF_AUX->value());
+}
+
+// Intentionally empty test body; used only to construct delegates in the
+// leak test below without running anything.
+void DummyTestFunction(FourtyTwo *fourty_two) {
+}
+
+TEST(BPFTest, BPFTesterCompatibilityDelegateLeakTest) {
+ // Don't do anything, simply gives dynamic tools an opportunity to detect
+ // leaks.
+ {
+ // Stack-allocated delegate: destruction must not leak.
+ BPFTesterCompatibilityDelegate<EmptyClassTakingPolicy, FourtyTwo>
+ simple_delegate(DummyTestFunction);
+ }
+ {
+ // Test polymorphism.
+ std::unique_ptr<BPFTesterDelegate> simple_delegate(
+ new BPFTesterCompatibilityDelegate<EmptyClassTakingPolicy, FourtyTwo>(
+ DummyTestFunction));
+ }
+}
+
+// Policy that fails ptrace() with ENOSYS and allows everything else; also
+// asserts (via the recorded pid) that it is constructed, evaluated and
+// destroyed in the same process.
+class EnosysPtracePolicy : public bpf_dsl::Policy {
+ public:
+ EnosysPtracePolicy() { my_pid_ = sys_getpid(); }
+ ~EnosysPtracePolicy() override {
+ // Policies should be able to bind with the process on which they are
+ // created. They should never be created in a parent process.
+ BPF_ASSERT_EQ(my_pid_, sys_getpid());
+ }
+
+ ResultExpr EvaluateSyscall(int system_call_number) const override {
+ CHECK(SandboxBPF::IsValidSyscallNumber(system_call_number));
+ if (system_call_number == __NR_ptrace) {
+ // The EvaluateSyscall function should run in the process that created
+ // the current object.
+ BPF_ASSERT_EQ(my_pid_, sys_getpid());
+ return Error(ENOSYS);
+ } else {
+ return Allow();
+ }
+ }
+
+ private:
+ pid_t my_pid_;
+ DISALLOW_COPY_AND_ASSIGN(EnosysPtracePolicy);
+};
+
+// Full BPFTesterDelegate implementation: installs EnosysPtracePolicy and
+// verifies that ptrace() then fails with ENOSYS.
+class BasicBPFTesterDelegate : public BPFTesterDelegate {
+ public:
+ BasicBPFTesterDelegate() {}
+ ~BasicBPFTesterDelegate() override {}
+
+ std::unique_ptr<bpf_dsl::Policy> GetSandboxBPFPolicy() override {
+ return std::unique_ptr<bpf_dsl::Policy>(new EnosysPtracePolicy());
+ }
+ void RunTestFunction() override {
+ errno = 0;
+ int ret = ptrace(PTRACE_TRACEME, -1, NULL, NULL);
+ BPF_ASSERT(-1 == ret);
+ BPF_ASSERT(ENOSYS == errno);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BasicBPFTesterDelegate);
+};
+
+// This is the most powerful and complex way to create a BPF test, but it
+// requires a full class definition (BasicBPFTesterDelegate).
+BPF_TEST_D(BPFTest, BPFTestWithDelegateClass, BasicBPFTesterDelegate);
+
+// This is the simplest form of BPF tests: inline body under the given policy.
+BPF_TEST_C(BPFTest, BPFTestWithInlineTest, EnosysPtracePolicy) {
+ errno = 0;
+ int ret = ptrace(PTRACE_TRACEME, -1, NULL, NULL);
+ BPF_ASSERT(-1 == ret);
+ BPF_ASSERT(ENOSYS == errno);
+}
+
+const char kHelloMessage[] = "Hello";
+
+// Death-test variant: the child is expected to log kHelloMessage and exit.
+BPF_DEATH_TEST_C(BPFTest,
+ BPFDeathTestWithInlineTest,
+ DEATH_MESSAGE(kHelloMessage),
+ EnosysPtracePolicy) {
+ LOG(ERROR) << kHelloMessage;
+ _exit(1);
+}
+
+} // namespace
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/seccomp-bpf/die.cc b/libchrome/sandbox/linux/seccomp-bpf/die.cc
new file mode 100644
index 0000000..3baf1f1
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf/die.cc
@@ -0,0 +1,93 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf/die.h"
+
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <string>
+
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "sandbox/linux/seccomp-bpf/syscall.h"
+#include "sandbox/linux/services/syscall_wrappers.h"
+#include "sandbox/linux/system_headers/linux_signal.h"
+
+namespace sandbox {
+
+void Die::ExitGroup() {
+ // exit_group() should exit our program. After all, it is defined as a
+ // function that doesn't return. But things can theoretically go wrong.
+ // Especially, since we are dealing with system call filters. Continuing
+ // execution would be very bad in most cases where ExitGroup() gets called.
+ // So, we'll try a few other strategies too.
+ Syscall::Call(__NR_exit_group, 1);
+
+ // We have no idea what our run-time environment looks like. So, signal
+ // handlers might or might not do the right thing. Try to reset settings
+ // to a defined state; but we have no way to verify whether we actually
+ // succeeded in doing so. Nonetheless, triggering a fatal signal could help
+ // us terminate.
+ struct sigaction sa = {};
+ sa.sa_handler = LINUX_SIG_DFL;
+ sa.sa_flags = LINUX_SA_RESTART;
+ sys_sigaction(LINUX_SIGSEGV, &sa, nullptr);
+ Syscall::Call(__NR_prctl, PR_SET_DUMPABLE, (void*)0, (void*)0, (void*)0);
+ // Deliberate null dereference to raise SIGSEGV if exit_group() failed.
+ if (*(volatile char*)0) {
+ }
+
+ // If there is no way for us to ask for the program to exit, the next
+ // best thing we can do is to loop indefinitely. Maybe, somebody will notice
+ // and file a bug...
+ // We in fact retry the system call inside of our loop so that it will
+ // stand out when somebody tries to diagnose the problem by using "strace".
+ for (;;) {
+ Syscall::Call(__NR_exit_group, 1);
+ }
+}
+
+// Backend of SANDBOX_DIE(): logs the message (LOG(FATAL) unless simple-exit
+// mode is enabled), then terminates via ExitGroup(). Never returns.
+void Die::SandboxDie(const char* msg, const char* file, int line) {
+ if (simple_exit_) {
+ LogToStderr(msg, file, line);
+ } else {
+ logging::LogMessage(file, line, logging::LOG_FATAL).stream() << msg;
+ }
+ ExitGroup();
+}
+
+// Backend of RAW_SANDBOX_DIE(): async-signal-safe logging via RAW_LOG, then
+// terminate. A null |msg| is logged as an empty string.
+void Die::RawSandboxDie(const char* msg) {
+ if (!msg)
+ msg = "";
+ RAW_LOG(FATAL, msg);
+ ExitGroup();
+}
+
+// Backend of SANDBOX_INFO(): informational log line, suppressible via
+// SuppressInfoMessages().
+void Die::SandboxInfo(const char* msg, const char* file, int line) {
+ if (!suppress_info_) {
+ logging::LogMessage(file, line, logging::LOG_INFO).stream() << msg;
+ }
+}
+
+// Writes "file:line:msg\n" directly to fd 2 via the raw write syscall,
+// bypassing the logging subsystem. A null |msg| writes nothing.
+void Die::LogToStderr(const char* msg, const char* file, int line) {
+ if (msg) {
+ char buf[40];
+ snprintf(buf, sizeof(buf), "%d", line);
+ std::string s = std::string(file) + ":" + buf + ":" + msg + "\n";
+
+ // No need to loop. Short write()s are unlikely and if they happen we
+ // probably prefer them over a loop that blocks.
+ ignore_result(
+ HANDLE_EINTR(Syscall::Call(__NR_write, 2, s.c_str(), s.length())));
+ }
+}
+
+// Static flag storage; toggled by EnableSimpleExit()/SuppressInfoMessages().
+bool Die::simple_exit_ = false;
+bool Die::suppress_info_ = false;
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/seccomp-bpf/die.h b/libchrome/sandbox/linux/seccomp-bpf/die.h
new file mode 100644
index 0000000..b3f3f72
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf/die.h
@@ -0,0 +1,68 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_DIE_H__
+#define SANDBOX_LINUX_SECCOMP_BPF_DIE_H__
+
+#include "base/macros.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+
+// This is the main API for using this file. Prints an error message and
+// exits with a fatal error. This is not async-signal safe.
+#define SANDBOX_DIE(m) sandbox::Die::SandboxDie(m, __FILE__, __LINE__)
+
+// An async signal safe version of the same API. Won't print the filename
+// and line numbers.
+#define RAW_SANDBOX_DIE(m) sandbox::Die::RawSandboxDie(m)
+
+// Adds an informational message to the log file or stderr as appropriate.
+#define SANDBOX_INFO(m) sandbox::Die::SandboxInfo(m, __FILE__, __LINE__)
+
+// Static-only helper for terminating (and logging from) a sandboxed process
+// even when the active syscall filter blocks the usual exit paths.
+class SANDBOX_EXPORT Die {
+ public:
+ // Terminate the program, even if the current sandbox policy prevents some
+ // of the more commonly used functions used for exiting.
+ // Most users would want to call SANDBOX_DIE() instead, as it logs extra
+ // information. But calling ExitGroup() is correct and in some rare cases
+ // preferable. So, we make it part of the public API.
+ static void ExitGroup() __attribute__((noreturn));
+
+ // This method gets called by SANDBOX_DIE(). There is normally no reason
+ // to call it directly unless you are defining your own exiting macro.
+ static void SandboxDie(const char* msg, const char* file, int line)
+ __attribute__((noreturn));
+
+ // Async-signal-safe counterpart, called by RAW_SANDBOX_DIE().
+ static void RawSandboxDie(const char* msg) __attribute__((noreturn));
+
+ // This method gets called by SANDBOX_INFO(). There is normally no reason
+ // to call it directly unless you are defining your own logging macro.
+ static void SandboxInfo(const char* msg, const char* file, int line);
+
+ // Writes a message to stderr. Used as a fall-back choice, if we don't have
+ // any other way to report an error.
+ static void LogToStderr(const char* msg, const char* file, int line);
+
+ // We generally want to run all exit handlers. This means, on SANDBOX_DIE()
+ // we should be calling LOG(FATAL). But there are some situations where
+ // we just need to print a message and then terminate. This would typically
+ // happen in cases where we consume the error message internally (e.g. in
+ // unit tests or in the supportsSeccompSandbox() method).
+ static void EnableSimpleExit() { simple_exit_ = true; }
+
+ // Sometimes we need to disable all informational messages (e.g. from within
+ // unittests).
+ static void SuppressInfoMessages(bool flag) { suppress_info_ = flag; }
+
+ private:
+ static bool simple_exit_;
+ static bool suppress_info_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Die);
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SECCOMP_BPF_DIE_H__
diff --git a/libchrome/sandbox/linux/seccomp-bpf/sandbox_bpf.cc b/libchrome/sandbox/linux/seccomp-bpf/sandbox_bpf.cc
new file mode 100644
index 0000000..4d8d436
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf/sandbox_bpf.cc
@@ -0,0 +1,279 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+
+#include <errno.h>
+#include <stdint.h>
+#include <sys/prctl.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "base/compiler_specific.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/third_party/valgrind/valgrind.h"
+#include "sandbox/linux/bpf_dsl/bpf_dsl.h"
+#include "sandbox/linux/bpf_dsl/codegen.h"
+#include "sandbox/linux/bpf_dsl/policy.h"
+#include "sandbox/linux/bpf_dsl/policy_compiler.h"
+#include "sandbox/linux/bpf_dsl/seccomp_macros.h"
+#include "sandbox/linux/bpf_dsl/syscall_set.h"
+#include "sandbox/linux/seccomp-bpf/die.h"
+#include "sandbox/linux/seccomp-bpf/syscall.h"
+#include "sandbox/linux/seccomp-bpf/trap.h"
+#include "sandbox/linux/services/proc_util.h"
+#include "sandbox/linux/services/syscall_wrappers.h"
+#include "sandbox/linux/services/thread_helpers.h"
+#include "sandbox/linux/system_headers/linux_filter.h"
+#include "sandbox/linux/system_headers/linux_seccomp.h"
+#include "sandbox/linux/system_headers/linux_syscalls.h"
+
+namespace sandbox {
+
+namespace {
+
+bool IsRunningOnValgrind() { return RUNNING_ON_VALGRIND; }
+
+bool IsSingleThreaded(int proc_fd) {
+ return ThreadHelpers::IsSingleThreaded(proc_fd);
+}
+
+// Check if the kernel supports seccomp-filter (a.k.a. seccomp mode 2) via
+// prctl().
+bool KernelSupportsSeccompBPF() {
+ errno = 0;
+ const int rv = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, nullptr);
+
+ if (rv == -1 && EFAULT == errno) {
+ return true;
+ }
+ return false;
+}
+
+// LG introduced a buggy syscall, sys_set_media_ext, with the same number as
+// seccomp. Return true if the current kernel has this buggy syscall.
+//
+// We want this to work with upcoming versions of seccomp, so we pass bogus
+// flags that are unlikely to ever be used by the kernel. A normal kernel would
+// return -EINVAL, but a buggy LG kernel would return 1.
+bool KernelHasLGBug() {
+#if defined(OS_ANDROID)
+  // sys_set_media_ext will see this as NULL, which should be a safe (non-crashing)
+ // way to invoke it. A genuine seccomp syscall will see it as
+ // SECCOMP_SET_MODE_STRICT.
+ const unsigned int operation = 0;
+ // Chosen by fair dice roll. Guaranteed to be random.
+ const unsigned int flags = 0xf7a46a5c;
+ const int rv = sys_seccomp(operation, flags, nullptr);
+ // A genuine kernel would return -EINVAL (which would set rv to -1 and errno
+ // to EINVAL), or at the very least return some kind of error (which would
+ // set rv to -1). Any other behavior indicates that whatever code received
+ // our syscall was not the real seccomp.
+ if (rv != -1) {
+ return true;
+ }
+#endif // defined(OS_ANDROID)
+
+ return false;
+}
+
+// Check if the kernel supports seccomp-filter via the seccomp system call
+// and the TSYNC feature to enable seccomp on all threads.
+bool KernelSupportsSeccompTsync() {
+ if (KernelHasLGBug()) {
+ return false;
+ }
+
+ errno = 0;
+ const int rv =
+ sys_seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, nullptr);
+
+ if (rv == -1 && errno == EFAULT) {
+ return true;
+ } else {
+ // TODO(jln): turn these into DCHECK after 417888 is considered fixed.
+ CHECK_EQ(-1, rv);
+ CHECK(ENOSYS == errno || EINVAL == errno);
+ return false;
+ }
+}
+
+uint64_t EscapePC() {
+ intptr_t rv = Syscall::Call(-1);
+ if (rv == -1 && errno == ENOSYS) {
+ return 0;
+ }
+ return static_cast<uint64_t>(static_cast<uintptr_t>(rv));
+}
+
+intptr_t SandboxPanicTrap(const struct arch_seccomp_data&, void* aux) {
+ SANDBOX_DIE(static_cast<const char*>(aux));
+}
+
+bpf_dsl::ResultExpr SandboxPanic(const char* error) {
+ return bpf_dsl::Trap(SandboxPanicTrap, error);
+}
+
+} // namespace
+
+SandboxBPF::SandboxBPF(bpf_dsl::Policy* policy)
+ : proc_fd_(), sandbox_has_started_(false), policy_(policy) {
+}
+
+SandboxBPF::~SandboxBPF() {
+}
+
+// static
+bool SandboxBPF::SupportsSeccompSandbox(SeccompLevel level) {
+ // Never pretend to support seccomp with Valgrind, as it
+ // throws the tool off.
+ if (IsRunningOnValgrind()) {
+ return false;
+ }
+
+ switch (level) {
+ case SeccompLevel::SINGLE_THREADED:
+ return KernelSupportsSeccompBPF();
+ case SeccompLevel::MULTI_THREADED:
+ return KernelSupportsSeccompTsync();
+ }
+ NOTREACHED();
+ return false;
+}
+
+bool SandboxBPF::StartSandbox(SeccompLevel seccomp_level) {
+ DCHECK(policy_);
+ CHECK(seccomp_level == SeccompLevel::SINGLE_THREADED ||
+ seccomp_level == SeccompLevel::MULTI_THREADED);
+
+ if (sandbox_has_started_) {
+ SANDBOX_DIE(
+ "Cannot repeatedly start sandbox. Create a separate Sandbox "
+ "object instead.");
+ return false;
+ }
+
+ if (!proc_fd_.is_valid()) {
+ SetProcFd(ProcUtil::OpenProc());
+ }
+
+ const bool supports_tsync = KernelSupportsSeccompTsync();
+
+ if (seccomp_level == SeccompLevel::SINGLE_THREADED) {
+ // Wait for /proc/self/task/ to update if needed and assert the
+ // process is single threaded.
+ ThreadHelpers::AssertSingleThreaded(proc_fd_.get());
+ } else if (seccomp_level == SeccompLevel::MULTI_THREADED) {
+ if (IsSingleThreaded(proc_fd_.get())) {
+ SANDBOX_DIE("Cannot start sandbox; "
+ "process may be single-threaded when reported as not");
+ return false;
+ }
+ if (!supports_tsync) {
+ SANDBOX_DIE("Cannot start sandbox; kernel does not support synchronizing "
+ "filters for a threadgroup");
+ return false;
+ }
+ }
+
+ // We no longer need access to any files in /proc. We want to do this
+ // before installing the filters, just in case that our policy denies
+ // close().
+ if (proc_fd_.is_valid()) {
+ proc_fd_.reset();
+ }
+
+ // Install the filters.
+ InstallFilter(supports_tsync ||
+ seccomp_level == SeccompLevel::MULTI_THREADED);
+
+ return true;
+}
+
+void SandboxBPF::SetProcFd(base::ScopedFD proc_fd) {
+ proc_fd_.swap(proc_fd);
+}
+
+// static
+bool SandboxBPF::IsValidSyscallNumber(int sysnum) {
+ return SyscallSet::IsValid(sysnum);
+}
+
+// static
+bool SandboxBPF::IsRequiredForUnsafeTrap(int sysno) {
+ return bpf_dsl::PolicyCompiler::IsRequiredForUnsafeTrap(sysno);
+}
+
+// static
+intptr_t SandboxBPF::ForwardSyscall(const struct arch_seccomp_data& args) {
+ return Syscall::Call(
+ args.nr, static_cast<intptr_t>(args.args[0]),
+ static_cast<intptr_t>(args.args[1]), static_cast<intptr_t>(args.args[2]),
+ static_cast<intptr_t>(args.args[3]), static_cast<intptr_t>(args.args[4]),
+ static_cast<intptr_t>(args.args[5]));
+}
+
+CodeGen::Program SandboxBPF::AssembleFilter() {
+ DCHECK(policy_);
+
+ bpf_dsl::PolicyCompiler compiler(policy_.get(), Trap::Registry());
+ if (Trap::SandboxDebuggingAllowedByUser()) {
+ compiler.DangerousSetEscapePC(EscapePC());
+ }
+ compiler.SetPanicFunc(SandboxPanic);
+ return compiler.Compile();
+}
+
+void SandboxBPF::InstallFilter(bool must_sync_threads) {
+ // We want to be very careful in not imposing any requirements on the
+ // policies that are set with SetSandboxPolicy(). This means, as soon as
+ // the sandbox is active, we shouldn't be relying on libraries that could
+ // be making system calls. This, for example, means we should avoid
+ // using the heap and we should avoid using STL functions.
+ // Temporarily copy the contents of the "program" vector into a
+ // stack-allocated array; and then explicitly destroy that object.
+ // This makes sure we don't ex- or implicitly call new/delete after we
+ // installed the BPF filter program in the kernel. Depending on the
+ // system memory allocator that is in effect, these operators can result
+ // in system calls to things like munmap() or brk().
+ CodeGen::Program program = AssembleFilter();
+
+ struct sock_filter bpf[program.size()];
+ const struct sock_fprog prog = {static_cast<unsigned short>(program.size()),
+ bpf};
+ memcpy(bpf, &program[0], sizeof(bpf));
+ CodeGen::Program().swap(program); // vector swap trick
+
+ // Make an attempt to release memory that is no longer needed here, rather
+  // than in the destructor. Try to presume as little as possible about what
+  // will be possible to do in the new (sandboxed) execution environment.
+ policy_.reset();
+
+ if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
+ SANDBOX_DIE("Kernel refuses to enable no-new-privs");
+ }
+
+ // Install BPF filter program. If the thread state indicates multi-threading
+  // support, then the kernel has the seccomp system call. Otherwise, fall
+ // back on prctl, which requires the process to be single-threaded.
+ if (must_sync_threads) {
+ int rv =
+ sys_seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &prog);
+ if (rv) {
+ SANDBOX_DIE(
+ "Kernel refuses to turn on and synchronize threads for BPF filters");
+ }
+ } else {
+ if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
+ SANDBOX_DIE("Kernel refuses to turn on BPF filters");
+ }
+ }
+
+ sandbox_has_started_ = true;
+}
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/seccomp-bpf/sandbox_bpf.h b/libchrome/sandbox/linux/seccomp-bpf/sandbox_bpf.h
new file mode 100644
index 0000000..1637b26
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf/sandbox_bpf.h
@@ -0,0 +1,115 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_SANDBOX_BPF_H_
+#define SANDBOX_LINUX_SECCOMP_BPF_SANDBOX_BPF_H_
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/files/scoped_file.h"
+#include "base/macros.h"
+#include "sandbox/linux/bpf_dsl/codegen.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+struct arch_seccomp_data;
+namespace bpf_dsl {
+class Policy;
+}
+
+// This class can be used to apply a syscall sandboxing policy expressed in a
+// bpf_dsl::Policy object to the current process.
+// Syscall sandboxing policies get inherited by subprocesses and, once applied,
+// can never be removed for the lifetime of the process.
+class SANDBOX_EXPORT SandboxBPF {
+ public:
+ enum class SeccompLevel {
+ SINGLE_THREADED,
+ MULTI_THREADED,
+ };
+
+ // Ownership of |policy| is transfered here to the sandbox object.
+ // nullptr is allowed for unit tests.
+ explicit SandboxBPF(bpf_dsl::Policy* policy);
+ // NOTE: Setting a policy and starting the sandbox is a one-way operation.
+ // The kernel does not provide any option for unloading a loaded sandbox. The
+ // sandbox remains engaged even when the object is destructed.
+ ~SandboxBPF();
+
+ // Detect if the kernel supports the specified seccomp level.
+ // See StartSandbox() for a description of these.
+ static bool SupportsSeccompSandbox(SeccompLevel level);
+
+ // This is the main public entry point. It sets up the resources needed by
+ // the sandbox, and enters Seccomp mode.
+ // The calling process must provide a |level| to tell the sandbox which type
+ // of kernel support it should engage.
+ // SINGLE_THREADED will only sandbox the calling thread. Since it would be a
+ // security risk, the sandbox will also check that the current process is
+ // single threaded and crash if it isn't the case.
+ // MULTI_THREADED requires more recent kernel support and allows to sandbox
+ // all the threads of the current process. Be mindful of potential races,
+ // with other threads using disallowed system calls either before or after
+ // the sandbox is engaged.
+ //
+ // It is possible to stack multiple sandboxes by creating separate "Sandbox"
+ // objects and calling "StartSandbox()" on each of them. Please note, that
+ // this requires special care, though, as newly stacked sandboxes can never
+ // relax restrictions imposed by earlier sandboxes. Furthermore, installing
+ // a new policy requires making system calls, that might already be
+ // disallowed.
+ // Finally, stacking does add more kernel overhead than having a single
+ // combined policy. So, it should only be used if there are no alternatives.
+ bool StartSandbox(SeccompLevel level) WARN_UNUSED_RESULT;
+
+ // The sandbox needs to be able to access files in "/proc/self/". If
+ // this directory is not accessible when "StartSandbox()" gets called, the
+ // caller must provide an already opened file descriptor by calling
+ // "SetProcFd()".
+ // The sandbox becomes the new owner of this file descriptor and will
+ // close it when "StartSandbox()" executes or when the sandbox object
+ // disappears.
+ void SetProcFd(base::ScopedFD proc_fd);
+
+ // Checks whether a particular system call number is valid on the current
+ // architecture.
+ static bool IsValidSyscallNumber(int sysnum);
+
+ // UnsafeTraps require some syscalls to always be allowed.
+ // This helper function returns true for these calls.
+ static bool IsRequiredForUnsafeTrap(int sysno);
+
+ // From within an UnsafeTrap() it is often useful to be able to execute
+ // the system call that triggered the trap. The ForwardSyscall() method
+ // makes this easy. It is more efficient than calling glibc's syscall()
+ // function, as it avoid the extra round-trip to the signal handler. And
+ // it automatically does the correct thing to report kernel-style error
+ // conditions, rather than setting errno. See the comments for TrapFnc for
+ // details. In other words, the return value from ForwardSyscall() is
+ // directly suitable as a return value for a trap handler.
+ static intptr_t ForwardSyscall(const struct arch_seccomp_data& args);
+
+ private:
+ friend class SandboxBPFTestRunner;
+
+ // Assembles a BPF filter program from the current policy. After calling this
+ // function, you must not call any other sandboxing function.
+ CodeGen::Program AssembleFilter();
+
+ // Assembles and installs a filter based on the policy that has previously
+ // been configured with SetSandboxPolicy().
+ void InstallFilter(bool must_sync_threads);
+
+ base::ScopedFD proc_fd_;
+ bool sandbox_has_started_;
+ std::unique_ptr<bpf_dsl::Policy> policy_;
+
+ DISALLOW_COPY_AND_ASSIGN(SandboxBPF);
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SECCOMP_BPF_SANDBOX_BPF_H_
diff --git a/libchrome/sandbox/linux/seccomp-bpf/syscall.cc b/libchrome/sandbox/linux/seccomp-bpf/syscall.cc
new file mode 100644
index 0000000..4d55936
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf/syscall.cc
@@ -0,0 +1,433 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf/syscall.h"
+
+#include <errno.h>
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "sandbox/linux/bpf_dsl/seccomp_macros.h"
+
+namespace sandbox {
+
+namespace {
+
+#if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \
+ defined(ARCH_CPU_MIPS_FAMILY)
+// Number that's not currently used by any Linux kernel ABIs.
+const int kInvalidSyscallNumber = 0x351d3;
+#else
+#error Unrecognized architecture
+#endif
+
+asm(// We need to be able to tell the kernel exactly where we made a
+ // system call. The C++ compiler likes to sometimes clone or
+ // inline code, which would inadvertently end up duplicating
+ // the entry point.
+ // "gcc" can suppress code duplication with suitable function
+ // attributes, but "clang" doesn't have this ability.
+ // The "clang" developer mailing list suggested that the correct
+ // and portable solution is a file-scope assembly block.
+ // N.B. We do mark our code as a proper function so that backtraces
+ // work correctly. But we make absolutely no attempt to use the
+ // ABI's calling conventions for passing arguments. We will only
+ // ever be called from assembly code and thus can pick more
+ // suitable calling conventions.
+#if defined(__i386__)
+ ".text\n"
+ ".align 16, 0x90\n"
+ ".type SyscallAsm, @function\n"
+ "SyscallAsm:.cfi_startproc\n"
+ // Check if "%eax" is negative. If so, do not attempt to make a
+ // system call. Instead, compute the return address that is visible
+ // to the kernel after we execute "int $0x80". This address can be
+ // used as a marker that BPF code inspects.
+ "test %eax, %eax\n"
+ "jge 1f\n"
+  // Always make sure that our code is position-independent, or
+ // address space randomization might not work on i386. This means,
+ // we can't use "lea", but instead have to rely on "call/pop".
+ "call 0f; .cfi_adjust_cfa_offset 4\n"
+ "0:pop %eax; .cfi_adjust_cfa_offset -4\n"
+ "addl $2f-0b, %eax\n"
+ "ret\n"
+ // Save register that we don't want to clobber. On i386, we need to
+  // save relatively aggressively, as there are a couple of registers
+ // that are used internally (e.g. %ebx for position-independent
+ // code, and %ebp for the frame pointer), and as we need to keep at
+ // least a few registers available for the register allocator.
+ "1:push %esi; .cfi_adjust_cfa_offset 4; .cfi_rel_offset esi, 0\n"
+ "push %edi; .cfi_adjust_cfa_offset 4; .cfi_rel_offset edi, 0\n"
+ "push %ebx; .cfi_adjust_cfa_offset 4; .cfi_rel_offset ebx, 0\n"
+ "push %ebp; .cfi_adjust_cfa_offset 4; .cfi_rel_offset ebp, 0\n"
+ // Copy entries from the array holding the arguments into the
+ // correct CPU registers.
+ "movl 0(%edi), %ebx\n"
+ "movl 4(%edi), %ecx\n"
+ "movl 8(%edi), %edx\n"
+ "movl 12(%edi), %esi\n"
+ "movl 20(%edi), %ebp\n"
+ "movl 16(%edi), %edi\n"
+ // Enter the kernel.
+ "int $0x80\n"
+ // This is our "magic" return address that the BPF filter sees.
+ "2:"
+ // Restore any clobbered registers that we didn't declare to the
+ // compiler.
+ "pop %ebp; .cfi_restore ebp; .cfi_adjust_cfa_offset -4\n"
+ "pop %ebx; .cfi_restore ebx; .cfi_adjust_cfa_offset -4\n"
+ "pop %edi; .cfi_restore edi; .cfi_adjust_cfa_offset -4\n"
+ "pop %esi; .cfi_restore esi; .cfi_adjust_cfa_offset -4\n"
+ "ret\n"
+ ".cfi_endproc\n"
+ "9:.size SyscallAsm, 9b-SyscallAsm\n"
+#elif defined(__x86_64__)
+ ".text\n"
+ ".align 16, 0x90\n"
+ ".type SyscallAsm, @function\n"
+ "SyscallAsm:.cfi_startproc\n"
+ // Check if "%rdi" is negative. If so, do not attempt to make a
+ // system call. Instead, compute the return address that is visible
+ // to the kernel after we execute "syscall". This address can be
+ // used as a marker that BPF code inspects.
+ "test %rdi, %rdi\n"
+ "jge 1f\n"
+ // Always make sure that our code is position-independent, or the
+ // linker will throw a hissy fit on x86-64.
+ "lea 2f(%rip), %rax\n"
+ "ret\n"
+ // Now we load the registers used to pass arguments to the system
+ // call: system call number in %rax, and arguments in %rdi, %rsi,
+ // %rdx, %r10, %r8, %r9. Note: These are all caller-save registers
+ // (only %rbx, %rbp, %rsp, and %r12-%r15 are callee-save), so no
+ // need to worry here about spilling registers or CFI directives.
+ "1:movq %rdi, %rax\n"
+ "movq 0(%rsi), %rdi\n"
+ "movq 16(%rsi), %rdx\n"
+ "movq 24(%rsi), %r10\n"
+ "movq 32(%rsi), %r8\n"
+ "movq 40(%rsi), %r9\n"
+ "movq 8(%rsi), %rsi\n"
+ // Enter the kernel.
+ "syscall\n"
+ // This is our "magic" return address that the BPF filter sees.
+ "2:ret\n"
+ ".cfi_endproc\n"
+ "9:.size SyscallAsm, 9b-SyscallAsm\n"
+#elif defined(__arm__)
+ // Throughout this file, we use the same mode (ARM vs. thumb)
+  // that the C++ compiler uses. This means, when transferring control
+ // from C++ to assembly code, we do not need to switch modes (e.g.
+ // by using the "bx" instruction). It also means that our assembly
+ // code should not be invoked directly from code that lives in
+ // other compilation units, as we don't bother implementing thumb
+ // interworking. That's OK, as we don't make any of the assembly
+ // symbols public. They are all local to this file.
+ ".text\n"
+ ".align 2\n"
+ ".type SyscallAsm, %function\n"
+#if defined(__thumb__)
+ ".thumb_func\n"
+#else
+ ".arm\n"
+#endif
+ "SyscallAsm:\n"
+#if !defined(__native_client_nonsfi__)
+  // .fnstart and .fnend pseudo operations create an unwind table.
+ // It also creates a reference to the symbol __aeabi_unwind_cpp_pr0, which
+ // is not provided by PNaCl toolchain. Disable it.
+ ".fnstart\n"
+#endif
+ "@ args = 0, pretend = 0, frame = 8\n"
+ "@ frame_needed = 1, uses_anonymous_args = 0\n"
+#if defined(__thumb__)
+ ".cfi_startproc\n"
+ "push {r7, lr}\n"
+ ".save {r7, lr}\n"
+ ".cfi_offset 14, -4\n"
+ ".cfi_offset 7, -8\n"
+ ".cfi_def_cfa_offset 8\n"
+#else
+ "stmfd sp!, {fp, lr}\n"
+ "add fp, sp, #4\n"
+#endif
+ // Check if "r0" is negative. If so, do not attempt to make a
+ // system call. Instead, compute the return address that is visible
+ // to the kernel after we execute "swi 0". This address can be
+ // used as a marker that BPF code inspects.
+ "cmp r0, #0\n"
+ "bge 1f\n"
+ "adr r0, 2f\n"
+ "b 2f\n"
+ // We declared (almost) all clobbered registers to the compiler. On
+ // ARM there is no particular register pressure. So, we can go
+ // ahead and directly copy the entries from the arguments array
+ // into the appropriate CPU registers.
+ "1:ldr r5, [r6, #20]\n"
+ "ldr r4, [r6, #16]\n"
+ "ldr r3, [r6, #12]\n"
+ "ldr r2, [r6, #8]\n"
+ "ldr r1, [r6, #4]\n"
+ "mov r7, r0\n"
+ "ldr r0, [r6, #0]\n"
+ // Enter the kernel
+ "swi 0\n"
+// Restore the frame pointer. Also restore the program counter from
+// the link register; this makes us return to the caller.
+#if defined(__thumb__)
+ "2:pop {r7, pc}\n"
+ ".cfi_endproc\n"
+#else
+ "2:ldmfd sp!, {fp, pc}\n"
+#endif
+#if !defined(__native_client_nonsfi__)
+ // Do not use .fnstart and .fnend for PNaCl toolchain. See above comment,
+ // for more details.
+ ".fnend\n"
+#endif
+ "9:.size SyscallAsm, 9b-SyscallAsm\n"
+#elif defined(__mips__)
+ ".text\n"
+ ".option pic2\n"
+ ".align 4\n"
+ ".global SyscallAsm\n"
+ ".type SyscallAsm, @function\n"
+ "SyscallAsm:.ent SyscallAsm\n"
+ ".frame $sp, 40, $ra\n"
+ ".set push\n"
+ ".set noreorder\n"
+ ".cpload $t9\n"
+ "addiu $sp, $sp, -40\n"
+ "sw $ra, 36($sp)\n"
+ // Check if "v0" is negative. If so, do not attempt to make a
+ // system call. Instead, compute the return address that is visible
+ // to the kernel after we execute "syscall". This address can be
+ // used as a marker that BPF code inspects.
+ "bgez $v0, 1f\n"
+ " nop\n"
+ // This is equivalent to "la $v0, 2f".
+ // LA macro has to be avoided since LLVM-AS has issue with LA in PIC mode
+ // https://llvm.org/bugs/show_bug.cgi?id=27644
+ "lw $v0, %got(2f)($gp)\n"
+ "addiu $v0, $v0, %lo(2f)\n"
+ "b 2f\n"
+ " nop\n"
+ // On MIPS first four arguments go to registers a0 - a3 and any
+ // argument after that goes to stack. We can go ahead and directly
+ // copy the entries from the arguments array into the appropriate
+ // CPU registers and on the stack.
+ "1:lw $a3, 28($a0)\n"
+ "lw $a2, 24($a0)\n"
+ "lw $a1, 20($a0)\n"
+ "lw $t0, 16($a0)\n"
+ "sw $a3, 28($sp)\n"
+ "sw $a2, 24($sp)\n"
+ "sw $a1, 20($sp)\n"
+ "sw $t0, 16($sp)\n"
+ "lw $a3, 12($a0)\n"
+ "lw $a2, 8($a0)\n"
+ "lw $a1, 4($a0)\n"
+ "lw $a0, 0($a0)\n"
+ // Enter the kernel
+ "syscall\n"
+ // This is our "magic" return address that the BPF filter sees.
+ // Restore the return address from the stack.
+ "2:lw $ra, 36($sp)\n"
+ "jr $ra\n"
+ " addiu $sp, $sp, 40\n"
+ ".set pop\n"
+ ".end SyscallAsm\n"
+ ".size SyscallAsm,.-SyscallAsm\n"
+#elif defined(__aarch64__)
+ ".text\n"
+ ".align 2\n"
+ ".type SyscallAsm, %function\n"
+ "SyscallAsm:\n"
+ ".cfi_startproc\n"
+ "cmp x0, #0\n"
+ "b.ge 1f\n"
+ "adr x0,2f\n"
+ "b 2f\n"
+ "1:ldr x5, [x6, #40]\n"
+ "ldr x4, [x6, #32]\n"
+ "ldr x3, [x6, #24]\n"
+ "ldr x2, [x6, #16]\n"
+ "ldr x1, [x6, #8]\n"
+ "mov x8, x0\n"
+ "ldr x0, [x6, #0]\n"
+ // Enter the kernel
+ "svc 0\n"
+ "2:ret\n"
+ ".cfi_endproc\n"
+ ".size SyscallAsm, .-SyscallAsm\n"
+#endif
+ ); // asm
+
+#if defined(__x86_64__)
+extern "C" {
+intptr_t SyscallAsm(intptr_t nr, const intptr_t args[6]);
+}
+#elif defined(__mips__)
+extern "C" {
+intptr_t SyscallAsm(intptr_t nr, const intptr_t args[8]);
+}
+#endif
+
+} // namespace
+
+intptr_t Syscall::InvalidCall() {
+ // Explicitly pass eight zero arguments just in case.
+ return Call(kInvalidSyscallNumber, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+intptr_t Syscall::Call(int nr,
+ intptr_t p0,
+ intptr_t p1,
+ intptr_t p2,
+ intptr_t p3,
+ intptr_t p4,
+ intptr_t p5,
+ intptr_t p6,
+ intptr_t p7) {
+ // We rely on "intptr_t" to be the exact size as a "void *". This is
+ // typically true, but just in case, we add a check. The language
+ // specification allows platforms some leeway in cases, where
+ // "sizeof(void *)" is not the same as "sizeof(void (*)())". We expect
+ // that this would only be an issue for IA64, which we are currently not
+ // planning on supporting. And it is even possible that this would work
+ // on IA64, but for lack of actual hardware, I cannot test.
+ static_assert(sizeof(void*) == sizeof(intptr_t),
+ "pointer types and intptr_t must be exactly the same size");
+
+ // TODO(nedeljko): Enable use of more than six parameters on architectures
+ // where that makes sense.
+#if defined(__mips__)
+ const intptr_t args[8] = {p0, p1, p2, p3, p4, p5, p6, p7};
+#else
+ DCHECK_EQ(p6, 0) << " Support for syscalls with more than six arguments not "
+ "added for this architecture";
+ DCHECK_EQ(p7, 0) << " Support for syscalls with more than six arguments not "
+ "added for this architecture";
+ const intptr_t args[6] = {p0, p1, p2, p3, p4, p5};
+#endif // defined(__mips__)
+
+// Invoke our file-scope assembly code. The constraints have been picked
+// carefully to match what the rest of the assembly code expects in input,
+// output, and clobbered registers.
+#if defined(__i386__)
+ intptr_t ret = nr;
+ asm volatile(
+ "call SyscallAsm\n"
+ // N.B. These are not the calling conventions normally used by the ABI.
+ : "=a"(ret)
+ : "0"(ret), "D"(args)
+ : "cc", "esp", "memory", "ecx", "edx");
+#elif defined(__x86_64__)
+ intptr_t ret = SyscallAsm(nr, args);
+#elif defined(__arm__)
+ intptr_t ret;
+ {
+ register intptr_t inout __asm__("r0") = nr;
+ register const intptr_t* data __asm__("r6") = args;
+ asm volatile(
+ "bl SyscallAsm\n"
+ // N.B. These are not the calling conventions normally used by the ABI.
+ : "=r"(inout)
+ : "0"(inout), "r"(data)
+ : "cc",
+ "lr",
+ "memory",
+ "r1",
+ "r2",
+ "r3",
+ "r4",
+ "r5"
+#if !defined(__thumb__)
+ // In thumb mode, we cannot use "r7" as a general purpose register, as
+ // it is our frame pointer. We have to manually manage and preserve
+ // it.
+ // In ARM mode, we have a dedicated frame pointer register and "r7" is
+ // thus available as a general purpose register. We don't preserve it,
+ // but instead mark it as clobbered.
+ ,
+ "r7"
+#endif // !defined(__thumb__)
+ );
+ ret = inout;
+ }
+#elif defined(__mips__)
+ int err_status;
+ intptr_t ret = Syscall::SandboxSyscallRaw(nr, args, &err_status);
+
+ if (err_status) {
+ // On error, MIPS returns errno from syscall instead of -errno.
+ // The purpose of this negation is for SandboxSyscall() to behave
+ // more like it would on other architectures.
+ ret = -ret;
+ }
+#elif defined(__aarch64__)
+ intptr_t ret;
+ {
+ register intptr_t inout __asm__("x0") = nr;
+ register const intptr_t* data __asm__("x6") = args;
+ asm volatile("bl SyscallAsm\n"
+ : "=r"(inout)
+ : "0"(inout), "r"(data)
+ : "memory", "x1", "x2", "x3", "x4", "x5", "x8", "x30");
+ ret = inout;
+ }
+
+#else
+#error "Unimplemented architecture"
+#endif
+ return ret;
+}
+
+void Syscall::PutValueInUcontext(intptr_t ret_val, ucontext_t* ctx) {
+#if defined(__mips__)
+  // The MIPS ABI states that on error the a3 CPU register has a non-zero
+  // value, and if there is no error, it should be zero.
+ if (ret_val <= -1 && ret_val >= -4095) {
+    // |ret_val| follows the Syscall::Call() convention of being -errno on
+ // errors. In order to write correct value to return register this sign
+ // needs to be changed back.
+ ret_val = -ret_val;
+ SECCOMP_PARM4(ctx) = 1;
+ } else
+ SECCOMP_PARM4(ctx) = 0;
+#endif
+ SECCOMP_RESULT(ctx) = static_cast<greg_t>(ret_val);
+}
+
+#if defined(__mips__)
+intptr_t Syscall::SandboxSyscallRaw(int nr,
+ const intptr_t* args,
+ intptr_t* err_ret) {
+ register intptr_t ret __asm__("v0") = nr;
+ register intptr_t syscallasm __asm__("t9") = (intptr_t) &SyscallAsm;
+ // a3 register becomes non zero on error.
+ register intptr_t err_stat __asm__("a3") = 0;
+ {
+ register const intptr_t* data __asm__("a0") = args;
+ asm volatile(
+ "jalr $t9\n"
+ " nop\n"
+ : "=r"(ret), "=r"(err_stat)
+ : "0"(ret),
+ "r"(data),
+ "r"(syscallasm)
+        // a2 is in the clobber list so inline assembly cannot change its
+ // value.
+ : "memory", "ra", "a2");
+ }
+
+ // Set an error status so it can be used outside of this function
+ *err_ret = err_stat;
+
+ return ret;
+}
+#endif // defined(__mips__)
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/seccomp-bpf/syscall.h b/libchrome/sandbox/linux/seccomp-bpf/syscall.h
new file mode 100644
index 0000000..ccfc88d
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf/syscall.h
@@ -0,0 +1,166 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_SYSCALL_H__
+#define SANDBOX_LINUX_SECCOMP_BPF_SYSCALL_H__
+
+#include <signal.h>
+#include <stdint.h>
+
+#include "base/macros.h"
+#include "sandbox/linux/system_headers/linux_signal.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+
+// This purely static class can be used to perform system calls with some
+// low-level control.
+class SANDBOX_EXPORT Syscall {
+ public:
+ // InvalidCall() invokes Call() with a platform-appropriate syscall
+ // number that is guaranteed to not be implemented (i.e., normally
+ // returns -ENOSYS).
+ // This is primarily meant to be useful for writing sandbox policy
+ // unit tests.
+ static intptr_t InvalidCall();
+
+ // System calls can take up to six parameters (up to eight on some
+ // architectures). Traditionally, glibc
+ // implements this property by using variadic argument lists. This works, but
+ // confuses modern tools such as valgrind, because we are nominally passing
+ // uninitialized data whenever we call through this function and pass less
+ // than the full six arguments.
+ // So, instead, we use C++'s template system to achieve a very similar
+ // effect. C++ automatically sets the unused parameters to zero for us, and
+ // it also does the correct type expansion (e.g. from 32bit to 64bit) where
+ // necessary.
+ // We have to use C-style cast operators as we want to be able to accept both
+ // integer and pointer types.
+ template <class T0,
+ class T1,
+ class T2,
+ class T3,
+ class T4,
+ class T5,
+ class T6,
+ class T7>
+ static inline intptr_t
+ Call(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4, T5 p5, T6 p6, T7 p7) {
+ return Call(nr,
+ (intptr_t)p0,
+ (intptr_t)p1,
+ (intptr_t)p2,
+ (intptr_t)p3,
+ (intptr_t)p4,
+ (intptr_t)p5,
+ (intptr_t)p6,
+ (intptr_t)p7);
+ }
+
+ template <class T0,
+ class T1,
+ class T2,
+ class T3,
+ class T4,
+ class T5,
+ class T6>
+ static inline intptr_t
+ Call(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4, T5 p5, T6 p6) {
+ return Call(nr,
+ (intptr_t)p0,
+ (intptr_t)p1,
+ (intptr_t)p2,
+ (intptr_t)p3,
+ (intptr_t)p4,
+ (intptr_t)p5,
+ (intptr_t)p6,
+ 0);
+ }
+
+ template <class T0, class T1, class T2, class T3, class T4, class T5>
+ static inline intptr_t
+ Call(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4, T5 p5) {
+ return Call(nr,
+ (intptr_t)p0,
+ (intptr_t)p1,
+ (intptr_t)p2,
+ (intptr_t)p3,
+ (intptr_t)p4,
+ (intptr_t)p5,
+ 0,
+ 0);
+ }
+
+ template <class T0, class T1, class T2, class T3, class T4>
+ static inline intptr_t Call(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4) {
+ return Call(nr, p0, p1, p2, p3, p4, 0, 0, 0);
+ }
+
+ template <class T0, class T1, class T2, class T3>
+ static inline intptr_t Call(int nr, T0 p0, T1 p1, T2 p2, T3 p3) {
+ return Call(nr, p0, p1, p2, p3, 0, 0, 0, 0);
+ }
+
+ template <class T0, class T1, class T2>
+ static inline intptr_t Call(int nr, T0 p0, T1 p1, T2 p2) {
+ return Call(nr, p0, p1, p2, 0, 0, 0, 0, 0);
+ }
+
+ template <class T0, class T1>
+ static inline intptr_t Call(int nr, T0 p0, T1 p1) {
+ return Call(nr, p0, p1, 0, 0, 0, 0, 0, 0);
+ }
+
+ template <class T0>
+ static inline intptr_t Call(int nr, T0 p0) {
+ return Call(nr, p0, 0, 0, 0, 0, 0, 0, 0);
+ }
+
+ static inline intptr_t Call(int nr) {
+ return Call(nr, 0, 0, 0, 0, 0, 0, 0, 0);
+ }
+
+ // Set the registers in |ctx| to match what they would be after a system call
+ // returning |ret_val|. |ret_val| must follow the Syscall::Call() convention
+ // of being -errno on errors.
+ static void PutValueInUcontext(intptr_t ret_val, ucontext_t* ctx);
+
+ private:
+ // This performs system call |nr| with the arguments p0 to p7 from a constant
+ // userland address, which is for instance observable by seccomp-bpf filters.
+ // The constant userland address from which these system calls are made will
+ // be returned if |nr| is passed as -1.
+ // On error, this function will return a value between -1 and -4095 which
+ // should be interpreted as -errno.
+ static intptr_t Call(int nr,
+ intptr_t p0,
+ intptr_t p1,
+ intptr_t p2,
+ intptr_t p3,
+ intptr_t p4,
+ intptr_t p5,
+ intptr_t p6,
+ intptr_t p7);
+
+#if defined(__mips__)
+ // This function basically does on MIPS what SandboxSyscall() is doing on
+ // other architectures. However, because of specificity of MIPS regarding
+ // handling syscall errors, SandboxSyscall() is made as a wrapper for this
+ // function in order for SandboxSyscall() to behave more like on other
+ // architectures on places where return value from SandboxSyscall() is used
+ // directly (like in most tests).
+  // The syscall "nr" is called with the arguments stored in the array that
+  // the pointer "args" points to, and whether or not an error occurred is
+  // reported back to SandboxSyscall() through err_stat.
+ static intptr_t SandboxSyscallRaw(int nr,
+ const intptr_t* args,
+ intptr_t* err_stat);
+#endif // defined(__mips__)
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Syscall);
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SECCOMP_BPF_SYSCALL_H__
diff --git a/libchrome/sandbox/linux/seccomp-bpf/syscall_unittest.cc b/libchrome/sandbox/linux/seccomp-bpf/syscall_unittest.cc
new file mode 100644
index 0000000..01336f9
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf/syscall_unittest.cc
@@ -0,0 +1,244 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf/syscall.h"
+
+#include <asm/unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <vector>
+
+#include "base/macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "build/build_config.h"
+#include "sandbox/linux/bpf_dsl/bpf_dsl.h"
+#include "sandbox/linux/bpf_dsl/policy.h"
+#include "sandbox/linux/seccomp-bpf/bpf_tests.h"
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+#include "sandbox/linux/tests/unit_tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using sandbox::bpf_dsl::Allow;
+using sandbox::bpf_dsl::ResultExpr;
+using sandbox::bpf_dsl::Trap;
+
+namespace sandbox {
+
+namespace {
+
+// Different platforms use different symbols for the six-argument version
+// of the mmap() system call. Test for the correct symbol at compile time.
+#ifdef __NR_mmap2
+const int kMMapNr = __NR_mmap2;
+#else
+const int kMMapNr = __NR_mmap;
+#endif
+
+TEST(Syscall, InvalidCallReturnsENOSYS) {
+ EXPECT_EQ(-ENOSYS, Syscall::InvalidCall());
+}
+
+TEST(Syscall, WellKnownEntryPoint) {
+// Test that Syscall::Call(-1) is handled specially. Don't do this on ARM,
+// where syscall(-1) crashes with SIGILL. Not running the test is fine, as we
+// are still testing ARM code in the next set of tests.
+#if !defined(__arm__) && !defined(__aarch64__)
+ EXPECT_NE(Syscall::Call(-1), syscall(-1));
+#endif
+
+// If possible, test that Syscall::Call(-1) returns the address
+// immediately after a kernel entry point.
+//
+#if defined(__i386__)
+ EXPECT_EQ(0x80CDu, ((uint16_t*)Syscall::Call(-1))[-1]); // INT 0x80
+#elif defined(__x86_64__)
+ EXPECT_EQ(0x050Fu, ((uint16_t*)Syscall::Call(-1))[-1]); // SYSCALL
+#elif defined(__arm__)
+#if defined(__thumb__)
+ EXPECT_EQ(0xDF00u, ((uint16_t*)Syscall::Call(-1))[-1]); // SWI 0
+#else
+ EXPECT_EQ(0xEF000000u, ((uint32_t*)Syscall::Call(-1))[-1]); // SVC 0
+#endif
+#elif defined(__mips__)
+ // Opcode for MIPS syscall is in the lower 16 bits
+ EXPECT_EQ(0x0cu, (((uint32_t*)Syscall::Call(-1))[-1]) & 0x0000FFFF);
+#elif defined(__aarch64__)
+ EXPECT_EQ(0xD4000001u, ((uint32_t*)Syscall::Call(-1))[-1]); // SVC 0
+#else
+#warning Incomplete test case; need port for target platform
+#endif
+}
+
+TEST(Syscall, TrivialSyscallNoArgs) {
+ // Test that we can do basic system calls
+ EXPECT_EQ(Syscall::Call(__NR_getpid), syscall(__NR_getpid));
+}
+
+TEST(Syscall, TrivialSyscallOneArg) {
+ int new_fd;
+ // Duplicate standard error and close it.
+ ASSERT_GE(new_fd = Syscall::Call(__NR_dup, 2), 0);
+ int close_return_value = IGNORE_EINTR(Syscall::Call(__NR_close, new_fd));
+ ASSERT_EQ(close_return_value, 0);
+}
+
+TEST(Syscall, TrivialFailingSyscall) {
+ errno = -42;
+ int ret = Syscall::Call(__NR_dup, -1);
+ ASSERT_EQ(-EBADF, ret);
+ // Verify that Syscall::Call does not touch errno.
+ ASSERT_EQ(-42, errno);
+}
+
+// SIGSYS trap handler that will be called on __NR_uname.
+intptr_t CopySyscallArgsToAux(const struct arch_seccomp_data& args, void* aux) {
+ // |aux| is our BPF_AUX pointer.
+ std::vector<uint64_t>* const seen_syscall_args =
+ static_cast<std::vector<uint64_t>*>(aux);
+ BPF_ASSERT(arraysize(args.args) == 6);
+ seen_syscall_args->assign(args.args, args.args + arraysize(args.args));
+ return -ENOMEM;
+}
+
+class CopyAllArgsOnUnamePolicy : public bpf_dsl::Policy {
+ public:
+ explicit CopyAllArgsOnUnamePolicy(std::vector<uint64_t>* aux) : aux_(aux) {}
+ ~CopyAllArgsOnUnamePolicy() override {}
+
+ ResultExpr EvaluateSyscall(int sysno) const override {
+ DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
+ if (sysno == __NR_uname) {
+ return Trap(CopySyscallArgsToAux, aux_);
+ } else {
+ return Allow();
+ }
+ }
+
+ private:
+ std::vector<uint64_t>* aux_;
+
+ DISALLOW_COPY_AND_ASSIGN(CopyAllArgsOnUnamePolicy);
+};
+
+// We are testing Syscall::Call() by making use of a BPF filter that
+// allows us to inspect the system call arguments that the kernel
+// saw.
+BPF_TEST(Syscall,
+ SyntheticSixArgs,
+ CopyAllArgsOnUnamePolicy,
+ std::vector<uint64_t> /* (*BPF_AUX) */) {
+ const int kExpectedValue = 42;
+ // In this test we only pass integers to the kernel. We might want to make
+ // additional tests to try other types. What we will see depends on
+ // implementation details of kernel BPF filters and we will need to document
+ // the expected behavior very clearly.
+ int syscall_args[6];
+ for (size_t i = 0; i < arraysize(syscall_args); ++i) {
+ syscall_args[i] = kExpectedValue + i;
+ }
+
+ // We could use pretty much any system call we don't need here. uname() is
+ // nice because it doesn't have any dangerous side effects.
+ BPF_ASSERT(Syscall::Call(__NR_uname,
+ syscall_args[0],
+ syscall_args[1],
+ syscall_args[2],
+ syscall_args[3],
+ syscall_args[4],
+ syscall_args[5]) == -ENOMEM);
+
+ // We expect the trap handler to have copied the 6 arguments.
+ BPF_ASSERT(BPF_AUX->size() == 6);
+
+ // Don't loop here so that we can see which argument does cause the failure
+ // easily from the failing line.
+ // uint64_t is the type passed to our SIGSYS handler.
+ BPF_ASSERT((*BPF_AUX)[0] == static_cast<uint64_t>(syscall_args[0]));
+ BPF_ASSERT((*BPF_AUX)[1] == static_cast<uint64_t>(syscall_args[1]));
+ BPF_ASSERT((*BPF_AUX)[2] == static_cast<uint64_t>(syscall_args[2]));
+ BPF_ASSERT((*BPF_AUX)[3] == static_cast<uint64_t>(syscall_args[3]));
+ BPF_ASSERT((*BPF_AUX)[4] == static_cast<uint64_t>(syscall_args[4]));
+ BPF_ASSERT((*BPF_AUX)[5] == static_cast<uint64_t>(syscall_args[5]));
+}
+
+TEST(Syscall, ComplexSyscallSixArgs) {
+ int fd;
+ ASSERT_LE(0,
+ fd = Syscall::Call(__NR_openat, AT_FDCWD, "/dev/null", O_RDWR, 0L));
+
+ // Use mmap() to allocate some read-only memory
+ char* addr0;
+ ASSERT_NE(
+ (char*)NULL,
+ addr0 = reinterpret_cast<char*>(Syscall::Call(kMMapNr,
+ (void*)NULL,
+ 4096,
+ PROT_READ,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ fd,
+ 0L)));
+
+ // Try to replace the existing mapping with a read-write mapping
+ char* addr1;
+ ASSERT_EQ(addr0,
+ addr1 = reinterpret_cast<char*>(
+ Syscall::Call(kMMapNr,
+ addr0,
+ 4096L,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ fd,
+ 0L)));
+ ++*addr1; // This should not seg fault
+
+ // Clean up
+ EXPECT_EQ(0, Syscall::Call(__NR_munmap, addr1, 4096L));
+ EXPECT_EQ(0, IGNORE_EINTR(Syscall::Call(__NR_close, fd)));
+
+ // Check that the offset argument (i.e. the sixth argument) is processed
+ // correctly.
+ ASSERT_GE(
+ fd = Syscall::Call(__NR_openat, AT_FDCWD, "/proc/self/exe", O_RDONLY, 0L),
+ 0);
+ char* addr2, *addr3;
+ ASSERT_NE((char*)NULL,
+ addr2 = reinterpret_cast<char*>(Syscall::Call(
+ kMMapNr, (void*)NULL, 8192L, PROT_READ, MAP_PRIVATE, fd, 0L)));
+ ASSERT_NE((char*)NULL,
+ addr3 = reinterpret_cast<char*>(Syscall::Call(kMMapNr,
+ (void*)NULL,
+ 4096L,
+ PROT_READ,
+ MAP_PRIVATE,
+ fd,
+#if defined(__NR_mmap2)
+ 1L
+#else
+ 4096L
+#endif
+ )));
+ EXPECT_EQ(0, memcmp(addr2 + 4096, addr3, 4096));
+
+ // Just to be absolutely on the safe side, also verify that the file
+ // contents matches what we are getting from a read() operation.
+ char buf[8192];
+ EXPECT_EQ(8192, Syscall::Call(__NR_read, fd, buf, 8192L));
+ EXPECT_EQ(0, memcmp(addr2, buf, 8192));
+
+ // Clean up
+ EXPECT_EQ(0, Syscall::Call(__NR_munmap, addr2, 8192L));
+ EXPECT_EQ(0, Syscall::Call(__NR_munmap, addr3, 4096L));
+ EXPECT_EQ(0, IGNORE_EINTR(Syscall::Call(__NR_close, fd)));
+}
+
+} // namespace
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/seccomp-bpf/trap.cc b/libchrome/sandbox/linux/seccomp-bpf/trap.cc
new file mode 100644
index 0000000..003708d
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf/trap.cc
@@ -0,0 +1,387 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf/trap.h"
+
+#include <errno.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/syscall.h>
+
+#include <algorithm>
+#include <limits>
+#include <tuple>
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "sandbox/linux/bpf_dsl/seccomp_macros.h"
+#include "sandbox/linux/seccomp-bpf/die.h"
+#include "sandbox/linux/seccomp-bpf/syscall.h"
+#include "sandbox/linux/services/syscall_wrappers.h"
+#include "sandbox/linux/system_headers/linux_seccomp.h"
+#include "sandbox/linux/system_headers/linux_signal.h"
+
+namespace {
+
+struct arch_sigsys {
+ void* ip;
+ int nr;
+ unsigned int arch;
+};
+
+const int kCapacityIncrement = 20;
+
+// Unsafe traps can only be turned on if the user explicitly allowed them
+// by setting the CHROME_SANDBOX_DEBUGGING environment variable.
+const char kSandboxDebuggingEnv[] = "CHROME_SANDBOX_DEBUGGING";
+
+// We need to tell whether we are performing a "normal" callback, or
+// whether we were called recursively from within a UnsafeTrap() callback.
+// This is a little tricky to do, because we need to somehow get access to
+// per-thread data from within a signal context. Normal TLS storage is not
+// safely accessible at this time. We could roll our own, but that involves
+// a lot of complexity. Instead, we co-opt one bit in the signal mask.
+// If SIGBUS is blocked, we assume that we have been called recursively.
+// There is a possibility for collision with other code that needs to do
+// this, but in practice the risks are low.
+// If SIGBUS turns out to be a problem, we could instead co-opt one of the
+// realtime signals. There are plenty of them. Unfortunately, there is no
+// way to mark a signal as allocated. So, the potential for collision is
+// possibly even worse.
+bool GetIsInSigHandler(const ucontext_t* ctx) {
+ // Note: on Android, sigismember does not take a pointer to const.
+ return sigismember(const_cast<sigset_t*>(&ctx->uc_sigmask), LINUX_SIGBUS);
+}
+
+void SetIsInSigHandler() {
+ sigset_t mask;
+ if (sigemptyset(&mask) || sigaddset(&mask, LINUX_SIGBUS) ||
+ sandbox::sys_sigprocmask(LINUX_SIG_BLOCK, &mask, NULL)) {
+ SANDBOX_DIE("Failed to block SIGBUS");
+ }
+}
+
+bool IsDefaultSignalAction(const struct sigaction& sa) {
+ if (sa.sa_flags & SA_SIGINFO || sa.sa_handler != SIG_DFL) {
+ return false;
+ }
+ return true;
+}
+
+} // namespace
+
+namespace sandbox {
+
+Trap::Trap()
+ : trap_array_(NULL),
+ trap_array_size_(0),
+ trap_array_capacity_(0),
+ has_unsafe_traps_(false) {
+ // Set new SIGSYS handler
+ struct sigaction sa = {};
+ // In some toolchains, sa_sigaction is not declared in struct sigaction.
+ // So, cast the pointer here to the sa_handler's type. This works because
+ // |sa_handler| and |sa_sigaction| share the same memory.
+ sa.sa_handler = reinterpret_cast<void (*)(int)>(SigSysAction);
+ sa.sa_flags = LINUX_SA_SIGINFO | LINUX_SA_NODEFER;
+ struct sigaction old_sa = {};
+ if (sys_sigaction(LINUX_SIGSYS, &sa, &old_sa) < 0) {
+ SANDBOX_DIE("Failed to configure SIGSYS handler");
+ }
+
+ if (!IsDefaultSignalAction(old_sa)) {
+ static const char kExistingSIGSYSMsg[] =
+ "Existing signal handler when trying to install SIGSYS. SIGSYS needs "
+ "to be reserved for seccomp-bpf.";
+ DLOG(FATAL) << kExistingSIGSYSMsg;
+ LOG(ERROR) << kExistingSIGSYSMsg;
+ }
+
+ // Unmask SIGSYS
+ sigset_t mask;
+ if (sigemptyset(&mask) || sigaddset(&mask, LINUX_SIGSYS) ||
+ sys_sigprocmask(LINUX_SIG_UNBLOCK, &mask, NULL)) {
+ SANDBOX_DIE("Failed to configure SIGSYS handler");
+ }
+}
+
+bpf_dsl::TrapRegistry* Trap::Registry() {
+ // Note: This class is not thread safe. It is the caller's responsibility
+ // to avoid race conditions. Normally, this is a non-issue as the sandbox
+ // can only be initialized if there are no other threads present.
+ // Also, this is not a normal singleton. Once created, the global trap
+ // object must never be destroyed again.
+ if (!global_trap_) {
+ global_trap_ = new Trap();
+ if (!global_trap_) {
+ SANDBOX_DIE("Failed to allocate global trap handler");
+ }
+ }
+ return global_trap_;
+}
+
+void Trap::SigSysAction(int nr, LinuxSigInfo* info, void* void_context) {
+ if (info) {
+ MSAN_UNPOISON(info, sizeof(*info));
+ }
+
+ // Obtain the signal context. This, most notably, gives us access to
+ // all CPU registers at the time of the signal.
+ ucontext_t* ctx = reinterpret_cast<ucontext_t*>(void_context);
+ if (ctx) {
+ MSAN_UNPOISON(ctx, sizeof(*ctx));
+ }
+
+ if (!global_trap_) {
+ RAW_SANDBOX_DIE(
+ "This can't happen. Found no global singleton instance "
+ "for Trap() handling.");
+ }
+ global_trap_->SigSys(nr, info, ctx);
+}
+
+void Trap::SigSys(int nr, LinuxSigInfo* info, ucontext_t* ctx) {
+ // Signal handlers should always preserve "errno". Otherwise, we could
+ // trigger really subtle bugs.
+ const int old_errno = errno;
+
+ // Various sanity checks to make sure we actually received a signal
+ // triggered by a BPF filter. If something else triggered SIGSYS
+ // (e.g. kill()), there is really nothing we can do with this signal.
+ if (nr != LINUX_SIGSYS || info->si_code != SYS_SECCOMP || !ctx ||
+ info->si_errno <= 0 ||
+ static_cast<size_t>(info->si_errno) > trap_array_size_) {
+ // ATI drivers seem to send SIGSYS, so this cannot be FATAL.
+ // See crbug.com/178166.
+ // TODO(jln): add a DCHECK or move back to FATAL.
+ RAW_LOG(ERROR, "Unexpected SIGSYS received.");
+ errno = old_errno;
+ return;
+ }
+
+
+ // Obtain the siginfo information that is specific to SIGSYS. Unfortunately,
+ // most versions of glibc don't include this information in siginfo_t. So,
+ // we need to explicitly copy it into an arch_sigsys structure.
+ struct arch_sigsys sigsys;
+ memcpy(&sigsys, &info->_sifields, sizeof(sigsys));
+
+#if defined(__mips__)
+ // When indirect syscall (syscall(__NR_foo, ...)) is made on Mips, the
+ // number in register SECCOMP_SYSCALL(ctx) is always __NR_syscall and the
+ // real number of a syscall (__NR_foo) is in SECCOMP_PARM1(ctx)
+ bool sigsys_nr_is_bad = sigsys.nr != static_cast<int>(SECCOMP_SYSCALL(ctx)) &&
+ sigsys.nr != static_cast<int>(SECCOMP_PARM1(ctx));
+#else
+ bool sigsys_nr_is_bad = sigsys.nr != static_cast<int>(SECCOMP_SYSCALL(ctx));
+#endif
+
+ // Some more sanity checks.
+ if (sigsys.ip != reinterpret_cast<void*>(SECCOMP_IP(ctx)) ||
+ sigsys_nr_is_bad || sigsys.arch != SECCOMP_ARCH) {
+ // TODO(markus):
+ // SANDBOX_DIE() can call LOG(FATAL). This is not normally async-signal
+ // safe and can lead to bugs. We should eventually implement a different
+ // logging and reporting mechanism that is safe to be called from
+ // the sigSys() handler.
+ RAW_SANDBOX_DIE("Sanity checks are failing after receiving SIGSYS.");
+ }
+
+ intptr_t rc;
+ if (has_unsafe_traps_ && GetIsInSigHandler(ctx)) {
+ errno = old_errno;
+ if (sigsys.nr == __NR_clone) {
+ RAW_SANDBOX_DIE("Cannot call clone() from an UnsafeTrap() handler.");
+ }
+#if defined(__mips__)
+ // Mips supports up to eight arguments for syscall.
+ // However, seccomp-bpf can filter only up to six arguments, so using eight
+ // arguments makes sense only when using an UnsafeTrap() handler.
+ rc = Syscall::Call(SECCOMP_SYSCALL(ctx),
+ SECCOMP_PARM1(ctx),
+ SECCOMP_PARM2(ctx),
+ SECCOMP_PARM3(ctx),
+ SECCOMP_PARM4(ctx),
+ SECCOMP_PARM5(ctx),
+ SECCOMP_PARM6(ctx),
+ SECCOMP_PARM7(ctx),
+ SECCOMP_PARM8(ctx));
+#else
+ rc = Syscall::Call(SECCOMP_SYSCALL(ctx),
+ SECCOMP_PARM1(ctx),
+ SECCOMP_PARM2(ctx),
+ SECCOMP_PARM3(ctx),
+ SECCOMP_PARM4(ctx),
+ SECCOMP_PARM5(ctx),
+ SECCOMP_PARM6(ctx));
+#endif // defined(__mips__)
+ } else {
+ const TrapKey& trap = trap_array_[info->si_errno - 1];
+ if (!trap.safe) {
+ SetIsInSigHandler();
+ }
+
+ // Copy the seccomp-specific data into an arch_seccomp_data structure. This
+ // is what we are showing to TrapFnc callbacks that the system call
+ // evaluator registered with the sandbox.
+ struct arch_seccomp_data data = {
+ static_cast<int>(SECCOMP_SYSCALL(ctx)),
+ SECCOMP_ARCH,
+ reinterpret_cast<uint64_t>(sigsys.ip),
+ {static_cast<uint64_t>(SECCOMP_PARM1(ctx)),
+ static_cast<uint64_t>(SECCOMP_PARM2(ctx)),
+ static_cast<uint64_t>(SECCOMP_PARM3(ctx)),
+ static_cast<uint64_t>(SECCOMP_PARM4(ctx)),
+ static_cast<uint64_t>(SECCOMP_PARM5(ctx)),
+ static_cast<uint64_t>(SECCOMP_PARM6(ctx))}};
+
+ // Now call the TrapFnc callback associated with this particular instance
+ // of SECCOMP_RET_TRAP.
+ rc = trap.fnc(data, const_cast<void*>(trap.aux));
+ }
+
+ // Update the CPU register that stores the return code of the system call
+ // that we just handled, and restore "errno" to the value that it had
+ // before entering the signal handler.
+ Syscall::PutValueInUcontext(rc, ctx);
+ errno = old_errno;
+
+ return;
+}
+
+bool Trap::TrapKey::operator<(const TrapKey& o) const {
+ return std::tie(fnc, aux, safe) < std::tie(o.fnc, o.aux, o.safe);
+}
+
+uint16_t Trap::Add(TrapFnc fnc, const void* aux, bool safe) {
+ if (!safe && !SandboxDebuggingAllowedByUser()) {
+ // Unless the user set the CHROME_SANDBOX_DEBUGGING environment variable,
+ // we never return an ErrorCode that is marked as "unsafe". This also
+ // means, the BPF compiler will never emit code that allow unsafe system
+ // calls to by-pass the filter (because they use the magic return address
+ // from Syscall::Call(-1)).
+
+ // This SANDBOX_DIE() can optionally be removed. It won't break security,
+ // but it might make error messages from the BPF compiler a little harder
+ // to understand. Removing the SANDBOX_DIE() allows callers to easily check
+ // whether unsafe traps are supported (by checking whether the returned
+ // ErrorCode is ET_INVALID).
+ SANDBOX_DIE(
+ "Cannot use unsafe traps unless CHROME_SANDBOX_DEBUGGING "
+ "is enabled");
+
+ return 0;
+ }
+
+ // Each unique pair of TrapFnc and auxiliary data make up a distinct instance
+ // of a SECCOMP_RET_TRAP.
+ TrapKey key(fnc, aux, safe);
+
+ // We return unique identifiers together with SECCOMP_RET_TRAP. This allows
+ // us to associate a trap with the appropriate handler. The kernel allows
+ // identifiers in the range from 0 to SECCOMP_RET_DATA (0xFFFF). We want to
+ // avoid 0, as it could be confused for a trap without any specific id.
+ // The nice thing about sequentially numbered identifiers is that we can also
+ // trivially look them up from our signal handler without making any system
+ // calls that might be async-signal-unsafe.
+ // In order to do so, we store all of our traps in a C-style trap_array_.
+
+ TrapIds::const_iterator iter = trap_ids_.find(key);
+ if (iter != trap_ids_.end()) {
+ // We have seen this pair before. Return the same id that we assigned
+ // earlier.
+ return iter->second;
+ }
+
+ // This is a new pair. Remember it and assign a new id.
+ if (trap_array_size_ >= SECCOMP_RET_DATA /* 0xFFFF */ ||
+ trap_array_size_ >= std::numeric_limits<uint16_t>::max()) {
+ // In practice, this is pretty much impossible to trigger, as there
+ // are other kernel limitations that restrict overall BPF program sizes.
+ SANDBOX_DIE("Too many SECCOMP_RET_TRAP callback instances");
+ }
+
+ // Our callers ensure that there are no other threads accessing trap_array_
+ // concurrently (typically this is done by ensuring that we are single-
+ // threaded while the sandbox is being set up). But we nonetheless are
+ // modifying a live data structure that could be accessed any time a
+ // system call is made; as system calls could be triggering SIGSYS.
+ // So, we have to be extra careful that we update trap_array_ atomically.
+ // In particular, this means we shouldn't be using realloc() to resize it.
+ // Instead, we allocate a new array, copy the values, and then switch the
+ // pointer. We only really care about the pointer being updated atomically
+ // and the data that is pointed to being valid, as these are the only
+ // values accessed from the signal handler. It is OK if trap_array_size_
+ // is inconsistent with the pointer, as it is monotonically increasing.
+ // Also, we only care about compiler barriers, as the signal handler is
+ // triggered synchronously from a system call. We don't have to protect
+ // against issues with the memory model or with completely asynchronous
+ // events.
+ if (trap_array_size_ >= trap_array_capacity_) {
+ trap_array_capacity_ += kCapacityIncrement;
+ TrapKey* old_trap_array = trap_array_;
+ TrapKey* new_trap_array = new TrapKey[trap_array_capacity_];
+ std::copy_n(old_trap_array, trap_array_size_, new_trap_array);
+
+ // Language specs are unclear on whether the compiler is allowed to move
+ // the "delete[]" above our preceding assignments and/or memory moves,
+ // iff the compiler believes that "delete[]" doesn't have any other
+ // global side-effects.
+ // We insert optimization barriers to prevent this from happening.
+ // The first barrier is probably not needed, but better be explicit in
+ // what we want to tell the compiler.
+ // The clang developer mailing list couldn't answer whether this is a
+ // legitimate worry; but they at least thought that the barrier is
+ // sufficient to prevent the (so far hypothetical) problem of re-ordering
+ // of instructions by the compiler.
+ //
+ // TODO(mdempsky): Try to clean this up using base/atomicops or C++11
+ // atomics; see crbug.com/414363.
+ asm volatile("" : "=r"(new_trap_array) : "0"(new_trap_array) : "memory");
+ trap_array_ = new_trap_array;
+ asm volatile("" : "=r"(trap_array_) : "0"(trap_array_) : "memory");
+
+ delete[] old_trap_array;
+ }
+
+ uint16_t id = trap_array_size_ + 1;
+ trap_ids_[key] = id;
+ trap_array_[trap_array_size_] = key;
+ trap_array_size_++;
+ return id;
+}
+
+bool Trap::SandboxDebuggingAllowedByUser() {
+ const char* debug_flag = getenv(kSandboxDebuggingEnv);
+ return debug_flag && *debug_flag;
+}
+
+bool Trap::EnableUnsafeTraps() {
+ if (!has_unsafe_traps_) {
+ // Unsafe traps are a one-way fuse. Once enabled, they can never be turned
+ // off again.
+ // We only allow enabling unsafe traps, if the user explicitly set an
+ // appropriate environment variable. This prevents bugs that accidentally
+ // disable all sandboxing for all users.
+ if (SandboxDebuggingAllowedByUser()) {
+ // We only ever print this message once, when we enable unsafe traps the
+ // first time.
+ SANDBOX_INFO("WARNING! Disabling sandbox for debugging purposes");
+ has_unsafe_traps_ = true;
+ } else {
+ SANDBOX_INFO(
+ "Cannot disable sandbox and use unsafe traps unless "
+ "CHROME_SANDBOX_DEBUGGING is turned on first");
+ }
+ }
+ // Returns the (possibly updated) value of has_unsafe_traps_.
+ return has_unsafe_traps_;
+}
+
+Trap* Trap::global_trap_;
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/seccomp-bpf/trap.h b/libchrome/sandbox/linux/seccomp-bpf/trap.h
new file mode 100644
index 0000000..a73d206
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf/trap.h
@@ -0,0 +1,86 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_TRAP_H__
+#define SANDBOX_LINUX_SECCOMP_BPF_TRAP_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <map>
+
+#include "base/macros.h"
+#include "sandbox/linux/bpf_dsl/trap_registry.h"
+#include "sandbox/linux/system_headers/linux_signal.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+
+// The Trap class allows a BPF filter program to branch out to user space by
+// raising a SIGSYS signal.
+// N.B.: This class does not perform any synchronization operations. If
+// modifications are made to any of the traps, it is the caller's
+// responsibility to ensure that this happens in a thread-safe fashion.
+// Preferably, that means that no other threads should be running at that
+// time. For the purposes of our sandbox, this assertion should always be
+// true. Threads are incompatible with the seccomp sandbox anyway.
+class SANDBOX_EXPORT Trap : public bpf_dsl::TrapRegistry {
+ public:
+ uint16_t Add(TrapFnc fnc, const void* aux, bool safe) override;
+
+ bool EnableUnsafeTraps() override;
+
+ // Registry returns the trap registry used by Trap's SIGSYS handler,
+ // creating it if necessary.
+ static bpf_dsl::TrapRegistry* Registry();
+
+ // SandboxDebuggingAllowedByUser returns whether the
+ // "CHROME_SANDBOX_DEBUGGING" environment variable is set.
+ static bool SandboxDebuggingAllowedByUser();
+
+ private:
+ struct TrapKey {
+ TrapKey() : fnc(NULL), aux(NULL), safe(false) {}
+ TrapKey(TrapFnc f, const void* a, bool s) : fnc(f), aux(a), safe(s) {}
+ TrapFnc fnc;
+ const void* aux;
+ bool safe;
+ bool operator<(const TrapKey&) const;
+ };
+ typedef std::map<TrapKey, uint16_t> TrapIds;
+
+ // Our constructor is private. A shared global instance is created
+ // automatically as needed.
+ Trap();
+
+ // The destructor is unimplemented as destroying this object would
+ // break subsequent system calls that trigger a SIGSYS.
+ ~Trap() = delete;
+
+ static void SigSysAction(int nr, LinuxSigInfo* info, void* void_context);
+
+ // Make sure that SigSys is not inlined in order to get slightly better crash
+ // dumps.
+ void SigSys(int nr, LinuxSigInfo* info, ucontext_t* ctx)
+ __attribute__((noinline));
+ // We have a global singleton that handles all of our SIGSYS traps. This
+ // variable must never be deallocated after it has been set up initially, as
+ // there is no way to reset in-kernel BPF filters that generate SIGSYS
+ // events.
+ static Trap* global_trap_;
+
+ TrapIds trap_ids_; // Maps from TrapKeys to numeric ids
+ TrapKey* trap_array_; // Array of TrapKeys indexed by ids
+ size_t trap_array_size_; // Currently used size of array
+ size_t trap_array_capacity_; // Currently allocated capacity of array
+ bool has_unsafe_traps_; // Whether unsafe traps have been enabled
+
+ // Copying and assigning is unimplemented. It doesn't make sense for a
+ // singleton.
+ DISALLOW_COPY_AND_ASSIGN(Trap);
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SECCOMP_BPF_TRAP_H__
diff --git a/libchrome/sandbox/linux/seccomp-bpf/trap_unittest.cc b/libchrome/sandbox/linux/seccomp-bpf/trap_unittest.cc
new file mode 100644
index 0000000..99f94bf
--- /dev/null
+++ b/libchrome/sandbox/linux/seccomp-bpf/trap_unittest.cc
@@ -0,0 +1,28 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf/trap.h"
+
+#include <signal.h>
+
+#include "sandbox/linux/tests/unit_tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace sandbox {
+namespace {
+
+SANDBOX_TEST_ALLOW_NOISE(Trap, SigSysAction) {
+ // This creates a global Trap instance, and registers the signal handler
+ // (Trap::SigSysAction).
+ Trap::Registry();
+
+ // Send SIGSYS to self. If signal handler (SigSysAction) is not registered,
+ // the process will be terminated with status code -SIGSYS.
+ // Note that the SigSysAction handler would output an error message
+ // "Unexpected SIGSYS received." so it is necessary to allow the noise.
+ raise(SIGSYS);
+}
+
+} // namespace
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/services/DEPS b/libchrome/sandbox/linux/services/DEPS
new file mode 100644
index 0000000..70d9b18
--- /dev/null
+++ b/libchrome/sandbox/linux/services/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+sandbox/linux/system_headers",
+]
diff --git a/libchrome/sandbox/linux/services/credentials.cc b/libchrome/sandbox/linux/services/credentials.cc
new file mode 100644
index 0000000..0c617d4
--- /dev/null
+++ b/libchrome/sandbox/linux/services/credentials.cc
@@ -0,0 +1,335 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/credentials.h"
+
+#include <errno.h>
+#include <limits.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/launch.h"
+#include "base/third_party/valgrind/valgrind.h"
+#include "build/build_config.h"
+#include "sandbox/linux/services/namespace_utils.h"
+#include "sandbox/linux/services/proc_util.h"
+#include "sandbox/linux/services/syscall_wrappers.h"
+#include "sandbox/linux/services/thread_helpers.h"
+#include "sandbox/linux/system_headers/capability.h"
+#include "sandbox/linux/system_headers/linux_signal.h"
+
+namespace sandbox {
+
+namespace {
+
+bool IsRunningOnValgrind() { return RUNNING_ON_VALGRIND; }
+
+// Checks that the set of RES-uids and the set of RES-gids have
+// one element each and return that element in |resuid| and |resgid|
+// respectively. It's ok to pass NULL as one or both of the ids.
+bool GetRESIds(uid_t* resuid, gid_t* resgid) {
+ uid_t ruid, euid, suid;
+ gid_t rgid, egid, sgid;
+ PCHECK(sys_getresuid(&ruid, &euid, &suid) == 0);
+ PCHECK(sys_getresgid(&rgid, &egid, &sgid) == 0);
+ const bool uids_are_equal = (ruid == euid) && (ruid == suid);
+ const bool gids_are_equal = (rgid == egid) && (rgid == sgid);
+ if (!uids_are_equal || !gids_are_equal) return false;
+ if (resuid) *resuid = euid;
+ if (resgid) *resgid = egid;
+ return true;
+}
+
+const int kExitSuccess = 0;
+
+#if defined(__clang__)
+// Disable sanitizers that rely on TLS and may write to non-stack memory.
+__attribute__((no_sanitize_address))
+__attribute__((no_sanitize_thread))
+__attribute__((no_sanitize_memory))
+#endif
+int ChrootToSelfFdinfo(void*) {
+ // This function can be run from a vforked child, so it should not write to
+ // any memory other than the stack or errno. Reads from TLS may be different
+ // from in the parent process.
+ RAW_CHECK(sys_chroot("/proc/self/fdinfo/") == 0);
+
+ // CWD is essentially an implicit file descriptor, so be careful to not
+ // leave it behind.
+ RAW_CHECK(chdir("/") == 0);
+ _exit(kExitSuccess);
+}
+
+// chroot() to an empty dir that is "safe". To be safe, it must not contain
+// any subdirectory (chroot-ing there would allow a chroot escape) and it must
+// be impossible to create an empty directory there.
+// We achieve this by doing the following:
+// 1. We create a new process sharing file system information.
+// 2. In the child, we chroot to /proc/self/fdinfo/
+// This is already "safe", since fdinfo/ does not contain another directory and
+// one cannot create another directory there.
+// 3. The process dies
+// After (3) happens, the directory is not available anymore in /proc.
+bool ChrootToSafeEmptyDir() {
+ // We need to chroot to a fdinfo that is unique to a process and have that
+ // process die.
+ // 1. We don't want to simply fork() because duplicating the page tables is
+ // slow with a big address space.
+ // 2. We do not use a regular thread (that would unshare CLONE_FILES) because
+ // when we are in a PID namespace, we cannot easily get a handle to the
+ // /proc/tid directory for the thread (since /proc may not be aware of the
+ // PID namespace). With a process, we can just use /proc/self.
+ pid_t pid = -1;
+ char stack_buf[PTHREAD_STACK_MIN] ALIGNAS(16);
+#if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \
+ defined(ARCH_CPU_MIPS_FAMILY)
+ // The stack grows downward.
+ void* stack = stack_buf + sizeof(stack_buf);
+#else
+#error "Unsupported architecture"
+#endif
+
+ int clone_flags = CLONE_FS | LINUX_SIGCHLD;
+ void* tls = nullptr;
+#if defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_ARM_FAMILY)
+ // Use CLONE_VM | CLONE_VFORK as an optimization to avoid copying page tables.
+ // Since clone writes to the new child's TLS before returning, we must set a
+ // new TLS to avoid corrupting the current process's TLS. On ARCH_CPU_X86,
+ // glibc performs syscalls by calling a function pointer in TLS, so we do not
+ // attempt this optimization.
+ clone_flags |= CLONE_VM | CLONE_VFORK | CLONE_SETTLS;
+
+ char tls_buf[PTHREAD_STACK_MIN] = {0};
+ tls = tls_buf;
+#endif
+
+ pid = clone(ChrootToSelfFdinfo, stack, clone_flags, nullptr, nullptr, tls,
+ nullptr);
+ PCHECK(pid != -1);
+
+ int status = -1;
+ PCHECK(HANDLE_EINTR(waitpid(pid, &status, 0)) == pid);
+
+ return WIFEXITED(status) && WEXITSTATUS(status) == kExitSuccess;
+}
+
+// CHECK() that an attempt to move to a new user namespace raised an expected
+// errno.
+void CheckCloneNewUserErrno(int error) {
+ // EPERM can happen if already in a chroot. EUSERS if too many nested
+ // namespaces are used. EINVAL for kernels that don't support the feature.
+ // Valgrind will ENOSYS unshare().
+ PCHECK(error == EPERM || error == EUSERS || error == EINVAL ||
+ error == ENOSYS);
+}
+
+// Converts a Capability to the corresponding Linux CAP_XXX value.
+int CapabilityToKernelValue(Credentials::Capability cap) {
+ switch (cap) {
+ case Credentials::Capability::SYS_CHROOT:
+ return CAP_SYS_CHROOT;
+ case Credentials::Capability::SYS_ADMIN:
+ return CAP_SYS_ADMIN;
+ }
+
+ LOG(FATAL) << "Invalid Capability: " << static_cast<int>(cap);
+ return 0;
+}
+
+} // namespace.
+
+// static
+bool Credentials::DropAllCapabilities(int proc_fd) {
+ if (!SetCapabilities(proc_fd, std::vector<Capability>())) {
+ return false;
+ }
+
+ CHECK(!HasAnyCapability());
+ return true;
+}
+
+// static
+bool Credentials::DropAllCapabilities() {
+ base::ScopedFD proc_fd(ProcUtil::OpenProc());
+ return Credentials::DropAllCapabilities(proc_fd.get());
+}
+
+// static
+bool Credentials::DropAllCapabilitiesOnCurrentThread() {
+ return SetCapabilitiesOnCurrentThread(std::vector<Capability>());
+}
+
+// static
+bool Credentials::SetCapabilitiesOnCurrentThread(
+ const std::vector<Capability>& caps) {
+ struct cap_hdr hdr = {};
+ hdr.version = _LINUX_CAPABILITY_VERSION_3;
+ struct cap_data data[_LINUX_CAPABILITY_U32S_3] = {{}};
+
+ // Initially, cap has no capability flags set. Enable the effective and
+ // permitted flags only for the requested capabilities.
+ for (const Capability cap : caps) {
+ const int cap_num = CapabilityToKernelValue(cap);
+ const size_t index = CAP_TO_INDEX(cap_num);
+ const uint32_t mask = CAP_TO_MASK(cap_num);
+ data[index].effective |= mask;
+ data[index].permitted |= mask;
+ }
+
+ return sys_capset(&hdr, data) == 0;
+}
+
+// static
+bool Credentials::SetCapabilities(int proc_fd,
+ const std::vector<Capability>& caps) {
+ DCHECK_LE(0, proc_fd);
+
+#if !defined(THREAD_SANITIZER)
+ // With TSAN, accept to break the security model as it is a testing
+ // configuration.
+ CHECK(ThreadHelpers::IsSingleThreaded(proc_fd));
+#endif
+
+ return SetCapabilitiesOnCurrentThread(caps);
+}
+
+bool Credentials::HasAnyCapability() {
+ struct cap_hdr hdr = {};
+ hdr.version = _LINUX_CAPABILITY_VERSION_3;
+ struct cap_data data[_LINUX_CAPABILITY_U32S_3] = {{}};
+
+ PCHECK(sys_capget(&hdr, data) == 0);
+
+ for (size_t i = 0; i < arraysize(data); ++i) {
+ if (data[i].effective || data[i].permitted || data[i].inheritable) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool Credentials::HasCapability(Capability cap) {
+ struct cap_hdr hdr = {};
+ hdr.version = _LINUX_CAPABILITY_VERSION_3;
+ struct cap_data data[_LINUX_CAPABILITY_U32S_3] = {{}};
+
+ PCHECK(sys_capget(&hdr, data) == 0);
+
+ const int cap_num = CapabilityToKernelValue(cap);
+ const size_t index = CAP_TO_INDEX(cap_num);
+ const uint32_t mask = CAP_TO_MASK(cap_num);
+
+ return (data[index].effective | data[index].permitted |
+ data[index].inheritable) &
+ mask;
+}
+
+// static
+bool Credentials::CanCreateProcessInNewUserNS() {
+ // Valgrind will let clone(2) pass-through, but doesn't support unshare(),
+ // so always consider UserNS unsupported there.
+ if (IsRunningOnValgrind()) {
+ return false;
+ }
+
+#if defined(THREAD_SANITIZER)
+ // With TSAN, processes will always have threads running and can never
+ // enter a new user namespace with MoveToNewUserNS().
+ return false;
+#endif
+
+ // This is roughly a fork().
+ const pid_t pid = sys_clone(CLONE_NEWUSER | SIGCHLD, 0, 0, 0, 0);
+
+ if (pid == -1) {
+ CheckCloneNewUserErrno(errno);
+ return false;
+ }
+
+ // The parent process could have had threads. In the child, these threads
+ // have disappeared. Make sure to not do anything in the child, as this is a
+ // fragile execution environment.
+ if (pid == 0) {
+ _exit(kExitSuccess);
+ }
+
+ // Always reap the child.
+ int status = -1;
+ PCHECK(HANDLE_EINTR(waitpid(pid, &status, 0)) == pid);
+ CHECK(WIFEXITED(status));
+ CHECK_EQ(kExitSuccess, WEXITSTATUS(status));
+
+ // clone(2) succeeded, we can use CLONE_NEWUSER.
+ return true;
+}
+
+bool Credentials::MoveToNewUserNS() {
+ uid_t uid;
+ gid_t gid;
+ if (!GetRESIds(&uid, &gid)) {
+ // If all the uids (or gids) are not equal to each other, the security
+ // model will most likely confuse the caller, abort.
+ DVLOG(1) << "uids or gids differ!";
+ return false;
+ }
+ int ret = sys_unshare(CLONE_NEWUSER);
+ if (ret) {
+ const int unshare_errno = errno;
+ VLOG(1) << "Looks like unprivileged CLONE_NEWUSER may not be available "
+ << "on this kernel.";
+ CheckCloneNewUserErrno(unshare_errno);
+ return false;
+ }
+
+ if (NamespaceUtils::KernelSupportsDenySetgroups()) {
+ PCHECK(NamespaceUtils::DenySetgroups());
+ }
+
+ // The current {r,e,s}{u,g}id is now an overflow id (c.f.
+ // /proc/sys/kernel/overflowuid). Setup the uid and gid maps.
+ DCHECK(GetRESIds(NULL, NULL));
+ const char kGidMapFile[] = "/proc/self/gid_map";
+ const char kUidMapFile[] = "/proc/self/uid_map";
+ PCHECK(NamespaceUtils::WriteToIdMapFile(kGidMapFile, gid));
+ PCHECK(NamespaceUtils::WriteToIdMapFile(kUidMapFile, uid));
+ DCHECK(GetRESIds(NULL, NULL));
+ return true;
+}
+
+bool Credentials::DropFileSystemAccess(int proc_fd) {
+ CHECK_LE(0, proc_fd);
+
+ CHECK(ChrootToSafeEmptyDir());
+ CHECK(!base::DirectoryExists(base::FilePath("/proc")));
+ CHECK(!ProcUtil::HasOpenDirectory(proc_fd));
+ // We never let this function fail.
+ return true;
+}
+
+pid_t Credentials::ForkAndDropCapabilitiesInChild() {
+ pid_t pid = fork();
+ if (pid != 0) {
+ return pid;
+ }
+
+ // Since we just forked, we are single threaded.
+ PCHECK(DropAllCapabilitiesOnCurrentThread());
+ return 0;
+}
+
+} // namespace sandbox.
diff --git a/libchrome/sandbox/linux/services/credentials.h b/libchrome/sandbox/linux/services/credentials.h
new file mode 100644
index 0000000..b89a6aa
--- /dev/null
+++ b/libchrome/sandbox/linux/services/credentials.h
@@ -0,0 +1,106 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SERVICES_CREDENTIALS_H_
+#define SANDBOX_LINUX_SERVICES_CREDENTIALS_H_
+
+#include "build/build_config.h"
+// Link errors are tedious to track, raise a compile-time error instead.
+#if defined(OS_ANDROID)
+#error "Android is not supported."
+#endif // defined(OS_ANDROID).
+
+#include <string>
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "sandbox/linux/system_headers/capability.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+
+// This class should be used to manipulate the current process' credentials.
+// It is currently a stub used to manipulate POSIX.1e capabilities as
+// implemented by the Linux kernel.
+class SANDBOX_EXPORT Credentials {
+ public:
+ // For brevity, we only expose enums for the subset of capabilities we use.
+ // This can be expanded as the need arises.
+ enum class Capability {
+ SYS_CHROOT,
+ SYS_ADMIN,
+ };
+
+ // Drop all capabilities in the effective, inheritable and permitted sets for
+ // the current thread. For security reasons, since capabilities are
+ // per-thread, the caller is responsible for ensuring it is single-threaded
+ // when calling this API.
+ // |proc_fd| must be a file descriptor to /proc/ and remains owned by
+ // the caller.
+ static bool DropAllCapabilities(int proc_fd) WARN_UNUSED_RESULT;
+ // A similar API which assumes that it can open /proc/self/ by itself.
+ static bool DropAllCapabilities() WARN_UNUSED_RESULT;
+ // Sets the effective and permitted capability sets for the current thread to
+ // the list of capabilities in |caps|. All other capability flags are cleared.
+ static bool SetCapabilities(int proc_fd,
+ const std::vector<Capability>& caps)
+ WARN_UNUSED_RESULT;
+
+ // Versions of the above functions which do not check that the process is
+ // single-threaded. After calling these functions, capabilities of other
+ // threads will not be changed. This is dangerous, do not use unless you know
+ // what you are doing.
+ static bool DropAllCapabilitiesOnCurrentThread() WARN_UNUSED_RESULT;
+ static bool SetCapabilitiesOnCurrentThread(
+ const std::vector<Capability>& caps) WARN_UNUSED_RESULT;
+
+ // Returns true if the current thread has either the effective, permitted, or
+ // inheritable flag set for the given capability.
+ static bool HasCapability(Capability cap);
+
+ // Return true iff there is any capability in any of the capabilities sets
+ // of the current thread.
+ static bool HasAnyCapability();
+
+ // Returns whether the kernel supports CLONE_NEWUSER and whether it would be
+ // possible to immediately move to a new user namespace. There is no point
+ // in using this method right before calling MoveToNewUserNS(), simply call
+ // MoveToNewUserNS() immediately. This method is only useful to test the
+ // ability to move to a user namespace ahead of time.
+ static bool CanCreateProcessInNewUserNS();
+
+ // Move the current process to a new "user namespace" as supported by Linux
+ // 3.8+ (CLONE_NEWUSER).
+ // The uid map will be set-up so that the perceived uid and gid will not
+ // change.
+ // If this call succeeds, the current process will be granted a full set of
+ // capabilities in the new namespace.
+ // This will fail if the process is not mono-threaded.
+ static bool MoveToNewUserNS() WARN_UNUSED_RESULT;
+
+ // Remove the ability of the process to access the file system. File
+ // descriptors which are already open prior to calling this API remain
+ // available.
+ // The implementation currently uses chroot(2) and requires CAP_SYS_CHROOT.
+ // CAP_SYS_CHROOT can be acquired by using the MoveToNewUserNS() API.
+ // |proc_fd| must be a file descriptor to /proc/ and must be the only open
+ // directory file descriptor of the process.
+ //
+ // CRITICAL:
+ // - the caller must close |proc_fd| eventually or access to the file
+ // system can be recovered.
+ // - DropAllCapabilities() must be called to prevent escapes.
+ static bool DropFileSystemAccess(int proc_fd) WARN_UNUSED_RESULT;
+
+ // Forks and drops capabilities in the child.
+ static pid_t ForkAndDropCapabilitiesInChild();
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Credentials);
+};
+
+} // namespace sandbox.
+
+#endif // SANDBOX_LINUX_SERVICES_CREDENTIALS_H_
diff --git a/libchrome/sandbox/linux/services/credentials_unittest.cc b/libchrome/sandbox/linux/services/credentials_unittest.cc
new file mode 100644
index 0000000..b95ba0b
--- /dev/null
+++ b/libchrome/sandbox/linux/services/credentials_unittest.cc
@@ -0,0 +1,270 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/credentials.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <sys/capability.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "sandbox/linux/services/proc_util.h"
+#include "sandbox/linux/services/syscall_wrappers.h"
+#include "sandbox/linux/system_headers/capability.h"
+#include "sandbox/linux/tests/unit_tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace sandbox {
+
+namespace {
+
+struct CapFreeDeleter {
+ inline void operator()(cap_t cap) const {
+ int ret = cap_free(cap);
+ CHECK_EQ(0, ret);
+ }
+};
+
+// Wrapper to manage libcap2's cap_t type.
+typedef std::unique_ptr<typeof(*((cap_t)0)), CapFreeDeleter> ScopedCap;
+
+bool WorkingDirectoryIsRoot() {
+ char current_dir[PATH_MAX];
+ char* cwd = getcwd(current_dir, sizeof(current_dir));
+ PCHECK(cwd);
+ if (strcmp("/", cwd)) return false;
+
+ // The current directory is the root. Add a few paranoid checks.
+ struct stat current;
+ CHECK_EQ(0, stat(".", &current));
+ struct stat parrent;
+ CHECK_EQ(0, stat("..", &parrent));
+ CHECK_EQ(current.st_dev, parrent.st_dev);
+ CHECK_EQ(current.st_ino, parrent.st_ino);
+ CHECK_EQ(current.st_mode, parrent.st_mode);
+ CHECK_EQ(current.st_uid, parrent.st_uid);
+ CHECK_EQ(current.st_gid, parrent.st_gid);
+ return true;
+}
+
+SANDBOX_TEST(Credentials, DropAllCaps) {
+ CHECK(Credentials::DropAllCapabilities());
+ CHECK(!Credentials::HasAnyCapability());
+}
+
+SANDBOX_TEST(Credentials, MoveToNewUserNS) {
+ CHECK(Credentials::DropAllCapabilities());
+ bool moved_to_new_ns = Credentials::MoveToNewUserNS();
+ fprintf(stdout,
+ "Unprivileged CLONE_NEWUSER supported: %s\n",
+ moved_to_new_ns ? "true." : "false.");
+ fflush(stdout);
+ if (!moved_to_new_ns) {
+ fprintf(stdout, "This kernel does not support unprivileged namespaces. "
+ "USERNS tests will succeed without running.\n");
+ fflush(stdout);
+ return;
+ }
+ CHECK(Credentials::HasAnyCapability());
+ CHECK(Credentials::DropAllCapabilities());
+ CHECK(!Credentials::HasAnyCapability());
+}
+
+SANDBOX_TEST(Credentials, CanCreateProcessInNewUserNS) {
+ CHECK(Credentials::DropAllCapabilities());
+ bool user_ns_supported = Credentials::CanCreateProcessInNewUserNS();
+ bool moved_to_new_ns = Credentials::MoveToNewUserNS();
+ CHECK_EQ(user_ns_supported, moved_to_new_ns);
+}
+
+SANDBOX_TEST(Credentials, UidIsPreserved) {
+ CHECK(Credentials::DropAllCapabilities());
+ uid_t old_ruid, old_euid, old_suid;
+ gid_t old_rgid, old_egid, old_sgid;
+ PCHECK(0 == getresuid(&old_ruid, &old_euid, &old_suid));
+ PCHECK(0 == getresgid(&old_rgid, &old_egid, &old_sgid));
+ // Probably missing kernel support.
+ if (!Credentials::MoveToNewUserNS()) return;
+ uid_t new_ruid, new_euid, new_suid;
+ PCHECK(0 == getresuid(&new_ruid, &new_euid, &new_suid));
+ CHECK(old_ruid == new_ruid);
+ CHECK(old_euid == new_euid);
+ CHECK(old_suid == new_suid);
+
+ gid_t new_rgid, new_egid, new_sgid;
+ PCHECK(0 == getresgid(&new_rgid, &new_egid, &new_sgid));
+ CHECK(old_rgid == new_rgid);
+ CHECK(old_egid == new_egid);
+ CHECK(old_sgid == new_sgid);
+}
+
+bool NewUserNSCycle() {
+ if (!Credentials::MoveToNewUserNS() ||
+ !Credentials::HasAnyCapability() ||
+ !Credentials::DropAllCapabilities() ||
+ Credentials::HasAnyCapability()) {
+ return false;
+ }
+ return true;
+}
+
+SANDBOX_TEST(Credentials, NestedUserNS) {
+ CHECK(Credentials::DropAllCapabilities());
+ // Probably missing kernel support.
+ if (!Credentials::MoveToNewUserNS()) return;
+ CHECK(Credentials::DropAllCapabilities());
+ // As of 3.12, the kernel has a limit of 32. See create_user_ns().
+ const int kNestLevel = 10;
+ for (int i = 0; i < kNestLevel; ++i) {
+ CHECK(NewUserNSCycle()) << "Creating new user NS failed at iteration "
+ << i << ".";
+ }
+}
+
+// Test the WorkingDirectoryIsRoot() helper.
+SANDBOX_TEST(Credentials, CanDetectRoot) {
+ PCHECK(0 == chdir("/proc/"));
+ CHECK(!WorkingDirectoryIsRoot());
+ PCHECK(0 == chdir("/"));
+ CHECK(WorkingDirectoryIsRoot());
+}
+
+// Disabled on ASAN because of crbug.com/451603.
+SANDBOX_TEST(Credentials, DISABLE_ON_ASAN(DropFileSystemAccessIsSafe)) {
+ CHECK(Credentials::DropAllCapabilities());
+ // Probably missing kernel support.
+ if (!Credentials::MoveToNewUserNS()) return;
+ CHECK(Credentials::DropFileSystemAccess(ProcUtil::OpenProc().get()));
+ CHECK(!base::DirectoryExists(base::FilePath("/proc")));
+ CHECK(WorkingDirectoryIsRoot());
+ CHECK(base::IsDirectoryEmpty(base::FilePath("/")));
+ // We want the chroot to never have a subdirectory. A subdirectory
+ // could allow a chroot escape.
+ CHECK_NE(0, mkdir("/test", 0700));
+}
+
+// Check that after dropping filesystem access and dropping privileges
+// it is not possible to regain capabilities.
+SANDBOX_TEST(Credentials, DISABLE_ON_ASAN(CannotRegainPrivileges)) {
+ base::ScopedFD proc_fd(ProcUtil::OpenProc());
+ CHECK(Credentials::DropAllCapabilities(proc_fd.get()));
+ // Probably missing kernel support.
+ if (!Credentials::MoveToNewUserNS()) return;
+ CHECK(Credentials::DropFileSystemAccess(proc_fd.get()));
+ CHECK(Credentials::DropAllCapabilities(proc_fd.get()));
+
+ // The kernel should now prevent us from regaining capabilities because we
+ // are in a chroot.
+ CHECK(!Credentials::CanCreateProcessInNewUserNS());
+ CHECK(!Credentials::MoveToNewUserNS());
+}
+
+SANDBOX_TEST(Credentials, SetCapabilities) {
+ // Probably missing kernel support.
+ if (!Credentials::MoveToNewUserNS())
+ return;
+
+ base::ScopedFD proc_fd(ProcUtil::OpenProc());
+
+ CHECK(Credentials::HasCapability(Credentials::Capability::SYS_ADMIN));
+ CHECK(Credentials::HasCapability(Credentials::Capability::SYS_CHROOT));
+
+ std::vector<Credentials::Capability> caps;
+ caps.push_back(Credentials::Capability::SYS_CHROOT);
+ CHECK(Credentials::SetCapabilities(proc_fd.get(), caps));
+
+ CHECK(!Credentials::HasCapability(Credentials::Capability::SYS_ADMIN));
+ CHECK(Credentials::HasCapability(Credentials::Capability::SYS_CHROOT));
+
+ const std::vector<Credentials::Capability> no_caps;
+ CHECK(Credentials::SetCapabilities(proc_fd.get(), no_caps));
+ CHECK(!Credentials::HasAnyCapability());
+}
+
+SANDBOX_TEST(Credentials, SetCapabilitiesAndChroot) {
+ // Probably missing kernel support.
+ if (!Credentials::MoveToNewUserNS())
+ return;
+
+ base::ScopedFD proc_fd(ProcUtil::OpenProc());
+
+ CHECK(Credentials::HasCapability(Credentials::Capability::SYS_CHROOT));
+ PCHECK(chroot("/") == 0);
+
+ std::vector<Credentials::Capability> caps;
+ caps.push_back(Credentials::Capability::SYS_CHROOT);
+ CHECK(Credentials::SetCapabilities(proc_fd.get(), caps));
+ PCHECK(chroot("/") == 0);
+
+ CHECK(Credentials::DropAllCapabilities());
+ PCHECK(chroot("/") == -1 && errno == EPERM);
+}
+
+SANDBOX_TEST(Credentials, SetCapabilitiesMatchesLibCap2) {
+ // Probably missing kernel support.
+ if (!Credentials::MoveToNewUserNS())
+ return;
+
+ base::ScopedFD proc_fd(ProcUtil::OpenProc());
+
+ std::vector<Credentials::Capability> caps;
+ caps.push_back(Credentials::Capability::SYS_CHROOT);
+ CHECK(Credentials::SetCapabilities(proc_fd.get(), caps));
+
+ ScopedCap actual_cap(cap_get_proc());
+ PCHECK(actual_cap != nullptr);
+
+ ScopedCap expected_cap(cap_init());
+ PCHECK(expected_cap != nullptr);
+
+ const cap_value_t allowed_cap = CAP_SYS_CHROOT;
+ for (const cap_flag_t flag : {CAP_EFFECTIVE, CAP_PERMITTED}) {
+ PCHECK(cap_set_flag(expected_cap.get(), flag, 1, &allowed_cap, CAP_SET) ==
+ 0);
+ }
+
+ CHECK_EQ(0, cap_compare(expected_cap.get(), actual_cap.get()));
+}
+
+volatile sig_atomic_t signal_handler_called;
+void SignalHandler(int sig) {
+ signal_handler_called = 1;
+}
+
+// Disabled on ASAN because of crbug.com/451603.
+SANDBOX_TEST(Credentials, DISABLE_ON_ASAN(DropFileSystemAccessPreservesTLS)) {
+ // Probably missing kernel support.
+ if (!Credentials::MoveToNewUserNS()) return;
+ CHECK(Credentials::DropFileSystemAccess(ProcUtil::OpenProc().get()));
+
+ // In glibc, pthread_getattr_np makes an assertion about the cached PID/TID in
+ // TLS.
+ pthread_attr_t attr;
+ EXPECT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
+
+ // raise also uses the cached TID in glibc.
+ struct sigaction action = {};
+ action.sa_handler = &SignalHandler;
+ PCHECK(sigaction(SIGUSR1, &action, nullptr) == 0);
+
+ PCHECK(raise(SIGUSR1) == 0);
+ CHECK_EQ(1, signal_handler_called);
+}
+
+} // namespace.
+
+} // namespace sandbox.
diff --git a/libchrome/sandbox/linux/services/init_process_reaper.cc b/libchrome/sandbox/linux/services/init_process_reaper.cc
new file mode 100644
index 0000000..2ff18b5
--- /dev/null
+++ b/libchrome/sandbox/linux/services/init_process_reaper.cc
@@ -0,0 +1,101 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/init_process_reaper.h"
+
+#include <signal.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+
+namespace sandbox {
+
+namespace {
+
+void DoNothingSignalHandler(int signal) {}
+
+} // namespace
+
+bool CreateInitProcessReaper(base::Closure* post_fork_parent_callback) {
+ int sync_fds[2];
+ // We want to use send, so we can't use a pipe
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, sync_fds)) {
+ PLOG(ERROR) << "Failed to create socketpair";
+ return false;
+ }
+ pid_t child_pid = fork();
+ if (child_pid == -1) {
+ int close_ret;
+ close_ret = IGNORE_EINTR(close(sync_fds[0]));
+ DPCHECK(!close_ret);
+ close_ret = IGNORE_EINTR(close(sync_fds[1]));
+ DPCHECK(!close_ret);
+ return false;
+ }
+ if (child_pid) {
+ // In the parent, assuming the role of an init process.
+ // The disposition for SIGCHLD cannot be SIG_IGN or wait() will only return
+ // once all of our children are dead. Since we're init we need to reap
+ // children as they come.
+ struct sigaction action;
+ memset(&action, 0, sizeof(action));
+ action.sa_handler = &DoNothingSignalHandler;
+ CHECK(sigaction(SIGCHLD, &action, NULL) == 0);
+
+ int close_ret;
+ close_ret = IGNORE_EINTR(close(sync_fds[0]));
+ DPCHECK(!close_ret);
+ close_ret = shutdown(sync_fds[1], SHUT_RD);
+ DPCHECK(!close_ret);
+ if (post_fork_parent_callback)
+ post_fork_parent_callback->Run();
+ // Tell the child to continue
+ CHECK(HANDLE_EINTR(send(sync_fds[1], "C", 1, MSG_NOSIGNAL)) == 1);
+ close_ret = IGNORE_EINTR(close(sync_fds[1]));
+ DPCHECK(!close_ret);
+
+ for (;;) {
+ // Loop until we have reaped our one natural child
+ siginfo_t reaped_child_info;
+ int wait_ret =
+ HANDLE_EINTR(waitid(P_ALL, 0, &reaped_child_info, WEXITED));
+ if (wait_ret)
+ _exit(1);
+ if (reaped_child_info.si_pid == child_pid) {
+ int exit_code = 0;
+ // We're done waiting
+ if (reaped_child_info.si_code == CLD_EXITED) {
+ exit_code = reaped_child_info.si_status;
+ }
+ // Exit with the same exit code as our child. Exit with 0 if we got
+ // signaled.
+ _exit(exit_code);
+ }
+ }
+ } else {
+ // The child needs to wait for the parent to run the callback to avoid a
+ // race condition.
+ int close_ret;
+ close_ret = IGNORE_EINTR(close(sync_fds[1]));
+ DPCHECK(!close_ret);
+ close_ret = shutdown(sync_fds[0], SHUT_WR);
+ DPCHECK(!close_ret);
+ char should_continue;
+ int read_ret = HANDLE_EINTR(read(sync_fds[0], &should_continue, 1));
+ close_ret = IGNORE_EINTR(close(sync_fds[0]));
+ DPCHECK(!close_ret);
+ if (read_ret == 1)
+ return true;
+ else
+ return false;
+ }
+}
+
+} // namespace sandbox.
diff --git a/libchrome/sandbox/linux/services/init_process_reaper.h b/libchrome/sandbox/linux/services/init_process_reaper.h
new file mode 100644
index 0000000..840f6fc
--- /dev/null
+++ b/libchrome/sandbox/linux/services/init_process_reaper.h
@@ -0,0 +1,25 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SERVICES_INIT_PROCESS_REAPER_H_
+#define SANDBOX_LINUX_SERVICES_INIT_PROCESS_REAPER_H_
+
+#include "base/callback_forward.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+
+// The current process will fork(). The parent will become a process reaper
+// like init(1). The child will continue normally (after this function
+// returns).
+// If not NULL, |post_fork_parent_callback| will run in the parent almost
+// immediately after fork().
+// Since this function calls fork(), it's very important that the caller has
+// only one thread running.
+SANDBOX_EXPORT bool CreateInitProcessReaper(
+ base::Closure* post_fork_parent_callback);
+
+} // namespace sandbox.
+
+#endif // SANDBOX_LINUX_SERVICES_INIT_PROCESS_REAPER_H_
diff --git a/libchrome/sandbox/linux/services/namespace_sandbox.cc b/libchrome/sandbox/linux/services/namespace_sandbox.cc
new file mode 100644
index 0000000..00e5947
--- /dev/null
+++ b/libchrome/sandbox/linux/services/namespace_sandbox.cc
@@ -0,0 +1,245 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/namespace_sandbox.h"
+
+#include <sched.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/command_line.h"
+#include "base/environment.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/launch.h"
+#include "base/process/process.h"
+#include "sandbox/linux/services/credentials.h"
+#include "sandbox/linux/services/namespace_utils.h"
+#include "sandbox/linux/services/syscall_wrappers.h"
+#include "sandbox/linux/system_headers/linux_signal.h"
+
+namespace sandbox {
+
+namespace {
+
+// Names of environment variables used to tell the sandboxed child which
+// namespace types it was launched inside of (see SetEnvironForNamespaceType
+// and the InNew*Namespace accessors).
+const char kSandboxUSERNSEnvironmentVarName[] = "SBX_USER_NS";
+const char kSandboxPIDNSEnvironmentVarName[] = "SBX_PID_NS";
+const char kSandboxNETNSEnvironmentVarName[] = "SBX_NET_NS";
+
+#if !defined(OS_NACL_NONSFI)
+// PreExecDelegate that runs in the child between fork() and exec(). It maps
+// the launching process's uid/gid to the same ids inside the new user
+// namespace. Everything in RunAsyncSafe must be async-signal-safe, hence
+// RAW_CHECK and the async-signal-safe NamespaceUtils helpers.
+class WriteUidGidMapDelegate : public base::LaunchOptions::PreExecDelegate {
+ public:
+  // The ids and the setgroups capability are captured at construction time
+  // (i.e. in the launching process), not inside RunAsyncSafe.
+  WriteUidGidMapDelegate()
+      : uid_(getuid()),
+        gid_(getgid()),
+        supports_deny_setgroups_(
+            NamespaceUtils::KernelSupportsDenySetgroups()) {}
+
+  ~WriteUidGidMapDelegate() override {}
+
+  void RunAsyncSafe() override {
+    // On kernels exposing /proc/self/setgroups, setgroups() must be denied
+    // before a gid mapping may be written (see NamespaceUtils).
+    if (supports_deny_setgroups_) {
+      RAW_CHECK(NamespaceUtils::DenySetgroups());
+    }
+    RAW_CHECK(NamespaceUtils::WriteToIdMapFile("/proc/self/uid_map", uid_));
+    RAW_CHECK(NamespaceUtils::WriteToIdMapFile("/proc/self/gid_map", gid_));
+  }
+
+ private:
+  const uid_t uid_;
+  const gid_t gid_;
+  const bool supports_deny_setgroups_;
+  DISALLOW_COPY_AND_ASSIGN(WriteUidGidMapDelegate);
+};
+
+// Records in |environ| whether the namespace type associated with |env_var|
+// was granted, so the child can query it via the InNew*Namespace accessors.
+void SetEnvironForNamespaceType(base::EnvironmentMap* environ,
+                                base::NativeEnvironmentString env_var,
+                                bool value) {
+  // An empty string causes the env var to be unset in the child process.
+  (*environ)[env_var] = value ? "1" : "";
+}
+#endif  // !defined(OS_NACL_NONSFI)
+
+// Linux supports up to 64 signals. This should be updated if that ever changes.
+int g_signal_exit_codes[64];
+
+// Handler installed by InstallTerminationSignalHandler. Must remain
+// async-signal-safe: it only reads a global array and calls _exit().
+void TerminationSignalHandler(int sig) {
+  // Return a special exit code so that the process is detected as terminated by
+  // a signal.
+  const size_t sig_idx = static_cast<size_t>(sig);
+  if (sig_idx < arraysize(g_signal_exit_codes)) {
+    _exit(g_signal_exit_codes[sig_idx]);
+  }
+
+  // Fallback for signal numbers outside the table.
+  _exit(NamespaceSandbox::SignalExitCode(sig));
+}
+
+}  // namespace
+
+#if !defined(OS_NACL_NONSFI)
+// Default to requesting every supported namespace type (see header comment).
+NamespaceSandbox::Options::Options()
+    : ns_types(CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNET),
+      fail_on_unsupported_ns_type(false) {}
+
+NamespaceSandbox::Options::~Options() {}
+
+// static
+// Convenience overload: forwards to LaunchProcessWithOptions with default
+// Options.
+base::Process NamespaceSandbox::LaunchProcess(
+    const base::CommandLine& cmdline,
+    const base::LaunchOptions& launch_options) {
+  return LaunchProcessWithOptions(cmdline.argv(), launch_options, Options());
+}
+
+// static
+// Convenience overload taking a raw argv vector.
+base::Process NamespaceSandbox::LaunchProcess(
+    const std::vector<std::string>& argv,
+    const base::LaunchOptions& launch_options) {
+  return LaunchProcessWithOptions(argv, launch_options, Options());
+}
+
+// static
+// Forwards to the argv-based overload, which does the real work.
+base::Process NamespaceSandbox::LaunchProcessWithOptions(
+    const base::CommandLine& cmdline,
+    const base::LaunchOptions& launch_options,
+    const Options& ns_sandbox_options) {
+  return LaunchProcessWithOptions(cmdline.argv(), launch_options,
+                                  ns_sandbox_options);
+}
+
+// static
+// Launches |argv| inside new namespaces. Copies |launch_options|, installs
+// the uid/gid-mapping pre-exec delegate and the computed clone flags, and
+// marks the granted namespace types in the child's environment.
+base::Process NamespaceSandbox::LaunchProcessWithOptions(
+    const std::vector<std::string>& argv,
+    const base::LaunchOptions& launch_options,
+    const Options& ns_sandbox_options) {
+  // These fields may not be set by the caller.
+  CHECK(launch_options.pre_exec_delegate == nullptr);
+  CHECK_EQ(0, launch_options.clone_flags);
+
+  // Request only the namespace types that are both asked for and supported
+  // by the running kernel — unless the caller opted into a hard failure.
+  int clone_flags = 0;
+  const int kSupportedTypes[] = {CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET};
+  for (const int ns_type : kSupportedTypes) {
+    if ((ns_type & ns_sandbox_options.ns_types) == 0) {
+      continue;
+    }
+
+    if (NamespaceUtils::KernelSupportsUnprivilegedNamespace(ns_type)) {
+      clone_flags |= ns_type;
+    } else if (ns_sandbox_options.fail_on_unsupported_ns_type) {
+      // Return an invalid Process to signal failure.
+      return base::Process();
+    }
+  }
+  // A user namespace is the minimum requirement for this sandbox.
+  CHECK(clone_flags & CLONE_NEWUSER);
+
+  WriteUidGidMapDelegate write_uid_gid_map_delegate;
+
+  base::LaunchOptions launch_options_copy = launch_options;
+  launch_options_copy.pre_exec_delegate = &write_uid_gid_map_delegate;
+  launch_options_copy.clone_flags = clone_flags;
+
+  const std::pair<int, const char*> clone_flag_environ[] = {
+      std::make_pair(CLONE_NEWUSER, kSandboxUSERNSEnvironmentVarName),
+      std::make_pair(CLONE_NEWPID, kSandboxPIDNSEnvironmentVarName),
+      std::make_pair(CLONE_NEWNET, kSandboxNETNSEnvironmentVarName),
+  };
+
+  // Advertise to the child (via the SBX_*_NS variables) which namespaces it
+  // received, so the InNew*Namespace accessors work there.
+  base::EnvironmentMap* environ = &launch_options_copy.environ;
+  for (const auto& entry : clone_flag_environ) {
+    const int flag = entry.first;
+    const char* environ_name = entry.second;
+    SetEnvironForNamespaceType(environ, environ_name, clone_flags & flag);
+  }
+
+  return base::LaunchProcess(argv, launch_options_copy);
+}
+#endif // !defined(OS_NACL_NONSFI)
+
+// static
+pid_t NamespaceSandbox::ForkInNewPidNamespace(bool drop_capabilities_in_child) {
+  const pid_t pid =
+      base::ForkWithFlags(CLONE_NEWPID | LINUX_SIGCHLD, nullptr, nullptr);
+  if (pid < 0) {
+    // The clone failed; propagate the error value to the caller.
+    return pid;
+  }
+
+  if (pid == 0) {
+    // Child: it is init (pid 1) of the fresh PID namespace.
+    DCHECK_EQ(1, getpid());
+    if (drop_capabilities_in_child) {
+      // Since we just forked, we are single-threaded, so this should be safe.
+      CHECK(Credentials::DropAllCapabilitiesOnCurrentThread());
+    }
+    return 0;
+  }
+
+  // Parent: return the child's pid, mirroring fork() semantics.
+  return pid;
+}
+
+// static
+void NamespaceSandbox::InstallDefaultTerminationSignalHandlers() {
+  // Signals whose default action terminates the process; as the init of a
+  // PID namespace, the process would otherwise ignore them (see the header).
+  static const int kDefaultTermSignals[] = {
+      LINUX_SIGHUP, LINUX_SIGINT, LINUX_SIGABRT, LINUX_SIGQUIT,
+      LINUX_SIGPIPE, LINUX_SIGTERM, LINUX_SIGUSR1, LINUX_SIGUSR2,
+  };
+
+  for (const int sig : kDefaultTermSignals) {
+    // Best effort: InstallTerminationSignalHandler returns false (and is a
+    // no-op) when a handler was already installed for |sig|.
+    InstallTerminationSignalHandler(sig, SignalExitCode(sig));
+  }
+}
+
+// static
+bool NamespaceSandbox::InstallTerminationSignalHandler(
+    int sig,
+    int exit_code) {
+  // Inspect the currently-installed disposition for |sig| first.
+  struct sigaction old_action;
+  PCHECK(sys_sigaction(sig, nullptr, &old_action) == 0);
+
+#if !defined(OS_NACL_NONSFI)
+  // Never override an existing SA_SIGINFO-style handler.
+  if (old_action.sa_flags & SA_SIGINFO &&
+      old_action.sa_sigaction != nullptr) {
+    return false;
+  }
+#endif
+
+  // Never override an existing plain handler (or an explicit SIG_IGN) either.
+  if (old_action.sa_handler != LINUX_SIG_DFL) {
+    return false;
+  }
+
+  const size_t sig_idx = static_cast<size_t>(sig);
+  CHECK_LT(sig_idx, arraysize(g_signal_exit_codes));
+
+  // Exit codes must fit in the 8 bits reported via waitpid() status.
+  DCHECK_GE(exit_code, 0);
+  DCHECK_LT(exit_code, 256);
+
+  // Record the code for TerminationSignalHandler, then install it.
+  g_signal_exit_codes[sig_idx] = exit_code;
+
+  struct sigaction action = {};
+  action.sa_handler = &TerminationSignalHandler;
+  PCHECK(sys_sigaction(sig, &action, nullptr) == 0);
+  return true;
+}
+
+// static
+bool NamespaceSandbox::InNewUserNamespace() {
+  // The launcher sets SBX_USER_NS for the child iff CLONE_NEWUSER was used;
+  // an empty value in the environment map unsets the variable entirely.
+  return getenv(kSandboxUSERNSEnvironmentVarName) != nullptr;
+}
+
+// static
+bool NamespaceSandbox::InNewPidNamespace() {
+  // Same mechanism as InNewUserNamespace(), keyed on SBX_PID_NS.
+  return getenv(kSandboxPIDNSEnvironmentVarName) != nullptr;
+}
+
+// static
+bool NamespaceSandbox::InNewNetNamespace() {
+  // Same mechanism as InNewUserNamespace(), keyed on SBX_NET_NS.
+  return getenv(kSandboxNETNSEnvironmentVarName) != nullptr;
+}
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/services/namespace_sandbox.h b/libchrome/sandbox/linux/services/namespace_sandbox.h
new file mode 100644
index 0000000..b5832fb
--- /dev/null
+++ b/libchrome/sandbox/linux/services/namespace_sandbox.h
@@ -0,0 +1,126 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SERVICES_NAMESPACE_SANDBOX_H_
+#define SANDBOX_LINUX_SERVICES_NAMESPACE_SANDBOX_H_
+
+#include <sys/types.h>
+
+#include <string>
+#include <vector>
+
+#include "base/command_line.h"
+#include "base/macros.h"
+#include "base/process/launch.h"
+#include "base/process/process.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+
+// Helper class for starting a process inside a new user, PID, and network
+// namespace. Before using a namespace sandbox, check for namespaces support
+// using Credentials::CanCreateProcessInNewUserNS.
+//
+// A typical use for "A" launching a sandboxed process "B" would be:
+// 1. A sets up a command line and launch options for process B.
+// 2. A launches B with LaunchProcess.
+// 3. B should be prepared to assume the role of init(1). In particular, apart
+// from SIGKILL and SIGSTOP, B cannot receive any signal for which it does
+// not have an explicit signal handler registered.
+// If B dies, all the processes in the namespace will die.
+// B can fork() and the parent can assume the role of init(1), by using
+// CreateInitProcessReaper().
+// 4. B chroots using Credentials::MoveToNewUserNS() and
+// Credentials::DropFileSystemAccess()
+// 5. B drops capabilities gained by entering the new user namespace with
+// Credentials::DropAllCapabilities().
+class SANDBOX_EXPORT NamespaceSandbox {
+ public:
+#if !defined(OS_NACL_NONSFI)
+  struct Options {
+    Options();
+    ~Options();
+
+    // Bitmask of namespace types. Must be some combination of CLONE_NEWUSER
+    // (required), CLONE_NEWPID, and CLONE_NEWNET. Defaults to all of the above.
+    int ns_types;
+
+    // Fail if any of the namespace types are not supported. Defaults to false.
+    bool fail_on_unsupported_ns_type;
+  };
+
+  // Launch a new process inside its own user/PID/network namespaces (depending
+  // on kernel support). Requires at a minimum that user namespaces are
+  // supported (use Credentials::CanCreateProcessInNewUserNS to check this).
+  //
+  // pre_exec_delegate and clone_flags fields of LaunchOptions should be nullptr
+  // and 0, respectively, since this function makes a copy of options and
+  // overrides them.
+  static base::Process LaunchProcess(const base::CommandLine& cmdline,
+                                     const base::LaunchOptions& launch_options);
+  static base::Process LaunchProcess(const std::vector<std::string>& argv,
+                                     const base::LaunchOptions& launch_options);
+
+  // Versions which take namespace sandbox options. These allow fine grained
+  // control over the types of namespaces used.
+  static base::Process LaunchProcessWithOptions(
+      const base::CommandLine& cmdline,
+      const base::LaunchOptions& launch_options,
+      const Options& ns_sandbox_options);
+  static base::Process LaunchProcessWithOptions(
+      const std::vector<std::string>& argv,
+      const base::LaunchOptions& launch_options,
+      const Options& ns_sandbox_options);
+#endif  // !defined(OS_NACL_NONSFI)
+
+  // Forks a process in its own PID namespace. The child process is the init
+  // process inside of the PID namespace, so if the child needs to fork further,
+  // it should call CreateInitProcessReaper, which turns the init process into a
+  // reaper process.
+  //
+  // Otherwise, the child should setup handlers for signals which should
+  // terminate the process using InstallDefaultTerminationSignalHandlers or
+  // InstallTerminationSignalHandler. This works around the fact that init
+  // processes ignore such signals unless they have an explicit handler set.
+  //
+  // This function requires CAP_SYS_ADMIN. If |drop_capabilities_in_child| is
+  // true, then capabilities are dropped in the child.
+  static pid_t ForkInNewPidNamespace(bool drop_capabilities_in_child);
+
+  // Installs a signal handler for:
+  //
+  // SIGHUP, SIGINT, SIGABRT, SIGQUIT, SIGPIPE, SIGTERM, SIGUSR1, SIGUSR2
+  //
+  // that exits with SignalExitCode(sig). These are signals whose default action
+  // is to terminate the program (apart from SIGILL, SIGFPE, and SIGSEGV, which
+  // will still terminate the process if e.g. an illegal instruction is
+  // encountered, etc.).
+  //
+  // If any of these already had a signal handler installed, this function will
+  // not override them.
+  static void InstallDefaultTerminationSignalHandlers();
+
+  // Installs a signal handler for |sig| which exits with |exit_code|. If a
+  // signal handler was already present for |sig|, does nothing and returns
+  // false.
+  static bool InstallTerminationSignalHandler(int sig, int exit_code);
+
+  // Returns an exit code corresponding to the process being killed by sig. This
+  // is the same as exit code that NaCl's default signal handler uses.
+  static int SignalExitCode(int sig) { return -sig & 0xff; }
+
+  // Returns whether the namespace sandbox created a new user, PID, and network
+  // namespace. In particular, InNewUserNamespace should return true iff the
+  // process was started via this class.
+  static bool InNewUserNamespace();
+  static bool InNewPidNamespace();
+  static bool InNewNetNamespace();
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(NamespaceSandbox);
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SERVICES_NAMESPACE_SANDBOX_H_
diff --git a/libchrome/sandbox/linux/services/namespace_sandbox_unittest.cc b/libchrome/sandbox/linux/services/namespace_sandbox_unittest.cc
new file mode 100644
index 0000000..c1acca6
--- /dev/null
+++ b/libchrome/sandbox/linux/services/namespace_sandbox_unittest.cc
@@ -0,0 +1,241 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/namespace_sandbox.h"
+
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <string>
+#include <utility>
+
+#include "base/command_line.h"
+#include "base/files/file_enumerator.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/process/launch.h"
+#include "base/process/process.h"
+#include "base/test/multiprocess_test.h"
+#include "sandbox/linux/services/credentials.h"
+#include "sandbox/linux/services/namespace_utils.h"
+#include "sandbox/linux/services/proc_util.h"
+#include "sandbox/linux/tests/unit_tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace sandbox {
+
+namespace {
+
+// Returns true iff "/" contains no files or directories (i.e. file system
+// access has been fully dropped).
+bool RootDirectoryIsEmpty() {
+  base::FilePath root("/");
+  int file_type =
+      base::FileEnumerator::DIRECTORIES | base::FileEnumerator::FILES;
+  base::FileEnumerator enumerator_before(root, false, file_type);
+  return enumerator_before.Next().empty();
+}
+
+// Fixture that launches a named multiprocess-test child inside the namespace
+// sandbox and expects it to exit with 0.
+class NamespaceSandboxTest : public base::MultiProcessTest {
+ public:
+  void TestProc(const std::string& procname) {
+    TestProcWithOptions(procname, NamespaceSandbox::Options());
+  }
+
+  void TestProcWithOptions(
+      const std::string& procname,
+      const NamespaceSandbox::Options& ns_sandbox_options) {
+    // Silently skip on kernels without user namespace support.
+    if (!Credentials::CanCreateProcessInNewUserNS()) {
+      return;
+    }
+
+    // Keep the child's stdout/stderr attached to the test's for diagnostics.
+    base::FileHandleMappingVector fds_to_remap = {
+        std::make_pair(STDOUT_FILENO, STDOUT_FILENO),
+        std::make_pair(STDERR_FILENO, STDERR_FILENO),
+    };
+    base::LaunchOptions launch_options;
+    launch_options.fds_to_remap = &fds_to_remap;
+
+    base::Process process = NamespaceSandbox::LaunchProcessWithOptions(
+        MakeCmdLine(procname), launch_options, ns_sandbox_options);
+    ASSERT_TRUE(process.IsValid());
+
+    // Seed with a non-zero value so a failed WaitForExit can't fake success.
+    const int kDummyExitCode = 42;
+    int exit_code = kDummyExitCode;
+    EXPECT_TRUE(process.WaitForExit(&exit_code));
+    EXPECT_EQ(0, exit_code);
+  }
+};
+
+// Child: verifies the SBX_* environment matches actual kernel support and
+// that it is pid 1 when inside a new PID namespace.
+MULTIPROCESS_TEST_MAIN(SimpleChildProcess) {
+  const bool in_user_ns = NamespaceSandbox::InNewUserNamespace();
+  const bool in_pid_ns = NamespaceSandbox::InNewPidNamespace();
+  const bool in_net_ns = NamespaceSandbox::InNewNetNamespace();
+  CHECK(in_user_ns);
+  CHECK_EQ(in_pid_ns,
+           NamespaceUtils::KernelSupportsUnprivilegedNamespace(CLONE_NEWPID));
+  CHECK_EQ(in_net_ns,
+           NamespaceUtils::KernelSupportsUnprivilegedNamespace(CLONE_NEWNET));
+  if (in_pid_ns) {
+    CHECK_EQ(1, getpid());
+  }
+  return 0;
+}
+
+TEST_F(NamespaceSandboxTest, BasicUsage) {
+  TestProc("SimpleChildProcess");
+}
+
+// Child: like SimpleChildProcess, but launched without CLONE_NEWNET, so it
+// must never find itself in a new network namespace.
+MULTIPROCESS_TEST_MAIN(PidNsOnlyChildProcess) {
+  const bool in_user_ns = NamespaceSandbox::InNewUserNamespace();
+  const bool in_pid_ns = NamespaceSandbox::InNewPidNamespace();
+  const bool in_net_ns = NamespaceSandbox::InNewNetNamespace();
+  CHECK(in_user_ns);
+  CHECK_EQ(in_pid_ns,
+           NamespaceUtils::KernelSupportsUnprivilegedNamespace(CLONE_NEWPID));
+  CHECK(!in_net_ns);
+  if (in_pid_ns) {
+    CHECK_EQ(1, getpid());
+  }
+  return 0;
+}
+
+
+TEST_F(NamespaceSandboxTest, BasicUsageWithOptions) {
+  NamespaceSandbox::Options options;
+  options.ns_types = CLONE_NEWUSER | CLONE_NEWPID;
+  TestProcWithOptions("PidNsOnlyChildProcess", options);
+}
+
+// Child: drops file system access and verifies "/" becomes empty.
+MULTIPROCESS_TEST_MAIN(ChrootMe) {
+  CHECK(!RootDirectoryIsEmpty());
+  CHECK(sandbox::Credentials::MoveToNewUserNS());
+  CHECK(sandbox::Credentials::DropFileSystemAccess(ProcUtil::OpenProc().get()));
+  CHECK(RootDirectoryIsEmpty());
+  return 0;
+}
+
+// Temporarily disabled on ASAN due to crbug.com/451603.
+TEST_F(NamespaceSandboxTest, DISABLE_ON_ASAN(ChrootAndDropCapabilities)) {
+  TestProc("ChrootMe");
+}
+
+// Child: launches a namespace sandbox of its own, proving nesting works.
+MULTIPROCESS_TEST_MAIN(NestedNamespaceSandbox) {
+  base::FileHandleMappingVector fds_to_remap = {
+      std::make_pair(STDOUT_FILENO, STDOUT_FILENO),
+      std::make_pair(STDERR_FILENO, STDERR_FILENO),
+  };
+  base::LaunchOptions launch_options;
+  launch_options.fds_to_remap = &fds_to_remap;
+  base::Process process = NamespaceSandbox::LaunchProcess(
+      base::CommandLine(base::FilePath("/bin/true")), launch_options);
+  CHECK(process.IsValid());
+
+  // Seed with a non-zero value so a failed WaitForExit can't fake success.
+  const int kDummyExitCode = 42;
+  int exit_code = kDummyExitCode;
+  CHECK(process.WaitForExit(&exit_code));
+  CHECK_EQ(0, exit_code);
+  return 0;
+}
+
+TEST_F(NamespaceSandboxTest, NestedNamespaceSandbox) {
+  TestProc("NestedNamespaceSandbox");
+}
+
+const int kNormalExitCode = 0;
+
+// Ensure that CHECK(false) is distinguishable from _exit(kNormalExitCode).
+// Allowing noise since CHECK(false) will write a stack trace to stderr.
+SANDBOX_TEST_ALLOW_NOISE(ForkInNewPidNamespace, CheckDoesNotReturnZero) {
+  if (!Credentials::CanCreateProcessInNewUserNS()) {
+    return;
+  }
+
+  CHECK(sandbox::Credentials::MoveToNewUserNS());
+  const pid_t pid = NamespaceSandbox::ForkInNewPidNamespace(
+      /*drop_capabilities_in_child=*/true);
+  CHECK_GE(pid, 0);
+
+  if (pid == 0) {
+    CHECK(false);
+    // Unreachable: the CHECK above must abort before this exit runs.
+    _exit(kNormalExitCode);
+  }
+
+  int status;
+  PCHECK(waitpid(pid, &status, 0) == pid);
+  if (WIFEXITED(status)) {
+    CHECK_NE(kNormalExitCode, WEXITSTATUS(status));
+  }
+}
+
+// The child of ForkInNewPidNamespace is init (pid 1) of its namespace and,
+// having requested it, holds no capabilities.
+SANDBOX_TEST(ForkInNewPidNamespace, BasicUsage) {
+  if (!Credentials::CanCreateProcessInNewUserNS()) {
+    return;
+  }
+
+  CHECK(sandbox::Credentials::MoveToNewUserNS());
+  const pid_t pid = NamespaceSandbox::ForkInNewPidNamespace(
+      /*drop_capabilities_in_child=*/true);
+  CHECK_GE(pid, 0);
+
+  if (pid == 0) {
+    CHECK_EQ(1, getpid());
+    CHECK(!Credentials::HasAnyCapability());
+    _exit(kNormalExitCode);
+  }
+
+  int status;
+  PCHECK(waitpid(pid, &status, 0) == pid);
+  CHECK(WIFEXITED(status));
+  CHECK_EQ(kNormalExitCode, WEXITSTATUS(status));
+}
+
+// An installed termination handler must turn a fatal signal into the
+// corresponding SignalExitCode exit status, even for the namespace's init.
+SANDBOX_TEST(ForkInNewPidNamespace, ExitWithSignal) {
+  if (!Credentials::CanCreateProcessInNewUserNS()) {
+    return;
+  }
+
+  CHECK(sandbox::Credentials::MoveToNewUserNS());
+  const pid_t pid = NamespaceSandbox::ForkInNewPidNamespace(
+      /*drop_capabilities_in_child=*/true);
+  CHECK_GE(pid, 0);
+
+  if (pid == 0) {
+    CHECK_EQ(1, getpid());
+    CHECK(!Credentials::HasAnyCapability());
+    CHECK(NamespaceSandbox::InstallTerminationSignalHandler(
+        SIGTERM, NamespaceSandbox::SignalExitCode(SIGTERM)));
+    // The handler _exit()s the process; loop until the signal is delivered.
+    while (true) {
+      raise(SIGTERM);
+    }
+  }
+
+  int status;
+  PCHECK(waitpid(pid, &status, 0) == pid);
+  CHECK(WIFEXITED(status));
+  CHECK_EQ(NamespaceSandbox::SignalExitCode(SIGTERM), WEXITSTATUS(status));
+}
+
+// Set by the handler below so the test can observe that it (and not a
+// termination handler) received the signal.
+volatile sig_atomic_t signal_handler_called;
+// SIGUSR1 handler. Despite its name it does not exit; it only records that
+// it ran.
+void ExitSuccessfully(int sig) {
+  signal_handler_called = 1;
+}
+
+SANDBOX_TEST(InstallTerminationSignalHandler, DoesNotOverrideExistingHandlers) {
+  // Install a custom handler for SIGUSR1 first...
+  struct sigaction action = {};
+  action.sa_handler = &ExitSuccessfully;
+  PCHECK(sigaction(SIGUSR1, &action, nullptr) == 0);
+
+  // ...then verify the sandbox helpers refuse to replace it.
+  NamespaceSandbox::InstallDefaultTerminationSignalHandlers();
+  CHECK(!NamespaceSandbox::InstallTerminationSignalHandler(
+      SIGUSR1, NamespaceSandbox::SignalExitCode(SIGUSR1)));
+
+  // The original handler must still be in place.
+  raise(SIGUSR1);
+  CHECK_EQ(1, signal_handler_called);
+}
+
+} // namespace
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/services/namespace_utils.cc b/libchrome/sandbox/linux/services/namespace_utils.cc
new file mode 100644
index 0000000..97add26
--- /dev/null
+++ b/libchrome/sandbox/linux/services/namespace_utils.cc
@@ -0,0 +1,118 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/namespace_utils.h"
+
+#include <fcntl.h>
+#include <sched.h>
+#include <stddef.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <string>
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/launch.h"
+#include "base/strings/safe_sprintf.h"
+#include "base/third_party/valgrind/valgrind.h"
+
+namespace sandbox {
+
+namespace {
+// Thin wrapper so the valgrind macro is referenced in only one place.
+bool IsRunningOnValgrind() {
+  return RUNNING_ON_VALGRIND;
+}
+
+// File used to disable setgroups() inside a user namespace (Linux 3.19+).
+const char kProcSelfSetgroups[] = "/proc/self/setgroups";
+}  // namespace
+
+// static
+bool NamespaceUtils::WriteToIdMapFile(const char* map_file, generic_id_t id) {
+  // This function needs to be async-signal-safe, as it may be called in between
+  // fork and exec.
+
+  int fd = HANDLE_EINTR(open(map_file, O_WRONLY));
+  if (fd == -1) {
+    return false;
+  }
+
+  // Single-entry identity mapping: |id| inside maps to |id| outside.
+  const generic_id_t inside_id = id;
+  const generic_id_t outside_id = id;
+
+  // SafeSPrintf (not snprintf) because it is async-signal-safe.
+  char mapping[64];
+  const ssize_t len =
+      base::strings::SafeSPrintf(mapping, "%d %d 1\n", inside_id, outside_id);
+  const ssize_t rc = HANDLE_EINTR(write(fd, mapping, len));
+  RAW_CHECK(IGNORE_EINTR(close(fd)) == 0);
+  // Success iff the whole mapping string was written.
+  return rc == len;
+}
+
+// static
+bool NamespaceUtils::KernelSupportsUnprivilegedNamespace(int type) {
+  // Valgrind will let clone(2) pass-through, but doesn't support unshare(),
+  // so always consider namespaces unsupported there.
+  if (IsRunningOnValgrind()) {
+    return false;
+  }
+
+  // As of Linux 3.8, /proc/self/ns/* files exist for all namespace types. Since
+  // user namespaces were added in 3.8, it is OK to rely on the existence of
+  // /proc/self/ns/*.
+  if (!base::PathExists(base::FilePath("/proc/self/ns/user"))) {
+    return false;
+  }
+
+  // Map |type| to the /proc file whose presence indicates kernel support.
+  const char* path;
+  switch (type) {
+    case CLONE_NEWUSER:
+      // User-namespace support was already established above.
+      return true;
+    case CLONE_NEWIPC:
+      path = "/proc/self/ns/ipc";
+      break;
+    case CLONE_NEWNET:
+      path = "/proc/self/ns/net";
+      break;
+    case CLONE_NEWNS:
+      path = "/proc/self/ns/mnt";
+      break;
+    case CLONE_NEWPID:
+      path = "/proc/self/ns/pid";
+      break;
+    case CLONE_NEWUTS:
+      path = "/proc/self/ns/uts";
+      break;
+    default:
+      // Callers must pass one of the CLONE_NEW* values listed in the header.
+      NOTREACHED();
+      return false;
+  }
+
+  return base::PathExists(base::FilePath(path));
+}
+
+// static
+bool NamespaceUtils::KernelSupportsDenySetgroups() {
+  // Presence of /proc/self/setgroups indicates support (see header comment).
+  return base::PathExists(base::FilePath(kProcSelfSetgroups));
+}
+
+// static
+bool NamespaceUtils::DenySetgroups() {
+  // This function needs to be async-signal-safe.
+  int fd = HANDLE_EINTR(open(kProcSelfSetgroups, O_WRONLY));
+  if (fd == -1) {
+    return false;
+  }
+
+  // Write the literal "deny" (without the NUL terminator).
+  static const char kDeny[] = "deny";
+  const ssize_t len = sizeof(kDeny) - 1;
+  const ssize_t rc = HANDLE_EINTR(write(fd, kDeny, len));
+  RAW_CHECK(IGNORE_EINTR(close(fd)) == 0);
+  return rc == len;
+}
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/services/namespace_utils.h b/libchrome/sandbox/linux/services/namespace_utils.h
new file mode 100644
index 0000000..ec5d241
--- /dev/null
+++ b/libchrome/sandbox/linux/services/namespace_utils.h
@@ -0,0 +1,55 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SERVICES_NAMESPACE_UTILS_H_
+#define SANDBOX_LINUX_SERVICES_NAMESPACE_UTILS_H_
+
+#include <sys/types.h>
+
+#include <type_traits>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+
+// Utility functions for using Linux namespaces.
+class SANDBOX_EXPORT NamespaceUtils {
+ public:
+  static_assert(std::is_same<uid_t, gid_t>::value,
+                "uid_t and gid_t must be the same type");
+  // generic_id_t can be used for either uid_t or gid_t.
+  typedef uid_t generic_id_t;
+
+  // Write a uid or gid mapping from |id| to |id| in |map_file|. This function
+  // is async-signal-safe.
+  static bool WriteToIdMapFile(const char* map_file,
+                               generic_id_t id) WARN_UNUSED_RESULT;
+
+  // Returns true if unprivileged namespaces of type |type| is supported
+  // (meaning that both CLONE_NEWUSER and type are supported). |type| must
+  // be one of CLONE_NEWIPC, CLONE_NEWNET, CLONE_NEWNS, CLONE_NEWPID,
+  // CLONE_NEWUSER, or CLONE_NEWUTS. This relies on access to /proc, so it will
+  // not work from within a sandbox.
+  static bool KernelSupportsUnprivilegedNamespace(int type);
+
+  // Returns true if the kernel supports denying setgroups in a user namespace.
+  // On kernels where this is supported, DenySetgroups must be called before a
+  // gid mapping can be added.
+  static bool KernelSupportsDenySetgroups();
+
+  // Disables setgroups() within the current user namespace. On Linux 3.18.2 and
+  // later, this is required in order to write to /proc/self/gid_map without
+  // having CAP_SETGID. Callers can determine whether this is needed with
+  // KernelSupportsDenySetgroups. This function is async-signal-safe.
+  static bool DenySetgroups() WARN_UNUSED_RESULT;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(NamespaceUtils);
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SERVICES_NAMESPACE_UTILS_H_
diff --git a/libchrome/sandbox/linux/services/namespace_utils_unittest.cc b/libchrome/sandbox/linux/services/namespace_utils_unittest.cc
new file mode 100644
index 0000000..41ed7e8
--- /dev/null
+++ b/libchrome/sandbox/linux/services/namespace_utils_unittest.cc
@@ -0,0 +1,72 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/namespace_utils.h"
+
+#include <errno.h>
+#include <sched.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/launch.h"
+#include "sandbox/linux/services/credentials.h"
+#include "sandbox/linux/tests/unit_tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace sandbox {
+
+namespace {
+
+SANDBOX_TEST(NamespaceUtils, KernelSupportsUnprivilegedNamespace) {
+  const bool can_create_user_ns = Credentials::CanCreateProcessInNewUserNS();
+  const bool supports_user_ns =
+      NamespaceUtils::KernelSupportsUnprivilegedNamespace(CLONE_NEWUSER);
+  // can_create_user_ns implies supports_user_ns, but the converse is not
+  // necessarily true, as creating a user namespace can fail for various
+  // reasons.
+  if (can_create_user_ns) {
+    SANDBOX_ASSERT(supports_user_ns);
+  }
+}
+
+// Forks into a fresh user namespace and checks that writing the identity
+// uid/gid mappings makes getuid()/getgid() report the original ids again.
+SANDBOX_TEST(NamespaceUtils, WriteToIdMapFile) {
+  if (!Credentials::CanCreateProcessInNewUserNS()) {
+    return;
+  }
+
+  // Captured before the fork: inside the not-yet-mapped namespace the child
+  // must not observe these values until the mappings are written.
+  const uid_t uid = getuid();
+  const gid_t gid = getgid();
+
+  const bool supports_deny_setgroups =
+      NamespaceUtils::KernelSupportsDenySetgroups();
+
+  const pid_t pid =
+      base::ForkWithFlags(CLONE_NEWUSER | SIGCHLD, nullptr, nullptr);
+  ASSERT_NE(-1, pid);
+  if (pid == 0) {
+    // Child: use only RAW_CHECK, as this code runs just after fork().
+    if (supports_deny_setgroups) {
+      RAW_CHECK(NamespaceUtils::DenySetgroups());
+    }
+
+    RAW_CHECK(getuid() != uid);
+    RAW_CHECK(NamespaceUtils::WriteToIdMapFile("/proc/self/uid_map", uid));
+    RAW_CHECK(getuid() == uid);
+
+    RAW_CHECK(getgid() != gid);
+    RAW_CHECK(NamespaceUtils::WriteToIdMapFile("/proc/self/gid_map", gid));
+    RAW_CHECK(getgid() == gid);
+
+    _exit(0);
+  }
+
+  // Parent: a raw status of 0 means the child exited normally with code 0.
+  int status = 42;
+  SANDBOX_ASSERT_EQ(pid, HANDLE_EINTR(waitpid(pid, &status, 0)));
+  SANDBOX_ASSERT_EQ(0, status);
+}
+
+} // namespace
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/services/proc_util.cc b/libchrome/sandbox/linux/services/proc_util.cc
new file mode 100644
index 0000000..b6d58de
--- /dev/null
+++ b/libchrome/sandbox/linux/services/proc_util.cc
@@ -0,0 +1,120 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/proc_util.h"
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <memory>
+
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/string_number_conversions.h"
+
+namespace sandbox {
+namespace {
+
+// Deleter that closes a DIR* via closedir(), for use with std::unique_ptr.
+struct DIRCloser {
+  void operator()(DIR* d) const {
+    DCHECK(d);
+    PCHECK(0 == closedir(d));
+  }
+};
+
+// Owning handle for an open directory stream.
+typedef std::unique_ptr<DIR, DIRCloser> ScopedDIR;
+
+// Opens |path| as a directory fd, crashing (PCHECK) on failure.
+base::ScopedFD OpenDirectory(const char* path) {
+  DCHECK(path);
+  base::ScopedFD directory_fd(
+      HANDLE_EINTR(open(path, O_RDONLY | O_DIRECTORY | O_CLOEXEC)));
+  PCHECK(directory_fd.is_valid());
+  return directory_fd;
+}
+
+}  // namespace
+
+int ProcUtil::CountOpenFds(int proc_fd) {
+  DCHECK_LE(0, proc_fd);
+  // Open this process's fd table through the provided /proc fd.
+  int proc_self_fd = HANDLE_EINTR(
+      openat(proc_fd, "self/fd/", O_DIRECTORY | O_RDONLY | O_CLOEXEC));
+  PCHECK(0 <= proc_self_fd);
+
+  // Ownership of proc_self_fd is transferred here, it must not be closed
+  // or modified afterwards except via dir.
+  ScopedDIR dir(fdopendir(proc_self_fd));
+  CHECK(dir);
+
+  // NOTE(review): readdir_r() is deprecated in modern glibc (readdir() is the
+  // suggested replacement); left as-is to match the upstream snapshot.
+  int count = 0;
+  struct dirent e;
+  struct dirent* de;
+  while (!readdir_r(dir.get(), &e, &de) && de) {
+    if (strcmp(e.d_name, ".") == 0 || strcmp(e.d_name, "..") == 0) {
+      continue;
+    }
+
+    int fd_num;
+    CHECK(base::StringToInt(e.d_name, &fd_num));
+    // Exclude the two descriptors used to perform this scan.
+    if (fd_num == proc_fd || fd_num == proc_self_fd) {
+      continue;
+    }
+
+    ++count;
+  }
+  return count;
+}
+
+bool ProcUtil::HasOpenDirectory(int proc_fd) {
+  DCHECK_LE(0, proc_fd);
+  // Open this process's fd table through the provided /proc fd.
+  int proc_self_fd =
+      openat(proc_fd, "self/fd/", O_DIRECTORY | O_RDONLY | O_CLOEXEC);
+
+  PCHECK(0 <= proc_self_fd);
+
+  // Ownership of proc_self_fd is transferred here, it must not be closed
+  // or modified afterwards except via dir.
+  ScopedDIR dir(fdopendir(proc_self_fd));
+  CHECK(dir);
+
+  struct dirent e;
+  struct dirent* de;
+  while (!readdir_r(dir.get(), &e, &de) && de) {
+    if (strcmp(e.d_name, ".") == 0 || strcmp(e.d_name, "..") == 0) {
+      continue;
+    }
+
+    int fd_num;
+    CHECK(base::StringToInt(e.d_name, &fd_num));
+    // Exclude the two descriptors used to perform this scan.
+    if (fd_num == proc_fd || fd_num == proc_self_fd) {
+      continue;
+    }
+
+    struct stat s;
+    // It's OK to use proc_self_fd here, fstatat won't modify it.
+    CHECK(fstatat(proc_self_fd, e.d_name, &s, 0) == 0);
+    if (S_ISDIR(s.st_mode)) {
+      // Found a directory fd that the caller did not account for.
+      return true;
+    }
+  }
+
+  // No open unmanaged directories found.
+  return false;
+}
+
+bool ProcUtil::HasOpenDirectory() {
+  // Convenience overload: open /proc/ ourselves. The fd is excluded from the
+  // scan by HasOpenDirectory(proc_fd) and closed by ScopedFD on return.
+  base::ScopedFD proc_fd(
+      HANDLE_EINTR(open("/proc/", O_DIRECTORY | O_RDONLY | O_CLOEXEC)));
+  return HasOpenDirectory(proc_fd.get());
+}
+
+// static
+base::ScopedFD ProcUtil::OpenProc() {
+  // Crashes (via PCHECK in OpenDirectory) if /proc is unavailable.
+  return OpenDirectory("/proc/");
+}
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/services/proc_util.h b/libchrome/sandbox/linux/services/proc_util.h
new file mode 100644
index 0000000..bc14c5e
--- /dev/null
+++ b/libchrome/sandbox/linux/services/proc_util.h
@@ -0,0 +1,42 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SERVICES_PROC_UTIL_H_
+#define SANDBOX_LINUX_SERVICES_PROC_UTIL_H_
+
+#include "base/files/scoped_file.h"
+#include "base/macros.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+
+// Utility routines that inspect the current process via /proc. All methods
+// are static; the class cannot be instantiated.
+class SANDBOX_EXPORT ProcUtil {
+ public:
+  // Returns the number of file descriptors in the current process's FD
+  // table, excluding |proc_fd|, which should be a file descriptor for
+  // /proc/.
+  static int CountOpenFds(int proc_fd);
+
+  // Checks whether the current process has any directory file descriptor open.
+  // Directory file descriptors are "capabilities" that would let a process use
+  // system calls such as openat() to bypass restrictions such as
+  // DropFileSystemAccess().
+  // Sometimes it's useful to call HasOpenDirectory() after file system access
+  // has been dropped. In this case, |proc_fd| should be a file descriptor to
+  // /proc/. The file descriptor in |proc_fd| will be ignored by
+  // HasOpenDirectory() and remains owned by the caller. It is very important
+  // for the caller to close it.
+  static bool HasOpenDirectory(int proc_fd) WARN_UNUSED_RESULT;
+  static bool HasOpenDirectory() WARN_UNUSED_RESULT;
+
+  // Open /proc/ or crash if not possible.
+  static base::ScopedFD OpenProc();
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ProcUtil);
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SERVICES_PROC_UTIL_H_
diff --git a/libchrome/sandbox/linux/services/proc_util_unittest.cc b/libchrome/sandbox/linux/services/proc_util_unittest.cc
new file mode 100644
index 0000000..bf25151
--- /dev/null
+++ b/libchrome/sandbox/linux/services/proc_util_unittest.cc
@@ -0,0 +1,62 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/proc_util.h"
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "base/files/scoped_file.h"
+#include "base/posix/eintr_wrapper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace sandbox {
+
+// Verifies that CountOpenFds tracks the FD table: opening one extra fd
+// raises the count by exactly one, and closing it restores the count.
+TEST(ProcUtil, CountOpenFds) {
+  base::ScopedFD proc_fd(open("/proc/", O_RDONLY | O_DIRECTORY));
+  ASSERT_TRUE(proc_fd.is_valid());
+  int fd_count = ProcUtil::CountOpenFds(proc_fd.get());
+  int fd = open("/dev/null", O_RDONLY);
+  ASSERT_LE(0, fd);
+  EXPECT_EQ(fd_count + 1, ProcUtil::CountOpenFds(proc_fd.get()));
+  ASSERT_EQ(0, IGNORE_EINTR(close(fd)));
+  EXPECT_EQ(fd_count, ProcUtil::CountOpenFds(proc_fd.get()));
+}
+
+// The no-argument overload must report any open directory fd, including a
+// /proc fd the test itself holds, and report none once it is closed.
+TEST(ProcUtil, HasOpenDirectory) {
+  // No open directory should exist at startup.
+  EXPECT_FALSE(ProcUtil::HasOpenDirectory());
+  {
+    // Have a "/proc" file descriptor around.
+    int proc_fd = open("/proc/", O_RDONLY | O_DIRECTORY);
+    base::ScopedFD proc_fd_closer(proc_fd);
+    EXPECT_TRUE(ProcUtil::HasOpenDirectory());
+  }
+  EXPECT_FALSE(ProcUtil::HasOpenDirectory());
+}
+
+// The fd-taking overload must exclude the passed |proc_fd| from the scan,
+// while still detecting any other open directory fd.
+TEST(ProcUtil, HasOpenDirectoryWithFD) {
+  int proc_fd = open("/proc/", O_RDONLY | O_DIRECTORY);
+  base::ScopedFD proc_fd_closer(proc_fd);
+  ASSERT_LE(0, proc_fd);
+
+  // Don't pass |proc_fd|, an open directory (proc_fd) should
+  // be detected.
+  EXPECT_TRUE(ProcUtil::HasOpenDirectory());
+  // Pass |proc_fd| and no open directory should be detected.
+  EXPECT_FALSE(ProcUtil::HasOpenDirectory(proc_fd));
+
+  {
+    // Have a directory file descriptor around.
+    int open_directory_fd = open("/proc/self/", O_RDONLY | O_DIRECTORY);
+    base::ScopedFD open_directory_fd_closer(open_directory_fd);
+    EXPECT_TRUE(ProcUtil::HasOpenDirectory(proc_fd));
+  }
+
+  // The "/proc/" file descriptor should now be closed, |proc_fd| is the
+  // only directory file descriptor open.
+  EXPECT_FALSE(ProcUtil::HasOpenDirectory(proc_fd));
+}
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/services/resource_limits.cc b/libchrome/sandbox/linux/services/resource_limits.cc
new file mode 100644
index 0000000..1ec1129
--- /dev/null
+++ b/libchrome/sandbox/linux/services/resource_limits.cc
@@ -0,0 +1,26 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/resource_limits.h"
+
+#include <sys/resource.h>
+#include <sys/time.h>
+
+#include <algorithm>
+
+namespace sandbox {
+
+// static
+bool ResourceLimits::Lower(int resource, rlim_t limit) {
+ struct rlimit old_rlimit;
+ if (getrlimit(resource, &old_rlimit))
+ return false;
+ // Make sure we don't raise the existing limit.
+ const struct rlimit new_rlimit = {std::min(old_rlimit.rlim_cur, limit),
+ std::min(old_rlimit.rlim_max, limit)};
+ int rc = setrlimit(resource, &new_rlimit);
+ return rc == 0;
+}
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/services/resource_limits.h b/libchrome/sandbox/linux/services/resource_limits.h
new file mode 100644
index 0000000..3464dab
--- /dev/null
+++ b/libchrome/sandbox/linux/services/resource_limits.h
@@ -0,0 +1,29 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SERVICES_RESOURCE_LIMITS_H_
+#define SANDBOX_LINUX_SERVICES_RESOURCE_LIMITS_H_
+
+#include <sys/resource.h>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+
+// This class provides a small wrapper around setrlimit().
+// This class provides a small wrapper around setrlimit(). All methods are
+// static; the class cannot be instantiated.
+class SANDBOX_EXPORT ResourceLimits {
+ public:
+  // Lower the soft and hard limit of |resource| to |limit|. If the current
+  // limit is lower than |limit|, keep it. Returns false on failure.
+  static bool Lower(int resource, rlim_t limit) WARN_UNUSED_RESULT;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ResourceLimits);
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SERVICES_RESOURCE_LIMITS_H_
diff --git a/libchrome/sandbox/linux/services/resource_limits_unittests.cc b/libchrome/sandbox/linux/services/resource_limits_unittests.cc
new file mode 100644
index 0000000..910c740
--- /dev/null
+++ b/libchrome/sandbox/linux/services/resource_limits_unittests.cc
@@ -0,0 +1,43 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/resource_limits.h"
+
+#include <errno.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "sandbox/linux/tests/test_utils.h"
+#include "sandbox/linux/tests/unit_tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace sandbox {
+
+namespace {
+
+// Fails on Android: crbug.com/459158
+#if !defined(OS_ANDROID)
+#define MAYBE_NoFork DISABLE_ON_ASAN(NoFork)
+#else
+#define MAYBE_NoFork DISABLED_NoFork
+#endif // OS_ANDROID
+
+// Not being able to fork breaks LeakSanitizer, so disable on
+// all ASAN builds.
+SANDBOX_TEST(ResourceLimits, MAYBE_NoFork) {
+  // Make sure that fork will fail with EAGAIN. An RLIMIT_NPROC of 0 means
+  // this process may not create any new process.
+  SANDBOX_ASSERT(ResourceLimits::Lower(RLIMIT_NPROC, 0));
+  errno = 0;
+  pid_t pid = fork();
+  // Reap any child if fork succeeded.
+  TestUtils::HandlePostForkReturn(pid);
+  SANDBOX_ASSERT_EQ(-1, pid);
+  CHECK_EQ(EAGAIN, errno);
+}
+
+} // namespace
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/services/scoped_process.cc b/libchrome/sandbox/linux/services/scoped_process.cc
new file mode 100644
index 0000000..65af487
--- /dev/null
+++ b/libchrome/sandbox/linux/services/scoped_process.cc
@@ -0,0 +1,119 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/scoped_process.h"
+
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "build/build_config.h"
+#include "sandbox/linux/services/syscall_wrappers.h"
+#include "sandbox/linux/services/thread_helpers.h"
+
+namespace sandbox {
+
+namespace {
+
+const char kSynchronisationChar[] = "D";
+
+// Blocks the calling process indefinitely. pause() returns on any handled
+// signal, so loop to guarantee we never fall through; the child is expected
+// to be killed (e.g. SIGKILL from ~ScopedProcess) to terminate.
+void WaitForever() {
+  while(true) {
+    pause();
+  }
+}
+
+} // namespace
+
+// Forks a child that runs |child_callback|, then writes one byte to a pipe
+// to signal completion, then blocks forever. The parent keeps only the read
+// end of the pipe (pipe_fds_[0]); the child keeps only the write end.
+ScopedProcess::ScopedProcess(const base::Closure& child_callback)
+    : child_process_id_(-1), process_id_(getpid()) {
+  PCHECK(0 == pipe(pipe_fds_));
+#if !defined(THREAD_SANITIZER)
+  // Make sure that we can safely fork().
+  CHECK(ThreadHelpers::IsSingleThreaded());
+#endif
+  child_process_id_ = fork();
+  PCHECK(0 <= child_process_id_);
+
+  if (0 == child_process_id_) {
+    // In the child: close the read end so the parent sees EOF if we die.
+    PCHECK(0 == IGNORE_EINTR(close(pipe_fds_[0])));
+    pipe_fds_[0] = -1;
+    child_callback.Run();
+    // Notify the parent that the closure has run.
+    CHECK_EQ(1, HANDLE_EINTR(write(pipe_fds_[1], kSynchronisationChar, 1)));
+    WaitForever();
+    NOTREACHED();
+    _exit(1);
+  }
+
+  // In the parent: close the write end so read() can detect child death.
+  PCHECK(0 == IGNORE_EINTR(close(pipe_fds_[1])));
+  pipe_fds_[1] = -1;
+}
+
+// Kills and reaps the child (the earlier WaitForExit() used WNOWAIT, so the
+// zombie is still reapable here), then closes any pipe ends still open.
+ScopedProcess::~ScopedProcess() {
+  CHECK(IsOriginalProcess());
+  if (child_process_id_ >= 0) {
+    PCHECK(0 == kill(child_process_id_, SIGKILL));
+    siginfo_t process_info;
+
+    PCHECK(0 == HANDLE_EINTR(
+                    waitid(P_PID, child_process_id_, &process_info, WEXITED)));
+  }
+  if (pipe_fds_[0] >= 0) {
+    PCHECK(0 == IGNORE_EINTR(close(pipe_fds_[0])));
+  }
+  if (pipe_fds_[1] >= 0) {
+    PCHECK(0 == IGNORE_EINTR(close(pipe_fds_[1])));
+  }
+}
+
+// Waits for the child to exit without reaping it, so both repeated calls
+// and the destructor's waitid() still succeed. Sets |*got_signaled| and
+// returns the exit code or signal number accordingly.
+int ScopedProcess::WaitForExit(bool* got_signaled) {
+  DCHECK(got_signaled);
+  CHECK(IsOriginalProcess());
+  siginfo_t process_info;
+  // WNOWAIT to make sure that the destructor can wait on the child.
+  int ret = HANDLE_EINTR(
+      waitid(P_PID, child_process_id_, &process_info, WEXITED | WNOWAIT));
+  PCHECK(0 == ret) << "Did something else wait on the child?";
+
+  if (process_info.si_code == CLD_EXITED) {
+    *got_signaled = false;
+  } else if (process_info.si_code == CLD_KILLED ||
+             process_info.si_code == CLD_DUMPED) {
+    *got_signaled = true;
+  } else {
+    // Stopped/continued children (CLD_STOPPED etc.) are not handled.
+    CHECK(false) << "ScopedProcess needs to be extended for si_code "
+                 << process_info.si_code;
+  }
+  return process_info.si_status;
+}
+
+// Blocks until the child writes its synchronization byte, or until EOF
+// (ret == 0), which means the child died before finishing the closure,
+// since the child holds the only write end of the pipe.
+bool ScopedProcess::WaitForClosureToRun() {
+  char c = 0;
+  int ret = HANDLE_EINTR(read(pipe_fds_[0], &c, 1));
+  PCHECK(ret >= 0);
+  if (0 == ret)
+    return false;
+
+  CHECK_EQ(c, kSynchronisationChar[0]);
+  return true;
+}
+
+// It would be problematic if after a fork(), another process would start
+// using this object.
+// This method allows to assert it is not happening.
+bool ScopedProcess::IsOriginalProcess() {
+  // Make a direct syscall to bypass glibc caching of PIDs, which would be
+  // stale after a fork().
+  pid_t pid = sys_getpid();
+  return pid == process_id_;
+}
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/services/scoped_process.h b/libchrome/sandbox/linux/services/scoped_process.h
new file mode 100644
index 0000000..bddbd55
--- /dev/null
+++ b/libchrome/sandbox/linux/services/scoped_process.h
@@ -0,0 +1,55 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SERVICES_SCOPED_PROCESS_H_
+#define SANDBOX_LINUX_SERVICES_SCOPED_PROCESS_H_
+
+#include "base/callback_forward.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+
+// fork() a child process that will run a Closure.
+// After the Closure has run, the child will pause forever. If this object
+// is destroyed, the child will be destroyed, even if the closure did not
+// finish running. It's ok to signal the child from outside of this class to
+// destroy it.
+// This class cannot be instantiated from a multi-threaded process, as it needs
+// to fork().
+class SANDBOX_EXPORT ScopedProcess {
+ public:
+  // A new process will be created and |child_callback| will run in the child
+  // process. This callback is allowed to terminate the process or to simply
+  // return. If the callback returns, the process will wait forever.
+  explicit ScopedProcess(const base::Closure& child_callback);
+  ~ScopedProcess();
+
+  // Wait for the process to exit.
+  // |got_signaled| tells how to interpret the return value: either as an exit
+  // code, or as a signal number.
+  // When this returns, the process will still not have been reaped and will
+  // survive as a zombie for the lifetime of this object. This method can be
+  // called multiple times.
+  int WaitForExit(bool* got_signaled);
+
+  // Wait for the |child_callback| passed at construction to run. Return false
+  // if |child_callback| did not finish running and we know it never will (for
+  // instance the child crashed or used _exit()).
+  bool WaitForClosureToRun();
+  // Returns the child's process id.
+  base::ProcessId GetPid() { return child_process_id_; }
+
+ private:
+  // Asserts this object is used from the process that created it.
+  bool IsOriginalProcess();
+
+  base::ProcessId child_process_id_;  // Child pid, -1 until fork() succeeds.
+  base::ProcessId process_id_;        // Pid of the creating (parent) process.
+  int pipe_fds_[2];                   // Synchronization pipe; -1 when closed.
+  DISALLOW_COPY_AND_ASSIGN(ScopedProcess);
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SERVICES_SCOPED_PROCESS_H_
diff --git a/libchrome/sandbox/linux/services/scoped_process_unittest.cc b/libchrome/sandbox/linux/services/scoped_process_unittest.cc
new file mode 100644
index 0000000..86f97a8
--- /dev/null
+++ b/libchrome/sandbox/linux/services/scoped_process_unittest.cc
@@ -0,0 +1,129 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/scoped_process.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "sandbox/linux/tests/unit_tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace sandbox {
+
+namespace {
+
+// Child-side helpers: each terminates the child process directly.
+void DoExit() { _exit(0); }
+
+void ExitWithCode(int exit_code) { _exit(exit_code); }
+
+// Raises |signal| (expected to terminate the process) and exits 0 as a
+// fallback in case the signal is ignored or handled.
+void RaiseAndExit(int signal) {
+  PCHECK(0 == raise(signal));
+  _exit(0);
+}
+
+// A child that calls _exit() must be reported as a normal (non-signaled)
+// exit with its exact exit code, and WaitForExit() must be repeatable.
+TEST(ScopedProcess, ScopedProcessNormalExit) {
+  const int kCustomExitCode = 12;
+  ScopedProcess process(base::Bind(&ExitWithCode, kCustomExitCode));
+  bool got_signaled = true;
+  int exit_code = process.WaitForExit(&got_signaled);
+  EXPECT_FALSE(got_signaled);
+  EXPECT_EQ(kCustomExitCode, exit_code);
+
+  // Verify that WaitForExit() can be called multiple times on the same
+  // process.
+  bool got_signaled2 = true;
+  int exit_code2 = process.WaitForExit(&got_signaled2);
+  EXPECT_FALSE(got_signaled2);
+  EXPECT_EQ(kCustomExitCode, exit_code2);
+}
+
+// Disable this test on Android, SIGABRT is funky there.
+// A child killed by SIGABRT must be reported as signaled with that signal.
+TEST(ScopedProcess, DISABLE_ON_ANDROID(ScopedProcessAbort)) {
+  // Restore the default SIGABRT disposition in case a test harness changed it.
+  PCHECK(SIG_ERR != signal(SIGABRT, SIG_DFL));
+  ScopedProcess process(base::Bind(&RaiseAndExit, SIGABRT));
+  bool got_signaled = false;
+  int exit_code = process.WaitForExit(&got_signaled);
+  EXPECT_TRUE(got_signaled);
+  EXPECT_EQ(SIGABRT, exit_code);
+}
+
+// A child killed externally with SIGKILL must be reported as signaled.
+TEST(ScopedProcess, ScopedProcessSignaled) {
+  ScopedProcess process(base::Bind(&base::DoNothing));
+  bool got_signaled = false;
+  ASSERT_EQ(0, kill(process.GetPid(), SIGKILL));
+  int exit_code = process.WaitForExit(&got_signaled);
+  EXPECT_TRUE(got_signaled);
+  EXPECT_EQ(SIGKILL, exit_code);
+}
+
+// Verifies the destructor really kills the child: the child inherits the
+// pipe's write end across fork, so if it were still alive, reading the
+// pipe would return EWOULDBLOCK instead of EOF.
+TEST(ScopedProcess, DiesForReal) {
+  int pipe_fds[2];
+  ASSERT_EQ(0, pipe(pipe_fds));
+  base::ScopedFD read_end_closer(pipe_fds[0]);
+  base::ScopedFD write_end_closer(pipe_fds[1]);
+
+  // Construct and immediately destroy: ~ScopedProcess must reap the child.
+  { ScopedProcess process(base::Bind(&DoExit)); }
+
+  // Close writing end of the pipe.
+  write_end_closer.reset();
+  pipe_fds[1] = -1;
+
+  ASSERT_EQ(0, fcntl(pipe_fds[0], F_SETFL, O_NONBLOCK));
+  char c;
+  // If the child process is dead for real, there will be no writing end
+  // for this pipe left and read will EOF instead of returning EWOULDBLOCK.
+  ASSERT_EQ(0, read(pipe_fds[0], &c, 1));
+}
+
+// WaitForClosureToRun() must return true when the closure completes and
+// false when the child terminates before signaling completion.
+TEST(ScopedProcess, SynchronizationBasic) {
+  ScopedProcess process1(base::Bind(&base::DoNothing));
+  EXPECT_TRUE(process1.WaitForClosureToRun());
+
+  ScopedProcess process2(base::Bind(&DoExit));
+  // The closure didn't finish running normally. This case is simple enough
+  // that process.WaitForClosureToRun() should return false, even though the
+  // API does not guarantees that it will return at all.
+  EXPECT_FALSE(process2.WaitForClosureToRun());
+}
+
+// Child-side helper: sleeps |time_to_sleep| milliseconds, then writes a
+// single marker byte to |fd| so the parent can confirm the closure ran.
+void SleepInMsAndWriteOneByte(int time_to_sleep, int fd) {
+  base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(time_to_sleep));
+  CHECK(1 == write(fd, "1", 1));
+}
+
+// WaitForClosureToRun() must not return before the closure has finished:
+// by the time it returns, the closure's side effect (one byte written to
+// the pipe) must already be observable.
+TEST(ScopedProcess, SynchronizationWorks) {
+  int pipe_fds[2];
+  ASSERT_EQ(0, pipe(pipe_fds));
+  base::ScopedFD read_end_closer(pipe_fds[0]);
+  base::ScopedFD write_end_closer(pipe_fds[1]);
+
+  // Start a process with a closure that takes a little bit to run.
+  ScopedProcess process(
+      base::Bind(&SleepInMsAndWriteOneByte, 100, pipe_fds[1]));
+  EXPECT_TRUE(process.WaitForClosureToRun());
+
+  // Verify that the closure did, indeed, run (non-blocking read must see
+  // the byte immediately).
+  ASSERT_EQ(0, fcntl(pipe_fds[0], F_SETFL, O_NONBLOCK));
+  char c = 0;
+  EXPECT_EQ(1, read(pipe_fds[0], &c, 1));
+  EXPECT_EQ('1', c);
+}
+
+} // namespace
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/services/syscall_wrappers.cc b/libchrome/sandbox/linux/services/syscall_wrappers.cc
new file mode 100644
index 0000000..7132d2a
--- /dev/null
+++ b/libchrome/sandbox/linux/services/syscall_wrappers.cc
@@ -0,0 +1,261 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/syscall_wrappers.h"
+
+#include <pthread.h>
+#include <sched.h>
+#include <setjmp.h>
+#include <sys/resource.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <cstring>
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/third_party/valgrind/valgrind.h"
+#include "build/build_config.h"
+#include "sandbox/linux/system_headers/capability.h"
+#include "sandbox/linux/system_headers/linux_signal.h"
+#include "sandbox/linux/system_headers/linux_syscalls.h"
+
+namespace sandbox {
+
+// Direct getpid() syscall; bypasses any libc caching of the pid.
+pid_t sys_getpid(void) {
+  return syscall(__NR_getpid);
+}
+
+// Direct gettid() syscall; some libcs do not expose a gettid() wrapper.
+pid_t sys_gettid(void) {
+  return syscall(__NR_gettid);
+}
+
+// Restricted clone() wrapper. Because |child_stack| must be nullptr (both
+// parent and child return on the same stack), CLONE_VM and CLONE_SETTLS
+// are forbidden; flag/pointer combinations are validated up front.
+long sys_clone(unsigned long flags,
+               std::nullptr_t child_stack,
+               pid_t* ptid,
+               pid_t* ctid,
+               std::nullptr_t tls) {
+  const bool clone_tls_used = flags & CLONE_SETTLS;
+  const bool invalid_ctid =
+      (flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) && !ctid;
+  const bool invalid_ptid = (flags & CLONE_PARENT_SETTID) && !ptid;
+
+  // We do not support CLONE_VM.
+  const bool clone_vm_used = flags & CLONE_VM;
+  if (clone_tls_used || invalid_ctid || invalid_ptid || clone_vm_used) {
+    RAW_LOG(FATAL, "Invalid usage of sys_clone");
+  }
+
+  if (ptid) MSAN_UNPOISON(ptid, sizeof(*ptid));
+  if (ctid) MSAN_UNPOISON(ctid, sizeof(*ctid));
+  // See kernel/fork.c in Linux. There is different ordering of sys_clone
+  // parameters depending on CONFIG_CLONE_BACKWARDS* configuration options.
+#if defined(ARCH_CPU_X86_64)
+  return syscall(__NR_clone, flags, child_stack, ptid, ctid, tls);
+#elif defined(ARCH_CPU_X86) || defined(ARCH_CPU_ARM_FAMILY) || \
+    defined(ARCH_CPU_MIPS_FAMILY)
+  // CONFIG_CLONE_BACKWARDS defined.
+  return syscall(__NR_clone, flags, child_stack, ptid, tls, ctid);
+#endif
+  // NOTE(review): there is no #else branch, so on an unlisted architecture
+  // control falls off the end of the function (undefined behavior). Consider
+  // adding "#else #error Unsupported architecture." — TODO confirm intent.
+}
+
+// Convenience overload: clone with only |flags| (e.g. SIGCHLD for a
+// fork()-like call), no ptid/ctid/tls.
+long sys_clone(unsigned long flags) {
+  return sys_clone(flags, nullptr, nullptr, nullptr, nullptr);
+}
+
+// Direct exit_group() syscall; terminates all threads in the process.
+void sys_exit_group(int status) {
+  syscall(__NR_exit_group, status);
+}
+
+// Direct seccomp() syscall (not exposed by libc at the time of writing).
+int sys_seccomp(unsigned int operation,
+                unsigned int flags,
+                const struct sock_fprog* args) {
+  return syscall(__NR_seccomp, operation, flags, args);
+}
+
+// Direct prlimit64() syscall; unpoisons |old_limit| for MSan on success
+// since the kernel wrote it behind the sanitizer's back.
+int sys_prlimit64(pid_t pid,
+                  int resource,
+                  const struct rlimit64* new_limit,
+                  struct rlimit64* old_limit) {
+  int res = syscall(__NR_prlimit64, pid, resource, new_limit, old_limit);
+  if (res == 0 && old_limit) MSAN_UNPOISON(old_limit, sizeof(*old_limit));
+  return res;
+}
+
+// Direct capget() syscall; avoids a libcap2 dependency.
+int sys_capget(cap_hdr* hdrp, cap_data* datap) {
+  int res = syscall(__NR_capget, hdrp, datap);
+  if (res == 0) {
+    if (hdrp) MSAN_UNPOISON(hdrp, sizeof(*hdrp));
+    if (datap) MSAN_UNPOISON(datap, sizeof(*datap));
+  }
+  return res;
+}
+
+// Direct capset() syscall; avoids a libcap2 dependency.
+int sys_capset(cap_hdr* hdrp, const cap_data* datap) {
+  return syscall(__NR_capset, hdrp, datap);
+}
+
+// Direct getresuid() syscall wrapper; unpoisons outputs for MSan.
+int sys_getresuid(uid_t* ruid, uid_t* euid, uid_t* suid) {
+  int res;
+#if defined(ARCH_CPU_X86) || defined(ARCH_CPU_ARMEL)
+  // On 32-bit x86 or 32-bit arm, getresuid supports 16bit values only.
+  // Use getresuid32 instead.
+  res = syscall(__NR_getresuid32, ruid, euid, suid);
+#else
+  res = syscall(__NR_getresuid, ruid, euid, suid);
+#endif
+  if (res == 0) {
+    if (ruid) MSAN_UNPOISON(ruid, sizeof(*ruid));
+    if (euid) MSAN_UNPOISON(euid, sizeof(*euid));
+    if (suid) MSAN_UNPOISON(suid, sizeof(*suid));
+  }
+  return res;
+}
+
+// Direct getresgid() syscall wrapper; unpoisons outputs for MSan.
+int sys_getresgid(gid_t* rgid, gid_t* egid, gid_t* sgid) {
+  int res;
+#if defined(ARCH_CPU_X86) || defined(ARCH_CPU_ARMEL)
+  // On 32-bit x86 or 32-bit arm, getresgid supports 16bit values only.
+  // Use getresgid32 instead.
+  res = syscall(__NR_getresgid32, rgid, egid, sgid);
+#else
+  res = syscall(__NR_getresgid, rgid, egid, sgid);
+#endif
+  if (res == 0) {
+    if (rgid) MSAN_UNPOISON(rgid, sizeof(*rgid));
+    if (egid) MSAN_UNPOISON(egid, sizeof(*egid));
+    if (sgid) MSAN_UNPOISON(sgid, sizeof(*sgid));
+  }
+  return res;
+}
+
+// Direct chroot() syscall; some libcs do not expose a wrapper.
+int sys_chroot(const char* path) {
+  return syscall(__NR_chroot, path);
+}
+
+// Direct unshare() syscall; some libcs do not expose a wrapper.
+int sys_unshare(int flags) {
+  return syscall(__NR_unshare, flags);
+}
+
+// rt_sigprocmask() wrapper. |oldset| must be nullptr (enforced by the
+// std::nullptr_t type) because the kernel's sigset layout may be wider
+// than the toolchain's sigset_t and we cannot faithfully copy it back.
+int sys_sigprocmask(int how, const sigset_t* set, std::nullptr_t oldset) {
+  // In some toolchain (in particular Android and PNaCl toolchain),
+  // sigset_t is 32 bits, but the Linux ABI uses more.
+  LinuxSigSet linux_value;
+  std::memset(&linux_value, 0, sizeof(LinuxSigSet));
+  std::memcpy(&linux_value, set, std::min(sizeof(sigset_t),
+                                          sizeof(LinuxSigSet)));
+
+  return syscall(__NR_rt_sigprocmask, how, &linux_value, nullptr,
+                 sizeof(linux_value));
+}
+
+// When this is built with PNaCl toolchain, we should always use sys_sigaction
+// below, because sigaction() provided by the toolchain is incompatible with
+// Linux's ABI.
+#if !defined(OS_NACL_NONSFI)
+// Outside NaCl, the libc sigaction() matches the kernel ABI, so simply
+// forward to it.
+int sys_sigaction(int signum,
+                  const struct sigaction* act,
+                  struct sigaction* oldact) {
+  return sigaction(signum, act, oldact);
+}
+#else
+#if defined(ARCH_CPU_X86_FAMILY)
+
+// On x86_64, sa_restorer is required. We specify it on x86 as well in order to
+// support kernels with VDSO disabled.
+#if !defined(SA_RESTORER)
+#define SA_RESTORER 0x04000000
+#endif
+
+// XSTR(__NR_foo) expands to a string literal containing the value of
+// __NR_foo.
+#define STR(x) #x
+#define XSTR(x) STR(x)
+
+// rt_sigreturn is a special system call that interacts with the user land
+// stack. Thus, here prologue must not be created, which implies syscall()
+// does not work properly, too. Note that rt_sigreturn does not return.
+// TODO(rickyz): These assembly functions may still break stack unwinding on
+// nonsfi NaCl builds.
+#if defined(ARCH_CPU_X86_64)
+
+extern "C" {
+  void sys_rt_sigreturn();
+}
+
+asm(
+    ".text\n"
+    "sys_rt_sigreturn:\n"
+    "mov $" XSTR(__NR_rt_sigreturn) ", %eax\n"
+    "syscall\n");
+
+#elif defined(ARCH_CPU_X86)
+extern "C" {
+  void sys_sigreturn();
+  void sys_rt_sigreturn();
+}
+
+// On 32-bit x86, both the legacy sigreturn and rt_sigreturn entry points
+// are provided, selected below depending on SA_SIGINFO.
+asm(
+    ".text\n"
+    "sys_rt_sigreturn:\n"
+    "mov $" XSTR(__NR_rt_sigreturn) ", %eax\n"
+    "int $0x80\n"
+
+    "sys_sigreturn:\n"
+    "pop %eax\n"
+    "mov $" XSTR(__NR_sigreturn) ", %eax\n"
+    "int $0x80\n");
+#else
+#error "Unsupported architecture."
+#endif
+
+#undef STR
+#undef XSTR
+
+#endif
+
+// NaCl-nonsfi sigaction(): translates the toolchain's struct sigaction
+// to/from the kernel's LinuxSigAction layout and calls rt_sigaction
+// directly, supplying an sa_restorer when the caller did not.
+int sys_sigaction(int signum,
+                  const struct sigaction* act,
+                  struct sigaction* oldact) {
+  LinuxSigAction linux_act = {};
+  if (act) {
+    linux_act.kernel_handler = act->sa_handler;
+    std::memcpy(&linux_act.sa_mask, &act->sa_mask,
+                std::min(sizeof(linux_act.sa_mask), sizeof(act->sa_mask)));
+    linux_act.sa_flags = act->sa_flags;
+
+#if defined(ARCH_CPU_X86_FAMILY)
+    if (!(linux_act.sa_flags & SA_RESTORER)) {
+      linux_act.sa_flags |= SA_RESTORER;
+#if defined(ARCH_CPU_X86_64)
+      linux_act.sa_restorer = sys_rt_sigreturn;
+#elif defined(ARCH_CPU_X86)
+      // Legacy sigreturn for non-SA_SIGINFO handlers, rt_sigreturn otherwise.
+      linux_act.sa_restorer =
+          linux_act.sa_flags & SA_SIGINFO ? sys_rt_sigreturn : sys_sigreturn;
+#else
+#error "Unsupported architecture."
+#endif
+    }
+#endif
+  }
+
+  LinuxSigAction linux_oldact = {};
+  int result = syscall(__NR_rt_sigaction, signum, act ? &linux_act : nullptr,
+                       oldact ? &linux_oldact : nullptr,
+                       sizeof(LinuxSigSet));
+
+  if (result == 0 && oldact) {
+    oldact->sa_handler = linux_oldact.kernel_handler;
+    sigemptyset(&oldact->sa_mask);
+    // NOTE(review): the sizeof operands below name linux_act/act rather than
+    // linux_oldact/oldact. sizeof is unevaluated and the types are identical,
+    // so behavior is unaffected, but the latter would read more clearly.
+    std::memcpy(&oldact->sa_mask, &linux_oldact.sa_mask,
+                std::min(sizeof(linux_act.sa_mask), sizeof(act->sa_mask)));
+    oldact->sa_flags = linux_oldact.sa_flags;
+  }
+  return result;
+}
+
+#endif // !defined(OS_NACL_NONSFI)
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/services/syscall_wrappers.h b/libchrome/sandbox/linux/services/syscall_wrappers.h
new file mode 100644
index 0000000..057e4c8
--- /dev/null
+++ b/libchrome/sandbox/linux/services/syscall_wrappers.h
@@ -0,0 +1,85 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SERVICES_SYSCALL_WRAPPERS_H_
+#define SANDBOX_LINUX_SERVICES_SYSCALL_WRAPPERS_H_
+
+#include <signal.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <cstddef>
+
+#include "sandbox/sandbox_export.h"
+
+struct sock_fprog;
+struct rlimit64;
+struct cap_hdr;
+struct cap_data;
+
+namespace sandbox {
+
+// Provide direct system call wrappers for a few common system calls.
+// These are guaranteed to perform a system call and do not rely on things such
+// as caching the current pid (c.f. getpid()) unless otherwise specified.
+
+SANDBOX_EXPORT pid_t sys_getpid(void);
+
+SANDBOX_EXPORT pid_t sys_gettid(void);
+
+SANDBOX_EXPORT long sys_clone(unsigned long flags);
+
+// |regs| is not supported and must be passed as nullptr. |child_stack| must be
+// nullptr, since otherwise this function cannot safely return. As a
+// consequence, this function does not support CLONE_VM.
+SANDBOX_EXPORT long sys_clone(unsigned long flags,
+                              std::nullptr_t child_stack,
+                              pid_t* ptid,
+                              pid_t* ctid,
+                              std::nullptr_t regs);
+
+SANDBOX_EXPORT void sys_exit_group(int status);
+
+// The official system call takes |args| as void* (in order to be extensible),
+// but add more typing for the cases that are currently used.
+SANDBOX_EXPORT int sys_seccomp(unsigned int operation,
+                               unsigned int flags,
+                               const struct sock_fprog* args);
+
+// Some libcs do not expose a prlimit64 wrapper.
+SANDBOX_EXPORT int sys_prlimit64(pid_t pid,
+                                 int resource,
+                                 const struct rlimit64* new_limit,
+                                 struct rlimit64* old_limit);
+
+// Some libcs do not expose capget/capset wrappers. We want to use these
+// directly in order to avoid pulling in libcap2.
+SANDBOX_EXPORT int sys_capget(struct cap_hdr* hdrp, struct cap_data* datap);
+SANDBOX_EXPORT int sys_capset(struct cap_hdr* hdrp,
+                              const struct cap_data* datap);
+
+// Some libcs do not expose getresuid/getresgid wrappers.
+SANDBOX_EXPORT int sys_getresuid(uid_t* ruid, uid_t* euid, uid_t* suid);
+SANDBOX_EXPORT int sys_getresgid(gid_t* rgid, gid_t* egid, gid_t* sgid);
+
+// Some libcs do not expose a chroot wrapper.
+SANDBOX_EXPORT int sys_chroot(const char* path);
+
+// Some libcs do not expose an unshare wrapper.
+SANDBOX_EXPORT int sys_unshare(int flags);
+
+// Some libcs do not expose a sigprocmask wrapper. Note that oldset must be a
+// nullptr, because of the ABI gap between the toolchain's and Linux's
+// sigset_t layouts.
+SANDBOX_EXPORT int sys_sigprocmask(int how,
+                                   const sigset_t* set,
+                                   std::nullptr_t oldset);
+
+// Some libcs do not expose a sigaction().
+SANDBOX_EXPORT int sys_sigaction(int signum,
+                                 const struct sigaction* act,
+                                 struct sigaction* oldact);
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SERVICES_SYSCALL_WRAPPERS_H_
diff --git a/libchrome/sandbox/linux/services/syscall_wrappers_unittest.cc b/libchrome/sandbox/linux/services/syscall_wrappers_unittest.cc
new file mode 100644
index 0000000..34ac740
--- /dev/null
+++ b/libchrome/sandbox/linux/services/syscall_wrappers_unittest.cc
@@ -0,0 +1,100 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/syscall_wrappers.h"
+
+#include <stdint.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <cstring>
+
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/third_party/valgrind/valgrind.h"
+#include "build/build_config.h"
+#include "sandbox/linux/system_headers/linux_signal.h"
+#include "sandbox/linux/tests/test_utils.h"
+#include "sandbox/linux/tests/unit_tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace sandbox {
+
+namespace {
+
+// The raw getpid wrapper must agree with the libc getpid().
+TEST(SyscallWrappers, BasicSyscalls) {
+  EXPECT_EQ(getpid(), sys_getpid());
+}
+
+// sys_clone(SIGCHLD) is a plain fork(); it must yield a positive child pid.
+TEST(SyscallWrappers, CloneBasic) {
+  pid_t child = sys_clone(SIGCHLD);
+  TestUtils::HandlePostForkReturn(child);
+  EXPECT_LT(0, child);
+}
+
+// CLONE_PARENT_SETTID must store the child's tid into |ptid| in the parent.
+TEST(SyscallWrappers, CloneParentSettid) {
+  pid_t ptid = 0;
+  pid_t child = sys_clone(CLONE_PARENT_SETTID | SIGCHLD, nullptr, &ptid,
+                          nullptr, nullptr);
+  TestUtils::HandlePostForkReturn(child);
+  EXPECT_LT(0, child);
+  EXPECT_EQ(child, ptid);
+}
+
+// CLONE_CHILD_SETTID must store the child's tid into |ctid| as seen from
+// the child; the child exits 0 only if ctid matches its own pid.
+TEST(SyscallWrappers, CloneChildSettid) {
+  pid_t ctid = 0;
+  pid_t pid =
+      sys_clone(CLONE_CHILD_SETTID | SIGCHLD, nullptr, nullptr, &ctid, nullptr);
+
+  const int kSuccessExit = 0;
+  if (0 == pid) {
+    // In child.
+    if (sys_getpid() == ctid)
+      _exit(kSuccessExit);
+    _exit(1);
+  }
+
+  ASSERT_NE(-1, pid);
+  int status = 0;
+  ASSERT_EQ(pid, HANDLE_EINTR(waitpid(pid, &status, 0)));
+  ASSERT_TRUE(WIFEXITED(status));
+  EXPECT_EQ(kSuccessExit, WEXITSTATUS(status));
+}
+
+// The raw getresuid wrapper must agree with the libc getresuid().
+TEST(SyscallWrappers, GetRESUid) {
+  uid_t ruid, euid, suid;
+  uid_t sys_ruid, sys_euid, sys_suid;
+  ASSERT_EQ(0, getresuid(&ruid, &euid, &suid));
+  ASSERT_EQ(0, sys_getresuid(&sys_ruid, &sys_euid, &sys_suid));
+  EXPECT_EQ(ruid, sys_ruid);
+  EXPECT_EQ(euid, sys_euid);
+  EXPECT_EQ(suid, sys_suid);
+}
+
+// The raw getresgid wrapper must agree with the libc getresgid().
+TEST(SyscallWrappers, GetRESGid) {
+  gid_t rgid, egid, sgid;
+  gid_t sys_rgid, sys_egid, sys_sgid;
+  ASSERT_EQ(0, getresgid(&rgid, &egid, &sgid));
+  ASSERT_EQ(0, sys_getresgid(&sys_rgid, &sys_egid, &sys_sgid));
+  EXPECT_EQ(rgid, sys_rgid);
+  EXPECT_EQ(egid, sys_egid);
+  EXPECT_EQ(sgid, sys_sgid);
+}
+
+// Confirms the toolchain's sigset_t bit layout matches the kernel's: each
+// signal N occupies bit (N - 1) of the 64-bit kernel mask.
+TEST(SyscallWrappers, LinuxSigSet) {
+  sigset_t sigset;
+  ASSERT_EQ(0, sigemptyset(&sigset));
+  ASSERT_EQ(0, sigaddset(&sigset, LINUX_SIGSEGV));
+  ASSERT_EQ(0, sigaddset(&sigset, LINUX_SIGBUS));
+  uint64_t linux_sigset = 0;
+  std::memcpy(&linux_sigset, &sigset,
+              std::min(sizeof(sigset), sizeof(linux_sigset)));
+  EXPECT_EQ((1ULL << (LINUX_SIGSEGV - 1)) | (1ULL << (LINUX_SIGBUS - 1)),
+            linux_sigset);
+}
+
+} // namespace
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/services/thread_helpers.cc b/libchrome/sandbox/linux/services/thread_helpers.cc
new file mode 100644
index 0000000..20752c8
--- /dev/null
+++ b/libchrome/sandbox/linux/services/thread_helpers.cc
@@ -0,0 +1,200 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/thread_helpers.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "sandbox/linux/services/proc_util.h"
+
+namespace sandbox {
+
+namespace {
+
+const char kAssertSingleThreadedError[] =
+ "Current process is not mono-threaded!";
+const char kAssertThreadDoesNotAppearInProcFS[] =
+ "Started thread does not appear in /proc";
+const char kAssertThreadDoesNotDisappearInProcFS[] =
+ "Stopped thread does not disappear in /proc";
+
+// Returns true iff the current process has exactly one thread, judged by
+// the hard-link count of /proc/self/task/ (each thread adds one entry).
+// |proc_fd| must be a valid fd to /proc.
+bool IsSingleThreadedImpl(int proc_fd) {
+ CHECK_LE(0, proc_fd);
+ struct stat task_stat;
+ int fstat_ret = fstatat(proc_fd, "self/task/", &task_stat, 0);
+ PCHECK(0 == fstat_ret);
+
+ // At least "..", "." and the current thread should be present.
+ CHECK_LE(3UL, task_stat.st_nlink);
+ // Counting threads via /proc/self/task could be racy. For the purpose of
+ // determining if the current process is monothreaded it works: if at any
+ // time it becomes monothreaded, it'll stay so.
+ return task_stat.st_nlink == 3;
+}
+
+// Returns true iff |thread_id_dir_str| (a "self/task/<tid>/" path relative
+// to |proc_fd|) exists. Any stat failure other than ENOENT is fatal.
+bool IsThreadPresentInProcFS(int proc_fd,
+ const std::string& thread_id_dir_str) {
+ struct stat task_stat;
+ const int fstat_ret =
+ fstatat(proc_fd, thread_id_dir_str.c_str(), &task_stat, 0);
+ if (fstat_ret < 0) {
+ PCHECK(ENOENT == errno);
+ return false;
+ }
+ return true;
+}
+
+// Negation of IsThreadPresentInProcFS(); used as a RunWhileTrue() predicate
+// when waiting for a started thread to appear in /proc.
+bool IsNotThreadPresentInProcFS(int proc_fd,
+ const std::string& thread_id_dir_str) {
+ return !IsThreadPresentInProcFS(proc_fd, thread_id_dir_str);
+}
+
+// Run |cb| in a loop until it returns false. Every time |cb| runs, sleep
+// for an exponentially increasing amount of time. |cb| is expected to return
+// false very quickly and this will crash if it doesn't happen within ~64ms on
+// Debug builds (2s on Release builds).
+// This is guaranteed to not sleep more than twice as much as the bare minimum
+// amount of time.
+// |message| is included in the LOG(FATAL) output if the iteration cap is hit.
+void RunWhileTrue(const base::Callback<bool(void)>& cb, const char* message) {
+#if defined(NDEBUG)
+ // In Release mode, crash after 30 iterations, which means having spent
+ // roughly 2s in
+ // nanosleep(2) cumulatively.
+ const unsigned int kMaxIterations = 30U;
+#else
+ // In practice, this never goes through more than a couple iterations. In
+ // debug mode, crash after 64ms (+ eventually 25 times the granularity of
+ // the clock) in nanosleep(2). This ensures that this is not becoming too
+ // slow.
+ const unsigned int kMaxIterations = 25U;
+#endif
+
+ // Run |cb| with an exponential back-off, sleeping 2^iterations nanoseconds
+ // in nanosleep(2).
+ // Note: the clock may not allow for nanosecond granularity, in this case the
+ // first iterations would sleep a tiny bit more instead, which would not
+ // change the calculations significantly.
+ for (unsigned int i = 0; i < kMaxIterations; ++i) {
+ if (!cb.Run()) {
+ return;
+ }
+
+ // Increase the waiting time exponentially.
+ struct timespec ts = {0, 1L << i /* nanoseconds */};
+ PCHECK(0 == HANDLE_EINTR(nanosleep(&ts, &ts)));
+ }
+
+ LOG(FATAL) << message << " (iterations: " << kMaxIterations << ")";
+
+ NOTREACHED();
+}
+
+// RunWhileTrue() predicate: true while the process still has more than one
+// thread.
+bool IsMultiThreaded(int proc_fd) {
+ return !ThreadHelpers::IsSingleThreaded(proc_fd);
+}
+
+enum class ThreadAction { Start, Stop };
+
+// Starts or stops |thread| (per |action|) and then busy-waits (with
+// exponential back-off via RunWhileTrue) until /proc reflects the change.
+// Returns false only if starting the thread fails; crashes if /proc does not
+// converge in time.
+bool ChangeThreadStateAndWatchProcFS(
+ int proc_fd, base::Thread* thread, ThreadAction action) {
+ DCHECK_LE(0, proc_fd);
+ DCHECK(thread);
+ DCHECK(action == ThreadAction::Start || action == ThreadAction::Stop);
+
+ base::Callback<bool(void)> cb;
+ const char* message;
+
+ if (action == ThreadAction::Start) {
+ // Should start the thread before calling thread_id().
+ if (!thread->Start())
+ return false;
+ }
+
+ // Path of the thread's task directory, relative to |proc_fd|.
+ const base::PlatformThreadId thread_id = thread->GetThreadId();
+ const std::string thread_id_dir_str =
+ "self/task/" + base::IntToString(thread_id) + "/";
+
+ if (action == ThreadAction::Stop) {
+ // The target thread should exist in /proc.
+ DCHECK(IsThreadPresentInProcFS(proc_fd, thread_id_dir_str));
+ thread->Stop();
+ }
+
+ // The kernel is at liberty to wake the thread id futex before updating
+ // /proc. Start() above or following Stop(), the thread is started or joined,
+ // but entries in /proc may not have been updated.
+ if (action == ThreadAction::Start) {
+ // Wait for the thread to appear in /proc.
+ cb = base::Bind(&IsNotThreadPresentInProcFS, proc_fd, thread_id_dir_str);
+ message = kAssertThreadDoesNotAppearInProcFS;
+ } else {
+ // Wait for the thread to disappear from /proc.
+ cb = base::Bind(&IsThreadPresentInProcFS, proc_fd, thread_id_dir_str);
+ message = kAssertThreadDoesNotDisappearInProcFS;
+ }
+ RunWhileTrue(cb, message);
+
+ DCHECK_EQ(action == ThreadAction::Start,
+ IsThreadPresentInProcFS(proc_fd, thread_id_dir_str));
+
+ return true;
+}
+
+} // namespace
+
+// static
+// See IsSingleThreadedImpl(): single-threadedness is derived from the link
+// count of /proc/self/task/.
+bool ThreadHelpers::IsSingleThreaded(int proc_fd) {
+ DCHECK_LE(0, proc_fd);
+ return IsSingleThreadedImpl(proc_fd);
+}
+
+// static
+// Convenience overload: opens /proc itself (fd released on return).
+bool ThreadHelpers::IsSingleThreaded() {
+ base::ScopedFD task_fd(ProcUtil::OpenProc());
+ return IsSingleThreaded(task_fd.get());
+}
+
+// static
+// Waits (with back-off) for the process to become single threaded; crashes
+// via RunWhileTrue() if it does not.
+void ThreadHelpers::AssertSingleThreaded(int proc_fd) {
+ DCHECK_LE(0, proc_fd);
+ const base::Callback<bool(void)> cb = base::Bind(&IsMultiThreaded, proc_fd);
+ RunWhileTrue(cb, kAssertSingleThreadedError);
+}
+
+// static
+// Convenience overload: opens /proc itself (fd released on return).
+void ThreadHelpers::AssertSingleThreaded() {
+ base::ScopedFD task_fd(ProcUtil::OpenProc());
+ AssertSingleThreaded(task_fd.get());
+}
+
+// static
+// Starts |thread| and blocks until it is visible in /proc/self/task/.
+bool ThreadHelpers::StartThreadAndWatchProcFS(int proc_fd,
+ base::Thread* thread) {
+ return ChangeThreadStateAndWatchProcFS(proc_fd, thread, ThreadAction::Start);
+}
+
+// static
+// Stops |thread| and blocks until it has vanished from /proc/self/task/.
+bool ThreadHelpers::StopThreadAndWatchProcFS(int proc_fd,
+ base::Thread* thread) {
+ return ChangeThreadStateAndWatchProcFS(proc_fd, thread, ThreadAction::Stop);
+}
+
+// static
+// Exposes the AssertSingleThreaded() crash message so death tests can match
+// against it.
+const char* ThreadHelpers::GetAssertSingleThreadedErrorMessageForTests() {
+ return kAssertSingleThreadedError;
+}
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/services/thread_helpers.h b/libchrome/sandbox/linux/services/thread_helpers.h
new file mode 100644
index 0000000..73e041a
--- /dev/null
+++ b/libchrome/sandbox/linux/services/thread_helpers.h
@@ -0,0 +1,47 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SERVICES_THREAD_HELPERS_H_
+#define SANDBOX_LINUX_SERVICES_THREAD_HELPERS_H_
+
+#include "base/macros.h"
+#include "sandbox/sandbox_export.h"
+
+namespace base { class Thread; }
+
+namespace sandbox {
+
+class SANDBOX_EXPORT ThreadHelpers {
+ public:
+ // Checks whether the current process is single threaded. |proc_fd|
+ // must be a file descriptor to /proc/ and remains owned by the
+ // caller.
+ static bool IsSingleThreaded(int proc_fd);
+ static bool IsSingleThreaded();
+
+ // Crashes if the current process is not single threaded. This will wait
+ // on /proc to be updated. In the case where this doesn't crash, this will
+ // return promptly. In the case where this does crash, this will first wait
+ // for a few ms in Debug mode, a few seconds in Release mode.
+ static void AssertSingleThreaded(int proc_fd);
+ static void AssertSingleThreaded();
+
+ // Starts |thread| and ensures that it has an entry in /proc/self/task/ from
+ // the point of view of the current thread.
+ static bool StartThreadAndWatchProcFS(int proc_fd, base::Thread* thread);
+
+ // Stops |thread| and ensures that it does not have an entry in
+ // /proc/self/task/ from the point of view of the current thread. This is
+ // the way to stop threads before calling IsSingleThreaded().
+ static bool StopThreadAndWatchProcFS(int proc_fd, base::Thread* thread);
+
+ // Returns the message used by AssertSingleThreaded() when it crashes, for
+ // use in death tests.
+ static const char* GetAssertSingleThreadedErrorMessageForTests();
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ThreadHelpers);
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SERVICES_THREAD_HELPERS_H_
diff --git a/libchrome/sandbox/linux/services/thread_helpers_unittests.cc b/libchrome/sandbox/linux/services/thread_helpers_unittests.cc
new file mode 100644
index 0000000..fe1080b
--- /dev/null
+++ b/libchrome/sandbox/linux/services/thread_helpers_unittests.cc
@@ -0,0 +1,152 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/thread_helpers.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/process_metrics.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "build/build_config.h"
+#include "sandbox/linux/tests/unit_tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::PlatformThread;
+
+namespace sandbox {
+
+namespace {
+
+// These tests fail under ThreadSanitizer, see http://crbug.com/342305
+#if !defined(THREAD_SANITIZER)
+
+// Number of iterations for race-condition tests; kept tiny under Valgrind
+// because each iteration is expensive there.
+int GetRaceTestIterations() {
+ if (IsRunningOnValgrind()) {
+ return 2;
+ } else {
+ return 1000;
+ }
+}
+
+// RAII holder for a read-only directory fd to /proc/; CHECKs on open and
+// close failure.
+class ScopedProc {
+ public:
+ ScopedProc() : fd_(-1) {
+ fd_ = open("/proc/", O_RDONLY | O_DIRECTORY);
+ CHECK_LE(0, fd_);
+ }
+
+ ~ScopedProc() { PCHECK(0 == IGNORE_EINTR(close(fd_))); }
+
+ // Returns the fd; ownership is retained by this object.
+ int fd() { return fd_; }
+
+ private:
+ int fd_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedProc);
+};
+
+// Sanity check: IsSingleThreaded() flips to false when a thread is started
+// and the /proc-watching start/stop helpers succeed.
+TEST(ThreadHelpers, IsSingleThreadedBasic) {
+ ScopedProc proc_fd;
+ ASSERT_TRUE(ThreadHelpers::IsSingleThreaded(proc_fd.fd()));
+ ASSERT_TRUE(ThreadHelpers::IsSingleThreaded());
+
+ base::Thread thread("sandbox_tests");
+ ASSERT_TRUE(ThreadHelpers::StartThreadAndWatchProcFS(proc_fd.fd(), &thread));
+ ASSERT_FALSE(ThreadHelpers::IsSingleThreaded(proc_fd.fd()));
+ ASSERT_FALSE(ThreadHelpers::IsSingleThreaded());
+ // Explicitly stop the thread here to not pollute the next test.
+ ASSERT_TRUE(ThreadHelpers::StopThreadAndWatchProcFS(proc_fd.fd(), &thread));
+}
+
+// AssertSingleThreaded() must return (not crash) in a mono-threaded process.
+SANDBOX_TEST(ThreadHelpers, AssertSingleThreaded) {
+ ScopedProc proc_fd;
+ SANDBOX_ASSERT(ThreadHelpers::IsSingleThreaded(proc_fd.fd()));
+ SANDBOX_ASSERT(ThreadHelpers::IsSingleThreaded());
+
+ ThreadHelpers::AssertSingleThreaded(proc_fd.fd());
+ ThreadHelpers::AssertSingleThreaded();
+}
+
+// Repeatedly create and destroy a thread to look for races between thread
+// lifetime and /proc bookkeeping.
+TEST(ThreadHelpers, IsSingleThreadedIterated) {
+ ScopedProc proc_fd;
+ ASSERT_TRUE(ThreadHelpers::IsSingleThreaded(proc_fd.fd()));
+
+ // Iterate to check for race conditions.
+ for (int i = 0; i < GetRaceTestIterations(); ++i) {
+ base::Thread thread("sandbox_tests");
+ ASSERT_TRUE(
+ ThreadHelpers::StartThreadAndWatchProcFS(proc_fd.fd(), &thread));
+ ASSERT_FALSE(ThreadHelpers::IsSingleThreaded(proc_fd.fd()));
+ // Explicitly stop the thread here to not pollute the next test.
+ ASSERT_TRUE(ThreadHelpers::StopThreadAndWatchProcFS(proc_fd.fd(), &thread));
+ }
+}
+
+// Reuse one base::Thread object across many start/stop cycles and verify the
+// single-threaded state tracks it every time.
+TEST(ThreadHelpers, IsSingleThreadedStartAndStop) {
+ ScopedProc proc_fd;
+ ASSERT_TRUE(ThreadHelpers::IsSingleThreaded(proc_fd.fd()));
+
+ base::Thread thread("sandbox_tests");
+ // This is testing for a race condition, so iterate.
+ // Manually, this has been tested with more than 1M iterations.
+ for (int i = 0; i < GetRaceTestIterations(); ++i) {
+ ASSERT_TRUE(
+ ThreadHelpers::StartThreadAndWatchProcFS(proc_fd.fd(), &thread));
+ ASSERT_FALSE(ThreadHelpers::IsSingleThreaded(proc_fd.fd()));
+
+ ASSERT_TRUE(ThreadHelpers::StopThreadAndWatchProcFS(proc_fd.fd(), &thread));
+ ASSERT_TRUE(ThreadHelpers::IsSingleThreaded(proc_fd.fd()));
+ ASSERT_EQ(1, base::GetNumberOfThreads(base::GetCurrentProcessHandle()));
+ }
+}
+
+// After Stop()-ing threads without watching /proc, AssertSingleThreaded()
+// must wait until /proc catches up rather than crash.
+SANDBOX_TEST(ThreadHelpers, AssertSingleThreadedAfterThreadStopped) {
+ ScopedProc proc_fd;
+ SANDBOX_ASSERT(ThreadHelpers::IsSingleThreaded());
+
+ base::Thread thread1("sandbox_tests");
+ base::Thread thread2("sandbox_tests");
+
+ for (int i = 0; i < GetRaceTestIterations(); ++i) {
+ SANDBOX_ASSERT(
+ ThreadHelpers::StartThreadAndWatchProcFS(proc_fd.fd(), &thread1));
+ SANDBOX_ASSERT(
+ ThreadHelpers::StartThreadAndWatchProcFS(proc_fd.fd(), &thread2));
+ SANDBOX_ASSERT(!ThreadHelpers::IsSingleThreaded());
+
+ thread1.Stop();
+ thread2.Stop();
+ // This will wait on /proc/ to reflect the state of threads in the
+ // process.
+ ThreadHelpers::AssertSingleThreaded();
+ SANDBOX_ASSERT(ThreadHelpers::IsSingleThreaded());
+ }
+}
+
+// Only run this test in Debug mode, where AssertSingleThreaded() will return
+// in less than 64ms.
+#if !defined(NDEBUG)
+// AssertSingleThreaded() must crash with the expected message when another
+// thread is running.
+SANDBOX_DEATH_TEST(
+ ThreadHelpers,
+ AssertSingleThreadedDies,
+ DEATH_MESSAGE(
+ ThreadHelpers::GetAssertSingleThreadedErrorMessageForTests())) {
+ base::Thread thread1("sandbox_tests");
+ SANDBOX_ASSERT(thread1.Start());
+ ThreadHelpers::AssertSingleThreaded();
+}
+#endif // !defined(NDEBUG)
+
+#endif // !defined(THREAD_SANITIZER)
+
+} // namespace
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/services/yama.cc b/libchrome/sandbox/linux/services/yama.cc
new file mode 100644
index 0000000..6831cd9
--- /dev/null
+++ b/libchrome/sandbox/linux/services/yama.cc
@@ -0,0 +1,117 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/services/yama.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+
+#if !defined(PR_SET_PTRACER_ANY)
+#define PR_SET_PTRACER_ANY ((unsigned long)-1)
+#endif
+
+#if !defined(PR_SET_PTRACER)
+#define PR_SET_PTRACER 0x59616d61
+#endif
+
+namespace sandbox {
+
+namespace {
+
+// Enable or disable the Yama ptracers restrictions.
+// Return false if Yama is not present on this kernel.
+// Enable or disable the Yama ptracers restrictions.
+// prctl(PR_SET_PTRACER, 0) restricts ptracing to ancestors;
+// PR_SET_PTRACER_ANY allows any process to ptrace us.
+// Return false if Yama is not present on this kernel.
+bool SetYamaPtracersRestriction(bool enable_restrictions) {
+ unsigned long set_ptracer_arg;
+ if (enable_restrictions) {
+ set_ptracer_arg = 0;
+ } else {
+ set_ptracer_arg = PR_SET_PTRACER_ANY;
+ }
+
+ const int ret = prctl(PR_SET_PTRACER, set_ptracer_arg);
+ const int prctl_errno = errno;
+
+ if (0 == ret) {
+ return true;
+ } else {
+ // ENOSYS or EINVAL means Yama is not in the current kernel.
+ CHECK(ENOSYS == prctl_errno || EINVAL == prctl_errno);
+ return false;
+ }
+}
+
+// Returns true iff /proc/sys/kernel/ is reachable, i.e. /proc is mounted and
+// we are not restricted (e.g. sandboxed) from it.
+bool CanAccessProcFS() {
+ static const char kProcfsKernelSysPath[] = "/proc/sys/kernel/";
+ int ret = access(kProcfsKernelSysPath, F_OK);
+ if (ret) {
+ return false;
+ }
+ return true;
+}
+
+} // namespace
+
+// static
+// Succeeds (returns true) iff Yama is available in the kernel.
+bool Yama::RestrictPtracersToAncestors() {
+ return SetYamaPtracersRestriction(true /* enable_restrictions */);
+}
+
+// static
+// Allows any process to ptrace the current one; fails if Yama is absent.
+bool Yama::DisableYamaRestrictions() {
+ return SetYamaPtracersRestriction(false /* enable_restrictions */);
+}
+
+// static
+// Builds a GlobalStatus bitmask from /proc/sys/kernel/yama/ptrace_scope.
+// Returns 0 (status unknown) when /proc is not accessible.
+int Yama::GetStatus() {
+ if (!CanAccessProcFS()) {
+ return 0;
+ }
+
+ static const char kPtraceScopePath[] = "/proc/sys/kernel/yama/ptrace_scope";
+
+ base::ScopedFD yama_scope(HANDLE_EINTR(open(kPtraceScopePath, O_RDONLY)));
+
+ if (!yama_scope.is_valid()) {
+ const int open_errno = errno;
+ DCHECK(ENOENT == open_errno);
+ // The status is known, yama is not present.
+ return STATUS_KNOWN;
+ }
+
+ // The file holds a single digit: the machine-wide ptrace_scope mode.
+ char yama_scope_value = 0;
+ ssize_t num_read = HANDLE_EINTR(read(yama_scope.get(), &yama_scope_value, 1));
+ PCHECK(1 == num_read);
+
+ switch (yama_scope_value) {
+ case '0':
+ return STATUS_KNOWN | STATUS_PRESENT;
+ case '1':
+ return STATUS_KNOWN | STATUS_PRESENT | STATUS_ENFORCING;
+ case '2':
+ case '3':
+ return STATUS_KNOWN | STATUS_PRESENT | STATUS_ENFORCING |
+ STATUS_STRICT_ENFORCING;
+ default:
+ NOTREACHED();
+ return 0;
+ }
+}
+
+// static
+// True iff GetStatus() reports Yama compiled into the kernel.
+bool Yama::IsPresent() { return GetStatus() & STATUS_PRESENT; }
+
+// static
+// True iff GetStatus() reports Yama enforcing ptrace restrictions.
+bool Yama::IsEnforcing() { return GetStatus() & STATUS_ENFORCING; }
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/services/yama.h b/libchrome/sandbox/linux/services/yama.h
new file mode 100644
index 0000000..e6c5c45
--- /dev/null
+++ b/libchrome/sandbox/linux/services/yama.h
@@ -0,0 +1,57 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SERVICES_YAMA_H_
+#define SANDBOX_LINUX_SERVICES_YAMA_H_
+
+#include "base/macros.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+
+// Yama is a LSM kernel module which can restrict ptrace().
+// This class provides ways to detect if Yama is present and enabled
+// and to restrict which processes can ptrace the current process.
+class SANDBOX_EXPORT Yama {
+ public:
+ // This enum should be used to set or check a bitmask.
+ // A value of 0 would indicate that the status is not known.
+ enum GlobalStatus {
+ STATUS_KNOWN = 1 << 0,
+ STATUS_PRESENT = 1 << 1,
+ STATUS_ENFORCING = 1 << 2,
+ // STATUS_STRICT_ENFORCING corresponds to either mode 2 or mode 3 of Yama.
+ // Ptrace could be entirely denied, or restricted to CAP_SYS_PTRACE
+ // and PTRACE_TRACEME.
+ STATUS_STRICT_ENFORCING = 1 << 3
+ };
+
+ // Restrict who can ptrace() the current process to its ancestors.
+ // If this succeeds, then Yama is available on this kernel.
+ // However, Yama may not be enforcing at this time.
+ static bool RestrictPtracersToAncestors();
+
+ // Disable Yama restrictions for the current process.
+ // This will fail if Yama is not available on this kernel.
+ // This is meant for testing only. If you need this, implement
+ // a per-pid authorization instead.
+ static bool DisableYamaRestrictions();
+
+ // Checks if Yama is currently in enforcing mode for the machine (not the
+ // current process). This requires access to the filesystem and will use
+ // /proc/sys/kernel/yama/ptrace_scope. Returns a GlobalStatus bitmask.
+ static int GetStatus();
+
+ // Helper for checking for STATUS_PRESENT in GetStatus().
+ static bool IsPresent();
+ // Helper for checking for STATUS_ENFORCING in GetStatus().
+ static bool IsEnforcing();
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Yama);
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SERVICES_YAMA_H_
diff --git a/libchrome/sandbox/linux/services/yama_unittests.cc b/libchrome/sandbox/linux/services/yama_unittests.cc
new file mode 100644
index 0000000..0e8355d
--- /dev/null
+++ b/libchrome/sandbox/linux/services/yama_unittests.cc
@@ -0,0 +1,172 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/ptrace.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/compiler_specific.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/string_util.h"
+#include "base/sys_info.h"
+#include "sandbox/linux/services/scoped_process.h"
+#include "sandbox/linux/services/yama.h"
+#include "sandbox/linux/tests/unit_tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace sandbox {
+
+namespace {
+
+// Returns true when running a 32-bit x86 binary on a 64-bit 3.2 kernel,
+// where Yama does not apply (fixed in 3.4); tests skip themselves then.
+bool HasLinux32Bug() {
+#if defined(__i386__)
+ // On 3.2 kernels, yama doesn't work for 32-bit binaries on 64-bit kernels.
+ // This is fixed in 3.4.
+ bool is_kernel_64bit =
+ base::SysInfo::OperatingSystemArchitecture() == "x86_64";
+ bool is_linux = base::SysInfo::OperatingSystemName() == "Linux";
+ bool is_3_dot_2 = base::StartsWith(
+ base::SysInfo::OperatingSystemVersion(), "3.2",
+ base::CompareCase::INSENSITIVE_ASCII);
+ if (is_kernel_64bit && is_linux && is_3_dot_2)
+ return true;
+#endif // defined(__i386__)
+ return false;
+}
+
+// Returns true iff the current process may PTRACE_ATTACH to |pid|.
+// On success the tracee is stopped, waited for, then detached, so the call
+// leaves |pid| running as before.
+bool CanPtrace(pid_t pid) {
+ int ret;
+ ret = ptrace(PTRACE_ATTACH, pid, NULL, NULL);
+ if (ret == -1) {
+ CHECK_EQ(EPERM, errno);
+ return false;
+ }
+ // Wait for the process to be stopped so that it can be detached.
+ siginfo_t process_info;
+ int wait_ret = HANDLE_EINTR(waitid(P_PID, pid, &process_info, WSTOPPED));
+ PCHECK(0 == wait_ret);
+ PCHECK(0 == ptrace(PTRACE_DETACH, pid, NULL, NULL));
+ return true;
+}
+
+// _exit(0) if pid can be ptraced by the current process.
+// _exit(1) otherwise.
+// Intended to run as the body of a forked helper (see CanSubProcessPtrace).
+void ExitZeroIfCanPtrace(pid_t pid) {
+ if (CanPtrace(pid)) {
+ _exit(0);
+ } else {
+ _exit(1);
+ }
+}
+
+// Spawns a child (a sibling of |pid|) and returns true iff that child can
+// ptrace |pid|. The exit code of the helper carries the answer.
+bool CanSubProcessPtrace(pid_t pid) {
+ ScopedProcess process(base::Bind(&ExitZeroIfCanPtrace, pid));
+ bool signaled;
+ int exit_code = process.WaitForExit(&signaled);
+ CHECK(!signaled);
+ return 0 == exit_code;
+}
+
+// The tests below assume that the system-level configuration will not change
+// while they run.
+
+// Checks that GetStatus() returns a stable, internally-consistent bitmask
+// and that the IsPresent()/IsEnforcing() helpers agree with it.
+TEST(Yama, GetStatus) {
+ int status1 = Yama::GetStatus();
+
+ // Check that the value is a possible bitmask.
+ ASSERT_LE(0, status1);
+ ASSERT_GE(Yama::STATUS_KNOWN | Yama::STATUS_PRESENT | Yama::STATUS_ENFORCING |
+ Yama::STATUS_STRICT_ENFORCING,
+ status1);
+
+ // The status should not just be a random value.
+ int status2 = Yama::GetStatus();
+ EXPECT_EQ(status1, status2);
+
+ // This test is not running sandboxed, there is no reason to not know the
+ // status.
+ EXPECT_NE(0, Yama::STATUS_KNOWN & status1);
+
+ if (status1 & Yama::STATUS_STRICT_ENFORCING) {
+ // If Yama is strictly enforcing, it is also enforcing.
+ EXPECT_TRUE(status1 & Yama::STATUS_ENFORCING);
+ }
+
+ if (status1 & Yama::STATUS_ENFORCING) {
+ // If Yama is enforcing, Yama is present.
+ EXPECT_NE(0, status1 & Yama::STATUS_PRESENT);
+ }
+
+ // Verify that the helper functions work as intended.
+ EXPECT_EQ(static_cast<bool>(status1 & Yama::STATUS_ENFORCING),
+ Yama::IsEnforcing());
+ EXPECT_EQ(static_cast<bool>(status1 & Yama::STATUS_PRESENT),
+ Yama::IsPresent());
+
+ fprintf(stdout,
+ "Yama present: %s - enforcing: %s\n",
+ Yama::IsPresent() ? "Y" : "N",
+ Yama::IsEnforcing() ? "Y" : "N");
+}
+
+// RestrictPtracersToAncestors() succeeds exactly when Yama is in the kernel.
+SANDBOX_TEST(Yama, RestrictPtraceSucceedsWhenYamaPresent) {
+ // This call will succeed iff Yama is present.
+ bool restricted = Yama::RestrictPtracersToAncestors();
+ CHECK_EQ(restricted, Yama::IsPresent());
+}
+
+// Attempts to enable or disable Yama restrictions.
+// Return values are intentionally ignored: this runs inside a helper process
+// that only needs the side effect.
+void SetYamaRestrictions(bool enable_restriction) {
+ if (enable_restriction) {
+ Yama::RestrictPtracersToAncestors();
+ } else {
+ Yama::DisableYamaRestrictions();
+ }
+}
+
+// End-to-end check of the restriction semantics: a restricted child can be
+// ptraced by its parent but not by a sibling; disabling restores sibling
+// ptrace (unless the machine-wide mode is strict).
+TEST(Yama, RestrictPtraceWorks) {
+ if (HasLinux32Bug())
+ return;
+
+ ScopedProcess process1(base::Bind(&SetYamaRestrictions, true));
+ ASSERT_TRUE(process1.WaitForClosureToRun());
+
+ if (Yama::IsEnforcing()) {
+ // A sibling process cannot ptrace process1.
+ ASSERT_FALSE(CanSubProcessPtrace(process1.GetPid()));
+ }
+
+ if (!(Yama::GetStatus() & Yama::STATUS_STRICT_ENFORCING)) {
+ // However, parent can ptrace process1.
+ ASSERT_TRUE(CanPtrace(process1.GetPid()));
+
+ // A sibling can ptrace process2 which disables any Yama protection.
+ ScopedProcess process2(base::Bind(&SetYamaRestrictions, false));
+ ASSERT_TRUE(process2.WaitForClosureToRun());
+ ASSERT_TRUE(CanSubProcessPtrace(process2.GetPid()));
+ }
+}
+
+// Yama protection must be the default for a new child, even when the parent
+// explicitly disabled it for itself before forking.
+SANDBOX_TEST(Yama, RestrictPtraceIsDefault) {
+ if (!Yama::IsPresent() || HasLinux32Bug())
+ return;
+
+ CHECK(Yama::DisableYamaRestrictions());
+ ScopedProcess process1(base::Bind(&base::DoNothing));
+
+ if (Yama::IsEnforcing()) {
+ // Check that process1 is protected by Yama, even though it has
+ // been created from a process that disabled Yama.
+ CHECK(!CanSubProcessPtrace(process1.GetPid()));
+ }
+}
+
+} // namespace
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/suid/client/DEPS b/libchrome/sandbox/linux/suid/client/DEPS
new file mode 100644
index 0000000..99a337d
--- /dev/null
+++ b/libchrome/sandbox/linux/suid/client/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+sandbox/linux/services",
+]
diff --git a/libchrome/sandbox/linux/suid/common/sandbox.h b/libchrome/sandbox/linux/suid/common/sandbox.h
new file mode 100644
index 0000000..52ef10c
--- /dev/null
+++ b/libchrome/sandbox/linux/suid/common/sandbox.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SUID_SANDBOX_H_
+#define SANDBOX_LINUX_SUID_SANDBOX_H_
+
+#if defined(__cplusplus)
+namespace sandbox {
+#endif
+
+// These are command line switches that may be used by other programs
+// (e.g. Chrome) to construct a command line for the sandbox.
+static const char kSuidSandboxGetApiSwitch[] = "--get-api";
+static const char kAdjustOOMScoreSwitch[] = "--adjust-oom-score";
+
+static const char kSandboxDescriptorEnvironmentVarName[] = "SBX_D";
+static const char kSandboxHelperPidEnvironmentVarName[] = "SBX_HELPER_PID";
+
+static const int kSUIDSandboxApiNumber = 1;
+static const char kSandboxEnvironmentApiRequest[] = "SBX_CHROME_API_RQ";
+static const char kSandboxEnvironmentApiProvides[] = "SBX_CHROME_API_PRV";
+
+// This number must be kept in sync with common/zygote_commands_linux.h
+static const int kZygoteIdFd = 7;
+
+// These are the magic byte values which the sandboxed process uses to request
+// that it be chrooted.
+static const char kMsgChrootMe = 'C';
+static const char kMsgChrootSuccessful = 'O';
+
+// These are set if we have respectively switched to a new PID or NET namespace
+// by going through the setuid binary helper.
+static const char kSandboxPIDNSEnvironmentVarName[] = "SBX_PID_NS";
+static const char kSandboxNETNSEnvironmentVarName[] = "SBX_NET_NS";
+
+#if defined(__cplusplus)
+} // namespace sandbox
+#endif
+
+#endif // SANDBOX_LINUX_SUID_SANDBOX_H_
diff --git a/libchrome/sandbox/linux/suid/common/suid_unsafe_environment_variables.h b/libchrome/sandbox/linux/suid/common/suid_unsafe_environment_variables.h
new file mode 100644
index 0000000..e955e0c
--- /dev/null
+++ b/libchrome/sandbox/linux/suid/common/suid_unsafe_environment_variables.h
@@ -0,0 +1,74 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a list of environment variables which the ELF loader unsets when
+// loading a SUID binary. Because they are unset rather than just ignored, they
+// aren't passed to child processes of SUID processes either.
+//
+// We need to save these environment variables before running a SUID sandbox
+// and restore them before running child processes (but after dropping root).
+//
+// List gathered from glibc sources (00ebd7ed58df389a78e41dece058048725cb585e):
+// sysdeps/unix/sysv/linux/i386/dl-librecon.h
+// sysdeps/generic/unsecvars.h
+
+#ifndef SANDBOX_LINUX_SUID_COMMON_SUID_UNSAFE_ENVIRONMENT_VARIABLES_H_
+#define SANDBOX_LINUX_SUID_COMMON_SUID_UNSAFE_ENVIRONMENT_VARIABLES_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h> // malloc
+#include <string.h> // memcpy
+
+// NULL-terminated list of environment variables the ELF loader strips for
+// SUID binaries (see the file header for the glibc sources this mirrors).
+static const char* const kSUIDUnsafeEnvironmentVariables[] = {
+ "LD_AOUT_LIBRARY_PATH",
+ "LD_AOUT_PRELOAD",
+ "GCONV_PATH",
+ "GETCONF_DIR",
+ "HOSTALIASES",
+ "LD_AUDIT",
+ "LD_DEBUG",
+ "LD_DEBUG_OUTPUT",
+ "LD_DYNAMIC_WEAK",
+ "LD_LIBRARY_PATH",
+ "LD_ORIGIN_PATH",
+ "LD_PRELOAD",
+ "LD_PROFILE",
+ "LD_SHOW_AUXV",
+ "LD_USE_LOAD_BIAS",
+ "LOCALDOMAIN",
+ "LOCPATH",
+ "MALLOC_TRACE",
+ "NIS_PATH",
+ "NLSPATH",
+ "RESOLV_HOST_CONF",
+ "RES_OPTIONS",
+ "TMPDIR",
+ "TZDIR",
+ NULL,
+};
+
+// Return a malloc allocated string containing the 'saved' environment variable
+// name for a given environment variable ("SANDBOX_" + |envvar|).
+// Returns NULL on allocation failure or if the length would overflow size_t.
+// The caller owns the returned buffer and must free() it.
+static inline char* SandboxSavedEnvironmentVariable(const char* envvar) {
+ const size_t envvar_len = strlen(envvar);
+ const size_t kMaxSizeT = (size_t) -1;
+
+ // Guard the additions below against size_t overflow.
+ if (envvar_len > kMaxSizeT - 1 - 8)
+ return NULL;
+
+ const size_t saved_envvarlen = envvar_len + 1 /* NUL terminator */ +
+ 8 /* strlen("SANDBOX_") */;
+ char* const saved_envvar = (char*) malloc(saved_envvarlen);
+ if (!saved_envvar)
+ return NULL;
+
+ memcpy(saved_envvar, "SANDBOX_", 8);
+ memcpy(saved_envvar + 8, envvar, envvar_len);
+ saved_envvar[8 + envvar_len] = 0;
+
+ return saved_envvar;
+}
+
+#endif // SANDBOX_LINUX_SUID_COMMON_SUID_UNSAFE_ENVIRONMENT_VARIABLES_H_
diff --git a/libchrome/sandbox/linux/suid/process_util.h b/libchrome/sandbox/linux/suid/process_util.h
new file mode 100644
index 0000000..10071d3
--- /dev/null
+++ b/libchrome/sandbox/linux/suid/process_util.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The following is duplicated from base/process_utils.h.
+// We shouldn't link against C++ code in a setuid binary.
+
+#ifndef SANDBOX_LINUX_SUID_PROCESS_UTIL_H_
+#define SANDBOX_LINUX_SUID_PROCESS_UTIL_H_
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+// This adjusts /proc/process/oom_score_adj so the Linux OOM killer
+// will prefer certain process types over others. The range for the
+// adjustment is [-1000, 1000], with [0, 1000] being user accessible.
+//
+// If the Linux system isn't new enough to use oom_score_adj, then we
+// try to set the older oom_adj value instead, scaling the score to
+// the required range of [0, 15]. This may result in some aliasing of
+// values, of course.
+bool AdjustOOMScore(pid_t process, int score);
+
+// This adjusts /sys/kernel/mm/chromeos-low_mem/margin so that
+// the kernel notifies us that we are low on memory when less than
+// |margin_mb| megabytes are available. Setting |margin_mb| to -1
+// turns off low memory notification.
+bool AdjustLowMemoryMargin(int64_t margin_mb);
+
+#endif // SANDBOX_LINUX_SUID_PROCESS_UTIL_H_
diff --git a/libchrome/sandbox/linux/suid/process_util_linux.c b/libchrome/sandbox/linux/suid/process_util_linux.c
new file mode 100644
index 0000000..40949bd
--- /dev/null
+++ b/libchrome/sandbox/linux/suid/process_util_linux.c
@@ -0,0 +1,79 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The following is the C version of code from base/process_utils_linux.cc.
+// We shouldn't link against C++ code in a setuid binary.
+
+// Needed for O_DIRECTORY, must be defined before fcntl.h is included
+// (and it can be included earlier than the explicit #include below
+// in some versions of glibc).
+#define _GNU_SOURCE
+
+#include "sandbox/linux/suid/process_util.h"
+
+#include <fcntl.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+// Ranges for the current (oom_score_adj) and previous (oom_adj)
+// flavors of OOM score.
+static const int kMaxOomScore = 1000;
+static const int kMaxOldOomScore = 15;
+
+// NOTE: This is not the only version of this function in the source:
+// the base library (in process_util_linux.cc) also has its own C++ version.
+//
+// Writes |score| to /proc/<process>/oom_score_adj, falling back to the legacy
+// oom_adj file (with the score rescaled to [0, 15]) on older kernels.
+// Only acts on processes owned by the caller; returns true on success.
+bool AdjustOOMScore(pid_t process, int score) {
+ if (score < 0 || score > kMaxOomScore)
+ return false;
+
+ char oom_adj[27]; // "/proc/" + log_10(2**64) + "\0"
+ // 6 + 20 + 1 = 27
+ snprintf(oom_adj, sizeof(oom_adj), "/proc/%" PRIdMAX, (intmax_t)process);
+
+ const int dirfd = open(oom_adj, O_RDONLY | O_DIRECTORY);
+ if (dirfd < 0)
+ return false;
+
+ struct stat statbuf;
+ if (fstat(dirfd, &statbuf) < 0) {
+ close(dirfd);
+ return false;
+ }
+ // Refuse to adjust processes owned by a different user.
+ if (getuid() != statbuf.st_uid) {
+ close(dirfd);
+ return false;
+ }
+
+ int fd = openat(dirfd, "oom_score_adj", O_WRONLY);
+ if (fd < 0) {
+ // We failed to open oom_score_adj, so let's try for the older
+ // oom_adj file instead.
+ fd = openat(dirfd, "oom_adj", O_WRONLY);
+ if (fd < 0) {
+ // Nope, that doesn't work either. Close dirfd before returning so the
+ // descriptor is not leaked on this failure path.
+ close(dirfd);
+ return false;
+ } else {
+ // If we're using the old oom_adj file, the allowed range is now
+ // [0, kMaxOldOomScore], so we scale the score. This may result in some
+ // aliasing of values, of course.
+ score = score * kMaxOldOomScore / kMaxOomScore;
+ }
+ }
+ close(dirfd);
+
+ char buf[11]; // 0 <= |score| <= kMaxOomScore; using log_10(2**32) + 1 size
+ snprintf(buf, sizeof(buf), "%d", score);
+ size_t len = strlen(buf);
+
+ ssize_t bytes_written = write(fd, buf, len);
+ close(fd);
+ return (bytes_written == (ssize_t)len);
+}
diff --git a/libchrome/sandbox/linux/suid/sandbox.c b/libchrome/sandbox/linux/suid/sandbox.c
new file mode 100644
index 0000000..b655d1c
--- /dev/null
+++ b/libchrome/sandbox/linux/suid/sandbox.c
@@ -0,0 +1,483 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// https://chromium.googlesource.com/chromium/src/+/master/docs/linux_suid_sandbox.md
+
+#include "sandbox/linux/suid/common/sandbox.h"
+
+#define _GNU_SOURCE
+#include <asm/unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/vfs.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "sandbox/linux/suid/common/suid_unsafe_environment_variables.h"
+#include "sandbox/linux/suid/process_util.h"
+
+#if !defined(CLONE_NEWPID)
+#define CLONE_NEWPID 0x20000000
+#endif
+#if !defined(CLONE_NEWNET)
+#define CLONE_NEWNET 0x40000000
+#endif
+
+static bool DropRoot();
+
+#define HANDLE_EINTR(x) TEMP_FAILURE_RETRY(x)
+
+static void FatalError(const char* msg, ...)
+ __attribute__((noreturn, format(printf, 1, 2)));
+
+// Prints |msg| (printf-style) followed by ": <strerror(errno)>" to stderr
+// and terminates the process with _exit(1). Never returns. Callers invoke
+// this immediately after a failed syscall so errno is still meaningful.
+static void FatalError(const char* msg, ...) {
+  va_list ap;
+  va_start(ap, msg);
+
+  vfprintf(stderr, msg, ap);
+  fprintf(stderr, ": %s\n", strerror(errno));
+  // Flush explicitly: _exit() bypasses stdio's atexit() flushing.
+  fflush(stderr);
+  va_end(ap);
+  _exit(1);
+}
+
+// Signal handler: reports that the sandbox got signaled and exits with
+// status 1. Uses a raw write(2) to fd 2 because stdio functions are not
+// async-signal-safe.
+static void ExitWithErrorSignalHandler(int signal) {
+  const char msg[] = "\nThe setuid sandbox got signaled, exiting.\n";
+  // The result check only silences -Wunused-result; nothing useful can be
+  // done if the write fails.
+  if (-1 == write(2, msg, sizeof(msg) - 1)) {
+    // Do nothing.
+  }
+
+  _exit(1);
+}
+
+// We will chroot() to the helper's /proc/self directory. Anything there will
+// not exist anymore if we make sure to wait() for the helper.
+//
+// /proc/self/fdinfo or /proc/self/fd are especially safe and will be empty
+// even if the helper survives as a zombie.
+//
+// There is very little reason to use fdinfo/ instead of fd/ but we are
+// paranoid. fdinfo/ only exists since 2.6.22 so we allow fallback to fd/
+#define SAFE_DIR "/proc/self/fdinfo"
+#define SAFE_DIR2 "/proc/self/fd"
+
+// Spawns a helper process that shares our filesystem state (CLONE_FS) and
+// blocks on one end of a socketpair. When the sandboxed process later sends
+// kMsgChrootMe, the helper chroot()s into the (empty) /proc/self/fdinfo or
+// /proc/self/fd directory — and because of CLONE_FS this chroots the
+// sandboxed process too — then replies kMsgChrootSuccessful and exits.
+// The parent's socketpair fd number and the helper's pid are published via
+// kSandboxDescriptorEnvironmentVarName / kSandboxHelperPidEnvironmentVarName.
+// Returns false on any setup failure.
+static bool SpawnChrootHelper() {
+  int sv[2];
+  if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == -1) {
+    perror("socketpair");
+    return false;
+  }
+
+  // Prefer fdinfo/ over fd/ (see the comment above SAFE_DIR).
+  char* safedir = NULL;
+  struct stat sdir_stat;
+  if (!stat(SAFE_DIR, &sdir_stat) && S_ISDIR(sdir_stat.st_mode)) {
+    safedir = SAFE_DIR;
+  } else if (!stat(SAFE_DIR2, &sdir_stat) && S_ISDIR(sdir_stat.st_mode)) {
+    safedir = SAFE_DIR2;
+  } else {
+    fprintf(stderr, "Could not find %s\n", SAFE_DIR2);
+    // NOTE(review): sv[0]/sv[1] are leaked on this path. Harmless because
+    // the caller exits on failure, but inconsistent with the paths below.
+    return false;
+  }
+
+  const pid_t pid = syscall(__NR_clone, CLONE_FS | SIGCHLD, 0, 0, 0);
+
+  if (pid == -1) {
+    perror("clone");
+    close(sv[0]);
+    close(sv[1]);
+    return false;
+  }
+
+  if (pid == 0) {
+    // We share our files structure with an untrusted process. As a security in
+    // depth measure, we make sure that we can't open anything by mistake.
+    // TODO(agl): drop CAP_SYS_RESOURCE / use SECURE_NOROOT
+
+    const struct rlimit nofile = {0, 0};
+    if (setrlimit(RLIMIT_NOFILE, &nofile))
+      FatalError("Setting RLIMIT_NOFILE");
+
+    if (close(sv[1]))
+      FatalError("close");
+
+    // wait for message
+    char msg;
+    ssize_t bytes;
+    do {
+      bytes = read(sv[0], &msg, 1);
+    } while (bytes == -1 && errno == EINTR);
+
+    // EOF: the other end closed without ever asking for a chroot.
+    if (bytes == 0)
+      _exit(0);
+    if (bytes != 1)
+      FatalError("read");
+
+    // do chrooting
+    if (msg != kMsgChrootMe)
+      FatalError("Unknown message from sandboxed process");
+
+    // sanity check
+    if (chdir(safedir))
+      FatalError("Cannot chdir into /proc/ directory");
+
+    if (chroot(safedir))
+      FatalError("Cannot chroot into /proc/ directory");
+
+    if (chdir("/"))
+      FatalError("Cannot chdir to / after chroot");
+
+    const char reply = kMsgChrootSuccessful;
+    do {
+      bytes = write(sv[0], &reply, 1);
+    } while (bytes == -1 && errno == EINTR);
+
+    if (bytes != 1)
+      FatalError("Writing reply");
+
+    _exit(0);
+    // We now become a zombie. /proc/self/fd(info) is now an empty dir and we
+    // are chrooted there.
+    // Our (unprivileged) parent should not even be able to open "." or "/"
+    // since they would need to pass the ptrace() check. If our parent wait()
+    // for us, our root directory will completely disappear.
+  }
+
+  if (close(sv[0])) {
+    close(sv[1]);
+    perror("close");
+    return false;
+  }
+
+  // In the parent process, we install an environment variable containing the
+  // number of the file descriptor.
+  char desc_str[64];
+  int printed = snprintf(desc_str, sizeof(desc_str), "%u", sv[1]);
+  if (printed < 0 || printed >= (int)sizeof(desc_str)) {
+    fprintf(stderr, "Failed to snprintf\n");
+    // NOTE(review): sv[1] is not closed on this path (or the analogous
+    // snprintf failure below), unlike the setenv() failure paths. Harmless
+    // since the caller exits on failure — confirm before reusing elsewhere.
+    return false;
+  }
+
+  if (setenv(kSandboxDescriptorEnvironmentVarName, desc_str, 1)) {
+    perror("setenv");
+    close(sv[1]);
+    return false;
+  }
+
+  // We also install an environment variable containing the pid of the child
+  char helper_pid_str[64];
+  printed = snprintf(helper_pid_str, sizeof(helper_pid_str), "%u", pid);
+  if (printed < 0 || printed >= (int)sizeof(helper_pid_str)) {
+    fprintf(stderr, "Failed to snprintf\n");
+    return false;
+  }
+
+  if (setenv(kSandboxHelperPidEnvironmentVarName, helper_pid_str, 1)) {
+    perror("setenv");
+    close(sv[1]);
+    return false;
+  }
+
+  return true;
+}
+
+// Block until child_pid exits, then exit. Try to preserve the exit code.
+// If the child was killed by a signal, exits 0; if waitid() itself fails
+// or reaps an unexpected pid, exits with -1 (i.e. status byte 255).
+static void WaitForChildAndExit(pid_t child_pid) {
+  int exit_code = -1;
+  siginfo_t reaped_child_info;
+
+  // Don't "Core" on SIGABRT. SIGABRT is sent by the Chrome OS session manager
+  // when things are hanging.
+  // Here, the current process is going to waitid() and _exit(), so there is no
+  // point in generating a crash report. The child process is the one
+  // blocking us.
+  if (signal(SIGABRT, ExitWithErrorSignalHandler) == SIG_ERR) {
+    FatalError("Failed to change signal handler");
+  }
+
+  int wait_ret =
+      HANDLE_EINTR(waitid(P_PID, child_pid, &reaped_child_info, WEXITED));
+
+  if (!wait_ret && reaped_child_info.si_pid == child_pid) {
+    if (reaped_child_info.si_code == CLD_EXITED) {
+      exit_code = reaped_child_info.si_status;
+    } else {
+      // Exit with code 0 if the child got signaled.
+      exit_code = 0;
+    }
+  }
+  _exit(exit_code);
+}
+
+// Tries to clone() the process into new namespaces: first PID+network,
+// then PID only. On a successful clone the parent drops root, closes
+// kZygoteIdFd, signals the child over a socketpair to continue, and then
+// waits for it forever (the parent never returns from this function). The
+// child records which namespaces it got via environment variables and
+// returns true. If the kernel does not support the requested namespaces
+// (EINVAL), returns true without new namespaces; any other clone() error
+// returns false.
+static bool MoveToNewNamespaces() {
+  // These are the sets of flags which we'll try, in order.
+  const int kCloneExtraFlags[] = {CLONE_NEWPID | CLONE_NEWNET, CLONE_NEWPID, };
+
+  // We need to close kZygoteIdFd before the child can continue. We use this
+  // socketpair to tell the child when to continue;
+  int sync_fds[2];
+  if (socketpair(AF_UNIX, SOCK_STREAM, 0, sync_fds)) {
+    FatalError("Failed to create a socketpair");
+  }
+
+  for (size_t i = 0; i < sizeof(kCloneExtraFlags) / sizeof(kCloneExtraFlags[0]);
+       i++) {
+    pid_t pid = syscall(__NR_clone, SIGCHLD | kCloneExtraFlags[i], 0, 0, 0);
+    const int clone_errno = errno;
+
+    if (pid > 0) {
+      if (!DropRoot()) {
+        FatalError("Could not drop privileges");
+      } else {
+        if (close(sync_fds[0]) || shutdown(sync_fds[1], SHUT_RD))
+          FatalError("Could not close socketpair");
+        // The kZygoteIdFd needs to be closed in the parent before
+        // Zygote gets started.
+        if (close(kZygoteIdFd))
+          FatalError("close");
+        // Tell our child to continue
+        if (HANDLE_EINTR(send(sync_fds[1], "C", 1, MSG_NOSIGNAL)) != 1)
+          FatalError("send");
+        if (close(sync_fds[1]))
+          FatalError("close");
+        // We want to keep a full process tree and we don't want our childs to
+        // be reparented to (the outer PID namespace) init. So we wait for it.
+        WaitForChildAndExit(pid);
+      }
+      // NOTREACHED
+      FatalError("Not reached");
+    }
+
+    if (pid == 0) {
+      if (close(sync_fds[1]) || shutdown(sync_fds[0], SHUT_WR))
+        FatalError("Could not close socketpair");
+
+      // Wait for the parent to confirm it closed kZygoteIdFd before we
+      // continue
+      char should_continue;
+      if (HANDLE_EINTR(read(sync_fds[0], &should_continue, 1)) != 1)
+        FatalError("Read on socketpair");
+      if (close(sync_fds[0]))
+        FatalError("close");
+
+      // Advertise which namespaces we actually obtained so the sandboxed
+      // process can adapt.
+      if (kCloneExtraFlags[i] & CLONE_NEWPID) {
+        setenv(kSandboxPIDNSEnvironmentVarName, "", 1 /* overwrite */);
+      } else {
+        unsetenv(kSandboxPIDNSEnvironmentVarName);
+      }
+
+      if (kCloneExtraFlags[i] & CLONE_NEWNET) {
+        setenv(kSandboxNETNSEnvironmentVarName, "", 1 /* overwrite */);
+      } else {
+        unsetenv(kSandboxNETNSEnvironmentVarName);
+      }
+
+      break;
+    }
+
+    // If EINVAL then the system doesn't support the requested flags, so
+    // continue to try a different set.
+    // On any other errno value the system *does* support these flags but
+    // something went wrong, hence we bail with an error message rather then
+    // provide less security.
+    // NOTE(review): this tests |errno| while the message uses the saved
+    // |clone_errno|; nothing in between should clobber errno, but testing
+    // clone_errno here as well would be more robust.
+    if (errno != EINVAL) {
+      fprintf(stderr, "Failed to move to new namespace:");
+      if (kCloneExtraFlags[i] & CLONE_NEWPID) {
+        fprintf(stderr, " PID namespaces supported,");
+      }
+      if (kCloneExtraFlags[i] & CLONE_NEWNET) {
+        fprintf(stderr, " Network namespace supported,");
+      }
+      fprintf(stderr, " but failed: errno = %s\n", strerror(clone_errno));
+      return false;
+    }
+  }
+
+  // If the system doesn't support NEWPID then we carry on anyway.
+  return true;
+}
+
+// Irrevocably gives up root: marks the process non-dumpable (per prctl(2)
+// this also prevents ptrace attach by unprivileged processes and core
+// dumps), verifies the flag stuck, then sets all three (real, effective,
+// saved) gids and uids to the invoking user's real ids. Groups are dropped
+// before uids, since setresgid() requires privilege we are about to lose.
+// Returns false if any step fails.
+static bool DropRoot() {
+  if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0)) {
+    perror("prctl(PR_SET_DUMPABLE)");
+    return false;
+  }
+
+  // Paranoia: read the flag back to make sure the kernel really cleared it.
+  if (prctl(PR_GET_DUMPABLE, 0, 0, 0, 0)) {
+    perror("Still dumpable after prctl(PR_SET_DUMPABLE)");
+    return false;
+  }
+
+  gid_t rgid, egid, sgid;
+  if (getresgid(&rgid, &egid, &sgid)) {
+    perror("getresgid");
+    return false;
+  }
+
+  if (setresgid(rgid, rgid, rgid)) {
+    perror("setresgid");
+    return false;
+  }
+
+  uid_t ruid, euid, suid;
+  if (getresuid(&ruid, &euid, &suid)) {
+    perror("getresuid");
+    return false;
+  }
+
+  if (setresuid(ruid, ruid, ruid)) {
+    perror("setresuid");
+    return false;
+  }
+
+  return true;
+}
+
+// Restores the environment variables that ld.so stripped (because we are
+// setuid) from their SANDBOX_-prefixed saved copies, then removes the
+// saved copies. Returns false only if SandboxSavedEnvironmentVariable()
+// returns NULL; setenv()/unsetenv() failures are deliberately ignored.
+static bool SetupChildEnvironment() {
+  unsigned i;
+
+  // ld.so may have cleared several environment variables because we are SUID.
+  // However, the child process might need them so zygote_host_linux.cc saves a
+  // copy in SANDBOX_$x. This is safe because we have dropped root by this
+  // point, so we can only exec a binary with the permissions of the user who
+  // ran us in the first place.
+
+  for (i = 0; kSUIDUnsafeEnvironmentVariables[i]; ++i) {
+    const char* const envvar = kSUIDUnsafeEnvironmentVariables[i];
+    // SandboxSavedEnvironmentVariable() returns heap memory; freed below.
+    char* const saved_envvar = SandboxSavedEnvironmentVariable(envvar);
+    if (!saved_envvar)
+      return false;
+
+    const char* const value = getenv(saved_envvar);
+    if (value) {
+      setenv(envvar, value, 1 /* overwrite */);
+      unsetenv(saved_envvar);
+    }
+
+    free(saved_envvar);
+  }
+
+  return true;
+}
+
+// Reads the API version the sandboxed process requested via
+// kSandboxEnvironmentApiRequest (defaulting to 0 if unset), warns on a
+// mismatch with kSUIDSandboxApiNumber (a mismatch is not fatal for now),
+// and exports the provided version through kSandboxEnvironmentApiProvides.
+// Returns false on a malformed request string or if setenv() fails.
+bool CheckAndExportApiVersion() {
+  // Check the environment to see if a specific API version was requested.
+  // assume version 0 if none.
+  int api_number = -1;
+  char* api_string = getenv(kSandboxEnvironmentApiRequest);
+  if (!api_string) {
+    api_number = 0;
+  } else {
+    // Strict strtol parse: reject trailing garbage, range errors, and
+    // values that do not fit in an int.
+    errno = 0;
+    char* endptr = NULL;
+    long long_api_number = strtol(api_string, &endptr, 10);
+    if (!endptr || *endptr || errno != 0 || long_api_number < INT_MIN ||
+        long_api_number > INT_MAX) {
+      return false;
+    }
+    api_number = long_api_number;
+  }
+
+  // Warn only for now.
+  if (api_number != kSUIDSandboxApiNumber) {
+    fprintf(
+        stderr,
+        "The setuid sandbox provides API version %d, "
+        "but you need %d\n"
+        "Please read "
+        "https://chromium.googlesource.com/chromium/src/+/master/docs/linux_suid_sandbox_development.md."
+        "\n\n",
+        kSUIDSandboxApiNumber,
+        api_number);
+  }
+
+  // Export our version so that the sandboxed process can verify it did not
+  // use an old sandbox.
+  char version_string[64];
+  snprintf(version_string, sizeof(version_string), "%d", kSUIDSandboxApiNumber);
+  if (setenv(kSandboxEnvironmentApiProvides, version_string, 1)) {
+    perror("setenv");
+    return false;
+  }
+
+  return true;
+}
+
+// Entry point. Three modes:
+//   1. kSuidSandboxGetApiSwitch: print kSUIDSandboxApiNumber and exit 0.
+//   2. kAdjustOOMScoreSwitch <pid> <score>: adjust a renderer's OOM score
+//      (see the NOTE below about the exit code).
+//   3. Otherwise: move into new namespaces, spawn the chroot helper, drop
+//      root, restore the saved environment, and exec argv[1..].
+int main(int argc, char** argv) {
+  if (argc <= 1) {
+    // argc <= 0 means we cannot even print a usage string safely.
+    if (argc <= 0) {
+      return 1;
+    }
+
+    fprintf(stderr, "Usage: %s <renderer process> <args...>\n", argv[0]);
+    return 1;
+  }
+
+  // Allow someone to query our API version
+  if (argc == 2 && 0 == strcmp(argv[1], kSuidSandboxGetApiSwitch)) {
+    printf("%d\n", kSUIDSandboxApiNumber);
+    return 0;
+  }
+
+  // We cannot adjust /proc/pid/oom_adj for sandboxed renderers
+  // because those files are owned by root. So we need a helper here.
+  if (argc == 4 && (0 == strcmp(argv[1], kAdjustOOMScoreSwitch))) {
+    char* endptr = NULL;
+    long score;
+    errno = 0;
+    unsigned long pid_ul = strtoul(argv[2], &endptr, 10);
+    if (pid_ul == ULONG_MAX || !endptr || *endptr || errno != 0)
+      return 1;
+    pid_t pid = pid_ul;
+    endptr = NULL;
+    errno = 0;
+    score = strtol(argv[3], &endptr, 10);
+    if (score == LONG_MAX || score == LONG_MIN || !endptr || *endptr ||
+        errno != 0) {
+      return 1;
+    }
+    // NOTE(review): AdjustOOMScore() returns true (1) on success, so this
+    // exits with status 1 on success and 0 on failure — inverted from the
+    // usual shell convention. Confirm callers expect this before changing.
+    return AdjustOOMScore(pid, score);
+  }
+
+  // Protect the core setuid sandbox functionality with an API version
+  if (!CheckAndExportApiVersion()) {
+    return 1;
+  }
+
+  // Only a warning: without root the clone() below will normally fail and
+  // we exit there with a (hopefully) clearer picture of the cause.
+  if (geteuid() != 0) {
+    fprintf(stderr,
+            "The setuid sandbox is not running as root. Common causes:\n"
+            " * An unprivileged process using ptrace on it, like a debugger.\n"
+            " * A parent process set prctl(PR_SET_NO_NEW_PRIVS, ...)\n");
+  }
+
+  if (!MoveToNewNamespaces())
+    return 1;
+  if (!SpawnChrootHelper())
+    return 1;
+  if (!DropRoot())
+    return 1;
+  if (!SetupChildEnvironment())
+    return 1;
+
+  execv(argv[1], &argv[1]);
+  FatalError("execv failed");
+
+  return 1;
+}
diff --git a/libchrome/sandbox/linux/syscall_broker/DEPS b/libchrome/sandbox/linux/syscall_broker/DEPS
new file mode 100644
index 0000000..70d9b18
--- /dev/null
+++ b/libchrome/sandbox/linux/syscall_broker/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+sandbox/linux/system_headers",
+]
diff --git a/libchrome/sandbox/linux/syscall_broker/broker_channel.cc b/libchrome/sandbox/linux/syscall_broker/broker_channel.cc
new file mode 100644
index 0000000..fa0f761
--- /dev/null
+++ b/libchrome/sandbox/linux/syscall_broker/broker_channel.cc
@@ -0,0 +1,35 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/syscall_broker/broker_channel.h"
+
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include "base/logging.h"
+
+namespace sandbox {
+
+namespace syscall_broker {
+
+// static
+// Creates a connected pair of unidirectional endpoints: |reader| can only
+// receive (its write half is shut down) and |writer| can only send. Crashes
+// via PCHECK() on any syscall failure, so it never returns half-initialized
+// endpoints.
+void BrokerChannel::CreatePair(EndPoint* reader, EndPoint* writer) {
+  DCHECK(reader);
+  DCHECK(writer);
+  int socket_pair[2];
+  // Use SOCK_SEQPACKET, to preserve message boundaries but we also want to be
+  // notified (recvmsg should return and not block) when the connection has
+  // been broken which could mean that the other end has been closed.
+  PCHECK(0 == socketpair(AF_UNIX, SOCK_SEQPACKET, 0, socket_pair));
+
+  reader->reset(socket_pair[0]);
+  // Make |reader| receive-only.
+  PCHECK(0 == shutdown(reader->get(), SHUT_WR));
+
+  writer->reset(socket_pair[1]);
+  // Make |writer| send-only.
+  PCHECK(0 == shutdown(writer->get(), SHUT_RD));
+}
+
+} // namespace syscall_broker
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/syscall_broker/broker_channel.h b/libchrome/sandbox/linux/syscall_broker/broker_channel.h
new file mode 100644
index 0000000..2abdba4
--- /dev/null
+++ b/libchrome/sandbox/linux/syscall_broker/broker_channel.h
@@ -0,0 +1,31 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSCALL_BROKER_BROKER_CHANNEL_H_
+#define SANDBOX_LINUX_SYSCALL_BROKER_BROKER_CHANNEL_H_
+
+#include "base/files/scoped_file.h"
+#include "base/macros.h"
+
+namespace sandbox {
+
+namespace syscall_broker {
+
+// A small class to create a pipe-like communication channel. It is based on a
+// SOCK_SEQPACKET unix socket, which is connection-based and guaranteed to
+// preserve message boundaries.
+class BrokerChannel {
+ public:
+  // Endpoints own their fd and close it on destruction (base::ScopedFD).
+  typedef base::ScopedFD EndPoint;
+  // Creates a connected pair: |reader| receive-only, |writer| send-only.
+  static void CreatePair(EndPoint* reader, EndPoint* writer);
+
+ private:
+  // Static-only utility class; instances are never created.
+  DISALLOW_IMPLICIT_CONSTRUCTORS(BrokerChannel);
+};
+
+} // namespace syscall_broker
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SYSCALL_BROKER_BROKER_CHANNEL_H_
diff --git a/libchrome/sandbox/linux/syscall_broker/broker_client.cc b/libchrome/sandbox/linux/syscall_broker/broker_client.cc
new file mode 100644
index 0000000..36c92cd
--- /dev/null
+++ b/libchrome/sandbox/linux/syscall_broker/broker_client.cc
@@ -0,0 +1,146 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/syscall_broker/broker_client.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <utility>
+
+#include "base/logging.h"
+#include "base/pickle.h"
+#include "base/posix/unix_domain_socket_linux.h"
+#include "build/build_config.h"
+#include "sandbox/linux/syscall_broker/broker_channel.h"
+#include "sandbox/linux/syscall_broker/broker_common.h"
+#include "sandbox/linux/syscall_broker/broker_policy.h"
+
+#if defined(OS_ANDROID) && !defined(MSG_CMSG_CLOEXEC)
+#define MSG_CMSG_CLOEXEC 0x40000000
+#endif
+
+namespace sandbox {
+
+namespace syscall_broker {
+
+// Make a remote system call over IPC for syscalls that take a path and flags
+// as arguments, currently open() and access().
+// Will return -errno like a real system call.
+// Returns -ENOMEM if the IPC transport itself fails.
+// This function needs to be async signal safe.
+int BrokerClient::PathAndFlagsSyscall(IPCCommand syscall_type,
+                                      const char* pathname,
+                                      int flags) const {
+  int recvmsg_flags = 0;
+  RAW_CHECK(syscall_type == COMMAND_OPEN || syscall_type == COMMAND_ACCESS);
+  if (!pathname)
+    return -EFAULT;
+
+  // For this "remote system call" to work, we need to handle any flag that
+  // cannot be sent over a Unix socket in a special way.
+  // See the comments around kCurrentProcessOpenFlagsMask.
+  if (syscall_type == COMMAND_OPEN && (flags & kCurrentProcessOpenFlagsMask)) {
+    // This implementation only knows about O_CLOEXEC, someone needs to look at
+    // this code if other flags are added.
+    RAW_CHECK(kCurrentProcessOpenFlagsMask == O_CLOEXEC);
+    recvmsg_flags |= MSG_CMSG_CLOEXEC;
+    flags &= ~O_CLOEXEC;
+  }
+
+  // There is no point in forwarding a request that we know will be denied.
+  // Of course, the real security check needs to be on the other side of the
+  // IPC.
+  if (fast_check_in_client_) {
+    if (syscall_type == COMMAND_OPEN &&
+        !broker_policy_.GetFileNameIfAllowedToOpen(
+            pathname, flags, NULL /* file_to_open */,
+            NULL /* unlink_after_open */)) {
+      return -broker_policy_.denied_errno();
+    }
+    if (syscall_type == COMMAND_ACCESS &&
+        !broker_policy_.GetFileNameIfAllowedToAccess(pathname, flags, NULL)) {
+      return -broker_policy_.denied_errno();
+    }
+  }
+
+  // Marshal the request: command, path, flags.
+  base::Pickle write_pickle;
+  write_pickle.WriteInt(syscall_type);
+  write_pickle.WriteString(pathname);
+  write_pickle.WriteInt(flags);
+  RAW_CHECK(write_pickle.size() <= kMaxMessageLength);
+
+  int returned_fd = -1;
+  uint8_t reply_buf[kMaxMessageLength];
+
+  // Send a request (in write_pickle) as well that will include a new
+  // temporary socketpair (created internally by SendRecvMsg()).
+  // Then read the reply on this new socketpair in reply_buf and put an
+  // eventual attached file descriptor in |returned_fd|.
+  ssize_t msg_len = base::UnixDomainSocket::SendRecvMsgWithFlags(
+      ipc_channel_.get(), reply_buf, sizeof(reply_buf), recvmsg_flags,
+      &returned_fd, write_pickle);
+  if (msg_len <= 0) {
+    if (!quiet_failures_for_tests_)
+      RAW_LOG(ERROR, "Could not make request to broker process");
+    // Transport-level failure; there is no remote errno to forward.
+    return -ENOMEM;
+  }
+
+  base::Pickle read_pickle(reinterpret_cast<char*>(reply_buf), msg_len);
+  base::PickleIterator iter(read_pickle);
+  int return_value = -1;
+  // Now deserialize the return value and eventually return the file
+  // descriptor.
+  if (iter.ReadInt(&return_value)) {
+    switch (syscall_type) {
+      case COMMAND_ACCESS:
+        // We should never have a fd to return.
+        RAW_CHECK(returned_fd == -1);
+        return return_value;
+      case COMMAND_OPEN:
+        if (return_value < 0) {
+          RAW_CHECK(returned_fd == -1);
+          return return_value;
+        } else {
+          // We have a real file descriptor to return.
+          RAW_CHECK(returned_fd >= 0);
+          return returned_fd;
+        }
+      default:
+        RAW_LOG(ERROR, "Unsupported command");
+        return -ENOSYS;
+    }
+  } else {
+    RAW_LOG(ERROR, "Could not read pickle");
+    NOTREACHED();
+    return -ENOMEM;
+  }
+}
+
+// |ipc_channel| is moved in and owned (closed on destruction) by this
+// object; |broker_policy| must outlive the client.
+BrokerClient::BrokerClient(const BrokerPolicy& broker_policy,
+                           BrokerChannel::EndPoint ipc_channel,
+                           bool fast_check_in_client,
+                           bool quiet_failures_for_tests)
+    : broker_policy_(broker_policy),
+      ipc_channel_(std::move(ipc_channel)),
+      fast_check_in_client_(fast_check_in_client),
+      quiet_failures_for_tests_(quiet_failures_for_tests) {}
+
+BrokerClient::~BrokerClient() {
+}
+
+// Remote access(2): returns 0 or -errno, never a file descriptor.
+int BrokerClient::Access(const char* pathname, int mode) const {
+  return PathAndFlagsSyscall(COMMAND_ACCESS, pathname, mode);
+}
+
+// Remote open(2): returns a new file descriptor on success, -errno on
+// failure.
+int BrokerClient::Open(const char* pathname, int flags) const {
+  return PathAndFlagsSyscall(COMMAND_OPEN, pathname, flags);
+}
+
+} // namespace syscall_broker
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/syscall_broker/broker_client.h b/libchrome/sandbox/linux/syscall_broker/broker_client.h
new file mode 100644
index 0000000..2dfef81
--- /dev/null
+++ b/libchrome/sandbox/linux/syscall_broker/broker_client.h
@@ -0,0 +1,75 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSCALL_BROKER_BROKER_CLIENT_H_
+#define SANDBOX_LINUX_SYSCALL_BROKER_BROKER_CLIENT_H_
+
+#include "base/macros.h"
+#include "sandbox/linux/syscall_broker/broker_channel.h"
+#include "sandbox/linux/syscall_broker/broker_common.h"
+
+namespace sandbox {
+
+namespace syscall_broker {
+
+class BrokerPolicy;
+
+// This class can be embedded in a sandboxed process and can be
+// used to perform certain system calls in another, presumably
+// non-sandboxed process (which embeds BrokerHost).
+// A key feature of this class is the ability to use some of its methods in a
+// thread-safe and async-signal safe way. The goal is to be able to use it to
+// replace the open() or access() system calls happening anywhere in a process
+// (as allowed for instance by seccomp-bpf's SIGSYS mechanism).
+class BrokerClient {
+ public:
+  // |policy| needs to match the policy used by BrokerHost. This
+  // allows to predict some of the requests which will be denied
+  // and save an IPC round trip.
+  // |ipc_channel| needs to be a suitable SOCK_SEQPACKET unix socket.
+  // |fast_check_in_client| should be set to true and
+  // |quiet_failures_for_tests| to false unless you are writing tests.
+  BrokerClient(const BrokerPolicy& policy,
+               BrokerChannel::EndPoint ipc_channel,
+               bool fast_check_in_client,
+               bool quiet_failures_for_tests);
+  ~BrokerClient();
+
+  // Can be used in place of access().
+  // X_OK will always return an error in practice since the broker process
+  // doesn't support execute permissions.
+  // It's similar to the access() system call and will return -errno on errors.
+  // This is async signal safe.
+  int Access(const char* pathname, int mode) const;
+  // Can be used in place of open().
+  // The implementation only supports certain white listed flags and will
+  // return -EPERM on other flags.
+  // It's similar to the open() system call and will return -errno on errors.
+  // This is async signal safe.
+  int Open(const char* pathname, int flags) const;
+
+  // Get the file descriptor used for IPC. This is used for tests.
+  int GetIPCDescriptor() const { return ipc_channel_.get(); }
+
+ private:
+  const BrokerPolicy& broker_policy_;
+  const BrokerChannel::EndPoint ipc_channel_;
+  const bool fast_check_in_client_; // Whether to forward a request that we
+                                    // know will be denied to the broker. (Used
+                                    // for tests).
+  const bool quiet_failures_for_tests_; // Disable certain error message when
+                                        // testing for failures.
+
+  // Shared implementation of Access() and Open(): forwards the request over
+  // IPC and converts the reply into a -errno value or a file descriptor.
+  int PathAndFlagsSyscall(IPCCommand syscall_type,
+                          const char* pathname,
+                          int flags) const;
+
+  DISALLOW_COPY_AND_ASSIGN(BrokerClient);
+};
+
+} // namespace syscall_broker
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SYSCALL_BROKER_BROKER_CLIENT_H_
diff --git a/libchrome/sandbox/linux/syscall_broker/broker_common.h b/libchrome/sandbox/linux/syscall_broker/broker_common.h
new file mode 100644
index 0000000..25aafa7
--- /dev/null
+++ b/libchrome/sandbox/linux/syscall_broker/broker_common.h
@@ -0,0 +1,41 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSCALL_BROKER_BROKER_COMMON_H_
+#define SANDBOX_LINUX_SYSCALL_BROKER_BROKER_COMMON_H_
+
+#include <fcntl.h>
+#include <stddef.h>
+
+namespace sandbox {
+
+namespace syscall_broker {
+
+// Maximum size, in bytes, of a marshalled IPC request or reply.
+const size_t kMaxMessageLength = 4096;
+
+// Some flags are local to the current process and cannot be sent over a Unix
+// socket. They need special treatment from the client.
+// O_CLOEXEC is tricky because in theory another thread could call execve()
+// before special treatment is made on the client, so a client needs to call
+// recvmsg(2) with MSG_CMSG_CLOEXEC.
+// To make things worse, there are two CLOEXEC related flags, FD_CLOEXEC (see
+// F_GETFD in fcntl(2)) and O_CLOEXEC (see F_GETFL in fcntl(2)). O_CLOEXEC
+// doesn't affect the semantics on execve(), it's merely a note that the
+// descriptor was originally opened with O_CLOEXEC as a flag. And it is sent
+// over unix sockets just fine, so a receiver that would (incorrectly) look at
+// O_CLOEXEC instead of FD_CLOEXEC may be tricked in thinking that the file
+// descriptor will or won't be closed on execve().
+const int kCurrentProcessOpenFlagsMask = O_CLOEXEC;
+
+// Request types sent over the broker IPC channel.
+enum IPCCommand {
+  COMMAND_INVALID = 0, // Sentinel/default; not a valid request.
+  COMMAND_OPEN,        // open(pathname, flags)
+  COMMAND_ACCESS,      // access(pathname, mode)
+};
+
+} // namespace syscall_broker
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SYSCALL_BROKER_BROKER_COMMON_H_
diff --git a/libchrome/sandbox/linux/syscall_broker/broker_file_permission.cc b/libchrome/sandbox/linux/syscall_broker/broker_file_permission.cc
new file mode 100644
index 0000000..3907344
--- /dev/null
+++ b/libchrome/sandbox/linux/syscall_broker/broker_file_permission.cc
@@ -0,0 +1,244 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/syscall_broker/broker_file_permission.h"
+
+#include <fcntl.h>
+#include <stddef.h>
+#include <string.h>
+
+#include <string>
+
+#include "base/logging.h"
+#include "sandbox/linux/syscall_broker/broker_common.h"
+
+namespace sandbox {
+
+namespace syscall_broker {
+
+// Async signal safe (strlen/strcmp/strstr are pure computation and safe in
+// common standard libs).
+// Rejects NULL, the empty string, relative paths, paths with a trailing '/'
+// (except "/" itself), and any path containing a parent-directory
+// reference: a "/../" component anywhere or a "/.." suffix.
+bool BrokerFilePermission::ValidatePath(const char* path) {
+  if (path == NULL)
+    return false;
+
+  const size_t length = strlen(path);
+  if (length == 0)
+    return false;
+
+  // Only absolute paths are permitted.
+  if (*path != '/')
+    return false;
+
+  // Reject a trailing '/', except for the root directory itself.
+  if (length > 1 && path[length - 1] == '/')
+    return false;
+
+  // Reject any "/../" component.
+  if (strstr(path, "/../") != NULL)
+    return false;
+
+  // Reject a "/.." suffix.
+  if (length >= 3 && strcmp(path + length - 3, "/..") == 0)
+    return false;
+
+  return true;
+}
+
+// Async signal safe
+// Calls std::string::c_str(), strncmp and strlen. All these
+// methods are async signal safe in common standard libs.
+// TODO(leecam): remove dependency on std::string
+// Returns true if |requested_filename| is covered by this permission: an
+// exact match against path_, or — for recursive permissions — any string
+// that has path_ as a prefix.
+bool BrokerFilePermission::MatchPath(const char* requested_filename) const {
+  const char* path = path_.c_str();
+  if ((recursive_ && strncmp(requested_filename, path, strlen(path)) == 0)) {
+    // Note: This prefix match will allow any path under the whitelisted
+    // path, for any number of directory levels. E.g. if the whitelisted
+    // path is /good/ then the following will be permitted by the policy.
+    //   /good/file1
+    //   /good/folder/file2
+    //   /good/folder/folder2/file3
+    // If an attacker could make 'folder' a symlink to ../../ they would have
+    // access to the entire filesystem.
+    // Whitelisting with multiple depths is useful, e.g /proc/ but
+    // the system needs to ensure symlinks can not be created!
+    // That said if an attacker can convert any of the absolute paths
+    // to a symlink they can control any file on the system also.
+    return true;
+  } else if (strcmp(requested_filename, path) == 0) {
+    return true;
+  }
+  return false;
+}
+
+// Async signal safe.
+// External call to std::string::c_str() is
+// called in MatchPath.
+// TODO(leecam): remove dependency on std::string
+// Returns true if access(|requested_filename|, |mode|) should be allowed.
+// |mode| may be F_OK or any combination of R_OK and W_OK; X_OK is never
+// permitted. On success, if |file_to_access| is non-NULL it receives the
+// path to actually use: path_ for exact (non-recursive) matches, or the
+// caller's own string for recursive matches.
+bool BrokerFilePermission::CheckAccess(const char* requested_filename,
+                                       int mode,
+                                       const char** file_to_access) const {
+  // First, check if |mode| is existence, ability to read or ability
+  // to write. We do not support X_OK.
+  if (mode != F_OK && mode & ~(R_OK | W_OK)) {
+    return false;
+  }
+
+  if (!ValidatePath(requested_filename))
+    return false;
+
+  if (!MatchPath(requested_filename)) {
+    return false;
+  }
+  // Map the requested mode onto this permission's read/write grants.
+  bool allowed = false;
+  switch (mode) {
+    case F_OK:
+      if (allow_read_ || allow_write_)
+        allowed = true;
+      break;
+    case R_OK:
+      if (allow_read_)
+        allowed = true;
+      break;
+    case W_OK:
+      if (allow_write_)
+        allowed = true;
+      break;
+    case R_OK | W_OK:
+      if (allow_read_ && allow_write_)
+        allowed = true;
+      break;
+    default:
+      return false;
+  }
+
+  if (allowed && file_to_access) {
+    if (!recursive_)
+      *file_to_access = path_.c_str();
+    else
+      *file_to_access = requested_filename;
+  }
+  return allowed;
+}
+
+// Async signal safe.
+// External call to std::string::c_str() is
+// called in MatchPath.
+// TODO(leecam): remove dependency on std::string
+// Returns true if open(|requested_filename|, |flags|) should be allowed.
+// Enforces, in order: path validity and match, a valid access mode covered
+// by the read/write grants, creation rights (O_CREAT requires allow_create_
+// and must be paired with O_EXCL), unlink semantics (unlink-after-open
+// requires O_CREAT), no process-local flags (O_CLOEXEC), and only
+// whitelisted creation/status flags. On success fills |file_to_open| /
+// |unlink_after_open| when non-NULL, like CheckAccess.
+bool BrokerFilePermission::CheckOpen(const char* requested_filename,
+                                     int flags,
+                                     const char** file_to_open,
+                                     bool* unlink_after_open) const {
+  if (!ValidatePath(requested_filename))
+    return false;
+
+  if (!MatchPath(requested_filename)) {
+    return false;
+  }
+
+  // First, check the access mode is valid.
+  const int access_mode = flags & O_ACCMODE;
+  if (access_mode != O_RDONLY && access_mode != O_WRONLY &&
+      access_mode != O_RDWR) {
+    return false;
+  }
+
+  // Check if read is allowed
+  if (!allow_read_ && (access_mode == O_RDONLY || access_mode == O_RDWR)) {
+    return false;
+  }
+
+  // Check if write is allowed
+  if (!allow_write_ && (access_mode == O_WRONLY || access_mode == O_RDWR)) {
+    return false;
+  }
+
+  // Check if file creation is allowed.
+  if (!allow_create_ && (flags & O_CREAT)) {
+    return false;
+  }
+
+  // If O_CREAT is present, ensure O_EXCL
+  if ((flags & O_CREAT) && !(flags & O_EXCL)) {
+    return false;
+  }
+
+  // If this file is to be unlinked, ensure it's created.
+  if (unlink_ && !(flags & O_CREAT)) {
+    return false;
+  }
+
+  // Some flags affect the behavior of the current process. We don't support
+  // them and don't allow them for now.
+  if (flags & kCurrentProcessOpenFlagsMask) {
+    return false;
+  }
+
+  // Now check that all the flags are known to us.
+  const int creation_and_status_flags = flags & ~O_ACCMODE;
+
+  const int known_flags = O_APPEND | O_ASYNC | O_CLOEXEC | O_CREAT | O_DIRECT |
+                          O_DIRECTORY | O_EXCL | O_LARGEFILE | O_NOATIME |
+                          O_NOCTTY | O_NOFOLLOW | O_NONBLOCK | O_NDELAY |
+                          O_SYNC | O_TRUNC;
+
+  const int unknown_flags = ~known_flags;
+  const bool has_unknown_flags = creation_and_status_flags & unknown_flags;
+
+  if (has_unknown_flags)
+    return false;
+
+  if (file_to_open) {
+    if (!recursive_)
+      *file_to_open = path_.c_str();
+    else
+      *file_to_open = requested_filename;
+  }
+  if (unlink_after_open)
+    *unlink_after_open = unlink_;
+
+  return true;
+}
+
+// Returns the static, human-readable message used by the CHECK failures in
+// the constructor; exposed so tests can match against it.
+const char* BrokerFilePermission::GetErrorMessageForTests() {
+  static const char kInvalidPermissionMessage[] =
+      "Invalid BrokerFilePermission";
+  return kInvalidPermissionMessage;
+}
+
+// Builds a permission and CHECK-validates it (crashing on invalid input):
+// |path| must be non-empty and absolute, unlink implies create permission,
+// and recursive paths must (and only recursive paths may) end with '/'.
+BrokerFilePermission::BrokerFilePermission(const std::string& path,
+                                           bool recursive,
+                                           bool unlink,
+                                           bool allow_read,
+                                           bool allow_write,
+                                           bool allow_create)
+    : path_(path),
+      recursive_(recursive),
+      unlink_(unlink),
+      allow_read_(allow_read),
+      allow_write_(allow_write),
+      allow_create_(allow_create) {
+  // Validate this permission and die if invalid!
+
+  // Must have enough length for a '/'
+  CHECK(path_.length() > 0) << GetErrorMessageForTests();
+  // Whitelisted paths must be absolute.
+  CHECK(path_[0] == '/') << GetErrorMessageForTests();
+
+  // Don't allow unlinking on creation without create permission
+  if (unlink_) {
+    CHECK(allow_create) << GetErrorMessageForTests();
+  }
+  const char last_char = *(path_.rbegin());
+  // Recursive paths must have a trailing slash
+  if (recursive_) {
+    CHECK(last_char == '/') << GetErrorMessageForTests();
+  } else {
+    CHECK(last_char != '/') << GetErrorMessageForTests();
+  }
+}
+
+} // namespace syscall_broker
+
+} // namespace sandbox
\ No newline at end of file
diff --git a/libchrome/sandbox/linux/syscall_broker/broker_file_permission.h b/libchrome/sandbox/linux/syscall_broker/broker_file_permission.h
new file mode 100644
index 0000000..03300d1
--- /dev/null
+++ b/libchrome/sandbox/linux/syscall_broker/broker_file_permission.h
@@ -0,0 +1,119 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSCALL_BROKER_BROKER_FILE_PERMISSION_H_
+#define SANDBOX_LINUX_SYSCALL_BROKER_BROKER_FILE_PERMISSION_H_
+
+#include <string>
+
+#include "base/macros.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+
+namespace syscall_broker {
+
+// BrokerFilePermission defines a path for whitelisting.
+// Pick the correct static factory method to create a permission.
+// CheckOpen and CheckAccess are async signal safe.
+// Construction and Destruction are not async signal safe.
+// |path| is the path to be whitelisted.
+class SANDBOX_EXPORT BrokerFilePermission {
+ public:
+  ~BrokerFilePermission() {}
+  BrokerFilePermission(const BrokerFilePermission&) = default;
+  BrokerFilePermission& operator=(const BrokerFilePermission&) = default;
+
+  // Factory methods. Recursive variants take a directory path (which must
+  // end in '/') and cover everything beneath it; non-recursive variants
+  // take a single absolute file path (which must not end in '/').
+  static BrokerFilePermission ReadOnly(const std::string& path) {
+    return BrokerFilePermission(path, false, false, true, false, false);
+  }
+
+  static BrokerFilePermission ReadOnlyRecursive(const std::string& path) {
+    return BrokerFilePermission(path, true, false, true, false, false);
+  }
+
+  static BrokerFilePermission WriteOnly(const std::string& path) {
+    return BrokerFilePermission(path, false, false, false, true, false);
+  }
+
+  static BrokerFilePermission ReadWrite(const std::string& path) {
+    return BrokerFilePermission(path, false, false, true, true, false);
+  }
+
+  static BrokerFilePermission ReadWriteCreate(const std::string& path) {
+    return BrokerFilePermission(path, false, false, true, true, true);
+  }
+
+  static BrokerFilePermission ReadWriteCreateUnlink(const std::string& path) {
+    return BrokerFilePermission(path, false, true, true, true, true);
+  }
+
+  static BrokerFilePermission ReadWriteCreateUnlinkRecursive(
+      const std::string& path) {
+    return BrokerFilePermission(path, true, true, true, true, true);
+  }
+
+  // Returns true if |requested_filename| is allowed to be opened
+  // by this permission.
+  // If |file_to_open| is not NULL it is set to point to either
+  // the |requested_filename| in the case of a recursive match,
+  // or a pointer to the matched path in the whitelist if an absolute
+  // match.
+  // If not NULL |unlink_after_open| is set to point to true if the
+  // caller should unlink the path after opening.
+  // Async signal safe if |file_to_open| is NULL.
+  bool CheckOpen(const char* requested_filename,
+                 int flags,
+                 const char** file_to_open,
+                 bool* unlink_after_open) const;
+  // Returns true if |requested_filename| is allowed to be accessed
+  // by this permission as per access(2).
+  // If |file_to_access| is not NULL it is set to point to either
+  // the |requested_filename| in the case of a recursive match,
+  // or a pointer to the matched path in the whitelist if an absolute
+  // match.
+  // |mode| is per mode argument of access(2).
+  // Async signal safe if |file_to_access| is NULL
+  bool CheckAccess(const char* requested_filename,
+                   int mode,
+                   const char** file_to_access) const;
+
+ private:
+  friend class BrokerFilePermissionTester;
+  BrokerFilePermission(const std::string& path,
+                       bool recursive,
+                       bool unlink,
+                       bool allow_read,
+                       bool allow_write,
+                       bool allow_create);
+
+  // ValidatePath checks |path| and returns true if these conditions are met
+  // * Greater than 0 length
+  // * Is an absolute path
+  // * No trailing slash
+  // * No /../ path traversal
+  static bool ValidatePath(const char* path);
+
+  // MatchPath returns true if |requested_filename| is covered by this instance
+  bool MatchPath(const char* requested_filename) const;
+
+  // Used by BrokerFilePermissionTester for tests.
+  static const char* GetErrorMessageForTests();
+
+  // These are not const as std::vector requires copy-assignment and this class
+  // is stored in vectors. All methods are marked const so
+  // the compiler will still enforce no changes outside of the constructor.
+  std::string path_;
+  bool recursive_;  // Allow everything under this path. |path| must be a dir.
+  bool unlink_;     // unlink after opening.
+  bool allow_read_;
+  bool allow_write_;
+  bool allow_create_;
+};
+
+} // namespace syscall_broker
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SYSCALL_BROKER_BROKER_FILE_PERMISSION_H_
\ No newline at end of file
diff --git a/libchrome/sandbox/linux/syscall_broker/broker_file_permission_unittest.cc b/libchrome/sandbox/linux/syscall_broker/broker_file_permission_unittest.cc
new file mode 100644
index 0000000..b58a901
--- /dev/null
+++ b/libchrome/sandbox/linux/syscall_broker/broker_file_permission_unittest.cc
@@ -0,0 +1,262 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/syscall_broker/broker_file_permission.h"
+
+#include <fcntl.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "sandbox/linux/tests/test_utils.h"
+#include "sandbox/linux/tests/unit_tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace sandbox {
+
+namespace syscall_broker {
+
+// Test-only accessor for BrokerFilePermission's private static helpers
+// (declared a friend in broker_file_permission.h).
+class BrokerFilePermissionTester {
+ public:
+  static bool ValidatePath(const char* path) {
+    return BrokerFilePermission::ValidatePath(path);
+  }
+  static const char* GetErrorMessage() {
+    return BrokerFilePermission::GetErrorMessageForTests();
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BrokerFilePermissionTester);
+};
+
+namespace {
+
+// Creation tests are DEATH tests as a bad permission causes termination.
+SANDBOX_TEST(BrokerFilePermission, CreateGood) {
+  const char kPath[] = "/tmp/good";
+  BrokerFilePermission perm = BrokerFilePermission::ReadOnly(kPath);
+}
+
+SANDBOX_TEST(BrokerFilePermission, CreateGoodRecursive) {
+  const char kPath[] = "/tmp/good/";
+  BrokerFilePermission perm = BrokerFilePermission::ReadOnlyRecursive(kPath);
+}
+
+// Non-recursive permissions must not have a trailing slash.
+SANDBOX_DEATH_TEST(
+    BrokerFilePermission,
+    CreateBad,
+    DEATH_MESSAGE(BrokerFilePermissionTester::GetErrorMessage())) {
+  const char kPath[] = "/tmp/bad/";
+  BrokerFilePermission perm = BrokerFilePermission::ReadOnly(kPath);
+}
+
+// Recursive permissions must have a trailing slash.
+SANDBOX_DEATH_TEST(
+    BrokerFilePermission,
+    CreateBadRecursive,
+    DEATH_MESSAGE(BrokerFilePermissionTester::GetErrorMessage())) {
+  const char kPath[] = "/tmp/bad";
+  BrokerFilePermission perm = BrokerFilePermission::ReadOnlyRecursive(kPath);
+}
+
+// Whitelisted paths must be absolute.
+SANDBOX_DEATH_TEST(
+    BrokerFilePermission,
+    CreateBadNotAbs,
+    DEATH_MESSAGE(BrokerFilePermissionTester::GetErrorMessage())) {
+  const char kPath[] = "tmp/bad";
+  BrokerFilePermission perm = BrokerFilePermission::ReadOnly(kPath);
+}
+
+// Empty paths are rejected.
+SANDBOX_DEATH_TEST(
+    BrokerFilePermission,
+    CreateBadEmpty,
+    DEATH_MESSAGE(BrokerFilePermissionTester::GetErrorMessage())) {
+  const char kPath[] = "";
+  BrokerFilePermission perm = BrokerFilePermission::ReadOnly(kPath);
+}
+
+// CheckPerm tests |path| against |perm| given |access_flags|.
+// If |create| is true then file creation is tested for success.
+// |access_flags| must be exactly one of O_RDONLY, O_WRONLY or O_RDWR.
+void CheckPerm(const BrokerFilePermission& perm,
+               const char* path,
+               int access_flags,
+               bool create) {
+  const char* file_to_open = NULL;
+
+  // Execute permission is never granted; existence checks always are.
+  ASSERT_FALSE(perm.CheckAccess(path, X_OK, NULL));
+  ASSERT_TRUE(perm.CheckAccess(path, F_OK, NULL));
+  // check bad perms
+  switch (access_flags) {
+    case O_RDONLY:
+      ASSERT_TRUE(perm.CheckOpen(path, O_RDONLY, &file_to_open, NULL));
+      ASSERT_FALSE(perm.CheckOpen(path, O_WRONLY, &file_to_open, NULL));
+      ASSERT_FALSE(perm.CheckOpen(path, O_RDWR, &file_to_open, NULL));
+      ASSERT_TRUE(perm.CheckAccess(path, R_OK, NULL));
+      ASSERT_FALSE(perm.CheckAccess(path, W_OK, NULL));
+      break;
+    case O_WRONLY:
+      ASSERT_FALSE(perm.CheckOpen(path, O_RDONLY, &file_to_open, NULL));
+      ASSERT_TRUE(perm.CheckOpen(path, O_WRONLY, &file_to_open, NULL));
+      ASSERT_FALSE(perm.CheckOpen(path, O_RDWR, &file_to_open, NULL));
+      ASSERT_FALSE(perm.CheckAccess(path, R_OK, NULL));
+      ASSERT_TRUE(perm.CheckAccess(path, W_OK, NULL));
+      break;
+    case O_RDWR:
+      ASSERT_TRUE(perm.CheckOpen(path, O_RDONLY, &file_to_open, NULL));
+      ASSERT_TRUE(perm.CheckOpen(path, O_WRONLY, &file_to_open, NULL));
+      ASSERT_TRUE(perm.CheckOpen(path, O_RDWR, &file_to_open, NULL));
+      ASSERT_TRUE(perm.CheckAccess(path, R_OK, NULL));
+      ASSERT_TRUE(perm.CheckAccess(path, W_OK, NULL));
+      break;
+    default:
+      // Bad test case
+      NOTREACHED();
+  }
+
+// O_SYNC can be defined as (__O_SYNC|O_DSYNC)
+#ifdef O_DSYNC
+  const int kSyncFlag = O_SYNC & ~O_DSYNC;
+#else
+  const int kSyncFlag = O_SYNC;
+#endif
+
+  const int kNumberOfBitsInOAccMode = 2;
+  static_assert(O_ACCMODE == ((1 << kNumberOfBitsInOAccMode) - 1),
+                "incorrect number of bits");
+  // check every possible flag and act accordingly.
+  // Skipping AccMode bits as they are present in every case.
+  for (int i = kNumberOfBitsInOAccMode; i < 32; i++) {
+    int flag = 1 << i;
+    switch (flag) {
+      case O_APPEND:
+      case O_ASYNC:
+      case O_DIRECT:
+      case O_DIRECTORY:
+#ifdef O_DSYNC
+      case O_DSYNC:
+#endif
+      case O_EXCL:
+      case O_LARGEFILE:
+      case O_NOATIME:
+      case O_NOCTTY:
+      case O_NOFOLLOW:
+      case O_NONBLOCK:
+#if (O_NONBLOCK != O_NDELAY)
+      case O_NDELAY:
+#endif
+      case kSyncFlag:
+      case O_TRUNC:
+        ASSERT_TRUE(
+            perm.CheckOpen(path, access_flags | flag, &file_to_open, NULL));
+        break;
+      // O_CLOEXEC and O_CREAT (without O_EXCL) must be rejected, as must
+      // any flag the permission does not know about.
+      case O_CLOEXEC:
+      case O_CREAT:
+      default:
+        ASSERT_FALSE(
+            perm.CheckOpen(path, access_flags | flag, &file_to_open, NULL));
+    }
+  }
+  if (create) {
+    bool unlink;
+    ASSERT_TRUE(perm.CheckOpen(path, O_CREAT | O_EXCL | access_flags,
+                               &file_to_open, &unlink));
+    ASSERT_FALSE(unlink);
+  } else {
+    ASSERT_FALSE(perm.CheckOpen(path, O_CREAT | O_EXCL | access_flags,
+                                &file_to_open, NULL));
+  }
+}
+
+// Each test below exercises one factory method through CheckPerm with the
+// matching access mode.
+TEST(BrokerFilePermission, ReadOnly) {
+  const char kPath[] = "/tmp/good";
+  BrokerFilePermission perm = BrokerFilePermission::ReadOnly(kPath);
+  CheckPerm(perm, kPath, O_RDONLY, false);
+  // Don't do anything here, so that ASSERT works in the subfunction as
+  // expected.
+}
+
+TEST(BrokerFilePermission, ReadOnlyRecursive) {
+  const char kPath[] = "/tmp/good/";
+  const char kPathFile[] = "/tmp/good/file";
+  BrokerFilePermission perm = BrokerFilePermission::ReadOnlyRecursive(kPath);
+  CheckPerm(perm, kPathFile, O_RDONLY, false);
+  // Don't do anything here, so that ASSERT works in the subfunction as
+  // expected.
+}
+
+TEST(BrokerFilePermission, WriteOnly) {
+  const char kPath[] = "/tmp/good";
+  BrokerFilePermission perm = BrokerFilePermission::WriteOnly(kPath);
+  CheckPerm(perm, kPath, O_WRONLY, false);
+  // Don't do anything here, so that ASSERT works in the subfunction as
+  // expected.
+}
+
+TEST(BrokerFilePermission, ReadWrite) {
+  const char kPath[] = "/tmp/good";
+  BrokerFilePermission perm = BrokerFilePermission::ReadWrite(kPath);
+  CheckPerm(perm, kPath, O_RDWR, false);
+  // Don't do anything here, so that ASSERT works in the subfunction as
+  // expected.
+}
+
+TEST(BrokerFilePermission, ReadWriteCreate) {
+  const char kPath[] = "/tmp/good";
+  BrokerFilePermission perm = BrokerFilePermission::ReadWriteCreate(kPath);
+  CheckPerm(perm, kPath, O_RDWR, true);
+  // Don't do anything here, so that ASSERT works in the subfunction as
+  // expected.
+}
+
+// CheckUnlink verifies that an unlink permission only allows opening |path|
+// with O_CREAT|O_EXCL, and that |unlink_after_open| is reported as true.
+// |perm| is taken by const reference: only const methods are called, which
+// also matches CheckPerm's signature above.
+void CheckUnlink(const BrokerFilePermission& perm,
+                 const char* path,
+                 int access_flags) {
+  // Initialized so the value is defined even on the CheckOpen calls that
+  // are expected to fail (which leave |unlink| untouched).
+  bool unlink = false;
+  ASSERT_FALSE(perm.CheckOpen(path, access_flags, NULL, &unlink));
+  ASSERT_FALSE(perm.CheckOpen(path, access_flags | O_CREAT, NULL, &unlink));
+  ASSERT_TRUE(
+      perm.CheckOpen(path, access_flags | O_CREAT | O_EXCL, NULL, &unlink));
+  ASSERT_TRUE(unlink);
+}
+
+TEST(BrokerFilePermission, ReadWriteCreateUnlink) {
+  const char kPath[] = "/tmp/good";
+  BrokerFilePermission perm =
+      BrokerFilePermission::ReadWriteCreateUnlink(kPath);
+  CheckUnlink(perm, kPath, O_RDWR);
+  // Don't do anything here, so that ASSERT works in the subfunction as
+  // expected.
+}
+
+// Same as above, but the permission covers a directory recursively and the
+// tested path is a file beneath it.
+TEST(BrokerFilePermission, ReadWriteCreateUnlinkRecursive) {
+  const char kPath[] = "/tmp/good/";
+  const char kPathFile[] = "/tmp/good/file";
+  BrokerFilePermission perm =
+      BrokerFilePermission::ReadWriteCreateUnlinkRecursive(kPath);
+  CheckUnlink(perm, kPathFile, O_RDWR);
+  // Don't do anything here, so that ASSERT works in the subfunction as
+  // expected.
+}
+
+// Exercises the private ValidatePath helper: absolute, non-traversing,
+// no-trailing-slash paths are accepted; everything else is rejected.
+TEST(BrokerFilePermission, ValidatePath) {
+  EXPECT_TRUE(BrokerFilePermissionTester::ValidatePath("/path"));
+  EXPECT_TRUE(BrokerFilePermissionTester::ValidatePath("/"));
+  EXPECT_TRUE(BrokerFilePermissionTester::ValidatePath("/..path"));
+
+  EXPECT_FALSE(BrokerFilePermissionTester::ValidatePath(""));
+  EXPECT_FALSE(BrokerFilePermissionTester::ValidatePath("bad"));
+  EXPECT_FALSE(BrokerFilePermissionTester::ValidatePath("/bad/"));
+  EXPECT_FALSE(BrokerFilePermissionTester::ValidatePath("bad/"));
+  EXPECT_FALSE(BrokerFilePermissionTester::ValidatePath("/bad/.."));
+  EXPECT_FALSE(BrokerFilePermissionTester::ValidatePath("/bad/../bad"));
+  EXPECT_FALSE(BrokerFilePermissionTester::ValidatePath("/../bad"));
+}
+
+} // namespace
+
+} // namespace syscall_broker
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/syscall_broker/broker_host.cc b/libchrome/sandbox/linux/syscall_broker/broker_host.cc
new file mode 100644
index 0000000..dd61dac
--- /dev/null
+++ b/libchrome/sandbox/linux/syscall_broker/broker_host.cc
@@ -0,0 +1,232 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/syscall_broker/broker_host.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/pickle.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/posix/unix_domain_socket_linux.h"
+#include "base/third_party/valgrind/valgrind.h"
+#include "sandbox/linux/syscall_broker/broker_common.h"
+#include "sandbox/linux/syscall_broker/broker_policy.h"
+#include "sandbox/linux/system_headers/linux_syscalls.h"
+
+namespace sandbox {
+
+namespace syscall_broker {
+
+namespace {
+
+// Thin wrapper over the RUNNING_ON_VALGRIND macro so call sites read as a
+// function call.
+bool IsRunningOnValgrind() {
+  return RUNNING_ON_VALGRIND;
+}
+
+// A little open(2) wrapper to handle some oddities for us. In the general case
+// make a direct system call since we want to keep in control of the broker
+// process' system calls profile to be able to loosely sandbox it.
+int sys_open(const char* pathname, int flags) {
+  // Hardcode mode to rw------- when creating files.
+  int mode;
+  if (flags & O_CREAT) {
+    mode = 0600;
+  } else {
+    // Mode is ignored by open(2) without O_CREAT.
+    mode = 0;
+  }
+  if (IsRunningOnValgrind()) {
+    // Valgrind does not support AT_FDCWD, just use libc's open() in this case.
+    return open(pathname, flags, mode);
+  } else {
+    return syscall(__NR_openat, AT_FDCWD, pathname, flags, mode);
+  }
+}
+
+// Open |requested_filename| with |flags| if allowed by our policy.
+// Write the syscall return value (-errno) to |write_pickle| and append
+// a file descriptor to |opened_files| if relevant.
+void OpenFileForIPC(const BrokerPolicy& policy,
+                    const std::string& requested_filename,
+                    int flags,
+                    base::Pickle* write_pickle,
+                    std::vector<int>* opened_files) {
+  DCHECK(write_pickle);
+  DCHECK(opened_files);
+  const char* file_to_open = NULL;
+  bool unlink_after_open = false;
+  const bool safe_to_open_file = policy.GetFileNameIfAllowedToOpen(
+      requested_filename.c_str(), flags, &file_to_open, &unlink_after_open);
+
+  if (safe_to_open_file) {
+    CHECK(file_to_open);
+    // Open the name returned by the policy, never the raw request (paranoia
+    // against string-comparison bugs; see GetFileNameIfAllowedToOpen).
+    int opened_fd = sys_open(file_to_open, flags);
+    if (opened_fd < 0) {
+      write_pickle->WriteInt(-errno);
+    } else {
+      // Success.
+      if (unlink_after_open) {
+        unlink(file_to_open);
+      }
+      opened_files->push_back(opened_fd);
+      write_pickle->WriteInt(0);
+    }
+  } else {
+    write_pickle->WriteInt(-policy.denied_errno());
+  }
+}
+
+// Perform access(2) on |requested_filename| with mode |mode| if allowed by our
+// policy. Write the syscall return value (-errno) to |write_pickle|.
+void AccessFileForIPC(const BrokerPolicy& policy,
+                      const std::string& requested_filename,
+                      int mode,
+                      base::Pickle* write_pickle) {
+  DCHECK(write_pickle);
+  const char* file_to_access = NULL;
+  const bool safe_to_access_file = policy.GetFileNameIfAllowedToAccess(
+      requested_filename.c_str(), mode, &file_to_access);
+
+  if (safe_to_access_file) {
+    CHECK(file_to_access);
+    int access_ret = access(file_to_access, mode);
+    // Capture errno immediately; later calls could clobber it.
+    int access_errno = errno;
+    if (!access_ret)
+      write_pickle->WriteInt(0);
+    else
+      write_pickle->WriteInt(-access_errno);
+  } else {
+    write_pickle->WriteInt(-policy.denied_errno());
+  }
+}
+
+// Handle a |command_type| request contained in |iter| and send the reply
+// on |reply_ipc|.
+// Currently COMMAND_OPEN and COMMAND_ACCESS are supported.
+// Returns false if the request could not be parsed or the reply could not
+// be sent.
+bool HandleRemoteCommand(const BrokerPolicy& policy,
+                         IPCCommand command_type,
+                         int reply_ipc,
+                         base::PickleIterator iter) {
+  // Currently all commands have two arguments: filename and flags.
+  std::string requested_filename;
+  int flags = 0;
+  if (!iter.ReadString(&requested_filename) || !iter.ReadInt(&flags))
+    return false;
+
+  base::Pickle write_pickle;
+  std::vector<int> opened_files;
+
+  switch (command_type) {
+    case COMMAND_ACCESS:
+      AccessFileForIPC(policy, requested_filename, flags, &write_pickle);
+      break;
+    case COMMAND_OPEN:
+      OpenFileForIPC(
+          policy, requested_filename, flags, &write_pickle, &opened_files);
+      break;
+    default:
+      LOG(ERROR) << "Invalid IPC command";
+      break;
+  }
+
+  CHECK_LE(write_pickle.size(), kMaxMessageLength);
+  ssize_t sent = base::UnixDomainSocket::SendMsg(
+      reply_ipc, write_pickle.data(), write_pickle.size(), opened_files);
+
+  // Close anything we have opened in this process. The client received its
+  // own duplicates via SCM_RIGHTS in SendMsg above.
+  for (std::vector<int>::iterator it = opened_files.begin();
+       it != opened_files.end();
+       ++it) {
+    int ret = IGNORE_EINTR(close(*it));
+    DCHECK(!ret) << "Could not close file descriptor";
+  }
+
+  if (sent <= 0) {
+    LOG(ERROR) << "Could not send IPC reply";
+    return false;
+  }
+  return true;
+}
+
+} // namespace
+
+// |broker_policy| must outlive this object; only a reference is stored.
+// Takes ownership of |ipc_channel|.
+BrokerHost::BrokerHost(const BrokerPolicy& broker_policy,
+                       BrokerChannel::EndPoint ipc_channel)
+    : broker_policy_(broker_policy), ipc_channel_(std::move(ipc_channel)) {}
+
+BrokerHost::~BrokerHost() {
+}
+
+// Handle a request on the IPC channel ipc_channel_.
+// A request should have a file descriptor attached on which we will reply and
+// that we will then close.
+// A request should start with an int that will be used as the command type.
+BrokerHost::RequestStatus BrokerHost::HandleRequest() const {
+  std::vector<base::ScopedFD> fds;
+  char buf[kMaxMessageLength];
+  // Cleared so that a 0/-1 return from RecvMsg can be distinguished below
+  // by inspecting errno.
+  errno = 0;
+  const ssize_t msg_len = base::UnixDomainSocket::RecvMsg(
+      ipc_channel_.get(), buf, sizeof(buf), &fds);
+
+  if (msg_len == 0 || (msg_len == -1 && errno == ECONNRESET)) {
+    // EOF from the client, or the client died, we should die.
+    return RequestStatus::LOST_CLIENT;
+  }
+
+  // The client should send exactly one file descriptor, on which we
+  // will write the reply.
+  if (msg_len < 0 || fds.size() != 1 || fds[0].get() < 0) {
+    PLOG(ERROR) << "Error reading message from the client";
+    return RequestStatus::FAILURE;
+  }
+
+  // ScopedFD guarantees the reply descriptor is closed on every return path.
+  base::ScopedFD temporary_ipc(std::move(fds[0]));
+
+  base::Pickle pickle(buf, msg_len);
+  base::PickleIterator iter(pickle);
+  int command_type;
+  if (iter.ReadInt(&command_type)) {
+    bool command_handled = false;
+    // Go through all the possible IPC messages.
+    switch (command_type) {
+      case COMMAND_ACCESS:
+      case COMMAND_OPEN:
+        // We reply on the file descriptor sent to us via the IPC channel.
+        command_handled = HandleRemoteCommand(
+            broker_policy_, static_cast<IPCCommand>(command_type),
+            temporary_ipc.get(), iter);
+        break;
+      default:
+        NOTREACHED();
+        break;
+    }
+
+    if (command_handled) {
+      return RequestStatus::SUCCESS;
+    } else {
+      return RequestStatus::FAILURE;
+    }
+
+    // Defensive only: both branches above return, so this is unreachable.
+    NOTREACHED();
+  }
+
+  LOG(ERROR) << "Error parsing IPC request";
+  return RequestStatus::FAILURE;
+}
+
+} // namespace syscall_broker
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/syscall_broker/broker_host.h b/libchrome/sandbox/linux/syscall_broker/broker_host.h
new file mode 100644
index 0000000..9866507
--- /dev/null
+++ b/libchrome/sandbox/linux/syscall_broker/broker_host.h
@@ -0,0 +1,41 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSCALL_BROKER_BROKER_HOST_H_
+#define SANDBOX_LINUX_SYSCALL_BROKER_BROKER_HOST_H_
+
+#include "base/macros.h"
+#include "sandbox/linux/syscall_broker/broker_channel.h"
+
+namespace sandbox {
+
+namespace syscall_broker {
+
+class BrokerPolicy;
+
+// The BrokerHost class should be embedded in a (presumably not sandboxed)
+// process. It will honor IPC requests from a BrokerClient sent over
+// |ipc_channel| according to |broker_policy|.
+class BrokerHost {
+ public:
+  // Outcome of one HandleRequest() iteration: LOST_CLIENT means the peer
+  // closed the channel or died and the host loop should terminate.
+  enum class RequestStatus { LOST_CLIENT = 0, SUCCESS, FAILURE };
+
+  // |broker_policy| must outlive this object.
+  BrokerHost(const BrokerPolicy& broker_policy,
+             BrokerChannel::EndPoint ipc_channel);
+  ~BrokerHost();
+
+  // Services a single request from the IPC channel; see broker_host.cc.
+  RequestStatus HandleRequest() const;
+
+ private:
+  const BrokerPolicy& broker_policy_;
+  const BrokerChannel::EndPoint ipc_channel_;
+
+  DISALLOW_COPY_AND_ASSIGN(BrokerHost);
+};
+
+} // namespace syscall_broker
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SYSCALL_BROKER_BROKER_HOST_H_
diff --git a/libchrome/sandbox/linux/syscall_broker/broker_policy.cc b/libchrome/sandbox/linux/syscall_broker/broker_policy.cc
new file mode 100644
index 0000000..cd09245
--- /dev/null
+++ b/libchrome/sandbox/linux/syscall_broker/broker_policy.cc
@@ -0,0 +1,100 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/syscall_broker/broker_policy.h"
+
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <string>
+#include <vector>
+
+#include "base/logging.h"
+#include "sandbox/linux/syscall_broker/broker_common.h"
+
+namespace sandbox {
+namespace syscall_broker {
+
+// Copies |permissions| into this policy and caches a raw pointer to the
+// vector's contiguous storage for use from async-signal-safe code paths.
+BrokerPolicy::BrokerPolicy(int denied_errno,
+                           const std::vector<BrokerFilePermission>& permissions)
+    : denied_errno_(denied_errno),
+      permissions_(permissions),
+      num_of_permissions_(permissions.size()) {
+  // The spec guarantees vectors store their elements contiguously
+  // so set up a pointer to array of element so it can be used
+  // in async signal safe code instead of vector operations.
+  if (num_of_permissions_ > 0) {
+    permissions_array_ = &permissions_[0];
+  } else {
+    permissions_array_ = NULL;
+  }
+}
+
+BrokerPolicy::~BrokerPolicy() {
+}
+
+// Check if calling access() should be allowed on |requested_filename| with
+// mode |requested_mode|.
+// Note: access() being a system call to check permissions, this can get a bit
+// confusing. We're checking if calling access() should even be allowed with
+// the same policy we would use for open().
+// If |file_to_access| is not NULL, we will return the matching pointer from
+// the whitelist. For paranoia a caller should then use |file_to_access|. See
+// GetFileNameIfAllowedToOpen() for more explanation.
+// return true if calling access() on this file should be allowed, false
+// otherwise.
+// Async signal safe if and only if |file_to_access| is NULL.
+bool BrokerPolicy::GetFileNameIfAllowedToAccess(
+    const char* requested_filename,
+    int requested_mode,
+    const char** file_to_access) const {
+  if (file_to_access && *file_to_access) {
+    // Make sure that callers never pass a non-empty string. In case callers
+    // wrongly forget to check the return value and look at the string
+    // instead, this could catch bugs.
+    RAW_LOG(FATAL, "*file_to_access should be NULL");
+    return false;
+  }
+  // First permission that grants access wins; order of the whitelist matters.
+  for (size_t i = 0; i < num_of_permissions_; i++) {
+    if (permissions_array_[i].CheckAccess(requested_filename, requested_mode,
+                                          file_to_access)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Check if |requested_filename| can be opened with flags |requested_flags|.
+// If |file_to_open| is not NULL, we will return the matching pointer from the
+// whitelist. For paranoia, a caller should then use |file_to_open| rather
+// than |requested_filename|, so that it never attempts to open an
+// attacker-controlled file name, even if an attacker managed to fool the
+// string comparison mechanism.
+// Return true if opening should be allowed, false otherwise.
+// Async signal safe if and only if |file_to_open| is NULL.
+bool BrokerPolicy::GetFileNameIfAllowedToOpen(const char* requested_filename,
+                                              int requested_flags,
+                                              const char** file_to_open,
+                                              bool* unlink_after_open) const {
+  if (file_to_open && *file_to_open) {
+    // Make sure that callers never pass a non-empty string. In case callers
+    // wrongly forget to check the return value and look at the string
+    // instead, this could catch bugs.
+    RAW_LOG(FATAL, "*file_to_open should be NULL");
+    return false;
+  }
+  // First permission that grants the open wins; order of the whitelist
+  // matters.
+  for (size_t i = 0; i < num_of_permissions_; i++) {
+    if (permissions_array_[i].CheckOpen(requested_filename, requested_flags,
+                                        file_to_open, unlink_after_open)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+} // namespace syscall_broker
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/syscall_broker/broker_policy.h b/libchrome/sandbox/linux/syscall_broker/broker_policy.h
new file mode 100644
index 0000000..58bc29a
--- /dev/null
+++ b/libchrome/sandbox/linux/syscall_broker/broker_policy.h
@@ -0,0 +1,89 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSCALL_BROKER_BROKER_POLICY_H_
+#define SANDBOX_LINUX_SYSCALL_BROKER_BROKER_POLICY_H_
+
+#include <stddef.h>
+
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+
+#include "sandbox/linux/syscall_broker/broker_file_permission.h"
+
+namespace sandbox {
+namespace syscall_broker {
+
+// BrokerPolicy allows to define the security policy enforced by a
+// BrokerHost. The BrokerHost will evaluate requests sent over its
+// IPC channel according to the BrokerPolicy.
+// Some of the methods of this class can be used in an async-signal safe
+// way.
+class BrokerPolicy {
+ public:
+  // |denied_errno| is the error code returned when IPC requests for system
+  // calls such as open() or access() are denied because a file is not in the
+  // whitelist. EACCES would be a typical value.
+  // |permissions| is a list of BrokerFilePermission objects that define
+  // what the broker will allow.
+  BrokerPolicy(int denied_errno,
+               const std::vector<BrokerFilePermission>& permissions);
+
+  ~BrokerPolicy();
+
+  // Check if calling access() should be allowed on |requested_filename| with
+  // mode |requested_mode|.
+  // Note: access() being a system call to check permissions, this can get a bit
+  // confusing. We're checking if calling access() should even be allowed with
+  // If |file_to_access| is not NULL, a pointer to the path will be returned.
+  // In the case of a recursive match, this will be the requested_filename,
+  // otherwise it will return the matching pointer from the
+  // whitelist. For paranoia a caller should then use |file_to_access|. See
+  // GetFileNameIfAllowedToOpen() for more explanation.
+  // return true if calling access() on this file should be allowed, false
+  // otherwise.
+  // Async signal safe if and only if |file_to_access| is NULL.
+  bool GetFileNameIfAllowedToAccess(const char* requested_filename,
+                                    int requested_mode,
+                                    const char** file_to_access) const;
+
+  // Check if |requested_filename| can be opened with flags |requested_flags|.
+  // If |file_to_open| is not NULL, a pointer to the path will be returned.
+  // In the case of a recursive match, this will be the requested_filename,
+  // otherwise it will return the matching pointer from the
+  // whitelist. For paranoia, a caller should then use |file_to_open| rather
+  // than |requested_filename|, so that it never attempts to open an
+  // attacker-controlled file name, even if an attacker managed to fool the
+  // string comparison mechanism.
+  // |unlink_after_open| if not NULL will be set to point to true if the
+  // policy requests the caller unlink the path after opening.
+  // Return true if opening should be allowed, false otherwise.
+  // Async signal safe if and only if |file_to_open| is NULL.
+  bool GetFileNameIfAllowedToOpen(const char* requested_filename,
+                                  int requested_flags,
+                                  const char** file_to_open,
+                                  bool* unlink_after_open) const;
+  // The errno value reported back for denied requests.
+  int denied_errno() const { return denied_errno_; }
+
+ private:
+  const int denied_errno_;
+  // The permissions_ vector is used as storage for the BrokerFilePermission
+  // objects but is not referenced outside of the constructor as
+  // vectors are unfriendly in async signal safe code.
+  const std::vector<BrokerFilePermission> permissions_;
+  // permissions_array_ is set up to point to the backing store of
+  // permissions_ and is used in async signal safe methods.
+  const BrokerFilePermission* permissions_array_;
+  const size_t num_of_permissions_;
+
+  DISALLOW_COPY_AND_ASSIGN(BrokerPolicy);
+};
+
+} // namespace syscall_broker
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SYSCALL_BROKER_BROKER_POLICY_H_
diff --git a/libchrome/sandbox/linux/syscall_broker/broker_process.cc b/libchrome/sandbox/linux/syscall_broker/broker_process.cc
new file mode 100644
index 0000000..30713ce
--- /dev/null
+++ b/libchrome/sandbox/linux/syscall_broker/broker_process.cc
@@ -0,0 +1,120 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/syscall_broker/broker_process.h"
+
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/process_metrics.h"
+#include "build/build_config.h"
+#include "sandbox/linux/syscall_broker/broker_channel.h"
+#include "sandbox/linux/syscall_broker/broker_client.h"
+#include "sandbox/linux/syscall_broker/broker_host.h"
+
+namespace sandbox {
+
+namespace syscall_broker {
+
+// Builds the policy up front; the broker child is not forked until Init()
+// is called.
+BrokerProcess::BrokerProcess(
+    int denied_errno,
+    const std::vector<syscall_broker::BrokerFilePermission>& permissions,
+    bool fast_check_in_client,
+    bool quiet_failures_for_tests)
+    : initialized_(false),
+      fast_check_in_client_(fast_check_in_client),
+      quiet_failures_for_tests_(quiet_failures_for_tests),
+      broker_pid_(-1),
+      policy_(denied_errno, permissions) {
+}
+
+// Kills and reaps the broker child (if Init() succeeded) so no zombie is
+// left behind.
+BrokerProcess::~BrokerProcess() {
+  if (initialized_) {
+    if (broker_client_.get()) {
+      // Closing the socket should be enough to notify the child to die,
+      // unless it has been duplicated.
+      CloseChannel();
+    }
+    PCHECK(0 == kill(broker_pid_, SIGKILL));
+    siginfo_t process_info;
+    // Reap the child.
+    int ret = HANDLE_EINTR(waitid(P_PID, broker_pid_, &process_info, WEXITED));
+    PCHECK(0 == ret);
+  }
+}
+
+// Forks the broker child. In the parent, returns true on success and sets up
+// |broker_client_|; returns false if fork() fails. The child never returns:
+// it runs |broker_process_init_callback|, then services requests until the
+// client disappears and _exit()s.
+bool BrokerProcess::Init(
+    const base::Callback<bool(void)>& broker_process_init_callback) {
+  CHECK(!initialized_);
+  BrokerChannel::EndPoint ipc_reader;
+  BrokerChannel::EndPoint ipc_writer;
+  BrokerChannel::CreatePair(&ipc_reader, &ipc_writer);
+
+#if !defined(THREAD_SANITIZER)
+  // Forking a multi-threaded process is unsafe; assert single-threadedness.
+  DCHECK_EQ(1, base::GetNumberOfThreads(base::GetCurrentProcessHandle()));
+#endif
+  int child_pid = fork();
+  if (child_pid == -1) {
+    return false;
+  }
+  if (child_pid) {
+    // We are the parent and we have just forked our broker process.
+    ipc_reader.reset();
+    broker_pid_ = child_pid;
+    broker_client_.reset(new BrokerClient(policy_, std::move(ipc_writer),
+                                          fast_check_in_client_,
+                                          quiet_failures_for_tests_));
+    initialized_ = true;
+    return true;
+  } else {
+    // We are the broker process. Make sure to close the writer's end so that
+    // we get notified if the client disappears.
+    ipc_writer.reset();
+    CHECK(broker_process_init_callback.Run());
+    BrokerHost broker_host(policy_, std::move(ipc_reader));
+    for (;;) {
+      switch (broker_host.HandleRequest()) {
+        case BrokerHost::RequestStatus::LOST_CLIENT:
+          _exit(1);
+        case BrokerHost::RequestStatus::SUCCESS:
+        case BrokerHost::RequestStatus::FAILURE:
+          continue;
+      }
+    }
+    // Defensive only: the loop above never falls through.
+    _exit(1);
+  }
+  NOTREACHED();
+  return false;
+}
+
+// Drops the client end of the IPC channel; this signals the broker child to
+// exit (see ~BrokerProcess).
+void BrokerProcess::CloseChannel() {
+  broker_client_.reset();
+}
+
+// Forwards an access(2)-style check to the broker; returns 0 or -errno.
+int BrokerProcess::Access(const char* pathname, int mode) const {
+  RAW_CHECK(initialized_);
+  return broker_client_->Access(pathname, mode);
+}
+
+// Forwards an open(2)-style request to the broker; returns an fd or -errno.
+int BrokerProcess::Open(const char* pathname, int flags) const {
+  RAW_CHECK(initialized_);
+  return broker_client_->Open(pathname, flags);
+}
+
+} // namespace syscall_broker
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/syscall_broker/broker_process.h b/libchrome/sandbox/linux/syscall_broker/broker_process.h
new file mode 100644
index 0000000..3c0c809
--- /dev/null
+++ b/libchrome/sandbox/linux/syscall_broker/broker_process.h
@@ -0,0 +1,94 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SERVICES_BROKER_PROCESS_H_
+#define SANDBOX_LINUX_SERVICES_BROKER_PROCESS_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/callback_forward.h"
+#include "base/macros.h"
+#include "base/pickle.h"
+#include "base/process/process.h"
+#include "sandbox/linux/syscall_broker/broker_policy.h"
+#include "sandbox/sandbox_export.h"
+
+namespace sandbox {
+
+namespace syscall_broker {
+
+class BrokerClient;
+class BrokerFilePermission;
+
+// Create a new "broker" process to which we can send requests via an IPC
+// channel by forking the current process.
+// This is a low level IPC mechanism that is suitable to be called from a
+// signal handler.
+// A process would typically create a broker process before entering
+// sandboxing.
+// 1. BrokerProcess open_broker(read_whitelist, write_whitelist);
+// 2. CHECK(open_broker.Init(NULL));
+// 3. Enable sandbox.
+// 4. Use open_broker.Open() to open files.
+class SANDBOX_EXPORT BrokerProcess {
+ public:
+ // |denied_errno| is the error code returned when methods such as Open()
+  // or Access() are invoked on a file which is not in the whitelist. EACCES
+ // would be a typical value.
+ // |allowed_r_files| and |allowed_w_files| are white lists of files that can
+ // be opened later via the Open() API, respectively for reading and writing.
+ // A file available read-write should be listed in both.
+ // |fast_check_in_client| and |quiet_failures_for_tests| are reserved for
+ // unit tests, don't use it.
+
+ BrokerProcess(
+ int denied_errno,
+ const std::vector<syscall_broker::BrokerFilePermission>& permissions,
+ bool fast_check_in_client = true,
+ bool quiet_failures_for_tests = false);
+
+ ~BrokerProcess();
+ // Will initialize the broker process. There should be no threads at this
+ // point, since we need to fork().
+ // broker_process_init_callback will be called in the new broker process,
+ // after fork() returns.
+ bool Init(const base::Callback<bool(void)>& broker_process_init_callback);
+
+ // Can be used in place of access(). Will be async signal safe.
+ // X_OK will always return an error in practice since the broker process
+ // doesn't support execute permissions.
+ // It's similar to the access() system call and will return -errno on errors.
+ int Access(const char* pathname, int mode) const;
+ // Can be used in place of open(). Will be async signal safe.
+ // The implementation only supports certain white listed flags and will
+ // return -EPERM on other flags.
+ // It's similar to the open() system call and will return -errno on errors.
+ int Open(const char* pathname, int flags) const;
+
+ int broker_pid() const { return broker_pid_; }
+
+ private:
+ friend class BrokerProcessTestHelper;
+
+ // Close the IPC channel with the other party. This should only be used
+  // by tests and none of the class methods should be used afterwards.
+ void CloseChannel();
+
+ bool initialized_; // Whether we've been through Init() yet.
+ const bool fast_check_in_client_;
+ const bool quiet_failures_for_tests_;
+ pid_t broker_pid_; // The PID of the broker (child).
+ syscall_broker::BrokerPolicy policy_; // The sandboxing policy.
+ std::unique_ptr<syscall_broker::BrokerClient> broker_client_;
+
+ DISALLOW_COPY_AND_ASSIGN(BrokerProcess);
+};
+
+} // namespace syscall_broker
+
+} // namespace sandbox
+
+#endif // SANDBOX_LINUX_SERVICES_BROKER_PROCESS_H_
diff --git a/libchrome/sandbox/linux/syscall_broker/broker_process_unittest.cc b/libchrome/sandbox/linux/syscall_broker/broker_process_unittest.cc
new file mode 100644
index 0000000..229764a
--- /dev/null
+++ b/libchrome/sandbox/linux/syscall_broker/broker_process_unittest.cc
@@ -0,0 +1,666 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/syscall_broker/broker_process.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stddef.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/posix/unix_domain_socket_linux.h"
+#include "sandbox/linux/syscall_broker/broker_client.h"
+#include "sandbox/linux/tests/scoped_temporary_file.h"
+#include "sandbox/linux/tests/test_utils.h"
+#include "sandbox/linux/tests/unit_tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace sandbox {
+
+namespace syscall_broker {
+
+class BrokerProcessTestHelper {
+ public:
+ static void CloseChannel(BrokerProcess* broker) { broker->CloseChannel(); }
+ // Get the client's IPC descriptor to send IPC requests directly.
+  // TODO(jln): refactor tests to get rid of this.
+ static int GetIPCDescriptor(const BrokerProcess* broker) {
+ return broker->broker_client_->GetIPCDescriptor();
+ }
+};
+
+namespace {
+
+bool NoOpCallback() {
+ return true;
+}
+
+} // namespace
+
+TEST(BrokerProcess, CreateAndDestroy) {
+ std::vector<BrokerFilePermission> permissions;
+ permissions.push_back(BrokerFilePermission::ReadOnly("/proc/cpuinfo"));
+
+ std::unique_ptr<BrokerProcess> open_broker(
+ new BrokerProcess(EPERM, permissions));
+ ASSERT_TRUE(open_broker->Init(base::Bind(&NoOpCallback)));
+
+ ASSERT_TRUE(TestUtils::CurrentProcessHasChildren());
+ // Destroy the broker and check it has exited properly.
+ open_broker.reset();
+ ASSERT_FALSE(TestUtils::CurrentProcessHasChildren());
+}
+
+TEST(BrokerProcess, TestOpenAccessNull) {
+ std::vector<BrokerFilePermission> empty;
+ BrokerProcess open_broker(EPERM, empty);
+ ASSERT_TRUE(open_broker.Init(base::Bind(&NoOpCallback)));
+
+ int fd = open_broker.Open(NULL, O_RDONLY);
+ ASSERT_EQ(fd, -EFAULT);
+
+ int ret = open_broker.Access(NULL, F_OK);
+ ASSERT_EQ(ret, -EFAULT);
+}
+
+void TestOpenFilePerms(bool fast_check_in_client, int denied_errno) {
+ const char kR_WhiteListed[] = "/proc/DOESNOTEXIST1";
+ // We can't debug the init process, and shouldn't be able to access
+ // its auxv file.
+ const char kR_WhiteListedButDenied[] = "/proc/1/auxv";
+ const char kW_WhiteListed[] = "/proc/DOESNOTEXIST2";
+ const char kRW_WhiteListed[] = "/proc/DOESNOTEXIST3";
+ const char k_NotWhitelisted[] = "/proc/DOESNOTEXIST4";
+
+ std::vector<BrokerFilePermission> permissions;
+ permissions.push_back(BrokerFilePermission::ReadOnly(kR_WhiteListed));
+ permissions.push_back(
+ BrokerFilePermission::ReadOnly(kR_WhiteListedButDenied));
+ permissions.push_back(BrokerFilePermission::WriteOnly(kW_WhiteListed));
+ permissions.push_back(BrokerFilePermission::ReadWrite(kRW_WhiteListed));
+
+ BrokerProcess open_broker(denied_errno, permissions, fast_check_in_client);
+ ASSERT_TRUE(open_broker.Init(base::Bind(&NoOpCallback)));
+
+ int fd = -1;
+ fd = open_broker.Open(kR_WhiteListed, O_RDONLY);
+ ASSERT_EQ(fd, -ENOENT);
+ fd = open_broker.Open(kR_WhiteListed, O_WRONLY);
+ ASSERT_EQ(fd, -denied_errno);
+ fd = open_broker.Open(kR_WhiteListed, O_RDWR);
+ ASSERT_EQ(fd, -denied_errno);
+ int ret = -1;
+ ret = open_broker.Access(kR_WhiteListed, F_OK);
+ ASSERT_EQ(ret, -ENOENT);
+ ret = open_broker.Access(kR_WhiteListed, R_OK);
+ ASSERT_EQ(ret, -ENOENT);
+ ret = open_broker.Access(kR_WhiteListed, W_OK);
+ ASSERT_EQ(ret, -denied_errno);
+ ret = open_broker.Access(kR_WhiteListed, R_OK | W_OK);
+ ASSERT_EQ(ret, -denied_errno);
+ ret = open_broker.Access(kR_WhiteListed, X_OK);
+ ASSERT_EQ(ret, -denied_errno);
+ ret = open_broker.Access(kR_WhiteListed, R_OK | X_OK);
+ ASSERT_EQ(ret, -denied_errno);
+
+ // Android sometimes runs tests as root.
+ // This part of the test requires a process that doesn't have
+ // CAP_DAC_OVERRIDE. We check against a root euid as a proxy for that.
+ if (geteuid()) {
+ fd = open_broker.Open(kR_WhiteListedButDenied, O_RDONLY);
+ // The broker process will allow this, but the normal permission system
+ // won't.
+ ASSERT_EQ(fd, -EACCES);
+ fd = open_broker.Open(kR_WhiteListedButDenied, O_WRONLY);
+ ASSERT_EQ(fd, -denied_errno);
+ fd = open_broker.Open(kR_WhiteListedButDenied, O_RDWR);
+ ASSERT_EQ(fd, -denied_errno);
+ ret = open_broker.Access(kR_WhiteListedButDenied, F_OK);
+ // The normal permission system will let us check that the file exists.
+ ASSERT_EQ(ret, 0);
+ ret = open_broker.Access(kR_WhiteListedButDenied, R_OK);
+ ASSERT_EQ(ret, -EACCES);
+ ret = open_broker.Access(kR_WhiteListedButDenied, W_OK);
+ ASSERT_EQ(ret, -denied_errno);
+ ret = open_broker.Access(kR_WhiteListedButDenied, R_OK | W_OK);
+ ASSERT_EQ(ret, -denied_errno);
+ ret = open_broker.Access(kR_WhiteListedButDenied, X_OK);
+ ASSERT_EQ(ret, -denied_errno);
+ ret = open_broker.Access(kR_WhiteListedButDenied, R_OK | X_OK);
+ ASSERT_EQ(ret, -denied_errno);
+ }
+
+ fd = open_broker.Open(kW_WhiteListed, O_RDONLY);
+ ASSERT_EQ(fd, -denied_errno);
+ fd = open_broker.Open(kW_WhiteListed, O_WRONLY);
+ ASSERT_EQ(fd, -ENOENT);
+ fd = open_broker.Open(kW_WhiteListed, O_RDWR);
+ ASSERT_EQ(fd, -denied_errno);
+ ret = open_broker.Access(kW_WhiteListed, F_OK);
+ ASSERT_EQ(ret, -ENOENT);
+ ret = open_broker.Access(kW_WhiteListed, R_OK);
+ ASSERT_EQ(ret, -denied_errno);
+ ret = open_broker.Access(kW_WhiteListed, W_OK);
+ ASSERT_EQ(ret, -ENOENT);
+ ret = open_broker.Access(kW_WhiteListed, R_OK | W_OK);
+ ASSERT_EQ(ret, -denied_errno);
+ ret = open_broker.Access(kW_WhiteListed, X_OK);
+ ASSERT_EQ(ret, -denied_errno);
+ ret = open_broker.Access(kW_WhiteListed, R_OK | X_OK);
+ ASSERT_EQ(ret, -denied_errno);
+
+ fd = open_broker.Open(kRW_WhiteListed, O_RDONLY);
+ ASSERT_EQ(fd, -ENOENT);
+ fd = open_broker.Open(kRW_WhiteListed, O_WRONLY);
+ ASSERT_EQ(fd, -ENOENT);
+ fd = open_broker.Open(kRW_WhiteListed, O_RDWR);
+ ASSERT_EQ(fd, -ENOENT);
+ ret = open_broker.Access(kRW_WhiteListed, F_OK);
+ ASSERT_EQ(ret, -ENOENT);
+ ret = open_broker.Access(kRW_WhiteListed, R_OK);
+ ASSERT_EQ(ret, -ENOENT);
+ ret = open_broker.Access(kRW_WhiteListed, W_OK);
+ ASSERT_EQ(ret, -ENOENT);
+ ret = open_broker.Access(kRW_WhiteListed, R_OK | W_OK);
+ ASSERT_EQ(ret, -ENOENT);
+ ret = open_broker.Access(kRW_WhiteListed, X_OK);
+ ASSERT_EQ(ret, -denied_errno);
+ ret = open_broker.Access(kRW_WhiteListed, R_OK | X_OK);
+ ASSERT_EQ(ret, -denied_errno);
+
+ fd = open_broker.Open(k_NotWhitelisted, O_RDONLY);
+ ASSERT_EQ(fd, -denied_errno);
+ fd = open_broker.Open(k_NotWhitelisted, O_WRONLY);
+ ASSERT_EQ(fd, -denied_errno);
+ fd = open_broker.Open(k_NotWhitelisted, O_RDWR);
+ ASSERT_EQ(fd, -denied_errno);
+ ret = open_broker.Access(k_NotWhitelisted, F_OK);
+ ASSERT_EQ(ret, -denied_errno);
+ ret = open_broker.Access(k_NotWhitelisted, R_OK);
+ ASSERT_EQ(ret, -denied_errno);
+ ret = open_broker.Access(k_NotWhitelisted, W_OK);
+ ASSERT_EQ(ret, -denied_errno);
+ ret = open_broker.Access(k_NotWhitelisted, R_OK | W_OK);
+ ASSERT_EQ(ret, -denied_errno);
+ ret = open_broker.Access(k_NotWhitelisted, X_OK);
+ ASSERT_EQ(ret, -denied_errno);
+ ret = open_broker.Access(k_NotWhitelisted, R_OK | X_OK);
+ ASSERT_EQ(ret, -denied_errno);
+
+ // We have some extra sanity check for clearly wrong values.
+ fd = open_broker.Open(kRW_WhiteListed, O_RDONLY | O_WRONLY | O_RDWR);
+ ASSERT_EQ(fd, -denied_errno);
+
+ // It makes no sense to allow O_CREAT in a 2-parameters open. Ensure this
+ // is denied.
+ fd = open_broker.Open(kRW_WhiteListed, O_RDWR | O_CREAT);
+ ASSERT_EQ(fd, -denied_errno);
+}
+
+// Run the same thing twice. The second time, we make sure that no security
+// check is performed on the client.
+TEST(BrokerProcess, OpenFilePermsWithClientCheck) {
+ TestOpenFilePerms(true /* fast_check_in_client */, EPERM);
+ // Don't do anything here, so that ASSERT works in the subfunction as
+ // expected.
+}
+
+TEST(BrokerProcess, OpenOpenFilePermsNoClientCheck) {
+ TestOpenFilePerms(false /* fast_check_in_client */, EPERM);
+ // Don't do anything here, so that ASSERT works in the subfunction as
+ // expected.
+}
+
+// Run the same twice again, but with ENOENT instead of EPERM.
+TEST(BrokerProcess, OpenFilePermsWithClientCheckNoEnt) {
+ TestOpenFilePerms(true /* fast_check_in_client */, ENOENT);
+ // Don't do anything here, so that ASSERT works in the subfunction as
+ // expected.
+}
+
+TEST(BrokerProcess, OpenOpenFilePermsNoClientCheckNoEnt) {
+ TestOpenFilePerms(false /* fast_check_in_client */, ENOENT);
+ // Don't do anything here, so that ASSERT works in the subfunction as
+ // expected.
+}
+
+void TestBadPaths(bool fast_check_in_client) {
+ const char kFileCpuInfo[] = "/proc/cpuinfo";
+ const char kNotAbsPath[] = "proc/cpuinfo";
+ const char kDotDotStart[] = "/../proc/cpuinfo";
+ const char kDotDotMiddle[] = "/proc/self/../cpuinfo";
+ const char kDotDotEnd[] = "/proc/..";
+ const char kTrailingSlash[] = "/proc/";
+
+ std::vector<BrokerFilePermission> permissions;
+
+ permissions.push_back(BrokerFilePermission::ReadOnlyRecursive("/proc/"));
+ std::unique_ptr<BrokerProcess> open_broker(
+ new BrokerProcess(EPERM, permissions, fast_check_in_client));
+ ASSERT_TRUE(open_broker->Init(base::Bind(&NoOpCallback)));
+ // Open cpuinfo via the broker.
+ int cpuinfo_fd = open_broker->Open(kFileCpuInfo, O_RDONLY);
+ base::ScopedFD cpuinfo_fd_closer(cpuinfo_fd);
+ ASSERT_GE(cpuinfo_fd, 0);
+
+ int fd = -1;
+ int can_access;
+
+ can_access = open_broker->Access(kNotAbsPath, R_OK);
+ ASSERT_EQ(can_access, -EPERM);
+ fd = open_broker->Open(kNotAbsPath, O_RDONLY);
+ ASSERT_EQ(fd, -EPERM);
+
+ can_access = open_broker->Access(kDotDotStart, R_OK);
+ ASSERT_EQ(can_access, -EPERM);
+ fd = open_broker->Open(kDotDotStart, O_RDONLY);
+ ASSERT_EQ(fd, -EPERM);
+
+ can_access = open_broker->Access(kDotDotMiddle, R_OK);
+ ASSERT_EQ(can_access, -EPERM);
+ fd = open_broker->Open(kDotDotMiddle, O_RDONLY);
+ ASSERT_EQ(fd, -EPERM);
+
+ can_access = open_broker->Access(kDotDotEnd, R_OK);
+ ASSERT_EQ(can_access, -EPERM);
+ fd = open_broker->Open(kDotDotEnd, O_RDONLY);
+ ASSERT_EQ(fd, -EPERM);
+
+ can_access = open_broker->Access(kTrailingSlash, R_OK);
+ ASSERT_EQ(can_access, -EPERM);
+ fd = open_broker->Open(kTrailingSlash, O_RDONLY);
+ ASSERT_EQ(fd, -EPERM);
+}
+
+TEST(BrokerProcess, BadPathsClientCheck) {
+ TestBadPaths(true /* fast_check_in_client */);
+ // Don't do anything here, so that ASSERT works in the subfunction as
+ // expected.
+}
+
+TEST(BrokerProcess, BadPathsNoClientCheck) {
+ TestBadPaths(false /* fast_check_in_client */);
+ // Don't do anything here, so that ASSERT works in the subfunction as
+ // expected.
+}
+
+void TestOpenCpuinfo(bool fast_check_in_client, bool recursive) {
+  const char kFileCpuInfo[] = "/proc/cpuinfo";
+  const char kDirProc[] = "/proc/";
+
+  std::vector<BrokerFilePermission> permissions;
+  if (recursive)
+    permissions.push_back(BrokerFilePermission::ReadOnlyRecursive(kDirProc));
+  else
+    permissions.push_back(BrokerFilePermission::ReadOnly(kFileCpuInfo));
+
+  std::unique_ptr<BrokerProcess> open_broker(
+      new BrokerProcess(EPERM, permissions, fast_check_in_client));
+  ASSERT_TRUE(open_broker->Init(base::Bind(&NoOpCallback)));
+
+  int fd = -1;
+  fd = open_broker->Open(kFileCpuInfo, O_RDWR);
+  base::ScopedFD fd_closer(fd);
+  ASSERT_EQ(fd, -EPERM);
+
+  // Check we can read /proc/cpuinfo.
+  int can_access = open_broker->Access(kFileCpuInfo, R_OK);
+  ASSERT_EQ(can_access, 0);
+  // Check we can not write /proc/cpuinfo.
+  can_access = open_broker->Access(kFileCpuInfo, W_OK);
+  ASSERT_EQ(can_access, -EPERM);
+
+  // Open cpuinfo via the broker.
+  int cpuinfo_fd = open_broker->Open(kFileCpuInfo, O_RDONLY);
+  base::ScopedFD cpuinfo_fd_closer(cpuinfo_fd);
+  ASSERT_GE(cpuinfo_fd, 0);
+  char buf[3];
+  memset(buf, 0, sizeof(buf));
+  int read_len1 = read(cpuinfo_fd, buf, sizeof(buf));
+  ASSERT_GT(read_len1, 0);
+
+  // Open cpuinfo directly.
+  int cpuinfo_fd2 = open(kFileCpuInfo, O_RDONLY);
+  base::ScopedFD cpuinfo_fd2_closer(cpuinfo_fd2);
+  ASSERT_GE(cpuinfo_fd2, 0);
+  char buf2[3];
+  memset(buf2, 1, sizeof(buf2));
+  int read_len2 = read(cpuinfo_fd2, buf2, sizeof(buf2));
+  ASSERT_GT(read_len2, 0);
+
+  // The following is not guaranteed true, but will be in practice.
+  ASSERT_EQ(read_len1, read_len2);
+  // Compare the cpuinfo as returned by the broker with the one we opened
+  // ourselves.
+  ASSERT_EQ(memcmp(buf, buf2, read_len1), 0);
+
+  ASSERT_TRUE(TestUtils::CurrentProcessHasChildren());
+  open_broker.reset();
+  ASSERT_FALSE(TestUtils::CurrentProcessHasChildren());
+}
+
+// Run this test 4 times. With and without the check in client
+// and using a recursive path.
+TEST(BrokerProcess, OpenCpuinfoWithClientCheck) {
+ TestOpenCpuinfo(true /* fast_check_in_client */, false /* not recursive */);
+ // Don't do anything here, so that ASSERT works in the subfunction as
+ // expected.
+}
+
+TEST(BrokerProcess, OpenCpuinfoNoClientCheck) {
+ TestOpenCpuinfo(false /* fast_check_in_client */, false /* not recursive */);
+ // Don't do anything here, so that ASSERT works in the subfunction as
+ // expected.
+}
+
+TEST(BrokerProcess, OpenCpuinfoWithClientCheckRecursive) {
+ TestOpenCpuinfo(true /* fast_check_in_client */, true /* recursive */);
+ // Don't do anything here, so that ASSERT works in the subfunction as
+ // expected.
+}
+
+TEST(BrokerProcess, OpenCpuinfoNoClientCheckRecursive) {
+ TestOpenCpuinfo(false /* fast_check_in_client */, true /* recursive */);
+ // Don't do anything here, so that ASSERT works in the subfunction as
+ // expected.
+}
+
+TEST(BrokerProcess, OpenFileRW) {
+ ScopedTemporaryFile tempfile;
+ const char* tempfile_name = tempfile.full_file_name();
+
+ std::vector<BrokerFilePermission> permissions;
+ permissions.push_back(BrokerFilePermission::ReadWrite(tempfile_name));
+
+ BrokerProcess open_broker(EPERM, permissions);
+ ASSERT_TRUE(open_broker.Init(base::Bind(&NoOpCallback)));
+
+ // Check we can access that file with read or write.
+ int can_access = open_broker.Access(tempfile_name, R_OK | W_OK);
+ ASSERT_EQ(can_access, 0);
+
+ int tempfile2 = -1;
+ tempfile2 = open_broker.Open(tempfile_name, O_RDWR);
+ ASSERT_GE(tempfile2, 0);
+
+ // Write to the descriptor opened by the broker.
+ char test_text[] = "TESTTESTTEST";
+ ssize_t len = write(tempfile2, test_text, sizeof(test_text));
+ ASSERT_EQ(len, static_cast<ssize_t>(sizeof(test_text)));
+
+ // Read back from the original file descriptor what we wrote through
+ // the descriptor provided by the broker.
+ char buf[1024];
+ len = read(tempfile.fd(), buf, sizeof(buf));
+
+ ASSERT_EQ(len, static_cast<ssize_t>(sizeof(test_text)));
+ ASSERT_EQ(memcmp(test_text, buf, sizeof(test_text)), 0);
+
+ ASSERT_EQ(close(tempfile2), 0);
+}
+
+// SANDBOX_TEST because the process could die with a SIGPIPE
+// and we want this to happen in a subprocess.
+SANDBOX_TEST(BrokerProcess, BrokerDied) {
+  const char kCpuInfo[] = "/proc/cpuinfo";
+  std::vector<BrokerFilePermission> permissions;
+  permissions.push_back(BrokerFilePermission::ReadOnly(kCpuInfo));
+
+  BrokerProcess open_broker(EPERM, permissions, true /* fast_check_in_client */,
+                            true /* quiet_failures_for_tests */);
+  SANDBOX_ASSERT(open_broker.Init(base::Bind(&NoOpCallback)));
+  const pid_t broker_pid = open_broker.broker_pid();
+  SANDBOX_ASSERT(kill(broker_pid, SIGKILL) == 0);
+
+  // Now we check that the broker has been signaled, but do not reap it.
+  siginfo_t process_info;
+  SANDBOX_ASSERT(HANDLE_EINTR(waitid(
+                     P_PID, broker_pid, &process_info, WEXITED | WNOWAIT)) ==
+                 0);
+  SANDBOX_ASSERT(broker_pid == process_info.si_pid);
+  SANDBOX_ASSERT(CLD_KILLED == process_info.si_code);
+  SANDBOX_ASSERT(SIGKILL == process_info.si_status);
+
+  // Check that doing Open with a dead broker won't SIGPIPE us.
+  SANDBOX_ASSERT(open_broker.Open(kCpuInfo, O_RDONLY) == -ENOMEM);
+  SANDBOX_ASSERT(open_broker.Access(kCpuInfo, F_OK) == -ENOMEM);
+}
+
+void TestOpenComplexFlags(bool fast_check_in_client) {
+ const char kCpuInfo[] = "/proc/cpuinfo";
+ std::vector<BrokerFilePermission> permissions;
+ permissions.push_back(BrokerFilePermission::ReadOnly(kCpuInfo));
+
+ BrokerProcess open_broker(EPERM, permissions, fast_check_in_client);
+ ASSERT_TRUE(open_broker.Init(base::Bind(&NoOpCallback)));
+ // Test that we do the right thing for O_CLOEXEC and O_NONBLOCK.
+ int fd = -1;
+ int ret = 0;
+ fd = open_broker.Open(kCpuInfo, O_RDONLY);
+ ASSERT_GE(fd, 0);
+ ret = fcntl(fd, F_GETFL);
+ ASSERT_NE(-1, ret);
+ // The descriptor shouldn't have the O_CLOEXEC attribute, nor O_NONBLOCK.
+ ASSERT_EQ(0, ret & (O_CLOEXEC | O_NONBLOCK));
+ ASSERT_EQ(0, close(fd));
+
+ fd = open_broker.Open(kCpuInfo, O_RDONLY | O_CLOEXEC);
+ ASSERT_GE(fd, 0);
+ ret = fcntl(fd, F_GETFD);
+ ASSERT_NE(-1, ret);
+ // Important: use F_GETFD, not F_GETFL. The O_CLOEXEC flag in F_GETFL
+ // is actually not used by the kernel.
+ ASSERT_TRUE(FD_CLOEXEC & ret);
+ ASSERT_EQ(0, close(fd));
+
+ fd = open_broker.Open(kCpuInfo, O_RDONLY | O_NONBLOCK);
+ ASSERT_GE(fd, 0);
+ ret = fcntl(fd, F_GETFL);
+ ASSERT_NE(-1, ret);
+ ASSERT_TRUE(O_NONBLOCK & ret);
+ ASSERT_EQ(0, close(fd));
+}
+
+TEST(BrokerProcess, OpenComplexFlagsWithClientCheck) {
+ TestOpenComplexFlags(true /* fast_check_in_client */);
+ // Don't do anything here, so that ASSERT works in the subfunction as
+ // expected.
+}
+
+TEST(BrokerProcess, OpenComplexFlagsNoClientCheck) {
+ TestOpenComplexFlags(false /* fast_check_in_client */);
+ // Don't do anything here, so that ASSERT works in the subfunction as
+ // expected.
+}
+
+#if defined(OS_LINUX)
+// Flaky on Linux NG bots: https://crbug.com/595199.
+#define MAYBE_RecvMsgDescriptorLeak DISABLED_RecvMsgDescriptorLeak
+#else
+#define MAYBE_RecvMsgDescriptorLeak RecvMsgDescriptorLeak
+#endif
+
+// We need to allow noise because the broker will log when it receives our
+// bogus IPCs.
+SANDBOX_TEST_ALLOW_NOISE(BrokerProcess, MAYBE_RecvMsgDescriptorLeak) {
+ // Android creates a socket on first use of the LOG call.
+ // We need to ensure this socket is open before we
+ // begin the test.
+ LOG(INFO) << "Ensure Android LOG socket is allocated";
+
+ // Find the four lowest available file descriptors.
+ int available_fds[4];
+ SANDBOX_ASSERT(0 == pipe(available_fds));
+ SANDBOX_ASSERT(0 == pipe(available_fds + 2));
+
+ // Save one FD to send to the broker later, and close the others.
+ base::ScopedFD message_fd(available_fds[0]);
+ for (size_t i = 1; i < arraysize(available_fds); i++) {
+ SANDBOX_ASSERT(0 == IGNORE_EINTR(close(available_fds[i])));
+ }
+
+ // Lower our file descriptor limit to just allow three more file descriptors
+ // to be allocated. (N.B., RLIMIT_NOFILE doesn't limit the number of file
+ // descriptors a process can have: it only limits the highest value that can
+ // be assigned to newly-created descriptors allocated by the process.)
+ const rlim_t fd_limit =
+ 1 +
+ *std::max_element(available_fds,
+ available_fds + arraysize(available_fds));
+
+ // Valgrind doesn't allow changing the hard descriptor limit, so we only
+ // change the soft descriptor limit here.
+ struct rlimit rlim;
+ SANDBOX_ASSERT(0 == getrlimit(RLIMIT_NOFILE, &rlim));
+ SANDBOX_ASSERT(fd_limit <= rlim.rlim_cur);
+ rlim.rlim_cur = fd_limit;
+ SANDBOX_ASSERT(0 == setrlimit(RLIMIT_NOFILE, &rlim));
+
+ static const char kCpuInfo[] = "/proc/cpuinfo";
+ std::vector<BrokerFilePermission> permissions;
+ permissions.push_back(BrokerFilePermission::ReadOnly(kCpuInfo));
+
+ BrokerProcess open_broker(EPERM, permissions);
+ SANDBOX_ASSERT(open_broker.Init(base::Bind(&NoOpCallback)));
+
+ const int ipc_fd = BrokerProcessTestHelper::GetIPCDescriptor(&open_broker);
+ SANDBOX_ASSERT(ipc_fd >= 0);
+
+ static const char kBogus[] = "not a pickle";
+ std::vector<int> fds;
+ fds.push_back(message_fd.get());
+
+ // The broker process should only have a couple spare file descriptors
+ // available, but for good measure we send it fd_limit bogus IPCs anyway.
+ for (rlim_t i = 0; i < fd_limit; ++i) {
+ SANDBOX_ASSERT(
+ base::UnixDomainSocket::SendMsg(ipc_fd, kBogus, sizeof(kBogus), fds));
+ }
+
+ const int fd = open_broker.Open(kCpuInfo, O_RDONLY);
+ SANDBOX_ASSERT(fd >= 0);
+ SANDBOX_ASSERT(0 == IGNORE_EINTR(close(fd)));
+}
+
+bool CloseFD(int fd) {
+ PCHECK(0 == IGNORE_EINTR(close(fd)));
+ return true;
+}
+
+// Return true if the other end of the |reader| pipe was closed,
+// false if |timeout_in_ms| was reached or another event
+// or error occurred.
+bool WaitForClosedPipeWriter(int reader, int timeout_in_ms) {
+  struct pollfd poll_fd = {reader, POLLIN | POLLRDHUP, 0};
+  const int num_events = HANDLE_EINTR(poll(&poll_fd, 1, timeout_in_ms));
+  if (1 == num_events && (poll_fd.revents & POLLHUP))
+    return true;
+  return false;
+}
+
+// Closing the broker client's IPC channel should terminate the broker
+// process.
+TEST(BrokerProcess, BrokerDiesOnClosedChannel) {
+ std::vector<BrokerFilePermission> permissions;
+ permissions.push_back(BrokerFilePermission::ReadOnly("/proc/cpuinfo"));
+
+ // Get the writing end of a pipe into the broker (child) process so
+ // that we can reliably detect when it dies.
+ int lifeline_fds[2];
+ PCHECK(0 == pipe(lifeline_fds));
+
+ BrokerProcess open_broker(EPERM, permissions, true /* fast_check_in_client */,
+ false /* quiet_failures_for_tests */);
+ ASSERT_TRUE(open_broker.Init(base::Bind(&CloseFD, lifeline_fds[0])));
+ // Make sure the writing end only exists in the broker process.
+ CloseFD(lifeline_fds[1]);
+ base::ScopedFD reader(lifeline_fds[0]);
+
+ const pid_t broker_pid = open_broker.broker_pid();
+
+ // This should cause the broker process to exit.
+ BrokerProcessTestHelper::CloseChannel(&open_broker);
+
+ const int kTimeoutInMilliseconds = 5000;
+ const bool broker_lifeline_closed =
+ WaitForClosedPipeWriter(reader.get(), kTimeoutInMilliseconds);
+ // If the broker exited, its lifeline fd should be closed.
+ ASSERT_TRUE(broker_lifeline_closed);
+ // Now check that the broker has exited, but do not reap it.
+ siginfo_t process_info;
+ ASSERT_EQ(0, HANDLE_EINTR(waitid(P_PID, broker_pid, &process_info,
+ WEXITED | WNOWAIT)));
+ EXPECT_EQ(broker_pid, process_info.si_pid);
+ EXPECT_EQ(CLD_EXITED, process_info.si_code);
+ EXPECT_EQ(1, process_info.si_status);
+}
+
+TEST(BrokerProcess, CreateFile) {
+ std::string temp_str;
+ {
+ ScopedTemporaryFile tmp_file;
+ temp_str = tmp_file.full_file_name();
+ }
+ const char* tempfile_name = temp_str.c_str();
+
+ std::vector<BrokerFilePermission> permissions;
+ permissions.push_back(BrokerFilePermission::ReadWriteCreate(tempfile_name));
+
+ BrokerProcess open_broker(EPERM, permissions);
+ ASSERT_TRUE(open_broker.Init(base::Bind(&NoOpCallback)));
+
+ int fd = -1;
+
+ // Try without O_EXCL
+ fd = open_broker.Open(tempfile_name, O_RDWR | O_CREAT);
+ ASSERT_EQ(fd, -EPERM);
+
+ const char kTestText[] = "TESTTESTTEST";
+ // Create a file
+ fd = open_broker.Open(tempfile_name, O_RDWR | O_CREAT | O_EXCL);
+ ASSERT_GE(fd, 0);
+ {
+ base::ScopedFD scoped_fd(fd);
+
+ // Confirm fail if file exists
+ int bad_fd = open_broker.Open(tempfile_name, O_RDWR | O_CREAT | O_EXCL);
+ ASSERT_EQ(bad_fd, -EEXIST);
+
+ // Write to the descriptor opened by the broker.
+
+ ssize_t len = HANDLE_EINTR(write(fd, kTestText, sizeof(kTestText)));
+ ASSERT_EQ(len, static_cast<ssize_t>(sizeof(kTestText)));
+ }
+
+ int fd_check = open(tempfile_name, O_RDONLY);
+ ASSERT_GE(fd_check, 0);
+ {
+ base::ScopedFD scoped_fd(fd_check);
+ char buf[1024];
+ ssize_t len = HANDLE_EINTR(read(fd_check, buf, sizeof(buf)));
+
+ ASSERT_EQ(len, static_cast<ssize_t>(sizeof(kTestText)));
+ ASSERT_EQ(memcmp(kTestText, buf, sizeof(kTestText)), 0);
+ }
+}
+
+} // namespace syscall_broker
+
+} // namespace sandbox
diff --git a/libchrome/sandbox/linux/system_headers/arm64_linux_syscalls.h b/libchrome/sandbox/linux/system_headers/arm64_linux_syscalls.h
new file mode 100644
index 0000000..8acb2d1
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/arm64_linux_syscalls.h
@@ -0,0 +1,1062 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_ARM64_LINUX_SYSCALLS_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_ARM64_LINUX_SYSCALLS_H_
+
+#include <asm-generic/unistd.h>
+
+#if !defined(__NR_io_setup)
+#define __NR_io_setup 0
+#endif
+
+#if !defined(__NR_io_destroy)
+#define __NR_io_destroy 1
+#endif
+
+#if !defined(__NR_io_submit)
+#define __NR_io_submit 2
+#endif
+
+#if !defined(__NR_io_cancel)
+#define __NR_io_cancel 3
+#endif
+
+#if !defined(__NR_io_getevents)
+#define __NR_io_getevents 4
+#endif
+
+#if !defined(__NR_setxattr)
+#define __NR_setxattr 5
+#endif
+
+#if !defined(__NR_lsetxattr)
+#define __NR_lsetxattr 6
+#endif
+
+#if !defined(__NR_fsetxattr)
+#define __NR_fsetxattr 7
+#endif
+
+#if !defined(__NR_getxattr)
+#define __NR_getxattr 8
+#endif
+
+#if !defined(__NR_lgetxattr)
+#define __NR_lgetxattr 9
+#endif
+
+#if !defined(__NR_fgetxattr)
+#define __NR_fgetxattr 10
+#endif
+
+#if !defined(__NR_listxattr)
+#define __NR_listxattr 11
+#endif
+
+#if !defined(__NR_llistxattr)
+#define __NR_llistxattr 12
+#endif
+
+#if !defined(__NR_flistxattr)
+#define __NR_flistxattr 13
+#endif
+
+#if !defined(__NR_removexattr)
+#define __NR_removexattr 14
+#endif
+
+#if !defined(__NR_lremovexattr)
+#define __NR_lremovexattr 15
+#endif
+
+#if !defined(__NR_fremovexattr)
+#define __NR_fremovexattr 16
+#endif
+
+#if !defined(__NR_getcwd)
+#define __NR_getcwd 17
+#endif
+
+#if !defined(__NR_lookup_dcookie)
+#define __NR_lookup_dcookie 18
+#endif
+
+#if !defined(__NR_eventfd2)
+#define __NR_eventfd2 19
+#endif
+
+#if !defined(__NR_epoll_create1)
+#define __NR_epoll_create1 20
+#endif
+
+#if !defined(__NR_epoll_ctl)
+#define __NR_epoll_ctl 21
+#endif
+
+#if !defined(__NR_epoll_pwait)
+#define __NR_epoll_pwait 22
+#endif
+
+#if !defined(__NR_dup)
+#define __NR_dup 23
+#endif
+
+#if !defined(__NR_dup3)
+#define __NR_dup3 24
+#endif
+
+#if !defined(__NR_fcntl)
+#define __NR_fcntl 25
+#endif
+
+#if !defined(__NR_inotify_init1)
+#define __NR_inotify_init1 26
+#endif
+
+#if !defined(__NR_inotify_add_watch)
+#define __NR_inotify_add_watch 27
+#endif
+
+#if !defined(__NR_inotify_rm_watch)
+#define __NR_inotify_rm_watch 28
+#endif
+
+#if !defined(__NR_ioctl)
+#define __NR_ioctl 29
+#endif
+
+#if !defined(__NR_ioprio_set)
+#define __NR_ioprio_set 30
+#endif
+
+#if !defined(__NR_ioprio_get)
+#define __NR_ioprio_get 31
+#endif
+
+#if !defined(__NR_flock)
+#define __NR_flock 32
+#endif
+
+#if !defined(__NR_mknodat)
+#define __NR_mknodat 33
+#endif
+
+#if !defined(__NR_mkdirat)
+#define __NR_mkdirat 34
+#endif
+
+#if !defined(__NR_unlinkat)
+#define __NR_unlinkat 35
+#endif
+
+#if !defined(__NR_symlinkat)
+#define __NR_symlinkat 36
+#endif
+
+#if !defined(__NR_linkat)
+#define __NR_linkat 37
+#endif
+
+#if !defined(__NR_renameat)
+#define __NR_renameat 38
+#endif
+
+#if !defined(__NR_umount2)
+#define __NR_umount2 39
+#endif
+
+#if !defined(__NR_mount)
+#define __NR_mount 40
+#endif
+
+#if !defined(__NR_pivot_root)
+#define __NR_pivot_root 41
+#endif
+
+#if !defined(__NR_nfsservctl)
+#define __NR_nfsservctl 42
+#endif
+
+#if !defined(__NR_statfs)
+#define __NR_statfs 43
+#endif
+
+#if !defined(__NR_fstatfs)
+#define __NR_fstatfs 44
+#endif
+
+#if !defined(__NR_truncate)
+#define __NR_truncate 45
+#endif
+
+#if !defined(__NR_ftruncate)
+#define __NR_ftruncate 46
+#endif
+
+#if !defined(__NR_fallocate)
+#define __NR_fallocate 47
+#endif
+
+#if !defined(__NR_faccessat)
+#define __NR_faccessat 48
+#endif
+
+#if !defined(__NR_chdir)
+#define __NR_chdir 49
+#endif
+
+#if !defined(__NR_fchdir)
+#define __NR_fchdir 50
+#endif
+
+#if !defined(__NR_chroot)
+#define __NR_chroot 51
+#endif
+
+#if !defined(__NR_fchmod)
+#define __NR_fchmod 52
+#endif
+
+#if !defined(__NR_fchmodat)
+#define __NR_fchmodat 53
+#endif
+
+#if !defined(__NR_fchownat)
+#define __NR_fchownat 54
+#endif
+
+#if !defined(__NR_fchown)
+#define __NR_fchown 55
+#endif
+
+#if !defined(__NR_openat)
+#define __NR_openat 56
+#endif
+
+#if !defined(__NR_close)
+#define __NR_close 57
+#endif
+
+#if !defined(__NR_vhangup)
+#define __NR_vhangup 58
+#endif
+
+#if !defined(__NR_pipe2)
+#define __NR_pipe2 59
+#endif
+
+#if !defined(__NR_quotactl)
+#define __NR_quotactl 60
+#endif
+
+#if !defined(__NR_getdents64)
+#define __NR_getdents64 61
+#endif
+
+#if !defined(__NR_lseek)
+#define __NR_lseek 62
+#endif
+
+#if !defined(__NR_read)
+#define __NR_read 63
+#endif
+
+#if !defined(__NR_write)
+#define __NR_write 64
+#endif
+
+#if !defined(__NR_readv)
+#define __NR_readv 65
+#endif
+
+#if !defined(__NR_writev)
+#define __NR_writev 66
+#endif
+
+#if !defined(__NR_pread64)
+#define __NR_pread64 67
+#endif
+
+#if !defined(__NR_pwrite64)
+#define __NR_pwrite64 68
+#endif
+
+#if !defined(__NR_preadv)
+#define __NR_preadv 69
+#endif
+
+#if !defined(__NR_pwritev)
+#define __NR_pwritev 70
+#endif
+
+#if !defined(__NR_sendfile)
+#define __NR_sendfile 71
+#endif
+
+#if !defined(__NR_pselect6)
+#define __NR_pselect6 72
+#endif
+
+#if !defined(__NR_ppoll)
+#define __NR_ppoll 73
+#endif
+
+#if !defined(__NR_signalfd4)
+#define __NR_signalfd4 74
+#endif
+
+#if !defined(__NR_vmsplice)
+#define __NR_vmsplice 75
+#endif
+
+#if !defined(__NR_splice)
+#define __NR_splice 76
+#endif
+
+#if !defined(__NR_tee)
+#define __NR_tee 77
+#endif
+
+#if !defined(__NR_readlinkat)
+#define __NR_readlinkat 78
+#endif
+
+#if !defined(__NR_newfstatat)
+#define __NR_newfstatat 79
+#endif
+
+#if !defined(__NR_fstat)
+#define __NR_fstat 80
+#endif
+
+#if !defined(__NR_sync)
+#define __NR_sync 81
+#endif
+
+#if !defined(__NR_fsync)
+#define __NR_fsync 82
+#endif
+
+#if !defined(__NR_fdatasync)
+#define __NR_fdatasync 83
+#endif
+
+#if !defined(__NR_sync_file_range)
+#define __NR_sync_file_range 84
+#endif
+
+#if !defined(__NR_timerfd_create)
+#define __NR_timerfd_create 85
+#endif
+
+#if !defined(__NR_timerfd_settime)
+#define __NR_timerfd_settime 86
+#endif
+
+#if !defined(__NR_timerfd_gettime)
+#define __NR_timerfd_gettime 87
+#endif
+
+#if !defined(__NR_utimensat)
+#define __NR_utimensat 88
+#endif
+
+#if !defined(__NR_acct)
+#define __NR_acct 89
+#endif
+
+#if !defined(__NR_capget)
+#define __NR_capget 90
+#endif
+
+#if !defined(__NR_capset)
+#define __NR_capset 91
+#endif
+
+#if !defined(__NR_personality)
+#define __NR_personality 92
+#endif
+
+#if !defined(__NR_exit)
+#define __NR_exit 93
+#endif
+
+#if !defined(__NR_exit_group)
+#define __NR_exit_group 94
+#endif
+
+#if !defined(__NR_waitid)
+#define __NR_waitid 95
+#endif
+
+#if !defined(__NR_set_tid_address)
+#define __NR_set_tid_address 96
+#endif
+
+#if !defined(__NR_unshare)
+#define __NR_unshare 97
+#endif
+
+#if !defined(__NR_futex)
+#define __NR_futex 98
+#endif
+
+#if !defined(__NR_set_robust_list)
+#define __NR_set_robust_list 99
+#endif
+
+#if !defined(__NR_get_robust_list)
+#define __NR_get_robust_list 100
+#endif
+
+#if !defined(__NR_nanosleep)
+#define __NR_nanosleep 101
+#endif
+
+#if !defined(__NR_getitimer)
+#define __NR_getitimer 102
+#endif
+
+#if !defined(__NR_setitimer)
+#define __NR_setitimer 103
+#endif
+
+#if !defined(__NR_kexec_load)
+#define __NR_kexec_load 104
+#endif
+
+#if !defined(__NR_init_module)
+#define __NR_init_module 105
+#endif
+
+#if !defined(__NR_delete_module)
+#define __NR_delete_module 106
+#endif
+
+#if !defined(__NR_timer_create)
+#define __NR_timer_create 107
+#endif
+
+#if !defined(__NR_timer_gettime)
+#define __NR_timer_gettime 108
+#endif
+
+#if !defined(__NR_timer_getoverrun)
+#define __NR_timer_getoverrun 109
+#endif
+
+#if !defined(__NR_timer_settime)
+#define __NR_timer_settime 110
+#endif
+
+#if !defined(__NR_timer_delete)
+#define __NR_timer_delete 111
+#endif
+
+#if !defined(__NR_clock_settime)
+#define __NR_clock_settime 112
+#endif
+
+#if !defined(__NR_clock_gettime)
+#define __NR_clock_gettime 113
+#endif
+
+#if !defined(__NR_clock_getres)
+#define __NR_clock_getres 114
+#endif
+
+#if !defined(__NR_clock_nanosleep)
+#define __NR_clock_nanosleep 115
+#endif
+
+#if !defined(__NR_syslog)
+#define __NR_syslog 116
+#endif
+
+#if !defined(__NR_ptrace)
+#define __NR_ptrace 117
+#endif
+
+#if !defined(__NR_sched_setparam)
+#define __NR_sched_setparam 118
+#endif
+
+#if !defined(__NR_sched_setscheduler)
+#define __NR_sched_setscheduler 119
+#endif
+
+#if !defined(__NR_sched_getscheduler)
+#define __NR_sched_getscheduler 120
+#endif
+
+#if !defined(__NR_sched_getparam)
+#define __NR_sched_getparam 121
+#endif
+
+#if !defined(__NR_sched_setaffinity)
+#define __NR_sched_setaffinity 122
+#endif
+
+#if !defined(__NR_sched_getaffinity)
+#define __NR_sched_getaffinity 123
+#endif
+
+#if !defined(__NR_sched_yield)
+#define __NR_sched_yield 124
+#endif
+
+#if !defined(__NR_sched_get_priority_max)
+#define __NR_sched_get_priority_max 125
+#endif
+
+#if !defined(__NR_sched_get_priority_min)
+#define __NR_sched_get_priority_min 126
+#endif
+
+#if !defined(__NR_sched_rr_get_interval)
+#define __NR_sched_rr_get_interval 127
+#endif
+
+#if !defined(__NR_restart_syscall)
+#define __NR_restart_syscall 128
+#endif
+
+#if !defined(__NR_kill)
+#define __NR_kill 129
+#endif
+
+#if !defined(__NR_tkill)
+#define __NR_tkill 130
+#endif
+
+#if !defined(__NR_tgkill)
+#define __NR_tgkill 131
+#endif
+
+#if !defined(__NR_sigaltstack)
+#define __NR_sigaltstack 132
+#endif
+
+#if !defined(__NR_rt_sigsuspend)
+#define __NR_rt_sigsuspend 133
+#endif
+
+#if !defined(__NR_rt_sigaction)
+#define __NR_rt_sigaction 134
+#endif
+
+#if !defined(__NR_rt_sigprocmask)
+#define __NR_rt_sigprocmask 135
+#endif
+
+#if !defined(__NR_rt_sigpending)
+#define __NR_rt_sigpending 136
+#endif
+
+#if !defined(__NR_rt_sigtimedwait)
+#define __NR_rt_sigtimedwait 137
+#endif
+
+#if !defined(__NR_rt_sigqueueinfo)
+#define __NR_rt_sigqueueinfo 138
+#endif
+
+#if !defined(__NR_rt_sigreturn)
+#define __NR_rt_sigreturn 139
+#endif
+
+#if !defined(__NR_setpriority)
+#define __NR_setpriority 140
+#endif
+
+#if !defined(__NR_getpriority)
+#define __NR_getpriority 141
+#endif
+
+#if !defined(__NR_reboot)
+#define __NR_reboot 142
+#endif
+
+#if !defined(__NR_setregid)
+#define __NR_setregid 143
+#endif
+
+#if !defined(__NR_setgid)
+#define __NR_setgid 144
+#endif
+
+#if !defined(__NR_setreuid)
+#define __NR_setreuid 145
+#endif
+
+#if !defined(__NR_setuid)
+#define __NR_setuid 146
+#endif
+
+#if !defined(__NR_setresuid)
+#define __NR_setresuid 147
+#endif
+
+#if !defined(__NR_getresuid)
+#define __NR_getresuid 148
+#endif
+
+#if !defined(__NR_setresgid)
+#define __NR_setresgid 149
+#endif
+
+#if !defined(__NR_getresgid)
+#define __NR_getresgid 150
+#endif
+
+#if !defined(__NR_setfsuid)
+#define __NR_setfsuid 151
+#endif
+
+#if !defined(__NR_setfsgid)
+#define __NR_setfsgid 152
+#endif
+
+#if !defined(__NR_times)
+#define __NR_times 153
+#endif
+
+#if !defined(__NR_setpgid)
+#define __NR_setpgid 154
+#endif
+
+#if !defined(__NR_getpgid)
+#define __NR_getpgid 155
+#endif
+
+#if !defined(__NR_getsid)
+#define __NR_getsid 156
+#endif
+
+#if !defined(__NR_setsid)
+#define __NR_setsid 157
+#endif
+
+#if !defined(__NR_getgroups)
+#define __NR_getgroups 158
+#endif
+
+#if !defined(__NR_setgroups)
+#define __NR_setgroups 159
+#endif
+
+#if !defined(__NR_uname)
+#define __NR_uname 160
+#endif
+
+#if !defined(__NR_sethostname)
+#define __NR_sethostname 161
+#endif
+
+#if !defined(__NR_setdomainname)
+#define __NR_setdomainname 162
+#endif
+
+#if !defined(__NR_getrlimit)
+#define __NR_getrlimit 163
+#endif
+
+#if !defined(__NR_setrlimit)
+#define __NR_setrlimit 164
+#endif
+
+#if !defined(__NR_getrusage)
+#define __NR_getrusage 165
+#endif
+
+#if !defined(__NR_umask)
+#define __NR_umask 166
+#endif
+
+#if !defined(__NR_prctl)
+#define __NR_prctl 167
+#endif
+
+#if !defined(__NR_getcpu)
+#define __NR_getcpu 168
+#endif
+
+#if !defined(__NR_gettimeofday)
+#define __NR_gettimeofday 169
+#endif
+
+#if !defined(__NR_settimeofday)
+#define __NR_settimeofday 170
+#endif
+
+#if !defined(__NR_adjtimex)
+#define __NR_adjtimex 171
+#endif
+
+#if !defined(__NR_getpid)
+#define __NR_getpid 172
+#endif
+
+#if !defined(__NR_getppid)
+#define __NR_getppid 173
+#endif
+
+#if !defined(__NR_getuid)
+#define __NR_getuid 174
+#endif
+
+#if !defined(__NR_geteuid)
+#define __NR_geteuid 175
+#endif
+
+#if !defined(__NR_getgid)
+#define __NR_getgid 176
+#endif
+
+#if !defined(__NR_getegid)
+#define __NR_getegid 177
+#endif
+
+#if !defined(__NR_gettid)
+#define __NR_gettid 178
+#endif
+
+#if !defined(__NR_sysinfo)
+#define __NR_sysinfo 179
+#endif
+
+#if !defined(__NR_mq_open)
+#define __NR_mq_open 180
+#endif
+
+#if !defined(__NR_mq_unlink)
+#define __NR_mq_unlink 181
+#endif
+
+#if !defined(__NR_mq_timedsend)
+#define __NR_mq_timedsend 182
+#endif
+
+#if !defined(__NR_mq_timedreceive)
+#define __NR_mq_timedreceive 183
+#endif
+
+#if !defined(__NR_mq_notify)
+#define __NR_mq_notify 184
+#endif
+
+#if !defined(__NR_mq_getsetattr)
+#define __NR_mq_getsetattr 185
+#endif
+
+#if !defined(__NR_msgget)
+#define __NR_msgget 186
+#endif
+
+#if !defined(__NR_msgctl)
+#define __NR_msgctl 187
+#endif
+
+#if !defined(__NR_msgrcv)
+#define __NR_msgrcv 188
+#endif
+
+#if !defined(__NR_msgsnd)
+#define __NR_msgsnd 189
+#endif
+
+#if !defined(__NR_semget)
+#define __NR_semget 190
+#endif
+
+#if !defined(__NR_semctl)
+#define __NR_semctl 191
+#endif
+
+#if !defined(__NR_semtimedop)
+#define __NR_semtimedop 192
+#endif
+
+#if !defined(__NR_semop)
+#define __NR_semop 193
+#endif
+
+#if !defined(__NR_shmget)
+#define __NR_shmget 194
+#endif
+
+#if !defined(__NR_shmctl)
+#define __NR_shmctl 195
+#endif
+
+#if !defined(__NR_shmat)
+#define __NR_shmat 196
+#endif
+
+#if !defined(__NR_shmdt)
+#define __NR_shmdt 197
+#endif
+
+#if !defined(__NR_socket)
+#define __NR_socket 198
+#endif
+
+#if !defined(__NR_socketpair)
+#define __NR_socketpair 199
+#endif
+
+#if !defined(__NR_bind)
+#define __NR_bind 200
+#endif
+
+#if !defined(__NR_listen)
+#define __NR_listen 201
+#endif
+
+#if !defined(__NR_accept)
+#define __NR_accept 202
+#endif
+
+#if !defined(__NR_connect)
+#define __NR_connect 203
+#endif
+
+#if !defined(__NR_getsockname)
+#define __NR_getsockname 204
+#endif
+
+#if !defined(__NR_getpeername)
+#define __NR_getpeername 205
+#endif
+
+#if !defined(__NR_sendto)
+#define __NR_sendto 206
+#endif
+
+#if !defined(__NR_recvfrom)
+#define __NR_recvfrom 207
+#endif
+
+#if !defined(__NR_setsockopt)
+#define __NR_setsockopt 208
+#endif
+
+#if !defined(__NR_getsockopt)
+#define __NR_getsockopt 209
+#endif
+
+#if !defined(__NR_shutdown)
+#define __NR_shutdown 210
+#endif
+
+#if !defined(__NR_sendmsg)
+#define __NR_sendmsg 211
+#endif
+
+#if !defined(__NR_recvmsg)
+#define __NR_recvmsg 212
+#endif
+
+#if !defined(__NR_readahead)
+#define __NR_readahead 213
+#endif
+
+#if !defined(__NR_brk)
+#define __NR_brk 214
+#endif
+
+#if !defined(__NR_munmap)
+#define __NR_munmap 215
+#endif
+
+#if !defined(__NR_mremap)
+#define __NR_mremap 216
+#endif
+
+#if !defined(__NR_add_key)
+#define __NR_add_key 217
+#endif
+
+#if !defined(__NR_request_key)
+#define __NR_request_key 218
+#endif
+
+#if !defined(__NR_keyctl)
+#define __NR_keyctl 219
+#endif
+
+#if !defined(__NR_clone)
+#define __NR_clone 220
+#endif
+
+#if !defined(__NR_execve)
+#define __NR_execve 221
+#endif
+
+#if !defined(__NR_mmap)
+#define __NR_mmap 222
+#endif
+
+#if !defined(__NR_fadvise64)
+#define __NR_fadvise64 223
+#endif
+
+#if !defined(__NR_swapon)
+#define __NR_swapon 224
+#endif
+
+#if !defined(__NR_swapoff)
+#define __NR_swapoff 225
+#endif
+
+#if !defined(__NR_mprotect)
+#define __NR_mprotect 226
+#endif
+
+#if !defined(__NR_msync)
+#define __NR_msync 227
+#endif
+
+#if !defined(__NR_mlock)
+#define __NR_mlock 228
+#endif
+
+#if !defined(__NR_munlock)
+#define __NR_munlock 229
+#endif
+
+#if !defined(__NR_mlockall)
+#define __NR_mlockall 230
+#endif
+
+#if !defined(__NR_munlockall)
+#define __NR_munlockall 231
+#endif
+
+#if !defined(__NR_mincore)
+#define __NR_mincore 232
+#endif
+
+#if !defined(__NR_madvise)
+#define __NR_madvise 233
+#endif
+
+#if !defined(__NR_remap_file_pages)
+#define __NR_remap_file_pages 234
+#endif
+
+#if !defined(__NR_mbind)
+#define __NR_mbind 235
+#endif
+
+#if !defined(__NR_get_mempolicy)
+#define __NR_get_mempolicy 236
+#endif
+
+#if !defined(__NR_set_mempolicy)
+#define __NR_set_mempolicy 237
+#endif
+
+#if !defined(__NR_migrate_pages)
+#define __NR_migrate_pages 238
+#endif
+
+#if !defined(__NR_move_pages)
+#define __NR_move_pages 239
+#endif
+
+#if !defined(__NR_rt_tgsigqueueinfo)
+#define __NR_rt_tgsigqueueinfo 240
+#endif
+
+#if !defined(__NR_perf_event_open)
+#define __NR_perf_event_open 241
+#endif
+
+#if !defined(__NR_accept4)
+#define __NR_accept4 242
+#endif
+
+#if !defined(__NR_recvmmsg)
+#define __NR_recvmmsg 243
+#endif
+
+#if !defined(__NR_wait4)
+#define __NR_wait4 260
+#endif
+
+#if !defined(__NR_prlimit64)
+#define __NR_prlimit64 261
+#endif
+
+#if !defined(__NR_fanotify_init)
+#define __NR_fanotify_init 262
+#endif
+
+#if !defined(__NR_fanotify_mark)
+#define __NR_fanotify_mark 263
+#endif
+
+#if !defined(__NR_name_to_handle_at)
+#define __NR_name_to_handle_at 264
+#endif
+
+#if !defined(__NR_open_by_handle_at)
+#define __NR_open_by_handle_at 265
+#endif
+
+#if !defined(__NR_clock_adjtime)
+#define __NR_clock_adjtime 266
+#endif
+
+#if !defined(__NR_syncfs)
+#define __NR_syncfs 267
+#endif
+
+#if !defined(__NR_setns)
+#define __NR_setns 268
+#endif
+
+#if !defined(__NR_sendmmsg)
+#define __NR_sendmmsg 269
+#endif
+
+#if !defined(__NR_process_vm_readv)
+#define __NR_process_vm_readv 270
+#endif
+
+#if !defined(__NR_process_vm_writev)
+#define __NR_process_vm_writev 271
+#endif
+
+#if !defined(__NR_kcmp)
+#define __NR_kcmp 272
+#endif
+
+#if !defined(__NR_finit_module)
+#define __NR_finit_module 273
+#endif
+
+#if !defined(__NR_sched_setattr)
+#define __NR_sched_setattr 274
+#endif
+
+#if !defined(__NR_sched_getattr)
+#define __NR_sched_getattr 275
+#endif
+
+#if !defined(__NR_renameat2)
+#define __NR_renameat2 276
+#endif
+
+#if !defined(__NR_seccomp)
+#define __NR_seccomp 277
+#endif
+
+#if !defined(__NR_getrandom)
+#define __NR_getrandom 278
+#endif
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_ARM64_LINUX_SYSCALLS_H_
diff --git a/libchrome/sandbox/linux/system_headers/arm64_linux_ucontext.h b/libchrome/sandbox/linux/system_headers/arm64_linux_ucontext.h
new file mode 100644
index 0000000..48303ba
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/arm64_linux_ucontext.h
@@ -0,0 +1,30 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_ARM64_LINUX_UCONTEXT_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_ARM64_LINUX_UCONTEXT_H_
+
+#if !defined(__BIONIC_HAVE_UCONTEXT_T)
+#include <asm/sigcontext.h>
+#include <signal.h>
+#include <stdint.h>
+// We also need greg_t for the sandbox, include it in this header as well.
+typedef uint64_t greg_t;
+
+struct ucontext_t {
+ unsigned long uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ sigset_t uc_sigmask;
+ /* glibc uses a 1024-bit sigset_t */
+ uint8_t unused[1024 / 8 - sizeof(sigset_t)];
+ /* last for future expansion */
+ struct sigcontext uc_mcontext;
+};
+
+#else
+#include <sys/ucontext.h>
+#endif // __BIONIC_HAVE_UCONTEXT_T
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_ARM64_LINUX_UCONTEXT_H_
diff --git a/libchrome/sandbox/linux/system_headers/arm_linux_syscalls.h b/libchrome/sandbox/linux/system_headers/arm_linux_syscalls.h
new file mode 100644
index 0000000..1addd53
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/arm_linux_syscalls.h
@@ -0,0 +1,1418 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated from the Linux kernel's calls.S.
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_ARM_LINUX_SYSCALLS_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_ARM_LINUX_SYSCALLS_H_
+
+#if !defined(__arm__) || !defined(__ARM_EABI__)
+#error "Including header on wrong architecture"
+#endif
+
+#if !defined(__NR_SYSCALL_BASE)
+// On ARM EABI arch, __NR_SYSCALL_BASE is 0.
+#define __NR_SYSCALL_BASE 0
+#endif
+
+// This syscall list has holes, because ARM EABI makes some syscalls obsolete.
+
+#if !defined(__NR_restart_syscall)
+#define __NR_restart_syscall (__NR_SYSCALL_BASE+0)
+#endif
+
+#if !defined(__NR_exit)
+#define __NR_exit (__NR_SYSCALL_BASE+1)
+#endif
+
+#if !defined(__NR_fork)
+#define __NR_fork (__NR_SYSCALL_BASE+2)
+#endif
+
+#if !defined(__NR_read)
+#define __NR_read (__NR_SYSCALL_BASE+3)
+#endif
+
+#if !defined(__NR_write)
+#define __NR_write (__NR_SYSCALL_BASE+4)
+#endif
+
+#if !defined(__NR_open)
+#define __NR_open (__NR_SYSCALL_BASE+5)
+#endif
+
+#if !defined(__NR_close)
+#define __NR_close (__NR_SYSCALL_BASE+6)
+#endif
+
+#if !defined(__NR_creat)
+#define __NR_creat (__NR_SYSCALL_BASE+8)
+#endif
+
+#if !defined(__NR_link)
+#define __NR_link (__NR_SYSCALL_BASE+9)
+#endif
+
+#if !defined(__NR_unlink)
+#define __NR_unlink (__NR_SYSCALL_BASE+10)
+#endif
+
+#if !defined(__NR_execve)
+#define __NR_execve (__NR_SYSCALL_BASE+11)
+#endif
+
+#if !defined(__NR_chdir)
+#define __NR_chdir (__NR_SYSCALL_BASE+12)
+#endif
+
+#if !defined(__NR_mknod)
+#define __NR_mknod (__NR_SYSCALL_BASE+14)
+#endif
+
+#if !defined(__NR_chmod)
+#define __NR_chmod (__NR_SYSCALL_BASE+15)
+#endif
+
+#if !defined(__NR_lchown)
+#define __NR_lchown (__NR_SYSCALL_BASE+16)
+#endif
+
+#if !defined(__NR_lseek)
+#define __NR_lseek (__NR_SYSCALL_BASE+19)
+#endif
+
+#if !defined(__NR_getpid)
+#define __NR_getpid (__NR_SYSCALL_BASE+20)
+#endif
+
+#if !defined(__NR_mount)
+#define __NR_mount (__NR_SYSCALL_BASE+21)
+#endif
+
+#if !defined(__NR_setuid)
+#define __NR_setuid (__NR_SYSCALL_BASE+23)
+#endif
+
+#if !defined(__NR_getuid)
+#define __NR_getuid (__NR_SYSCALL_BASE+24)
+#endif
+
+#if !defined(__NR_ptrace)
+#define __NR_ptrace (__NR_SYSCALL_BASE+26)
+#endif
+
+#if !defined(__NR_pause)
+#define __NR_pause (__NR_SYSCALL_BASE+29)
+#endif
+
+#if !defined(__NR_access)
+#define __NR_access (__NR_SYSCALL_BASE+33)
+#endif
+
+#if !defined(__NR_nice)
+#define __NR_nice (__NR_SYSCALL_BASE+34)
+#endif
+
+#if !defined(__NR_sync)
+#define __NR_sync (__NR_SYSCALL_BASE+36)
+#endif
+
+#if !defined(__NR_kill)
+#define __NR_kill (__NR_SYSCALL_BASE+37)
+#endif
+
+#if !defined(__NR_rename)
+#define __NR_rename (__NR_SYSCALL_BASE+38)
+#endif
+
+#if !defined(__NR_mkdir)
+#define __NR_mkdir (__NR_SYSCALL_BASE+39)
+#endif
+
+#if !defined(__NR_rmdir)
+#define __NR_rmdir (__NR_SYSCALL_BASE+40)
+#endif
+
+#if !defined(__NR_dup)
+#define __NR_dup (__NR_SYSCALL_BASE+41)
+#endif
+
+#if !defined(__NR_pipe)
+#define __NR_pipe (__NR_SYSCALL_BASE+42)
+#endif
+
+#if !defined(__NR_times)
+#define __NR_times (__NR_SYSCALL_BASE+43)
+#endif
+
+#if !defined(__NR_brk)
+#define __NR_brk (__NR_SYSCALL_BASE+45)
+#endif
+
+#if !defined(__NR_setgid)
+#define __NR_setgid (__NR_SYSCALL_BASE+46)
+#endif
+
+#if !defined(__NR_getgid)
+#define __NR_getgid (__NR_SYSCALL_BASE+47)
+#endif
+
+#if !defined(__NR_geteuid)
+#define __NR_geteuid (__NR_SYSCALL_BASE+49)
+#endif
+
+#if !defined(__NR_getegid)
+#define __NR_getegid (__NR_SYSCALL_BASE+50)
+#endif
+
+#if !defined(__NR_acct)
+#define __NR_acct (__NR_SYSCALL_BASE+51)
+#endif
+
+#if !defined(__NR_umount2)
+#define __NR_umount2 (__NR_SYSCALL_BASE+52)
+#endif
+
+#if !defined(__NR_ioctl)
+#define __NR_ioctl (__NR_SYSCALL_BASE+54)
+#endif
+
+#if !defined(__NR_fcntl)
+#define __NR_fcntl (__NR_SYSCALL_BASE+55)
+#endif
+
+#if !defined(__NR_setpgid)
+#define __NR_setpgid (__NR_SYSCALL_BASE+57)
+#endif
+
+#if !defined(__NR_umask)
+#define __NR_umask (__NR_SYSCALL_BASE+60)
+#endif
+
+#if !defined(__NR_chroot)
+#define __NR_chroot (__NR_SYSCALL_BASE+61)
+#endif
+
+#if !defined(__NR_ustat)
+#define __NR_ustat (__NR_SYSCALL_BASE+62)
+#endif
+
+#if !defined(__NR_dup2)
+#define __NR_dup2 (__NR_SYSCALL_BASE+63)
+#endif
+
+#if !defined(__NR_getppid)
+#define __NR_getppid (__NR_SYSCALL_BASE+64)
+#endif
+
+#if !defined(__NR_getpgrp)
+#define __NR_getpgrp (__NR_SYSCALL_BASE+65)
+#endif
+
+#if !defined(__NR_setsid)
+#define __NR_setsid (__NR_SYSCALL_BASE+66)
+#endif
+
+#if !defined(__NR_sigaction)
+#define __NR_sigaction (__NR_SYSCALL_BASE+67)
+#endif
+
+#if !defined(__NR_setreuid)
+#define __NR_setreuid (__NR_SYSCALL_BASE+70)
+#endif
+
+#if !defined(__NR_setregid)
+#define __NR_setregid (__NR_SYSCALL_BASE+71)
+#endif
+
+#if !defined(__NR_sigsuspend)
+#define __NR_sigsuspend (__NR_SYSCALL_BASE+72)
+#endif
+
+#if !defined(__NR_sigpending)
+#define __NR_sigpending (__NR_SYSCALL_BASE+73)
+#endif
+
+#if !defined(__NR_sethostname)
+#define __NR_sethostname (__NR_SYSCALL_BASE+74)
+#endif
+
+#if !defined(__NR_setrlimit)
+#define __NR_setrlimit (__NR_SYSCALL_BASE+75)
+#endif
+
+#if !defined(__NR_getrusage)
+#define __NR_getrusage (__NR_SYSCALL_BASE+77)
+#endif
+
+#if !defined(__NR_gettimeofday)
+#define __NR_gettimeofday (__NR_SYSCALL_BASE+78)
+#endif
+
+#if !defined(__NR_settimeofday)
+#define __NR_settimeofday (__NR_SYSCALL_BASE+79)
+#endif
+
+#if !defined(__NR_getgroups)
+#define __NR_getgroups (__NR_SYSCALL_BASE+80)
+#endif
+
+#if !defined(__NR_setgroups)
+#define __NR_setgroups (__NR_SYSCALL_BASE+81)
+#endif
+
+#if !defined(__NR_symlink)
+#define __NR_symlink (__NR_SYSCALL_BASE+83)
+#endif
+
+#if !defined(__NR_readlink)
+#define __NR_readlink (__NR_SYSCALL_BASE+85)
+#endif
+
+#if !defined(__NR_uselib)
+#define __NR_uselib (__NR_SYSCALL_BASE+86)
+#endif
+
+#if !defined(__NR_swapon)
+#define __NR_swapon (__NR_SYSCALL_BASE+87)
+#endif
+
+#if !defined(__NR_reboot)
+#define __NR_reboot (__NR_SYSCALL_BASE+88)
+#endif
+
+#if !defined(__NR_munmap)
+#define __NR_munmap (__NR_SYSCALL_BASE+91)
+#endif
+
+#if !defined(__NR_truncate)
+#define __NR_truncate (__NR_SYSCALL_BASE+92)
+#endif
+
+#if !defined(__NR_ftruncate)
+#define __NR_ftruncate (__NR_SYSCALL_BASE+93)
+#endif
+
+#if !defined(__NR_fchmod)
+#define __NR_fchmod (__NR_SYSCALL_BASE+94)
+#endif
+
+#if !defined(__NR_fchown)
+#define __NR_fchown (__NR_SYSCALL_BASE+95)
+#endif
+
+#if !defined(__NR_getpriority)
+#define __NR_getpriority (__NR_SYSCALL_BASE+96)
+#endif
+
+#if !defined(__NR_setpriority)
+#define __NR_setpriority (__NR_SYSCALL_BASE+97)
+#endif
+
+#if !defined(__NR_statfs)
+#define __NR_statfs (__NR_SYSCALL_BASE+99)
+#endif
+
+#if !defined(__NR_fstatfs)
+#define __NR_fstatfs (__NR_SYSCALL_BASE+100)
+#endif
+
+#if !defined(__NR_syslog)
+#define __NR_syslog (__NR_SYSCALL_BASE+103)
+#endif
+
+#if !defined(__NR_setitimer)
+#define __NR_setitimer (__NR_SYSCALL_BASE+104)
+#endif
+
+#if !defined(__NR_getitimer)
+#define __NR_getitimer (__NR_SYSCALL_BASE+105)
+#endif
+
+#if !defined(__NR_stat)
+#define __NR_stat (__NR_SYSCALL_BASE+106)
+#endif
+
+#if !defined(__NR_lstat)
+#define __NR_lstat (__NR_SYSCALL_BASE+107)
+#endif
+
+#if !defined(__NR_fstat)
+#define __NR_fstat (__NR_SYSCALL_BASE+108)
+#endif
+
+#if !defined(__NR_vhangup)
+#define __NR_vhangup (__NR_SYSCALL_BASE+111)
+#endif
+
+#if !defined(__NR_wait4)
+#define __NR_wait4 (__NR_SYSCALL_BASE+114)
+#endif
+
+#if !defined(__NR_swapoff)
+#define __NR_swapoff (__NR_SYSCALL_BASE+115)
+#endif
+
+#if !defined(__NR_sysinfo)
+#define __NR_sysinfo (__NR_SYSCALL_BASE+116)
+#endif
+
+#if !defined(__NR_fsync)
+#define __NR_fsync (__NR_SYSCALL_BASE+118)
+#endif
+
+#if !defined(__NR_sigreturn)
+#define __NR_sigreturn (__NR_SYSCALL_BASE+119)
+#endif
+
+#if !defined(__NR_clone)
+#define __NR_clone (__NR_SYSCALL_BASE+120)
+#endif
+
+#if !defined(__NR_setdomainname)
+#define __NR_setdomainname (__NR_SYSCALL_BASE+121)
+#endif
+
+#if !defined(__NR_uname)
+#define __NR_uname (__NR_SYSCALL_BASE+122)
+#endif
+
+#if !defined(__NR_adjtimex)
+#define __NR_adjtimex (__NR_SYSCALL_BASE+124)
+#endif
+
+#if !defined(__NR_mprotect)
+#define __NR_mprotect (__NR_SYSCALL_BASE+125)
+#endif
+
+#if !defined(__NR_sigprocmask)
+#define __NR_sigprocmask (__NR_SYSCALL_BASE+126)
+#endif
+
+#if !defined(__NR_init_module)
+#define __NR_init_module (__NR_SYSCALL_BASE+128)
+#endif
+
+#if !defined(__NR_delete_module)
+#define __NR_delete_module (__NR_SYSCALL_BASE+129)
+#endif
+
+#if !defined(__NR_quotactl)
+#define __NR_quotactl (__NR_SYSCALL_BASE+131)
+#endif
+
+#if !defined(__NR_getpgid)
+#define __NR_getpgid (__NR_SYSCALL_BASE+132)
+#endif
+
+#if !defined(__NR_fchdir)
+#define __NR_fchdir (__NR_SYSCALL_BASE+133)
+#endif
+
+#if !defined(__NR_bdflush)
+#define __NR_bdflush (__NR_SYSCALL_BASE+134)
+#endif
+
+#if !defined(__NR_sysfs)
+#define __NR_sysfs (__NR_SYSCALL_BASE+135)
+#endif
+
+#if !defined(__NR_personality)
+#define __NR_personality (__NR_SYSCALL_BASE+136)
+#endif
+
+#if !defined(__NR_setfsuid)
+#define __NR_setfsuid (__NR_SYSCALL_BASE+138)
+#endif
+
+#if !defined(__NR_setfsgid)
+#define __NR_setfsgid (__NR_SYSCALL_BASE+139)
+#endif
+
+#if !defined(__NR__llseek)
+#define __NR__llseek (__NR_SYSCALL_BASE+140)
+#endif
+
+#if !defined(__NR_getdents)
+#define __NR_getdents (__NR_SYSCALL_BASE+141)
+#endif
+
+#if !defined(__NR__newselect)
+#define __NR__newselect (__NR_SYSCALL_BASE+142)
+#endif
+
+#if !defined(__NR_flock)
+#define __NR_flock (__NR_SYSCALL_BASE+143)
+#endif
+
+#if !defined(__NR_msync)
+#define __NR_msync (__NR_SYSCALL_BASE+144)
+#endif
+
+#if !defined(__NR_readv)
+#define __NR_readv (__NR_SYSCALL_BASE+145)
+#endif
+
+#if !defined(__NR_writev)
+#define __NR_writev (__NR_SYSCALL_BASE+146)
+#endif
+
+#if !defined(__NR_getsid)
+#define __NR_getsid (__NR_SYSCALL_BASE+147)
+#endif
+
+#if !defined(__NR_fdatasync)
+#define __NR_fdatasync (__NR_SYSCALL_BASE+148)
+#endif
+
+#if !defined(__NR__sysctl)
+#define __NR__sysctl (__NR_SYSCALL_BASE+149)
+#endif
+
+#if !defined(__NR_mlock)
+#define __NR_mlock (__NR_SYSCALL_BASE+150)
+#endif
+
+#if !defined(__NR_munlock)
+#define __NR_munlock (__NR_SYSCALL_BASE+151)
+#endif
+
+#if !defined(__NR_mlockall)
+#define __NR_mlockall (__NR_SYSCALL_BASE+152)
+#endif
+
+#if !defined(__NR_munlockall)
+#define __NR_munlockall (__NR_SYSCALL_BASE+153)
+#endif
+
+#if !defined(__NR_sched_setparam)
+#define __NR_sched_setparam (__NR_SYSCALL_BASE+154)
+#endif
+
+#if !defined(__NR_sched_getparam)
+#define __NR_sched_getparam (__NR_SYSCALL_BASE+155)
+#endif
+
+#if !defined(__NR_sched_setscheduler)
+#define __NR_sched_setscheduler (__NR_SYSCALL_BASE+156)
+#endif
+
+#if !defined(__NR_sched_getscheduler)
+#define __NR_sched_getscheduler (__NR_SYSCALL_BASE+157)
+#endif
+
+#if !defined(__NR_sched_yield)
+#define __NR_sched_yield (__NR_SYSCALL_BASE+158)
+#endif
+
+#if !defined(__NR_sched_get_priority_max)
+#define __NR_sched_get_priority_max (__NR_SYSCALL_BASE+159)
+#endif
+
+#if !defined(__NR_sched_get_priority_min)
+#define __NR_sched_get_priority_min (__NR_SYSCALL_BASE+160)
+#endif
+
+#if !defined(__NR_sched_rr_get_interval)
+#define __NR_sched_rr_get_interval (__NR_SYSCALL_BASE+161)
+#endif
+
+#if !defined(__NR_nanosleep)
+#define __NR_nanosleep (__NR_SYSCALL_BASE+162)
+#endif
+
+#if !defined(__NR_mremap)
+#define __NR_mremap (__NR_SYSCALL_BASE+163)
+#endif
+
+#if !defined(__NR_setresuid)
+#define __NR_setresuid (__NR_SYSCALL_BASE+164)
+#endif
+
+#if !defined(__NR_getresuid)
+#define __NR_getresuid (__NR_SYSCALL_BASE+165)
+#endif
+
+#if !defined(__NR_poll)
+#define __NR_poll (__NR_SYSCALL_BASE+168)
+#endif
+
+#if !defined(__NR_nfsservctl)
+#define __NR_nfsservctl (__NR_SYSCALL_BASE+169)
+#endif
+
+#if !defined(__NR_setresgid)
+#define __NR_setresgid (__NR_SYSCALL_BASE+170)
+#endif
+
+#if !defined(__NR_getresgid)
+#define __NR_getresgid (__NR_SYSCALL_BASE+171)
+#endif
+
+#if !defined(__NR_prctl)
+#define __NR_prctl (__NR_SYSCALL_BASE+172)
+#endif
+
+#if !defined(__NR_rt_sigreturn)
+#define __NR_rt_sigreturn (__NR_SYSCALL_BASE+173)
+#endif
+
+#if !defined(__NR_rt_sigaction)
+#define __NR_rt_sigaction (__NR_SYSCALL_BASE+174)
+#endif
+
+#if !defined(__NR_rt_sigprocmask)
+#define __NR_rt_sigprocmask (__NR_SYSCALL_BASE+175)
+#endif
+
+#if !defined(__NR_rt_sigpending)
+#define __NR_rt_sigpending (__NR_SYSCALL_BASE+176)
+#endif
+
+#if !defined(__NR_rt_sigtimedwait)
+#define __NR_rt_sigtimedwait (__NR_SYSCALL_BASE+177)
+#endif
+
+#if !defined(__NR_rt_sigqueueinfo)
+#define __NR_rt_sigqueueinfo (__NR_SYSCALL_BASE+178)
+#endif
+
+#if !defined(__NR_rt_sigsuspend)
+#define __NR_rt_sigsuspend (__NR_SYSCALL_BASE+179)
+#endif
+
+#if !defined(__NR_pread64)
+#define __NR_pread64 (__NR_SYSCALL_BASE+180)
+#endif
+
+#if !defined(__NR_pwrite64)
+#define __NR_pwrite64 (__NR_SYSCALL_BASE+181)
+#endif
+
+#if !defined(__NR_chown)
+#define __NR_chown (__NR_SYSCALL_BASE+182)
+#endif
+
+#if !defined(__NR_getcwd)
+#define __NR_getcwd (__NR_SYSCALL_BASE+183)
+#endif
+
+#if !defined(__NR_capget)
+#define __NR_capget (__NR_SYSCALL_BASE+184)
+#endif
+
+#if !defined(__NR_capset)
+#define __NR_capset (__NR_SYSCALL_BASE+185)
+#endif
+
+#if !defined(__NR_sigaltstack)
+#define __NR_sigaltstack (__NR_SYSCALL_BASE+186)
+#endif
+
+#if !defined(__NR_sendfile)
+#define __NR_sendfile (__NR_SYSCALL_BASE+187)
+#endif
+
+#if !defined(__NR_vfork)
+#define __NR_vfork (__NR_SYSCALL_BASE+190)
+#endif
+
+#if !defined(__NR_ugetrlimit)
+#define __NR_ugetrlimit (__NR_SYSCALL_BASE+191)
+#endif
+
+#if !defined(__NR_mmap2)
+#define __NR_mmap2 (__NR_SYSCALL_BASE+192)
+#endif
+
+#if !defined(__NR_truncate64)
+#define __NR_truncate64 (__NR_SYSCALL_BASE+193)
+#endif
+
+#if !defined(__NR_ftruncate64)
+#define __NR_ftruncate64 (__NR_SYSCALL_BASE+194)
+#endif
+
+#if !defined(__NR_stat64)
+#define __NR_stat64 (__NR_SYSCALL_BASE+195)
+#endif
+
+#if !defined(__NR_lstat64)
+#define __NR_lstat64 (__NR_SYSCALL_BASE+196)
+#endif
+
+#if !defined(__NR_fstat64)
+#define __NR_fstat64 (__NR_SYSCALL_BASE+197)
+#endif
+
+#if !defined(__NR_lchown32)
+#define __NR_lchown32 (__NR_SYSCALL_BASE+198)
+#endif
+
+#if !defined(__NR_getuid32)
+#define __NR_getuid32 (__NR_SYSCALL_BASE+199)
+#endif
+
+#if !defined(__NR_getgid32)
+#define __NR_getgid32 (__NR_SYSCALL_BASE+200)
+#endif
+
+#if !defined(__NR_geteuid32)
+#define __NR_geteuid32 (__NR_SYSCALL_BASE+201)
+#endif
+
+#if !defined(__NR_getegid32)
+#define __NR_getegid32 (__NR_SYSCALL_BASE+202)
+#endif
+
+#if !defined(__NR_setreuid32)
+#define __NR_setreuid32 (__NR_SYSCALL_BASE+203)
+#endif
+
+#if !defined(__NR_setregid32)
+#define __NR_setregid32 (__NR_SYSCALL_BASE+204)
+#endif
+
+#if !defined(__NR_getgroups32)
+#define __NR_getgroups32 (__NR_SYSCALL_BASE+205)
+#endif
+
+#if !defined(__NR_setgroups32)
+#define __NR_setgroups32 (__NR_SYSCALL_BASE+206)
+#endif
+
+#if !defined(__NR_fchown32)
+#define __NR_fchown32 (__NR_SYSCALL_BASE+207)
+#endif
+
+#if !defined(__NR_setresuid32)
+#define __NR_setresuid32 (__NR_SYSCALL_BASE+208)
+#endif
+
+#if !defined(__NR_getresuid32)
+#define __NR_getresuid32 (__NR_SYSCALL_BASE+209)
+#endif
+
+#if !defined(__NR_setresgid32)
+#define __NR_setresgid32 (__NR_SYSCALL_BASE+210)
+#endif
+
+#if !defined(__NR_getresgid32)
+#define __NR_getresgid32 (__NR_SYSCALL_BASE+211)
+#endif
+
+#if !defined(__NR_chown32)
+#define __NR_chown32 (__NR_SYSCALL_BASE+212)
+#endif
+
+#if !defined(__NR_setuid32)
+#define __NR_setuid32 (__NR_SYSCALL_BASE+213)
+#endif
+
+#if !defined(__NR_setgid32)
+#define __NR_setgid32 (__NR_SYSCALL_BASE+214)
+#endif
+
+#if !defined(__NR_setfsuid32)
+#define __NR_setfsuid32 (__NR_SYSCALL_BASE+215)
+#endif
+
+#if !defined(__NR_setfsgid32)
+#define __NR_setfsgid32 (__NR_SYSCALL_BASE+216)
+#endif
+
+#if !defined(__NR_getdents64)
+#define __NR_getdents64 (__NR_SYSCALL_BASE+217)
+#endif
+
+#if !defined(__NR_pivot_root)
+#define __NR_pivot_root (__NR_SYSCALL_BASE+218)
+#endif
+
+#if !defined(__NR_mincore)
+#define __NR_mincore (__NR_SYSCALL_BASE+219)
+#endif
+
+#if !defined(__NR_madvise)
+#define __NR_madvise (__NR_SYSCALL_BASE+220)
+#endif
+
+#if !defined(__NR_fcntl64)
+#define __NR_fcntl64 (__NR_SYSCALL_BASE+221)
+#endif
+
+#if !defined(__NR_gettid)
+#define __NR_gettid (__NR_SYSCALL_BASE+224)
+#endif
+
+#if !defined(__NR_readahead)
+#define __NR_readahead (__NR_SYSCALL_BASE+225)
+#endif
+
+#if !defined(__NR_setxattr)
+#define __NR_setxattr (__NR_SYSCALL_BASE+226)
+#endif
+
+#if !defined(__NR_lsetxattr)
+#define __NR_lsetxattr (__NR_SYSCALL_BASE+227)
+#endif
+
+#if !defined(__NR_fsetxattr)
+#define __NR_fsetxattr (__NR_SYSCALL_BASE+228)
+#endif
+
+#if !defined(__NR_getxattr)
+#define __NR_getxattr (__NR_SYSCALL_BASE+229)
+#endif
+
+#if !defined(__NR_lgetxattr)
+#define __NR_lgetxattr (__NR_SYSCALL_BASE+230)
+#endif
+
+#if !defined(__NR_fgetxattr)
+#define __NR_fgetxattr (__NR_SYSCALL_BASE+231)
+#endif
+
+#if !defined(__NR_listxattr)
+#define __NR_listxattr (__NR_SYSCALL_BASE+232)
+#endif
+
+#if !defined(__NR_llistxattr)
+#define __NR_llistxattr (__NR_SYSCALL_BASE+233)
+#endif
+
+#if !defined(__NR_flistxattr)
+#define __NR_flistxattr (__NR_SYSCALL_BASE+234)
+#endif
+
+#if !defined(__NR_removexattr)
+#define __NR_removexattr (__NR_SYSCALL_BASE+235)
+#endif
+
+#if !defined(__NR_lremovexattr)
+#define __NR_lremovexattr (__NR_SYSCALL_BASE+236)
+#endif
+
+#if !defined(__NR_fremovexattr)
+#define __NR_fremovexattr (__NR_SYSCALL_BASE+237)
+#endif
+
+#if !defined(__NR_tkill)
+#define __NR_tkill (__NR_SYSCALL_BASE+238)
+#endif
+
+#if !defined(__NR_sendfile64)
+#define __NR_sendfile64 (__NR_SYSCALL_BASE+239)
+#endif
+
+#if !defined(__NR_futex)
+#define __NR_futex (__NR_SYSCALL_BASE+240)
+#endif
+
+#if !defined(__NR_sched_setaffinity)
+#define __NR_sched_setaffinity (__NR_SYSCALL_BASE+241)
+#endif
+
+#if !defined(__NR_sched_getaffinity)
+#define __NR_sched_getaffinity (__NR_SYSCALL_BASE+242)
+#endif
+
+#if !defined(__NR_io_setup)
+#define __NR_io_setup (__NR_SYSCALL_BASE+243)
+#endif
+
+#if !defined(__NR_io_destroy)
+#define __NR_io_destroy (__NR_SYSCALL_BASE+244)
+#endif
+
+#if !defined(__NR_io_getevents)
+#define __NR_io_getevents (__NR_SYSCALL_BASE+245)
+#endif
+
+#if !defined(__NR_io_submit)
+#define __NR_io_submit (__NR_SYSCALL_BASE+246)
+#endif
+
+#if !defined(__NR_io_cancel)
+#define __NR_io_cancel (__NR_SYSCALL_BASE+247)
+#endif
+
+#if !defined(__NR_exit_group)
+#define __NR_exit_group (__NR_SYSCALL_BASE+248)
+#endif
+
+#if !defined(__NR_lookup_dcookie)
+#define __NR_lookup_dcookie (__NR_SYSCALL_BASE+249)
+#endif
+
+#if !defined(__NR_epoll_create)
+#define __NR_epoll_create (__NR_SYSCALL_BASE+250)
+#endif
+
+#if !defined(__NR_epoll_ctl)
+#define __NR_epoll_ctl (__NR_SYSCALL_BASE+251)
+#endif
+
+#if !defined(__NR_epoll_wait)
+#define __NR_epoll_wait (__NR_SYSCALL_BASE+252)
+#endif
+
+#if !defined(__NR_remap_file_pages)
+#define __NR_remap_file_pages (__NR_SYSCALL_BASE+253)
+#endif
+
+#if !defined(__NR_set_tid_address)
+#define __NR_set_tid_address (__NR_SYSCALL_BASE+256)
+#endif
+
+#if !defined(__NR_timer_create)
+#define __NR_timer_create (__NR_SYSCALL_BASE+257)
+#endif
+
+#if !defined(__NR_timer_settime)
+#define __NR_timer_settime (__NR_SYSCALL_BASE+258)
+#endif
+
+#if !defined(__NR_timer_gettime)
+#define __NR_timer_gettime (__NR_SYSCALL_BASE+259)
+#endif
+
+#if !defined(__NR_timer_getoverrun)
+#define __NR_timer_getoverrun (__NR_SYSCALL_BASE+260)
+#endif
+
+#if !defined(__NR_timer_delete)
+#define __NR_timer_delete (__NR_SYSCALL_BASE+261)
+#endif
+
+#if !defined(__NR_clock_settime)
+#define __NR_clock_settime (__NR_SYSCALL_BASE+262)
+#endif
+
+#if !defined(__NR_clock_gettime)
+#define __NR_clock_gettime (__NR_SYSCALL_BASE+263)
+#endif
+
+#if !defined(__NR_clock_getres)
+#define __NR_clock_getres (__NR_SYSCALL_BASE+264)
+#endif
+
+#if !defined(__NR_clock_nanosleep)
+#define __NR_clock_nanosleep (__NR_SYSCALL_BASE+265)
+#endif
+
+#if !defined(__NR_statfs64)
+#define __NR_statfs64 (__NR_SYSCALL_BASE+266)
+#endif
+
+#if !defined(__NR_fstatfs64)
+#define __NR_fstatfs64 (__NR_SYSCALL_BASE+267)
+#endif
+
+#if !defined(__NR_tgkill)
+#define __NR_tgkill (__NR_SYSCALL_BASE+268)
+#endif
+
+#if !defined(__NR_utimes)
+#define __NR_utimes (__NR_SYSCALL_BASE+269)
+#endif
+
+#if !defined(__NR_arm_fadvise64_64)
+#define __NR_arm_fadvise64_64 (__NR_SYSCALL_BASE+270)
+#endif
+
+#if !defined(__NR_pciconfig_iobase)
+#define __NR_pciconfig_iobase (__NR_SYSCALL_BASE+271)
+#endif
+
+#if !defined(__NR_pciconfig_read)
+#define __NR_pciconfig_read (__NR_SYSCALL_BASE+272)
+#endif
+
+#if !defined(__NR_pciconfig_write)
+#define __NR_pciconfig_write (__NR_SYSCALL_BASE+273)
+#endif
+
+#if !defined(__NR_mq_open)
+#define __NR_mq_open (__NR_SYSCALL_BASE+274)
+#endif
+
+#if !defined(__NR_mq_unlink)
+#define __NR_mq_unlink (__NR_SYSCALL_BASE+275)
+#endif
+
+#if !defined(__NR_mq_timedsend)
+#define __NR_mq_timedsend (__NR_SYSCALL_BASE+276)
+#endif
+
+#if !defined(__NR_mq_timedreceive)
+#define __NR_mq_timedreceive (__NR_SYSCALL_BASE+277)
+#endif
+
+#if !defined(__NR_mq_notify)
+#define __NR_mq_notify (__NR_SYSCALL_BASE+278)
+#endif
+
+#if !defined(__NR_mq_getsetattr)
+#define __NR_mq_getsetattr (__NR_SYSCALL_BASE+279)
+#endif
+
+#if !defined(__NR_waitid)
+#define __NR_waitid (__NR_SYSCALL_BASE+280)
+#endif
+
+#if !defined(__NR_socket)
+#define __NR_socket (__NR_SYSCALL_BASE+281)
+#endif
+
+#if !defined(__NR_bind)
+#define __NR_bind (__NR_SYSCALL_BASE+282)
+#endif
+
+#if !defined(__NR_connect)
+#define __NR_connect (__NR_SYSCALL_BASE+283)
+#endif
+
+#if !defined(__NR_listen)
+#define __NR_listen (__NR_SYSCALL_BASE+284)
+#endif
+
+#if !defined(__NR_accept)
+#define __NR_accept (__NR_SYSCALL_BASE+285)
+#endif
+
+#if !defined(__NR_getsockname)
+#define __NR_getsockname (__NR_SYSCALL_BASE+286)
+#endif
+
+#if !defined(__NR_getpeername)
+#define __NR_getpeername (__NR_SYSCALL_BASE+287)
+#endif
+
+#if !defined(__NR_socketpair)
+#define __NR_socketpair (__NR_SYSCALL_BASE+288)
+#endif
+
+#if !defined(__NR_send)
+#define __NR_send (__NR_SYSCALL_BASE+289)
+#endif
+
+#if !defined(__NR_sendto)
+#define __NR_sendto (__NR_SYSCALL_BASE+290)
+#endif
+
+#if !defined(__NR_recv)
+#define __NR_recv (__NR_SYSCALL_BASE+291)
+#endif
+
+#if !defined(__NR_recvfrom)
+#define __NR_recvfrom (__NR_SYSCALL_BASE+292)
+#endif
+
+#if !defined(__NR_shutdown)
+#define __NR_shutdown (__NR_SYSCALL_BASE+293)
+#endif
+
+#if !defined(__NR_setsockopt)
+#define __NR_setsockopt (__NR_SYSCALL_BASE+294)
+#endif
+
+#if !defined(__NR_getsockopt)
+#define __NR_getsockopt (__NR_SYSCALL_BASE+295)
+#endif
+
+#if !defined(__NR_sendmsg)
+#define __NR_sendmsg (__NR_SYSCALL_BASE+296)
+#endif
+
+#if !defined(__NR_recvmsg)
+#define __NR_recvmsg (__NR_SYSCALL_BASE+297)
+#endif
+
+#if !defined(__NR_semop)
+#define __NR_semop (__NR_SYSCALL_BASE+298)
+#endif
+
+#if !defined(__NR_semget)
+#define __NR_semget (__NR_SYSCALL_BASE+299)
+#endif
+
+#if !defined(__NR_semctl)
+#define __NR_semctl (__NR_SYSCALL_BASE+300)
+#endif
+
+#if !defined(__NR_msgsnd)
+#define __NR_msgsnd (__NR_SYSCALL_BASE+301)
+#endif
+
+#if !defined(__NR_msgrcv)
+#define __NR_msgrcv (__NR_SYSCALL_BASE+302)
+#endif
+
+#if !defined(__NR_msgget)
+#define __NR_msgget (__NR_SYSCALL_BASE+303)
+#endif
+
+#if !defined(__NR_msgctl)
+#define __NR_msgctl (__NR_SYSCALL_BASE+304)
+#endif
+
+#if !defined(__NR_shmat)
+#define __NR_shmat (__NR_SYSCALL_BASE+305)
+#endif
+
+#if !defined(__NR_shmdt)
+#define __NR_shmdt (__NR_SYSCALL_BASE+306)
+#endif
+
+#if !defined(__NR_shmget)
+#define __NR_shmget (__NR_SYSCALL_BASE+307)
+#endif
+
+#if !defined(__NR_shmctl)
+#define __NR_shmctl (__NR_SYSCALL_BASE+308)
+#endif
+
+#if !defined(__NR_add_key)
+#define __NR_add_key (__NR_SYSCALL_BASE+309)
+#endif
+
+#if !defined(__NR_request_key)
+#define __NR_request_key (__NR_SYSCALL_BASE+310)
+#endif
+
+#if !defined(__NR_keyctl)
+#define __NR_keyctl (__NR_SYSCALL_BASE+311)
+#endif
+
+#if !defined(__NR_semtimedop)
+#define __NR_semtimedop (__NR_SYSCALL_BASE+312)
+#endif
+
+#if !defined(__NR_vserver)
+#define __NR_vserver (__NR_SYSCALL_BASE+313)
+#endif
+
+#if !defined(__NR_ioprio_set)
+#define __NR_ioprio_set (__NR_SYSCALL_BASE+314)
+#endif
+
+#if !defined(__NR_ioprio_get)
+#define __NR_ioprio_get (__NR_SYSCALL_BASE+315)
+#endif
+
+#if !defined(__NR_inotify_init)
+#define __NR_inotify_init (__NR_SYSCALL_BASE+316)
+#endif
+
+#if !defined(__NR_inotify_add_watch)
+#define __NR_inotify_add_watch (__NR_SYSCALL_BASE+317)
+#endif
+
+#if !defined(__NR_inotify_rm_watch)
+#define __NR_inotify_rm_watch (__NR_SYSCALL_BASE+318)
+#endif
+
+#if !defined(__NR_mbind)
+#define __NR_mbind (__NR_SYSCALL_BASE+319)
+#endif
+
+#if !defined(__NR_get_mempolicy)
+#define __NR_get_mempolicy (__NR_SYSCALL_BASE+320)
+#endif
+
+#if !defined(__NR_set_mempolicy)
+#define __NR_set_mempolicy (__NR_SYSCALL_BASE+321)
+#endif
+
+#if !defined(__NR_openat)
+#define __NR_openat (__NR_SYSCALL_BASE+322)
+#endif
+
+#if !defined(__NR_mkdirat)
+#define __NR_mkdirat (__NR_SYSCALL_BASE+323)
+#endif
+
+#if !defined(__NR_mknodat)
+#define __NR_mknodat (__NR_SYSCALL_BASE+324)
+#endif
+
+#if !defined(__NR_fchownat)
+#define __NR_fchownat (__NR_SYSCALL_BASE+325)
+#endif
+
+#if !defined(__NR_futimesat)
+#define __NR_futimesat (__NR_SYSCALL_BASE+326)
+#endif
+
+#if !defined(__NR_fstatat64)
+#define __NR_fstatat64 (__NR_SYSCALL_BASE+327)
+#endif
+
+#if !defined(__NR_unlinkat)
+#define __NR_unlinkat (__NR_SYSCALL_BASE+328)
+#endif
+
+#if !defined(__NR_renameat)
+#define __NR_renameat (__NR_SYSCALL_BASE+329)
+#endif
+
+#if !defined(__NR_linkat)
+#define __NR_linkat (__NR_SYSCALL_BASE+330)
+#endif
+
+#if !defined(__NR_symlinkat)
+#define __NR_symlinkat (__NR_SYSCALL_BASE+331)
+#endif
+
+#if !defined(__NR_readlinkat)
+#define __NR_readlinkat (__NR_SYSCALL_BASE+332)
+#endif
+
+#if !defined(__NR_fchmodat)
+#define __NR_fchmodat (__NR_SYSCALL_BASE+333)
+#endif
+
+#if !defined(__NR_faccessat)
+#define __NR_faccessat (__NR_SYSCALL_BASE+334)
+#endif
+
+#if !defined(__NR_pselect6)
+#define __NR_pselect6 (__NR_SYSCALL_BASE+335)
+#endif
+
+#if !defined(__NR_ppoll)
+#define __NR_ppoll (__NR_SYSCALL_BASE+336)
+#endif
+
+#if !defined(__NR_unshare)
+#define __NR_unshare (__NR_SYSCALL_BASE+337)
+#endif
+
+#if !defined(__NR_set_robust_list)
+#define __NR_set_robust_list (__NR_SYSCALL_BASE+338)
+#endif
+
+#if !defined(__NR_get_robust_list)
+#define __NR_get_robust_list (__NR_SYSCALL_BASE+339)
+#endif
+
+#if !defined(__NR_splice)
+#define __NR_splice (__NR_SYSCALL_BASE+340)
+#endif
+
+#if !defined(__NR_arm_sync_file_range)
+#define __NR_arm_sync_file_range (__NR_SYSCALL_BASE+341)
+#endif
+
+#if !defined(__NR_sync_file_range2)
+#define __NR_sync_file_range2 (__NR_SYSCALL_BASE+341)
+#endif
+
+#if !defined(__NR_tee)
+#define __NR_tee (__NR_SYSCALL_BASE+342)
+#endif
+
+#if !defined(__NR_vmsplice)
+#define __NR_vmsplice (__NR_SYSCALL_BASE+343)
+#endif
+
+#if !defined(__NR_move_pages)
+#define __NR_move_pages (__NR_SYSCALL_BASE+344)
+#endif
+
+#if !defined(__NR_getcpu)
+#define __NR_getcpu (__NR_SYSCALL_BASE+345)
+#endif
+
+#if !defined(__NR_epoll_pwait)
+#define __NR_epoll_pwait (__NR_SYSCALL_BASE+346)
+#endif
+
+#if !defined(__NR_kexec_load)
+#define __NR_kexec_load (__NR_SYSCALL_BASE+347)
+#endif
+
+#if !defined(__NR_utimensat)
+#define __NR_utimensat (__NR_SYSCALL_BASE+348)
+#endif
+
+#if !defined(__NR_signalfd)
+#define __NR_signalfd (__NR_SYSCALL_BASE+349)
+#endif
+
+#if !defined(__NR_timerfd_create)
+#define __NR_timerfd_create (__NR_SYSCALL_BASE+350)
+#endif
+
+#if !defined(__NR_eventfd)
+#define __NR_eventfd (__NR_SYSCALL_BASE+351)
+#endif
+
+#if !defined(__NR_fallocate)
+#define __NR_fallocate (__NR_SYSCALL_BASE+352)
+#endif
+
+#if !defined(__NR_timerfd_settime)
+#define __NR_timerfd_settime (__NR_SYSCALL_BASE+353)
+#endif
+
+#if !defined(__NR_timerfd_gettime)
+#define __NR_timerfd_gettime (__NR_SYSCALL_BASE+354)
+#endif
+
+#if !defined(__NR_signalfd4)
+#define __NR_signalfd4 (__NR_SYSCALL_BASE+355)
+#endif
+
+#if !defined(__NR_eventfd2)
+#define __NR_eventfd2 (__NR_SYSCALL_BASE+356)
+#endif
+
+#if !defined(__NR_epoll_create1)
+#define __NR_epoll_create1 (__NR_SYSCALL_BASE+357)
+#endif
+
+#if !defined(__NR_dup3)
+#define __NR_dup3 (__NR_SYSCALL_BASE+358)
+#endif
+
+#if !defined(__NR_pipe2)
+#define __NR_pipe2 (__NR_SYSCALL_BASE+359)
+#endif
+
+#if !defined(__NR_inotify_init1)
+#define __NR_inotify_init1 (__NR_SYSCALL_BASE+360)
+#endif
+
+#if !defined(__NR_preadv)
+#define __NR_preadv (__NR_SYSCALL_BASE+361)
+#endif
+
+#if !defined(__NR_pwritev)
+#define __NR_pwritev (__NR_SYSCALL_BASE+362)
+#endif
+
+#if !defined(__NR_rt_tgsigqueueinfo)
+#define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363)
+#endif
+
+#if !defined(__NR_perf_event_open)
+#define __NR_perf_event_open (__NR_SYSCALL_BASE+364)
+#endif
+
+#if !defined(__NR_recvmmsg)
+#define __NR_recvmmsg (__NR_SYSCALL_BASE+365)
+#endif
+
+#if !defined(__NR_accept4)
+#define __NR_accept4 (__NR_SYSCALL_BASE+366)
+#endif
+
+#if !defined(__NR_fanotify_init)
+#define __NR_fanotify_init (__NR_SYSCALL_BASE+367)
+#endif
+
+#if !defined(__NR_fanotify_mark)
+#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368)
+#endif
+
+#if !defined(__NR_prlimit64)
+#define __NR_prlimit64 (__NR_SYSCALL_BASE+369)
+#endif
+
+#if !defined(__NR_name_to_handle_at)
+#define __NR_name_to_handle_at (__NR_SYSCALL_BASE+370)
+#endif
+
+#if !defined(__NR_open_by_handle_at)
+#define __NR_open_by_handle_at (__NR_SYSCALL_BASE+371)
+#endif
+
+#if !defined(__NR_clock_adjtime)
+#define __NR_clock_adjtime (__NR_SYSCALL_BASE+372)
+#endif
+
+#if !defined(__NR_syncfs)
+#define __NR_syncfs (__NR_SYSCALL_BASE+373)
+#endif
+
+#if !defined(__NR_sendmmsg)
+#define __NR_sendmmsg (__NR_SYSCALL_BASE+374)
+#endif
+
+#if !defined(__NR_setns)
+#define __NR_setns (__NR_SYSCALL_BASE+375)
+#endif
+
+#if !defined(__NR_process_vm_readv)
+#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376)
+#endif
+
+#if !defined(__NR_process_vm_writev)
+#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
+#endif
+
+#if !defined(__NR_kcmp)
+#define __NR_kcmp (__NR_SYSCALL_BASE+378)
+#endif
+
+#if !defined(__NR_finit_module)
+#define __NR_finit_module (__NR_SYSCALL_BASE+379)
+#endif
+
+#if !defined(__NR_sched_setattr)
+#define __NR_sched_setattr (__NR_SYSCALL_BASE+380)
+#endif
+
+#if !defined(__NR_sched_getattr)
+#define __NR_sched_getattr (__NR_SYSCALL_BASE+381)
+#endif
+
+#if !defined(__NR_renameat2)
+#define __NR_renameat2 (__NR_SYSCALL_BASE+382)
+#endif
+
+#if !defined(__NR_seccomp)
+#define __NR_seccomp (__NR_SYSCALL_BASE+383)
+#endif
+
+#if !defined(__NR_getrandom)
+#define __NR_getrandom (__NR_SYSCALL_BASE+384)
+#endif
+
+#if !defined(__NR_memfd_create)
+#define __NR_memfd_create (__NR_SYSCALL_BASE+385)
+#endif
+
+// ARM private syscalls.
+#if !defined(__ARM_NR_BASE)
+#define __ARM_NR_BASE (__NR_SYSCALL_BASE + 0xF0000)
+#endif
+
+#if !defined(__ARM_NR_breakpoint)
+#define __ARM_NR_breakpoint (__ARM_NR_BASE+1)
+#endif
+
+#if !defined(__ARM_NR_cacheflush)
+#define __ARM_NR_cacheflush (__ARM_NR_BASE+2)
+#endif
+
+#if !defined(__ARM_NR_usr26)
+#define __ARM_NR_usr26 (__ARM_NR_BASE+3)
+#endif
+
+#if !defined(__ARM_NR_usr32)
+#define __ARM_NR_usr32 (__ARM_NR_BASE+4)
+#endif
+
+#if !defined(__ARM_NR_set_tls)
+#define __ARM_NR_set_tls (__ARM_NR_BASE+5)
+#endif
+
+// ARM kernel private syscall.
+#if !defined(__ARM_NR_cmpxchg)
+#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0)
+#endif
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_ARM_LINUX_SYSCALLS_H_
diff --git a/libchrome/sandbox/linux/system_headers/arm_linux_ucontext.h b/libchrome/sandbox/linux/system_headers/arm_linux_ucontext.h
new file mode 100644
index 0000000..35208fa
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/arm_linux_ucontext.h
@@ -0,0 +1,69 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_ARM_LINUX_UCONTEXT_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_ARM_LINUX_UCONTEXT_H_
+
+#include <stddef.h>
+
+#if !defined(__BIONIC_HAVE_UCONTEXT_T)
+#if !defined(__native_client_nonsfi__)
+#include <asm/sigcontext.h>
+#else
+// In the PNaCl toolchain, sigcontext and stack_t are not defined, so declare
+// them here.
+struct sigcontext {
+ unsigned long trap_no;
+ unsigned long error_code;
+ unsigned long oldmask;
+ unsigned long arm_r0;
+ unsigned long arm_r1;
+ unsigned long arm_r2;
+ unsigned long arm_r3;
+ unsigned long arm_r4;
+ unsigned long arm_r5;
+ unsigned long arm_r6;
+ unsigned long arm_r7;
+ unsigned long arm_r8;
+ unsigned long arm_r9;
+ unsigned long arm_r10;
+ unsigned long arm_fp;
+ unsigned long arm_ip;
+ unsigned long arm_sp;
+ unsigned long arm_lr;
+ unsigned long arm_pc;
+ unsigned long arm_cpsr;
+ unsigned long fault_address;
+};
+
+typedef struct sigaltstack {
+ void* ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+#endif
+
+// We also need greg_t for the sandbox, include it in this header as well.
+typedef unsigned long greg_t;
+
+// typedef unsigned long sigset_t;
+typedef struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ sigset_t uc_sigmask;
+ /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
+ int __not_used[32 - (sizeof(sigset_t) / sizeof(int))];
+ /* Last for extensibility. Eight byte aligned because some
+ coprocessors require eight byte alignment. */
+ unsigned long uc_regspace[128] __attribute__((__aligned__(8)));
+} ucontext_t;
+
+#else
+#include <sys/ucontext.h>
+#endif // __BIONIC_HAVE_UCONTEXT_T
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_ARM_LINUX_UCONTEXT_H_
diff --git a/libchrome/sandbox/linux/system_headers/capability.h b/libchrome/sandbox/linux/system_headers/capability.h
new file mode 100644
index 0000000..f91fcf7
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/capability.h
@@ -0,0 +1,42 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_CAPABILITY_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_CAPABILITY_H_
+
+#include <stdint.h>
+
+// The following macros are taken from linux/capability.h.
+// We only support capability version 3, which was introduced in Linux 2.6.26.
+#ifndef _LINUX_CAPABILITY_VERSION_3
+#define _LINUX_CAPABILITY_VERSION_3 0x20080522
+#endif
+#ifndef _LINUX_CAPABILITY_U32S_3
+#define _LINUX_CAPABILITY_U32S_3 2
+#endif
+#ifndef CAP_TO_INDEX
+#define CAP_TO_INDEX(x) ((x) >> 5) // 1 << 5 == bits in __u32
+#endif
+#ifndef CAP_TO_MASK
+#define CAP_TO_MASK(x) (1 << ((x) & 31)) // mask for indexed __u32
+#endif
+#ifndef CAP_SYS_CHROOT
+#define CAP_SYS_CHROOT 18
+#endif
+#ifndef CAP_SYS_ADMIN
+#define CAP_SYS_ADMIN 21
+#endif
+
+struct cap_hdr {
+ uint32_t version;
+ int pid;
+};
+
+struct cap_data {
+ uint32_t effective;
+ uint32_t permitted;
+ uint32_t inheritable;
+};
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_CAPABILITY_H_
diff --git a/libchrome/sandbox/linux/system_headers/i386_linux_ucontext.h b/libchrome/sandbox/linux/system_headers/i386_linux_ucontext.h
new file mode 100644
index 0000000..f438033
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/i386_linux_ucontext.h
@@ -0,0 +1,96 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_ANDROID_I386_UCONTEXT_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_ANDROID_I386_UCONTEXT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// We do something compatible with glibc. Hopefully, at some point Android will
+// provide that for us, and __BIONIC_HAVE_UCONTEXT_T should be defined.
+// This is mostly copied from breakpad (common/android/include/sys/ucontext.h),
+// except we do use sigset_t for uc_sigmask instead of a custom type.
+
+#if !defined(__BIONIC_HAVE_UCONTEXT_T)
+#if !defined(__native_client_nonsfi__)
+#include <asm/sigcontext.h>
+#else
+// In the PNaCl toolchain, stack_t is not defined, so declare it here.
+typedef struct sigaltstack {
+ void* ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+#endif
+
+/* 80-bit floating-point register */
+struct _libc_fpreg {
+ unsigned short significand[4];
+ unsigned short exponent;
+};
+
+/* Simple floating-point state, see FNSTENV instruction */
+struct _libc_fpstate {
+ unsigned long cw;
+ unsigned long sw;
+ unsigned long tag;
+ unsigned long ipoff;
+ unsigned long cssel;
+ unsigned long dataoff;
+ unsigned long datasel;
+ struct _libc_fpreg _st[8];
+ unsigned long status;
+};
+
+typedef uint32_t greg_t;
+
+typedef struct {
+ uint32_t gregs[19];
+ struct _libc_fpstate* fpregs;
+ uint32_t oldmask;
+ uint32_t cr2;
+} mcontext_t;
+
+enum {
+ REG_GS = 0,
+ REG_FS,
+ REG_ES,
+ REG_DS,
+ REG_EDI,
+ REG_ESI,
+ REG_EBP,
+ REG_ESP,
+ REG_EBX,
+ REG_EDX,
+ REG_ECX,
+ REG_EAX,
+ REG_TRAPNO,
+ REG_ERR,
+ REG_EIP,
+ REG_CS,
+ REG_EFL,
+ REG_UESP,
+ REG_SS,
+};
+
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Android and PNaCl toolchain's sigset_t has only 32 bits, though Linux
+ // ABI requires 64 bits.
+ union {
+ sigset_t uc_sigmask;
+ uint32_t kernel_sigmask[2];
+ };
+ struct _libc_fpstate __fpregs_mem;
+} ucontext_t;
+
+#else
+#include <sys/ucontext.h>
+#endif // __BIONIC_HAVE_UCONTEXT_T
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_ANDROID_I386_UCONTEXT_H_
diff --git a/libchrome/sandbox/linux/system_headers/linux_filter.h b/libchrome/sandbox/linux/system_headers/linux_filter.h
new file mode 100644
index 0000000..b23b6eb
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/linux_filter.h
@@ -0,0 +1,140 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_FILTER_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_FILTER_H_
+
+#include <stdint.h>
+
+// The following structs and macros are taken from linux/filter.h,
+// as some toolchains do not expose them.
+struct sock_filter {
+ uint16_t code;
+ uint8_t jt;
+ uint8_t jf;
+ uint32_t k;
+};
+
+struct sock_fprog {
+ uint16_t len;
+ struct sock_filter *filter;
+};
+
+#ifndef BPF_CLASS
+#define BPF_CLASS(code) ((code) & 0x07)
+#endif
+
+#ifndef BPF_LD
+#define BPF_LD 0x00
+#endif
+
+#ifndef BPF_ALU
+#define BPF_ALU 0x04
+#endif
+
+#ifndef BPF_JMP
+#define BPF_JMP 0x05
+#endif
+
+#ifndef BPF_RET
+#define BPF_RET 0x06
+#endif
+
+#ifndef BPF_SIZE
+#define BPF_SIZE(code) ((code) & 0x18)
+#endif
+
+#ifndef BPF_W
+#define BPF_W 0x00
+#endif
+
+#ifndef BPF_MODE
+#define BPF_MODE(code) ((code) & 0xe0)
+#endif
+
+#ifndef BPF_ABS
+#define BPF_ABS 0x20
+#endif
+
+#ifndef BPF_OP
+#define BPF_OP(code) ((code) & 0xf0)
+#endif
+
+#ifndef BPF_ADD
+#define BPF_ADD 0x00
+#endif
+
+#ifndef BPF_SUB
+#define BPF_SUB 0x10
+#endif
+
+#ifndef BPF_MUL
+#define BPF_MUL 0x20
+#endif
+
+#ifndef BPF_DIV
+#define BPF_DIV 0x30
+#endif
+
+#ifndef BPF_OR
+#define BPF_OR 0x40
+#endif
+
+#ifndef BPF_AND
+#define BPF_AND 0x50
+#endif
+
+#ifndef BPF_LSH
+#define BPF_LSH 0x60
+#endif
+
+#ifndef BPF_RSH
+#define BPF_RSH 0x70
+#endif
+
+#ifndef BPF_NEG
+#define BPF_NEG 0x80
+#endif
+
+#ifndef BPF_MOD
+#define BPF_MOD 0x90
+#endif
+
+#ifndef BPF_XOR
+#define BPF_XOR 0xA0
+#endif
+
+#ifndef BPF_JA
+#define BPF_JA 0x00
+#endif
+
+#ifndef BPF_JEQ
+#define BPF_JEQ 0x10
+#endif
+
+#ifndef BPF_JGT
+#define BPF_JGT 0x20
+#endif
+
+#ifndef BPF_JGE
+#define BPF_JGE 0x30
+#endif
+
+#ifndef BPF_JSET
+#define BPF_JSET 0x40
+#endif
+
+#ifndef BPF_SRC
+#define BPF_SRC(code) ((code) & 0x08)
+#endif
+
+#ifndef BPF_K
+#define BPF_K 0x00
+#endif
+
+#ifndef BPF_MAXINSNS
+#define BPF_MAXINSNS 4096
+#endif
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_FILTER_H_
diff --git a/libchrome/sandbox/linux/system_headers/linux_futex.h b/libchrome/sandbox/linux/system_headers/linux_futex.h
new file mode 100644
index 0000000..4e28403
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/linux_futex.h
@@ -0,0 +1,84 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_FUTEX_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_FUTEX_H_
+
+#if !defined(__native_client_nonsfi__)
+#include <linux/futex.h>
+#endif // !defined(__native_client_nonsfi__)
+
+#if !defined(FUTEX_WAIT)
+#define FUTEX_WAIT 0
+#endif
+
+#if !defined(FUTEX_WAKE)
+#define FUTEX_WAKE 1
+#endif
+
+#if !defined(FUTEX_FD)
+#define FUTEX_FD 2
+#endif
+
+#if !defined(FUTEX_REQUEUE)
+#define FUTEX_REQUEUE 3
+#endif
+
+#if !defined(FUTEX_CMP_REQUEUE)
+#define FUTEX_CMP_REQUEUE 4
+#endif
+
+#if !defined(FUTEX_WAKE_OP)
+#define FUTEX_WAKE_OP 5
+#endif
+
+#if !defined(FUTEX_LOCK_PI)
+#define FUTEX_LOCK_PI 6
+#endif
+
+#if !defined(FUTEX_UNLOCK_PI)
+#define FUTEX_UNLOCK_PI 7
+#endif
+
+#if !defined(FUTEX_TRYLOCK_PI)
+#define FUTEX_TRYLOCK_PI 8
+#endif
+
+#if !defined(FUTEX_WAIT_BITSET)
+#define FUTEX_WAIT_BITSET 9
+#endif
+
+#if !defined(FUTEX_WAKE_BITSET)
+#define FUTEX_WAKE_BITSET 10
+#endif
+
+#if !defined(FUTEX_WAIT_REQUEUE_PI)
+#define FUTEX_WAIT_REQUEUE_PI 11
+#endif
+
+#if !defined(FUTEX_CMP_REQUEUE_PI)
+#define FUTEX_CMP_REQUEUE_PI 12
+#endif
+
+#if !defined(FUTEX_PRIVATE_FLAG)
+#define FUTEX_PRIVATE_FLAG 128
+#endif
+
+#if !defined FUTEX_CLOCK_REALTIME
+#define FUTEX_CLOCK_REALTIME 256
+#endif
+
+#if !defined(FUTEX_CMD_MASK)
+#define FUTEX_CMD_MASK ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
+#endif
+
+#if !defined(FUTEX_CMP_REQUEUE_PI_PRIVATE)
+#define FUTEX_CMP_REQUEUE_PI_PRIVATE (FUTEX_CMP_REQUEUE_PI | FUTEX_PRIVATE_FLAG)
+#endif
+
+#if !defined(FUTEX_UNLOCK_PI_PRIVATE)
+#define FUTEX_UNLOCK_PI_PRIVATE (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG)
+#endif
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_FUTEX_H_
diff --git a/libchrome/sandbox/linux/system_headers/linux_seccomp.h b/libchrome/sandbox/linux/system_headers/linux_seccomp.h
new file mode 100644
index 0000000..3deb3d2
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/linux_seccomp.h
@@ -0,0 +1,107 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_SECCOMP_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_SECCOMP_H_
+
+// The Seccomp2 kernel ABI is not part of older versions of glibc.
+// As we can't break compilation with these versions of the library,
+// we explicitly define all missing symbols.
+// If we ever decide that we can now rely on system headers, the following
+// include files should be enabled:
+// #include <linux/audit.h>
+// #include <linux/seccomp.h>
+
+// For audit.h
+#ifndef EM_ARM
+#define EM_ARM 40
+#endif
+#ifndef EM_386
+#define EM_386 3
+#endif
+#ifndef EM_X86_64
+#define EM_X86_64 62
+#endif
+#ifndef EM_MIPS
+#define EM_MIPS 8
+#endif
+#ifndef EM_AARCH64
+#define EM_AARCH64 183
+#endif
+
+#ifndef __AUDIT_ARCH_64BIT
+#define __AUDIT_ARCH_64BIT 0x80000000
+#endif
+#ifndef __AUDIT_ARCH_LE
+#define __AUDIT_ARCH_LE 0x40000000
+#endif
+#ifndef AUDIT_ARCH_ARM
+#define AUDIT_ARCH_ARM (EM_ARM|__AUDIT_ARCH_LE)
+#endif
+#ifndef AUDIT_ARCH_I386
+#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
+#endif
+#ifndef AUDIT_ARCH_X86_64
+#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+#endif
+#ifndef AUDIT_ARCH_MIPSEL
+#define AUDIT_ARCH_MIPSEL (EM_MIPS|__AUDIT_ARCH_LE)
+#endif
+#ifndef AUDIT_ARCH_AARCH64
+#define AUDIT_ARCH_AARCH64 (EM_AARCH64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE)
+#endif
+
+// For prctl.h
+#ifndef PR_SET_SECCOMP
+#define PR_SET_SECCOMP 22
+#define PR_GET_SECCOMP 21
+#endif
+#ifndef PR_SET_NO_NEW_PRIVS
+#define PR_SET_NO_NEW_PRIVS 38
+#define PR_GET_NO_NEW_PRIVS 39
+#endif
+#ifndef IPC_64
+#define IPC_64 0x0100
+#endif
+
+// In order to build with older toolchains, we currently have to avoid
+// including <linux/seccomp.h>. Until that can be fixed (if ever), rely on
+// our own definitions of the seccomp kernel ABI.
+#ifndef SECCOMP_MODE_FILTER
+#define SECCOMP_MODE_DISABLED 0
+#define SECCOMP_MODE_STRICT 1
+#define SECCOMP_MODE_FILTER 2 // Uses a user-supplied filter
+#endif
+
+#ifndef SECCOMP_SET_MODE_STRICT
+#define SECCOMP_SET_MODE_STRICT 0
+#endif
+#ifndef SECCOMP_SET_MODE_FILTER
+#define SECCOMP_SET_MODE_FILTER 1
+#endif
+#ifndef SECCOMP_FILTER_FLAG_TSYNC
+#define SECCOMP_FILTER_FLAG_TSYNC 1
+#endif
+
+#ifndef SECCOMP_RET_KILL
+// Return values supported for BPF filter programs. Please note that the
+// "illegal" SECCOMP_RET_INVALID is not supported by the kernel, should only
+// ever be used internally, and would result in the kernel killing our process.
+#define SECCOMP_RET_KILL 0x00000000U // Kill the task immediately
+#define SECCOMP_RET_INVALID 0x00010000U // Illegal return value
+#define SECCOMP_RET_TRAP 0x00030000U // Disallow and force a SIGSYS
+#define SECCOMP_RET_ERRNO 0x00050000U // Returns an errno
+#define SECCOMP_RET_TRACE 0x7ff00000U // Pass to a tracer or disallow
+#define SECCOMP_RET_ALLOW 0x7fff0000U // Allow
+#define SECCOMP_RET_ACTION 0xffff0000U // Masks for the return value
+#define SECCOMP_RET_DATA 0x0000ffffU // sections
+#else
+#define SECCOMP_RET_INVALID 0x00010000U // Illegal return value
+#endif
+
+#ifndef SYS_SECCOMP
+#define SYS_SECCOMP 1
+#endif
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_SECCOMP_H_
diff --git a/libchrome/sandbox/linux/system_headers/linux_signal.h b/libchrome/sandbox/linux/system_headers/linux_signal.h
new file mode 100644
index 0000000..fb9a47b
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/linux_signal.h
@@ -0,0 +1,146 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_SIGNAL_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_SIGNAL_H_
+
+#include <stdint.h>
+
+// NOTE: On some toolchains, the signal-related ABI is incompatible with
+// Linux's (not undefined, but defined with different values and in
+// different memory layouts). So, fill the gap here.
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \
+ defined(__aarch64__)
+
+#define LINUX_SIGHUP 1
+#define LINUX_SIGINT 2
+#define LINUX_SIGQUIT 3
+#define LINUX_SIGABRT 6
+#define LINUX_SIGBUS 7
+#define LINUX_SIGUSR1 10
+#define LINUX_SIGSEGV 11
+#define LINUX_SIGUSR2 12
+#define LINUX_SIGPIPE 13
+#define LINUX_SIGTERM 15
+#define LINUX_SIGCHLD 17
+#define LINUX_SIGSYS 31
+
+#define LINUX_SIG_BLOCK 0
+#define LINUX_SIG_UNBLOCK 1
+
+#define LINUX_SA_SIGINFO 4
+#define LINUX_SA_NODEFER 0x40000000
+#define LINUX_SA_RESTART 0x10000000
+
+#define LINUX_SIG_DFL 0
+
+#elif defined(__mips__)
+
+#define LINUX_SIGHUP 1
+#define LINUX_SIGINT 2
+#define LINUX_SIGQUIT 3
+#define LINUX_SIGABRT 6
+#define LINUX_SIGBUS 10
+#define LINUX_SIGSEGV 11
+#define LINUX_SIGSYS 12
+#define LINUX_SIGPIPE 13
+#define LINUX_SIGTERM 15
+#define LINUX_SIGUSR1 16
+#define LINUX_SIGUSR2 17
+#define LINUX_SIGCHLD 18
+
+#define LINUX_SIG_BLOCK 1
+#define LINUX_SIG_UNBLOCK 2
+
+#define LINUX_SA_SIGINFO 0x00000008
+#define LINUX_SA_NODEFER 0x40000000
+#define LINUX_SA_RESTART 0x10000000
+
+#define LINUX_SIG_DFL 0
+
+#else
+#error "Unsupported platform"
+#endif
+
+#if defined(__native_client_nonsfi__)
+#if !defined(__i386__) && !defined(__arm__)
+#error "Unsupported platform"
+#endif
+
+#include <signal.h>
+
+struct LinuxSigInfo {
+ int si_signo;
+ int si_errno;
+ int si_code;
+
+ // Extra data is followed by the |si_code|. The length depends on the
+ // signal number.
+ char _sifields[1];
+};
+
+#include "sandbox/linux/system_headers/linux_ucontext.h"
+
+#else // !defined(__native_client_nonsfi__)
+
+#include <signal.h>
+
+static_assert(LINUX_SIGHUP == SIGHUP, "LINUX_SIGHUP == SIGHUP");
+static_assert(LINUX_SIGINT == SIGINT, "LINUX_SIGINT == SIGINT");
+static_assert(LINUX_SIGQUIT == SIGQUIT, "LINUX_SIGQUIT == SIGQUIT");
+static_assert(LINUX_SIGABRT == SIGABRT, "LINUX_SIGABRT == SIGABRT");
+static_assert(LINUX_SIGBUS == SIGBUS, "LINUX_SIGBUS == SIGBUS");
+static_assert(LINUX_SIGUSR1 == SIGUSR1, "LINUX_SIGUSR1 == SIGUSR1");
+static_assert(LINUX_SIGSEGV == SIGSEGV, "LINUX_SIGSEGV == SIGSEGV");
+static_assert(LINUX_SIGUSR2 == SIGUSR2, "LINUX_SIGUSR2 == SIGUSR2");
+static_assert(LINUX_SIGPIPE == SIGPIPE, "LINUX_SIGPIPE == SIGPIPE");
+static_assert(LINUX_SIGTERM == SIGTERM, "LINUX_SIGTERM == SIGTERM");
+static_assert(LINUX_SIGCHLD == SIGCHLD, "LINUX_SIGCHLD == SIGCHLD");
+static_assert(LINUX_SIGSYS == SIGSYS, "LINUX_SIGSYS == SIGSYS");
+static_assert(LINUX_SIG_BLOCK == SIG_BLOCK, "LINUX_SIG_BLOCK == SIG_BLOCK");
+static_assert(LINUX_SIG_UNBLOCK == SIG_UNBLOCK,
+ "LINUX_SIG_UNBLOCK == SIG_UNBLOCK");
+static_assert(LINUX_SA_SIGINFO == SA_SIGINFO, "LINUX_SA_SIGINFO == SA_SIGINFO");
+static_assert(LINUX_SA_NODEFER == SA_NODEFER, "LINUX_SA_NODEFER == SA_NODEFER");
+static_assert(LINUX_SA_RESTART == SA_RESTART, "LINUX_SA_RESTART == SA_RESTART");
+static_assert(LINUX_SIG_DFL == SIG_DFL, "LINUX_SIG_DFL == SIG_DFL");
+
+typedef siginfo_t LinuxSigInfo;
+
+#if defined(__ANDROID__)
+// Android's signal.h doesn't define ucontext etc.
+#include "sandbox/linux/system_headers/linux_ucontext.h"
+#endif // defined(__ANDROID__)
+
+#endif // !defined(__native_client_nonsfi__)
+
+// struct sigset_t is different size in PNaCl from the Linux's.
+#if defined(__mips__)
+#if !defined(_NSIG_WORDS)
+#define _NSIG_WORDS 4
+#endif
+struct LinuxSigSet {
+ unsigned long sig[_NSIG_WORDS];
+};
+#else
+typedef uint64_t LinuxSigSet;
+#endif
+
+// struct sigaction is different in PNaCl from the Linux's.
+#if defined(__mips__)
+struct LinuxSigAction {
+ unsigned int sa_flags;
+ void (*kernel_handler)(int);
+ LinuxSigSet sa_mask;
+};
+#else
+struct LinuxSigAction {
+ void (*kernel_handler)(int);
+ uint32_t sa_flags;
+ void (*sa_restorer)(void);
+ LinuxSigSet sa_mask;
+};
+#endif
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_SIGNAL_H_
diff --git a/libchrome/sandbox/linux/system_headers/linux_syscalls.h b/libchrome/sandbox/linux/system_headers/linux_syscalls.h
new file mode 100644
index 0000000..2b441e4
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/linux_syscalls.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header will be kept up to date so that we can compile system-call
+// policies even when system headers are old.
+// System call numbers are accessible through __NR_syscall_name.
+
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_SYSCALLS_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_SYSCALLS_H_
+
+#if defined(__x86_64__)
+#include "sandbox/linux/system_headers/x86_64_linux_syscalls.h"
+#endif
+
+#if defined(__i386__)
+#include "sandbox/linux/system_headers/x86_32_linux_syscalls.h"
+#endif
+
+#if defined(__arm__) && defined(__ARM_EABI__)
+#include "sandbox/linux/system_headers/arm_linux_syscalls.h"
+#endif
+
+#if defined(__mips__) && (_MIPS_SIM == _ABIO32)
+#include "sandbox/linux/system_headers/mips_linux_syscalls.h"
+#endif
+
+#if defined(__mips__) && (_MIPS_SIM == _ABI64)
+#include "sandbox/linux/system_headers/mips64_linux_syscalls.h"
+#endif
+
+#if defined(__aarch64__)
+#include "sandbox/linux/system_headers/arm64_linux_syscalls.h"
+#endif
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_SYSCALLS_H_
+
diff --git a/libchrome/sandbox/linux/system_headers/linux_time.h b/libchrome/sandbox/linux/system_headers/linux_time.h
new file mode 100644
index 0000000..e6c8112
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/linux_time.h
@@ -0,0 +1,18 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_TIME_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_TIME_H_
+
+#include <time.h>
+
+#if !defined(CLOCK_REALTIME_COARSE)
+#define CLOCK_REALTIME_COARSE 5
+#endif
+
+#if !defined(CLOCK_MONOTONIC_COARSE)
+#define CLOCK_MONOTONIC_COARSE 6
+#endif
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_TIME_H_
diff --git a/libchrome/sandbox/linux/system_headers/linux_ucontext.h b/libchrome/sandbox/linux/system_headers/linux_ucontext.h
new file mode 100644
index 0000000..ea4d8a6
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/linux_ucontext.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_UCONTEXT_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_UCONTEXT_H_
+
+#if defined(__ANDROID__) || defined(__native_client_nonsfi__)
+
+#if defined(__arm__)
+#include "sandbox/linux/system_headers/arm_linux_ucontext.h"
+#elif defined(__i386__)
+#include "sandbox/linux/system_headers/i386_linux_ucontext.h"
+#elif defined(__x86_64__)
+#include "sandbox/linux/system_headers/x86_64_linux_ucontext.h"
+#elif defined(__mips__)
+#include "sandbox/linux/system_headers/mips_linux_ucontext.h"
+#elif defined(__aarch64__)
+#include "sandbox/linux/system_headers/arm64_linux_ucontext.h"
+#else
+#error "No support for your architecture in Android or PNaCl header"
+#endif
+
+#else // defined(__ANDROID__) || defined(__native_client_nonsfi__)
+#error "The header file included on non Android and non PNaCl."
+#endif // defined(__ANDROID__) || defined(__native_client_nonsfi__)
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_LINUX_UCONTEXT_H_
diff --git a/libchrome/sandbox/linux/system_headers/mips64_linux_syscalls.h b/libchrome/sandbox/linux/system_headers/mips64_linux_syscalls.h
new file mode 100644
index 0000000..d003124
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/mips64_linux_syscalls.h
@@ -0,0 +1,1266 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated from the Linux kernel's calls.S.
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_MIPS64_LINUX_SYSCALLS_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_MIPS64_LINUX_SYSCALLS_H_
+
+#if !defined(__mips__) || (_MIPS_SIM != _ABI64)
+#error "Including header on wrong architecture"
+#endif
+
+// __NR_Linux, is defined in <asm/unistd.h>.
+#include <asm/unistd.h>
+
+#if !defined(__NR_read)
+#define __NR_read (__NR_Linux + 0)
+#endif
+
+#if !defined(__NR_write)
+#define __NR_write (__NR_Linux + 1)
+#endif
+
+#if !defined(__NR_open)
+#define __NR_open (__NR_Linux + 2)
+#endif
+
+#if !defined(__NR_close)
+#define __NR_close (__NR_Linux + 3)
+#endif
+
+#if !defined(__NR_stat)
+#define __NR_stat (__NR_Linux + 4)
+#endif
+
+#if !defined(__NR_fstat)
+#define __NR_fstat (__NR_Linux + 5)
+#endif
+
+#if !defined(__NR_lstat)
+#define __NR_lstat (__NR_Linux + 6)
+#endif
+
+#if !defined(__NR_poll)
+#define __NR_poll (__NR_Linux + 7)
+#endif
+
+#if !defined(__NR_lseek)
+#define __NR_lseek (__NR_Linux + 8)
+#endif
+
+#if !defined(__NR_mmap)
+#define __NR_mmap (__NR_Linux + 9)
+#endif
+
+#if !defined(__NR_mprotect)
+#define __NR_mprotect (__NR_Linux + 10)
+#endif
+
+#if !defined(__NR_munmap)
+#define __NR_munmap (__NR_Linux + 11)
+#endif
+
+#if !defined(__NR_brk)
+#define __NR_brk (__NR_Linux + 12)
+#endif
+
+#if !defined(__NR_rt_sigaction)
+#define __NR_rt_sigaction (__NR_Linux + 13)
+#endif
+
+#if !defined(__NR_rt_sigprocmask)
+#define __NR_rt_sigprocmask (__NR_Linux + 14)
+#endif
+
+#if !defined(__NR_ioctl)
+#define __NR_ioctl (__NR_Linux + 15)
+#endif
+
+#if !defined(__NR_pread64)
+#define __NR_pread64 (__NR_Linux + 16)
+#endif
+
+#if !defined(__NR_pwrite64)
+#define __NR_pwrite64 (__NR_Linux + 17)
+#endif
+
+#if !defined(__NR_readv)
+#define __NR_readv (__NR_Linux + 18)
+#endif
+
+#if !defined(__NR_writev)
+#define __NR_writev (__NR_Linux + 19)
+#endif
+
+#if !defined(__NR_access)
+#define __NR_access (__NR_Linux + 20)
+#endif
+
+#if !defined(__NR_pipe)
+#define __NR_pipe (__NR_Linux + 21)
+#endif
+
+#if !defined(__NR__newselect)
+#define __NR__newselect (__NR_Linux + 22)
+#endif
+
+#if !defined(__NR_sched_yield)
+#define __NR_sched_yield (__NR_Linux + 23)
+#endif
+
+#if !defined(__NR_mremap)
+#define __NR_mremap (__NR_Linux + 24)
+#endif
+
+#if !defined(__NR_msync)
+#define __NR_msync (__NR_Linux + 25)
+#endif
+
+#if !defined(__NR_mincore)
+#define __NR_mincore (__NR_Linux + 26)
+#endif
+
+#if !defined(__NR_madvise)
+#define __NR_madvise (__NR_Linux + 27)
+#endif
+
+#if !defined(__NR_shmget)
+#define __NR_shmget (__NR_Linux + 28)
+#endif
+
+#if !defined(__NR_shmat)
+#define __NR_shmat (__NR_Linux + 29)
+#endif
+
+#if !defined(__NR_shmctl)
+#define __NR_shmctl (__NR_Linux + 30)
+#endif
+
+#if !defined(__NR_dup)
+#define __NR_dup (__NR_Linux + 31)
+#endif
+
+#if !defined(__NR_dup2)
+#define __NR_dup2 (__NR_Linux + 32)
+#endif
+
+#if !defined(__NR_pause)
+#define __NR_pause (__NR_Linux + 33)
+#endif
+
+#if !defined(__NR_nanosleep)
+#define __NR_nanosleep (__NR_Linux + 34)
+#endif
+
+#if !defined(__NR_getitimer)
+#define __NR_getitimer (__NR_Linux + 35)
+#endif
+
+#if !defined(__NR_setitimer)
+#define __NR_setitimer (__NR_Linux + 36)
+#endif
+
+#if !defined(__NR_alarm)
+#define __NR_alarm (__NR_Linux + 37)
+#endif
+
+#if !defined(__NR_getpid)
+#define __NR_getpid (__NR_Linux + 38)
+#endif
+
+#if !defined(__NR_sendfile)
+#define __NR_sendfile (__NR_Linux + 39)
+#endif
+
+#if !defined(__NR_socket)
+#define __NR_socket (__NR_Linux + 40)
+#endif
+
+#if !defined(__NR_connect)
+#define __NR_connect (__NR_Linux + 41)
+#endif
+
+#if !defined(__NR_accept)
+#define __NR_accept (__NR_Linux + 42)
+#endif
+
+#if !defined(__NR_sendto)
+#define __NR_sendto (__NR_Linux + 43)
+#endif
+
+#if !defined(__NR_recvfrom)
+#define __NR_recvfrom (__NR_Linux + 44)
+#endif
+
+#if !defined(__NR_sendmsg)
+#define __NR_sendmsg (__NR_Linux + 45)
+#endif
+
+#if !defined(__NR_recvmsg)
+#define __NR_recvmsg (__NR_Linux + 46)
+#endif
+
+#if !defined(__NR_shutdown)
+#define __NR_shutdown (__NR_Linux + 47)
+#endif
+
+#if !defined(__NR_bind)
+#define __NR_bind (__NR_Linux + 48)
+#endif
+
+#if !defined(__NR_listen)
+#define __NR_listen (__NR_Linux + 49)
+#endif
+
+#if !defined(__NR_getsockname)
+#define __NR_getsockname (__NR_Linux + 50)
+#endif
+
+#if !defined(__NR_getpeername)
+#define __NR_getpeername (__NR_Linux + 51)
+#endif
+
+#if !defined(__NR_socketpair)
+#define __NR_socketpair (__NR_Linux + 52)
+#endif
+
+#if !defined(__NR_setsockopt)
+#define __NR_setsockopt (__NR_Linux + 53)
+#endif
+
+#if !defined(__NR_getsockopt)
+#define __NR_getsockopt (__NR_Linux + 54)
+#endif
+
+#if !defined(__NR_clone)
+#define __NR_clone (__NR_Linux + 55)
+#endif
+
+#if !defined(__NR_fork)
+#define __NR_fork (__NR_Linux + 56)
+#endif
+
+#if !defined(__NR_execve)
+#define __NR_execve (__NR_Linux + 57)
+#endif
+
+#if !defined(__NR_exit)
+#define __NR_exit (__NR_Linux + 58)
+#endif
+
+#if !defined(__NR_wait4)
+#define __NR_wait4 (__NR_Linux + 59)
+#endif
+
+#if !defined(__NR_kill)
+#define __NR_kill (__NR_Linux + 60)
+#endif
+
+#if !defined(__NR_uname)
+#define __NR_uname (__NR_Linux + 61)
+#endif
+
+#if !defined(__NR_semget)
+#define __NR_semget (__NR_Linux + 62)
+#endif
+
+#if !defined(__NR_semop)
+#define __NR_semop (__NR_Linux + 63)
+#endif
+
+#if !defined(__NR_semctl)
+#define __NR_semctl (__NR_Linux + 64)
+#endif
+
+#if !defined(__NR_shmdt)
+#define __NR_shmdt (__NR_Linux + 65)
+#endif
+
+#if !defined(__NR_msgget)
+#define __NR_msgget (__NR_Linux + 66)
+#endif
+
+#if !defined(__NR_msgsnd)
+#define __NR_msgsnd (__NR_Linux + 67)
+#endif
+
+#if !defined(__NR_msgrcv)
+#define __NR_msgrcv (__NR_Linux + 68)
+#endif
+
+#if !defined(__NR_msgctl)
+#define __NR_msgctl (__NR_Linux + 69)
+#endif
+
+#if !defined(__NR_fcntl)
+#define __NR_fcntl (__NR_Linux + 70)
+#endif
+
+#if !defined(__NR_flock)
+#define __NR_flock (__NR_Linux + 71)
+#endif
+
+#if !defined(__NR_fsync)
+#define __NR_fsync (__NR_Linux + 72)
+#endif
+
+#if !defined(__NR_fdatasync)
+#define __NR_fdatasync (__NR_Linux + 73)
+#endif
+
+#if !defined(__NR_truncate)
+#define __NR_truncate (__NR_Linux + 74)
+#endif
+
+#if !defined(__NR_ftruncate)
+#define __NR_ftruncate (__NR_Linux + 75)
+#endif
+
+#if !defined(__NR_getdents)
+#define __NR_getdents (__NR_Linux + 76)
+#endif
+
+#if !defined(__NR_getcwd)
+#define __NR_getcwd (__NR_Linux + 77)
+#endif
+
+#if !defined(__NR_chdir)
+#define __NR_chdir (__NR_Linux + 78)
+#endif
+
+#if !defined(__NR_fchdir)
+#define __NR_fchdir (__NR_Linux + 79)
+#endif
+
+#if !defined(__NR_rename)
+#define __NR_rename (__NR_Linux + 80)
+#endif
+
+#if !defined(__NR_mkdir)
+#define __NR_mkdir (__NR_Linux + 81)
+#endif
+
+#if !defined(__NR_rmdir)
+#define __NR_rmdir (__NR_Linux + 82)
+#endif
+
+#if !defined(__NR_creat)
+#define __NR_creat (__NR_Linux + 83)
+#endif
+
+#if !defined(__NR_link)
+#define __NR_link (__NR_Linux + 84)
+#endif
+
+#if !defined(__NR_unlink)
+#define __NR_unlink (__NR_Linux + 85)
+#endif
+
+#if !defined(__NR_symlink)
+#define __NR_symlink (__NR_Linux + 86)
+#endif
+
+#if !defined(__NR_readlink)
+#define __NR_readlink (__NR_Linux + 87)
+#endif
+
+#if !defined(__NR_chmod)
+#define __NR_chmod (__NR_Linux + 88)
+#endif
+
+#if !defined(__NR_fchmod)
+#define __NR_fchmod (__NR_Linux + 89)
+#endif
+
+#if !defined(__NR_chown)
+#define __NR_chown (__NR_Linux + 90)
+#endif
+
+#if !defined(__NR_fchown)
+#define __NR_fchown (__NR_Linux + 91)
+#endif
+
+#if !defined(__NR_lchown)
+#define __NR_lchown (__NR_Linux + 92)
+#endif
+
+#if !defined(__NR_umask)
+#define __NR_umask (__NR_Linux + 93)
+#endif
+
+#if !defined(__NR_gettimeofday)
+#define __NR_gettimeofday (__NR_Linux + 94)
+#endif
+
+#if !defined(__NR_getrlimit)
+#define __NR_getrlimit (__NR_Linux + 95)
+#endif
+
+#if !defined(__NR_getrusage)
+#define __NR_getrusage (__NR_Linux + 96)
+#endif
+
+#if !defined(__NR_sysinfo)
+#define __NR_sysinfo (__NR_Linux + 97)
+#endif
+
+#if !defined(__NR_times)
+#define __NR_times (__NR_Linux + 98)
+#endif
+
+#if !defined(__NR_ptrace)
+#define __NR_ptrace (__NR_Linux + 99)
+#endif
+
+#if !defined(__NR_getuid)
+#define __NR_getuid (__NR_Linux + 100)
+#endif
+
+#if !defined(__NR_syslog)
+#define __NR_syslog (__NR_Linux + 101)
+#endif
+
+#if !defined(__NR_getgid)
+#define __NR_getgid (__NR_Linux + 102)
+#endif
+
+#if !defined(__NR_setuid)
+#define __NR_setuid (__NR_Linux + 103)
+#endif
+
+#if !defined(__NR_setgid)
+#define __NR_setgid (__NR_Linux + 104)
+#endif
+
+#if !defined(__NR_geteuid)
+#define __NR_geteuid (__NR_Linux + 105)
+#endif
+
+#if !defined(__NR_getegid)
+#define __NR_getegid (__NR_Linux + 106)
+#endif
+
+#if !defined(__NR_setpgid)
+#define __NR_setpgid (__NR_Linux + 107)
+#endif
+
+#if !defined(__NR_getppid)
+#define __NR_getppid (__NR_Linux + 108)
+#endif
+
+#if !defined(__NR_getpgrp)
+#define __NR_getpgrp (__NR_Linux + 109)
+#endif
+
+#if !defined(__NR_setsid)
+#define __NR_setsid (__NR_Linux + 110)
+#endif
+
+#if !defined(__NR_setreuid)
+#define __NR_setreuid (__NR_Linux + 111)
+#endif
+
+#if !defined(__NR_setregid)
+#define __NR_setregid (__NR_Linux + 112)
+#endif
+
+#if !defined(__NR_getgroups)
+#define __NR_getgroups (__NR_Linux + 113)
+#endif
+
+#if !defined(__NR_setgroups)
+#define __NR_setgroups (__NR_Linux + 114)
+#endif
+
+#if !defined(__NR_setresuid)
+#define __NR_setresuid (__NR_Linux + 115)
+#endif
+
+#if !defined(__NR_getresuid)
+#define __NR_getresuid (__NR_Linux + 116)
+#endif
+
+#if !defined(__NR_setresgid)
+#define __NR_setresgid (__NR_Linux + 117)
+#endif
+
+#if !defined(__NR_getresgid)
+#define __NR_getresgid (__NR_Linux + 118)
+#endif
+
+#if !defined(__NR_getpgid)
+#define __NR_getpgid (__NR_Linux + 119)
+#endif
+
+#if !defined(__NR_setfsuid)
+#define __NR_setfsuid (__NR_Linux + 120)
+#endif
+
+#if !defined(__NR_setfsgid)
+#define __NR_setfsgid (__NR_Linux + 121)
+#endif
+
+#if !defined(__NR_getsid)
+#define __NR_getsid (__NR_Linux + 122)
+#endif
+
+#if !defined(__NR_capget)
+#define __NR_capget (__NR_Linux + 123)
+#endif
+
+#if !defined(__NR_capset)
+#define __NR_capset (__NR_Linux + 124)
+#endif
+
+#if !defined(__NR_rt_sigpending)
+#define __NR_rt_sigpending (__NR_Linux + 125)
+#endif
+
+#if !defined(__NR_rt_sigtimedwait)
+#define __NR_rt_sigtimedwait (__NR_Linux + 126)
+#endif
+
+#if !defined(__NR_rt_sigqueueinfo)
+#define __NR_rt_sigqueueinfo (__NR_Linux + 127)
+#endif
+
+#if !defined(__NR_rt_sigsuspend)
+#define __NR_rt_sigsuspend (__NR_Linux + 128)
+#endif
+
+#if !defined(__NR_sigaltstack)
+#define __NR_sigaltstack (__NR_Linux + 129)
+#endif
+
+#if !defined(__NR_utime)
+#define __NR_utime (__NR_Linux + 130)
+#endif
+
+#if !defined(__NR_mknod)
+#define __NR_mknod (__NR_Linux + 131)
+#endif
+
+#if !defined(__NR_personality)
+#define __NR_personality (__NR_Linux + 132)
+#endif
+
+#if !defined(__NR_ustat)
+#define __NR_ustat (__NR_Linux + 133)
+#endif
+
+#if !defined(__NR_statfs)
+#define __NR_statfs (__NR_Linux + 134)
+#endif
+
+#if !defined(__NR_fstatfs)
+#define __NR_fstatfs (__NR_Linux + 135)
+#endif
+
+#if !defined(__NR_sysfs)
+#define __NR_sysfs (__NR_Linux + 136)
+#endif
+
+#if !defined(__NR_getpriority)
+#define __NR_getpriority (__NR_Linux + 137)
+#endif
+
+#if !defined(__NR_setpriority)
+#define __NR_setpriority (__NR_Linux + 138)
+#endif
+
+#if !defined(__NR_sched_setparam)
+#define __NR_sched_setparam (__NR_Linux + 139)
+#endif
+
+#if !defined(__NR_sched_getparam)
+#define __NR_sched_getparam (__NR_Linux + 140)
+#endif
+
+#if !defined(__NR_sched_setscheduler)
+#define __NR_sched_setscheduler (__NR_Linux + 141)
+#endif
+
+#if !defined(__NR_sched_getscheduler)
+#define __NR_sched_getscheduler (__NR_Linux + 142)
+#endif
+
+#if !defined(__NR_sched_get_priority_max)
+#define __NR_sched_get_priority_max (__NR_Linux + 143)
+#endif
+
+#if !defined(__NR_sched_get_priority_min)
+#define __NR_sched_get_priority_min (__NR_Linux + 144)
+#endif
+
+#if !defined(__NR_sched_rr_get_interval)
+#define __NR_sched_rr_get_interval (__NR_Linux + 145)
+#endif
+
+#if !defined(__NR_mlock)
+#define __NR_mlock (__NR_Linux + 146)
+#endif
+
+#if !defined(__NR_munlock)
+#define __NR_munlock (__NR_Linux + 147)
+#endif
+
+#if !defined(__NR_mlockall)
+#define __NR_mlockall (__NR_Linux + 148)
+#endif
+
+#if !defined(__NR_munlockall)
+#define __NR_munlockall (__NR_Linux + 149)
+#endif
+
+#if !defined(__NR_vhangup)
+#define __NR_vhangup (__NR_Linux + 150)
+#endif
+
+#if !defined(__NR_pivot_root)
+#define __NR_pivot_root (__NR_Linux + 151)
+#endif
+
+#if !defined(__NR__sysctl)
+#define __NR__sysctl (__NR_Linux + 152)
+#endif
+
+#if !defined(__NR_prctl)
+#define __NR_prctl (__NR_Linux + 153)
+#endif
+
+#if !defined(__NR_adjtimex)
+#define __NR_adjtimex (__NR_Linux + 154)
+#endif
+
+#if !defined(__NR_setrlimit)
+#define __NR_setrlimit (__NR_Linux + 155)
+#endif
+
+#if !defined(__NR_chroot)
+#define __NR_chroot (__NR_Linux + 156)
+#endif
+
+#if !defined(__NR_sync)
+#define __NR_sync (__NR_Linux + 157)
+#endif
+
+#if !defined(__NR_acct)
+#define __NR_acct (__NR_Linux + 158)
+#endif
+
+#if !defined(__NR_settimeofday)
+#define __NR_settimeofday (__NR_Linux + 159)
+#endif
+
+#if !defined(__NR_mount)
+#define __NR_mount (__NR_Linux + 160)
+#endif
+
+#if !defined(__NR_umount2)
+#define __NR_umount2 (__NR_Linux + 161)
+#endif
+
+#if !defined(__NR_swapon)
+#define __NR_swapon (__NR_Linux + 162)
+#endif
+
+#if !defined(__NR_swapoff)
+#define __NR_swapoff (__NR_Linux + 163)
+#endif
+
+#if !defined(__NR_reboot)
+#define __NR_reboot (__NR_Linux + 164)
+#endif
+
+#if !defined(__NR_sethostname)
+#define __NR_sethostname (__NR_Linux + 165)
+#endif
+
+#if !defined(__NR_setdomainname)
+#define __NR_setdomainname (__NR_Linux + 166)
+#endif
+
+#if !defined(__NR_create_module)
+#define __NR_create_module (__NR_Linux + 167)
+#endif
+
+#if !defined(__NR_init_module)
+#define __NR_init_module (__NR_Linux + 168)
+#endif
+
+#if !defined(__NR_delete_module)
+#define __NR_delete_module (__NR_Linux + 169)
+#endif
+
+#if !defined(__NR_get_kernel_syms)
+#define __NR_get_kernel_syms (__NR_Linux + 170)
+#endif
+
+#if !defined(__NR_query_module)
+#define __NR_query_module (__NR_Linux + 171)
+#endif
+
+#if !defined(__NR_quotactl)
+#define __NR_quotactl (__NR_Linux + 172)
+#endif
+
+#if !defined(__NR_nfsservctl)
+#define __NR_nfsservctl (__NR_Linux + 173)
+#endif
+
+#if !defined(__NR_getpmsg)
+#define __NR_getpmsg (__NR_Linux + 174)
+#endif
+
+#if !defined(__NR_putpmsg)
+#define __NR_putpmsg (__NR_Linux + 175)
+#endif
+
+#if !defined(__NR_afs_syscall)
+#define __NR_afs_syscall (__NR_Linux + 176)
+#endif
+
+#if !defined(__NR_reserved177)
+#define __NR_reserved177 (__NR_Linux + 177)
+#endif
+
+#if !defined(__NR_gettid)
+#define __NR_gettid (__NR_Linux + 178)
+#endif
+
+#if !defined(__NR_readahead)
+#define __NR_readahead (__NR_Linux + 179)
+#endif
+
+#if !defined(__NR_setxattr)
+#define __NR_setxattr (__NR_Linux + 180)
+#endif
+
+#if !defined(__NR_lsetxattr)
+#define __NR_lsetxattr (__NR_Linux + 181)
+#endif
+
+#if !defined(__NR_fsetxattr)
+#define __NR_fsetxattr (__NR_Linux + 182)
+#endif
+
+#if !defined(__NR_getxattr)
+#define __NR_getxattr (__NR_Linux + 183)
+#endif
+
+#if !defined(__NR_lgetxattr)
+#define __NR_lgetxattr (__NR_Linux + 184)
+#endif
+
+#if !defined(__NR_fgetxattr)
+#define __NR_fgetxattr (__NR_Linux + 185)
+#endif
+
+#if !defined(__NR_listxattr)
+#define __NR_listxattr (__NR_Linux + 186)
+#endif
+
+#if !defined(__NR_llistxattr)
+#define __NR_llistxattr (__NR_Linux + 187)
+#endif
+
+#if !defined(__NR_flistxattr)
+#define __NR_flistxattr (__NR_Linux + 188)
+#endif
+
+#if !defined(__NR_removexattr)
+#define __NR_removexattr (__NR_Linux + 189)
+#endif
+
+#if !defined(__NR_lremovexattr)
+#define __NR_lremovexattr (__NR_Linux + 190)
+#endif
+
+#if !defined(__NR_fremovexattr)
+#define __NR_fremovexattr (__NR_Linux + 191)
+#endif
+
+#if !defined(__NR_tkill)
+#define __NR_tkill (__NR_Linux + 192)
+#endif
+
+#if !defined(__NR_reserved193)
+#define __NR_reserved193 (__NR_Linux + 193)
+#endif
+
+#if !defined(__NR_futex)
+#define __NR_futex (__NR_Linux + 194)
+#endif
+
+#if !defined(__NR_sched_setaffinity)
+#define __NR_sched_setaffinity (__NR_Linux + 195)
+#endif
+
+#if !defined(__NR_sched_getaffinity)
+#define __NR_sched_getaffinity (__NR_Linux + 196)
+#endif
+
+#if !defined(__NR_cacheflush)
+#define __NR_cacheflush (__NR_Linux + 197)
+#endif
+
+#if !defined(__NR_cachectl)
+#define __NR_cachectl (__NR_Linux + 198)
+#endif
+
+#if !defined(__NR_sysmips)
+#define __NR_sysmips (__NR_Linux + 199)
+#endif
+
+#if !defined(__NR_io_setup)
+#define __NR_io_setup (__NR_Linux + 200)
+#endif
+
+#if !defined(__NR_io_destroy)
+#define __NR_io_destroy (__NR_Linux + 201)
+#endif
+
+#if !defined(__NR_io_getevents)
+#define __NR_io_getevents (__NR_Linux + 202)
+#endif
+
+#if !defined(__NR_io_submit)
+#define __NR_io_submit (__NR_Linux + 203)
+#endif
+
+#if !defined(__NR_io_cancel)
+#define __NR_io_cancel (__NR_Linux + 204)
+#endif
+
+#if !defined(__NR_exit_group)
+#define __NR_exit_group (__NR_Linux + 205)
+#endif
+
+#if !defined(__NR_lookup_dcookie)
+#define __NR_lookup_dcookie (__NR_Linux + 206)
+#endif
+
+#if !defined(__NR_epoll_create)
+#define __NR_epoll_create (__NR_Linux + 207)
+#endif
+
+#if !defined(__NR_epoll_ctl)
+#define __NR_epoll_ctl (__NR_Linux + 208)
+#endif
+
+#if !defined(__NR_epoll_wait)
+#define __NR_epoll_wait (__NR_Linux + 209)
+#endif
+
+#if !defined(__NR_remap_file_pages)
+#define __NR_remap_file_pages (__NR_Linux + 210)
+#endif
+
+#if !defined(__NR_rt_sigreturn)
+#define __NR_rt_sigreturn (__NR_Linux + 211)
+#endif
+
+#if !defined(__NR_set_tid_address)
+#define __NR_set_tid_address (__NR_Linux + 212)
+#endif
+
+#if !defined(__NR_restart_syscall)
+#define __NR_restart_syscall (__NR_Linux + 213)
+#endif
+
+#if !defined(__NR_semtimedop)
+#define __NR_semtimedop (__NR_Linux + 214)
+#endif
+
+#if !defined(__NR_fadvise64)
+#define __NR_fadvise64 (__NR_Linux + 215)
+#endif
+
+#if !defined(__NR_timer_create)
+#define __NR_timer_create (__NR_Linux + 216)
+#endif
+
+#if !defined(__NR_timer_settime)
+#define __NR_timer_settime (__NR_Linux + 217)
+#endif
+
+#if !defined(__NR_timer_gettime)
+#define __NR_timer_gettime (__NR_Linux + 218)
+#endif
+
+#if !defined(__NR_timer_getoverrun)
+#define __NR_timer_getoverrun (__NR_Linux + 219)
+#endif
+
+#if !defined(__NR_timer_delete)
+#define __NR_timer_delete (__NR_Linux + 220)
+#endif
+
+#if !defined(__NR_clock_settime)
+#define __NR_clock_settime (__NR_Linux + 221)
+#endif
+
+#if !defined(__NR_clock_gettime)
+#define __NR_clock_gettime (__NR_Linux + 222)
+#endif
+
+#if !defined(__NR_clock_getres)
+#define __NR_clock_getres (__NR_Linux + 223)
+#endif
+
+#if !defined(__NR_clock_nanosleep)
+#define __NR_clock_nanosleep (__NR_Linux + 224)
+#endif
+
+#if !defined(__NR_tgkill)
+#define __NR_tgkill (__NR_Linux + 225)
+#endif
+
+#if !defined(__NR_utimes)
+#define __NR_utimes (__NR_Linux + 226)
+#endif
+
+#if !defined(__NR_mbind)
+#define __NR_mbind (__NR_Linux + 227)
+#endif
+
+#if !defined(__NR_get_mempolicy)
+#define __NR_get_mempolicy (__NR_Linux + 228)
+#endif
+
+#if !defined(__NR_set_mempolicy)
+#define __NR_set_mempolicy (__NR_Linux + 229)
+#endif
+
+#if !defined(__NR_mq_open)
+#define __NR_mq_open (__NR_Linux + 230)
+#endif
+
+#if !defined(__NR_mq_unlink)
+#define __NR_mq_unlink (__NR_Linux + 231)
+#endif
+
+#if !defined(__NR_mq_timedsend)
+#define __NR_mq_timedsend (__NR_Linux + 232)
+#endif
+
+#if !defined(__NR_mq_timedreceive)
+#define __NR_mq_timedreceive (__NR_Linux + 233)
+#endif
+
+#if !defined(__NR_mq_notify)
+#define __NR_mq_notify (__NR_Linux + 234)
+#endif
+
+#if !defined(__NR_mq_getsetattr)
+#define __NR_mq_getsetattr (__NR_Linux + 235)
+#endif
+
+#if !defined(__NR_vserver)
+#define __NR_vserver (__NR_Linux + 236)
+#endif
+
+#if !defined(__NR_waitid)
+#define __NR_waitid (__NR_Linux + 237)
+#endif
+
+/* #define __NR_sys_setaltroot (__NR_Linux + 238) */
+
+#if !defined(__NR_add_key)
+#define __NR_add_key (__NR_Linux + 239)
+#endif
+
+#if !defined(__NR_request_key)
+#define __NR_request_key (__NR_Linux + 240)
+#endif
+
+#if !defined(__NR_keyctl)
+#define __NR_keyctl (__NR_Linux + 241)
+#endif
+
+#if !defined(__NR_set_thread_area)
+#define __NR_set_thread_area (__NR_Linux + 242)
+#endif
+
+#if !defined(__NR_inotify_init)
+#define __NR_inotify_init (__NR_Linux + 243)
+#endif
+
+#if !defined(__NR_inotify_add_watch)
+#define __NR_inotify_add_watch (__NR_Linux + 244)
+#endif
+
+#if !defined(__NR_inotify_rm_watch)
+#define __NR_inotify_rm_watch (__NR_Linux + 245)
+#endif
+
+#if !defined(__NR_migrate_pages)
+#define __NR_migrate_pages (__NR_Linux + 246)
+#endif
+
+#if !defined(__NR_openat)
+#define __NR_openat (__NR_Linux + 247)
+#endif
+
+#if !defined(__NR_mkdirat)
+#define __NR_mkdirat (__NR_Linux + 248)
+#endif
+
+#if !defined(__NR_mknodat)
+#define __NR_mknodat (__NR_Linux + 249)
+#endif
+
+#if !defined(__NR_fchownat)
+#define __NR_fchownat (__NR_Linux + 250)
+#endif
+
+#if !defined(__NR_futimesat)
+#define __NR_futimesat (__NR_Linux + 251)
+#endif
+
+#if !defined(__NR_newfstatat)
+#define __NR_newfstatat (__NR_Linux + 252)
+#endif
+
+#if !defined(__NR_unlinkat)
+#define __NR_unlinkat (__NR_Linux + 253)
+#endif
+
+#if !defined(__NR_renameat)
+#define __NR_renameat (__NR_Linux + 254)
+#endif
+
+#if !defined(__NR_linkat)
+#define __NR_linkat (__NR_Linux + 255)
+#endif
+
+#if !defined(__NR_symlinkat)
+#define __NR_symlinkat (__NR_Linux + 256)
+#endif
+
+#if !defined(__NR_readlinkat)
+#define __NR_readlinkat (__NR_Linux + 257)
+#endif
+
+#if !defined(__NR_fchmodat)
+#define __NR_fchmodat (__NR_Linux + 258)
+#endif
+
+#if !defined(__NR_faccessat)
+#define __NR_faccessat (__NR_Linux + 259)
+#endif
+
+#if !defined(__NR_pselect6)
+#define __NR_pselect6 (__NR_Linux + 260)
+#endif
+
+#if !defined(__NR_ppoll)
+#define __NR_ppoll (__NR_Linux + 261)
+#endif
+
+#if !defined(__NR_unshare)
+#define __NR_unshare (__NR_Linux + 262)
+#endif
+
+#if !defined(__NR_splice)
+#define __NR_splice (__NR_Linux + 263)
+#endif
+
+#if !defined(__NR_sync_file_range)
+#define __NR_sync_file_range (__NR_Linux + 264)
+#endif
+
+#if !defined(__NR_tee)
+#define __NR_tee (__NR_Linux + 265)
+#endif
+
+#if !defined(__NR_vmsplice)
+#define __NR_vmsplice (__NR_Linux + 266)
+#endif
+
+#if !defined(__NR_move_pages)
+#define __NR_move_pages (__NR_Linux + 267)
+#endif
+
+#if !defined(__NR_set_robust_list)
+#define __NR_set_robust_list (__NR_Linux + 268)
+#endif
+
+#if !defined(__NR_get_robust_list)
+#define __NR_get_robust_list (__NR_Linux + 269)
+#endif
+
+#if !defined(__NR_kexec_load)
+#define __NR_kexec_load (__NR_Linux + 270)
+#endif
+
+#if !defined(__NR_getcpu)
+#define __NR_getcpu (__NR_Linux + 271)
+#endif
+
+#if !defined(__NR_epoll_pwait)
+#define __NR_epoll_pwait (__NR_Linux + 272)
+#endif
+
+#if !defined(__NR_ioprio_set)
+#define __NR_ioprio_set (__NR_Linux + 273)
+#endif
+
+#if !defined(__NR_ioprio_get)
+#define __NR_ioprio_get (__NR_Linux + 274)
+#endif
+
+#if !defined(__NR_utimensat)
+#define __NR_utimensat (__NR_Linux + 275)
+#endif
+
+#if !defined(__NR_signalfd)
+#define __NR_signalfd (__NR_Linux + 276)
+#endif
+
+#if !defined(__NR_timerfd)
+#define __NR_timerfd (__NR_Linux + 277)
+#endif
+
+#if !defined(__NR_eventfd)
+#define __NR_eventfd (__NR_Linux + 278)
+#endif
+
+#if !defined(__NR_fallocate)
+#define __NR_fallocate (__NR_Linux + 279)
+#endif
+
+#if !defined(__NR_timerfd_create)
+#define __NR_timerfd_create (__NR_Linux + 280)
+#endif
+
+#if !defined(__NR_timerfd_gettime)
+#define __NR_timerfd_gettime (__NR_Linux + 281)
+#endif
+
+#if !defined(__NR_timerfd_settime)
+#define __NR_timerfd_settime (__NR_Linux + 282)
+#endif
+
+#if !defined(__NR_signalfd4)
+#define __NR_signalfd4 (__NR_Linux + 283)
+#endif
+
+#if !defined(__NR_eventfd2)
+#define __NR_eventfd2 (__NR_Linux + 284)
+#endif
+
+#if !defined(__NR_epoll_create1)
+#define __NR_epoll_create1 (__NR_Linux + 285)
+#endif
+
+#if !defined(__NR_dup3)
+#define __NR_dup3 (__NR_Linux + 286)
+#endif
+
+#if !defined(__NR_pipe2)
+#define __NR_pipe2 (__NR_Linux + 287)
+#endif
+
+#if !defined(__NR_inotify_init1)
+#define __NR_inotify_init1 (__NR_Linux + 288)
+#endif
+
+#if !defined(__NR_preadv)
+#define __NR_preadv (__NR_Linux + 289)
+#endif
+
+#if !defined(__NR_pwritev)
+#define __NR_pwritev (__NR_Linux + 290)
+#endif
+
+#if !defined(__NR_rt_tgsigqueueinfo)
+#define __NR_rt_tgsigqueueinfo (__NR_Linux + 291)
+#endif
+
+#if !defined(__NR_perf_event_open)
+#define __NR_perf_event_open (__NR_Linux + 292)
+#endif
+
+#if !defined(__NR_accept4)
+#define __NR_accept4 (__NR_Linux + 293)
+#endif
+
+#if !defined(__NR_recvmmsg)
+#define __NR_recvmmsg (__NR_Linux + 294)
+#endif
+
+#if !defined(__NR_fanotify_init)
+#define __NR_fanotify_init (__NR_Linux + 295)
+#endif
+
+#if !defined(__NR_fanotify_mark)
+#define __NR_fanotify_mark (__NR_Linux + 296)
+#endif
+
+#if !defined(__NR_prlimit64)
+#define __NR_prlimit64 (__NR_Linux + 297)
+#endif
+
+#if !defined(__NR_name_to_handle_at)
+#define __NR_name_to_handle_at (__NR_Linux + 298)
+#endif
+
+#if !defined(__NR_open_by_handle_at)
+#define __NR_open_by_handle_at (__NR_Linux + 299)
+#endif
+
+#if !defined(__NR_clock_adjtime)
+#define __NR_clock_adjtime (__NR_Linux + 300)
+#endif
+
+#if !defined(__NR_syncfs)
+#define __NR_syncfs (__NR_Linux + 301)
+#endif
+
+#if !defined(__NR_sendmmsg)
+#define __NR_sendmmsg (__NR_Linux + 302)
+#endif
+
+#if !defined(__NR_setns)
+#define __NR_setns (__NR_Linux + 303)
+#endif
+
+#if !defined(__NR_process_vm_readv)
+#define __NR_process_vm_readv (__NR_Linux + 304)
+#endif
+
+#if !defined(__NR_process_vm_writev)
+#define __NR_process_vm_writev (__NR_Linux + 305)
+#endif
+
+#if !defined(__NR_kcmp)
+#define __NR_kcmp (__NR_Linux + 306)
+#endif
+
+#if !defined(__NR_finit_module)
+#define __NR_finit_module (__NR_Linux + 307)
+#endif
+
+#if !defined(__NR_getdents64)
+#define __NR_getdents64 (__NR_Linux + 308)
+#endif
+
+#if !defined(__NR_sched_setattr)
+#define __NR_sched_setattr (__NR_Linux + 309)
+#endif
+
+#if !defined(__NR_sched_getattr)
+#define __NR_sched_getattr (__NR_Linux + 310)
+#endif
+
+#if !defined(__NR_renameat2)
+#define __NR_renameat2 (__NR_Linux + 311)
+#endif
+
+#if !defined(__NR_seccomp)
+#define __NR_seccomp (__NR_Linux + 312)
+#endif
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_MIPS64_LINUX_SYSCALLS_H_
diff --git a/libchrome/sandbox/linux/system_headers/mips_linux_syscalls.h b/libchrome/sandbox/linux/system_headers/mips_linux_syscalls.h
new file mode 100644
index 0000000..eb1717a
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/mips_linux_syscalls.h
@@ -0,0 +1,1428 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated from the Linux kernel's calls.S.
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_MIPS_LINUX_SYSCALLS_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_MIPS_LINUX_SYSCALLS_H_
+
+#if !defined(__mips__) || (_MIPS_SIM != _ABIO32)
+#error "Including header on wrong architecture"
+#endif
+
+// __NR_Linux, is defined in <asm/unistd.h>.
+#include <asm/unistd.h>
+
+#if !defined(__NR_syscall)
+#define __NR_syscall (__NR_Linux + 0)
+#endif
+
+#if !defined(__NR_exit)
+#define __NR_exit (__NR_Linux + 1)
+#endif
+
+#if !defined(__NR_fork)
+#define __NR_fork (__NR_Linux + 2)
+#endif
+
+#if !defined(__NR_read)
+#define __NR_read (__NR_Linux + 3)
+#endif
+
+#if !defined(__NR_write)
+#define __NR_write (__NR_Linux + 4)
+#endif
+
+#if !defined(__NR_open)
+#define __NR_open (__NR_Linux + 5)
+#endif
+
+#if !defined(__NR_close)
+#define __NR_close (__NR_Linux + 6)
+#endif
+
+#if !defined(__NR_waitpid)
+#define __NR_waitpid (__NR_Linux + 7)
+#endif
+
+#if !defined(__NR_creat)
+#define __NR_creat (__NR_Linux + 8)
+#endif
+
+#if !defined(__NR_link)
+#define __NR_link (__NR_Linux + 9)
+#endif
+
+#if !defined(__NR_unlink)
+#define __NR_unlink (__NR_Linux + 10)
+#endif
+
+#if !defined(__NR_execve)
+#define __NR_execve (__NR_Linux + 11)
+#endif
+
+#if !defined(__NR_chdir)
+#define __NR_chdir (__NR_Linux + 12)
+#endif
+
+#if !defined(__NR_time)
+#define __NR_time (__NR_Linux + 13)
+#endif
+
+#if !defined(__NR_mknod)
+#define __NR_mknod (__NR_Linux + 14)
+#endif
+
+#if !defined(__NR_chmod)
+#define __NR_chmod (__NR_Linux + 15)
+#endif
+
+#if !defined(__NR_lchown)
+#define __NR_lchown (__NR_Linux + 16)
+#endif
+
+#if !defined(__NR_break)
+#define __NR_break (__NR_Linux + 17)
+#endif
+
+#if !defined(__NR_unused18)
+#define __NR_unused18 (__NR_Linux + 18)
+#endif
+
+#if !defined(__NR_lseek)
+#define __NR_lseek (__NR_Linux + 19)
+#endif
+
+#if !defined(__NR_getpid)
+#define __NR_getpid (__NR_Linux + 20)
+#endif
+
+#if !defined(__NR_mount)
+#define __NR_mount (__NR_Linux + 21)
+#endif
+
+#if !defined(__NR_umount)
+#define __NR_umount (__NR_Linux + 22)
+#endif
+
+#if !defined(__NR_setuid)
+#define __NR_setuid (__NR_Linux + 23)
+#endif
+
+#if !defined(__NR_getuid)
+#define __NR_getuid (__NR_Linux + 24)
+#endif
+
+#if !defined(__NR_stime)
+#define __NR_stime (__NR_Linux + 25)
+#endif
+
+#if !defined(__NR_ptrace)
+#define __NR_ptrace (__NR_Linux + 26)
+#endif
+
+#if !defined(__NR_alarm)
+#define __NR_alarm (__NR_Linux + 27)
+#endif
+
+#if !defined(__NR_unused28)
+#define __NR_unused28 (__NR_Linux + 28)
+#endif
+
+#if !defined(__NR_pause)
+#define __NR_pause (__NR_Linux + 29)
+#endif
+
+#if !defined(__NR_utime)
+#define __NR_utime (__NR_Linux + 30)
+#endif
+
+#if !defined(__NR_stty)
+#define __NR_stty (__NR_Linux + 31)
+#endif
+
+#if !defined(__NR_gtty)
+#define __NR_gtty (__NR_Linux + 32)
+#endif
+
+#if !defined(__NR_access)
+#define __NR_access (__NR_Linux + 33)
+#endif
+
+#if !defined(__NR_nice)
+#define __NR_nice (__NR_Linux + 34)
+#endif
+
+#if !defined(__NR_ftime)
+#define __NR_ftime (__NR_Linux + 35)
+#endif
+
+#if !defined(__NR_sync)
+#define __NR_sync (__NR_Linux + 36)
+#endif
+
+#if !defined(__NR_kill)
+#define __NR_kill (__NR_Linux + 37)
+#endif
+
+#if !defined(__NR_rename)
+#define __NR_rename (__NR_Linux + 38)
+#endif
+
+#if !defined(__NR_mkdir)
+#define __NR_mkdir (__NR_Linux + 39)
+#endif
+
+#if !defined(__NR_rmdir)
+#define __NR_rmdir (__NR_Linux + 40)
+#endif
+
+#if !defined(__NR_dup)
+#define __NR_dup (__NR_Linux + 41)
+#endif
+
+#if !defined(__NR_pipe)
+#define __NR_pipe (__NR_Linux + 42)
+#endif
+
+#if !defined(__NR_times)
+#define __NR_times (__NR_Linux + 43)
+#endif
+
+#if !defined(__NR_prof)
+#define __NR_prof (__NR_Linux + 44)
+#endif
+
+#if !defined(__NR_brk)
+#define __NR_brk (__NR_Linux + 45)
+#endif
+
+#if !defined(__NR_setgid)
+#define __NR_setgid (__NR_Linux + 46)
+#endif
+
+#if !defined(__NR_getgid)
+#define __NR_getgid (__NR_Linux + 47)
+#endif
+
+#if !defined(__NR_signal)
+#define __NR_signal (__NR_Linux + 48)
+#endif
+
+#if !defined(__NR_geteuid)
+#define __NR_geteuid (__NR_Linux + 49)
+#endif
+
+#if !defined(__NR_getegid)
+#define __NR_getegid (__NR_Linux + 50)
+#endif
+
+#if !defined(__NR_acct)
+#define __NR_acct (__NR_Linux + 51)
+#endif
+
+#if !defined(__NR_umount2)
+#define __NR_umount2 (__NR_Linux + 52)
+#endif
+
+#if !defined(__NR_lock)
+#define __NR_lock (__NR_Linux + 53)
+#endif
+
+#if !defined(__NR_ioctl)
+#define __NR_ioctl (__NR_Linux + 54)
+#endif
+
+#if !defined(__NR_fcntl)
+#define __NR_fcntl (__NR_Linux + 55)
+#endif
+
+#if !defined(__NR_mpx)
+#define __NR_mpx (__NR_Linux + 56)
+#endif
+
+#if !defined(__NR_setpgid)
+#define __NR_setpgid (__NR_Linux + 57)
+#endif
+
+#if !defined(__NR_ulimit)
+#define __NR_ulimit (__NR_Linux + 58)
+#endif
+
+#if !defined(__NR_unused59)
+#define __NR_unused59 (__NR_Linux + 59)
+#endif
+
+#if !defined(__NR_umask)
+#define __NR_umask (__NR_Linux + 60)
+#endif
+
+#if !defined(__NR_chroot)
+#define __NR_chroot (__NR_Linux + 61)
+#endif
+
+#if !defined(__NR_ustat)
+#define __NR_ustat (__NR_Linux + 62)
+#endif
+
+#if !defined(__NR_dup2)
+#define __NR_dup2 (__NR_Linux + 63)
+#endif
+
+#if !defined(__NR_getppid)
+#define __NR_getppid (__NR_Linux + 64)
+#endif
+
+#if !defined(__NR_getpgrp)
+#define __NR_getpgrp (__NR_Linux + 65)
+#endif
+
+#if !defined(__NR_setsid)
+#define __NR_setsid (__NR_Linux + 66)
+#endif
+
+#if !defined(__NR_sigaction)
+#define __NR_sigaction (__NR_Linux + 67)
+#endif
+
+#if !defined(__NR_sgetmask)
+#define __NR_sgetmask (__NR_Linux + 68)
+#endif
+
+#if !defined(__NR_ssetmask)
+#define __NR_ssetmask (__NR_Linux + 69)
+#endif
+
+#if !defined(__NR_setreuid)
+#define __NR_setreuid (__NR_Linux + 70)
+#endif
+
+#if !defined(__NR_setregid)
+#define __NR_setregid (__NR_Linux + 71)
+#endif
+
+#if !defined(__NR_sigsuspend)
+#define __NR_sigsuspend (__NR_Linux + 72)
+#endif
+
+#if !defined(__NR_sigpending)
+#define __NR_sigpending (__NR_Linux + 73)
+#endif
+
+#if !defined(__NR_sethostname)
+#define __NR_sethostname (__NR_Linux + 74)
+#endif
+
+#if !defined(__NR_setrlimit)
+#define __NR_setrlimit (__NR_Linux + 75)
+#endif
+
+#if !defined(__NR_getrlimit)
+#define __NR_getrlimit (__NR_Linux + 76)
+#endif
+
+#if !defined(__NR_getrusage)
+#define __NR_getrusage (__NR_Linux + 77)
+#endif
+
+#if !defined(__NR_gettimeofday)
+#define __NR_gettimeofday (__NR_Linux + 78)
+#endif
+
+#if !defined(__NR_settimeofday)
+#define __NR_settimeofday (__NR_Linux + 79)
+#endif
+
+#if !defined(__NR_getgroups)
+#define __NR_getgroups (__NR_Linux + 80)
+#endif
+
+#if !defined(__NR_setgroups)
+#define __NR_setgroups (__NR_Linux + 81)
+#endif
+
+#if !defined(__NR_reserved82)
+#define __NR_reserved82 (__NR_Linux + 82)
+#endif
+
+#if !defined(__NR_symlink)
+#define __NR_symlink (__NR_Linux + 83)
+#endif
+
+#if !defined(__NR_unused84)
+#define __NR_unused84 (__NR_Linux + 84)
+#endif
+
+#if !defined(__NR_readlink)
+#define __NR_readlink (__NR_Linux + 85)
+#endif
+
+#if !defined(__NR_uselib)
+#define __NR_uselib (__NR_Linux + 86)
+#endif
+
+#if !defined(__NR_swapon)
+#define __NR_swapon (__NR_Linux + 87)
+#endif
+
+#if !defined(__NR_reboot)
+#define __NR_reboot (__NR_Linux + 88)
+#endif
+
+#if !defined(__NR_readdir)
+#define __NR_readdir (__NR_Linux + 89)
+#endif
+
+#if !defined(__NR_mmap)
+#define __NR_mmap (__NR_Linux + 90)
+#endif
+
+#if !defined(__NR_munmap)
+#define __NR_munmap (__NR_Linux + 91)
+#endif
+
+#if !defined(__NR_truncate)
+#define __NR_truncate (__NR_Linux + 92)
+#endif
+
+#if !defined(__NR_ftruncate)
+#define __NR_ftruncate (__NR_Linux + 93)
+#endif
+
+#if !defined(__NR_fchmod)
+#define __NR_fchmod (__NR_Linux + 94)
+#endif
+
+#if !defined(__NR_fchown)
+#define __NR_fchown (__NR_Linux + 95)
+#endif
+
+#if !defined(__NR_getpriority)
+#define __NR_getpriority (__NR_Linux + 96)
+#endif
+
+#if !defined(__NR_setpriority)
+#define __NR_setpriority (__NR_Linux + 97)
+#endif
+
+#if !defined(__NR_profil)
+#define __NR_profil (__NR_Linux + 98)
+#endif
+
+#if !defined(__NR_statfs)
+#define __NR_statfs (__NR_Linux + 99)
+#endif
+
+#if !defined(__NR_fstatfs)
+#define __NR_fstatfs (__NR_Linux + 100)
+#endif
+
+#if !defined(__NR_ioperm)
+#define __NR_ioperm (__NR_Linux + 101)
+#endif
+
+#if !defined(__NR_socketcall)
+#define __NR_socketcall (__NR_Linux + 102)
+#endif
+
+#if !defined(__NR_syslog)
+#define __NR_syslog (__NR_Linux + 103)
+#endif
+
+#if !defined(__NR_setitimer)
+#define __NR_setitimer (__NR_Linux + 104)
+#endif
+
+#if !defined(__NR_getitimer)
+#define __NR_getitimer (__NR_Linux + 105)
+#endif
+
+#if !defined(__NR_stat)
+#define __NR_stat (__NR_Linux + 106)
+#endif
+
+#if !defined(__NR_lstat)
+#define __NR_lstat (__NR_Linux + 107)
+#endif
+
+#if !defined(__NR_fstat)
+#define __NR_fstat (__NR_Linux + 108)
+#endif
+
+#if !defined(__NR_unused109)
+#define __NR_unused109 (__NR_Linux + 109)
+#endif
+
+#if !defined(__NR_iopl)
+#define __NR_iopl (__NR_Linux + 110)
+#endif
+
+#if !defined(__NR_vhangup)
+#define __NR_vhangup (__NR_Linux + 111)
+#endif
+
+#if !defined(__NR_idle)
+#define __NR_idle (__NR_Linux + 112)
+#endif
+
+#if !defined(__NR_vm86)
+#define __NR_vm86 (__NR_Linux + 113)
+#endif
+
+#if !defined(__NR_wait4)
+#define __NR_wait4 (__NR_Linux + 114)
+#endif
+
+#if !defined(__NR_swapoff)
+#define __NR_swapoff (__NR_Linux + 115)
+#endif
+
+#if !defined(__NR_sysinfo)
+#define __NR_sysinfo (__NR_Linux + 116)
+#endif
+
+#if !defined(__NR_ipc)
+#define __NR_ipc (__NR_Linux + 117)
+#endif
+
+#if !defined(__NR_fsync)
+#define __NR_fsync (__NR_Linux + 118)
+#endif
+
+#if !defined(__NR_sigreturn)
+#define __NR_sigreturn (__NR_Linux + 119)
+#endif
+
+#if !defined(__NR_clone)
+#define __NR_clone (__NR_Linux + 120)
+#endif
+
+#if !defined(__NR_setdomainname)
+#define __NR_setdomainname (__NR_Linux + 121)
+#endif
+
+#if !defined(__NR_uname)
+#define __NR_uname (__NR_Linux + 122)
+#endif
+
+#if !defined(__NR_modify_ldt)
+#define __NR_modify_ldt (__NR_Linux + 123)
+#endif
+
+#if !defined(__NR_adjtimex)
+#define __NR_adjtimex (__NR_Linux + 124)
+#endif
+
+#if !defined(__NR_mprotect)
+#define __NR_mprotect (__NR_Linux + 125)
+#endif
+
+#if !defined(__NR_sigprocmask)
+#define __NR_sigprocmask (__NR_Linux + 126)
+#endif
+
+#if !defined(__NR_create_module)
+#define __NR_create_module (__NR_Linux + 127)
+#endif
+
+#if !defined(__NR_init_module)
+#define __NR_init_module (__NR_Linux + 128)
+#endif
+
+#if !defined(__NR_delete_module)
+#define __NR_delete_module (__NR_Linux + 129)
+#endif
+
+#if !defined(__NR_get_kernel_syms)
+#define __NR_get_kernel_syms (__NR_Linux + 130)
+#endif
+
+#if !defined(__NR_quotactl)
+#define __NR_quotactl (__NR_Linux + 131)
+#endif
+
+#if !defined(__NR_getpgid)
+#define __NR_getpgid (__NR_Linux + 132)
+#endif
+
+#if !defined(__NR_fchdir)
+#define __NR_fchdir (__NR_Linux + 133)
+#endif
+
+#if !defined(__NR_bdflush)
+#define __NR_bdflush (__NR_Linux + 134)
+#endif
+
+#if !defined(__NR_sysfs)
+#define __NR_sysfs (__NR_Linux + 135)
+#endif
+
+#if !defined(__NR_personality)
+#define __NR_personality (__NR_Linux + 136)
+#endif
+
+#if !defined(__NR_afs_syscall)
+#define __NR_afs_syscall \
+ (__NR_Linux + 137) /* Syscall for Andrew File System \
+ */
+#endif
+
+#if !defined(__NR_setfsuid)
+#define __NR_setfsuid (__NR_Linux + 138)
+#endif
+
+#if !defined(__NR_setfsgid)
+#define __NR_setfsgid (__NR_Linux + 139)
+#endif
+
+#if !defined(__NR__llseek)
+#define __NR__llseek (__NR_Linux + 140)
+#endif
+
+#if !defined(__NR_getdents)
+#define __NR_getdents (__NR_Linux + 141)
+#endif
+
+#if !defined(__NR__newselect)
+#define __NR__newselect (__NR_Linux + 142)
+#endif
+
+#if !defined(__NR_flock)
+#define __NR_flock (__NR_Linux + 143)
+#endif
+
+#if !defined(__NR_msync)
+#define __NR_msync (__NR_Linux + 144)
+#endif
+
+#if !defined(__NR_readv)
+#define __NR_readv (__NR_Linux + 145)
+#endif
+
+#if !defined(__NR_writev)
+#define __NR_writev (__NR_Linux + 146)
+#endif
+
+#if !defined(__NR_cacheflush)
+#define __NR_cacheflush (__NR_Linux + 147)
+#endif
+
+#if !defined(__NR_cachectl)
+#define __NR_cachectl (__NR_Linux + 148)
+#endif
+
+#if !defined(__NR_sysmips)
+#define __NR_sysmips (__NR_Linux + 149)
+#endif
+
+#if !defined(__NR_unused150)
+#define __NR_unused150 (__NR_Linux + 150)
+#endif
+
+#if !defined(__NR_getsid)
+#define __NR_getsid (__NR_Linux + 151)
+#endif
+
+#if !defined(__NR_fdatasync)
+#define __NR_fdatasync (__NR_Linux + 152)
+#endif
+
+#if !defined(__NR__sysctl)
+#define __NR__sysctl (__NR_Linux + 153)
+#endif
+
+#if !defined(__NR_mlock)
+#define __NR_mlock (__NR_Linux + 154)
+#endif
+
+#if !defined(__NR_munlock)
+#define __NR_munlock (__NR_Linux + 155)
+#endif
+
+#if !defined(__NR_mlockall)
+#define __NR_mlockall (__NR_Linux + 156)
+#endif
+
+#if !defined(__NR_munlockall)
+#define __NR_munlockall (__NR_Linux + 157)
+#endif
+
+#if !defined(__NR_sched_setparam)
+#define __NR_sched_setparam (__NR_Linux + 158)
+#endif
+
+#if !defined(__NR_sched_getparam)
+#define __NR_sched_getparam (__NR_Linux + 159)
+#endif
+
+#if !defined(__NR_sched_setscheduler)
+#define __NR_sched_setscheduler (__NR_Linux + 160)
+#endif
+
+#if !defined(__NR_sched_getscheduler)
+#define __NR_sched_getscheduler (__NR_Linux + 161)
+#endif
+
+#if !defined(__NR_sched_yield)
+#define __NR_sched_yield (__NR_Linux + 162)
+#endif
+
+#if !defined(__NR_sched_get_priority_max)
+#define __NR_sched_get_priority_max (__NR_Linux + 163)
+#endif
+
+#if !defined(__NR_sched_get_priority_min)
+#define __NR_sched_get_priority_min (__NR_Linux + 164)
+#endif
+
+#if !defined(__NR_sched_rr_get_interval)
+#define __NR_sched_rr_get_interval (__NR_Linux + 165)
+#endif
+
+#if !defined(__NR_nanosleep)
+#define __NR_nanosleep (__NR_Linux + 166)
+#endif
+
+#if !defined(__NR_mremap)
+#define __NR_mremap (__NR_Linux + 167)
+#endif
+
+#if !defined(__NR_accept)
+#define __NR_accept (__NR_Linux + 168)
+#endif
+
+#if !defined(__NR_bind)
+#define __NR_bind (__NR_Linux + 169)
+#endif
+
+#if !defined(__NR_connect)
+#define __NR_connect (__NR_Linux + 170)
+#endif
+
+#if !defined(__NR_getpeername)
+#define __NR_getpeername (__NR_Linux + 171)
+#endif
+
+#if !defined(__NR_getsockname)
+#define __NR_getsockname (__NR_Linux + 172)
+#endif
+
+#if !defined(__NR_getsockopt)
+#define __NR_getsockopt (__NR_Linux + 173)
+#endif
+
+#if !defined(__NR_listen)
+#define __NR_listen (__NR_Linux + 174)
+#endif
+
+#if !defined(__NR_recv)
+#define __NR_recv (__NR_Linux + 175)
+#endif
+
+#if !defined(__NR_recvfrom)
+#define __NR_recvfrom (__NR_Linux + 176)
+#endif
+
+#if !defined(__NR_recvmsg)
+#define __NR_recvmsg (__NR_Linux + 177)
+#endif
+
+#if !defined(__NR_send)
+#define __NR_send (__NR_Linux + 178)
+#endif
+
+#if !defined(__NR_sendmsg)
+#define __NR_sendmsg (__NR_Linux + 179)
+#endif
+
+#if !defined(__NR_sendto)
+#define __NR_sendto (__NR_Linux + 180)
+#endif
+
+#if !defined(__NR_setsockopt)
+#define __NR_setsockopt (__NR_Linux + 181)
+#endif
+
+#if !defined(__NR_shutdown)
+#define __NR_shutdown (__NR_Linux + 182)
+#endif
+
+#if !defined(__NR_socket)
+#define __NR_socket (__NR_Linux + 183)
+#endif
+
+#if !defined(__NR_socketpair)
+#define __NR_socketpair (__NR_Linux + 184)
+#endif
+
+#if !defined(__NR_setresuid)
+#define __NR_setresuid (__NR_Linux + 185)
+#endif
+
+#if !defined(__NR_getresuid)
+#define __NR_getresuid (__NR_Linux + 186)
+#endif
+
+#if !defined(__NR_query_module)
+#define __NR_query_module (__NR_Linux + 187)
+#endif
+
+#if !defined(__NR_poll)
+#define __NR_poll (__NR_Linux + 188)
+#endif
+
+#if !defined(__NR_nfsservctl)
+#define __NR_nfsservctl (__NR_Linux + 189)
+#endif
+
+#if !defined(__NR_setresgid)
+#define __NR_setresgid (__NR_Linux + 190)
+#endif
+
+#if !defined(__NR_getresgid)
+#define __NR_getresgid (__NR_Linux + 191)
+#endif
+
+#if !defined(__NR_prctl)
+#define __NR_prctl (__NR_Linux + 192)
+#endif
+
+#if !defined(__NR_rt_sigreturn)
+#define __NR_rt_sigreturn (__NR_Linux + 193)
+#endif
+
+#if !defined(__NR_rt_sigaction)
+#define __NR_rt_sigaction (__NR_Linux + 194)
+#endif
+
+#if !defined(__NR_rt_sigprocmask)
+#define __NR_rt_sigprocmask (__NR_Linux + 195)
+#endif
+
+#if !defined(__NR_rt_sigpending)
+#define __NR_rt_sigpending (__NR_Linux + 196)
+#endif
+
+#if !defined(__NR_rt_sigtimedwait)
+#define __NR_rt_sigtimedwait (__NR_Linux + 197)
+#endif
+
+#if !defined(__NR_rt_sigqueueinfo)
+#define __NR_rt_sigqueueinfo (__NR_Linux + 198)
+#endif
+
+#if !defined(__NR_rt_sigsuspend)
+#define __NR_rt_sigsuspend (__NR_Linux + 199)
+#endif
+
+#if !defined(__NR_pread64)
+#define __NR_pread64 (__NR_Linux + 200)
+#endif
+
+#if !defined(__NR_pwrite64)
+#define __NR_pwrite64 (__NR_Linux + 201)
+#endif
+
+#if !defined(__NR_chown)
+#define __NR_chown (__NR_Linux + 202)
+#endif
+
+#if !defined(__NR_getcwd)
+#define __NR_getcwd (__NR_Linux + 203)
+#endif
+
+#if !defined(__NR_capget)
+#define __NR_capget (__NR_Linux + 204)
+#endif
+
+#if !defined(__NR_capset)
+#define __NR_capset (__NR_Linux + 205)
+#endif
+
+#if !defined(__NR_sigaltstack)
+#define __NR_sigaltstack (__NR_Linux + 206)
+#endif
+
+#if !defined(__NR_sendfile)
+#define __NR_sendfile (__NR_Linux + 207)
+#endif
+
+#if !defined(__NR_getpmsg)
+#define __NR_getpmsg (__NR_Linux + 208)
+#endif
+
+#if !defined(__NR_putpmsg)
+#define __NR_putpmsg (__NR_Linux + 209)
+#endif
+
+#if !defined(__NR_mmap2)
+#define __NR_mmap2 (__NR_Linux + 210)
+#endif
+
+#if !defined(__NR_truncate64)
+#define __NR_truncate64 (__NR_Linux + 211)
+#endif
+
+#if !defined(__NR_ftruncate64)
+#define __NR_ftruncate64 (__NR_Linux + 212)
+#endif
+
+#if !defined(__NR_stat64)
+#define __NR_stat64 (__NR_Linux + 213)
+#endif
+
+#if !defined(__NR_lstat64)
+#define __NR_lstat64 (__NR_Linux + 214)
+#endif
+
+#if !defined(__NR_fstat64)
+#define __NR_fstat64 (__NR_Linux + 215)
+#endif
+
+#if !defined(__NR_pivot_root)
+#define __NR_pivot_root (__NR_Linux + 216)
+#endif
+
+#if !defined(__NR_mincore)
+#define __NR_mincore (__NR_Linux + 217)
+#endif
+
+#if !defined(__NR_madvise)
+#define __NR_madvise (__NR_Linux + 218)
+#endif
+
+#if !defined(__NR_getdents64)
+#define __NR_getdents64 (__NR_Linux + 219)
+#endif
+
+#if !defined(__NR_fcntl64)
+#define __NR_fcntl64 (__NR_Linux + 220)
+#endif
+
+#if !defined(__NR_reserved221)
+#define __NR_reserved221 (__NR_Linux + 221)
+#endif
+
+#if !defined(__NR_gettid)
+#define __NR_gettid (__NR_Linux + 222)
+#endif
+
+#if !defined(__NR_readahead)
+#define __NR_readahead (__NR_Linux + 223)
+#endif
+
+#if !defined(__NR_setxattr)
+#define __NR_setxattr (__NR_Linux + 224)
+#endif
+
+#if !defined(__NR_lsetxattr)
+#define __NR_lsetxattr (__NR_Linux + 225)
+#endif
+
+#if !defined(__NR_fsetxattr)
+#define __NR_fsetxattr (__NR_Linux + 226)
+#endif
+
+#if !defined(__NR_getxattr)
+#define __NR_getxattr (__NR_Linux + 227)
+#endif
+
+#if !defined(__NR_lgetxattr)
+#define __NR_lgetxattr (__NR_Linux + 228)
+#endif
+
+#if !defined(__NR_fgetxattr)
+#define __NR_fgetxattr (__NR_Linux + 229)
+#endif
+
+#if !defined(__NR_listxattr)
+#define __NR_listxattr (__NR_Linux + 230)
+#endif
+
+#if !defined(__NR_llistxattr)
+#define __NR_llistxattr (__NR_Linux + 231)
+#endif
+
+#if !defined(__NR_flistxattr)
+#define __NR_flistxattr (__NR_Linux + 232)
+#endif
+
+#if !defined(__NR_removexattr)
+#define __NR_removexattr (__NR_Linux + 233)
+#endif
+
+#if !defined(__NR_lremovexattr)
+#define __NR_lremovexattr (__NR_Linux + 234)
+#endif
+
+#if !defined(__NR_fremovexattr)
+#define __NR_fremovexattr (__NR_Linux + 235)
+#endif
+
+#if !defined(__NR_tkill)
+#define __NR_tkill (__NR_Linux + 236)
+#endif
+
+#if !defined(__NR_sendfile64)
+#define __NR_sendfile64 (__NR_Linux + 237)
+#endif
+
+#if !defined(__NR_futex)
+#define __NR_futex (__NR_Linux + 238)
+#endif
+
+#if !defined(__NR_sched_setaffinity)
+#define __NR_sched_setaffinity (__NR_Linux + 239)
+#endif
+
+#if !defined(__NR_sched_getaffinity)
+#define __NR_sched_getaffinity (__NR_Linux + 240)
+#endif
+
+#if !defined(__NR_io_setup)
+#define __NR_io_setup (__NR_Linux + 241)
+#endif
+
+#if !defined(__NR_io_destroy)
+#define __NR_io_destroy (__NR_Linux + 242)
+#endif
+
+#if !defined(__NR_io_getevents)
+#define __NR_io_getevents (__NR_Linux + 243)
+#endif
+
+#if !defined(__NR_io_submit)
+#define __NR_io_submit (__NR_Linux + 244)
+#endif
+
+#if !defined(__NR_io_cancel)
+#define __NR_io_cancel (__NR_Linux + 245)
+#endif
+
+#if !defined(__NR_exit_group)
+#define __NR_exit_group (__NR_Linux + 246)
+#endif
+
+#if !defined(__NR_lookup_dcookie)
+#define __NR_lookup_dcookie (__NR_Linux + 247)
+#endif
+
+#if !defined(__NR_epoll_create)
+#define __NR_epoll_create (__NR_Linux + 248)
+#endif
+
+#if !defined(__NR_epoll_ctl)
+#define __NR_epoll_ctl (__NR_Linux + 249)
+#endif
+
+#if !defined(__NR_epoll_wait)
+#define __NR_epoll_wait (__NR_Linux + 250)
+#endif
+
+#if !defined(__NR_remap_file_pages)
+#define __NR_remap_file_pages (__NR_Linux + 251)
+#endif
+
+#if !defined(__NR_set_tid_address)
+#define __NR_set_tid_address (__NR_Linux + 252)
+#endif
+
+#if !defined(__NR_restart_syscall)
+#define __NR_restart_syscall (__NR_Linux + 253)
+#endif
+
+#if !defined(__NR_fadvise64)
+#define __NR_fadvise64 (__NR_Linux + 254)
+#endif
+
+#if !defined(__NR_statfs64)
+#define __NR_statfs64 (__NR_Linux + 255)
+#endif
+
+#if !defined(__NR_fstatfs64)
+#define __NR_fstatfs64 (__NR_Linux + 256)
+#endif
+
+#if !defined(__NR_timer_create)
+#define __NR_timer_create (__NR_Linux + 257)
+#endif
+
+#if !defined(__NR_timer_settime)
+#define __NR_timer_settime (__NR_Linux + 258)
+#endif
+
+#if !defined(__NR_timer_gettime)
+#define __NR_timer_gettime (__NR_Linux + 259)
+#endif
+
+#if !defined(__NR_timer_getoverrun)
+#define __NR_timer_getoverrun (__NR_Linux + 260)
+#endif
+
+#if !defined(__NR_timer_delete)
+#define __NR_timer_delete (__NR_Linux + 261)
+#endif
+
+#if !defined(__NR_clock_settime)
+#define __NR_clock_settime (__NR_Linux + 262)
+#endif
+
+#if !defined(__NR_clock_gettime)
+#define __NR_clock_gettime (__NR_Linux + 263)
+#endif
+
+#if !defined(__NR_clock_getres)
+#define __NR_clock_getres (__NR_Linux + 264)
+#endif
+
+#if !defined(__NR_clock_nanosleep)
+#define __NR_clock_nanosleep (__NR_Linux + 265)
+#endif
+
+#if !defined(__NR_tgkill)
+#define __NR_tgkill (__NR_Linux + 266)
+#endif
+
+#if !defined(__NR_utimes)
+#define __NR_utimes (__NR_Linux + 267)
+#endif
+
+#if !defined(__NR_mbind)
+#define __NR_mbind (__NR_Linux + 268)
+#endif
+
+#if !defined(__NR_get_mempolicy)
+#define __NR_get_mempolicy (__NR_Linux + 269)
+#endif
+
+#if !defined(__NR_set_mempolicy)
+#define __NR_set_mempolicy (__NR_Linux + 270)
+#endif
+
+#if !defined(__NR_mq_open)
+#define __NR_mq_open (__NR_Linux + 271)
+#endif
+
+#if !defined(__NR_mq_unlink)
+#define __NR_mq_unlink (__NR_Linux + 272)
+#endif
+
+#if !defined(__NR_mq_timedsend)
+#define __NR_mq_timedsend (__NR_Linux + 273)
+#endif
+
+#if !defined(__NR_mq_timedreceive)
+#define __NR_mq_timedreceive (__NR_Linux + 274)
+#endif
+
+#if !defined(__NR_mq_notify)
+#define __NR_mq_notify (__NR_Linux + 275)
+#endif
+
+#if !defined(__NR_mq_getsetattr)
+#define __NR_mq_getsetattr (__NR_Linux + 276)
+#endif
+
+#if !defined(__NR_vserver)
+#define __NR_vserver (__NR_Linux + 277)
+#endif
+
+#if !defined(__NR_waitid)
+#define __NR_waitid (__NR_Linux + 278)
+#endif
+
+/* #define __NR_sys_setaltroot (__NR_Linux + 279) */
+
+#if !defined(__NR_add_key)
+#define __NR_add_key (__NR_Linux + 280)
+#endif
+
+#if !defined(__NR_request_key)
+#define __NR_request_key (__NR_Linux + 281)
+#endif
+
+#if !defined(__NR_keyctl)
+#define __NR_keyctl (__NR_Linux + 282)
+#endif
+
+#if !defined(__NR_set_thread_area)
+#define __NR_set_thread_area (__NR_Linux + 283)
+#endif
+
+#if !defined(__NR_inotify_init)
+#define __NR_inotify_init (__NR_Linux + 284)
+#endif
+
+#if !defined(__NR_inotify_add_watch)
+#define __NR_inotify_add_watch (__NR_Linux + 285)
+#endif
+
+#if !defined(__NR_inotify_rm_watch)
+#define __NR_inotify_rm_watch (__NR_Linux + 286)
+#endif
+
+#if !defined(__NR_migrate_pages)
+#define __NR_migrate_pages (__NR_Linux + 287)
+#endif
+
+#if !defined(__NR_openat)
+#define __NR_openat (__NR_Linux + 288)
+#endif
+
+#if !defined(__NR_mkdirat)
+#define __NR_mkdirat (__NR_Linux + 289)
+#endif
+
+#if !defined(__NR_mknodat)
+#define __NR_mknodat (__NR_Linux + 290)
+#endif
+
+#if !defined(__NR_fchownat)
+#define __NR_fchownat (__NR_Linux + 291)
+#endif
+
+#if !defined(__NR_futimesat)
+#define __NR_futimesat (__NR_Linux + 292)
+#endif
+
+#if !defined(__NR_fstatat64)
+#define __NR_fstatat64 (__NR_Linux + 293)
+#endif
+
+#if !defined(__NR_unlinkat)
+#define __NR_unlinkat (__NR_Linux + 294)
+#endif
+
+#if !defined(__NR_renameat)
+#define __NR_renameat (__NR_Linux + 295)
+#endif
+
+#if !defined(__NR_linkat)
+#define __NR_linkat (__NR_Linux + 296)
+#endif
+
+#if !defined(__NR_symlinkat)
+#define __NR_symlinkat (__NR_Linux + 297)
+#endif
+
+#if !defined(__NR_readlinkat)
+#define __NR_readlinkat (__NR_Linux + 298)
+#endif
+
+#if !defined(__NR_fchmodat)
+#define __NR_fchmodat (__NR_Linux + 299)
+#endif
+
+#if !defined(__NR_faccessat)
+#define __NR_faccessat (__NR_Linux + 300)
+#endif
+
+#if !defined(__NR_pselect6)
+#define __NR_pselect6 (__NR_Linux + 301)
+#endif
+
+#if !defined(__NR_ppoll)
+#define __NR_ppoll (__NR_Linux + 302)
+#endif
+
+#if !defined(__NR_unshare)
+#define __NR_unshare (__NR_Linux + 303)
+#endif
+
+#if !defined(__NR_splice)
+#define __NR_splice (__NR_Linux + 304)
+#endif
+
+#if !defined(__NR_sync_file_range)
+#define __NR_sync_file_range (__NR_Linux + 305)
+#endif
+
+#if !defined(__NR_tee)
+#define __NR_tee (__NR_Linux + 306)
+#endif
+
+#if !defined(__NR_vmsplice)
+#define __NR_vmsplice (__NR_Linux + 307)
+#endif
+
+#if !defined(__NR_move_pages)
+#define __NR_move_pages (__NR_Linux + 308)
+#endif
+
+#if !defined(__NR_set_robust_list)
+#define __NR_set_robust_list (__NR_Linux + 309)
+#endif
+
+#if !defined(__NR_get_robust_list)
+#define __NR_get_robust_list (__NR_Linux + 310)
+#endif
+
+#if !defined(__NR_kexec_load)
+#define __NR_kexec_load (__NR_Linux + 311)
+#endif
+
+#if !defined(__NR_getcpu)
+#define __NR_getcpu (__NR_Linux + 312)
+#endif
+
+#if !defined(__NR_epoll_pwait)
+#define __NR_epoll_pwait (__NR_Linux + 313)
+#endif
+
+#if !defined(__NR_ioprio_set)
+#define __NR_ioprio_set (__NR_Linux + 314)
+#endif
+
+#if !defined(__NR_ioprio_get)
+#define __NR_ioprio_get (__NR_Linux + 315)
+#endif
+
+#if !defined(__NR_utimensat)
+#define __NR_utimensat (__NR_Linux + 316)
+#endif
+
+#if !defined(__NR_signalfd)
+#define __NR_signalfd (__NR_Linux + 317)
+#endif
+
+#if !defined(__NR_timerfd)
+#define __NR_timerfd (__NR_Linux + 318)
+#endif
+
+#if !defined(__NR_eventfd)
+#define __NR_eventfd (__NR_Linux + 319)
+#endif
+
+#if !defined(__NR_fallocate)
+#define __NR_fallocate (__NR_Linux + 320)
+#endif
+
+#if !defined(__NR_timerfd_create)
+#define __NR_timerfd_create (__NR_Linux + 321)
+#endif
+
+#if !defined(__NR_timerfd_gettime)
+#define __NR_timerfd_gettime (__NR_Linux + 322)
+#endif
+
+#if !defined(__NR_timerfd_settime)
+#define __NR_timerfd_settime (__NR_Linux + 323)
+#endif
+
+#if !defined(__NR_signalfd4)
+#define __NR_signalfd4 (__NR_Linux + 324)
+#endif
+
+#if !defined(__NR_eventfd2)
+#define __NR_eventfd2 (__NR_Linux + 325)
+#endif
+
+#if !defined(__NR_epoll_create1)
+#define __NR_epoll_create1 (__NR_Linux + 326)
+#endif
+
+#if !defined(__NR_dup3)
+#define __NR_dup3 (__NR_Linux + 327)
+#endif
+
+#if !defined(__NR_pipe2)
+#define __NR_pipe2 (__NR_Linux + 328)
+#endif
+
+#if !defined(__NR_inotify_init1)
+#define __NR_inotify_init1 (__NR_Linux + 329)
+#endif
+
+#if !defined(__NR_preadv)
+#define __NR_preadv (__NR_Linux + 330)
+#endif
+
+#if !defined(__NR_pwritev)
+#define __NR_pwritev (__NR_Linux + 331)
+#endif
+
+#if !defined(__NR_rt_tgsigqueueinfo)
+#define __NR_rt_tgsigqueueinfo (__NR_Linux + 332)
+#endif
+
+#if !defined(__NR_perf_event_open)
+#define __NR_perf_event_open (__NR_Linux + 333)
+#endif
+
+#if !defined(__NR_accept4)
+#define __NR_accept4 (__NR_Linux + 334)
+#endif
+
+#if !defined(__NR_recvmmsg)
+#define __NR_recvmmsg (__NR_Linux + 335)
+#endif
+
+#if !defined(__NR_fanotify_init)
+#define __NR_fanotify_init (__NR_Linux + 336)
+#endif
+
+#if !defined(__NR_fanotify_mark)
+#define __NR_fanotify_mark (__NR_Linux + 337)
+#endif
+
+#if !defined(__NR_prlimit64)
+#define __NR_prlimit64 (__NR_Linux + 338)
+#endif
+
+#if !defined(__NR_name_to_handle_at)
+#define __NR_name_to_handle_at (__NR_Linux + 339)
+#endif
+
+#if !defined(__NR_open_by_handle_at)
+#define __NR_open_by_handle_at (__NR_Linux + 340)
+#endif
+
+#if !defined(__NR_clock_adjtime)
+#define __NR_clock_adjtime (__NR_Linux + 341)
+#endif
+
+#if !defined(__NR_syncfs)
+#define __NR_syncfs (__NR_Linux + 342)
+#endif
+
+#if !defined(__NR_sendmmsg)
+#define __NR_sendmmsg (__NR_Linux + 343)
+#endif
+
+#if !defined(__NR_setns)
+#define __NR_setns (__NR_Linux + 344)
+#endif
+
+#if !defined(__NR_process_vm_readv)
+#define __NR_process_vm_readv (__NR_Linux + 345)
+#endif
+
+#if !defined(__NR_process_vm_writev)
+#define __NR_process_vm_writev (__NR_Linux + 346)
+#endif
+
+#if !defined(__NR_kcmp)
+#define __NR_kcmp (__NR_Linux + 347)
+#endif
+
+#if !defined(__NR_finit_module)
+#define __NR_finit_module (__NR_Linux + 348)
+#endif
+
+#if !defined(__NR_sched_setattr)
+#define __NR_sched_setattr (__NR_Linux + 349)
+#endif
+
+#if !defined(__NR_sched_getattr)
+#define __NR_sched_getattr (__NR_Linux + 350)
+#endif
+
+#if !defined(__NR_renameat2)
+#define __NR_renameat2 (__NR_Linux + 351)
+#endif
+
+#if !defined(__NR_seccomp)
+#define __NR_seccomp (__NR_Linux + 352)
+#endif
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_MIPS_LINUX_SYSCALLS_H_
diff --git a/libchrome/sandbox/linux/system_headers/mips_linux_ucontext.h b/libchrome/sandbox/linux/system_headers/mips_linux_ucontext.h
new file mode 100644
index 0000000..774bf31
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/mips_linux_ucontext.h
@@ -0,0 +1,53 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_MIPS_LINUX_UCONTEXT_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_MIPS_LINUX_UCONTEXT_H_
+
+#include <stdint.h>
+
+// This is mostly copied from breakpad (common/android/include/sys/ucontext.h),
+// except we do use sigset_t for uc_sigmask instead of a custom type.
+#if !defined(__BIONIC_HAVE_UCONTEXT_T)
+// Ensure that 'stack_t' is defined.
+#include <asm/signal.h>
+
+// We also need greg_t for the sandbox, include it in this header as well.
+typedef unsigned long greg_t;
+
+typedef struct {
+ uint32_t regmask;
+ uint32_t status;
+ uint64_t pc;
+ uint64_t gregs[32];
+ uint64_t fpregs[32];
+ uint32_t acx;
+ uint32_t fpc_csr;
+ uint32_t fpc_eir;
+ uint32_t used_math;
+ uint32_t dsp;
+ uint64_t mdhi;
+ uint64_t mdlo;
+ uint32_t hi1;
+ uint32_t lo1;
+ uint32_t hi2;
+ uint32_t lo2;
+ uint32_t hi3;
+ uint32_t lo3;
+} mcontext_t;
+
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ sigset_t uc_sigmask;
+ // Other fields are not used by Google Breakpad. Don't define them.
+} ucontext_t;
+
+#else
+#include <sys/ucontext.h>
+#endif // __BIONIC_HAVE_UCONTEXT_T
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_MIPS_LINUX_UCONTEXT_H_
diff --git a/libchrome/sandbox/linux/system_headers/x86_32_linux_syscalls.h b/libchrome/sandbox/linux/system_headers/x86_32_linux_syscalls.h
new file mode 100644
index 0000000..a6afc62
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/x86_32_linux_syscalls.h
@@ -0,0 +1,1426 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated from the Linux kernel's syscall_32.tbl.
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_X86_32_LINUX_SYSCALLS_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_X86_32_LINUX_SYSCALLS_H_
+
+#if !defined(__i386__)
+#error "Including header on wrong architecture"
+#endif
+
+#if !defined(__NR_restart_syscall)
+#define __NR_restart_syscall 0
+#endif
+
+#if !defined(__NR_exit)
+#define __NR_exit 1
+#endif
+
+#if !defined(__NR_fork)
+#define __NR_fork 2
+#endif
+
+#if !defined(__NR_read)
+#define __NR_read 3
+#endif
+
+#if !defined(__NR_write)
+#define __NR_write 4
+#endif
+
+#if !defined(__NR_open)
+#define __NR_open 5
+#endif
+
+#if !defined(__NR_close)
+#define __NR_close 6
+#endif
+
+#if !defined(__NR_waitpid)
+#define __NR_waitpid 7
+#endif
+
+#if !defined(__NR_creat)
+#define __NR_creat 8
+#endif
+
+#if !defined(__NR_link)
+#define __NR_link 9
+#endif
+
+#if !defined(__NR_unlink)
+#define __NR_unlink 10
+#endif
+
+#if !defined(__NR_execve)
+#define __NR_execve 11
+#endif
+
+#if !defined(__NR_chdir)
+#define __NR_chdir 12
+#endif
+
+#if !defined(__NR_time)
+#define __NR_time 13
+#endif
+
+#if !defined(__NR_mknod)
+#define __NR_mknod 14
+#endif
+
+#if !defined(__NR_chmod)
+#define __NR_chmod 15
+#endif
+
+#if !defined(__NR_lchown)
+#define __NR_lchown 16
+#endif
+
+#if !defined(__NR_break)
+#define __NR_break 17
+#endif
+
+#if !defined(__NR_oldstat)
+#define __NR_oldstat 18
+#endif
+
+#if !defined(__NR_lseek)
+#define __NR_lseek 19
+#endif
+
+#if !defined(__NR_getpid)
+#define __NR_getpid 20
+#endif
+
+#if !defined(__NR_mount)
+#define __NR_mount 21
+#endif
+
+#if !defined(__NR_umount)
+#define __NR_umount 22
+#endif
+
+#if !defined(__NR_setuid)
+#define __NR_setuid 23
+#endif
+
+#if !defined(__NR_getuid)
+#define __NR_getuid 24
+#endif
+
+#if !defined(__NR_stime)
+#define __NR_stime 25
+#endif
+
+#if !defined(__NR_ptrace)
+#define __NR_ptrace 26
+#endif
+
+#if !defined(__NR_alarm)
+#define __NR_alarm 27
+#endif
+
+#if !defined(__NR_oldfstat)
+#define __NR_oldfstat 28
+#endif
+
+#if !defined(__NR_pause)
+#define __NR_pause 29
+#endif
+
+#if !defined(__NR_utime)
+#define __NR_utime 30
+#endif
+
+#if !defined(__NR_stty)
+#define __NR_stty 31
+#endif
+
+#if !defined(__NR_gtty)
+#define __NR_gtty 32
+#endif
+
+#if !defined(__NR_access)
+#define __NR_access 33
+#endif
+
+#if !defined(__NR_nice)
+#define __NR_nice 34
+#endif
+
+#if !defined(__NR_ftime)
+#define __NR_ftime 35
+#endif
+
+#if !defined(__NR_sync)
+#define __NR_sync 36
+#endif
+
+#if !defined(__NR_kill)
+#define __NR_kill 37
+#endif
+
+#if !defined(__NR_rename)
+#define __NR_rename 38
+#endif
+
+#if !defined(__NR_mkdir)
+#define __NR_mkdir 39
+#endif
+
+#if !defined(__NR_rmdir)
+#define __NR_rmdir 40
+#endif
+
+#if !defined(__NR_dup)
+#define __NR_dup 41
+#endif
+
+#if !defined(__NR_pipe)
+#define __NR_pipe 42
+#endif
+
+#if !defined(__NR_times)
+#define __NR_times 43
+#endif
+
+#if !defined(__NR_prof)
+#define __NR_prof 44
+#endif
+
+#if !defined(__NR_brk)
+#define __NR_brk 45
+#endif
+
+#if !defined(__NR_setgid)
+#define __NR_setgid 46
+#endif
+
+#if !defined(__NR_getgid)
+#define __NR_getgid 47
+#endif
+
+#if !defined(__NR_signal)
+#define __NR_signal 48
+#endif
+
+#if !defined(__NR_geteuid)
+#define __NR_geteuid 49
+#endif
+
+#if !defined(__NR_getegid)
+#define __NR_getegid 50
+#endif
+
+#if !defined(__NR_acct)
+#define __NR_acct 51
+#endif
+
+#if !defined(__NR_umount2)
+#define __NR_umount2 52
+#endif
+
+#if !defined(__NR_lock)
+#define __NR_lock 53
+#endif
+
+#if !defined(__NR_ioctl)
+#define __NR_ioctl 54
+#endif
+
+#if !defined(__NR_fcntl)
+#define __NR_fcntl 55
+#endif
+
+#if !defined(__NR_mpx)
+#define __NR_mpx 56
+#endif
+
+#if !defined(__NR_setpgid)
+#define __NR_setpgid 57
+#endif
+
+#if !defined(__NR_ulimit)
+#define __NR_ulimit 58
+#endif
+
+#if !defined(__NR_oldolduname)
+#define __NR_oldolduname 59
+#endif
+
+#if !defined(__NR_umask)
+#define __NR_umask 60
+#endif
+
+#if !defined(__NR_chroot)
+#define __NR_chroot 61
+#endif
+
+#if !defined(__NR_ustat)
+#define __NR_ustat 62
+#endif
+
+#if !defined(__NR_dup2)
+#define __NR_dup2 63
+#endif
+
+#if !defined(__NR_getppid)
+#define __NR_getppid 64
+#endif
+
+#if !defined(__NR_getpgrp)
+#define __NR_getpgrp 65
+#endif
+
+#if !defined(__NR_setsid)
+#define __NR_setsid 66
+#endif
+
+#if !defined(__NR_sigaction)
+#define __NR_sigaction 67
+#endif
+
+#if !defined(__NR_sgetmask)
+#define __NR_sgetmask 68
+#endif
+
+#if !defined(__NR_ssetmask)
+#define __NR_ssetmask 69
+#endif
+
+#if !defined(__NR_setreuid)
+#define __NR_setreuid 70
+#endif
+
+#if !defined(__NR_setregid)
+#define __NR_setregid 71
+#endif
+
+#if !defined(__NR_sigsuspend)
+#define __NR_sigsuspend 72
+#endif
+
+#if !defined(__NR_sigpending)
+#define __NR_sigpending 73
+#endif
+
+#if !defined(__NR_sethostname)
+#define __NR_sethostname 74
+#endif
+
+#if !defined(__NR_setrlimit)
+#define __NR_setrlimit 75
+#endif
+
+#if !defined(__NR_getrlimit)
+#define __NR_getrlimit 76
+#endif
+
+#if !defined(__NR_getrusage)
+#define __NR_getrusage 77
+#endif
+
+#if !defined(__NR_gettimeofday)
+#define __NR_gettimeofday 78
+#endif
+
+#if !defined(__NR_settimeofday)
+#define __NR_settimeofday 79
+#endif
+
+#if !defined(__NR_getgroups)
+#define __NR_getgroups 80
+#endif
+
+#if !defined(__NR_setgroups)
+#define __NR_setgroups 81
+#endif
+
+#if !defined(__NR_select)
+#define __NR_select 82
+#endif
+
+#if !defined(__NR_symlink)
+#define __NR_symlink 83
+#endif
+
+#if !defined(__NR_oldlstat)
+#define __NR_oldlstat 84
+#endif
+
+#if !defined(__NR_readlink)
+#define __NR_readlink 85
+#endif
+
+#if !defined(__NR_uselib)
+#define __NR_uselib 86
+#endif
+
+#if !defined(__NR_swapon)
+#define __NR_swapon 87
+#endif
+
+#if !defined(__NR_reboot)
+#define __NR_reboot 88
+#endif
+
+#if !defined(__NR_readdir)
+#define __NR_readdir 89
+#endif
+
+#if !defined(__NR_mmap)
+#define __NR_mmap 90
+#endif
+
+#if !defined(__NR_munmap)
+#define __NR_munmap 91
+#endif
+
+#if !defined(__NR_truncate)
+#define __NR_truncate 92
+#endif
+
+#if !defined(__NR_ftruncate)
+#define __NR_ftruncate 93
+#endif
+
+#if !defined(__NR_fchmod)
+#define __NR_fchmod 94
+#endif
+
+#if !defined(__NR_fchown)
+#define __NR_fchown 95
+#endif
+
+#if !defined(__NR_getpriority)
+#define __NR_getpriority 96
+#endif
+
+#if !defined(__NR_setpriority)
+#define __NR_setpriority 97
+#endif
+
+#if !defined(__NR_profil)
+#define __NR_profil 98
+#endif
+
+#if !defined(__NR_statfs)
+#define __NR_statfs 99
+#endif
+
+#if !defined(__NR_fstatfs)
+#define __NR_fstatfs 100
+#endif
+
+#if !defined(__NR_ioperm)
+#define __NR_ioperm 101
+#endif
+
+#if !defined(__NR_socketcall)
+#define __NR_socketcall 102
+#endif
+
+#if !defined(__NR_syslog)
+#define __NR_syslog 103
+#endif
+
+#if !defined(__NR_setitimer)
+#define __NR_setitimer 104
+#endif
+
+#if !defined(__NR_getitimer)
+#define __NR_getitimer 105
+#endif
+
+#if !defined(__NR_stat)
+#define __NR_stat 106
+#endif
+
+#if !defined(__NR_lstat)
+#define __NR_lstat 107
+#endif
+
+#if !defined(__NR_fstat)
+#define __NR_fstat 108
+#endif
+
+#if !defined(__NR_olduname)
+#define __NR_olduname 109
+#endif
+
+#if !defined(__NR_iopl)
+#define __NR_iopl 110
+#endif
+
+#if !defined(__NR_vhangup)
+#define __NR_vhangup 111
+#endif
+
+#if !defined(__NR_idle)
+#define __NR_idle 112
+#endif
+
+#if !defined(__NR_vm86old)
+#define __NR_vm86old 113
+#endif
+
+#if !defined(__NR_wait4)
+#define __NR_wait4 114
+#endif
+
+#if !defined(__NR_swapoff)
+#define __NR_swapoff 115
+#endif
+
+#if !defined(__NR_sysinfo)
+#define __NR_sysinfo 116
+#endif
+
+#if !defined(__NR_ipc)
+#define __NR_ipc 117
+#endif
+
+#if !defined(__NR_fsync)
+#define __NR_fsync 118
+#endif
+
+#if !defined(__NR_sigreturn)
+#define __NR_sigreturn 119
+#endif
+
+#if !defined(__NR_clone)
+#define __NR_clone 120
+#endif
+
+#if !defined(__NR_setdomainname)
+#define __NR_setdomainname 121
+#endif
+
+#if !defined(__NR_uname)
+#define __NR_uname 122
+#endif
+
+#if !defined(__NR_modify_ldt)
+#define __NR_modify_ldt 123
+#endif
+
+#if !defined(__NR_adjtimex)
+#define __NR_adjtimex 124
+#endif
+
+#if !defined(__NR_mprotect)
+#define __NR_mprotect 125
+#endif
+
+#if !defined(__NR_sigprocmask)
+#define __NR_sigprocmask 126
+#endif
+
+#if !defined(__NR_create_module)
+#define __NR_create_module 127
+#endif
+
+#if !defined(__NR_init_module)
+#define __NR_init_module 128
+#endif
+
+#if !defined(__NR_delete_module)
+#define __NR_delete_module 129
+#endif
+
+#if !defined(__NR_get_kernel_syms)
+#define __NR_get_kernel_syms 130
+#endif
+
+#if !defined(__NR_quotactl)
+#define __NR_quotactl 131
+#endif
+
+#if !defined(__NR_getpgid)
+#define __NR_getpgid 132
+#endif
+
+#if !defined(__NR_fchdir)
+#define __NR_fchdir 133
+#endif
+
+#if !defined(__NR_bdflush)
+#define __NR_bdflush 134
+#endif
+
+#if !defined(__NR_sysfs)
+#define __NR_sysfs 135
+#endif
+
+#if !defined(__NR_personality)
+#define __NR_personality 136
+#endif
+
+#if !defined(__NR_afs_syscall)
+#define __NR_afs_syscall 137
+#endif
+
+#if !defined(__NR_setfsuid)
+#define __NR_setfsuid 138
+#endif
+
+#if !defined(__NR_setfsgid)
+#define __NR_setfsgid 139
+#endif
+
+#if !defined(__NR__llseek)
+#define __NR__llseek 140
+#endif
+
+#if !defined(__NR_getdents)
+#define __NR_getdents 141
+#endif
+
+#if !defined(__NR__newselect)
+#define __NR__newselect 142
+#endif
+
+#if !defined(__NR_flock)
+#define __NR_flock 143
+#endif
+
+#if !defined(__NR_msync)
+#define __NR_msync 144
+#endif
+
+#if !defined(__NR_readv)
+#define __NR_readv 145
+#endif
+
+#if !defined(__NR_writev)
+#define __NR_writev 146
+#endif
+
+#if !defined(__NR_getsid)
+#define __NR_getsid 147
+#endif
+
+#if !defined(__NR_fdatasync)
+#define __NR_fdatasync 148
+#endif
+
+#if !defined(__NR__sysctl)
+#define __NR__sysctl 149
+#endif
+
+#if !defined(__NR_mlock)
+#define __NR_mlock 150
+#endif
+
+#if !defined(__NR_munlock)
+#define __NR_munlock 151
+#endif
+
+#if !defined(__NR_mlockall)
+#define __NR_mlockall 152
+#endif
+
+#if !defined(__NR_munlockall)
+#define __NR_munlockall 153
+#endif
+
+#if !defined(__NR_sched_setparam)
+#define __NR_sched_setparam 154
+#endif
+
+#if !defined(__NR_sched_getparam)
+#define __NR_sched_getparam 155
+#endif
+
+#if !defined(__NR_sched_setscheduler)
+#define __NR_sched_setscheduler 156
+#endif
+
+#if !defined(__NR_sched_getscheduler)
+#define __NR_sched_getscheduler 157
+#endif
+
+#if !defined(__NR_sched_yield)
+#define __NR_sched_yield 158
+#endif
+
+#if !defined(__NR_sched_get_priority_max)
+#define __NR_sched_get_priority_max 159
+#endif
+
+#if !defined(__NR_sched_get_priority_min)
+#define __NR_sched_get_priority_min 160
+#endif
+
+#if !defined(__NR_sched_rr_get_interval)
+#define __NR_sched_rr_get_interval 161
+#endif
+
+#if !defined(__NR_nanosleep)
+#define __NR_nanosleep 162
+#endif
+
+#if !defined(__NR_mremap)
+#define __NR_mremap 163
+#endif
+
+#if !defined(__NR_setresuid)
+#define __NR_setresuid 164
+#endif
+
+#if !defined(__NR_getresuid)
+#define __NR_getresuid 165
+#endif
+
+#if !defined(__NR_vm86)
+#define __NR_vm86 166
+#endif
+
+#if !defined(__NR_query_module)
+#define __NR_query_module 167
+#endif
+
+#if !defined(__NR_poll)
+#define __NR_poll 168
+#endif
+
+#if !defined(__NR_nfsservctl)
+#define __NR_nfsservctl 169
+#endif
+
+#if !defined(__NR_setresgid)
+#define __NR_setresgid 170
+#endif
+
+#if !defined(__NR_getresgid)
+#define __NR_getresgid 171
+#endif
+
+#if !defined(__NR_prctl)
+#define __NR_prctl 172
+#endif
+
+#if !defined(__NR_rt_sigreturn)
+#define __NR_rt_sigreturn 173
+#endif
+
+#if !defined(__NR_rt_sigaction)
+#define __NR_rt_sigaction 174
+#endif
+
+#if !defined(__NR_rt_sigprocmask)
+#define __NR_rt_sigprocmask 175
+#endif
+
+#if !defined(__NR_rt_sigpending)
+#define __NR_rt_sigpending 176
+#endif
+
+#if !defined(__NR_rt_sigtimedwait)
+#define __NR_rt_sigtimedwait 177
+#endif
+
+#if !defined(__NR_rt_sigqueueinfo)
+#define __NR_rt_sigqueueinfo 178
+#endif
+
+#if !defined(__NR_rt_sigsuspend)
+#define __NR_rt_sigsuspend 179
+#endif
+
+#if !defined(__NR_pread64)
+#define __NR_pread64 180
+#endif
+
+#if !defined(__NR_pwrite64)
+#define __NR_pwrite64 181
+#endif
+
+#if !defined(__NR_chown)
+#define __NR_chown 182
+#endif
+
+#if !defined(__NR_getcwd)
+#define __NR_getcwd 183
+#endif
+
+#if !defined(__NR_capget)
+#define __NR_capget 184
+#endif
+
+#if !defined(__NR_capset)
+#define __NR_capset 185
+#endif
+
+#if !defined(__NR_sigaltstack)
+#define __NR_sigaltstack 186
+#endif
+
+#if !defined(__NR_sendfile)
+#define __NR_sendfile 187
+#endif
+
+#if !defined(__NR_getpmsg)
+#define __NR_getpmsg 188
+#endif
+
+#if !defined(__NR_putpmsg)
+#define __NR_putpmsg 189
+#endif
+
+#if !defined(__NR_vfork)
+#define __NR_vfork 190
+#endif
+
+#if !defined(__NR_ugetrlimit)
+#define __NR_ugetrlimit 191
+#endif
+
+#if !defined(__NR_mmap2)
+#define __NR_mmap2 192
+#endif
+
+#if !defined(__NR_truncate64)
+#define __NR_truncate64 193
+#endif
+
+#if !defined(__NR_ftruncate64)
+#define __NR_ftruncate64 194
+#endif
+
+#if !defined(__NR_stat64)
+#define __NR_stat64 195
+#endif
+
+#if !defined(__NR_lstat64)
+#define __NR_lstat64 196
+#endif
+
+#if !defined(__NR_fstat64)
+#define __NR_fstat64 197
+#endif
+
+#if !defined(__NR_lchown32)
+#define __NR_lchown32 198
+#endif
+
+#if !defined(__NR_getuid32)
+#define __NR_getuid32 199
+#endif
+
+#if !defined(__NR_getgid32)
+#define __NR_getgid32 200
+#endif
+
+#if !defined(__NR_geteuid32)
+#define __NR_geteuid32 201
+#endif
+
+#if !defined(__NR_getegid32)
+#define __NR_getegid32 202
+#endif
+
+#if !defined(__NR_setreuid32)
+#define __NR_setreuid32 203
+#endif
+
+#if !defined(__NR_setregid32)
+#define __NR_setregid32 204
+#endif
+
+#if !defined(__NR_getgroups32)
+#define __NR_getgroups32 205
+#endif
+
+#if !defined(__NR_setgroups32)
+#define __NR_setgroups32 206
+#endif
+
+#if !defined(__NR_fchown32)
+#define __NR_fchown32 207
+#endif
+
+#if !defined(__NR_setresuid32)
+#define __NR_setresuid32 208
+#endif
+
+#if !defined(__NR_getresuid32)
+#define __NR_getresuid32 209
+#endif
+
+#if !defined(__NR_setresgid32)
+#define __NR_setresgid32 210
+#endif
+
+#if !defined(__NR_getresgid32)
+#define __NR_getresgid32 211
+#endif
+
+#if !defined(__NR_chown32)
+#define __NR_chown32 212
+#endif
+
+#if !defined(__NR_setuid32)
+#define __NR_setuid32 213
+#endif
+
+#if !defined(__NR_setgid32)
+#define __NR_setgid32 214
+#endif
+
+#if !defined(__NR_setfsuid32)
+#define __NR_setfsuid32 215
+#endif
+
+#if !defined(__NR_setfsgid32)
+#define __NR_setfsgid32 216
+#endif
+
+#if !defined(__NR_pivot_root)
+#define __NR_pivot_root 217
+#endif
+
+#if !defined(__NR_mincore)
+#define __NR_mincore 218
+#endif
+
+#if !defined(__NR_madvise)
+#define __NR_madvise 219
+#endif
+
+#if !defined(__NR_getdents64)
+#define __NR_getdents64 220
+#endif
+
+#if !defined(__NR_fcntl64)
+#define __NR_fcntl64 221
+#endif
+
+#if !defined(__NR_gettid)
+#define __NR_gettid 224
+#endif
+
+#if !defined(__NR_readahead)
+#define __NR_readahead 225
+#endif
+
+#if !defined(__NR_setxattr)
+#define __NR_setxattr 226
+#endif
+
+#if !defined(__NR_lsetxattr)
+#define __NR_lsetxattr 227
+#endif
+
+#if !defined(__NR_fsetxattr)
+#define __NR_fsetxattr 228
+#endif
+
+#if !defined(__NR_getxattr)
+#define __NR_getxattr 229
+#endif
+
+#if !defined(__NR_lgetxattr)
+#define __NR_lgetxattr 230
+#endif
+
+#if !defined(__NR_fgetxattr)
+#define __NR_fgetxattr 231
+#endif
+
+#if !defined(__NR_listxattr)
+#define __NR_listxattr 232
+#endif
+
+#if !defined(__NR_llistxattr)
+#define __NR_llistxattr 233
+#endif
+
+#if !defined(__NR_flistxattr)
+#define __NR_flistxattr 234
+#endif
+
+#if !defined(__NR_removexattr)
+#define __NR_removexattr 235
+#endif
+
+#if !defined(__NR_lremovexattr)
+#define __NR_lremovexattr 236
+#endif
+
+#if !defined(__NR_fremovexattr)
+#define __NR_fremovexattr 237
+#endif
+
+#if !defined(__NR_tkill)
+#define __NR_tkill 238
+#endif
+
+#if !defined(__NR_sendfile64)
+#define __NR_sendfile64 239
+#endif
+
+#if !defined(__NR_futex)
+#define __NR_futex 240
+#endif
+
+#if !defined(__NR_sched_setaffinity)
+#define __NR_sched_setaffinity 241
+#endif
+
+#if !defined(__NR_sched_getaffinity)
+#define __NR_sched_getaffinity 242
+#endif
+
+#if !defined(__NR_set_thread_area)
+#define __NR_set_thread_area 243
+#endif
+
+#if !defined(__NR_get_thread_area)
+#define __NR_get_thread_area 244
+#endif
+
+#if !defined(__NR_io_setup)
+#define __NR_io_setup 245
+#endif
+
+#if !defined(__NR_io_destroy)
+#define __NR_io_destroy 246
+#endif
+
+#if !defined(__NR_io_getevents)
+#define __NR_io_getevents 247
+#endif
+
+#if !defined(__NR_io_submit)
+#define __NR_io_submit 248
+#endif
+
+#if !defined(__NR_io_cancel)
+#define __NR_io_cancel 249
+#endif
+
+#if !defined(__NR_fadvise64)
+#define __NR_fadvise64 250
+#endif
+
+#if !defined(__NR_exit_group)
+#define __NR_exit_group 252
+#endif
+
+#if !defined(__NR_lookup_dcookie)
+#define __NR_lookup_dcookie 253
+#endif
+
+#if !defined(__NR_epoll_create)
+#define __NR_epoll_create 254
+#endif
+
+#if !defined(__NR_epoll_ctl)
+#define __NR_epoll_ctl 255
+#endif
+
+#if !defined(__NR_epoll_wait)
+#define __NR_epoll_wait 256
+#endif
+
+#if !defined(__NR_remap_file_pages)
+#define __NR_remap_file_pages 257
+#endif
+
+#if !defined(__NR_set_tid_address)
+#define __NR_set_tid_address 258
+#endif
+
+#if !defined(__NR_timer_create)
+#define __NR_timer_create 259
+#endif
+
+#if !defined(__NR_timer_settime)
+#define __NR_timer_settime 260
+#endif
+
+#if !defined(__NR_timer_gettime)
+#define __NR_timer_gettime 261
+#endif
+
+#if !defined(__NR_timer_getoverrun)
+#define __NR_timer_getoverrun 262
+#endif
+
+#if !defined(__NR_timer_delete)
+#define __NR_timer_delete 263
+#endif
+
+#if !defined(__NR_clock_settime)
+#define __NR_clock_settime 264
+#endif
+
+#if !defined(__NR_clock_gettime)
+#define __NR_clock_gettime 265
+#endif
+
+#if !defined(__NR_clock_getres)
+#define __NR_clock_getres 266
+#endif
+
+#if !defined(__NR_clock_nanosleep)
+#define __NR_clock_nanosleep 267
+#endif
+
+#if !defined(__NR_statfs64)
+#define __NR_statfs64 268
+#endif
+
+#if !defined(__NR_fstatfs64)
+#define __NR_fstatfs64 269
+#endif
+
+#if !defined(__NR_tgkill)
+#define __NR_tgkill 270
+#endif
+
+#if !defined(__NR_utimes)
+#define __NR_utimes 271
+#endif
+
+#if !defined(__NR_fadvise64_64)
+#define __NR_fadvise64_64 272
+#endif
+
+#if !defined(__NR_vserver)
+#define __NR_vserver 273
+#endif
+
+#if !defined(__NR_mbind)
+#define __NR_mbind 274
+#endif
+
+#if !defined(__NR_get_mempolicy)
+#define __NR_get_mempolicy 275
+#endif
+
+#if !defined(__NR_set_mempolicy)
+#define __NR_set_mempolicy 276
+#endif
+
+#if !defined(__NR_mq_open)
+#define __NR_mq_open 277
+#endif
+
+#if !defined(__NR_mq_unlink)
+#define __NR_mq_unlink 278
+#endif
+
+#if !defined(__NR_mq_timedsend)
+#define __NR_mq_timedsend 279
+#endif
+
+#if !defined(__NR_mq_timedreceive)
+#define __NR_mq_timedreceive 280
+#endif
+
+#if !defined(__NR_mq_notify)
+#define __NR_mq_notify 281
+#endif
+
+#if !defined(__NR_mq_getsetattr)
+#define __NR_mq_getsetattr 282
+#endif
+
+#if !defined(__NR_kexec_load)
+#define __NR_kexec_load 283
+#endif
+
+#if !defined(__NR_waitid)
+#define __NR_waitid 284
+#endif
+
+#if !defined(__NR_add_key)
+#define __NR_add_key 286
+#endif
+
+#if !defined(__NR_request_key)
+#define __NR_request_key 287
+#endif
+
+#if !defined(__NR_keyctl)
+#define __NR_keyctl 288
+#endif
+
+#if !defined(__NR_ioprio_set)
+#define __NR_ioprio_set 289
+#endif
+
+#if !defined(__NR_ioprio_get)
+#define __NR_ioprio_get 290
+#endif
+
+#if !defined(__NR_inotify_init)
+#define __NR_inotify_init 291
+#endif
+
+#if !defined(__NR_inotify_add_watch)
+#define __NR_inotify_add_watch 292
+#endif
+
+#if !defined(__NR_inotify_rm_watch)
+#define __NR_inotify_rm_watch 293
+#endif
+
+#if !defined(__NR_migrate_pages)
+#define __NR_migrate_pages 294
+#endif
+
+#if !defined(__NR_openat)
+#define __NR_openat 295
+#endif
+
+#if !defined(__NR_mkdirat)
+#define __NR_mkdirat 296
+#endif
+
+#if !defined(__NR_mknodat)
+#define __NR_mknodat 297
+#endif
+
+#if !defined(__NR_fchownat)
+#define __NR_fchownat 298
+#endif
+
+#if !defined(__NR_futimesat)
+#define __NR_futimesat 299
+#endif
+
+#if !defined(__NR_fstatat64)
+#define __NR_fstatat64 300
+#endif
+
+#if !defined(__NR_unlinkat)
+#define __NR_unlinkat 301
+#endif
+
+#if !defined(__NR_renameat)
+#define __NR_renameat 302
+#endif
+
+#if !defined(__NR_linkat)
+#define __NR_linkat 303
+#endif
+
+#if !defined(__NR_symlinkat)
+#define __NR_symlinkat 304
+#endif
+
+#if !defined(__NR_readlinkat)
+#define __NR_readlinkat 305
+#endif
+
+#if !defined(__NR_fchmodat)
+#define __NR_fchmodat 306
+#endif
+
+#if !defined(__NR_faccessat)
+#define __NR_faccessat 307
+#endif
+
+#if !defined(__NR_pselect6)
+#define __NR_pselect6 308
+#endif
+
+#if !defined(__NR_ppoll)
+#define __NR_ppoll 309
+#endif
+
+#if !defined(__NR_unshare)
+#define __NR_unshare 310
+#endif
+
+#if !defined(__NR_set_robust_list)
+#define __NR_set_robust_list 311
+#endif
+
+#if !defined(__NR_get_robust_list)
+#define __NR_get_robust_list 312
+#endif
+
+#if !defined(__NR_splice)
+#define __NR_splice 313
+#endif
+
+#if !defined(__NR_sync_file_range)
+#define __NR_sync_file_range 314
+#endif
+
+#if !defined(__NR_tee)
+#define __NR_tee 315
+#endif
+
+#if !defined(__NR_vmsplice)
+#define __NR_vmsplice 316
+#endif
+
+#if !defined(__NR_move_pages)
+#define __NR_move_pages 317
+#endif
+
+#if !defined(__NR_getcpu)
+#define __NR_getcpu 318
+#endif
+
+#if !defined(__NR_epoll_pwait)
+#define __NR_epoll_pwait 319
+#endif
+
+#if !defined(__NR_utimensat)
+#define __NR_utimensat 320
+#endif
+
+#if !defined(__NR_signalfd)
+#define __NR_signalfd 321
+#endif
+
+#if !defined(__NR_timerfd_create)
+#define __NR_timerfd_create 322
+#endif
+
+#if !defined(__NR_eventfd)
+#define __NR_eventfd 323
+#endif
+
+#if !defined(__NR_fallocate)
+#define __NR_fallocate 324
+#endif
+
+#if !defined(__NR_timerfd_settime)
+#define __NR_timerfd_settime 325
+#endif
+
+#if !defined(__NR_timerfd_gettime)
+#define __NR_timerfd_gettime 326
+#endif
+
+#if !defined(__NR_signalfd4)
+#define __NR_signalfd4 327
+#endif
+
+#if !defined(__NR_eventfd2)
+#define __NR_eventfd2 328
+#endif
+
+#if !defined(__NR_epoll_create1)
+#define __NR_epoll_create1 329
+#endif
+
+#if !defined(__NR_dup3)
+#define __NR_dup3 330
+#endif
+
+#if !defined(__NR_pipe2)
+#define __NR_pipe2 331
+#endif
+
+#if !defined(__NR_inotify_init1)
+#define __NR_inotify_init1 332
+#endif
+
+#if !defined(__NR_preadv)
+#define __NR_preadv 333
+#endif
+
+#if !defined(__NR_pwritev)
+#define __NR_pwritev 334
+#endif
+
+#if !defined(__NR_rt_tgsigqueueinfo)
+#define __NR_rt_tgsigqueueinfo 335
+#endif
+
+#if !defined(__NR_perf_event_open)
+#define __NR_perf_event_open 336
+#endif
+
+#if !defined(__NR_recvmmsg)
+#define __NR_recvmmsg 337
+#endif
+
+#if !defined(__NR_fanotify_init)
+#define __NR_fanotify_init 338
+#endif
+
+#if !defined(__NR_fanotify_mark)
+#define __NR_fanotify_mark 339
+#endif
+
+#if !defined(__NR_prlimit64)
+#define __NR_prlimit64 340
+#endif
+
+#if !defined(__NR_name_to_handle_at)
+#define __NR_name_to_handle_at 341
+#endif
+
+#if !defined(__NR_open_by_handle_at)
+#define __NR_open_by_handle_at 342
+#endif
+
+#if !defined(__NR_clock_adjtime)
+#define __NR_clock_adjtime 343
+#endif
+
+#if !defined(__NR_syncfs)
+#define __NR_syncfs 344
+#endif
+
+#if !defined(__NR_sendmmsg)
+#define __NR_sendmmsg 345
+#endif
+
+#if !defined(__NR_setns)
+#define __NR_setns 346
+#endif
+
+#if !defined(__NR_process_vm_readv)
+#define __NR_process_vm_readv 347
+#endif
+
+#if !defined(__NR_process_vm_writev)
+#define __NR_process_vm_writev 348
+#endif
+
+#if !defined(__NR_kcmp)
+#define __NR_kcmp 349
+#endif
+
+#if !defined(__NR_finit_module)
+#define __NR_finit_module 350
+#endif
+
+#if !defined(__NR_sched_setattr)
+#define __NR_sched_setattr 351
+#endif
+
+#if !defined(__NR_sched_getattr)
+#define __NR_sched_getattr 352
+#endif
+
+#if !defined(__NR_renameat2)
+#define __NR_renameat2 353
+#endif
+
+#if !defined(__NR_seccomp)
+#define __NR_seccomp 354
+#endif
+
+#if !defined(__NR_getrandom)
+#define __NR_getrandom 355
+#endif
+
+#if !defined(__NR_memfd_create)
+#define __NR_memfd_create 356
+#endif
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_X86_32_LINUX_SYSCALLS_H_
+
diff --git a/libchrome/sandbox/linux/system_headers/x86_64_linux_syscalls.h b/libchrome/sandbox/linux/system_headers/x86_64_linux_syscalls.h
new file mode 100644
index 0000000..349504a
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/x86_64_linux_syscalls.h
@@ -0,0 +1,1294 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated from the Linux kernel's syscall_64.tbl.
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_X86_64_LINUX_SYSCALLS_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_X86_64_LINUX_SYSCALLS_H_
+
+#if !defined(__x86_64__)
+#error "Including header on wrong architecture"
+#endif
+
+#if !defined(__NR_read)
+#define __NR_read 0
+#endif
+
+#if !defined(__NR_write)
+#define __NR_write 1
+#endif
+
+#if !defined(__NR_open)
+#define __NR_open 2
+#endif
+
+#if !defined(__NR_close)
+#define __NR_close 3
+#endif
+
+#if !defined(__NR_stat)
+#define __NR_stat 4
+#endif
+
+#if !defined(__NR_fstat)
+#define __NR_fstat 5
+#endif
+
+#if !defined(__NR_lstat)
+#define __NR_lstat 6
+#endif
+
+#if !defined(__NR_poll)
+#define __NR_poll 7
+#endif
+
+#if !defined(__NR_lseek)
+#define __NR_lseek 8
+#endif
+
+#if !defined(__NR_mmap)
+#define __NR_mmap 9
+#endif
+
+#if !defined(__NR_mprotect)
+#define __NR_mprotect 10
+#endif
+
+#if !defined(__NR_munmap)
+#define __NR_munmap 11
+#endif
+
+#if !defined(__NR_brk)
+#define __NR_brk 12
+#endif
+
+#if !defined(__NR_rt_sigaction)
+#define __NR_rt_sigaction 13
+#endif
+
+#if !defined(__NR_rt_sigprocmask)
+#define __NR_rt_sigprocmask 14
+#endif
+
+#if !defined(__NR_rt_sigreturn)
+#define __NR_rt_sigreturn 15
+#endif
+
+#if !defined(__NR_ioctl)
+#define __NR_ioctl 16
+#endif
+
+#if !defined(__NR_pread64)
+#define __NR_pread64 17
+#endif
+
+#if !defined(__NR_pwrite64)
+#define __NR_pwrite64 18
+#endif
+
+#if !defined(__NR_readv)
+#define __NR_readv 19
+#endif
+
+#if !defined(__NR_writev)
+#define __NR_writev 20
+#endif
+
+#if !defined(__NR_access)
+#define __NR_access 21
+#endif
+
+#if !defined(__NR_pipe)
+#define __NR_pipe 22
+#endif
+
+#if !defined(__NR_select)
+#define __NR_select 23
+#endif
+
+#if !defined(__NR_sched_yield)
+#define __NR_sched_yield 24
+#endif
+
+#if !defined(__NR_mremap)
+#define __NR_mremap 25
+#endif
+
+#if !defined(__NR_msync)
+#define __NR_msync 26
+#endif
+
+#if !defined(__NR_mincore)
+#define __NR_mincore 27
+#endif
+
+#if !defined(__NR_madvise)
+#define __NR_madvise 28
+#endif
+
+#if !defined(__NR_shmget)
+#define __NR_shmget 29
+#endif
+
+#if !defined(__NR_shmat)
+#define __NR_shmat 30
+#endif
+
+#if !defined(__NR_shmctl)
+#define __NR_shmctl 31
+#endif
+
+#if !defined(__NR_dup)
+#define __NR_dup 32
+#endif
+
+#if !defined(__NR_dup2)
+#define __NR_dup2 33
+#endif
+
+#if !defined(__NR_pause)
+#define __NR_pause 34
+#endif
+
+#if !defined(__NR_nanosleep)
+#define __NR_nanosleep 35
+#endif
+
+#if !defined(__NR_getitimer)
+#define __NR_getitimer 36
+#endif
+
+#if !defined(__NR_alarm)
+#define __NR_alarm 37
+#endif
+
+#if !defined(__NR_setitimer)
+#define __NR_setitimer 38
+#endif
+
+#if !defined(__NR_getpid)
+#define __NR_getpid 39
+#endif
+
+#if !defined(__NR_sendfile)
+#define __NR_sendfile 40
+#endif
+
+#if !defined(__NR_socket)
+#define __NR_socket 41
+#endif
+
+#if !defined(__NR_connect)
+#define __NR_connect 42
+#endif
+
+#if !defined(__NR_accept)
+#define __NR_accept 43
+#endif
+
+#if !defined(__NR_sendto)
+#define __NR_sendto 44
+#endif
+
+#if !defined(__NR_recvfrom)
+#define __NR_recvfrom 45
+#endif
+
+#if !defined(__NR_sendmsg)
+#define __NR_sendmsg 46
+#endif
+
+#if !defined(__NR_recvmsg)
+#define __NR_recvmsg 47
+#endif
+
+#if !defined(__NR_shutdown)
+#define __NR_shutdown 48
+#endif
+
+#if !defined(__NR_bind)
+#define __NR_bind 49
+#endif
+
+#if !defined(__NR_listen)
+#define __NR_listen 50
+#endif
+
+#if !defined(__NR_getsockname)
+#define __NR_getsockname 51
+#endif
+
+#if !defined(__NR_getpeername)
+#define __NR_getpeername 52
+#endif
+
+#if !defined(__NR_socketpair)
+#define __NR_socketpair 53
+#endif
+
+#if !defined(__NR_setsockopt)
+#define __NR_setsockopt 54
+#endif
+
+#if !defined(__NR_getsockopt)
+#define __NR_getsockopt 55
+#endif
+
+#if !defined(__NR_clone)
+#define __NR_clone 56
+#endif
+
+#if !defined(__NR_fork)
+#define __NR_fork 57
+#endif
+
+#if !defined(__NR_vfork)
+#define __NR_vfork 58
+#endif
+
+#if !defined(__NR_execve)
+#define __NR_execve 59
+#endif
+
+#if !defined(__NR_exit)
+#define __NR_exit 60
+#endif
+
+#if !defined(__NR_wait4)
+#define __NR_wait4 61
+#endif
+
+#if !defined(__NR_kill)
+#define __NR_kill 62
+#endif
+
+#if !defined(__NR_uname)
+#define __NR_uname 63
+#endif
+
+#if !defined(__NR_semget)
+#define __NR_semget 64
+#endif
+
+#if !defined(__NR_semop)
+#define __NR_semop 65
+#endif
+
+#if !defined(__NR_semctl)
+#define __NR_semctl 66
+#endif
+
+#if !defined(__NR_shmdt)
+#define __NR_shmdt 67
+#endif
+
+#if !defined(__NR_msgget)
+#define __NR_msgget 68
+#endif
+
+#if !defined(__NR_msgsnd)
+#define __NR_msgsnd 69
+#endif
+
+#if !defined(__NR_msgrcv)
+#define __NR_msgrcv 70
+#endif
+
+#if !defined(__NR_msgctl)
+#define __NR_msgctl 71
+#endif
+
+#if !defined(__NR_fcntl)
+#define __NR_fcntl 72
+#endif
+
+#if !defined(__NR_flock)
+#define __NR_flock 73
+#endif
+
+#if !defined(__NR_fsync)
+#define __NR_fsync 74
+#endif
+
+#if !defined(__NR_fdatasync)
+#define __NR_fdatasync 75
+#endif
+
+#if !defined(__NR_truncate)
+#define __NR_truncate 76
+#endif
+
+#if !defined(__NR_ftruncate)
+#define __NR_ftruncate 77
+#endif
+
+#if !defined(__NR_getdents)
+#define __NR_getdents 78
+#endif
+
+#if !defined(__NR_getcwd)
+#define __NR_getcwd 79
+#endif
+
+#if !defined(__NR_chdir)
+#define __NR_chdir 80
+#endif
+
+#if !defined(__NR_fchdir)
+#define __NR_fchdir 81
+#endif
+
+#if !defined(__NR_rename)
+#define __NR_rename 82
+#endif
+
+#if !defined(__NR_mkdir)
+#define __NR_mkdir 83
+#endif
+
+#if !defined(__NR_rmdir)
+#define __NR_rmdir 84
+#endif
+
+#if !defined(__NR_creat)
+#define __NR_creat 85
+#endif
+
+#if !defined(__NR_link)
+#define __NR_link 86
+#endif
+
+#if !defined(__NR_unlink)
+#define __NR_unlink 87
+#endif
+
+#if !defined(__NR_symlink)
+#define __NR_symlink 88
+#endif
+
+#if !defined(__NR_readlink)
+#define __NR_readlink 89
+#endif
+
+#if !defined(__NR_chmod)
+#define __NR_chmod 90
+#endif
+
+#if !defined(__NR_fchmod)
+#define __NR_fchmod 91
+#endif
+
+#if !defined(__NR_chown)
+#define __NR_chown 92
+#endif
+
+#if !defined(__NR_fchown)
+#define __NR_fchown 93
+#endif
+
+#if !defined(__NR_lchown)
+#define __NR_lchown 94
+#endif
+
+#if !defined(__NR_umask)
+#define __NR_umask 95
+#endif
+
+#if !defined(__NR_gettimeofday)
+#define __NR_gettimeofday 96
+#endif
+
+#if !defined(__NR_getrlimit)
+#define __NR_getrlimit 97
+#endif
+
+#if !defined(__NR_getrusage)
+#define __NR_getrusage 98
+#endif
+
+#if !defined(__NR_sysinfo)
+#define __NR_sysinfo 99
+#endif
+
+#if !defined(__NR_times)
+#define __NR_times 100
+#endif
+
+#if !defined(__NR_ptrace)
+#define __NR_ptrace 101
+#endif
+
+#if !defined(__NR_getuid)
+#define __NR_getuid 102
+#endif
+
+#if !defined(__NR_syslog)
+#define __NR_syslog 103
+#endif
+
+#if !defined(__NR_getgid)
+#define __NR_getgid 104
+#endif
+
+#if !defined(__NR_setuid)
+#define __NR_setuid 105
+#endif
+
+#if !defined(__NR_setgid)
+#define __NR_setgid 106
+#endif
+
+#if !defined(__NR_geteuid)
+#define __NR_geteuid 107
+#endif
+
+#if !defined(__NR_getegid)
+#define __NR_getegid 108
+#endif
+
+#if !defined(__NR_setpgid)
+#define __NR_setpgid 109
+#endif
+
+#if !defined(__NR_getppid)
+#define __NR_getppid 110
+#endif
+
+#if !defined(__NR_getpgrp)
+#define __NR_getpgrp 111
+#endif
+
+#if !defined(__NR_setsid)
+#define __NR_setsid 112
+#endif
+
+#if !defined(__NR_setreuid)
+#define __NR_setreuid 113
+#endif
+
+#if !defined(__NR_setregid)
+#define __NR_setregid 114
+#endif
+
+#if !defined(__NR_getgroups)
+#define __NR_getgroups 115
+#endif
+
+#if !defined(__NR_setgroups)
+#define __NR_setgroups 116
+#endif
+
+#if !defined(__NR_setresuid)
+#define __NR_setresuid 117
+#endif
+
+#if !defined(__NR_getresuid)
+#define __NR_getresuid 118
+#endif
+
+#if !defined(__NR_setresgid)
+#define __NR_setresgid 119
+#endif
+
+#if !defined(__NR_getresgid)
+#define __NR_getresgid 120
+#endif
+
+#if !defined(__NR_getpgid)
+#define __NR_getpgid 121
+#endif
+
+#if !defined(__NR_setfsuid)
+#define __NR_setfsuid 122
+#endif
+
+#if !defined(__NR_setfsgid)
+#define __NR_setfsgid 123
+#endif
+
+#if !defined(__NR_getsid)
+#define __NR_getsid 124
+#endif
+
+#if !defined(__NR_capget)
+#define __NR_capget 125
+#endif
+
+#if !defined(__NR_capset)
+#define __NR_capset 126
+#endif
+
+#if !defined(__NR_rt_sigpending)
+#define __NR_rt_sigpending 127
+#endif
+
+#if !defined(__NR_rt_sigtimedwait)
+#define __NR_rt_sigtimedwait 128
+#endif
+
+#if !defined(__NR_rt_sigqueueinfo)
+#define __NR_rt_sigqueueinfo 129
+#endif
+
+#if !defined(__NR_rt_sigsuspend)
+#define __NR_rt_sigsuspend 130
+#endif
+
+#if !defined(__NR_sigaltstack)
+#define __NR_sigaltstack 131
+#endif
+
+#if !defined(__NR_utime)
+#define __NR_utime 132
+#endif
+
+#if !defined(__NR_mknod)
+#define __NR_mknod 133
+#endif
+
+#if !defined(__NR_uselib)
+#define __NR_uselib 134
+#endif
+
+#if !defined(__NR_personality)
+#define __NR_personality 135
+#endif
+
+#if !defined(__NR_ustat)
+#define __NR_ustat 136
+#endif
+
+#if !defined(__NR_statfs)
+#define __NR_statfs 137
+#endif
+
+#if !defined(__NR_fstatfs)
+#define __NR_fstatfs 138
+#endif
+
+#if !defined(__NR_sysfs)
+#define __NR_sysfs 139
+#endif
+
+#if !defined(__NR_getpriority)
+#define __NR_getpriority 140
+#endif
+
+#if !defined(__NR_setpriority)
+#define __NR_setpriority 141
+#endif
+
+#if !defined(__NR_sched_setparam)
+#define __NR_sched_setparam 142
+#endif
+
+#if !defined(__NR_sched_getparam)
+#define __NR_sched_getparam 143
+#endif
+
+#if !defined(__NR_sched_setscheduler)
+#define __NR_sched_setscheduler 144
+#endif
+
+#if !defined(__NR_sched_getscheduler)
+#define __NR_sched_getscheduler 145
+#endif
+
+#if !defined(__NR_sched_get_priority_max)
+#define __NR_sched_get_priority_max 146
+#endif
+
+#if !defined(__NR_sched_get_priority_min)
+#define __NR_sched_get_priority_min 147
+#endif
+
+#if !defined(__NR_sched_rr_get_interval)
+#define __NR_sched_rr_get_interval 148
+#endif
+
+#if !defined(__NR_mlock)
+#define __NR_mlock 149
+#endif
+
+#if !defined(__NR_munlock)
+#define __NR_munlock 150
+#endif
+
+#if !defined(__NR_mlockall)
+#define __NR_mlockall 151
+#endif
+
+#if !defined(__NR_munlockall)
+#define __NR_munlockall 152
+#endif
+
+#if !defined(__NR_vhangup)
+#define __NR_vhangup 153
+#endif
+
+#if !defined(__NR_modify_ldt)
+#define __NR_modify_ldt 154
+#endif
+
+#if !defined(__NR_pivot_root)
+#define __NR_pivot_root 155
+#endif
+
+#if !defined(__NR__sysctl)
+#define __NR__sysctl 156
+#endif
+
+#if !defined(__NR_prctl)
+#define __NR_prctl 157
+#endif
+
+#if !defined(__NR_arch_prctl)
+#define __NR_arch_prctl 158
+#endif
+
+#if !defined(__NR_adjtimex)
+#define __NR_adjtimex 159
+#endif
+
+#if !defined(__NR_setrlimit)
+#define __NR_setrlimit 160
+#endif
+
+#if !defined(__NR_chroot)
+#define __NR_chroot 161
+#endif
+
+#if !defined(__NR_sync)
+#define __NR_sync 162
+#endif
+
+#if !defined(__NR_acct)
+#define __NR_acct 163
+#endif
+
+#if !defined(__NR_settimeofday)
+#define __NR_settimeofday 164
+#endif
+
+#if !defined(__NR_mount)
+#define __NR_mount 165
+#endif
+
+#if !defined(__NR_umount2)
+#define __NR_umount2 166
+#endif
+
+#if !defined(__NR_swapon)
+#define __NR_swapon 167
+#endif
+
+#if !defined(__NR_swapoff)
+#define __NR_swapoff 168
+#endif
+
+#if !defined(__NR_reboot)
+#define __NR_reboot 169
+#endif
+
+#if !defined(__NR_sethostname)
+#define __NR_sethostname 170
+#endif
+
+#if !defined(__NR_setdomainname)
+#define __NR_setdomainname 171
+#endif
+
+#if !defined(__NR_iopl)
+#define __NR_iopl 172
+#endif
+
+#if !defined(__NR_ioperm)
+#define __NR_ioperm 173
+#endif
+
+#if !defined(__NR_create_module)
+#define __NR_create_module 174
+#endif
+
+#if !defined(__NR_init_module)
+#define __NR_init_module 175
+#endif
+
+#if !defined(__NR_delete_module)
+#define __NR_delete_module 176
+#endif
+
+#if !defined(__NR_get_kernel_syms)
+#define __NR_get_kernel_syms 177
+#endif
+
+#if !defined(__NR_query_module)
+#define __NR_query_module 178
+#endif
+
+#if !defined(__NR_quotactl)
+#define __NR_quotactl 179
+#endif
+
+#if !defined(__NR_nfsservctl)
+#define __NR_nfsservctl 180
+#endif
+
+#if !defined(__NR_getpmsg)
+#define __NR_getpmsg 181
+#endif
+
+#if !defined(__NR_putpmsg)
+#define __NR_putpmsg 182
+#endif
+
+#if !defined(__NR_afs_syscall)
+#define __NR_afs_syscall 183
+#endif
+
+#if !defined(__NR_tuxcall)
+#define __NR_tuxcall 184
+#endif
+
+#if !defined(__NR_security)
+#define __NR_security 185
+#endif
+
+#if !defined(__NR_gettid)
+#define __NR_gettid 186
+#endif
+
+#if !defined(__NR_readahead)
+#define __NR_readahead 187
+#endif
+
+#if !defined(__NR_setxattr)
+#define __NR_setxattr 188
+#endif
+
+#if !defined(__NR_lsetxattr)
+#define __NR_lsetxattr 189
+#endif
+
+#if !defined(__NR_fsetxattr)
+#define __NR_fsetxattr 190
+#endif
+
+#if !defined(__NR_getxattr)
+#define __NR_getxattr 191
+#endif
+
+#if !defined(__NR_lgetxattr)
+#define __NR_lgetxattr 192
+#endif
+
+#if !defined(__NR_fgetxattr)
+#define __NR_fgetxattr 193
+#endif
+
+#if !defined(__NR_listxattr)
+#define __NR_listxattr 194
+#endif
+
+#if !defined(__NR_llistxattr)
+#define __NR_llistxattr 195
+#endif
+
+#if !defined(__NR_flistxattr)
+#define __NR_flistxattr 196
+#endif
+
+#if !defined(__NR_removexattr)
+#define __NR_removexattr 197
+#endif
+
+#if !defined(__NR_lremovexattr)
+#define __NR_lremovexattr 198
+#endif
+
+#if !defined(__NR_fremovexattr)
+#define __NR_fremovexattr 199
+#endif
+
+#if !defined(__NR_tkill)
+#define __NR_tkill 200
+#endif
+
+#if !defined(__NR_time)
+#define __NR_time 201
+#endif
+
+#if !defined(__NR_futex)
+#define __NR_futex 202
+#endif
+
+#if !defined(__NR_sched_setaffinity)
+#define __NR_sched_setaffinity 203
+#endif
+
+#if !defined(__NR_sched_getaffinity)
+#define __NR_sched_getaffinity 204
+#endif
+
+#if !defined(__NR_set_thread_area)
+#define __NR_set_thread_area 205
+#endif
+
+#if !defined(__NR_io_setup)
+#define __NR_io_setup 206
+#endif
+
+#if !defined(__NR_io_destroy)
+#define __NR_io_destroy 207
+#endif
+
+#if !defined(__NR_io_getevents)
+#define __NR_io_getevents 208
+#endif
+
+#if !defined(__NR_io_submit)
+#define __NR_io_submit 209
+#endif
+
+#if !defined(__NR_io_cancel)
+#define __NR_io_cancel 210
+#endif
+
+#if !defined(__NR_get_thread_area)
+#define __NR_get_thread_area 211
+#endif
+
+#if !defined(__NR_lookup_dcookie)
+#define __NR_lookup_dcookie 212
+#endif
+
+#if !defined(__NR_epoll_create)
+#define __NR_epoll_create 213
+#endif
+
+#if !defined(__NR_epoll_ctl_old)
+#define __NR_epoll_ctl_old 214
+#endif
+
+#if !defined(__NR_epoll_wait_old)
+#define __NR_epoll_wait_old 215
+#endif
+
+#if !defined(__NR_remap_file_pages)
+#define __NR_remap_file_pages 216
+#endif
+
+#if !defined(__NR_getdents64)
+#define __NR_getdents64 217
+#endif
+
+#if !defined(__NR_set_tid_address)
+#define __NR_set_tid_address 218
+#endif
+
+#if !defined(__NR_restart_syscall)
+#define __NR_restart_syscall 219
+#endif
+
+#if !defined(__NR_semtimedop)
+#define __NR_semtimedop 220
+#endif
+
+#if !defined(__NR_fadvise64)
+#define __NR_fadvise64 221
+#endif
+
+#if !defined(__NR_timer_create)
+#define __NR_timer_create 222
+#endif
+
+#if !defined(__NR_timer_settime)
+#define __NR_timer_settime 223
+#endif
+
+#if !defined(__NR_timer_gettime)
+#define __NR_timer_gettime 224
+#endif
+
+#if !defined(__NR_timer_getoverrun)
+#define __NR_timer_getoverrun 225
+#endif
+
+#if !defined(__NR_timer_delete)
+#define __NR_timer_delete 226
+#endif
+
+#if !defined(__NR_clock_settime)
+#define __NR_clock_settime 227
+#endif
+
+#if !defined(__NR_clock_gettime)
+#define __NR_clock_gettime 228
+#endif
+
+#if !defined(__NR_clock_getres)
+#define __NR_clock_getres 229
+#endif
+
+#if !defined(__NR_clock_nanosleep)
+#define __NR_clock_nanosleep 230
+#endif
+
+#if !defined(__NR_exit_group)
+#define __NR_exit_group 231
+#endif
+
+#if !defined(__NR_epoll_wait)
+#define __NR_epoll_wait 232
+#endif
+
+#if !defined(__NR_epoll_ctl)
+#define __NR_epoll_ctl 233
+#endif
+
+#if !defined(__NR_tgkill)
+#define __NR_tgkill 234
+#endif
+
+#if !defined(__NR_utimes)
+#define __NR_utimes 235
+#endif
+
+#if !defined(__NR_vserver)
+#define __NR_vserver 236
+#endif
+
+#if !defined(__NR_mbind)
+#define __NR_mbind 237
+#endif
+
+#if !defined(__NR_set_mempolicy)
+#define __NR_set_mempolicy 238
+#endif
+
+#if !defined(__NR_get_mempolicy)
+#define __NR_get_mempolicy 239
+#endif
+
+#if !defined(__NR_mq_open)
+#define __NR_mq_open 240
+#endif
+
+#if !defined(__NR_mq_unlink)
+#define __NR_mq_unlink 241
+#endif
+
+#if !defined(__NR_mq_timedsend)
+#define __NR_mq_timedsend 242
+#endif
+
+#if !defined(__NR_mq_timedreceive)
+#define __NR_mq_timedreceive 243
+#endif
+
+#if !defined(__NR_mq_notify)
+#define __NR_mq_notify 244
+#endif
+
+#if !defined(__NR_mq_getsetattr)
+#define __NR_mq_getsetattr 245
+#endif
+
+#if !defined(__NR_kexec_load)
+#define __NR_kexec_load 246
+#endif
+
+#if !defined(__NR_waitid)
+#define __NR_waitid 247
+#endif
+
+#if !defined(__NR_add_key)
+#define __NR_add_key 248
+#endif
+
+#if !defined(__NR_request_key)
+#define __NR_request_key 249
+#endif
+
+#if !defined(__NR_keyctl)
+#define __NR_keyctl 250
+#endif
+
+#if !defined(__NR_ioprio_set)
+#define __NR_ioprio_set 251
+#endif
+
+#if !defined(__NR_ioprio_get)
+#define __NR_ioprio_get 252
+#endif
+
+#if !defined(__NR_inotify_init)
+#define __NR_inotify_init 253
+#endif
+
+#if !defined(__NR_inotify_add_watch)
+#define __NR_inotify_add_watch 254
+#endif
+
+#if !defined(__NR_inotify_rm_watch)
+#define __NR_inotify_rm_watch 255
+#endif
+
+#if !defined(__NR_migrate_pages)
+#define __NR_migrate_pages 256
+#endif
+
+#if !defined(__NR_openat)
+#define __NR_openat 257
+#endif
+
+#if !defined(__NR_mkdirat)
+#define __NR_mkdirat 258
+#endif
+
+#if !defined(__NR_mknodat)
+#define __NR_mknodat 259
+#endif
+
+#if !defined(__NR_fchownat)
+#define __NR_fchownat 260
+#endif
+
+#if !defined(__NR_futimesat)
+#define __NR_futimesat 261
+#endif
+
+#if !defined(__NR_newfstatat)
+#define __NR_newfstatat 262
+#endif
+
+#if !defined(__NR_unlinkat)
+#define __NR_unlinkat 263
+#endif
+
+#if !defined(__NR_renameat)
+#define __NR_renameat 264
+#endif
+
+#if !defined(__NR_linkat)
+#define __NR_linkat 265
+#endif
+
+#if !defined(__NR_symlinkat)
+#define __NR_symlinkat 266
+#endif
+
+#if !defined(__NR_readlinkat)
+#define __NR_readlinkat 267
+#endif
+
+#if !defined(__NR_fchmodat)
+#define __NR_fchmodat 268
+#endif
+
+#if !defined(__NR_faccessat)
+#define __NR_faccessat 269
+#endif
+
+#if !defined(__NR_pselect6)
+#define __NR_pselect6 270
+#endif
+
+#if !defined(__NR_ppoll)
+#define __NR_ppoll 271
+#endif
+
+#if !defined(__NR_unshare)
+#define __NR_unshare 272
+#endif
+
+#if !defined(__NR_set_robust_list)
+#define __NR_set_robust_list 273
+#endif
+
+#if !defined(__NR_get_robust_list)
+#define __NR_get_robust_list 274
+#endif
+
+#if !defined(__NR_splice)
+#define __NR_splice 275
+#endif
+
+#if !defined(__NR_tee)
+#define __NR_tee 276
+#endif
+
+#if !defined(__NR_sync_file_range)
+#define __NR_sync_file_range 277
+#endif
+
+#if !defined(__NR_vmsplice)
+#define __NR_vmsplice 278
+#endif
+
+#if !defined(__NR_move_pages)
+#define __NR_move_pages 279
+#endif
+
+#if !defined(__NR_utimensat)
+#define __NR_utimensat 280
+#endif
+
+#if !defined(__NR_epoll_pwait)
+#define __NR_epoll_pwait 281
+#endif
+
+#if !defined(__NR_signalfd)
+#define __NR_signalfd 282
+#endif
+
+#if !defined(__NR_timerfd_create)
+#define __NR_timerfd_create 283
+#endif
+
+#if !defined(__NR_eventfd)
+#define __NR_eventfd 284
+#endif
+
+#if !defined(__NR_fallocate)
+#define __NR_fallocate 285
+#endif
+
+#if !defined(__NR_timerfd_settime)
+#define __NR_timerfd_settime 286
+#endif
+
+#if !defined(__NR_timerfd_gettime)
+#define __NR_timerfd_gettime 287
+#endif
+
+#if !defined(__NR_accept4)
+#define __NR_accept4 288
+#endif
+
+#if !defined(__NR_signalfd4)
+#define __NR_signalfd4 289
+#endif
+
+#if !defined(__NR_eventfd2)
+#define __NR_eventfd2 290
+#endif
+
+#if !defined(__NR_epoll_create1)
+#define __NR_epoll_create1 291
+#endif
+
+#if !defined(__NR_dup3)
+#define __NR_dup3 292
+#endif
+
+#if !defined(__NR_pipe2)
+#define __NR_pipe2 293
+#endif
+
+#if !defined(__NR_inotify_init1)
+#define __NR_inotify_init1 294
+#endif
+
+#if !defined(__NR_preadv)
+#define __NR_preadv 295
+#endif
+
+#if !defined(__NR_pwritev)
+#define __NR_pwritev 296
+#endif
+
+#if !defined(__NR_rt_tgsigqueueinfo)
+#define __NR_rt_tgsigqueueinfo 297
+#endif
+
+#if !defined(__NR_perf_event_open)
+#define __NR_perf_event_open 298
+#endif
+
+#if !defined(__NR_recvmmsg)
+#define __NR_recvmmsg 299
+#endif
+
+#if !defined(__NR_fanotify_init)
+#define __NR_fanotify_init 300
+#endif
+
+#if !defined(__NR_fanotify_mark)
+#define __NR_fanotify_mark 301
+#endif
+
+#if !defined(__NR_prlimit64)
+#define __NR_prlimit64 302
+#endif
+
+#if !defined(__NR_name_to_handle_at)
+#define __NR_name_to_handle_at 303
+#endif
+
+#if !defined(__NR_open_by_handle_at)
+#define __NR_open_by_handle_at 304
+#endif
+
+#if !defined(__NR_clock_adjtime)
+#define __NR_clock_adjtime 305
+#endif
+
+#if !defined(__NR_syncfs)
+#define __NR_syncfs 306
+#endif
+
+#if !defined(__NR_sendmmsg)
+#define __NR_sendmmsg 307
+#endif
+
+#if !defined(__NR_setns)
+#define __NR_setns 308
+#endif
+
+#if !defined(__NR_getcpu)
+#define __NR_getcpu 309
+#endif
+
+#if !defined(__NR_process_vm_readv)
+#define __NR_process_vm_readv 310
+#endif
+
+#if !defined(__NR_process_vm_writev)
+#define __NR_process_vm_writev 311
+#endif
+
+#if !defined(__NR_kcmp)
+#define __NR_kcmp 312
+#endif
+
+#if !defined(__NR_finit_module)
+#define __NR_finit_module 313
+#endif
+
+#if !defined(__NR_sched_setattr)
+#define __NR_sched_setattr 314
+#endif
+
+#if !defined(__NR_sched_getattr)
+#define __NR_sched_getattr 315
+#endif
+
+#if !defined(__NR_renameat2)
+#define __NR_renameat2 316
+#endif
+
+#if !defined(__NR_seccomp)
+#define __NR_seccomp 317
+#endif
+
+#if !defined(__NR_getrandom)
+#define __NR_getrandom 318
+#endif
+
+#if !defined(__NR_memfd_create)
+#define __NR_memfd_create 319
+#endif
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_X86_64_LINUX_SYSCALLS_H_
+
diff --git a/libchrome/sandbox/linux/system_headers/x86_64_linux_ucontext.h b/libchrome/sandbox/linux/system_headers/x86_64_linux_ucontext.h
new file mode 100644
index 0000000..1f1abe6
--- /dev/null
+++ b/libchrome/sandbox/linux/system_headers/x86_64_linux_ucontext.h
@@ -0,0 +1,90 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SYSTEM_HEADERS_X86_64_LINUX_UCONTEXT_H_
+#define SANDBOX_LINUX_SYSTEM_HEADERS_X86_64_LINUX_UCONTEXT_H_
+
+#include <stdint.h>
+
+// We do something compatible with glibc. Hopefully, at some point Android will
+// provide that for us, and __BIONIC_HAVE_UCONTEXT_T should be defined.
+// Spec:
+// http://refspecs.linuxfoundation.org/LSB_4.1.0/LSB-Core-AMD64/LSB-Core-AMD64/libc-ddefs.html#AEN5668
+
+#if !defined(__BIONIC_HAVE_UCONTEXT_T)
+#include <asm/sigcontext.h>
+
+struct _libc_fpxreg {
+ unsigned short significand[4];
+ unsigned short exponent;
+ unsigned short padding[3];
+};
+
+struct _libc_xmmreg {
+ uint32_t element[4];
+};
+
+struct _libc_fpstate {
+ uint16_t cwd;
+ uint16_t swd;
+ uint16_t twd;
+ uint16_t fop;
+ uint64_t rip;
+ uint64_t rdp;
+ uint32_t mxcsr;
+ uint32_t mxcsr_mask;
+ struct _libc_fpxreg _st[8];
+ struct _libc_xmmreg _xmm[16];
+ uint32_t padding[24];
+};
+
+typedef uint64_t greg_t;
+
+typedef struct {
+ greg_t gregs[23];
+ struct _libc_fpstate* fpregs;
+ unsigned long __reserved1[8];
+} mcontext_t;
+
+enum {
+ REG_R8 = 0,
+ REG_R9,
+ REG_R10,
+ REG_R11,
+ REG_R12,
+ REG_R13,
+ REG_R14,
+ REG_R15,
+ REG_RDI,
+ REG_RSI,
+ REG_RBP,
+ REG_RBX,
+ REG_RDX,
+ REG_RAX,
+ REG_RCX,
+ REG_RSP,
+ REG_RIP,
+ REG_EFL,
+ REG_CSGSFS,
+ REG_ERR,
+ REG_TRAPNO,
+ REG_OLDMASK,
+ REG_CR2,
+ NGREG,
+};
+
+typedef struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ sigset_t uc_sigmask;
+ struct _libc_fpstate __fpregs_mem;
+} ucontext_t;
+
+#else
+#include <sys/ucontext.h>
+#endif // __BIONIC_HAVE_UCONTEXT_T
+
+#endif // SANDBOX_LINUX_SYSTEM_HEADERS_X86_64_LINUX_UCONTEXT_H_
diff --git a/libchrome/sandbox/mac/BUILD.gn b/libchrome/sandbox/mac/BUILD.gn
new file mode 100644
index 0000000..fd53131
--- /dev/null
+++ b/libchrome/sandbox/mac/BUILD.gn
@@ -0,0 +1,64 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/mac/mac_sdk.gni")
+import("//testing/test.gni")
+
+component("sandbox") {
+ sources = [
+ "bootstrap_sandbox.cc",
+ "bootstrap_sandbox.h",
+ "launchd_interception_server.cc",
+ "launchd_interception_server.h",
+ "mach_message_server.cc",
+ "mach_message_server.h",
+ "message_server.h",
+ "os_compatibility.cc",
+ "os_compatibility.h",
+ "policy.cc",
+ "policy.h",
+ "pre_exec_delegate.cc",
+ "pre_exec_delegate.h",
+ "xpc.h",
+ "xpc_message_server.cc",
+ "xpc_message_server.h",
+ ]
+
+ defines = [ "SANDBOX_IMPLEMENTATION" ]
+ libs = [ "bsm" ]
+
+ deps = [
+ "//base",
+ ]
+}
+
+component("seatbelt") {
+ sources = [
+ "seatbelt.cc",
+ "seatbelt.h",
+ "seatbelt_export.h",
+ ]
+ libs = [ "sandbox" ]
+ defines = [ "SEATBELT_IMPLEMENTATION" ]
+}
+
+test("sandbox_mac_unittests") {
+ sources = [
+ "bootstrap_sandbox_unittest.mm",
+ "policy_unittest.cc",
+ "xpc_message_server_unittest.cc",
+ ]
+
+ libs = [
+ "CoreFoundation.framework",
+ "Foundation.framework",
+ ]
+
+ deps = [
+ ":sandbox",
+ "//base",
+ "//base/test:run_all_unittests",
+ "//testing/gtest",
+ ]
+}
diff --git a/libchrome/sandbox/mac/OWNERS b/libchrome/sandbox/mac/OWNERS
new file mode 100644
index 0000000..163563f
--- /dev/null
+++ b/libchrome/sandbox/mac/OWNERS
@@ -0,0 +1,2 @@
+mark@chromium.org
+rsesek@chromium.org
diff --git a/libchrome/sandbox/mac/message_server.h b/libchrome/sandbox/mac/message_server.h
new file mode 100644
index 0000000..6ee119b
--- /dev/null
+++ b/libchrome/sandbox/mac/message_server.h
@@ -0,0 +1,76 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_MAC_MESSAGE_SERVER_H_
+#define SANDBOX_MAC_MESSAGE_SERVER_H_
+
+#include <mach/mach.h>
+#include <unistd.h>
+
+#include "sandbox/mac/xpc.h"
+
+namespace sandbox {
+
+// A message received by a MessageServer. Each concrete implementation of
+// that interface will handle the fields of this union appropriately.
+// Consumers should treat this as an opaque handle.
+union IPCMessage {
+ mach_msg_header_t* mach;
+ xpc_object_t xpc;
+};
+
+// A delegate interface for MessageServer that handles processing of
+// incoming intercepted IPC messages.
+class MessageDemuxer {
+ public:
+ // Handle a |request| message. The message is owned by the server. Use the
+ // server's methods to create and send a reply message.
+ virtual void DemuxMessage(IPCMessage request) = 0;
+
+ protected:
+ virtual ~MessageDemuxer() {}
+};
+
+// An interface for an IPC server that implements Mach messaging semantics.
+// The concrete implementation may be powered by raw Mach messages, XPC, or
+// some other technology. This interface is the abstraction on top of those
+// that enables message interception.
+class MessageServer {
+ public:
+ virtual ~MessageServer() {}
+
+ // Initializes the class and starts running the message server. If this
+ // returns false, no other methods may be called on this class.
+ virtual bool Initialize() = 0;
+
+ // Blocks the calling thread while the server shuts down. This prevents
+ // the server from receiving new messages. After this method is called,
+ // no other methods may be called on this class.
+ virtual void Shutdown() = 0;
+
+ // Given a received request message, returns the PID of the sending process.
+ virtual pid_t GetMessageSenderPID(IPCMessage request) = 0;
+
+ // Creates a reply message from a request message. The result is owned by
+ // the server.
+ virtual IPCMessage CreateReply(IPCMessage request) = 0;
+
+ // Sends a reply message. Returns true if the message was sent successfully.
+ virtual bool SendReply(IPCMessage reply) = 0;
+
+ // Forwards the original |request| to the |destination| for handling.
+ virtual void ForwardMessage(IPCMessage request, mach_port_t destination) = 0;
+
+ // Replies to the received |request| message by creating a reply and setting
+ // the specified |error_code| in a field that is interpreted by the
+ // underlying IPC system.
+ virtual void RejectMessage(IPCMessage request, int error_code) = 0;
+
+ // Returns the Mach port on which the MessageServer is listening.
+ virtual mach_port_t GetServerPort() const = 0;
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_MAC_MESSAGE_SERVER_H_
diff --git a/libchrome/sandbox/mac/sandbox_mac.gypi b/libchrome/sandbox/mac/sandbox_mac.gypi
new file mode 100644
index 0000000..79740e5
--- /dev/null
+++ b/libchrome/sandbox/mac/sandbox_mac.gypi
@@ -0,0 +1,104 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'seatbelt',
+ 'type' : '<(component)',
+ 'sources': [
+ 'seatbelt.cc',
+ 'seatbelt.h',
+ 'seatbelt_export.h',
+ ],
+ 'defines': [
+ 'SEATBELT_IMPLEMENTATION',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/usr/lib/libsandbox.dylib',
+ ],
+ }
+ },
+ {
+ 'target_name': 'sandbox',
+ 'type': '<(component)',
+ 'sources': [
+ 'bootstrap_sandbox.cc',
+ 'bootstrap_sandbox.h',
+ 'launchd_interception_server.cc',
+ 'launchd_interception_server.h',
+ 'mach_message_server.cc',
+ 'mach_message_server.h',
+ 'message_server.h',
+ 'os_compatibility.cc',
+ 'os_compatibility.h',
+ 'policy.cc',
+ 'policy.h',
+ 'pre_exec_delegate.cc',
+ 'pre_exec_delegate.h',
+ 'xpc.h',
+ 'xpc_message_server.cc',
+ 'xpc_message_server.h',
+ ],
+ 'dependencies': [
+ '../base/base.gyp:base',
+ ],
+ 'include_dirs': [
+ '..',
+ '<(SHARED_INTERMEDIATE_DIR)',
+ ],
+ 'defines': [
+ 'SANDBOX_IMPLEMENTATION',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/usr/lib/libbsm.dylib',
+ ],
+ },
+ },
+ {
+ 'target_name': 'sandbox_mac_unittests',
+ 'type': 'executable',
+ 'sources': [
+ 'bootstrap_sandbox_unittest.mm',
+ 'policy_unittest.cc',
+ 'xpc_message_server_unittest.cc',
+ ],
+ 'dependencies': [
+ 'sandbox',
+ '../base/base.gyp:base',
+ '../base/base.gyp:run_all_unittests',
+ '../testing/gtest.gyp:gtest',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ ],
+ },
+ },
+ ],
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'sandbox_mac_unittests_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'sandbox_mac_unittests',
+ ],
+ 'includes': [ '../../build/isolate.gypi' ],
+ 'sources': [ '../sandbox_mac_unittests.isolate' ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/libchrome/sandbox/sandbox.gyp b/libchrome/sandbox/sandbox.gyp
new file mode 100644
index 0000000..f93fa18
--- /dev/null
+++ b/libchrome/sandbox/sandbox.gyp
@@ -0,0 +1,35 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ },
+ 'conditions': [
+ [ 'OS=="win"', {
+ 'includes': [
+ 'win/sandbox_win.gypi',
+ ],
+ }],
+ [ 'OS=="linux" or OS=="android"', {
+ 'includes': [
+ 'linux/sandbox_linux.gypi',
+ ],
+ }],
+ [ 'OS=="mac" and OS!="ios"', {
+ 'includes': [
+ 'mac/sandbox_mac.gypi',
+ ],
+ }],
+ [ 'OS!="win" and OS!="mac" and OS!="linux" and OS!="android"', {
+      # A 'default' to accommodate the "sandbox" target.
+ 'targets': [
+ {
+ 'target_name': 'sandbox',
+ 'type': 'none',
+ }
+ ]
+ }],
+ ],
+}
diff --git a/libchrome/sandbox/sandbox_export.h b/libchrome/sandbox/sandbox_export.h
new file mode 100644
index 0000000..35d6a1b
--- /dev/null
+++ b/libchrome/sandbox/sandbox_export.h
@@ -0,0 +1,26 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_SANDBOX_EXPORT_H_
+#define SANDBOX_SANDBOX_EXPORT_H_
+
+#if defined(WIN32)
+#error "sandbox_export.h does not support WIN32."
+#endif
+
+#if defined(COMPONENT_BUILD)
+
+#if defined(SANDBOX_IMPLEMENTATION)
+#define SANDBOX_EXPORT __attribute__((visibility("default")))
+#else
+#define SANDBOX_EXPORT
+#endif // defined(SANDBOX_IMPLEMENTATION)
+
+#else // defined(COMPONENT_BUILD)
+
+#define SANDBOX_EXPORT
+
+#endif // defined(COMPONENT_BUILD)
+
+#endif // SANDBOX_SANDBOX_EXPORT_H_
diff --git a/libchrome/sandbox/sandbox_linux_unittests.isolate b/libchrome/sandbox/sandbox_linux_unittests.isolate
new file mode 100644
index 0000000..2b7c2a7
--- /dev/null
+++ b/libchrome/sandbox/sandbox_linux_unittests.isolate
@@ -0,0 +1,23 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Because of a limitation in isolate_driver.py, this file needs to be in
+# the same directory as the main .gyp file.
+
+{
+ 'conditions': [
+ ['OS=="android" or OS=="linux"', {
+ 'variables': {
+ 'command': [
+ '<(PRODUCT_DIR)/sandbox_linux_unittests',
+ ],
+ },
+ }],
+ ],
+ 'includes': [
+ # This is needed because of base/ dependencies on
+ # icudtl.dat.
+ '../base/base.isolate',
+ ],
+}
diff --git a/libchrome/sandbox/win/BUILD.gn b/libchrome/sandbox/win/BUILD.gn
new file mode 100644
index 0000000..60bb499
--- /dev/null
+++ b/libchrome/sandbox/win/BUILD.gn
@@ -0,0 +1,326 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//testing/test.gni")
+
+# This needs to be a static library rather than a sources set because small
+# portions of this are used in some contexts (like chrome_elf), and it
+# doesn't seem to dead-code strip very well. This saves 12K on chrome_elf.dll,
+# over a source set, for example.
+static_library("sandbox") {
+ sources = [
+ "src/acl.cc",
+ "src/acl.h",
+ "src/broker_services.cc",
+ "src/broker_services.h",
+ "src/crosscall_client.h",
+ "src/crosscall_params.h",
+ "src/crosscall_server.cc",
+ "src/crosscall_server.h",
+ "src/eat_resolver.cc",
+ "src/eat_resolver.h",
+ "src/filesystem_dispatcher.cc",
+ "src/filesystem_dispatcher.h",
+ "src/filesystem_interception.cc",
+ "src/filesystem_interception.h",
+ "src/filesystem_policy.cc",
+ "src/filesystem_policy.h",
+ "src/handle_closer.cc",
+ "src/handle_closer.h",
+ "src/handle_closer_agent.cc",
+ "src/handle_closer_agent.h",
+ "src/interception.cc",
+ "src/interception.h",
+ "src/interception_agent.cc",
+ "src/interception_agent.h",
+ "src/interception_internal.h",
+ "src/interceptors.h",
+ "src/internal_types.h",
+ "src/ipc_tags.h",
+ "src/job.cc",
+ "src/job.h",
+ "src/named_pipe_dispatcher.cc",
+ "src/named_pipe_dispatcher.h",
+ "src/named_pipe_interception.cc",
+ "src/named_pipe_interception.h",
+ "src/named_pipe_policy.cc",
+ "src/named_pipe_policy.h",
+ "src/nt_internals.h",
+ "src/policy_broker.cc",
+ "src/policy_broker.h",
+ "src/policy_engine_opcodes.cc",
+ "src/policy_engine_opcodes.h",
+ "src/policy_engine_params.h",
+ "src/policy_engine_processor.cc",
+ "src/policy_engine_processor.h",
+ "src/policy_low_level.cc",
+ "src/policy_low_level.h",
+ "src/policy_params.h",
+ "src/policy_target.cc",
+ "src/policy_target.h",
+ "src/process_mitigations.cc",
+ "src/process_mitigations.h",
+ "src/process_mitigations_win32k_dispatcher.cc",
+ "src/process_mitigations_win32k_dispatcher.h",
+ "src/process_mitigations_win32k_interception.cc",
+ "src/process_mitigations_win32k_interception.h",
+ "src/process_mitigations_win32k_policy.cc",
+ "src/process_mitigations_win32k_policy.h",
+ "src/process_thread_dispatcher.cc",
+ "src/process_thread_dispatcher.h",
+ "src/process_thread_interception.cc",
+ "src/process_thread_interception.h",
+ "src/process_thread_policy.cc",
+ "src/process_thread_policy.h",
+ "src/registry_dispatcher.cc",
+ "src/registry_dispatcher.h",
+ "src/registry_interception.cc",
+ "src/registry_interception.h",
+ "src/registry_policy.cc",
+ "src/registry_policy.h",
+ "src/resolver.cc",
+ "src/resolver.h",
+ "src/restricted_token.cc",
+ "src/restricted_token.h",
+ "src/restricted_token_utils.cc",
+ "src/restricted_token_utils.h",
+ "src/sandbox.cc",
+ "src/sandbox.h",
+ "src/sandbox_factory.h",
+ "src/sandbox_globals.cc",
+ "src/sandbox_nt_types.h",
+ "src/sandbox_nt_util.cc",
+ "src/sandbox_nt_util.h",
+ "src/sandbox_policy.h",
+ "src/sandbox_policy_base.cc",
+ "src/sandbox_policy_base.h",
+ "src/sandbox_rand.cc",
+ "src/sandbox_rand.h",
+ "src/sandbox_types.h",
+ "src/sandbox_utils.cc",
+ "src/sandbox_utils.h",
+ "src/security_level.h",
+ "src/service_resolver.cc",
+ "src/service_resolver.h",
+ "src/sharedmem_ipc_client.cc",
+ "src/sharedmem_ipc_client.h",
+ "src/sharedmem_ipc_server.cc",
+ "src/sharedmem_ipc_server.h",
+ "src/sid.cc",
+ "src/sid.h",
+ "src/sync_dispatcher.cc",
+ "src/sync_dispatcher.h",
+ "src/sync_interception.cc",
+ "src/sync_interception.h",
+ "src/sync_policy.cc",
+ "src/sync_policy.h",
+ "src/target_interceptions.cc",
+ "src/target_interceptions.h",
+ "src/target_process.cc",
+ "src/target_process.h",
+ "src/target_services.cc",
+ "src/target_services.h",
+ "src/top_level_dispatcher.cc",
+ "src/top_level_dispatcher.h",
+ "src/win2k_threadpool.cc",
+ "src/win2k_threadpool.h",
+ "src/win_utils.cc",
+ "src/win_utils.h",
+ "src/window.cc",
+ "src/window.h",
+ ]
+
+ if (current_cpu == "x64") {
+ sources += [
+ "src/interceptors_64.cc",
+ "src/interceptors_64.h",
+ "src/resolver_64.cc",
+ "src/service_resolver_64.cc",
+ ]
+ } else if (current_cpu == "x86") {
+ sources += [
+ "src/resolver_32.cc",
+ "src/service_resolver_32.cc",
+ "src/sidestep/ia32_modrm_map.cpp",
+ "src/sidestep/ia32_opcode_map.cpp",
+ "src/sidestep/mini_disassembler.cpp",
+ "src/sidestep/mini_disassembler.h",
+ "src/sidestep/mini_disassembler_types.h",
+ "src/sidestep/preamble_patcher.h",
+ "src/sidestep/preamble_patcher_with_stub.cpp",
+ "src/sidestep_resolver.cc",
+ "src/sidestep_resolver.h",
+ ]
+ }
+
+ configs += [ "//build/config:precompiled_headers" ]
+
+ deps = [
+ "//base",
+ "//base:base_static",
+ ]
+ if (current_cpu == "x86") {
+ deps += [ ":copy_wow_helper" ]
+ }
+}
+
+if (current_cpu == "x86") {
+ # Make a target that copies the wow_helper files to the out dir.
+ #
+ # TODO(brettw) we can probably just build this now that we have proper
+ # toolchain support.
+ copy("copy_wow_helper") {
+ sources = [
+ "wow_helper/wow_helper.exe",
+ "wow_helper/wow_helper.pdb",
+ ]
+ outputs = [
+ "$root_out_dir/{{source_file_part}}",
+ ]
+ }
+}
+
+test("sbox_integration_tests") {
+ sources = [
+ "src/address_sanitizer_test.cc",
+ "src/app_container_test.cc",
+ "src/file_policy_test.cc",
+ "src/handle_closer_test.cc",
+ "src/handle_inheritance_test.cc",
+ "src/integrity_level_test.cc",
+ "src/ipc_ping_test.cc",
+ "src/lpc_policy_test.cc",
+ "src/named_pipe_policy_test.cc",
+ "src/policy_target_test.cc",
+ "src/process_mitigations_test.cc",
+ "src/process_policy_test.cc",
+ "src/registry_policy_test.cc",
+ "src/restricted_token_test.cc",
+ "src/sync_policy_test.cc",
+ "src/sync_policy_test.h",
+ "src/unload_dll_test.cc",
+ "tests/common/controller.cc",
+ "tests/common/controller.h",
+ "tests/common/test_utils.cc",
+ "tests/common/test_utils.h",
+ "tests/integration_tests/integration_tests.cc",
+ "tests/integration_tests/integration_tests_common.h",
+ "tests/integration_tests/integration_tests_test.cc",
+ ]
+
+ deps = [
+ ":sandbox",
+ ":sbox_integration_test_hook_dll",
+ ":sbox_integration_test_win_proc",
+ "//base/test:test_support",
+ "//testing/gtest",
+ ]
+
+ libs = [ "dxva2.lib" ]
+}
+
+loadable_module("sbox_integration_test_hook_dll") {
+ sources = [
+ "tests/integration_tests/hooking_dll.cc",
+ "tests/integration_tests/integration_tests_common.h",
+ ]
+}
+
+executable("sbox_integration_test_win_proc") {
+ sources = [
+ "tests/integration_tests/hooking_win_proc.cc",
+ "tests/integration_tests/integration_tests_common.h",
+ ]
+
+ configs -= [ "//build/config/win:console" ]
+ configs += [ "//build/config/win:windowed" ]
+}
+
+test("sbox_validation_tests") {
+ sources = [
+ "tests/common/controller.cc",
+ "tests/common/controller.h",
+ "tests/validation_tests/commands.cc",
+ "tests/validation_tests/commands.h",
+ "tests/validation_tests/suite.cc",
+ "tests/validation_tests/unit_tests.cc",
+ ]
+
+ deps = [
+ ":sandbox",
+ "//base/test:test_support",
+ "//testing/gtest",
+ ]
+
+ libs = [ "shlwapi.lib" ]
+}
+
+test("sbox_unittests") {
+ sources = [
+ "src/interception_unittest.cc",
+ "src/ipc_unittest.cc",
+ "src/job_unittest.cc",
+ "src/policy_engine_unittest.cc",
+ "src/policy_low_level_unittest.cc",
+ "src/policy_opcodes_unittest.cc",
+ "src/restricted_token_unittest.cc",
+ "src/sandbox_nt_util_unittest.cc",
+ "src/service_resolver_unittest.cc",
+ "src/sid_unittest.cc",
+ "src/threadpool_unittest.cc",
+ "src/win_utils_unittest.cc",
+ "tests/common/test_utils.cc",
+ "tests/common/test_utils.h",
+ "tests/unit_tests/unit_tests.cc",
+ ]
+
+ deps = [
+ ":sandbox",
+ "//base/test:test_support",
+ "//testing/gtest",
+ ]
+}
+
+test("sandbox_poc") {
+ sources = [
+ "sandbox_poc/main_ui_window.cc",
+ "sandbox_poc/main_ui_window.h",
+ "sandbox_poc/resource.h",
+ "sandbox_poc/sandbox.cc",
+ "sandbox_poc/sandbox.h",
+ "sandbox_poc/sandbox.ico",
+ "sandbox_poc/sandbox.rc",
+ ]
+
+ configs -= [ "//build/config/win:console" ]
+ configs += [ "//build/config/win:windowed" ]
+
+ libs = [ "comctl32.lib" ]
+
+ deps = [
+ ":pocdll",
+ ":sandbox",
+ ]
+}
+
+shared_library("pocdll") {
+ sources = [
+ "sandbox_poc/pocdll/exports.h",
+ "sandbox_poc/pocdll/fs.cc",
+ "sandbox_poc/pocdll/handles.cc",
+ "sandbox_poc/pocdll/invasive.cc",
+ "sandbox_poc/pocdll/network.cc",
+ "sandbox_poc/pocdll/pocdll.cc",
+ "sandbox_poc/pocdll/processes_and_threads.cc",
+ "sandbox_poc/pocdll/registry.cc",
+ "sandbox_poc/pocdll/spyware.cc",
+ "sandbox_poc/pocdll/utils.h",
+ ]
+
+ defines = [ "POCDLL_EXPORTS" ]
+
+ deps = [
+ "//build/config/sanitizers:deps",
+ ]
+}
diff --git a/libchrome/sandbox/win/OWNERS b/libchrome/sandbox/win/OWNERS
new file mode 100644
index 0000000..54a76c1
--- /dev/null
+++ b/libchrome/sandbox/win/OWNERS
@@ -0,0 +1,4 @@
+cpu@chromium.org
+forshaw@chromium.org
+jschuh@chromium.org
+wfh@chromium.org
diff --git a/libchrome/sandbox/win/sandbox_poc/pocdll/exports.h b/libchrome/sandbox/win/sandbox_poc/pocdll/exports.h
new file mode 100644
index 0000000..66a07d6
--- /dev/null
+++ b/libchrome/sandbox/win/sandbox_poc/pocdll/exports.h
@@ -0,0 +1,89 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_SANDBOX_POC_POCDLL_EXPORTS_H__
+#define SANDBOX_SANDBOX_POC_POCDLL_EXPORTS_H__
+
+#include <windows.h>
+
+#ifdef POCDLL_EXPORTS
+#define POCDLL_API __declspec(dllexport) __cdecl
+#else
+#define POCDLL_API __declspec(dllimport) __cdecl
+#endif
+
+extern "C" {
+// Tries to open several known system paths and outputs
+// the result.
+// "log" is the handle of the log file.
+void POCDLL_API TestFileSystem(HANDLE log);
+
+// Tries to find all handles open in the process and prints the name of the
+// resource referenced by the handle along with the access rights.
+// "log" is the handle of the log file.
+void POCDLL_API TestGetHandle(HANDLE log);
+
+// Creates a lot of threads until it cannot create more. The goal of this
+// function is to determine if it's possible to crash the machine when we
+// flood the machine with new threads.
+// "log" is the handle of the log file.
+void POCDLL_API TestThreadBombing(HANDLE log);
+
+// Takes all cpu of the machine. For each processor on the machine we assign
+// a thread. This thread will compute a mathematical expression over and over
+// to take all cpu.
+// "log" is the handle of the log file.
+// Note: here we are using the affinity to find out how many processors are on
+// the machine and to force a thread to run only on a given processor.
+void POCDLL_API TestTakeAllCpu(HANDLE log);
+
+// Creates memory in the heap until it fails 5 times in a row and prints the
+// amount of memory created. This function is used to find out if it's possible
+// to take all memory on the machine and crash the system.
+// "log" is the handle of the log file.
+void POCDLL_API TestUseAllMemory(HANDLE log);
+
+// Creates millions of kernel objects. This function is used to find out if it's
+// possible to crash the system if we create too many kernel objects and if we
+// hold too many handles. All those kernel objects are unnamed.
+// "log" is the handle of the log file.
+void POCDLL_API TestCreateObjects(HANDLE log);
+
+// Receives a hwnd and tries to close it. This is the callback for EnumWindows.
+// It will be called for each window(hwnd) on the system.
+// "log" is the handle of the log file.
+// Always returns TRUE to tell the system that we want to continue the
+// enumeration.
+void POCDLL_API TestCloseHWND(HANDLE log);
+
+// Tries to listen on the port 88.
+// "log" is the handle of the log file.
+void POCDLL_API TestNetworkListen(HANDLE log);
+
+// Lists all processes on the system and tries to open them.
+// "log" is the handle of the log file.
+void POCDLL_API TestProcesses(HANDLE log);
+
+// Lists all threads on the system and tries to open them.
+// "log" is the handle of the log file.
+void POCDLL_API TestThreads(HANDLE log);
+
+// Tries to open some known system registry key and outputs the result.
+// "log" is the handle of the log file.
+void POCDLL_API TestRegistry(HANDLE log);
+
+// Records all keystrokes typed for 15 seconds and then displays them.
+// "log" is the handle of the log file.
+void POCDLL_API TestSpyKeys(HANDLE log);
+
+// Tries to read pixels on the monitor and outputs whether the operation
+// failed or succeeded.
+// "log" is the handle of the log file.
+void POCDLL_API TestSpyScreen(HANDLE log);
+
+// Runs all tests except those that are invasive.
+void POCDLL_API Run(HANDLE log);
+}
+
+#endif // SANDBOX_SANDBOX_POC_POCDLL_EXPORTS_H__
diff --git a/libchrome/sandbox/win/sandbox_poc/pocdll/pocdll.vcproj b/libchrome/sandbox/win/sandbox_poc/pocdll/pocdll.vcproj
new file mode 100644
index 0000000..8e4e31f
--- /dev/null
+++ b/libchrome/sandbox/win/sandbox_poc/pocdll/pocdll.vcproj
@@ -0,0 +1,218 @@
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+ ProjectType="Visual C++"
+ Version="8.00"
+ Name="pocdll"
+ ProjectGUID="{AE5BFB87-850E-4454-B01D-58E7D8BAC224}"
+ RootNamespace="pocdll"
+ Keyword="Win32Proj"
+ >
+ <Platforms>
+ <Platform
+ Name="Win32"
+ />
+ </Platforms>
+ <ToolFiles>
+ </ToolFiles>
+ <Configurations>
+ <Configuration
+ Name="Debug|Win32"
+ ConfigurationType="2"
+ InheritedPropertySheets="$(SolutionDir)..\build\debug.vsprops;$(SolutionDir)..\build\common.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ PreprocessorDefinitions="POCDLL_EXPORTS"
+ UsePrecompiledHeader="2"
+ ForcedIncludeFiles="stdafx.h"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ ModuleDefinitionFile=""
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ EmbedManifest="false"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ <Configuration
+ Name="Release|Win32"
+ ConfigurationType="2"
+ InheritedPropertySheets="$(SolutionDir)..\build\release.vsprops;$(SolutionDir)..\build\common.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ PreprocessorDefinitions="POCDLL_EXPORTS"
+ UsePrecompiledHeader="0"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ ModuleDefinitionFile=""
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ EmbedManifest="false"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ </Configurations>
+ <References>
+ </References>
+ <Files>
+ <File
+ RelativePath=".\exports.h"
+ >
+ </File>
+ <File
+ RelativePath=".\fs.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\handles.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\invasive.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\network.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\pocdll.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\processes_and_threads.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\registry.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\spyware.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\stdafx.cc"
+ >
+ <FileConfiguration
+ Name="Debug|Win32"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="1"
+ />
+ </FileConfiguration>
+ <FileConfiguration
+ Name="Release|Win32"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="0"
+ />
+ </FileConfiguration>
+ </File>
+ <File
+ RelativePath=".\stdafx.h"
+ >
+ </File>
+ <File
+ RelativePath=".\utils.h"
+ >
+ </File>
+ </Files>
+ <Globals>
+ </Globals>
+</VisualStudioProject>
diff --git a/libchrome/sandbox/win/sandbox_poc/pocdll/utils.h b/libchrome/sandbox/win/sandbox_poc/pocdll/utils.h
new file mode 100644
index 0000000..0a6ad37
--- /dev/null
+++ b/libchrome/sandbox/win/sandbox_poc/pocdll/utils.h
@@ -0,0 +1,66 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_SANDBOX_POC_POCDLL_UTILS_H__
+#define SANDBOX_SANDBOX_POC_POCDLL_UTILS_H__
+
+#include <stdio.h>
+#include <io.h>
+
+#include "base/macros.h"
+
+// Class to convert a HANDLE to a FILE *. The FILE * is closed when the
+// object goes out of scope
+class HandleToFile {
+ public:
+ HandleToFile() {
+ file_ = NULL;
+ };
+
+  // Note: c_file_handle does not need to be closed because fclose does it.
+ ~HandleToFile() {
+ if (file_) {
+ fflush(file_);
+ fclose(file_);
+ }
+ };
+
+ // Translates a HANDLE (handle) to a FILE * opened with the mode "mode".
+ // The return value is the FILE * or NULL if there is an error.
+ FILE* Translate(HANDLE handle, const char *mode) {
+ if (file_) {
+ return NULL;
+ }
+
+ HANDLE new_handle;
+ BOOL result = ::DuplicateHandle(::GetCurrentProcess(),
+ handle,
+ ::GetCurrentProcess(),
+ &new_handle,
+ 0, // Don't ask for a specific
+ // desired access.
+ FALSE, // Not inheritable.
+ DUPLICATE_SAME_ACCESS);
+
+ if (!result) {
+ return NULL;
+ }
+
+ int c_file_handle = _open_osfhandle(reinterpret_cast<LONG_PTR>(new_handle),
+ 0); // No flags
+ if (-1 == c_file_handle) {
+ return NULL;
+ }
+
+ file_ = _fdopen(c_file_handle, mode);
+ return file_;
+ };
+ private:
+  // The FILE* returned. We need to close it at the end.
+ FILE* file_;
+
+ DISALLOW_COPY_AND_ASSIGN(HandleToFile);
+};
+
+#endif // SANDBOX_SANDBOX_POC_POCDLL_UTILS_H__
diff --git a/libchrome/sandbox/win/sandbox_poc/resource.h b/libchrome/sandbox/win/sandbox_poc/resource.h
new file mode 100644
index 0000000..87ff920
--- /dev/null
+++ b/libchrome/sandbox/win/sandbox_poc/resource.h
@@ -0,0 +1,30 @@
+//{{NO_DEPENDENCIES}}
+// Microsoft Visual C++ generated include file.
+// Used by sandbox.rc
+//
+#define IDI_SANDBOX 107
+#define IDR_MENU_MAIN_UI 129
+#define IDD_LAUNCH_DLL 130
+#define IDC_RADIO_POCDLL 1000
+#define IDC_RADIO_CUSTOM_DLL 1001
+#define IDC_DLL_NAME 1002
+#define IDC_ENTRY_POINT 1003
+#define IDC_LOG_FILE 1004
+#define IDC_BROWSE_DLL 1005
+#define IDC_BROWSE_LOG 1006
+#define ID_FILE_EXIT 32771
+#define ID_COMMANDS_LAUNCHDLL 32772
+#define ID_COMMANDS_SPAWNTARGET 32773
+#define IDC_STATIC -1
+
+// Next default values for new objects
+//
+#ifdef APSTUDIO_INVOKED
+#ifndef APSTUDIO_READONLY_SYMBOLS
+#define _APS_NO_MFC 1
+#define _APS_NEXT_RESOURCE_VALUE 131
+#define _APS_NEXT_COMMAND_VALUE 32774
+#define _APS_NEXT_CONTROL_VALUE 1007
+#define _APS_NEXT_SYMED_VALUE 110
+#endif
+#endif
diff --git a/libchrome/sandbox/win/sandbox_poc/sandbox.ico b/libchrome/sandbox/win/sandbox_poc/sandbox.ico
new file mode 100644
index 0000000..916fa12
--- /dev/null
+++ b/libchrome/sandbox/win/sandbox_poc/sandbox.ico
Binary files differ
diff --git a/libchrome/sandbox/win/sandbox_poc/sandbox.rc b/libchrome/sandbox/win/sandbox_poc/sandbox.rc
new file mode 100644
index 0000000..978c96f
--- /dev/null
+++ b/libchrome/sandbox/win/sandbox_poc/sandbox.rc
@@ -0,0 +1,136 @@
+// Microsoft Visual C++ generated resource script.
+//
+#include "resource.h"
+
+#define APSTUDIO_READONLY_SYMBOLS
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 2 resource.
+//
+#define APSTUDIO_HIDDEN_SYMBOLS
+#include "windows.h"
+#undef APSTUDIO_HIDDEN_SYMBOLS
+
+/////////////////////////////////////////////////////////////////////////////
+#undef APSTUDIO_READONLY_SYMBOLS
+
+/////////////////////////////////////////////////////////////////////////////
+// English (U.S.) resources
+
+#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU)
+#ifdef _WIN32
+LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
+#pragma code_page(1252)
+#endif //_WIN32
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// Menu
+//
+
+IDR_MENU_MAIN_UI MENU
+BEGIN
+ POPUP "&File"
+ BEGIN
+ MENUITEM "E&xit", ID_FILE_EXIT
+ END
+ POPUP "&Commands"
+ BEGIN
+ MENUITEM "&Spawn target", ID_COMMANDS_SPAWNTARGET
+ END
+END
+
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// Dialog
+//
+
+IDD_LAUNCH_DLL DIALOGEX 0, 0, 269, 118
+STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | WS_POPUP | WS_CAPTION | WS_SYSMENU
+CAPTION "BrokerUI: Load an Attack DLL"
+FONT 8, "MS Shell Dlg", 400, 0, 0x1
+BEGIN
+ DEFPUSHBUTTON "Call now",IDOK,212,70,50,14
+ PUSHBUTTON "Cancel",IDCANCEL,212,95,50,14
+ EDITTEXT IDC_DLL_NAME,7,43,200,13,ES_AUTOHSCROLL
+ LTEXT "DLL to load in target:",IDC_STATIC,7,33,168,8
+ LTEXT "Function to call:",IDC_STATIC,7,61,139,8
+ EDITTEXT IDC_ENTRY_POINT,7,71,200,13,ES_AUTOHSCROLL
+ EDITTEXT IDC_LOG_FILE,7,17,200,13,ES_AUTOHSCROLL
+ LTEXT "File for Target logging (optional):",IDC_STATIC,7,7,139,8
+ PUSHBUTTON "Browse...",IDC_BROWSE_DLL,212,42,50,14
+ PUSHBUTTON "Browse...",IDC_BROWSE_LOG,212,16,50,14
+END
+
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// DESIGNINFO
+//
+
+#ifdef APSTUDIO_INVOKED
+GUIDELINES DESIGNINFO
+BEGIN
+ IDD_LAUNCH_DLL, DIALOG
+ BEGIN
+ LEFTMARGIN, 7
+ RIGHTMARGIN, 262
+ TOPMARGIN, 7
+ BOTTOMMARGIN, 111
+ END
+END
+#endif // APSTUDIO_INVOKED
+
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// Icon
+//
+
+// Icon with lowest ID value placed first to ensure application icon
+// remains consistent on all systems.
+IDI_SANDBOX ICON "sandbox.ico"
+
+#ifdef APSTUDIO_INVOKED
+/////////////////////////////////////////////////////////////////////////////
+//
+// TEXTINCLUDE
+//
+
+1 TEXTINCLUDE
+BEGIN
+ "resource.h\0"
+END
+
+2 TEXTINCLUDE
+BEGIN
+ "#define APSTUDIO_HIDDEN_SYMBOLS\r\n"
+ "#include ""windows.h""\r\n"
+ "#undef APSTUDIO_HIDDEN_SYMBOLS\r\n"
+ "\0"
+END
+
+3 TEXTINCLUDE
+BEGIN
+ "\r\n"
+ "\0"
+END
+
+#endif // APSTUDIO_INVOKED
+
+#endif // English (U.S.) resources
+/////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef APSTUDIO_INVOKED
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 3 resource.
+//
+
+
+/////////////////////////////////////////////////////////////////////////////
+#endif // not APSTUDIO_INVOKED
+
diff --git a/libchrome/sandbox/win/sandbox_poc/sandbox_poc.vcproj b/libchrome/sandbox/win/sandbox_poc/sandbox_poc.vcproj
new file mode 100644
index 0000000..5fde1cd
--- /dev/null
+++ b/libchrome/sandbox/win/sandbox_poc/sandbox_poc.vcproj
@@ -0,0 +1,202 @@
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+ ProjectType="Visual C++"
+ Version="8.00"
+ Name="sandbox_poc"
+ ProjectGUID="{CF757839-F2A1-417C-8F25-DCAE480020F1}"
+ RootNamespace="sandbox_poc"
+ Keyword="Win32Proj"
+ >
+ <Platforms>
+ <Platform
+ Name="Win32"
+ />
+ </Platforms>
+ <ToolFiles>
+ </ToolFiles>
+ <Configurations>
+ <Configuration
+ Name="Debug|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets="$(SolutionDir)..\build\debug.vsprops;$(SolutionDir)..\build\common.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="2"
+ ForcedIncludeFiles="stdafx.h"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ AdditionalDependencies="comctl32.lib"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ <Configuration
+ Name="Release|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets="$(SolutionDir)..\build\release.vsprops;$(SolutionDir)..\build\common.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="0"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ AdditionalDependencies="comctl32.lib"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ </Configurations>
+ <References>
+ </References>
+ <Files>
+ <File
+ RelativePath=".\main_ui_window.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\main_ui_window.h"
+ >
+ </File>
+ <File
+ RelativePath=".\resource.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sandbox.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\sandbox.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sandbox.ico"
+ >
+ </File>
+ <File
+ RelativePath=".\sandbox.rc"
+ >
+ </File>
+ <File
+ RelativePath=".\stdafx.cc"
+ >
+ <FileConfiguration
+ Name="Debug|Win32"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="1"
+ />
+ </FileConfiguration>
+ <FileConfiguration
+ Name="Release|Win32"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="0"
+ />
+ </FileConfiguration>
+ </File>
+ <File
+ RelativePath=".\stdafx.h"
+ >
+ </File>
+ </Files>
+ <Globals>
+ </Globals>
+</VisualStudioProject>
diff --git a/libchrome/sandbox/win/sandbox_standalone.sln b/libchrome/sandbox/win/sandbox_standalone.sln
new file mode 100644
index 0000000..529d20e
--- /dev/null
+++ b/libchrome/sandbox/win/sandbox_standalone.sln
@@ -0,0 +1,127 @@
+Microsoft Visual Studio Solution File, Format Version 9.00
+# Visual Studio 2005
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sandbox", "src\sandbox.vcproj", "{881F6A97-D539-4C48-B401-DF04385B2343}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sbox_unittests", "tests\unit_tests\sbox_unittests.vcproj", "{883553BE-2A9D-418C-A121-61FE1DFBC562}"
+ ProjectSection(ProjectDependencies) = postProject
+ {1832A374-8A74-4F9E-B536-69A699B3E165} = {1832A374-8A74-4F9E-B536-69A699B3E165}
+ {881F6A97-D539-4C48-B401-DF04385B2343} = {881F6A97-D539-4C48-B401-DF04385B2343}
+ {BFE8E2A7-3B3B-43B0-A994-3058B852DB8B} = {BFE8E2A7-3B3B-43B0-A994-3058B852DB8B}
+ EndProjectSection
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tests", "tests", "{F7A3B82E-B8B4-4FDF-BC8E-FEC9398F57ED}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sbox_validation_tests", "tests\validation_tests\sbox_validation_tests.vcproj", "{B9CC7B0D-145A-49C2-B887-84E43CFA0F27}"
+ ProjectSection(ProjectDependencies) = postProject
+ {1832A374-8A74-4F9E-B536-69A699B3E165} = {1832A374-8A74-4F9E-B536-69A699B3E165}
+ {881F6A97-D539-4C48-B401-DF04385B2343} = {881F6A97-D539-4C48-B401-DF04385B2343}
+ {BFE8E2A7-3B3B-43B0-A994-3058B852DB8B} = {BFE8E2A7-3B3B-43B0-A994-3058B852DB8B}
+ EndProjectSection
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "dependencies", "dependencies", "{BCE54389-D18D-48B9-977E-9D1998200F63}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "debug_message", "..\base\debug_message.vcproj", "{F0F92189-193A-6607-C2BB-0F98BBD19ADF}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tools", "tools", "{7F36EE20-5016-4051-B0D7-42824CDA0291}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "proof_of_concept", "proof_of_concept", "{B607BE7B-3555-422C-A40B-28E73C0B5E24}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sandbox_poc", "sandbox_poc\sandbox_poc.vcproj", "{CF757839-F2A1-417C-8F25-DCAE480020F1}"
+ ProjectSection(ProjectDependencies) = postProject
+ {1832A374-8A74-4F9E-B536-69A699B3E165} = {1832A374-8A74-4F9E-B536-69A699B3E165}
+ {881F6A97-D539-4C48-B401-DF04385B2343} = {881F6A97-D539-4C48-B401-DF04385B2343}
+ {AE5BFB87-850E-4454-B01D-58E7D8BAC224} = {AE5BFB87-850E-4454-B01D-58E7D8BAC224}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "pocdll", "sandbox_poc\pocdll\pocdll.vcproj", "{AE5BFB87-850E-4454-B01D-58E7D8BAC224}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "finder", "tools\finder\finder.vcproj", "{ACDC2E06-0366-41A4-A646-C37E130A605D}"
+ ProjectSection(ProjectDependencies) = postProject
+ {1832A374-8A74-4F9E-B536-69A699B3E165} = {1832A374-8A74-4F9E-B536-69A699B3E165}
+ {881F6A97-D539-4C48-B401-DF04385B2343} = {881F6A97-D539-4C48-B401-DF04385B2343}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "launcher", "tools\launcher\launcher.vcproj", "{386FA217-FBC2-4461-882D-CDAD221ED800}"
+ ProjectSection(ProjectDependencies) = postProject
+ {1832A374-8A74-4F9E-B536-69A699B3E165} = {1832A374-8A74-4F9E-B536-69A699B3E165}
+ {881F6A97-D539-4C48-B401-DF04385B2343} = {881F6A97-D539-4C48-B401-DF04385B2343}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sbox_integration_tests", "tests\integration_tests\sbox_integration_tests.vcproj", "{542D4B3B-98D4-4233-B68D-0103891508C6}"
+ ProjectSection(ProjectDependencies) = postProject
+ {1832A374-8A74-4F9E-B536-69A699B3E165} = {1832A374-8A74-4F9E-B536-69A699B3E165}
+ {881F6A97-D539-4C48-B401-DF04385B2343} = {881F6A97-D539-4C48-B401-DF04385B2343}
+ {BFE8E2A7-3B3B-43B0-A994-3058B852DB8B} = {BFE8E2A7-3B3B-43B0-A994-3058B852DB8B}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "base", "..\base\base.vcproj", "{1832A374-8A74-4F9E-B536-69A699B3E165}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "gtest", "..\testing\gtest.vcproj", "{BFE8E2A7-3B3B-43B0-A994-3058B852DB8B}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Win32 = Debug|Win32
+ Release|Win32 = Release|Win32
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {881F6A97-D539-4C48-B401-DF04385B2343}.Debug|Win32.ActiveCfg = Debug|Win32
+ {881F6A97-D539-4C48-B401-DF04385B2343}.Debug|Win32.Build.0 = Debug|Win32
+ {881F6A97-D539-4C48-B401-DF04385B2343}.Release|Win32.ActiveCfg = Release|Win32
+ {881F6A97-D539-4C48-B401-DF04385B2343}.Release|Win32.Build.0 = Release|Win32
+ {883553BE-2A9D-418C-A121-61FE1DFBC562}.Debug|Win32.ActiveCfg = Debug|Win32
+ {883553BE-2A9D-418C-A121-61FE1DFBC562}.Debug|Win32.Build.0 = Debug|Win32
+ {883553BE-2A9D-418C-A121-61FE1DFBC562}.Release|Win32.ActiveCfg = Release|Win32
+ {883553BE-2A9D-418C-A121-61FE1DFBC562}.Release|Win32.Build.0 = Release|Win32
+ {B9CC7B0D-145A-49C2-B887-84E43CFA0F27}.Debug|Win32.ActiveCfg = Debug|Win32
+ {B9CC7B0D-145A-49C2-B887-84E43CFA0F27}.Debug|Win32.Build.0 = Debug|Win32
+ {B9CC7B0D-145A-49C2-B887-84E43CFA0F27}.Release|Win32.ActiveCfg = Release|Win32
+ {B9CC7B0D-145A-49C2-B887-84E43CFA0F27}.Release|Win32.Build.0 = Release|Win32
+ {F0F92189-193A-6607-C2BB-0F98BBD19ADF}.Debug|Win32.ActiveCfg = Debug|Win32
+ {F0F92189-193A-6607-C2BB-0F98BBD19ADF}.Debug|Win32.Build.0 = Debug|Win32
+ {F0F92189-193A-6607-C2BB-0F98BBD19ADF}.Release|Win32.ActiveCfg = Release|Win32
+ {F0F92189-193A-6607-C2BB-0F98BBD19ADF}.Release|Win32.Build.0 = Release|Win32
+ {CF757839-F2A1-417C-8F25-DCAE480020F1}.Debug|Win32.ActiveCfg = Debug|Win32
+ {CF757839-F2A1-417C-8F25-DCAE480020F1}.Debug|Win32.Build.0 = Debug|Win32
+ {CF757839-F2A1-417C-8F25-DCAE480020F1}.Release|Win32.ActiveCfg = Release|Win32
+ {CF757839-F2A1-417C-8F25-DCAE480020F1}.Release|Win32.Build.0 = Release|Win32
+ {AE5BFB87-850E-4454-B01D-58E7D8BAC224}.Debug|Win32.ActiveCfg = Debug|Win32
+ {AE5BFB87-850E-4454-B01D-58E7D8BAC224}.Debug|Win32.Build.0 = Debug|Win32
+ {AE5BFB87-850E-4454-B01D-58E7D8BAC224}.Release|Win32.ActiveCfg = Release|Win32
+ {AE5BFB87-850E-4454-B01D-58E7D8BAC224}.Release|Win32.Build.0 = Release|Win32
+ {ACDC2E06-0366-41A4-A646-C37E130A605D}.Debug|Win32.ActiveCfg = Debug|Win32
+ {ACDC2E06-0366-41A4-A646-C37E130A605D}.Debug|Win32.Build.0 = Debug|Win32
+ {ACDC2E06-0366-41A4-A646-C37E130A605D}.Release|Win32.ActiveCfg = Release|Win32
+ {ACDC2E06-0366-41A4-A646-C37E130A605D}.Release|Win32.Build.0 = Release|Win32
+ {386FA217-FBC2-4461-882D-CDAD221ED800}.Debug|Win32.ActiveCfg = Debug|Win32
+ {386FA217-FBC2-4461-882D-CDAD221ED800}.Debug|Win32.Build.0 = Debug|Win32
+ {386FA217-FBC2-4461-882D-CDAD221ED800}.Release|Win32.ActiveCfg = Release|Win32
+ {386FA217-FBC2-4461-882D-CDAD221ED800}.Release|Win32.Build.0 = Release|Win32
+ {542D4B3B-98D4-4233-B68D-0103891508C6}.Debug|Win32.ActiveCfg = Debug|Win32
+ {542D4B3B-98D4-4233-B68D-0103891508C6}.Debug|Win32.Build.0 = Debug|Win32
+ {542D4B3B-98D4-4233-B68D-0103891508C6}.Release|Win32.ActiveCfg = Release|Win32
+ {542D4B3B-98D4-4233-B68D-0103891508C6}.Release|Win32.Build.0 = Release|Win32
+ {1832A374-8A74-4F9E-B536-69A699B3E165}.Debug|Win32.ActiveCfg = Debug|Win32
+ {1832A374-8A74-4F9E-B536-69A699B3E165}.Debug|Win32.Build.0 = Debug|Win32
+ {1832A374-8A74-4F9E-B536-69A699B3E165}.Release|Win32.ActiveCfg = Release|Win32
+ {1832A374-8A74-4F9E-B536-69A699B3E165}.Release|Win32.Build.0 = Release|Win32
+ {BFE8E2A7-3B3B-43B0-A994-3058B852DB8B}.Debug|Win32.ActiveCfg = Debug|Win32
+ {BFE8E2A7-3B3B-43B0-A994-3058B852DB8B}.Debug|Win32.Build.0 = Debug|Win32
+ {BFE8E2A7-3B3B-43B0-A994-3058B852DB8B}.Release|Win32.ActiveCfg = Release|Win32
+ {BFE8E2A7-3B3B-43B0-A994-3058B852DB8B}.Release|Win32.Build.0 = Release|Win32
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+ GlobalSection(NestedProjects) = preSolution
+ {883553BE-2A9D-418C-A121-61FE1DFBC562} = {F7A3B82E-B8B4-4FDF-BC8E-FEC9398F57ED}
+ {B9CC7B0D-145A-49C2-B887-84E43CFA0F27} = {F7A3B82E-B8B4-4FDF-BC8E-FEC9398F57ED}
+ {542D4B3B-98D4-4233-B68D-0103891508C6} = {F7A3B82E-B8B4-4FDF-BC8E-FEC9398F57ED}
+ {F0F92189-193A-6607-C2BB-0F98BBD19ADF} = {BCE54389-D18D-48B9-977E-9D1998200F63}
+ {1832A374-8A74-4F9E-B536-69A699B3E165} = {BCE54389-D18D-48B9-977E-9D1998200F63}
+ {BFE8E2A7-3B3B-43B0-A994-3058B852DB8B} = {BCE54389-D18D-48B9-977E-9D1998200F63}
+ {ACDC2E06-0366-41A4-A646-C37E130A605D} = {7F36EE20-5016-4051-B0D7-42824CDA0291}
+ {386FA217-FBC2-4461-882D-CDAD221ED800} = {7F36EE20-5016-4051-B0D7-42824CDA0291}
+ {CF757839-F2A1-417C-8F25-DCAE480020F1} = {B607BE7B-3555-422C-A40B-28E73C0B5E24}
+ {AE5BFB87-850E-4454-B01D-58E7D8BAC224} = {B607BE7B-3555-422C-A40B-28E73C0B5E24}
+ EndGlobalSection
+EndGlobal
diff --git a/libchrome/sandbox/win/sandbox_win.gypi b/libchrome/sandbox/win/sandbox_win.gypi
new file mode 100644
index 0000000..e9673aa
--- /dev/null
+++ b/libchrome/sandbox/win/sandbox_win.gypi
@@ -0,0 +1,432 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'variables': {
+ 'sandbox_windows_target': 0,
+ 'target_arch%': 'ia32',
+ },
+ 'target_conditions': [
+ ['sandbox_windows_target==1', {
+ # Files that are shared between the 32-bit and the 64-bit versions
+ # of the Windows sandbox library.
+ 'sources': [
+ 'src/acl.cc',
+ 'src/acl.h',
+ 'src/broker_services.cc',
+ 'src/broker_services.h',
+ 'src/crosscall_client.h',
+ 'src/crosscall_params.h',
+ 'src/crosscall_server.cc',
+ 'src/crosscall_server.h',
+ 'src/eat_resolver.cc',
+ 'src/eat_resolver.h',
+ 'src/filesystem_dispatcher.cc',
+ 'src/filesystem_dispatcher.h',
+ 'src/filesystem_interception.cc',
+ 'src/filesystem_interception.h',
+ 'src/filesystem_policy.cc',
+ 'src/filesystem_policy.h',
+ 'src/handle_closer.cc',
+ 'src/handle_closer.h',
+ 'src/handle_closer_agent.cc',
+ 'src/handle_closer_agent.h',
+ 'src/interception.cc',
+ 'src/interception.h',
+ 'src/interception_agent.cc',
+ 'src/interception_agent.h',
+ 'src/interception_internal.h',
+ 'src/interceptors.h',
+ 'src/internal_types.h',
+ 'src/ipc_tags.h',
+ 'src/job.cc',
+ 'src/job.h',
+ 'src/named_pipe_dispatcher.cc',
+ 'src/named_pipe_dispatcher.h',
+ 'src/named_pipe_interception.cc',
+ 'src/named_pipe_interception.h',
+ 'src/named_pipe_policy.cc',
+ 'src/named_pipe_policy.h',
+ 'src/nt_internals.h',
+ 'src/policy_broker.cc',
+ 'src/policy_broker.h',
+ 'src/policy_engine_opcodes.cc',
+ 'src/policy_engine_opcodes.h',
+ 'src/policy_engine_params.h',
+ 'src/policy_engine_processor.cc',
+ 'src/policy_engine_processor.h',
+ 'src/policy_low_level.cc',
+ 'src/policy_low_level.h',
+ 'src/policy_params.h',
+ 'src/policy_target.cc',
+ 'src/policy_target.h',
+ 'src/process_mitigations.cc',
+ 'src/process_mitigations.h',
+ 'src/process_mitigations_win32k_dispatcher.cc',
+ 'src/process_mitigations_win32k_dispatcher.h',
+ 'src/process_mitigations_win32k_interception.cc',
+ 'src/process_mitigations_win32k_interception.h',
+ 'src/process_mitigations_win32k_policy.cc',
+ 'src/process_mitigations_win32k_policy.h',
+ 'src/process_thread_dispatcher.cc',
+ 'src/process_thread_dispatcher.h',
+ 'src/process_thread_interception.cc',
+ 'src/process_thread_interception.h',
+ 'src/process_thread_policy.cc',
+ 'src/process_thread_policy.h',
+ 'src/registry_dispatcher.cc',
+ 'src/registry_dispatcher.h',
+ 'src/registry_interception.cc',
+ 'src/registry_interception.h',
+ 'src/registry_policy.cc',
+ 'src/registry_policy.h',
+ 'src/resolver.cc',
+ 'src/resolver.h',
+ 'src/restricted_token_utils.cc',
+ 'src/restricted_token_utils.h',
+ 'src/restricted_token.cc',
+ 'src/restricted_token.h',
+ 'src/sandbox_factory.h',
+ 'src/sandbox_globals.cc',
+ 'src/sandbox_nt_types.h',
+ 'src/sandbox_nt_util.cc',
+ 'src/sandbox_nt_util.h',
+ 'src/sandbox_policy_base.cc',
+ 'src/sandbox_policy_base.h',
+ 'src/sandbox_policy.h',
+ 'src/sandbox_rand.cc',
+ 'src/sandbox_rand.h',
+ 'src/sandbox_types.h',
+ 'src/sandbox_utils.cc',
+ 'src/sandbox_utils.h',
+ 'src/sandbox.cc',
+ 'src/sandbox.h',
+ 'src/security_level.h',
+ 'src/service_resolver.cc',
+ 'src/service_resolver.h',
+ 'src/sharedmem_ipc_client.cc',
+ 'src/sharedmem_ipc_client.h',
+ 'src/sharedmem_ipc_server.cc',
+ 'src/sharedmem_ipc_server.h',
+ 'src/sid.cc',
+ 'src/sid.h',
+ 'src/sync_dispatcher.cc',
+ 'src/sync_dispatcher.h',
+ 'src/sync_interception.cc',
+ 'src/sync_interception.h',
+ 'src/sync_policy.cc',
+ 'src/sync_policy.h',
+ 'src/target_interceptions.cc',
+ 'src/target_interceptions.h',
+ 'src/target_process.cc',
+ 'src/target_process.h',
+ 'src/target_services.cc',
+ 'src/target_services.h',
+ 'src/top_level_dispatcher.cc',
+ 'src/top_level_dispatcher.h',
+ 'src/win_utils.cc',
+ 'src/win_utils.h',
+ 'src/win2k_threadpool.cc',
+ 'src/win2k_threadpool.h',
+ 'src/window.cc',
+ 'src/window.h',
+ ],
+ 'target_conditions': [
+ ['target_arch=="x64"', {
+ 'sources': [
+ 'src/interceptors_64.cc',
+ 'src/interceptors_64.h',
+ 'src/resolver_64.cc',
+ 'src/service_resolver_64.cc',
+ ],
+ }],
+ ['target_arch=="ia32"', {
+ 'sources': [
+ 'src/resolver_32.cc',
+ 'src/service_resolver_32.cc',
+ 'src/sidestep_resolver.cc',
+ 'src/sidestep_resolver.h',
+ 'src/sidestep\ia32_modrm_map.cpp',
+ 'src/sidestep\ia32_opcode_map.cpp',
+ 'src/sidestep\mini_disassembler_types.h',
+ 'src/sidestep\mini_disassembler.cpp',
+ 'src/sidestep\mini_disassembler.h',
+ 'src/sidestep\preamble_patcher_with_stub.cpp',
+ 'src/sidestep\preamble_patcher.h',
+ ],
+ }],
+ ],
+ }],
+ ],
+ },
+ 'targets': [
+ {
+ 'target_name': 'sandbox',
+ 'type': 'static_library',
+ 'variables': {
+ 'sandbox_windows_target': 1,
+ },
+ 'dependencies': [
+ '../base/base.gyp:base',
+ '../base/base.gyp:base_static',
+ ],
+ 'export_dependent_settings': [
+ '../base/base.gyp:base',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'target_conditions': [
+ ['target_arch=="ia32"', {
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)',
+ 'files': [
+ 'wow_helper/wow_helper.exe',
+ 'wow_helper/wow_helper.pdb',
+ ],
+ },
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'sbox_integration_tests',
+ 'type': 'executable',
+ 'dependencies': [
+ 'sandbox',
+ 'sbox_integration_test_hook_dll',
+ 'sbox_integration_test_win_proc',
+ '../base/base.gyp:test_support_base',
+ '../testing/gtest.gyp:gtest',
+ ],
+ 'sources': [
+ 'src/address_sanitizer_test.cc',
+ 'src/app_container_test.cc',
+ 'src/file_policy_test.cc',
+ 'src/handle_inheritance_test.cc',
+ 'tests/integration_tests/integration_tests_test.cc',
+ 'src/handle_closer_test.cc',
+ 'src/integrity_level_test.cc',
+ 'src/ipc_ping_test.cc',
+ 'src/lpc_policy_test.cc',
+ 'src/named_pipe_policy_test.cc',
+ 'src/policy_target_test.cc',
+ 'src/process_mitigations_test.cc',
+ 'src/process_policy_test.cc',
+ 'src/registry_policy_test.cc',
+ 'src/restricted_token_test.cc',
+ 'src/sync_policy_test.cc',
+ 'src/sync_policy_test.h',
+ 'src/unload_dll_test.cc',
+ 'tests/common/controller.cc',
+ 'tests/common/controller.h',
+ 'tests/common/test_utils.cc',
+ 'tests/common/test_utils.h',
+ 'tests/integration_tests/integration_tests.cc',
+ 'tests/integration_tests/integration_tests_common.h',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '-ldxva2.lib',
+ ],
+ },
+ },
+ {
+ 'target_name': 'sbox_integration_test_hook_dll',
+ 'type': 'shared_library',
+ 'dependencies': [
+ ],
+ 'sources': [
+ 'tests/integration_tests/hooking_dll.cc',
+ 'tests/integration_tests/integration_tests_common.h',
+ ],
+ },
+ {
+ 'target_name': 'sbox_integration_test_win_proc',
+ 'type': 'executable',
+ 'dependencies': [
+ ],
+ 'sources': [
+ 'tests/integration_tests/hooking_win_proc.cc',
+ 'tests/integration_tests/integration_tests_common.h',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'SubSystem': '2', # Set /SUBSYSTEM:WINDOWS
+ },
+ },
+ },
+ {
+ 'target_name': 'sbox_validation_tests',
+ 'type': 'executable',
+ 'dependencies': [
+ 'sandbox',
+ '../base/base.gyp:test_support_base',
+ '../testing/gtest.gyp:gtest',
+ ],
+ 'sources': [
+ 'tests/common/controller.cc',
+ 'tests/common/controller.h',
+ 'tests/validation_tests/unit_tests.cc',
+ 'tests/validation_tests/commands.cc',
+ 'tests/validation_tests/commands.h',
+ 'tests/validation_tests/suite.cc',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '-lshlwapi.lib',
+ ],
+ },
+ },
+ {
+ 'target_name': 'sbox_unittests',
+ 'type': 'executable',
+ 'dependencies': [
+ 'sandbox',
+ '../base/base.gyp:test_support_base',
+ '../testing/gtest.gyp:gtest',
+ ],
+ 'sources': [
+ 'src/interception_unittest.cc',
+ 'src/service_resolver_unittest.cc',
+ 'src/restricted_token_unittest.cc',
+ 'src/job_unittest.cc',
+ 'src/sid_unittest.cc',
+ 'src/policy_engine_unittest.cc',
+ 'src/policy_low_level_unittest.cc',
+ 'src/policy_opcodes_unittest.cc',
+ 'src/ipc_unittest.cc',
+ 'src/sandbox_nt_util_unittest.cc',
+ 'src/threadpool_unittest.cc',
+ 'src/win_utils_unittest.cc',
+ 'tests/common/test_utils.cc',
+ 'tests/common/test_utils.h',
+ 'tests/unit_tests/unit_tests.cc',
+ ],
+ },
+ {
+ 'target_name': 'sandbox_poc',
+ 'type': 'executable',
+ 'dependencies': [
+ 'sandbox',
+ 'pocdll',
+ ],
+ 'sources': [
+ 'sandbox_poc/main_ui_window.cc',
+ 'sandbox_poc/main_ui_window.h',
+ 'sandbox_poc/resource.h',
+ 'sandbox_poc/sandbox.cc',
+ 'sandbox_poc/sandbox.h',
+ 'sandbox_poc/sandbox.ico',
+ 'sandbox_poc/sandbox.rc',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '-lcomctl32.lib',
+ ],
+ },
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'SubSystem': '2', # Set /SUBSYSTEM:WINDOWS
+ },
+ },
+ },
+ {
+ 'target_name': 'pocdll',
+ 'type': 'shared_library',
+ 'sources': [
+ 'sandbox_poc/pocdll/exports.h',
+ 'sandbox_poc/pocdll/fs.cc',
+ 'sandbox_poc/pocdll/handles.cc',
+ 'sandbox_poc/pocdll/invasive.cc',
+ 'sandbox_poc/pocdll/network.cc',
+ 'sandbox_poc/pocdll/pocdll.cc',
+ 'sandbox_poc/pocdll/processes_and_threads.cc',
+ 'sandbox_poc/pocdll/registry.cc',
+ 'sandbox_poc/pocdll/spyware.cc',
+ 'sandbox_poc/pocdll/utils.h',
+ ],
+ 'defines': [
+ 'POCDLL_EXPORTS',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ },
+ ],
+ 'conditions': [
+ ['OS=="win" and target_arch=="ia32"', {
+ 'targets': [
+ {
+ 'target_name': 'sandbox_win64',
+ 'type': 'static_library',
+ 'variables': {
+ 'sandbox_windows_target': 1,
+ 'target_arch': 'x64',
+ },
+ 'dependencies': [
+ '../base/base.gyp:base_win64',
+ '../base/base.gyp:base_static_win64',
+ ],
+ 'configurations': {
+ 'Common_Base': {
+ 'msvs_target_platform': 'x64',
+ },
+ },
+ 'include_dirs': [
+ '../..',
+ ],
+ 'defines': [
+ '<@(nacl_win64_defines)',
+ ]
+ },
+ ],
+ }],
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'sbox_integration_tests_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'sbox_integration_tests',
+ ],
+ 'includes': [
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ '../sbox_integration_tests.isolate',
+ ],
+ },
+ {
+ 'target_name': 'sbox_unittests_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'sbox_unittests',
+ ],
+ 'includes': [
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ '../sbox_unittests.isolate',
+ ],
+ },
+ {
+ 'target_name': 'sbox_validation_tests_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'sbox_validation_tests',
+ ],
+ 'includes': [
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ '../sbox_validation_tests.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/libchrome/sandbox/win/src/crosscall_client.h b/libchrome/sandbox/win/src/crosscall_client.h
new file mode 100644
index 0000000..60ff243
--- /dev/null
+++ b/libchrome/sandbox/win/src/crosscall_client.h
@@ -0,0 +1,526 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_SRC_CROSSCALL_CLIENT_H_
+#define SANDBOX_SRC_CROSSCALL_CLIENT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "sandbox/win/src/crosscall_params.h"
+#include "sandbox/win/src/sandbox.h"
+
+// This header defines the CrossCall(..) family of templated functions
+// Their purpose is to simulate the syntax of a regular call but to generate
+// an IPC from the client-side.
+//
+// The basic pattern is to
+// 1) use template argument deduction to compute the size of each
+// parameter and the appropriate copy method
+// 2) pack the parameters in the appropriate ActualCallParams< > object
+// 3) call the IPC interface IPCProvider::DoCall( )
+//
+// The general interface of CrossCall is:
+// ResultCode CrossCall(IPCProvider& ipc_provider,
+// uint32_t tag,
+// const Par1& p1, const Par2& p2,...pn
+// CrossCallReturn* answer)
+//
+// where:
+// ipc_provider: is a specific implementation of the ipc transport see
+// sharedmem_ipc_server.h for an example.
+// tag : is the unique id for this IPC call. Is used to route the call to
+// the appropriate service.
+// p1, p2,.. pn : The input parameters of the IPC. Use only simple types
+// and wide strings (can add support for others).
+// answer : If the IPC was successful. The server-side answer is here. The
+// interpretation of the answer is private to client and server.
+//
+// The return value is ALL_OK if the IPC was delivered to the server, other
+// return codes indicate that the IPC transport failed to deliver it.
+namespace sandbox {
+
+// this is the assumed channel size. This can be overridden in a given
+// IPC implementation.
+const uint32_t kIPCChannelSize = 1024;
+
+// The copy helper uses templates to deduce the appropriate copy function to
+// copy the input parameters into the buffer that is going to be sent across
+// the IPC. This template facility can be made more sophisticated as need arises.
+
+// The default copy helper. It catches the general case where no other
+// specialized template matches better. We set the type to UINT32_TYPE, so this
+// only works with objects whose size is 32 bits.
+template<typename T>
+class CopyHelper {
+ public:
+ CopyHelper(const T& t) : t_(t) {}
+
+ // Returns the pointer to the start of the input.
+ const void* GetStart() const {
+ return &t_;
+ }
+
+ // Update the stored value with the value in the buffer. This is not
+ // supported for this type.
+ bool Update(void* buffer) {
+ // Not supported;
+ return true;
+ }
+
+ // Returns the size of the input in bytes.
+ uint32_t GetSize() const { return sizeof(T); }
+
+ // Returns true if the current type is used as an In or InOut parameter.
+ bool IsInOut() {
+ return false;
+ }
+
+ // Returns this object's type.
+ ArgType GetType() {
+ static_assert(sizeof(T) == sizeof(uint32_t), "specialization needed");
+ return UINT32_TYPE;
+ }
+
+ private:
+ const T& t_;
+};
+
+// This copy helper template specialization is for the void pointer
+// case both 32 and 64 bit.
+template<>
+class CopyHelper<void*> {
+ public:
+ CopyHelper(void* t) : t_(t) {}
+
+ // Returns the pointer to the start of the input.
+ const void* GetStart() const {
+ return &t_;
+ }
+
+ // Update the stored value with the value in the buffer. This is not
+ // supported for this type.
+ bool Update(void* buffer) {
+ // Not supported;
+ return true;
+ }
+
+ // Returns the size of the input in bytes.
+ uint32_t GetSize() const { return sizeof(t_); }
+
+ // Returns true if the current type is used as an In or InOut parameter.
+ bool IsInOut() {
+ return false;
+ }
+
+ // Returns this object's type.
+ ArgType GetType() {
+ return VOIDPTR_TYPE;
+ }
+
+ private:
+ const void* t_;
+};
+
+// This copy helper template specialization catches the cases where the
+// parameter is a pointer to a string.
+template<>
+class CopyHelper<const wchar_t*> {
+ public:
+ CopyHelper(const wchar_t* t)
+ : t_(t) {
+ }
+
+ // Returns the pointer to the start of the string.
+ const void* GetStart() const {
+ return t_;
+ }
+
+ // Update the stored value with the value in the buffer. This is not
+ // supported for this type.
+ bool Update(void* buffer) {
+ // Not supported;
+ return true;
+ }
+
+ // Returns the size of the string in bytes. We define a NULL string to
+ // be of zero length.
+ uint32_t GetSize() const {
+ __try {
+ return (!t_) ? 0
+ : static_cast<uint32_t>(StringLength(t_) * sizeof(t_[0]));
+ }
+ __except(EXCEPTION_EXECUTE_HANDLER) {
+ return UINT32_MAX;
+ }
+ }
+
+ // Returns true if the current type is used as an In or InOut parameter.
+ bool IsInOut() {
+ return false;
+ }
+
+ ArgType GetType() {
+ return WCHAR_TYPE;
+ }
+
+ private:
+ // We provide our not very optimized version of wcslen(), since we don't
+ // want to risk having the linker use the version in the CRT since the CRT
+ // might not be present when we do an early IPC call.
+ static size_t __cdecl StringLength(const wchar_t* wcs) {
+ const wchar_t *eos = wcs;
+ while (*eos++);
+ return static_cast<size_t>(eos - wcs - 1);
+ }
+
+ const wchar_t* t_;
+};
+
+// Specialization for non-const strings. We just reuse the implementation of the
+// const string specialization.
+template<>
+class CopyHelper<wchar_t*> : public CopyHelper<const wchar_t*> {
+ public:
+ typedef CopyHelper<const wchar_t*> Base;
+ CopyHelper(wchar_t* t) : Base(t) {}
+
+ const void* GetStart() const {
+ return Base::GetStart();
+ }
+
+ bool Update(void* buffer) {
+ return Base::Update(buffer);
+ }
+
+ uint32_t GetSize() const { return Base::GetSize(); }
+
+ bool IsInOut() {
+ return Base::IsInOut();
+ }
+
+ ArgType GetType() {
+ return Base::GetType();
+ }
+};
+
+// Specialization for wchar_t arrays strings. We just reuse the implementation
+// of the const string specialization.
+template<size_t n>
+class CopyHelper<const wchar_t[n]> : public CopyHelper<const wchar_t*> {
+ public:
+ typedef const wchar_t array[n];
+ typedef CopyHelper<const wchar_t*> Base;
+ CopyHelper(array t) : Base(t) {}
+
+ const void* GetStart() const {
+ return Base::GetStart();
+ }
+
+ bool Update(void* buffer) {
+ return Base::Update(buffer);
+ }
+
+ uint32_t GetSize() const { return Base::GetSize(); }
+
+ bool IsInOut() {
+ return Base::IsInOut();
+ }
+
+ ArgType GetType() {
+ return Base::GetType();
+ }
+};
+
+// Generic encapsulation class containing a pointer to a buffer and the
+// size of the buffer. It is used by the IPC to be able to pass in/out
+// parameters.
+class InOutCountedBuffer : public CountedBuffer {
+ public:
+ InOutCountedBuffer(void* buffer, uint32_t size)
+ : CountedBuffer(buffer, size) {}
+};
+
+// This copy helper template specialization catches the cases where the
+// parameter is an input/output buffer.
+template<>
+class CopyHelper<InOutCountedBuffer> {
+ public:
+ CopyHelper(const InOutCountedBuffer t) : t_(t) {}
+
+  // Returns the pointer to the start of the buffer.
+ const void* GetStart() const {
+ return t_.Buffer();
+ }
+
+ // Updates the buffer with the value from the new buffer in parameter.
+ bool Update(void* buffer) {
+ // We are touching user memory, this has to be done from inside a try
+ // except.
+ __try {
+ memcpy(t_.Buffer(), buffer, t_.Size());
+ }
+ __except(EXCEPTION_EXECUTE_HANDLER) {
+ return false;
+ }
+ return true;
+ }
+
+  // Returns the size of the wrapped in/out buffer in bytes, as recorded
+  // at construction time.
+ uint32_t GetSize() const { return t_.Size(); }
+
+ // Returns true if the current type is used as an In or InOut parameter.
+ bool IsInOut() {
+ return true;
+ }
+
+ ArgType GetType() {
+ return INOUTPTR_TYPE;
+ }
+
+ private:
+ const InOutCountedBuffer t_;
+};
+
+// The following two macros make it less error prone the generation
+// of CrossCall functions with ever more input parameters.
+
+#define XCALL_GEN_PARAMS_OBJ(num, params) \
+ typedef ActualCallParams<num, kIPCChannelSize> ActualParams; \
+ void* raw_mem = ipc_provider.GetBuffer(); \
+ if (NULL == raw_mem) \
+ return SBOX_ERROR_NO_SPACE; \
+ ActualParams* params = new(raw_mem) ActualParams(tag);
+
+#define XCALL_GEN_COPY_PARAM(num, params) \
+ static_assert(kMaxIpcParams >= num, "too many parameters"); \
+ CopyHelper<Par##num> ch##num(p##num); \
+ if (!params->CopyParamIn(num - 1, ch##num.GetStart(), ch##num.GetSize(), \
+ ch##num.IsInOut(), ch##num.GetType())) \
+ return SBOX_ERROR_NO_SPACE;
+
+#define XCALL_GEN_UPDATE_PARAM(num, params) \
+ if (!ch##num.Update(params->GetParamPtr(num-1))) {\
+ ipc_provider.FreeBuffer(raw_mem); \
+ return SBOX_ERROR_BAD_PARAMS; \
+ }
+
+#define XCALL_GEN_FREE_CHANNEL() \
+ ipc_provider.FreeBuffer(raw_mem);
+
+// CrossCall template with one input parameter
+template <typename IPCProvider, typename Par1>
+ResultCode CrossCall(IPCProvider& ipc_provider,
+ uint32_t tag,
+ const Par1& p1,
+ CrossCallReturn* answer) {
+ XCALL_GEN_PARAMS_OBJ(1, call_params);
+ XCALL_GEN_COPY_PARAM(1, call_params);
+
+ ResultCode result = ipc_provider.DoCall(call_params, answer);
+
+ if (SBOX_ERROR_CHANNEL_ERROR != result) {
+ XCALL_GEN_UPDATE_PARAM(1, call_params);
+ XCALL_GEN_FREE_CHANNEL();
+ }
+
+ return result;
+}
+
+// CrossCall template with two input parameters.
+template <typename IPCProvider, typename Par1, typename Par2>
+ResultCode CrossCall(IPCProvider& ipc_provider,
+ uint32_t tag,
+ const Par1& p1,
+ const Par2& p2,
+ CrossCallReturn* answer) {
+ XCALL_GEN_PARAMS_OBJ(2, call_params);
+ XCALL_GEN_COPY_PARAM(1, call_params);
+ XCALL_GEN_COPY_PARAM(2, call_params);
+
+ ResultCode result = ipc_provider.DoCall(call_params, answer);
+
+ if (SBOX_ERROR_CHANNEL_ERROR != result) {
+ XCALL_GEN_UPDATE_PARAM(1, call_params);
+ XCALL_GEN_UPDATE_PARAM(2, call_params);
+ XCALL_GEN_FREE_CHANNEL();
+ }
+ return result;
+}
+
+// CrossCall template with three input parameters.
+template <typename IPCProvider, typename Par1, typename Par2, typename Par3>
+ResultCode CrossCall(IPCProvider& ipc_provider,
+ uint32_t tag,
+ const Par1& p1,
+ const Par2& p2,
+ const Par3& p3,
+ CrossCallReturn* answer) {
+ XCALL_GEN_PARAMS_OBJ(3, call_params);
+ XCALL_GEN_COPY_PARAM(1, call_params);
+ XCALL_GEN_COPY_PARAM(2, call_params);
+ XCALL_GEN_COPY_PARAM(3, call_params);
+
+ ResultCode result = ipc_provider.DoCall(call_params, answer);
+
+ if (SBOX_ERROR_CHANNEL_ERROR != result) {
+ XCALL_GEN_UPDATE_PARAM(1, call_params);
+ XCALL_GEN_UPDATE_PARAM(2, call_params);
+ XCALL_GEN_UPDATE_PARAM(3, call_params);
+ XCALL_GEN_FREE_CHANNEL();
+ }
+ return result;
+}
+
+// CrossCall template with four input parameters.
+template <typename IPCProvider,
+ typename Par1,
+ typename Par2,
+ typename Par3,
+ typename Par4>
+ResultCode CrossCall(IPCProvider& ipc_provider,
+ uint32_t tag,
+ const Par1& p1,
+ const Par2& p2,
+ const Par3& p3,
+ const Par4& p4,
+ CrossCallReturn* answer) {
+ XCALL_GEN_PARAMS_OBJ(4, call_params);
+ XCALL_GEN_COPY_PARAM(1, call_params);
+ XCALL_GEN_COPY_PARAM(2, call_params);
+ XCALL_GEN_COPY_PARAM(3, call_params);
+ XCALL_GEN_COPY_PARAM(4, call_params);
+
+ ResultCode result = ipc_provider.DoCall(call_params, answer);
+
+ if (SBOX_ERROR_CHANNEL_ERROR != result) {
+ XCALL_GEN_UPDATE_PARAM(1, call_params);
+ XCALL_GEN_UPDATE_PARAM(2, call_params);
+ XCALL_GEN_UPDATE_PARAM(3, call_params);
+ XCALL_GEN_UPDATE_PARAM(4, call_params);
+ XCALL_GEN_FREE_CHANNEL();
+ }
+ return result;
+}
+
+// CrossCall template with five input parameters.
+template <typename IPCProvider,
+ typename Par1,
+ typename Par2,
+ typename Par3,
+ typename Par4,
+ typename Par5>
+ResultCode CrossCall(IPCProvider& ipc_provider,
+ uint32_t tag,
+ const Par1& p1,
+ const Par2& p2,
+ const Par3& p3,
+ const Par4& p4,
+ const Par5& p5,
+ CrossCallReturn* answer) {
+ XCALL_GEN_PARAMS_OBJ(5, call_params);
+ XCALL_GEN_COPY_PARAM(1, call_params);
+ XCALL_GEN_COPY_PARAM(2, call_params);
+ XCALL_GEN_COPY_PARAM(3, call_params);
+ XCALL_GEN_COPY_PARAM(4, call_params);
+ XCALL_GEN_COPY_PARAM(5, call_params);
+
+ ResultCode result = ipc_provider.DoCall(call_params, answer);
+
+ if (SBOX_ERROR_CHANNEL_ERROR != result) {
+ XCALL_GEN_UPDATE_PARAM(1, call_params);
+ XCALL_GEN_UPDATE_PARAM(2, call_params);
+ XCALL_GEN_UPDATE_PARAM(3, call_params);
+ XCALL_GEN_UPDATE_PARAM(4, call_params);
+ XCALL_GEN_UPDATE_PARAM(5, call_params);
+ XCALL_GEN_FREE_CHANNEL();
+ }
+ return result;
+}
+
+// CrossCall template with six input parameters.
+template <typename IPCProvider,
+ typename Par1,
+ typename Par2,
+ typename Par3,
+ typename Par4,
+ typename Par5,
+ typename Par6>
+ResultCode CrossCall(IPCProvider& ipc_provider,
+ uint32_t tag,
+ const Par1& p1,
+ const Par2& p2,
+ const Par3& p3,
+ const Par4& p4,
+ const Par5& p5,
+ const Par6& p6,
+ CrossCallReturn* answer) {
+ XCALL_GEN_PARAMS_OBJ(6, call_params);
+ XCALL_GEN_COPY_PARAM(1, call_params);
+ XCALL_GEN_COPY_PARAM(2, call_params);
+ XCALL_GEN_COPY_PARAM(3, call_params);
+ XCALL_GEN_COPY_PARAM(4, call_params);
+ XCALL_GEN_COPY_PARAM(5, call_params);
+ XCALL_GEN_COPY_PARAM(6, call_params);
+
+ ResultCode result = ipc_provider.DoCall(call_params, answer);
+
+ if (SBOX_ERROR_CHANNEL_ERROR != result) {
+ XCALL_GEN_UPDATE_PARAM(1, call_params);
+ XCALL_GEN_UPDATE_PARAM(2, call_params);
+ XCALL_GEN_UPDATE_PARAM(3, call_params);
+ XCALL_GEN_UPDATE_PARAM(4, call_params);
+ XCALL_GEN_UPDATE_PARAM(5, call_params);
+ XCALL_GEN_UPDATE_PARAM(6, call_params);
+ XCALL_GEN_FREE_CHANNEL();
+ }
+ return result;
+}
+
+// CrossCall template with seven input parameters.
+template <typename IPCProvider,
+ typename Par1,
+ typename Par2,
+ typename Par3,
+ typename Par4,
+ typename Par5,
+ typename Par6,
+ typename Par7>
+ResultCode CrossCall(IPCProvider& ipc_provider,
+ uint32_t tag,
+ const Par1& p1,
+ const Par2& p2,
+ const Par3& p3,
+ const Par4& p4,
+ const Par5& p5,
+ const Par6& p6,
+ const Par7& p7,
+ CrossCallReturn* answer) {
+ XCALL_GEN_PARAMS_OBJ(7, call_params);
+ XCALL_GEN_COPY_PARAM(1, call_params);
+ XCALL_GEN_COPY_PARAM(2, call_params);
+ XCALL_GEN_COPY_PARAM(3, call_params);
+ XCALL_GEN_COPY_PARAM(4, call_params);
+ XCALL_GEN_COPY_PARAM(5, call_params);
+ XCALL_GEN_COPY_PARAM(6, call_params);
+ XCALL_GEN_COPY_PARAM(7, call_params);
+
+ ResultCode result = ipc_provider.DoCall(call_params, answer);
+
+ if (SBOX_ERROR_CHANNEL_ERROR != result) {
+ XCALL_GEN_UPDATE_PARAM(1, call_params);
+ XCALL_GEN_UPDATE_PARAM(2, call_params);
+ XCALL_GEN_UPDATE_PARAM(3, call_params);
+ XCALL_GEN_UPDATE_PARAM(4, call_params);
+ XCALL_GEN_UPDATE_PARAM(5, call_params);
+ XCALL_GEN_UPDATE_PARAM(6, call_params);
+ XCALL_GEN_UPDATE_PARAM(7, call_params);
+ XCALL_GEN_FREE_CHANNEL();
+ }
+ return result;
+}
+} // namespace sandbox
+
+#endif  // SANDBOX_SRC_CROSSCALL_CLIENT_H_
diff --git a/libchrome/sandbox/win/src/crosscall_params.h b/libchrome/sandbox/win/src/crosscall_params.h
new file mode 100644
index 0000000..eb59c44
--- /dev/null
+++ b/libchrome/sandbox/win/src/crosscall_params.h
@@ -0,0 +1,287 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_SRC_CROSSCALL_PARAMS_H__
+#define SANDBOX_SRC_CROSSCALL_PARAMS_H__
+
+#include <windows.h>
+#include <lmaccess.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/macros.h"
+#include "sandbox/win/src/internal_types.h"
+#include "sandbox/win/src/sandbox_types.h"
+
+// Increases |value| until there is no need for padding given an int64_t
+// alignment. Returns the increased value.
+inline uint32_t Align(uint32_t value) {
+ uint32_t alignment = sizeof(int64_t);
+ return ((value + alignment - 1) / alignment) * alignment;
+}
+
+// This header is part of CrossCall: the sandbox inter-process communication.
+// This header defines the basic types used both in the client IPC and in the
+// server IPC code. CrossCallParams and ActualCallParams model the input
+// parameters of an IPC call and CrossCallReturn models the output params and
+// the return value.
+//
+// An IPC call is defined by its 'tag' which is a (uint32_t) unique identifier
+// that is used to route the IPC call to the proper server. Every tag implies
+// a complete call signature including the order and type of each parameter.
+//
+// Like most IPC systems, CrossCall is designed to take as inputs 'simple'
+// types such as integers and strings. Classes, generic arrays or pointers to
+// them are not supported.
+//
+// Another limitation of CrossCall is that the return value and output
+// parameters can only be uint32_t integers. Returning complex structures or
+// strings is not supported.
+
+namespace sandbox {
+
+// max number of extended return parameters. See CrossCallReturn
+const size_t kExtendedReturnCount = 8;
+
+// Union of multiple types to be used as extended results
+// in the CrossCallReturn.
+union MultiType {
+ uint32_t unsigned_int;
+ void* pointer;
+ HANDLE handle;
+ ULONG_PTR ulong_ptr;
+};
+
+// Maximum number of IPC parameters currently supported.
+// To increase this value, we have to:
+// - Add another Callback typedef to Dispatcher.
+// - Add another case to the switch on SharedMemIPCServer::InvokeCallback.
+// - Add another case to the switch in GetActualAndMaxBufferSize
+const int kMaxIpcParams = 9;
+
+// Contains the information about a parameter in the ipc buffer.
+struct ParamInfo {
+ ArgType type_;
+ uint32_t offset_;
+ uint32_t size_;
+};
+
+// Models the return value and the return parameters of an IPC call
+// currently limited to one status code and eight generic return values
+// which cannot be pointers to other data. For x64 ports this structure
+// might have to use other integer types.
+struct CrossCallReturn {
+ // the IPC tag. It should match the original IPC tag.
+ uint32_t tag;
+ // The result of the IPC operation itself.
+ ResultCode call_outcome;
+ // the result of the IPC call as executed in the server. The interpretation
+ // of this value depends on the specific service.
+ union {
+ NTSTATUS nt_status;
+ DWORD win32_result;
+ };
+ // Number of extended return values.
+ uint32_t extended_count;
+ // for calls that should return a windows handle. It is found here.
+ HANDLE handle;
+ // The array of extended values.
+ MultiType extended[kExtendedReturnCount];
+};
+
+// CrossCallParams base class that models the input params all packed in a
+// single compact memory blob. The representation can vary but in general a
+// given child of this class is meant to represent all input parameters
+// necessary to make a IPC call.
+//
+// This class cannot have virtual members because its assumed the IPC
+// parameters start from the 'this' pointer to the end, which is defined by
+// one of the subclasses
+//
+// Objects of this class cannot be constructed directly. Only derived
+// classes have the proper knowledge to construct it.
+class CrossCallParams {
+ public:
+ // Returns the tag (ipc unique id) associated with this IPC.
+ uint32_t GetTag() const { return tag_; }
+
+  // Returns the beginning of the buffer where the IPC params can be stored
+  // prior to an IPC call.
+ const void* GetBuffer() const {
+ return this;
+ }
+
+  // Returns how many parameters this IPC call should have.
+ uint32_t GetParamsCount() const { return params_count_; }
+
+ // Returns a pointer to the CrossCallReturn structure.
+ CrossCallReturn* GetCallReturn() {
+ return &call_return;
+ }
+
+ // Returns TRUE if this call contains InOut parameters.
+ bool IsInOut() const { return (1 == is_in_out_); }
+
+ // Tells the CrossCall object if it contains InOut parameters.
+ void SetIsInOut(bool value) {
+ if (value)
+ is_in_out_ = 1;
+ else
+ is_in_out_ = 0;
+ }
+
+ protected:
+ // constructs the IPC call params. Called only from the derived classes
+ CrossCallParams(uint32_t tag, uint32_t params_count)
+ : tag_(tag), is_in_out_(0), params_count_(params_count) {}
+
+ private:
+ uint32_t tag_;
+ uint32_t is_in_out_;
+ CrossCallReturn call_return;
+ const uint32_t params_count_;
+ DISALLOW_COPY_AND_ASSIGN(CrossCallParams);
+};
+
+// ActualCallParams models a specific IPC call's parameters with respect to the
+// storage allocation that the packed parameters should need.
+// NUMBER_PARAMS: the number of parameters, valid from 1 to N
+// BLOCK_SIZE: the total storage that the NUMBER_PARAMS parameters can take,
+// typically the block size is defined by the channel size of the underlying
+// ipc mechanism.
+// In practice this class is used to leverage C++'s capacity to properly
+// calculate sizes and displacements given the possibility of the packed params
+// blob to be complex.
+//
+// As is, this class assumes that the layout of the blob is as follows. Assume
+// that NUMBER_PARAMS = 2 and a 32-bit build:
+//
+// [ tag 4 bytes]
+// [ IsOnOut 4 bytes]
+// [ call return 52 bytes]
+// [ params count 4 bytes]
+// [ parameter 0 type 4 bytes]
+// [ parameter 0 offset 4 bytes] ---delta to ---\
+// [ parameter 0 size 4 bytes] |
+// [ parameter 1 type 4 bytes] |
+// [ parameter 1 offset 4 bytes] ---------------|--\
+// [ parameter 1 size 4 bytes] | |
+// [ parameter 2 type 4 bytes] | |
+// [ parameter 2 offset 4 bytes] ----------------------\
+// [ parameter 2 size 4 bytes] | | |
+// |---------------------------| | | |
+// | value 0 (x bytes) | <--------------/ | |
+// | value 1 (y bytes) | <-----------------/ |
+// | | |
+// | end of buffer | <---------------------/
+// |---------------------------|
+//
+// Note that the actual number of params is NUMBER_PARAMS + 1
+// so that the size of each actual param can be computed from the difference
+// between one parameter and the next down. The offset of the last param
+// points to the end of the buffer and the type and size are undefined.
+//
+template <size_t NUMBER_PARAMS, size_t BLOCK_SIZE>
+class ActualCallParams : public CrossCallParams {
+ public:
+ // constructor. Pass the ipc unique tag as input
+ explicit ActualCallParams(uint32_t tag)
+ : CrossCallParams(tag, NUMBER_PARAMS) {
+ param_info_[0].offset_ =
+ static_cast<uint32_t>(parameters_ - reinterpret_cast<char*>(this));
+ }
+
+ // Testing-only constructor. Allows setting the |number_params| to a
+ // wrong value.
+ ActualCallParams(uint32_t tag, uint32_t number_params)
+ : CrossCallParams(tag, number_params) {
+ param_info_[0].offset_ =
+ static_cast<uint32_t>(parameters_ - reinterpret_cast<char*>(this));
+ }
+
+ // Testing-only method. Allows setting the apparent size to a wrong value.
+ // returns the previous size.
+ uint32_t OverrideSize(uint32_t new_size) {
+ uint32_t previous_size = param_info_[NUMBER_PARAMS].offset_;
+ param_info_[NUMBER_PARAMS].offset_ = new_size;
+ return previous_size;
+ }
+
+  // Copies each parameter into the internal buffer. For each you must supply:
+ // index: 0 for the first param, 1 for the next an so on
+ bool CopyParamIn(uint32_t index,
+ const void* parameter_address,
+ uint32_t size,
+ bool is_in_out,
+ ArgType type) {
+ if (index >= NUMBER_PARAMS) {
+ return false;
+ }
+
+ if (UINT32_MAX == size) {
+ // Memory error while getting the size.
+ return false;
+ }
+
+ if (size && !parameter_address) {
+ return false;
+ }
+
+ if ((size > sizeof(*this)) ||
+ (param_info_[index].offset_ > (sizeof(*this) - size))) {
+ // It does not fit, abort copy.
+ return false;
+ }
+
+ char* dest = reinterpret_cast<char*>(this) + param_info_[index].offset_;
+
+ // We might be touching user memory, this has to be done from inside a try
+ // except.
+ __try {
+ memcpy(dest, parameter_address, size);
+ }
+ __except(EXCEPTION_EXECUTE_HANDLER) {
+ return false;
+ }
+
+ // Set the flag to tell the broker to update the buffer once the call is
+ // made.
+ if (is_in_out)
+ SetIsInOut(true);
+
+ param_info_[index + 1].offset_ = Align(param_info_[index].offset_ +
+ size);
+ param_info_[index].size_ = size;
+ param_info_[index].type_ = type;
+ return true;
+ }
+
+ // Returns a pointer to a parameter in the memory section.
+ void* GetParamPtr(size_t index) {
+ return reinterpret_cast<char*>(this) + param_info_[index].offset_;
+ }
+
+  // Returns the total size of the buffer. Only valid once all the parameters
+ // have been copied in with CopyParamIn.
+ uint32_t GetSize() const { return param_info_[NUMBER_PARAMS].offset_; }
+
+ protected:
+ ActualCallParams() : CrossCallParams(0, NUMBER_PARAMS) { }
+
+ private:
+ ParamInfo param_info_[NUMBER_PARAMS + 1];
+ char parameters_[BLOCK_SIZE - sizeof(CrossCallParams)
+ - sizeof(ParamInfo) * (NUMBER_PARAMS + 1)];
+ DISALLOW_COPY_AND_ASSIGN(ActualCallParams);
+};
+
+static_assert(sizeof(ActualCallParams<1, 1024>) == 1024, "bad size buffer");
+static_assert(sizeof(ActualCallParams<2, 1024>) == 1024, "bad size buffer");
+static_assert(sizeof(ActualCallParams<3, 1024>) == 1024, "bad size buffer");
+
+} // namespace sandbox
+
+#endif // SANDBOX_SRC_CROSSCALL_PARAMS_H__
diff --git a/libchrome/sandbox/win/src/interception_internal.h b/libchrome/sandbox/win/src/interception_internal.h
new file mode 100644
index 0000000..45a0557
--- /dev/null
+++ b/libchrome/sandbox/win/src/interception_internal.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2006-2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines InterceptionManager, the class in charge of setting up interceptions
+// for the sandboxed process. For more details see:
+// http://dev.chromium.org/developers/design-documents/sandbox .
+
+#ifndef SANDBOX_SRC_INTERCEPTION_INTERNAL_H_
+#define SANDBOX_SRC_INTERCEPTION_INTERNAL_H_
+
+#include <stddef.h>
+
+#include "sandbox/win/src/sandbox_types.h"
+
+namespace sandbox {
+
+const int kMaxThunkDataBytes = 64;
+
+enum InterceptorId;
+
+// The following structures contain variable size fields at the end, and will be
+// used to transfer information between two processes. In order to guarantee
+// our ability to follow the chain of structures, the alignment should be fixed,
+// hence this pragma.
+#pragma pack(push, 4)
+
+// Structures for the shared memory that contains patching information
+// for the InterceptionAgent.
+// A single interception:
+struct FunctionInfo {
+  size_t record_bytes;              // rounded to sizeof(size_t) bytes
+  InterceptionType type;            // how this function is intercepted
+  InterceptorId id;                 // which interceptor handles this function
+  const void* interceptor_address;  // address of the replacement function
+  char function[1];                 // placeholder for null terminated name
+  // char interceptor[]             // followed by the interceptor function
+};
+
+// A single dll:
+struct DllPatchInfo {
+  size_t record_bytes;         // rounded to sizeof(size_t) bytes
+  size_t offset_to_functions;  // byte offset to the FunctionInfo records
+  int num_functions;           // how many FunctionInfo records follow
+  bool unload_module;          // if true the dll is to be unloaded
+  wchar_t dll_name[1];         // placeholder for null terminated name
+  // FunctionInfo function_info[]  // followed by the functions to intercept
+};
+
+// All interceptions:
+struct SharedMemory {
+  int num_intercepted_dlls;  // number of DllPatchInfo records in dll_list
+  void* interceptor_base;    // presumably where interceptor code is mapped;
+  DllPatchInfo dll_list[1];  // placeholder for the list of dlls (see above)
+};
+
+// Dummy single thunk:
+struct ThunkData {
+  char data[kMaxThunkDataBytes];  // opaque storage for one thunk
+};
+
+// In-memory representation of the interceptions for a given dll:
+struct DllInterceptionData {
+  size_t data_bytes;    // total capacity of this structure in bytes
+  size_t used_bytes;    // bytes already consumed -- TODO confirm semantics
+  void* base;           // presumably the base of the patched dll; verify
+  int num_thunks;       // number of valid entries in |thunks|
+#if defined(_WIN64)
+  int dummy;            // Improve alignment.
+#endif
+  ThunkData thunks[1];  // placeholder for the array of thunks
+};
+
+#pragma pack(pop)
+
+} // namespace sandbox
+
+#endif // SANDBOX_SRC_INTERCEPTION_INTERNAL_H_
diff --git a/libchrome/sandbox/win/src/interceptors.h b/libchrome/sandbox/win/src/interceptors.h
new file mode 100644
index 0000000..44b34e3
--- /dev/null
+++ b/libchrome/sandbox/win/src/interceptors.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_SRC_INTERCEPTORS_H_
+#define SANDBOX_SRC_INTERCEPTORS_H_
+
+#if defined(_WIN64)
+#include "sandbox/win/src/interceptors_64.h"
+#endif
+
+namespace sandbox {
+
+enum InterceptorId {
+  // Internal use:
+  MAP_VIEW_OF_SECTION_ID = 0,
+  UNMAP_VIEW_OF_SECTION_ID,
+  // Policy broker:
+  SET_INFORMATION_THREAD_ID,
+  OPEN_THREAD_TOKEN_ID,
+  OPEN_THREAD_TOKEN_EX_ID,
+  OPEN_THREAD_ID,
+  OPEN_PROCESS_ID,
+  OPEN_PROCESS_TOKEN_ID,
+  OPEN_PROCESS_TOKEN_EX_ID,
+  // Filesystem dispatcher:
+  CREATE_FILE_ID,
+  OPEN_FILE_ID,
+  QUERY_ATTRIB_FILE_ID,
+  QUERY_FULL_ATTRIB_FILE_ID,
+  SET_INFO_FILE_ID,
+  // Named pipe dispatcher:
+  CREATE_NAMED_PIPE_ID,
+  // Process-thread dispatcher:
+  CREATE_PROCESSW_ID,
+  CREATE_PROCESSA_ID,
+  CREATE_THREAD_ID,
+  // Registry dispatcher:
+  CREATE_KEY_ID,
+  OPEN_KEY_ID,
+  OPEN_KEY_EX_ID,
+  // Sync dispatcher:
+  CREATE_EVENT_ID,
+  OPEN_EVENT_ID,
+  // Process mitigations Win32k dispatcher:
+  GDIINITIALIZE_ID,
+  GETSTOCKOBJECT_ID,
+  REGISTERCLASSW_ID,
+  ENUMDISPLAYMONITORS_ID,
+  ENUMDISPLAYDEVICESA_ID,
+  GETMONITORINFOA_ID,
+  GETMONITORINFOW_ID,
+  CREATEOPMPROTECTEDOUTPUTS_ID,
+  GETCERTIFICATE_ID,
+  GETCERTIFICATESIZE_ID,
+  GETCERTIFICATEBYHANDLE_ID,
+  GETCERTIFICATESIZEBYHANDLE_ID,
+  DESTROYOPMPROTECTEDOUTPUT_ID,
+  CONFIGUREOPMPROTECTEDOUTPUT_ID,
+  GETOPMINFORMATION_ID,
+  GETOPMRANDOMNUMBER_ID,
+  GETSUGGESTEDOPMPROTECTEDOUTPUTARRAYSIZE_ID,
+  SETOPMSIGNINGKEYANDSEQUENCENUMBERS_ID,
+  INTERCEPTOR_MAX_ID  // Sentinel: total number of interceptor ids; keep last.
+};
+// Table of original function pointers, indexed by InterceptorId.
+typedef void* OriginalFunctions[INTERCEPTOR_MAX_ID];
+
+} // namespace sandbox
+
+#endif // SANDBOX_SRC_INTERCEPTORS_H_
diff --git a/libchrome/sandbox/win/src/internal_types.h b/libchrome/sandbox/win/src/internal_types.h
new file mode 100644
index 0000000..e102818
--- /dev/null
+++ b/libchrome/sandbox/win/src/internal_types.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_WIN_SRC_INTERNAL_TYPES_H_
+#define SANDBOX_WIN_SRC_INTERNAL_TYPES_H_
+
+#include <stdint.h>
+
+namespace sandbox {
+
+const wchar_t kNtdllName[] = L"ntdll.dll";
+const wchar_t kKerneldllName[] = L"kernel32.dll";
+const wchar_t kKernelBasedllName[] = L"kernelbase.dll";
+
+// Defines the supported C++ types encoding to numeric id. Like a poor
+// man's RTTI. Note that true C++ RTTI will not work because the types are not
+// polymorphic anyway.
+enum ArgType {
+  INVALID_TYPE = 0,  // Zero is reserved for "no/invalid type".
+  WCHAR_TYPE,        // wide-character string
+  UINT32_TYPE,       // uint32_t
+  UNISTR_TYPE,       // unicode string -- TODO confirm exact representation
+  VOIDPTR_TYPE,      // raw void* value
+  INPTR_TYPE,        // input-only buffer
+  INOUTPTR_TYPE,     // in/out buffer
+  LAST_TYPE          // Sentinel; keep last.
+};
+
+// Encapsulates a pointer to a buffer and the size of the buffer.
+class CountedBuffer {
+ public:
+  CountedBuffer(void* buffer, uint32_t size) : size_(size), buffer_(buffer) {}
+
+  uint32_t Size() const { return size_; }  // Size of the buffer in bytes.
+
+  void* Buffer() const {  // Pointer to the wrapped buffer.
+    return buffer_;
+  }
+
+ private:
+  uint32_t size_;  // byte count of |buffer_|
+  void* buffer_;   // not owned: no destructor, never freed by this class
+};
+
+// Helper class to convert void-pointer packed ints for both
+// 32 and 64 bit builds. This construct is non-portable.
+class IPCInt {
+ public:
+  explicit IPCInt(void* buffer) {
+    buffer_.vp = buffer;
+  }
+
+  explicit IPCInt(unsigned __int32 i32) {
+    buffer_.vp = NULL;  // Zero the full pointer so upper bits are 0 on x64.
+    buffer_.i32 = i32;
+  }
+
+  unsigned __int32 As32Bit() const {
+    return buffer_.i32;  // Low 32 bits of the stored value.
+  }
+
+  void* AsVoidPtr() const {
+    return buffer_.vp;
+  }
+
+ private:
+  union U {
+    void* vp;
+    unsigned __int32 i32;
+  } buffer_;  // Overlays the int on the pointer; non-portable by design.
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_WIN_SRC_INTERNAL_TYPES_H_
diff --git a/libchrome/sandbox/win/src/ipc_tags.h b/libchrome/sandbox/win/src/ipc_tags.h
new file mode 100644
index 0000000..1c754cd
--- /dev/null
+++ b/libchrome/sandbox/win/src/ipc_tags.h
@@ -0,0 +1,52 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_SRC_IPC_TAGS_H__
+#define SANDBOX_SRC_IPC_TAGS_H__
+
+namespace sandbox {
+
+enum {
+  IPC_UNUSED_TAG = 0,
+  IPC_PING1_TAG,  // Takes a cookie in parameters and returns the cookie
+                  // multiplied by 2 and the tick_count. Used for testing only.
+  IPC_PING2_TAG,  // Takes an in/out cookie in parameters and modifies the
+                  // cookie to be multiplied by 3. Used for testing only.
+  IPC_NTCREATEFILE_TAG,
+  IPC_NTOPENFILE_TAG,
+  IPC_NTQUERYATTRIBUTESFILE_TAG,
+  IPC_NTQUERYFULLATTRIBUTESFILE_TAG,
+  IPC_NTSETINFO_RENAME_TAG,
+  IPC_CREATENAMEDPIPEW_TAG,
+  IPC_NTOPENTHREAD_TAG,
+  IPC_NTOPENPROCESS_TAG,
+  IPC_NTOPENPROCESSTOKEN_TAG,
+  IPC_NTOPENPROCESSTOKENEX_TAG,
+  IPC_CREATEPROCESSW_TAG,
+  IPC_CREATEEVENT_TAG,
+  IPC_OPENEVENT_TAG,
+  IPC_NTCREATEKEY_TAG,
+  IPC_NTOPENKEY_TAG,
+  IPC_GDI_GDIDLLINITIALIZE_TAG,
+  IPC_GDI_GETSTOCKOBJECT_TAG,
+  IPC_USER_REGISTERCLASSW_TAG,
+  IPC_CREATETHREAD_TAG,
+  IPC_USER_ENUMDISPLAYMONITORS_TAG,
+  IPC_USER_ENUMDISPLAYDEVICES_TAG,
+  IPC_USER_GETMONITORINFO_TAG,
+  IPC_GDI_CREATEOPMPROTECTEDOUTPUTS_TAG,
+  IPC_GDI_GETCERTIFICATE_TAG,
+  IPC_GDI_GETCERTIFICATESIZE_TAG,
+  IPC_GDI_DESTROYOPMPROTECTEDOUTPUT_TAG,
+  IPC_GDI_CONFIGUREOPMPROTECTEDOUTPUT_TAG,
+  IPC_GDI_GETOPMINFORMATION_TAG,
+  IPC_GDI_GETOPMRANDOMNUMBER_TAG,
+  IPC_GDI_GETSUGGESTEDOPMPROTECTEDOUTPUTARRAYSIZE_TAG,
+  IPC_GDI_SETOPMSIGNINGKEYANDSEQUENCENUMBERS_TAG,
+  IPC_LAST_TAG  // Sentinel: total number of tags; keep last.
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_SRC_IPC_TAGS_H__
diff --git a/libchrome/sandbox/win/src/nt_internals.h b/libchrome/sandbox/win/src/nt_internals.h
new file mode 100644
index 0000000..6469c2b
--- /dev/null
+++ b/libchrome/sandbox/win/src/nt_internals.h
@@ -0,0 +1,912 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file holds definitions related to the ntdll API.
+
+#ifndef SANDBOX_WIN_SRC_NT_INTERNALS_H__
+#define SANDBOX_WIN_SRC_NT_INTERNALS_H__
+
+#include <windows.h>
+#include <stddef.h>
+
+typedef LONG NTSTATUS;
+#define NT_SUCCESS(st) (st >= 0)
+
+#define STATUS_SUCCESS ((NTSTATUS)0x00000000L)
+#define STATUS_BUFFER_OVERFLOW ((NTSTATUS)0x80000005L)
+#define STATUS_UNSUCCESSFUL ((NTSTATUS)0xC0000001L)
+#define STATUS_NOT_IMPLEMENTED ((NTSTATUS)0xC0000002L)
+#define STATUS_INFO_LENGTH_MISMATCH ((NTSTATUS)0xC0000004L)
+#ifndef STATUS_INVALID_PARAMETER
+// It is now defined in Windows 2008 SDK.
+#define STATUS_INVALID_PARAMETER ((NTSTATUS)0xC000000DL)
+#endif
+#define STATUS_CONFLICTING_ADDRESSES ((NTSTATUS)0xC0000018L)
+#define STATUS_ACCESS_DENIED ((NTSTATUS)0xC0000022L)
+#define STATUS_BUFFER_TOO_SMALL ((NTSTATUS)0xC0000023L)
+#define STATUS_OBJECT_NAME_NOT_FOUND ((NTSTATUS)0xC0000034L)
+#define STATUS_OBJECT_NAME_COLLISION ((NTSTATUS)0xC0000035L)
+#define STATUS_PROCEDURE_NOT_FOUND ((NTSTATUS)0xC000007AL)
+#define STATUS_INVALID_IMAGE_FORMAT ((NTSTATUS)0xC000007BL)
+#define STATUS_NO_TOKEN ((NTSTATUS)0xC000007CL)
+#define STATUS_NOT_SUPPORTED ((NTSTATUS)0xC00000BBL)
+
+#define CURRENT_PROCESS ((HANDLE) -1)
+#define CURRENT_THREAD ((HANDLE) -2)
+#define NtCurrentProcess CURRENT_PROCESS
+
+typedef struct _UNICODE_STRING {
+ USHORT Length;
+ USHORT MaximumLength;
+ PWSTR Buffer;
+} UNICODE_STRING;
+typedef UNICODE_STRING *PUNICODE_STRING;
+typedef const UNICODE_STRING *PCUNICODE_STRING;
+
+typedef struct _STRING {
+ USHORT Length;
+ USHORT MaximumLength;
+ PCHAR Buffer;
+} STRING;
+typedef STRING *PSTRING;
+
+typedef STRING ANSI_STRING;
+typedef PSTRING PANSI_STRING;
+typedef CONST PSTRING PCANSI_STRING;
+
+typedef STRING OEM_STRING;
+typedef PSTRING POEM_STRING;
+typedef CONST STRING* PCOEM_STRING;
+
+#define OBJ_CASE_INSENSITIVE 0x00000040L
+#define OBJ_OPENIF 0x00000080L
+
+typedef struct _OBJECT_ATTRIBUTES {
+ ULONG Length;
+ HANDLE RootDirectory;
+ PUNICODE_STRING ObjectName;
+ ULONG Attributes;
+ PVOID SecurityDescriptor;
+ PVOID SecurityQualityOfService;
+} OBJECT_ATTRIBUTES;
+typedef OBJECT_ATTRIBUTES *POBJECT_ATTRIBUTES;
+
+#define InitializeObjectAttributes(p, n, a, r, s) { \
+ (p)->Length = sizeof(OBJECT_ATTRIBUTES);\
+ (p)->RootDirectory = r;\
+ (p)->Attributes = a;\
+ (p)->ObjectName = n;\
+ (p)->SecurityDescriptor = s;\
+ (p)->SecurityQualityOfService = NULL;\
+}
+
+typedef struct _IO_STATUS_BLOCK {
+ union {
+ NTSTATUS Status;
+ PVOID Pointer;
+ };
+ ULONG_PTR Information;
+} IO_STATUS_BLOCK, *PIO_STATUS_BLOCK;
+
+// -----------------------------------------------------------------------
+// File IO
+
+// Create disposition values.
+
+#define FILE_SUPERSEDE 0x00000000
+#define FILE_OPEN 0x00000001
+#define FILE_CREATE 0x00000002
+#define FILE_OPEN_IF 0x00000003
+#define FILE_OVERWRITE 0x00000004
+#define FILE_OVERWRITE_IF 0x00000005
+#define FILE_MAXIMUM_DISPOSITION 0x00000005
+
+// Create/open option flags.
+
+#define FILE_DIRECTORY_FILE 0x00000001
+#define FILE_WRITE_THROUGH 0x00000002
+#define FILE_SEQUENTIAL_ONLY 0x00000004
+#define FILE_NO_INTERMEDIATE_BUFFERING 0x00000008
+
+#define FILE_SYNCHRONOUS_IO_ALERT 0x00000010
+#define FILE_SYNCHRONOUS_IO_NONALERT 0x00000020
+#define FILE_NON_DIRECTORY_FILE 0x00000040
+#define FILE_CREATE_TREE_CONNECTION 0x00000080
+
+#define FILE_COMPLETE_IF_OPLOCKED 0x00000100
+#define FILE_NO_EA_KNOWLEDGE 0x00000200
+#define FILE_OPEN_REMOTE_INSTANCE 0x00000400
+#define FILE_RANDOM_ACCESS 0x00000800
+
+#define FILE_DELETE_ON_CLOSE 0x00001000
+#define FILE_OPEN_BY_FILE_ID 0x00002000
+#define FILE_OPEN_FOR_BACKUP_INTENT 0x00004000
+#define FILE_NO_COMPRESSION 0x00008000
+
+#define FILE_RESERVE_OPFILTER 0x00100000
+#define FILE_OPEN_REPARSE_POINT 0x00200000
+#define FILE_OPEN_NO_RECALL 0x00400000
+#define FILE_OPEN_FOR_FREE_SPACE_QUERY 0x00800000
+
+// Create/open result values. These are the disposition values returned on the
+// io status information.
+#define FILE_SUPERSEDED 0x00000000
+#define FILE_OPENED 0x00000001
+#define FILE_CREATED 0x00000002
+#define FILE_OVERWRITTEN 0x00000003
+#define FILE_EXISTS 0x00000004
+#define FILE_DOES_NOT_EXIST 0x00000005
+
+typedef NTSTATUS (WINAPI *NtCreateFileFunction)(
+ OUT PHANDLE FileHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes,
+ OUT PIO_STATUS_BLOCK IoStatusBlock,
+ IN PLARGE_INTEGER AllocationSize OPTIONAL,
+ IN ULONG FileAttributes,
+ IN ULONG ShareAccess,
+ IN ULONG CreateDisposition,
+ IN ULONG CreateOptions,
+ IN PVOID EaBuffer OPTIONAL,
+ IN ULONG EaLength);
+
+typedef NTSTATUS (WINAPI *NtOpenFileFunction)(
+ OUT PHANDLE FileHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes,
+ OUT PIO_STATUS_BLOCK IoStatusBlock,
+ IN ULONG ShareAccess,
+ IN ULONG OpenOptions);
+
+typedef NTSTATUS (WINAPI *NtCloseFunction)(
+ IN HANDLE Handle);
+
+typedef enum _FILE_INFORMATION_CLASS {
+ FileRenameInformation = 10
+} FILE_INFORMATION_CLASS, *PFILE_INFORMATION_CLASS;
+
+typedef struct _FILE_RENAME_INFORMATION {
+ BOOLEAN ReplaceIfExists;
+ HANDLE RootDirectory;
+ ULONG FileNameLength;
+ WCHAR FileName[1];
+} FILE_RENAME_INFORMATION, *PFILE_RENAME_INFORMATION;
+
+typedef NTSTATUS (WINAPI *NtSetInformationFileFunction)(
+ IN HANDLE FileHandle,
+ OUT PIO_STATUS_BLOCK IoStatusBlock,
+ IN PVOID FileInformation,
+ IN ULONG Length,
+ IN FILE_INFORMATION_CLASS FileInformationClass);
+
+typedef struct FILE_BASIC_INFORMATION {
+ LARGE_INTEGER CreationTime;
+ LARGE_INTEGER LastAccessTime;
+ LARGE_INTEGER LastWriteTime;
+ LARGE_INTEGER ChangeTime;
+ ULONG FileAttributes;
+} FILE_BASIC_INFORMATION, *PFILE_BASIC_INFORMATION;
+
+typedef NTSTATUS (WINAPI *NtQueryAttributesFileFunction)(
+ IN POBJECT_ATTRIBUTES ObjectAttributes,
+ OUT PFILE_BASIC_INFORMATION FileAttributes);
+
+typedef struct _FILE_NETWORK_OPEN_INFORMATION {
+ LARGE_INTEGER CreationTime;
+ LARGE_INTEGER LastAccessTime;
+ LARGE_INTEGER LastWriteTime;
+ LARGE_INTEGER ChangeTime;
+ LARGE_INTEGER AllocationSize;
+ LARGE_INTEGER EndOfFile;
+ ULONG FileAttributes;
+} FILE_NETWORK_OPEN_INFORMATION, *PFILE_NETWORK_OPEN_INFORMATION;
+
+typedef NTSTATUS (WINAPI *NtQueryFullAttributesFileFunction)(
+ IN POBJECT_ATTRIBUTES ObjectAttributes,
+ OUT PFILE_NETWORK_OPEN_INFORMATION FileAttributes);
+
+// -----------------------------------------------------------------------
+// Sections
+
+typedef NTSTATUS (WINAPI *NtCreateSectionFunction)(
+ OUT PHANDLE SectionHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes OPTIONAL,
+ IN PLARGE_INTEGER MaximumSize OPTIONAL,
+ IN ULONG SectionPageProtection,
+ IN ULONG AllocationAttributes,
+ IN HANDLE FileHandle OPTIONAL);
+
+typedef ULONG SECTION_INHERIT;
+#define ViewShare 1
+#define ViewUnmap 2
+
+typedef NTSTATUS (WINAPI *NtMapViewOfSectionFunction)(
+ IN HANDLE SectionHandle,
+ IN HANDLE ProcessHandle,
+ IN OUT PVOID *BaseAddress,
+ IN ULONG_PTR ZeroBits,
+ IN SIZE_T CommitSize,
+ IN OUT PLARGE_INTEGER SectionOffset OPTIONAL,
+ IN OUT PSIZE_T ViewSize,
+ IN SECTION_INHERIT InheritDisposition,
+ IN ULONG AllocationType,
+ IN ULONG Win32Protect);
+
+typedef NTSTATUS (WINAPI *NtUnmapViewOfSectionFunction)(
+ IN HANDLE ProcessHandle,
+ IN PVOID BaseAddress);
+
+typedef enum _SECTION_INFORMATION_CLASS {
+ SectionBasicInformation = 0,
+ SectionImageInformation
+} SECTION_INFORMATION_CLASS;
+
+typedef struct _SECTION_BASIC_INFORMATION {
+ PVOID BaseAddress;
+ ULONG Attributes;
+ LARGE_INTEGER Size;
+} SECTION_BASIC_INFORMATION, *PSECTION_BASIC_INFORMATION;
+
+typedef NTSTATUS (WINAPI *NtQuerySectionFunction)(
+ IN HANDLE SectionHandle,
+ IN SECTION_INFORMATION_CLASS SectionInformationClass,
+ OUT PVOID SectionInformation,
+ IN SIZE_T SectionInformationLength,
+ OUT PSIZE_T ReturnLength OPTIONAL);
+
+// -----------------------------------------------------------------------
+// Process and Thread
+
+typedef struct _CLIENT_ID {
+ PVOID UniqueProcess;
+ PVOID UniqueThread;
+} CLIENT_ID, *PCLIENT_ID;
+
+typedef NTSTATUS (WINAPI *NtOpenThreadFunction) (
+ OUT PHANDLE ThreadHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes,
+ IN PCLIENT_ID ClientId);
+
+typedef NTSTATUS (WINAPI *NtOpenProcessFunction) (
+ OUT PHANDLE ProcessHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes,
+ IN PCLIENT_ID ClientId);
+
+typedef enum _NT_THREAD_INFORMATION_CLASS {
+ ThreadBasicInformation,
+ ThreadTimes,
+ ThreadPriority,
+ ThreadBasePriority,
+ ThreadAffinityMask,
+ ThreadImpersonationToken,
+ ThreadDescriptorTableEntry,
+ ThreadEnableAlignmentFaultFixup,
+ ThreadEventPair,
+ ThreadQuerySetWin32StartAddress,
+ ThreadZeroTlsCell,
+ ThreadPerformanceCount,
+ ThreadAmILastThread,
+ ThreadIdealProcessor,
+ ThreadPriorityBoost,
+ ThreadSetTlsArrayAddress,
+ ThreadIsIoPending,
+ ThreadHideFromDebugger
+} NT_THREAD_INFORMATION_CLASS, *PNT_THREAD_INFORMATION_CLASS;
+
+typedef NTSTATUS (WINAPI *NtSetInformationThreadFunction) (
+ IN HANDLE ThreadHandle,
+ IN NT_THREAD_INFORMATION_CLASS ThreadInformationClass,
+ IN PVOID ThreadInformation,
+ IN ULONG ThreadInformationLength);
+
+// Partial definition only:
+typedef enum _PROCESSINFOCLASS {
+ ProcessBasicInformation = 0,
+ ProcessExecuteFlags = 0x22
+} PROCESSINFOCLASS;
+
+typedef PVOID PPEB;
+typedef LONG KPRIORITY;
+
+typedef struct _PROCESS_BASIC_INFORMATION {
+ union {
+ NTSTATUS ExitStatus;
+ PVOID padding_for_x64_0;
+ };
+ PPEB PebBaseAddress;
+ KAFFINITY AffinityMask;
+ union {
+ KPRIORITY BasePriority;
+ PVOID padding_for_x64_1;
+ };
+ union {
+ DWORD UniqueProcessId;
+ PVOID padding_for_x64_2;
+ };
+ union {
+ DWORD InheritedFromUniqueProcessId;
+ PVOID padding_for_x64_3;
+ };
+} PROCESS_BASIC_INFORMATION, *PPROCESS_BASIC_INFORMATION;
+
+typedef NTSTATUS(WINAPI* NtQueryInformationProcessFunction)(
+ IN HANDLE ProcessHandle,
+ IN PROCESSINFOCLASS ProcessInformationClass,
+ OUT PVOID ProcessInformation,
+ IN ULONG ProcessInformationLength,
+ OUT PULONG ReturnLength OPTIONAL);
+
+typedef NTSTATUS(WINAPI* NtSetInformationProcessFunction)(
+ HANDLE ProcessHandle,
+ IN PROCESSINFOCLASS ProcessInformationClass,
+ IN PVOID ProcessInformation,
+ IN ULONG ProcessInformationLength);
+
+typedef NTSTATUS (WINAPI *NtOpenThreadTokenFunction) (
+ IN HANDLE ThreadHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN BOOLEAN OpenAsSelf,
+ OUT PHANDLE TokenHandle);
+
+typedef NTSTATUS (WINAPI *NtOpenThreadTokenExFunction) (
+ IN HANDLE ThreadHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN BOOLEAN OpenAsSelf,
+ IN ULONG HandleAttributes,
+ OUT PHANDLE TokenHandle);
+
+typedef NTSTATUS (WINAPI *NtOpenProcessTokenFunction) (
+ IN HANDLE ProcessHandle,
+ IN ACCESS_MASK DesiredAccess,
+ OUT PHANDLE TokenHandle);
+
+typedef NTSTATUS (WINAPI *NtOpenProcessTokenExFunction) (
+ IN HANDLE ProcessHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN ULONG HandleAttributes,
+ OUT PHANDLE TokenHandle);
+
+typedef NTSTATUS(WINAPI* NtQueryInformationTokenFunction)(
+ IN HANDLE TokenHandle,
+ IN TOKEN_INFORMATION_CLASS TokenInformationClass,
+ OUT PVOID TokenInformation,
+ IN ULONG TokenInformationLength,
+ OUT PULONG ReturnLength);
+
+typedef NTSTATUS(WINAPI* RtlCreateUserThreadFunction)(
+ IN HANDLE Process,
+ IN PSECURITY_DESCRIPTOR ThreadSecurityDescriptor,
+ IN BOOLEAN CreateSuspended,
+ IN ULONG ZeroBits,
+ IN SIZE_T MaximumStackSize,
+ IN SIZE_T CommittedStackSize,
+ IN LPTHREAD_START_ROUTINE StartAddress,
+ IN PVOID Parameter,
+ OUT PHANDLE Thread,
+ OUT PCLIENT_ID ClientId);
+
+typedef NTSTATUS(WINAPI* RtlConvertSidToUnicodeStringFunction)(
+ OUT PUNICODE_STRING UnicodeString,
+ IN PSID Sid,
+ IN BOOLEAN AllocateDestinationString);
+
+typedef VOID(WINAPI* RtlFreeUnicodeStringFunction)(
+ IN OUT PUNICODE_STRING UnicodeString);
+
+// -----------------------------------------------------------------------
+// Registry
+
+typedef enum _KEY_VALUE_INFORMATION_CLASS {
+ KeyValueFullInformation = 1
+} KEY_VALUE_INFORMATION_CLASS,
+ *PKEY_VALUE_INFORMATION_CLASS;
+
+typedef struct _KEY_VALUE_FULL_INFORMATION {
+ ULONG TitleIndex;
+ ULONG Type;
+ ULONG DataOffset;
+ ULONG DataLength;
+ ULONG NameLength;
+ WCHAR Name[1];
+} KEY_VALUE_FULL_INFORMATION, *PKEY_VALUE_FULL_INFORMATION;
+
+typedef NTSTATUS (WINAPI *NtCreateKeyFunction)(
+ OUT PHANDLE KeyHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes,
+ IN ULONG TitleIndex,
+ IN PUNICODE_STRING Class OPTIONAL,
+ IN ULONG CreateOptions,
+ OUT PULONG Disposition OPTIONAL);
+
+typedef NTSTATUS (WINAPI *NtOpenKeyFunction)(
+ OUT PHANDLE KeyHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes);
+
+typedef NTSTATUS (WINAPI *NtOpenKeyExFunction)(
+ OUT PHANDLE KeyHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes,
+ IN DWORD open_options);
+
+typedef NTSTATUS (WINAPI *NtDeleteKeyFunction)(
+ IN HANDLE KeyHandle);
+
+typedef NTSTATUS(WINAPI* RtlFormatCurrentUserKeyPathFunction)(
+ OUT PUNICODE_STRING RegistryPath);
+
+typedef NTSTATUS(WINAPI* NtQueryValueKeyFunction)(IN HANDLE KeyHandle,
+ IN PUNICODE_STRING ValueName,
+ IN KEY_VALUE_INFORMATION_CLASS
+ KeyValueInformationClass,
+ OUT PVOID KeyValueInformation,
+ IN ULONG Length,
+ OUT PULONG ResultLength);
+
+typedef NTSTATUS(WINAPI* NtSetValueKeyFunction)(IN HANDLE KeyHandle,
+ IN PUNICODE_STRING ValueName,
+ IN ULONG TitleIndex OPTIONAL,
+ IN ULONG Type,
+ IN PVOID Data,
+ IN ULONG DataSize);
+
+// -----------------------------------------------------------------------
+// Memory
+
+// Don't really need this structure right now.
+typedef PVOID PRTL_HEAP_PARAMETERS;
+
+typedef PVOID (WINAPI *RtlCreateHeapFunction)(
+ IN ULONG Flags,
+ IN PVOID HeapBase OPTIONAL,
+ IN SIZE_T ReserveSize OPTIONAL,
+ IN SIZE_T CommitSize OPTIONAL,
+ IN PVOID Lock OPTIONAL,
+ IN PRTL_HEAP_PARAMETERS Parameters OPTIONAL);
+
+typedef PVOID (WINAPI *RtlDestroyHeapFunction)(
+ IN PVOID HeapHandle);
+
+typedef PVOID (WINAPI *RtlAllocateHeapFunction)(
+ IN PVOID HeapHandle,
+ IN ULONG Flags,
+ IN SIZE_T Size);
+
+typedef BOOLEAN (WINAPI *RtlFreeHeapFunction)(
+ IN PVOID HeapHandle,
+ IN ULONG Flags,
+ IN PVOID HeapBase);
+
+typedef NTSTATUS (WINAPI *NtAllocateVirtualMemoryFunction) (
+ IN HANDLE ProcessHandle,
+ IN OUT PVOID *BaseAddress,
+ IN ULONG_PTR ZeroBits,
+ IN OUT PSIZE_T RegionSize,
+ IN ULONG AllocationType,
+ IN ULONG Protect);
+
+typedef NTSTATUS (WINAPI *NtFreeVirtualMemoryFunction) (
+ IN HANDLE ProcessHandle,
+ IN OUT PVOID *BaseAddress,
+ IN OUT PSIZE_T RegionSize,
+ IN ULONG FreeType);
+
+typedef enum _MEMORY_INFORMATION_CLASS {
+ MemoryBasicInformation = 0,
+ MemoryWorkingSetList,
+ MemorySectionName,
+ MemoryBasicVlmInformation
+} MEMORY_INFORMATION_CLASS;
+
+typedef struct _MEMORY_SECTION_NAME { // Information Class 2
+ UNICODE_STRING SectionFileName;
+} MEMORY_SECTION_NAME, *PMEMORY_SECTION_NAME;
+
+typedef NTSTATUS (WINAPI *NtQueryVirtualMemoryFunction)(
+ IN HANDLE ProcessHandle,
+ IN PVOID BaseAddress,
+ IN MEMORY_INFORMATION_CLASS MemoryInformationClass,
+ OUT PVOID MemoryInformation,
+ IN SIZE_T MemoryInformationLength,
+ OUT PSIZE_T ReturnLength OPTIONAL);
+
+typedef NTSTATUS (WINAPI *NtProtectVirtualMemoryFunction)(
+ IN HANDLE ProcessHandle,
+ IN OUT PVOID* BaseAddress,
+ IN OUT PSIZE_T ProtectSize,
+ IN ULONG NewProtect,
+ OUT PULONG OldProtect);
+
+// -----------------------------------------------------------------------
+// Objects
+
+typedef enum _OBJECT_INFORMATION_CLASS {
+ ObjectBasicInformation,
+ ObjectNameInformation,
+ ObjectTypeInformation,
+ ObjectAllInformation,
+ ObjectDataInformation
+} OBJECT_INFORMATION_CLASS, *POBJECT_INFORMATION_CLASS;
+
+typedef struct _OBJDIR_INFORMATION {
+ UNICODE_STRING ObjectName;
+ UNICODE_STRING ObjectTypeName;
+ BYTE Data[1];
+} OBJDIR_INFORMATION;
+
+typedef struct _PUBLIC_OBJECT_BASIC_INFORMATION {
+ ULONG Attributes;
+ ACCESS_MASK GrantedAccess;
+ ULONG HandleCount;
+ ULONG PointerCount;
+ ULONG Reserved[10]; // reserved for internal use
+} PUBLIC_OBJECT_BASIC_INFORMATION, *PPUBLIC_OBJECT_BASIC_INFORMATION;
+
+typedef struct __PUBLIC_OBJECT_TYPE_INFORMATION {
+ UNICODE_STRING TypeName;
+ ULONG Reserved[22]; // reserved for internal use
+} PUBLIC_OBJECT_TYPE_INFORMATION, *PPUBLIC_OBJECT_TYPE_INFORMATION;
+
+typedef enum _POOL_TYPE {
+ NonPagedPool,
+ PagedPool,
+ NonPagedPoolMustSucceed,
+ ReservedType,
+ NonPagedPoolCacheAligned,
+ PagedPoolCacheAligned,
+ NonPagedPoolCacheAlignedMustS
+} POOL_TYPE;
+
+typedef struct _OBJECT_BASIC_INFORMATION {
+ ULONG Attributes;
+ ACCESS_MASK GrantedAccess;
+ ULONG HandleCount;
+ ULONG PointerCount;
+ ULONG PagedPoolUsage;
+ ULONG NonPagedPoolUsage;
+ ULONG Reserved[3];
+ ULONG NameInformationLength;
+ ULONG TypeInformationLength;
+ ULONG SecurityDescriptorLength;
+ LARGE_INTEGER CreateTime;
+} OBJECT_BASIC_INFORMATION, *POBJECT_BASIC_INFORMATION;
+
+typedef struct _OBJECT_TYPE_INFORMATION {
+ UNICODE_STRING Name;
+ ULONG TotalNumberOfObjects;
+ ULONG TotalNumberOfHandles;
+ ULONG TotalPagedPoolUsage;
+ ULONG TotalNonPagedPoolUsage;
+ ULONG TotalNamePoolUsage;
+ ULONG TotalHandleTableUsage;
+ ULONG HighWaterNumberOfObjects;
+ ULONG HighWaterNumberOfHandles;
+ ULONG HighWaterPagedPoolUsage;
+ ULONG HighWaterNonPagedPoolUsage;
+ ULONG HighWaterNamePoolUsage;
+ ULONG HighWaterHandleTableUsage;
+ ULONG InvalidAttributes;
+ GENERIC_MAPPING GenericMapping;
+ ULONG ValidAccess;
+ BOOLEAN SecurityRequired;
+ BOOLEAN MaintainHandleCount;
+ USHORT MaintainTypeList;
+ POOL_TYPE PoolType;
+ ULONG PagedPoolUsage;
+ ULONG NonPagedPoolUsage;
+} OBJECT_TYPE_INFORMATION, *POBJECT_TYPE_INFORMATION;
+
+typedef enum _SYSTEM_INFORMATION_CLASS {
+ SystemHandleInformation = 16
+} SYSTEM_INFORMATION_CLASS;
+
+typedef struct _SYSTEM_HANDLE_INFORMATION {
+ USHORT ProcessId;
+ USHORT CreatorBackTraceIndex;
+ UCHAR ObjectTypeNumber;
+ UCHAR Flags;
+ USHORT Handle;
+ PVOID Object;
+ ACCESS_MASK GrantedAccess;
+} SYSTEM_HANDLE_INFORMATION, *PSYSTEM_HANDLE_INFORMATION;
+
+typedef struct _SYSTEM_HANDLE_INFORMATION_EX {
+ ULONG NumberOfHandles;
+ SYSTEM_HANDLE_INFORMATION Information[1];
+} SYSTEM_HANDLE_INFORMATION_EX, *PSYSTEM_HANDLE_INFORMATION_EX;
+
+typedef struct _OBJECT_NAME_INFORMATION {
+ UNICODE_STRING ObjectName;
+} OBJECT_NAME_INFORMATION, *POBJECT_NAME_INFORMATION;
+
+typedef NTSTATUS (WINAPI *NtQueryObjectFunction)(
+ IN HANDLE Handle,
+ IN OBJECT_INFORMATION_CLASS ObjectInformationClass,
+ OUT PVOID ObjectInformation OPTIONAL,
+ IN ULONG ObjectInformationLength,
+ OUT PULONG ReturnLength OPTIONAL);
+
+typedef NTSTATUS (WINAPI *NtDuplicateObjectFunction)(
+ IN HANDLE SourceProcess,
+ IN HANDLE SourceHandle,
+ IN HANDLE TargetProcess,
+ OUT PHANDLE TargetHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN ULONG Attributes,
+ IN ULONG Options);
+
+typedef NTSTATUS (WINAPI *NtSignalAndWaitForSingleObjectFunction)(
+ IN HANDLE HandleToSignal,
+ IN HANDLE HandleToWait,
+ IN BOOLEAN Alertable,
+ IN PLARGE_INTEGER Timeout OPTIONAL);
+
+typedef NTSTATUS (WINAPI *NtQuerySystemInformation)(
+ IN SYSTEM_INFORMATION_CLASS SystemInformationClass,
+ OUT PVOID SystemInformation,
+ IN ULONG SystemInformationLength,
+ OUT PULONG ReturnLength);
+
+typedef NTSTATUS (WINAPI *NtQueryObject)(
+ IN HANDLE Handle,
+ IN OBJECT_INFORMATION_CLASS ObjectInformationClass,
+ OUT PVOID ObjectInformation,
+ IN ULONG ObjectInformationLength,
+ OUT PULONG ReturnLength);
+
+// -----------------------------------------------------------------------
+// Strings
+
+typedef int (__cdecl *_strnicmpFunction)(
+ IN const char* _Str1,
+ IN const char* _Str2,
+ IN size_t _MaxCount);
+
+typedef size_t (__cdecl *strlenFunction)(
+ IN const char * _Str);
+
+typedef size_t (__cdecl *wcslenFunction)(
+ IN const wchar_t* _Str);
+
+typedef void* (__cdecl *memcpyFunction)(
+ IN void* dest,
+ IN const void* src,
+ IN size_t count);
+
+typedef NTSTATUS (WINAPI *RtlAnsiStringToUnicodeStringFunction)(
+ IN OUT PUNICODE_STRING DestinationString,
+ IN PANSI_STRING SourceString,
+ IN BOOLEAN AllocateDestinationString);
+
+typedef LONG (WINAPI *RtlCompareUnicodeStringFunction)(
+ IN PCUNICODE_STRING String1,
+ IN PCUNICODE_STRING String2,
+ IN BOOLEAN CaseInSensitive);
+
+typedef VOID (WINAPI *RtlInitUnicodeStringFunction) (
+ IN OUT PUNICODE_STRING DestinationString,
+ IN PCWSTR SourceString);
+
+typedef ULONG (WINAPI* RtlNtStatusToDosErrorFunction)(NTSTATUS status);
+
+typedef enum _EVENT_TYPE {
+ NotificationEvent,
+ SynchronizationEvent
+} EVENT_TYPE, *PEVENT_TYPE;
+
+typedef NTSTATUS (WINAPI* NtCreateDirectoryObjectFunction) (
+ PHANDLE DirectoryHandle,
+ ACCESS_MASK DesiredAccess,
+ POBJECT_ATTRIBUTES ObjectAttributes);
+
+typedef NTSTATUS (WINAPI* NtOpenDirectoryObjectFunction) (
+ PHANDLE DirectoryHandle,
+ ACCESS_MASK DesiredAccess,
+ POBJECT_ATTRIBUTES ObjectAttributes);
+
+typedef NTSTATUS (WINAPI* NtQuerySymbolicLinkObjectFunction) (
+ HANDLE LinkHandle,
+ PUNICODE_STRING LinkTarget,
+ PULONG ReturnedLength);
+
+typedef NTSTATUS (WINAPI* NtOpenSymbolicLinkObjectFunction) (
+ PHANDLE LinkHandle,
+ ACCESS_MASK DesiredAccess,
+ POBJECT_ATTRIBUTES ObjectAttributes);
+
+#define DIRECTORY_QUERY 0x0001
+#define DIRECTORY_TRAVERSE 0x0002
+#define DIRECTORY_CREATE_OBJECT 0x0004
+#define DIRECTORY_CREATE_SUBDIRECTORY 0x0008
+#define DIRECTORY_ALL_ACCESS 0x000F
+
+typedef NTSTATUS (WINAPI* NtCreateLowBoxToken)(
+ OUT PHANDLE token,
+ IN HANDLE original_handle,
+ IN ACCESS_MASK access,
+ IN POBJECT_ATTRIBUTES object_attribute,
+ IN PSID appcontainer_sid,
+ IN DWORD capabilityCount,
+ IN PSID_AND_ATTRIBUTES capabilities,
+ IN DWORD handle_count,
+ IN PHANDLE handles);
+
+typedef NTSTATUS(WINAPI *NtSetInformationProcess)(
+ IN HANDLE process_handle,
+ IN ULONG info_class,
+ IN PVOID process_information,
+ IN ULONG information_length);
+
+struct PROCESS_ACCESS_TOKEN {
+ HANDLE token;
+ HANDLE thread;
+};
+
+const unsigned int NtProcessInformationAccessToken = 9;
+
+// -----------------------------------------------------------------------
+// GDI OPM API and Supported Calls
+
+#define DXGKMDT_OPM_OMAC_SIZE 16
+#define DXGKMDT_OPM_128_BIT_RANDOM_NUMBER_SIZE 16
+#define DXGKMDT_OPM_ENCRYPTED_PARAMETERS_SIZE 256
+#define DXGKMDT_OPM_CONFIGURE_SETTING_DATA_SIZE 4056
+#define DXGKMDT_OPM_GET_INFORMATION_PARAMETERS_SIZE 4056
+#define DXGKMDT_OPM_REQUESTED_INFORMATION_SIZE 4076
+#define DXGKMDT_OPM_HDCP_KEY_SELECTION_VECTOR_SIZE 5
+#define DXGKMDT_OPM_PROTECTION_TYPE_SIZE 4
+
+enum DXGKMDT_CERTIFICATE_TYPE {
+ DXGKMDT_OPM_CERTIFICATE = 0,
+ DXGKMDT_COPP_CERTIFICATE = 1,
+ DXGKMDT_UAB_CERTIFICATE = 2,
+ DXGKMDT_FORCE_ULONG = 0xFFFFFFFF
+};
+
+enum DXGKMDT_OPM_VIDEO_OUTPUT_SEMANTICS {
+ DXGKMDT_OPM_VOS_COPP_SEMANTICS = 0,
+ DXGKMDT_OPM_VOS_OPM_SEMANTICS = 1
+};
+
+enum DXGKMDT_DPCP_PROTECTION_LEVEL {
+ DXGKMDT_OPM_DPCP_OFF = 0,
+ DXGKMDT_OPM_DPCP_ON = 1,
+ DXGKMDT_OPM_DPCP_FORCE_ULONG = 0x7fffffff
+};
+
+enum DXGKMDT_OPM_HDCP_PROTECTION_LEVEL {
+ DXGKMDT_OPM_HDCP_OFF = 0,
+ DXGKMDT_OPM_HDCP_ON = 1,
+ DXGKMDT_OPM_HDCP_FORCE_ULONG = 0x7fffffff
+};
+
+enum DXGKMDT_OPM_HDCP_FLAG {
+ DXGKMDT_OPM_HDCP_FLAG_NONE = 0x00,
+ DXGKMDT_OPM_HDCP_FLAG_REPEATER = 0x01
+};
+
+enum DXGKMDT_OPM_PROTECTION_TYPE {
+ DXGKMDT_OPM_PROTECTION_TYPE_OTHER = 0x80000000,
+ DXGKMDT_OPM_PROTECTION_TYPE_NONE = 0x00000000,
+ DXGKMDT_OPM_PROTECTION_TYPE_COPP_COMPATIBLE_HDCP = 0x00000001,
+ DXGKMDT_OPM_PROTECTION_TYPE_ACP = 0x00000002,
+ DXGKMDT_OPM_PROTECTION_TYPE_CGMSA = 0x00000004,
+ DXGKMDT_OPM_PROTECTION_TYPE_HDCP = 0x00000008,
+ DXGKMDT_OPM_PROTECTION_TYPE_DPCP = 0x00000010,
+ DXGKMDT_OPM_PROTECTION_TYPE_MASK = 0x8000001F
+};
+
+typedef void* OPM_PROTECTED_OUTPUT_HANDLE;
+
+struct DXGKMDT_OPM_ENCRYPTED_PARAMETERS {
+ BYTE abEncryptedParameters[DXGKMDT_OPM_ENCRYPTED_PARAMETERS_SIZE];
+};
+
+struct DXGKMDT_OPM_OMAC {
+ BYTE abOMAC[DXGKMDT_OPM_OMAC_SIZE];
+};
+
+struct DXGKMDT_OPM_CONFIGURE_PARAMETERS {
+ DXGKMDT_OPM_OMAC omac;
+ GUID guidSetting;
+ ULONG ulSequenceNumber;
+ ULONG cbParametersSize;
+ BYTE abParameters[DXGKMDT_OPM_CONFIGURE_SETTING_DATA_SIZE];
+};
+
+struct DXGKMDT_OPM_RANDOM_NUMBER {
+ BYTE abRandomNumber[DXGKMDT_OPM_128_BIT_RANDOM_NUMBER_SIZE];
+};
+
+struct DXGKMDT_OPM_GET_INFO_PARAMETERS {
+ DXGKMDT_OPM_OMAC omac;
+ DXGKMDT_OPM_RANDOM_NUMBER rnRandomNumber;
+ GUID guidInformation;
+ ULONG ulSequenceNumber;
+ ULONG cbParametersSize;
+ BYTE abParameters[DXGKMDT_OPM_GET_INFORMATION_PARAMETERS_SIZE];
+};
+
+struct DXGKMDT_OPM_REQUESTED_INFORMATION {
+ DXGKMDT_OPM_OMAC omac;
+ ULONG cbRequestedInformationSize;
+ BYTE abRequestedInformation[DXGKMDT_OPM_REQUESTED_INFORMATION_SIZE];
+};
+
+struct DXGKMDT_OPM_SET_PROTECTION_LEVEL_PARAMETERS {
+ ULONG ulProtectionType;
+ ULONG ulProtectionLevel;
+ ULONG Reserved;
+ ULONG Reserved2;
+};
+
+struct DXGKMDT_OPM_STANDARD_INFORMATION {
+ DXGKMDT_OPM_RANDOM_NUMBER rnRandomNumber;
+ ULONG ulStatusFlags;
+ ULONG ulInformation;
+ ULONG ulReserved;
+ ULONG ulReserved2;
+};
+
+typedef NTSTATUS(WINAPI* GetSuggestedOPMProtectedOutputArraySizeFunction)(
+ PUNICODE_STRING device_name,
+ DWORD* suggested_output_array_size);
+
+typedef NTSTATUS(WINAPI* CreateOPMProtectedOutputsFunction)(
+ PUNICODE_STRING device_name,
+ DXGKMDT_OPM_VIDEO_OUTPUT_SEMANTICS vos,
+ DWORD output_array_size,
+ DWORD* num_in_output_array,
+ OPM_PROTECTED_OUTPUT_HANDLE* output_array);
+
+typedef NTSTATUS(WINAPI* GetCertificateFunction)(
+ PUNICODE_STRING device_name,
+ DXGKMDT_CERTIFICATE_TYPE certificate_type,
+ BYTE* certificate,
+ ULONG certificate_length);
+
+typedef NTSTATUS(WINAPI* GetCertificateSizeFunction)(
+ PUNICODE_STRING device_name,
+ DXGKMDT_CERTIFICATE_TYPE certificate_type,
+ ULONG* certificate_length);
+
+typedef NTSTATUS(WINAPI* GetCertificateByHandleFunction)(
+ OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+ DXGKMDT_CERTIFICATE_TYPE certificate_type,
+ BYTE* certificate,
+ ULONG certificate_length);
+
+typedef NTSTATUS(WINAPI* GetCertificateSizeByHandleFunction)(
+ OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+ DXGKMDT_CERTIFICATE_TYPE certificate_type,
+ ULONG* certificate_length);
+
+typedef NTSTATUS(WINAPI* DestroyOPMProtectedOutputFunction)(
+ OPM_PROTECTED_OUTPUT_HANDLE protected_output);
+
+typedef NTSTATUS(WINAPI* ConfigureOPMProtectedOutputFunction)(
+ OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+ const DXGKMDT_OPM_CONFIGURE_PARAMETERS* parameters,
+ ULONG additional_parameters_size,
+ const BYTE* additional_parameters);
+
+typedef NTSTATUS(WINAPI* GetOPMInformationFunction)(
+ OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+ const DXGKMDT_OPM_GET_INFO_PARAMETERS* parameters,
+ DXGKMDT_OPM_REQUESTED_INFORMATION* requested_information);
+
+typedef NTSTATUS(WINAPI* GetOPMRandomNumberFunction)(
+ OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+ DXGKMDT_OPM_RANDOM_NUMBER* random_number);
+
+typedef NTSTATUS(WINAPI* SetOPMSigningKeyAndSequenceNumbersFunction)(
+ OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+ const DXGKMDT_OPM_ENCRYPTED_PARAMETERS* parameters);
+
+#endif // SANDBOX_WIN_SRC_NT_INTERNALS_H__
+
diff --git a/libchrome/sandbox/win/src/policy_engine_params.h b/libchrome/sandbox/win/src/policy_engine_params.h
new file mode 100644
index 0000000..fb4c00e
--- /dev/null
+++ b/libchrome/sandbox/win/src/policy_engine_params.h
@@ -0,0 +1,202 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_SRC_POLICY_ENGINE_PARAMS_H__
+#define SANDBOX_SRC_POLICY_ENGINE_PARAMS_H__
+
+#include <stdint.h>
+
+#include "sandbox/win/src/internal_types.h"
+#include "sandbox/win/src/nt_internals.h"
+#include "sandbox/win/src/sandbox_nt_util.h"
+
+// This header defines the classes that allow the low level policy to select
+// the input parameters. In order to better make sense of this header it is
+// recommended that you check policy_engine_opcodes.h first.
+
+namespace sandbox {
+
+// Models the set of interesting parameters of an intercepted system call
+// normally you don't create objects of this class directly, instead you
+// use the POLPARAMS_XXX macros.
+// For example, if an intercepted function has the following signature:
+//
+// NTSTATUS NtOpenFileFunction (PHANDLE FileHandle,
+// ACCESS_MASK DesiredAccess,
+// POBJECT_ATTRIBUTES ObjectAttributes,
+// PIO_STATUS_BLOCK IoStatusBlock,
+// ULONG ShareAccess,
+// ULONG OpenOptions);
+//
+// You could say that the following parameters are of interest to policy:
+//
+// POLPARAMS_BEGIN(open_params)
+// POLPARAM(DESIRED_ACCESS)
+// POLPARAM(OBJECT_NAME)
+// POLPARAM(SECURITY_DESCRIPTOR)
+// POLPARAM(IO_STATUS)
+// POLPARAM(OPEN_OPTIONS)
+// POLPARAMS_END;
+//
+// and the actual code will use this for defining the parameters:
+//
+// CountedParameterSet<open_params> p;
+// p[open_params::DESIRED_ACCESS] = ParamPickerMake(DesiredAccess);
+// p[open_params::OBJECT_NAME] =
+// ParamPickerMake(ObjectAttributes->ObjectName);
+// p[open_params::SECURITY_DESCRIPTOR] =
+// ParamPickerMake(ObjectAttributes->SecurityDescriptor);
+// p[open_params::IO_STATUS] = ParamPickerMake(IoStatusBlock);
+// p[open_params::OPEN_OPTIONS] = ParamPickerMake(OpenOptions);
+//
+// These will create a stack-allocated array of ParameterSet objects which
+// have each 1) the address of the parameter 2) a numeric id that encodes the
+// original C++ type. This allows the policy to treat any set of supported
+// argument types uniformly and with some type safety.
+//
+// TODO(cpu): support for unicode strings is not fully implemented yet and
+// other types will probably be added as well.
+class ParameterSet {
+ public:
+ ParameterSet() : real_type_(INVALID_TYPE), address_(NULL) {}
+
+  // Retrieve the stored parameter. If the type does not match uint32_t, fail.
+ bool Get(uint32_t* destination) const {
+ if (real_type_ != UINT32_TYPE) {
+ return false;
+ }
+ *destination = Void2TypePointerCopy<uint32_t>();
+ return true;
+ }
+
+ // Retrieve the stored parameter. If the type does not match void* fail.
+ bool Get(const void** destination) const {
+ if (real_type_ != VOIDPTR_TYPE) {
+ return false;
+ }
+ *destination = Void2TypePointerCopy<void*>();
+ return true;
+ }
+
+ // Retrieve the stored parameter. If the type does not match wchar_t* fail.
+ bool Get(const wchar_t** destination) const {
+ if (real_type_ != WCHAR_TYPE) {
+ return false;
+ }
+ *destination = Void2TypePointerCopy<const wchar_t*>();
+ return true;
+ }
+
+ // False if the parameter is not properly initialized.
+ bool IsValid() const {
+ return real_type_ != INVALID_TYPE;
+ }
+
+ protected:
+ // The constructor can only be called by derived types, which should
+ // safely provide the real_type and the address of the argument.
+ ParameterSet(ArgType real_type, const void* address)
+ : real_type_(real_type), address_(address) {
+ }
+
+ private:
+  // This template provides the same functionality as bits_cast but
+  // it works with pointers while the former works only with references.
+ template <typename T>
+ T Void2TypePointerCopy() const {
+ return *(reinterpret_cast<const T*>(address_));
+ }
+
+ ArgType real_type_;
+ const void* address_;
+};
+
+// To safely infer the type, we use a set of template specializations
+// in ParameterSetEx with a template function ParamPickerMake to do the
+// parameter type deduction.
+
+// Base template class. Not implemented so using unsupported types should
+// fail to compile.
+template <typename T>
+class ParameterSetEx : public ParameterSet {
+ public:
+ ParameterSetEx(const void* address);
+};
+
+template<>
+class ParameterSetEx<void const*> : public ParameterSet {
+ public:
+ ParameterSetEx(const void* address)
+ : ParameterSet(VOIDPTR_TYPE, address) {}
+};
+
+template<>
+class ParameterSetEx<void*> : public ParameterSet {
+ public:
+ ParameterSetEx(const void* address)
+ : ParameterSet(VOIDPTR_TYPE, address) {}
+};
+
+
+template<>
+class ParameterSetEx<wchar_t*> : public ParameterSet {
+ public:
+ ParameterSetEx(const void* address)
+ : ParameterSet(WCHAR_TYPE, address) {}
+};
+
+template<>
+class ParameterSetEx<wchar_t const*> : public ParameterSet {
+ public:
+ ParameterSetEx(const void* address)
+ : ParameterSet(WCHAR_TYPE, address) {}
+};
+
+template <>
+class ParameterSetEx<uint32_t> : public ParameterSet {
+ public:
+ ParameterSetEx(const void* address)
+ : ParameterSet(UINT32_TYPE, address) {}
+};
+
+template<>
+class ParameterSetEx<UNICODE_STRING> : public ParameterSet {
+ public:
+ ParameterSetEx(const void* address)
+ : ParameterSet(UNISTR_TYPE, address) {}
+};
+
+template <typename T>
+ParameterSet ParamPickerMake(T& parameter) {
+ return ParameterSetEx<T>(¶meter);
+};
+
+struct CountedParameterSetBase {
+ int count;
+ ParameterSet parameters[1];
+};
+
+// This template defines the actual list of policy parameters for a given
+// interception.
+// Warning: This template stores the address to the actual variables, in
+// other words, the values are not copied.
+template <typename T>
+struct CountedParameterSet {
+ CountedParameterSet() : count(T::PolParamLast) {}
+
+ ParameterSet& operator[](typename T::Args n) {
+ return parameters[n];
+ }
+
+ CountedParameterSetBase* GetBase() {
+ return reinterpret_cast<CountedParameterSetBase*>(this);
+ }
+
+ int count;
+ ParameterSet parameters[T::PolParamLast];
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_SRC_POLICY_ENGINE_PARAMS_H__
diff --git a/libchrome/sandbox/win/src/policy_params.h b/libchrome/sandbox/win/src/policy_params.h
new file mode 100644
index 0000000..e051d2b
--- /dev/null
+++ b/libchrome/sandbox/win/src/policy_params.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_SRC_POLICY_PARAMS_H__
+#define SANDBOX_SRC_POLICY_PARAMS_H__
+
+#include "sandbox/win/src/policy_engine_params.h"
+
+namespace sandbox {
+
+class ParameterSet;
+
+// Warning: The following macros store the address to the actual variables, in
+// other words, the values are not copied.
+#define POLPARAMS_BEGIN(type) class type { public: enum Args {
+#define POLPARAM(arg) arg,
+#define POLPARAMS_END(type) PolParamLast }; }; \
+ typedef sandbox::ParameterSet type##Array [type::PolParamLast];
+
+// Policy parameters for file open / create.
+POLPARAMS_BEGIN(OpenFile)
+ POLPARAM(NAME)
+ POLPARAM(BROKER) // TRUE if called from the broker.
+ POLPARAM(ACCESS)
+ POLPARAM(DISPOSITION)
+ POLPARAM(OPTIONS)
+POLPARAMS_END(OpenFile)
+
+// Policy parameter for name-based policies.
+POLPARAMS_BEGIN(FileName)
+ POLPARAM(NAME)
+ POLPARAM(BROKER) // TRUE if called from the broker.
+POLPARAMS_END(FileName)
+
+static_assert(OpenFile::NAME == static_cast<int>(FileName::NAME),
+ "to simplify fs policies");
+static_assert(OpenFile::BROKER == static_cast<int>(FileName::BROKER),
+ "to simplify fs policies");
+
+// Policy parameter for name-based policies.
+POLPARAMS_BEGIN(NameBased)
+ POLPARAM(NAME)
+POLPARAMS_END(NameBased)
+
+// Policy parameters for open event.
+POLPARAMS_BEGIN(OpenEventParams)
+ POLPARAM(NAME)
+ POLPARAM(ACCESS)
+POLPARAMS_END(OpenEventParams)
+
+// Policy Parameters for reg open / create.
+POLPARAMS_BEGIN(OpenKey)
+ POLPARAM(NAME)
+ POLPARAM(ACCESS)
+POLPARAMS_END(OpenKey)
+
+// Policy parameter for name-based policies.
+POLPARAMS_BEGIN(HandleTarget)
+ POLPARAM(NAME)
+ POLPARAM(TARGET)
+POLPARAMS_END(HandleTarget)
+
+
+} // namespace sandbox
+
+#endif // SANDBOX_SRC_POLICY_PARAMS_H__
diff --git a/libchrome/sandbox/win/src/sandbox.vcproj b/libchrome/sandbox/win/src/sandbox.vcproj
new file mode 100644
index 0000000..f206e01
--- /dev/null
+++ b/libchrome/sandbox/win/src/sandbox.vcproj
@@ -0,0 +1,658 @@
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+ ProjectType="Visual C++"
+ Version="8.00"
+ Name="sandbox"
+ ProjectGUID="{881F6A97-D539-4C48-B401-DF04385B2343}"
+ RootNamespace="sandbox"
+ Keyword="Win32Proj"
+ >
+ <Platforms>
+ <Platform
+ Name="Win32"
+ />
+ </Platforms>
+ <ToolFiles>
+ </ToolFiles>
+ <Configurations>
+ <Configuration
+ Name="Debug|Win32"
+ ConfigurationType="4"
+ InheritedPropertySheets="$(SolutionDir)..\build\debug.vsprops;$(SolutionDir)..\build\common.vsprops;$(SolutionDir)..\testing\using_gtest.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="2"
+ ForcedIncludeFiles="stdafx.h"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLibrarianTool"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ Description="Copy wow_helper to output directory"
+ CommandLine="copy $(ProjectDir)\..\wow_helper\wow_helper.exe $(OutDir) && copy $(ProjectDir)\..\wow_helper\wow_helper.pdb $(OutDir)"
+ />
+ </Configuration>
+ <Configuration
+ Name="Release|Win32"
+ ConfigurationType="4"
+ InheritedPropertySheets="$(SolutionDir)..\build\release.vsprops;$(SolutionDir)..\build\common.vsprops;$(SolutionDir)..\testing\using_gtest.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="0"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLibrarianTool"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ Description="Copy wow_helper to output directory"
+ CommandLine="copy $(ProjectDir)\..\wow_helper\wow_helper.exe $(OutDir) && copy $(ProjectDir)\..\wow_helper\wow_helper.pdb $(OutDir)"
+ />
+ </Configuration>
+ </Configurations>
+ <References>
+ </References>
+ <Files>
+ <Filter
+ Name="security"
+ >
+ <File
+ RelativePath=".\acl.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\acl.h"
+ >
+ </File>
+ <File
+ RelativePath=".\dep.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\dep.h"
+ >
+ </File>
+ <File
+ RelativePath=".\job.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\job.h"
+ >
+ </File>
+ <File
+ RelativePath=".\restricted_token.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\restricted_token.h"
+ >
+ </File>
+ <File
+ RelativePath=".\restricted_token_utils.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\restricted_token_utils.h"
+ >
+ </File>
+ <File
+ RelativePath=".\security_level.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sid.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\sid.h"
+ >
+ </File>
+ <File
+ RelativePath=".\window.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\window.h"
+ >
+ </File>
+ </Filter>
+ <Filter
+ Name="Interception"
+ >
+ <File
+ RelativePath=".\eat_resolver.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\eat_resolver.h"
+ >
+ </File>
+ <File
+ RelativePath=".\interception.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\interception.h"
+ >
+ </File>
+ <File
+ RelativePath=".\interception_agent.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\interception_agent.h"
+ >
+ </File>
+ <File
+ RelativePath=".\interception_internal.h"
+ >
+ </File>
+ <File
+ RelativePath=".\pe_image.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\pe_image.h"
+ >
+ </File>
+ <File
+ RelativePath=".\resolver.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\resolver.h"
+ >
+ </File>
+ <File
+ RelativePath=".\service_resolver.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\service_resolver.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sidestep_resolver.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\sidestep_resolver.h"
+ >
+ </File>
+ <File
+ RelativePath=".\target_interceptions.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\target_interceptions.h"
+ >
+ </File>
+ <File
+ RelativePath=".\Wow64.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\Wow64.h"
+ >
+ </File>
+ <Filter
+ Name="sidestep"
+ >
+ <File
+ RelativePath=".\sidestep\ia32_modrm_map.cpp"
+ >
+ </File>
+ <File
+ RelativePath=".\sidestep\ia32_opcode_map.cpp"
+ >
+ </File>
+ <File
+ RelativePath=".\sidestep\mini_disassembler.cpp"
+ >
+ </File>
+ <File
+ RelativePath=".\sidestep\mini_disassembler.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sidestep\mini_disassembler_types.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sidestep\preamble_patcher.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sidestep\preamble_patcher_with_stub.cpp"
+ >
+ </File>
+ </Filter>
+ </Filter>
+ <Filter
+ Name="nt_level"
+ >
+ <File
+ RelativePath=".\nt_internals.h"
+ >
+ </File>
+ <File
+ RelativePath=".\policy_target.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\policy_target.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sandbox_nt_types.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sandbox_nt_util.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\sandbox_nt_util.h"
+ >
+ </File>
+ </Filter>
+ <Filter
+ Name="Policy_handlers"
+ >
+ <File
+ RelativePath=".\filesystem_dispatcher.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\filesystem_dispatcher.h"
+ >
+ </File>
+ <File
+ RelativePath=".\filesystem_interception.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\filesystem_interception.h"
+ >
+ </File>
+ <File
+ RelativePath=".\filesystem_policy.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\filesystem_policy.h"
+ >
+ </File>
+ <File
+ RelativePath=".\named_pipe_dispatcher.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\named_pipe_dispatcher.h"
+ >
+ </File>
+ <File
+ RelativePath=".\named_pipe_interception.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\named_pipe_interception.h"
+ >
+ </File>
+ <File
+ RelativePath=".\named_pipe_policy.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\named_pipe_policy.h"
+ >
+ </File>
+ <File
+ RelativePath=".\policy_params.h"
+ >
+ </File>
+ <File
+ RelativePath=".\process_thread_dispatcher.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\process_thread_dispatcher.h"
+ >
+ </File>
+ <File
+ RelativePath=".\process_thread_interception.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\process_thread_interception.h"
+ >
+ </File>
+ <File
+ RelativePath=".\process_thread_policy.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\process_thread_policy.h"
+ >
+ </File>
+ <File
+ RelativePath=".\registry_dispatcher.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\registry_dispatcher.h"
+ >
+ </File>
+ <File
+ RelativePath=".\registry_interception.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\registry_interception.h"
+ >
+ </File>
+ <File
+ RelativePath=".\registry_policy.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\registry_policy.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sync_dispatcher.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\sync_dispatcher.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sync_interception.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\sync_interception.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sync_policy.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\sync_policy.h"
+ >
+ </File>
+ </Filter>
+ <Filter
+ Name="IPC"
+ >
+ <File
+ RelativePath=".\crosscall_client.h"
+ >
+ </File>
+ <File
+ RelativePath=".\crosscall_params.h"
+ >
+ </File>
+ <File
+ RelativePath=".\crosscall_server.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\crosscall_server.h"
+ >
+ </File>
+ <File
+ RelativePath=".\ipc_tags.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sharedmem_ipc_client.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\sharedmem_ipc_client.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sharedmem_ipc_server.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\sharedmem_ipc_server.h"
+ >
+ </File>
+ </Filter>
+ <Filter
+ Name="Policy_base"
+ >
+ <File
+ RelativePath=".\policy_engine_opcodes.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\policy_engine_opcodes.h"
+ >
+ </File>
+ <File
+ RelativePath=".\policy_engine_params.h"
+ >
+ </File>
+ <File
+ RelativePath=".\policy_engine_processor.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\policy_engine_processor.h"
+ >
+ </File>
+ <File
+ RelativePath=".\policy_low_level.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\policy_low_level.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sandbox_policy_base.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\sandbox_policy_base.h"
+ >
+ </File>
+ </Filter>
+ <File
+ RelativePath=".\broker_services.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\broker_services.h"
+ >
+ </File>
+ <File
+ RelativePath=".\internal_types.h"
+ >
+ </File>
+ <File
+ RelativePath=".\policy_broker.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\policy_broker.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sandbox.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\sandbox.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sandbox_factory.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sandbox_policy.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sandbox_types.h"
+ >
+ </File>
+ <File
+ RelativePath=".\sandbox_utils.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\sandbox_utils.h"
+ >
+ </File>
+ <File
+ RelativePath=".\shared_handles.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\shared_handles.h"
+ >
+ </File>
+ <File
+ RelativePath=".\stdafx.cc"
+ >
+ <FileConfiguration
+ Name="Debug|Win32"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="1"
+ />
+ </FileConfiguration>
+ <FileConfiguration
+ Name="Release|Win32"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="0"
+ />
+ </FileConfiguration>
+ </File>
+ <File
+ RelativePath=".\stdafx.h"
+ >
+ </File>
+ <File
+ RelativePath=".\target_process.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\target_process.h"
+ >
+ </File>
+ <File
+ RelativePath=".\target_services.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\target_services.h"
+ >
+ </File>
+ <File
+ RelativePath=".\win2k_threadpool.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\win2k_threadpool.h"
+ >
+ </File>
+ <File
+ RelativePath=".\win_utils.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\win_utils.h"
+ >
+ </File>
+ </Files>
+ <Globals>
+ </Globals>
+</VisualStudioProject>
diff --git a/libchrome/sandbox/win/src/sandbox_factory.h b/libchrome/sandbox/win/src/sandbox_factory.h
new file mode 100644
index 0000000..f5888ff
--- /dev/null
+++ b/libchrome/sandbox/win/src/sandbox_factory.h
@@ -0,0 +1,51 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_SRC_SANDBOX_FACTORY_H__
+#define SANDBOX_SRC_SANDBOX_FACTORY_H__
+
+#include "base/macros.h"
+#include "sandbox/win/src/sandbox.h"
+
+// SandboxFactory is a set of static methods to get access to the broker
+// or target services object. Only one of the two methods (GetBrokerServices,
+// GetTargetServices) will return a non-null pointer and that should be used
+// as the indication that the process is the broker or the target:
+//
+// BrokerServices* broker_services = SandboxFactory::GetBrokerServices();
+// if (NULL != broker_services) {
+// //we are the broker, call broker api here
+// broker_services->Init();
+// } else {
+// TargetServices* target_services = SandboxFactory::GetTargetServices();
+// if (NULL != target_services) {
+// //we are the target, call target api here
+// target_services->Init();
+// }
+//
+// The methods in this class are expected to be called from a single thread
+//
+// The Sandbox library needs to be linked against the main executable, but
+// sometimes the API calls are issued from a DLL that loads into the exe
+// process. These factory methods then need to be called from the main
+// exe and the interface pointers then can be safely passed to the DLL where
+// the Sandbox API calls are made.
+namespace sandbox {
+
+class SandboxFactory {
+ public:
+ // Returns the Broker API interface, returns NULL if this process is the
+ // target.
+ static BrokerServices* GetBrokerServices();
+
+ // Returns the Target API interface, returns NULL if this process is the
+ // broker.
+ static TargetServices* GetTargetServices();
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SandboxFactory);
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_SRC_SANDBOX_FACTORY_H__
diff --git a/libchrome/sandbox/win/src/sandbox_nt_types.h b/libchrome/sandbox/win/src/sandbox_nt_types.h
new file mode 100644
index 0000000..a4a88bb
--- /dev/null
+++ b/libchrome/sandbox/win/src/sandbox_nt_types.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_SRC_SANDBOX_NT_TYPES_H__
+#define SANDBOX_SRC_SANDBOX_NT_TYPES_H__
+
+#include "sandbox/win/src/nt_internals.h"
+
+namespace sandbox {
+
+struct NtExports {
+ NtAllocateVirtualMemoryFunction AllocateVirtualMemory;
+ NtCloseFunction Close;
+ NtDuplicateObjectFunction DuplicateObject;
+ NtFreeVirtualMemoryFunction FreeVirtualMemory;
+ NtMapViewOfSectionFunction MapViewOfSection;
+ NtProtectVirtualMemoryFunction ProtectVirtualMemory;
+ NtQueryInformationProcessFunction QueryInformationProcess;
+ NtQueryObjectFunction QueryObject;
+ NtQuerySectionFunction QuerySection;
+ NtQueryVirtualMemoryFunction QueryVirtualMemory;
+ NtUnmapViewOfSectionFunction UnmapViewOfSection;
+ RtlAllocateHeapFunction RtlAllocateHeap;
+ RtlAnsiStringToUnicodeStringFunction RtlAnsiStringToUnicodeString;
+ RtlCompareUnicodeStringFunction RtlCompareUnicodeString;
+ RtlCreateHeapFunction RtlCreateHeap;
+ RtlCreateUserThreadFunction RtlCreateUserThread;
+ RtlDestroyHeapFunction RtlDestroyHeap;
+ RtlFreeHeapFunction RtlFreeHeap;
+ _strnicmpFunction _strnicmp;
+ strlenFunction strlen;
+ wcslenFunction wcslen;
+ memcpyFunction memcpy;
+};
+
+// This is the value used for the ntdll level allocator.
+enum AllocationType {
+ NT_ALLOC,
+ NT_PAGE
+};
+
+} // namespace sandbox
+
+
+#endif // SANDBOX_SRC_SANDBOX_NT_TYPES_H__
diff --git a/libchrome/sandbox/win/src/sandbox_policy.h b/libchrome/sandbox/win/src/sandbox_policy.h
new file mode 100644
index 0000000..c0916ea
--- /dev/null
+++ b/libchrome/sandbox/win/src/sandbox_policy.h
@@ -0,0 +1,260 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_WIN_SRC_SANDBOX_POLICY_H_
+#define SANDBOX_WIN_SRC_SANDBOX_POLICY_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/strings/string16.h"
+#include "sandbox/win/src/sandbox_types.h"
+#include "sandbox/win/src/security_level.h"
+
+namespace sandbox {
+
+class TargetPolicy {
+ public:
+ // Windows subsystems that can have specific rules.
+  // Note: The process subsystem (SUBSYS_PROCESS) does not evaluate the request
+ // exactly like the CreateProcess API does. See the comment at the top of
+ // process_thread_dispatcher.cc for more details.
+ enum SubSystem {
+ SUBSYS_FILES, // Creation and opening of files and pipes.
+ SUBSYS_NAMED_PIPES, // Creation of named pipes.
+ SUBSYS_PROCESS, // Creation of child processes.
+ SUBSYS_REGISTRY, // Creation and opening of registry keys.
+ SUBSYS_SYNC, // Creation of named sync objects.
+ SUBSYS_WIN32K_LOCKDOWN // Win32K Lockdown related policy.
+ };
+
+ // Allowable semantics when a rule is matched.
+ enum Semantics {
+ FILES_ALLOW_ANY, // Allows open or create for any kind of access that
+ // the file system supports.
+ FILES_ALLOW_READONLY, // Allows open or create with read access only.
+ FILES_ALLOW_QUERY, // Allows access to query the attributes of a file.
+ FILES_ALLOW_DIR_ANY, // Allows open or create with directory semantics
+ // only.
+ NAMEDPIPES_ALLOW_ANY, // Allows creation of a named pipe.
+ PROCESS_MIN_EXEC, // Allows to create a process with minimal rights
+ // over the resulting process and thread handles.
+ // No other parameters besides the command line are
+ // passed to the child process.
+ PROCESS_ALL_EXEC, // Allows the creation of a process and return full
+ // access on the returned handles.
+ // This flag can be used only when the main token of
+ // the sandboxed application is at least INTERACTIVE.
+ EVENTS_ALLOW_ANY, // Allows the creation of an event with full access.
+    EVENTS_ALLOW_READONLY,  // Allows opening an event with synchronize access.
+ REG_ALLOW_READONLY, // Allows readonly access to a registry key.
+ REG_ALLOW_ANY, // Allows read and write access to a registry key.
+ FAKE_USER_GDI_INIT, // Fakes user32 and gdi32 initialization. This can
+ // be used to allow the DLLs to load and initialize
+ // even if the process cannot access that subsystem.
+ IMPLEMENT_OPM_APIS // Implements FAKE_USER_GDI_INIT and also exposes
+ // IPC calls to handle Output Protection Manager
+ // APIs.
+ };
+
+ // Increments the reference count of this object. The reference count must
+ // be incremented if this interface is given to another component.
+ virtual void AddRef() = 0;
+
+ // Decrements the reference count of this object. When the reference count
+ // is zero the object is automatically destroyed.
+ // Indicates that the caller is done with this interface. After calling
+ // release no other method should be called.
+ virtual void Release() = 0;
+
+ // Sets the security level for the target process' two tokens.
+ // This setting is permanent and cannot be changed once the target process is
+ // spawned.
+ // initial: the security level for the initial token. This is the token that
+ // is used by the process from the creation of the process until the moment
+ // the process calls TargetServices::LowerToken() or the process calls
+ // win32's RevertToSelf(). Once this happens the initial token is no longer
+ // available and the lockdown token is in effect. Using an initial token is
+ // not compatible with AppContainer, see SetAppContainer.
+ // lockdown: the security level for the token that comes into force after the
+ // process calls TargetServices::LowerToken() or the process calls
+ // RevertToSelf(). See the explanation of each level in the TokenLevel
+ // definition.
+  // Return value: SBOX_ALL_OK if the setting succeeds, an error code otherwise.
+ // Returns false if the lockdown value is more permissive than the initial
+ // value.
+ //
+ // Important: most of the sandbox-provided security relies on this single
+ // setting. The caller should strive to set the lockdown level as restricted
+ // as possible.
+ virtual ResultCode SetTokenLevel(TokenLevel initial, TokenLevel lockdown) = 0;
+
+ // Returns the initial token level.
+ virtual TokenLevel GetInitialTokenLevel() const = 0;
+
+ // Returns the lockdown token level.
+ virtual TokenLevel GetLockdownTokenLevel() const = 0;
+
+ // Sets the security level of the Job Object to which the target process will
+ // belong. This setting is permanent and cannot be changed once the target
+ // process is spawned. The job controls the global security settings which
+ // can not be specified in the token security profile.
+ // job_level: the security level for the job. See the explanation of each
+ // level in the JobLevel definition.
+ // ui_exceptions: specify what specific rights that are disabled in the
+ // chosen job_level that need to be granted. Use this parameter to avoid
+ // selecting the next permissive job level unless you need all the rights
+ // that are granted in such level.
+ // The exceptions can be specified as a combination of the following
+ // constants:
+ // JOB_OBJECT_UILIMIT_HANDLES : grant access to all user-mode handles. These
+ // include windows, icons, menus and various GDI objects. In addition the
+ // target process can set hooks, and broadcast messages to other processes
+ // that belong to the same desktop.
+ // JOB_OBJECT_UILIMIT_READCLIPBOARD : grant read-only access to the clipboard.
+ // JOB_OBJECT_UILIMIT_WRITECLIPBOARD : grant write access to the clipboard.
+ // JOB_OBJECT_UILIMIT_SYSTEMPARAMETERS : allow changes to the system-wide
+ // parameters as defined by the Win32 call SystemParametersInfo().
+ // JOB_OBJECT_UILIMIT_DISPLAYSETTINGS : allow programmatic changes to the
+ // display settings.
+ // JOB_OBJECT_UILIMIT_GLOBALATOMS : allow access to the global atoms table.
+ // JOB_OBJECT_UILIMIT_DESKTOP : allow the creation of new desktops.
+ // JOB_OBJECT_UILIMIT_EXITWINDOWS : allow the call to ExitWindows().
+ //
+  // Return value: SBOX_ALL_OK if the setting succeeds, an error code otherwise.
+ //
+ // Note: JOB_OBJECT_XXXX constants are defined in winnt.h and documented at
+ // length in:
+ // http://msdn2.microsoft.com/en-us/library/ms684152.aspx
+ //
+ // Note: the recommended level is JOB_RESTRICTED or JOB_LOCKDOWN.
+ virtual ResultCode SetJobLevel(JobLevel job_level,
+ uint32_t ui_exceptions) = 0;
+
+ // Returns the job level.
+ virtual JobLevel GetJobLevel() const = 0;
+
+ // Sets a hard limit on the size of the commit set for the sandboxed process.
+ // If the limit is reached, the process will be terminated with
+ // SBOX_FATAL_MEMORY_EXCEEDED (7012).
+ virtual ResultCode SetJobMemoryLimit(size_t memory_limit) = 0;
+
+ // Specifies the desktop on which the application is going to run. If the
+ // desktop does not exist, it will be created. If alternate_winstation is
+ // set to true, the desktop will be created on an alternate window station.
+ virtual ResultCode SetAlternateDesktop(bool alternate_winstation) = 0;
+
+ // Returns the name of the alternate desktop used. If an alternate window
+ // station is specified, the name is prepended by the window station name,
+ // followed by a backslash.
+ virtual base::string16 GetAlternateDesktop() const = 0;
+
+ // Precreates the desktop and window station, if any.
+ virtual ResultCode CreateAlternateDesktop(bool alternate_winstation) = 0;
+
+ // Destroys the desktop and windows station.
+ virtual void DestroyAlternateDesktop() = 0;
+
+ // Sets the integrity level of the process in the sandbox. Both the initial
+ // token and the main token will be affected by this. If the integrity level
+ // is set to a level higher than the current level, the sandbox will fail
+ // to start.
+ virtual ResultCode SetIntegrityLevel(IntegrityLevel level) = 0;
+
+ // Returns the initial integrity level used.
+ virtual IntegrityLevel GetIntegrityLevel() const = 0;
+
+ // Sets the integrity level of the process in the sandbox. The integrity level
+ // will not take effect before you call LowerToken. User Interface Privilege
+ // Isolation is not affected by this setting and will remain off for the
+ // process in the sandbox. If the integrity level is set to a level higher
+ // than the current level, the sandbox will fail to start.
+ virtual ResultCode SetDelayedIntegrityLevel(IntegrityLevel level) = 0;
+
+ // Sets a capability to be enabled for the sandboxed process' AppContainer.
+ virtual ResultCode SetCapability(const wchar_t* sid) = 0;
+
+ // Sets the LowBox token for sandboxed process. This is mutually exclusive
+ // with SetAppContainer method.
+ virtual ResultCode SetLowBox(const wchar_t* sid) = 0;
+
+ // Sets the mitigations enabled when the process is created. Most of these
+ // are implemented as attributes passed via STARTUPINFOEX. So they take
+ // effect before any thread in the target executes. The declaration of
+ // MitigationFlags is followed by a detailed description of each flag.
+ virtual ResultCode SetProcessMitigations(MitigationFlags flags) = 0;
+
+ // Returns the currently set mitigation flags.
+ virtual MitigationFlags GetProcessMitigations() = 0;
+
+ // Sets process mitigation flags that don't take effect before the call to
+ // LowerToken().
+ virtual ResultCode SetDelayedProcessMitigations(MitigationFlags flags) = 0;
+
+ // Returns the currently set delayed mitigation flags.
+ virtual MitigationFlags GetDelayedProcessMitigations() const = 0;
+
+ // Disconnect the target from CSRSS when TargetServices::LowerToken() is
+ // called inside the target.
+ virtual void SetDisconnectCsrss() = 0;
+
+ // Sets the interceptions to operate in strict mode. By default, interceptions
+ // are performed in "relaxed" mode, where if something inside NTDLL.DLL is
+ // already patched we attempt to intercept it anyway. Setting interceptions
+ // to strict mode means that when we detect that the function is patched we'll
+ // refuse to perform the interception.
+ virtual void SetStrictInterceptions() = 0;
+
+ // Set the handles the target process should inherit for stdout and
+ // stderr. The handles the caller passes must remain valid for the
+ // lifetime of the policy object. This only has an effect on
+ // Windows Vista and later versions. These methods accept pipe and
+ // file handles, but not console handles.
+ virtual ResultCode SetStdoutHandle(HANDLE handle) = 0;
+ virtual ResultCode SetStderrHandle(HANDLE handle) = 0;
+
+ // Adds a policy rule effective for processes spawned using this policy.
+ // subsystem: One of the above enumerated windows subsystems.
+ // semantics: One of the above enumerated FileSemantics.
+ // pattern: A specific full path or a full path with wildcard patterns.
+ // The valid wildcards are:
+ // '*' : Matches zero or more character. Only one in series allowed.
+ // '?' : Matches a single character. One or more in series are allowed.
+ // Examples:
+ // "c:\\documents and settings\\vince\\*.dmp"
+ // "c:\\documents and settings\\*\\crashdumps\\*.dmp"
+ // "c:\\temp\\app_log_?????_chrome.txt"
+ virtual ResultCode AddRule(SubSystem subsystem, Semantics semantics,
+ const wchar_t* pattern) = 0;
+
+ // Adds a dll that will be unloaded in the target process before it gets
+ // a chance to initialize itself. Typically, dlls that cause the target
+ // to crash go here.
+ virtual ResultCode AddDllToUnload(const wchar_t* dll_name) = 0;
+
+ // Adds a handle that will be closed in the target process after lockdown.
+ // A NULL value for handle_name indicates all handles of the specified type.
+ // An empty string for handle_name indicates the handle is unnamed.
+ virtual ResultCode AddKernelObjectToClose(const wchar_t* handle_type,
+ const wchar_t* handle_name) = 0;
+
+ // Adds a handle that will be shared with the target process. Does not take
+ // ownership of the handle.
+ virtual void AddHandleToShare(HANDLE handle) = 0;
+
+ // Locks down the default DACL of the created lockdown and initial tokens
+ // to restrict what other processes are allowed to access a process' kernel
+ // resources.
+ virtual void SetLockdownDefaultDacl() = 0;
+
+ // Enable OPM API redirection when in Win32k lockdown.
+ virtual void SetEnableOPMRedirection() = 0;
+ // Enable OPM API emulation when in Win32k lockdown.
+ virtual bool GetEnableOPMRedirection() = 0;
+};
+
+} // namespace sandbox
+
+
+#endif // SANDBOX_WIN_SRC_SANDBOX_POLICY_H_
diff --git a/libchrome/sandbox/win/src/sandbox_types.h b/libchrome/sandbox/win/src/sandbox_types.h
new file mode 100644
index 0000000..919086a
--- /dev/null
+++ b/libchrome/sandbox/win/src/sandbox_types.h
@@ -0,0 +1,151 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_WIN_SRC_SANDBOX_TYPES_H_
+#define SANDBOX_WIN_SRC_SANDBOX_TYPES_H_
+
+#include "base/process/launch.h"
+
+namespace sandbox {
+
+// Operation result codes returned by the sandbox API.
+//
+// Note: These codes are listed in a histogram and any new codes should be added
+// at the end.
+//
+enum ResultCode : int {
+ SBOX_ALL_OK = 0,
+ // Error is originating on the win32 layer. Call GetLastError() for more
+ // information.
+ SBOX_ERROR_GENERIC = 1,
+ // An invalid combination of parameters was given to the API.
+ SBOX_ERROR_BAD_PARAMS = 2,
+ // The desired operation is not supported at this time.
+ SBOX_ERROR_UNSUPPORTED = 3,
+ // The request requires more memory than is allocated or available.
+ SBOX_ERROR_NO_SPACE = 4,
+ // The ipc service requested does not exist.
+ SBOX_ERROR_INVALID_IPC = 5,
+ // The ipc service did not complete.
+ SBOX_ERROR_FAILED_IPC = 6,
+ // The requested handle was not found.
+ SBOX_ERROR_NO_HANDLE = 7,
+ // This function was not expected to be called at this time.
+ SBOX_ERROR_UNEXPECTED_CALL = 8,
+ // WaitForAllTargets is already called.
+ SBOX_ERROR_WAIT_ALREADY_CALLED = 9,
+ // A channel error prevented DoCall from executing.
+ SBOX_ERROR_CHANNEL_ERROR = 10,
+ // Failed to create the alternate desktop.
+ SBOX_ERROR_CANNOT_CREATE_DESKTOP = 11,
+ // Failed to create the alternate window station.
+ SBOX_ERROR_CANNOT_CREATE_WINSTATION = 12,
+ // Failed to switch back to the interactive window station.
+ SBOX_ERROR_FAILED_TO_SWITCH_BACK_WINSTATION = 13,
+ // The supplied AppContainer is not valid.
+ SBOX_ERROR_INVALID_APP_CONTAINER = 14,
+ // The supplied capability is not valid.
+ SBOX_ERROR_INVALID_CAPABILITY = 15,
+ // There is a failure initializing the AppContainer.
+ SBOX_ERROR_CANNOT_INIT_APPCONTAINER = 16,
+ // Initializing or updating ProcThreadAttributes failed.
+ SBOX_ERROR_PROC_THREAD_ATTRIBUTES = 17,
+ // Error in creating process.
+ SBOX_ERROR_CREATE_PROCESS = 18,
+ // Failure calling delegate PreSpawnTarget.
+ SBOX_ERROR_DELEGATE_PRE_SPAWN = 19,
+ // Could not assign process to job object.
+ SBOX_ERROR_ASSIGN_PROCESS_TO_JOB_OBJECT = 20,
+ // Could not set the thread token in the target process.
+ SBOX_ERROR_SET_THREAD_TOKEN = 21,
+ // Could not get thread context of new process.
+ SBOX_ERROR_GET_THREAD_CONTEXT = 22,
+ // Could not duplicate target info of new process.
+ SBOX_ERROR_DUPLICATE_TARGET_INFO = 23,
+ // Could not set low box token.
+ SBOX_ERROR_SET_LOW_BOX_TOKEN = 24,
+ // Could not create file mapping for IPC dispatcher.
+ SBOX_ERROR_CREATE_FILE_MAPPING = 25,
+ // Could not duplicate shared section into target process for IPC dispatcher.
+ SBOX_ERROR_DUPLICATE_SHARED_SECTION = 26,
+ // Could not map view of shared memory in broker.
+ SBOX_ERROR_MAP_VIEW_OF_SHARED_SECTION = 27,
+ // Could not apply ASLR mitigations to target process.
+ SBOX_ERROR_APPLY_ASLR_MITIGATIONS = 28,
+ // Could not setup basic interceptions.
+ SBOX_ERROR_SETUP_BASIC_INTERCEPTIONS = 29,
+ // Could not setup one of the required interception services.
+ SBOX_ERROR_SETUP_INTERCEPTION_SERVICE = 30,
+ // Could not initialize interceptions. This usually means 3rd party software
+ // is stomping on our hooks, or can sometimes mean the syscall format has
+ // changed.
+ SBOX_ERROR_INITIALIZE_INTERCEPTIONS = 31,
+ // Could not setup the imports for ntdll in target process.
+ SBOX_ERROR_SETUP_NTDLL_IMPORTS = 32,
+ // Could not setup the handle closer in target process.
+ SBOX_ERROR_SETUP_HANDLE_CLOSER = 33,
+ // Cannot get the current Window Station.
+ SBOX_ERROR_CANNOT_GET_WINSTATION = 34,
+ // Cannot query the security attributes of the current Window Station.
+ SBOX_ERROR_CANNOT_QUERY_WINSTATION_SECURITY = 35,
+ // Cannot get the current Desktop.
+ SBOX_ERROR_CANNOT_GET_DESKTOP = 36,
+ // Cannot query the security attributes of the current Desktop.
+ SBOX_ERROR_CANNOT_QUERY_DESKTOP_SECURITY = 37,
+ // Cannot setup the interception manager config buffer.
+ SBOX_ERROR_CANNOT_SETUP_INTERCEPTION_CONFIG_BUFFER = 38,
+ // Cannot copy data to the child process.
+ SBOX_ERROR_CANNOT_COPY_DATA_TO_CHILD = 39,
+ // Cannot setup the interception thunk.
+ SBOX_ERROR_CANNOT_SETUP_INTERCEPTION_THUNK = 40,
+ // Cannot resolve the interception thunk.
+ SBOX_ERROR_CANNOT_RESOLVE_INTERCEPTION_THUNK = 41,
+ // Cannot write interception thunk to child process.
+ SBOX_ERROR_CANNOT_WRITE_INTERCEPTION_THUNK = 42,
+ // Placeholder for last item of the enum.
+ SBOX_ERROR_LAST
+};
+
+// If the sandbox cannot create a secure environment for the target, the
+// target will be forcibly terminated. These are the process exit codes.
+enum TerminationCodes {
+ SBOX_FATAL_INTEGRITY = 7006, // Could not set the integrity level.
+ SBOX_FATAL_DROPTOKEN = 7007, // Could not lower the token.
+ SBOX_FATAL_FLUSHANDLES = 7008, // Failed to flush registry handles.
+ SBOX_FATAL_CACHEDISABLE = 7009, // Failed to forbid HKCU caching.
+ SBOX_FATAL_CLOSEHANDLES = 7010, // Failed to close pending handles.
+ SBOX_FATAL_MITIGATION = 7011, // Could not set the mitigation policy.
+ SBOX_FATAL_MEMORY_EXCEEDED = 7012, // Exceeded the job memory limit.
+ SBOX_FATAL_WARMUP = 7013, // Failed to warmup.
+ SBOX_FATAL_LAST
+};
+
+class BrokerServices;
+class TargetServices;
+
+// Contains the pointer to a target or broker service.
+struct SandboxInterfaceInfo {
+ BrokerServices* broker_services;
+ TargetServices* target_services;
+};
+
+#if SANDBOX_EXPORTS
+#define SANDBOX_INTERCEPT extern "C" __declspec(dllexport)
+#else
+#define SANDBOX_INTERCEPT extern "C"
+#endif
+
+enum InterceptionType {
+ INTERCEPTION_INVALID = 0,
+ INTERCEPTION_SERVICE_CALL, // Trampoline of an NT native call
+ INTERCEPTION_EAT,
+ INTERCEPTION_SIDESTEP, // Preamble patch
+ INTERCEPTION_SMART_SIDESTEP, // Preamble patch but bypass internal calls
+ INTERCEPTION_UNLOAD_MODULE, // Unload the module (don't patch)
+ INTERCEPTION_LAST // Placeholder for last item in the enumeration
+};
+
+} // namespace sandbox
+
+#endif // SANDBOX_WIN_SRC_SANDBOX_TYPES_H_
diff --git a/libchrome/sandbox/win/src/security_level.h b/libchrome/sandbox/win/src/security_level.h
new file mode 100644
index 0000000..d8524c1
--- /dev/null
+++ b/libchrome/sandbox/win/src/security_level.h
@@ -0,0 +1,225 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_SRC_SECURITY_LEVEL_H_
+#define SANDBOX_SRC_SECURITY_LEVEL_H_
+
+#include <stdint.h>
+
+namespace sandbox {
+
+// List of all the integrity levels supported in the sandbox. This is used
+// only on Windows Vista. You can't set the integrity level of the process
+// in the sandbox to a level higher than yours.
+enum IntegrityLevel {
+ INTEGRITY_LEVEL_SYSTEM,
+ INTEGRITY_LEVEL_HIGH,
+ INTEGRITY_LEVEL_MEDIUM,
+ INTEGRITY_LEVEL_MEDIUM_LOW,
+ INTEGRITY_LEVEL_LOW,
+ INTEGRITY_LEVEL_BELOW_LOW,
+ INTEGRITY_LEVEL_UNTRUSTED,
+ INTEGRITY_LEVEL_LAST
+};
+
+// The Token level specifies a set of security profiles designed to
+// provide the bulk of the security of sandbox.
+//
+// TokenLevel |Restricting |Deny Only |Privileges|
+// |Sids |Sids | |
+// ----------------------------|--------------|----------------|----------|
+// USER_LOCKDOWN | Null Sid | All | None |
+// ----------------------------|--------------|----------------|----------|
+// USER_RESTRICTED | RESTRICTED | All | Traverse |
+// ----------------------------|--------------|----------------|----------|
+// USER_LIMITED | Users | All except: | Traverse |
+// | Everyone | Users | |
+// | RESTRICTED | Everyone | |
+// | | Interactive | |
+// ----------------------------|--------------|----------------|----------|
+// USER_INTERACTIVE | Users | All except: | Traverse |
+// | Everyone | Users | |
+// | RESTRICTED | Everyone | |
+// | Owner | Interactive | |
+// | | Local | |
+// | | Authent-users | |
+// | | User | |
+// ----------------------------|--------------|----------------|----------|
+// USER_NON_ADMIN | None | All except: | Traverse |
+// | | Users | |
+// | | Everyone | |
+// | | Interactive | |
+// | | Local | |
+// | | Authent-users | |
+// | | User | |
+// ----------------------------|--------------|----------------|----------|
+// USER_RESTRICTED_SAME_ACCESS | All | None | All |
+// ----------------------------|--------------|----------------|----------|
+// USER_UNPROTECTED | None | None | All |
+// ----------------------------|--------------|----------------|----------|
+//
+// The above restrictions are actually a transformation that is applied to
+// the existing broker process token. The resulting token that will be
+// applied to the target process depends both on the token level selected
+// and on the broker token itself.
+//
+// The LOCKDOWN and RESTRICTED levels are designed to allow access to
+// almost nothing that has security associated with it, and they are the
+// recommended levels to run sandboxed code, especially if there is a
+// chance that the broker process might be started by a user that belongs
+// to the Admins or Power Users groups.
+enum TokenLevel {
+ USER_LOCKDOWN = 0,
+ USER_RESTRICTED,
+ USER_LIMITED,
+ USER_INTERACTIVE,
+ USER_NON_ADMIN,
+ USER_RESTRICTED_SAME_ACCESS,
+ USER_UNPROTECTED,
+ USER_LAST
+};
+
+// The Job level specifies a set of decreasing security profiles for the
+// Job object that the target process will be placed into.
+// This table summarizes the security associated with each level:
+//
+// JobLevel |General |Quota |
+// |restrictions |restrictions |
+// -----------------|---------------------------------- |--------------------|
+// JOB_NONE | No job is assigned to the | None |
+// | sandboxed process. | |
+// -----------------|---------------------------------- |--------------------|
+// JOB_UNPROTECTED | None | *Kill on Job close.|
+// -----------------|---------------------------------- |--------------------|
+// JOB_INTERACTIVE | *Forbid system-wide changes using | |
+// | SystemParametersInfo(). | *Kill on Job close.|
+// | *Forbid the creation/switch of | |
+// | Desktops. | |
+// | *Forbids calls to ExitWindows(). | |
+// -----------------|---------------------------------- |--------------------|
+// JOB_LIMITED_USER | Same as INTERACTIVE_USER plus: | *One active process|
+// | *Forbid changes to the display | limit. |
+// | settings. | *Kill on Job close.|
+// -----------------|---------------------------------- |--------------------|
+// JOB_RESTRICTED | Same as LIMITED_USER plus: | *One active process|
+// | * No read/write to the clipboard. | limit. |
+// | * No access to User Handles that | *Kill on Job close.|
+// | belong to other processes. | |
+// | * Forbid message broadcasts. | |
+// | * Forbid setting global hooks. | |
+// | * No access to the global atoms | |
+// | table. | |
+// -----------------|-----------------------------------|--------------------|
+// JOB_LOCKDOWN | Same as RESTRICTED | *One active process|
+// | | limit. |
+// | | *Kill on Job close.|
+// | | *Kill on unhandled |
+// | | exception. |
+// | | |
+// In the context of the above table, 'user handles' refers to the handles of
+// windows, bitmaps, menus, etc. Files, threads and registry handles are kernel
+// handles and are not affected by the job level settings.
+enum JobLevel {
+ JOB_LOCKDOWN = 0,
+ JOB_RESTRICTED,
+ JOB_LIMITED_USER,
+ JOB_INTERACTIVE,
+ JOB_UNPROTECTED,
+ JOB_NONE
+};
+
+// These flags correspond to various process-level mitigations (eg. ASLR and
+// DEP). Most are implemented via UpdateProcThreadAttribute() plus flags for
+// the PROC_THREAD_ATTRIBUTE_MITIGATION_POLICY attribute argument; documented
+// here: http://msdn.microsoft.com/en-us/library/windows/desktop/ms686880
+// Some mitigations are implemented directly by the sandbox or emulated to
+// the greatest extent possible when not directly supported by the OS.
+// Flags that are unsupported for the target OS will be silently ignored.
+// Flags that are invalid for their application (pre or post startup) will
+// return SBOX_ERROR_BAD_PARAMS.
+typedef uint64_t MitigationFlags;
+
+// Permanently enables DEP for the target process. Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_DEP_ENABLE.
+const MitigationFlags MITIGATION_DEP = 0x00000001;
+
+// Permanently Disables ATL thunk emulation when DEP is enabled. Valid
+// only when MITIGATION_DEP is passed. Corresponds to not passing
+// PROCESS_CREATION_MITIGATION_POLICY_DEP_ATL_THUNK_ENABLE.
+const MitigationFlags MITIGATION_DEP_NO_ATL_THUNK = 0x00000002;
+
+// Enables Structured exception handling override prevention. Must be
+// enabled prior to process start. Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_SEHOP_ENABLE.
+const MitigationFlags MITIGATION_SEHOP = 0x00000004;
+
+// Forces ASLR on all images in the child process. Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_FORCE_RELOCATE_IMAGES_ALWAYS_ON .
+const MitigationFlags MITIGATION_RELOCATE_IMAGE = 0x00000008;
+
+// Refuses to load DLLs that cannot support ASLR. Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_FORCE_RELOCATE_IMAGES_ALWAYS_ON_REQ_RELOCS.
+const MitigationFlags MITIGATION_RELOCATE_IMAGE_REQUIRED = 0x00000010;
+
+// Terminates the process on Windows heap corruption. Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_HEAP_TERMINATE_ALWAYS_ON.
+const MitigationFlags MITIGATION_HEAP_TERMINATE = 0x00000020;
+
+// Sets a random lower bound as the minimum user address. Must be
+// enabled prior to process start. On 32-bit processes this is
+// emulated to a much smaller degree. Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_BOTTOM_UP_ASLR_ALWAYS_ON.
+const MitigationFlags MITIGATION_BOTTOM_UP_ASLR = 0x00000040;
+
+// Increases the randomness range of bottom-up ASLR to up to 1TB. Must be
+// enabled prior to process start and with MITIGATION_BOTTOM_UP_ASLR.
+// Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_HIGH_ENTROPY_ASLR_ALWAYS_ON
+const MitigationFlags MITIGATION_HIGH_ENTROPY_ASLR = 0x00000080;
+
+// Immediately raises an exception on a bad handle reference. Must be
+// enabled after startup. Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_STRICT_HANDLE_CHECKS_ALWAYS_ON.
+const MitigationFlags MITIGATION_STRICT_HANDLE_CHECKS = 0x00000100;
+
+// Prevents the process from making Win32k calls. Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_WIN32K_SYSTEM_CALL_DISABLE_ALWAYS_ON.
+const MitigationFlags MITIGATION_WIN32K_DISABLE = 0x00000200;
+
+// Prevents certain built-in third party extension points from being used.
+// - App_Init DLLs
+// - Winsock Layered Service Providers (LSPs)
+// - Global Windows Hooks (NOT thread-targeted hooks)
+// - Legacy Input Method Editors (IMEs).
+// I.e.: Disable legacy hooking mechanisms. Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_EXTENSION_POINT_DISABLE_ALWAYS_ON.
+const MitigationFlags MITIGATION_EXTENSION_POINT_DISABLE = 0x00000400;
+
+// Prevents the process from loading non-system fonts into GDI.
+// Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_FONT_DISABLE_ALWAYS_ON
+const MitigationFlags MITIGATION_NONSYSTEM_FONT_DISABLE = 0x00000800;
+
+// Sets the DLL search order to LOAD_LIBRARY_SEARCH_DEFAULT_DIRS. Additional
+// directories can be added via the Windows AddDllDirectory() function.
+// http://msdn.microsoft.com/en-us/library/windows/desktop/hh310515
+// Must be enabled after startup.
+const MitigationFlags MITIGATION_DLL_SEARCH_ORDER = 0x00000001ULL << 32;
+
+// Changes the mandatory integrity level policy on the current process' token
+// to enable no-read and no-execute up. This prevents a lower IL process from
+// opening the process token for impersonate/duplicate/assignment.
+const MitigationFlags MITIGATION_HARDEN_TOKEN_IL_POLICY = 0x00000001ULL << 33;
+
+// Blocks mapping of images from remote devices. Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_IMAGE_LOAD_NO_REMOTE_ALWAYS_ON.
+const MitigationFlags MITIGATION_IMAGE_LOAD_NO_REMOTE = 0x00000001ULL << 52;
+
+// Blocks mapping of images that have the low mandatory label. Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_IMAGE_LOAD_NO_LOW_LABEL_ALWAYS_ON.
+const MitigationFlags MITIGATION_IMAGE_LOAD_NO_LOW_LABEL = 0x00000001ULL << 56;
+
+} // namespace sandbox
+
+#endif // SANDBOX_SRC_SECURITY_LEVEL_H_
diff --git a/libchrome/sandbox/win/src/sidestep/ia32_modrm_map.cpp b/libchrome/sandbox/win/src/sidestep/ia32_modrm_map.cpp
new file mode 100644
index 0000000..89bc189
--- /dev/null
+++ b/libchrome/sandbox/win/src/sidestep/ia32_modrm_map.cpp
@@ -0,0 +1,92 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Table of relevant information about how to decode the ModR/M byte.
+// Based on information in the IA-32 Intel Architecture
+// Software Developer's Manual Volume 2: Instruction Set Reference.
+
+#include "sandbox/win/src/sidestep/mini_disassembler.h"
+#include "sandbox/win/src/sidestep/mini_disassembler_types.h"
+
+namespace sidestep {
+
+const ModrmEntry MiniDisassembler::s_ia16_modrm_map_[] = {
+// mod == 00
+ /* r/m == 000 */ { false, false, OS_ZERO },
+ /* r/m == 001 */ { false, false, OS_ZERO },
+ /* r/m == 010 */ { false, false, OS_ZERO },
+ /* r/m == 011 */ { false, false, OS_ZERO },
+ /* r/m == 100 */ { false, false, OS_ZERO },
+ /* r/m == 101 */ { false, false, OS_ZERO },
+ /* r/m == 110 */ { true, false, OS_WORD },
+ /* r/m == 111 */ { false, false, OS_ZERO },
+// mod == 01
+ /* r/m == 000 */ { true, false, OS_BYTE },
+ /* r/m == 001 */ { true, false, OS_BYTE },
+ /* r/m == 010 */ { true, false, OS_BYTE },
+ /* r/m == 011 */ { true, false, OS_BYTE },
+ /* r/m == 100 */ { true, false, OS_BYTE },
+ /* r/m == 101 */ { true, false, OS_BYTE },
+ /* r/m == 110 */ { true, false, OS_BYTE },
+ /* r/m == 111 */ { true, false, OS_BYTE },
+// mod == 10
+ /* r/m == 000 */ { true, false, OS_WORD },
+ /* r/m == 001 */ { true, false, OS_WORD },
+ /* r/m == 010 */ { true, false, OS_WORD },
+ /* r/m == 011 */ { true, false, OS_WORD },
+ /* r/m == 100 */ { true, false, OS_WORD },
+ /* r/m == 101 */ { true, false, OS_WORD },
+ /* r/m == 110 */ { true, false, OS_WORD },
+ /* r/m == 111 */ { true, false, OS_WORD },
+// mod == 11
+ /* r/m == 000 */ { false, false, OS_ZERO },
+ /* r/m == 001 */ { false, false, OS_ZERO },
+ /* r/m == 010 */ { false, false, OS_ZERO },
+ /* r/m == 011 */ { false, false, OS_ZERO },
+ /* r/m == 100 */ { false, false, OS_ZERO },
+ /* r/m == 101 */ { false, false, OS_ZERO },
+ /* r/m == 110 */ { false, false, OS_ZERO },
+ /* r/m == 111 */ { false, false, OS_ZERO }
+};
+
+const ModrmEntry MiniDisassembler::s_ia32_modrm_map_[] = {
+// mod == 00
+ /* r/m == 000 */ { false, false, OS_ZERO },
+ /* r/m == 001 */ { false, false, OS_ZERO },
+ /* r/m == 010 */ { false, false, OS_ZERO },
+ /* r/m == 011 */ { false, false, OS_ZERO },
+ /* r/m == 100 */ { false, true, OS_ZERO },
+ /* r/m == 101 */ { true, false, OS_DOUBLE_WORD },
+ /* r/m == 110 */ { false, false, OS_ZERO },
+ /* r/m == 111 */ { false, false, OS_ZERO },
+// mod == 01
+ /* r/m == 000 */ { true, false, OS_BYTE },
+ /* r/m == 001 */ { true, false, OS_BYTE },
+ /* r/m == 010 */ { true, false, OS_BYTE },
+ /* r/m == 011 */ { true, false, OS_BYTE },
+ /* r/m == 100 */ { true, true, OS_BYTE },
+ /* r/m == 101 */ { true, false, OS_BYTE },
+ /* r/m == 110 */ { true, false, OS_BYTE },
+ /* r/m == 111 */ { true, false, OS_BYTE },
+// mod == 10
+ /* r/m == 000 */ { true, false, OS_DOUBLE_WORD },
+ /* r/m == 001 */ { true, false, OS_DOUBLE_WORD },
+ /* r/m == 010 */ { true, false, OS_DOUBLE_WORD },
+ /* r/m == 011 */ { true, false, OS_DOUBLE_WORD },
+ /* r/m == 100 */ { true, true, OS_DOUBLE_WORD },
+ /* r/m == 101 */ { true, false, OS_DOUBLE_WORD },
+ /* r/m == 110 */ { true, false, OS_DOUBLE_WORD },
+ /* r/m == 111 */ { true, false, OS_DOUBLE_WORD },
+// mod == 11
+ /* r/m == 000 */ { false, false, OS_ZERO },
+ /* r/m == 001 */ { false, false, OS_ZERO },
+ /* r/m == 010 */ { false, false, OS_ZERO },
+ /* r/m == 011 */ { false, false, OS_ZERO },
+ /* r/m == 100 */ { false, false, OS_ZERO },
+ /* r/m == 101 */ { false, false, OS_ZERO },
+ /* r/m == 110 */ { false, false, OS_ZERO },
+ /* r/m == 111 */ { false, false, OS_ZERO },
+};
+
+}; // namespace sidestep
diff --git a/libchrome/sandbox/win/src/sidestep/ia32_opcode_map.cpp b/libchrome/sandbox/win/src/sidestep/ia32_opcode_map.cpp
new file mode 100644
index 0000000..b7d8a60
--- /dev/null
+++ b/libchrome/sandbox/win/src/sidestep/ia32_opcode_map.cpp
@@ -0,0 +1,1159 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Opcode decoding maps. Based on the IA-32 Intel Architecture
+// Software Developer's Manual Volume 2: Instruction Set Reference. Idea
+// for how to lay out the tables in memory taken from the implementation
+// in the Bastard disassembly environment.
+
+#include "sandbox/win/src/sidestep/mini_disassembler.h"
+
+namespace sidestep {
+
+/*
+ * This is the first table to be searched.  The first field of each
+ * Opcode in the table is either 0, meaning that this is already the
+ * correct table, or a nonzero index into the global map
+ * g_pentiumOpcodeMap identifying the table to consult instead.
+ */
+const Opcode s_first_opcode_byte[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_E | OT_B, AM_G | OT_B, AM_NOT_USED, "add", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_NOT_USED, "add", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_G | OT_B, AM_E | OT_B, AM_NOT_USED, "add", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "add", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "add", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_I | OT_V, AM_NOT_USED, "add", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_REGISTER | OT_W, AM_NOT_USED, AM_NOT_USED, "push", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_GENERIC, AM_REGISTER | OT_W, AM_NOT_USED, AM_NOT_USED, "pop", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x8 */ { 0, IT_GENERIC, AM_E | OT_B, AM_G | OT_B, AM_NOT_USED, "or", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x9 */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_NOT_USED, "or", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA */ { 0, IT_GENERIC, AM_G | OT_B, AM_E | OT_B, AM_NOT_USED, "or", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "or", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "or", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xD */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_I | OT_V, AM_NOT_USED, "or", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xE */ { 0, IT_GENERIC, AM_REGISTER | OT_W, AM_NOT_USED, AM_NOT_USED, "push", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xF */ { 1, IT_REFERENCE, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x10 */ { 0, IT_GENERIC, AM_E | OT_B, AM_G | OT_B, AM_NOT_USED, "adc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x11 */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_NOT_USED, "adc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x12 */ { 0, IT_GENERIC, AM_G | OT_B, AM_E | OT_B, AM_NOT_USED, "adc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x13 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "adc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x14 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "adc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x15 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_I | OT_V, AM_NOT_USED, "adc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x16 */ { 0, IT_GENERIC, AM_REGISTER | OT_W, AM_NOT_USED, AM_NOT_USED, "push", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x17 */ { 0, IT_GENERIC, AM_REGISTER | OT_W, AM_NOT_USED, AM_NOT_USED, "pop", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x18 */ { 0, IT_GENERIC, AM_E | OT_B, AM_G | OT_B, AM_NOT_USED, "sbb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x19 */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_NOT_USED, "sbb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1A */ { 0, IT_GENERIC, AM_G | OT_B, AM_E | OT_B, AM_NOT_USED, "sbb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1B */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "sbb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1C */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "sbb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1D */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_I | OT_V, AM_NOT_USED, "sbb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1E */ { 0, IT_GENERIC, AM_REGISTER | OT_W, AM_NOT_USED, AM_NOT_USED, "push", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1F */ { 0, IT_GENERIC, AM_REGISTER | OT_W, AM_NOT_USED, AM_NOT_USED, "pop", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x20 */ { 0, IT_GENERIC, AM_E | OT_B, AM_G | OT_B, AM_NOT_USED, "and", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x21 */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_NOT_USED, "and", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x22 */ { 0, IT_GENERIC, AM_G | OT_B, AM_E | OT_B, AM_NOT_USED, "and", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x23 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "and", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x24 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "and", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x25 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_I | OT_V, AM_NOT_USED, "and", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x26 */ { 0, IT_PREFIX, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x27 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "daa", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x28 */ { 0, IT_GENERIC, AM_E | OT_B, AM_G | OT_B, AM_NOT_USED, "sub", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x29 */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_NOT_USED, "sub", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2A */ { 0, IT_GENERIC, AM_G | OT_B, AM_E | OT_B, AM_NOT_USED, "sub", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2B */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "sub", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2C */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "sub", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2D */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_I | OT_V, AM_NOT_USED, "sub", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2E */ { 0, IT_PREFIX, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2F */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "das", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x30 */ { 0, IT_GENERIC, AM_E | OT_B, AM_G | OT_B, AM_NOT_USED, "xor", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x31 */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_NOT_USED, "xor", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x32 */ { 0, IT_GENERIC, AM_G | OT_B, AM_E | OT_B, AM_NOT_USED, "xor", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x33 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "xor", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x34 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "xor", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x35 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_I | OT_V, AM_NOT_USED, "xor", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x36 */ { 0, IT_PREFIX, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x37 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "aaa", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x38 */ { 0, IT_GENERIC, AM_E | OT_B, AM_G | OT_B, AM_NOT_USED, "cmp", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x39 */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_NOT_USED, "cmp", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3A */ { 0, IT_GENERIC, AM_G | OT_B, AM_E | OT_B, AM_NOT_USED, "cmp", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3B */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "cmp", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3C */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "cmp", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3D */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_I | OT_V, AM_NOT_USED, "cmp", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3E */ { 0, IT_PREFIX, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3F */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "aas", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x40 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "inc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x41 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "inc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x42 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "inc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x43 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "inc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x44 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "inc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x45 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "inc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x46 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "inc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x47 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "inc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x48 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "dec", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x49 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "dec", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4A */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "dec", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4B */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "dec", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4C */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "dec", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4D */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "dec", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4E */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "dec", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4F */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "dec", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x50 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "push", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x51 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "push", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x52 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "push", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x53 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "push", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x54 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "push", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x55 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "push", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x56 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "push", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x57 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "push", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x58 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "pop", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x59 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "pop", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5A */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "pop", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5B */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "pop", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5C */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "pop", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5D */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "pop", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5E */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "pop", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5F */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_NOT_USED, AM_NOT_USED, "pop", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x60 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "pushad", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x61 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "popad", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x62 */ { 0, IT_GENERIC, AM_G | OT_V, AM_M | OT_A, AM_NOT_USED, "bound", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x63 */ { 0, IT_GENERIC, AM_E | OT_W, AM_G | OT_W, AM_NOT_USED, "arpl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x64 */ { 0, IT_PREFIX, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x65 */ { 0, IT_PREFIX, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x66 */ { 0, IT_PREFIX_OPERAND, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x67 */ { 0, IT_PREFIX_ADDRESS, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x68 */ { 0, IT_GENERIC, AM_I | OT_V, AM_NOT_USED, AM_NOT_USED, "push", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x69 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_I | OT_V, "imul", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6A */ { 0, IT_GENERIC, AM_I | OT_B, AM_NOT_USED, AM_NOT_USED, "push", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6B */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_I | OT_B, "imul", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6C */ { 0, IT_GENERIC, AM_Y | OT_B, AM_REGISTER | OT_B, AM_NOT_USED, "insb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6D */ { 0, IT_GENERIC, AM_Y | OT_V, AM_REGISTER | OT_V, AM_NOT_USED, "insd", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6E */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_X | OT_B, AM_NOT_USED, "outsb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6F */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_X | OT_V, AM_NOT_USED, "outsb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x70 */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "jo", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x71 */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "jno", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x72 */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "jc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x73 */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "jnc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x74 */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "jz", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x75 */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "jnz", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x76 */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "jbe", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x77 */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "ja", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x78 */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "js", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x79 */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "jns", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7A */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "jpe", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7B */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "jpo", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7C */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "jl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7D */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "jge", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7E */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "jle", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7F */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "jg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x80 */ { 2, IT_REFERENCE, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x81 */ { 3, IT_REFERENCE, AM_E | OT_V, AM_I | OT_V, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x82 */ { 4, IT_REFERENCE, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x83 */ { 5, IT_REFERENCE, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x84 */ { 0, IT_GENERIC, AM_E | OT_B, AM_G | OT_B, AM_NOT_USED, "test", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x85 */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_NOT_USED, "test", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x86 */ { 0, IT_GENERIC, AM_E | OT_B, AM_G | OT_B, AM_NOT_USED, "xchg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x87 */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_NOT_USED, "xchg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x88 */ { 0, IT_GENERIC, AM_E | OT_B, AM_G | OT_B, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x89 */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x8A */ { 0, IT_GENERIC, AM_G | OT_B, AM_E | OT_B, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x8B */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x8C */ { 0, IT_GENERIC, AM_E | OT_W, AM_S | OT_W, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x8D */ { 0, IT_GENERIC, AM_G | OT_V, AM_M | OT_ADDRESS_MODE_M, AM_NOT_USED, "lea", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x8E */ { 0, IT_GENERIC, AM_S | OT_W, AM_E | OT_W, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x8F */ { 0, IT_GENERIC, AM_E | OT_V, AM_NOT_USED, AM_NOT_USED, "pop", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x90 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "nop", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x91 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_REGISTER | OT_V, AM_NOT_USED, "xchg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x92 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_REGISTER | OT_V, AM_NOT_USED, "xchg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x93 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_REGISTER | OT_V, AM_NOT_USED, "xchg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x94 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_REGISTER | OT_V, AM_NOT_USED, "xchg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x95 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_REGISTER | OT_V, AM_NOT_USED, "xchg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x96 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_REGISTER | OT_V, AM_NOT_USED, "xchg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x97 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_REGISTER | OT_V, AM_NOT_USED, "xchg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x98 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "cwde", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x99 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "cdq", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x9A */ { 0, IT_JUMP, AM_A | OT_P, AM_NOT_USED, AM_NOT_USED, "callf", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x9B */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "wait", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x9C */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "pushfd", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x9D */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "popfd", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x9E */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "sahf", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x9F */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "lahf", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA0 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_O | OT_B, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA1 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_O | OT_V, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA2 */ { 0, IT_GENERIC, AM_O | OT_B, AM_REGISTER | OT_B, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA3 */ { 0, IT_GENERIC, AM_O | OT_V, AM_REGISTER | OT_V, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA4 */ { 0, IT_GENERIC, AM_X | OT_B, AM_Y | OT_B, AM_NOT_USED, "movsb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA5 */ { 0, IT_GENERIC, AM_X | OT_V, AM_Y | OT_V, AM_NOT_USED, "movsd", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA6 */ { 0, IT_GENERIC, AM_X | OT_B, AM_Y | OT_B, AM_NOT_USED, "cmpsb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA7 */ { 0, IT_GENERIC, AM_X | OT_V, AM_Y | OT_V, AM_NOT_USED, "cmpsd", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA8 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "test", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA9 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_I | OT_V, AM_NOT_USED, "test", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xAA */ { 0, IT_GENERIC, AM_Y | OT_B, AM_REGISTER | OT_B, AM_NOT_USED, "stosb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xAB */ { 0, IT_GENERIC, AM_Y | OT_V, AM_REGISTER | OT_V, AM_NOT_USED, "stosd", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xAC */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_X| OT_B, AM_NOT_USED, "lodsb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xAD */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_X| OT_V, AM_NOT_USED, "lodsd", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xAE */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_Y | OT_B, AM_NOT_USED, "scasb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xAF */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_Y | OT_V, AM_NOT_USED, "scasd", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB0 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB1 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB2 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB3 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB4 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB5 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB6 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB7 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB8 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_I | OT_V, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB9 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_I | OT_V, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xBA */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_I | OT_V, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xBB */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_I | OT_V, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xBC */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_I | OT_V, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xBD */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_I | OT_V, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xBE */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_I | OT_V, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xBF */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_I | OT_V, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC0 */ { 6, IT_REFERENCE, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC1 */ { 7, IT_REFERENCE, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC2 */ { 0, IT_RETURN, AM_I | OT_W, AM_NOT_USED, AM_NOT_USED, "ret", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC3 */ { 0, IT_RETURN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "ret", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC4 */ { 0, IT_GENERIC, AM_G | OT_V, AM_M | OT_P, AM_NOT_USED, "les", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC5 */ { 0, IT_GENERIC, AM_G | OT_V, AM_M | OT_P, AM_NOT_USED, "lds", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC6 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC7 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_V, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC8 */ { 0, IT_GENERIC, AM_I | OT_W, AM_I | OT_B, AM_NOT_USED, "enter", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC9 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "leave", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xCA */ { 0, IT_RETURN, AM_I | OT_W, AM_NOT_USED, AM_NOT_USED, "retf", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xCB */ { 0, IT_RETURN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "retf", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xCC */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "int3", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xCD */ { 0, IT_GENERIC, AM_I | OT_B, AM_NOT_USED, AM_NOT_USED, "int", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xCE */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "into", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xCF */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "iret", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xD0 */ { 8, IT_REFERENCE, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xD1 */ { 9, IT_REFERENCE, AM_E | OT_V, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xD2 */ { 10, IT_REFERENCE, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xD3 */ { 11, IT_REFERENCE, AM_E | OT_V, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xD4 */ { 0, IT_GENERIC, AM_I | OT_B, AM_NOT_USED, AM_NOT_USED, "aam", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xD5 */ { 0, IT_GENERIC, AM_I | OT_B, AM_NOT_USED, AM_NOT_USED, "aad", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xD6 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xD7 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "xlat", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+
+ // The following 8 lines would be references to the FPU tables, but we currently
+ // do not support the FPU instructions in this disassembler.
+
+ /* 0xD8 */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xD9 */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xDA */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xDB */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xDC */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xDD */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xDE */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xDF */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+
+
+ /* 0xE0 */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "loopnz", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xE1 */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "loopz", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xE2 */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "loop", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xE3 */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "jcxz", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xE4 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "in", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xE5 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_I | OT_B, AM_NOT_USED, "in", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xE6 */ { 0, IT_GENERIC, AM_I | OT_B, AM_REGISTER | OT_B, AM_NOT_USED, "out", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xE7 */ { 0, IT_GENERIC, AM_I | OT_B, AM_REGISTER | OT_B, AM_NOT_USED, "out", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xE8 */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "call", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xE9 */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "jmp", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xEA */ { 0, IT_JUMP, AM_A | OT_P, AM_NOT_USED, AM_NOT_USED, "jmp", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xEB */ { 0, IT_JUMP, AM_J | OT_B, AM_NOT_USED, AM_NOT_USED, "jmp", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xEC */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_REGISTER | OT_W, AM_NOT_USED, "in", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xED */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_REGISTER | OT_W, AM_NOT_USED, "in", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xEE */ { 0, IT_GENERIC, AM_REGISTER | OT_W, AM_REGISTER | OT_B, AM_NOT_USED, "out", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xEF */ { 0, IT_GENERIC, AM_REGISTER | OT_W, AM_REGISTER | OT_V, AM_NOT_USED, "out", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xF0 */ { 0, IT_PREFIX, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "lock:", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xF1 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xF2 */ { 0, IT_PREFIX, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "repne:", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xF3 */ { 0, IT_PREFIX, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "rep:", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xF4 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "hlt", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xF5 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "cmc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xF6 */ { 12, IT_REFERENCE, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xF7 */ { 13, IT_REFERENCE, AM_E | OT_V, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xF8 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "clc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xF9 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "stc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xFA */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "cli", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xFB */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "sti", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xFC */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "cld", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xFD */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "std", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xFE */ { 14, IT_REFERENCE, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xFF */ { 15, IT_REFERENCE, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+const Opcode s_opcode_byte_after_0f[] = {
+ /* 0x0 */ { 16, IT_REFERENCE, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 17, IT_REFERENCE, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_W, AM_NOT_USED, "lar", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_W, AM_NOT_USED, "lsl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "clts", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x8 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "invd", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x9 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "wbinvd", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "ud2", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xD */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xE */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xF */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x10 */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PS, AM_NOT_USED, "movups", true,
+ /* F2h */ { 0, IT_GENERIC, AM_V | OT_SD, AM_W | OT_SD, AM_NOT_USED, "movsd" },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_SS, AM_W | OT_SS, AM_NOT_USED, "movss" },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_PD, AM_NOT_USED, "movupd" } },
+ /* 0x11 */ { 0, IT_GENERIC, AM_W | OT_PS, AM_V | OT_PS, AM_NOT_USED, "movups", true,
+ /* F2h */ { 0, IT_GENERIC, AM_W | OT_SD, AM_V | OT_SD, AM_NOT_USED, "movsd" },
+ /* F3h */ { 0, IT_GENERIC, AM_W | OT_SS, AM_V | OT_SS, AM_NOT_USED, "movss" },
+ /* 66h */ { 0, IT_GENERIC, AM_W | OT_PD, AM_V | OT_PD, AM_NOT_USED, "movupd" } },
+ /* 0x12 */ { 0, IT_GENERIC, AM_W | OT_Q, AM_V | OT_Q, AM_NOT_USED, "movlps", true,
+ /* F2h */ { 0, IT_GENERIC, AM_V | OT_Q, AM_V | OT_Q, AM_NOT_USED, "movhlps" }, // only one of ...
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_Q, AM_V | OT_Q, AM_NOT_USED, "movhlps" }, // ...these two is correct, Intel doesn't specify which
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_Q, AM_W | OT_S, AM_NOT_USED, "movlpd" } },
+ /* 0x13 */ { 0, IT_GENERIC, AM_V | OT_Q, AM_W | OT_Q, AM_NOT_USED, "movlps", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_Q, AM_W | OT_Q, AM_NOT_USED, "movlpd" } },
+ /* 0x14 */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_Q, AM_NOT_USED, "unpcklps", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_Q, AM_NOT_USED, "unpcklpd" } },
+ /* 0x15 */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_Q, AM_NOT_USED, "unpckhps", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_Q, AM_NOT_USED, "unpckhpd" } },
+ /* 0x16 */ { 0, IT_GENERIC, AM_V | OT_Q, AM_W | OT_Q, AM_NOT_USED, "movhps", true,
+ /* F2h */ { 0, IT_GENERIC, AM_V | OT_Q, AM_V | OT_Q, AM_NOT_USED, "movlhps" }, // only one of...
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_Q, AM_V | OT_Q, AM_NOT_USED, "movlhps" }, // ...these two is correct, Intel doesn't specify which
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_Q, AM_W | OT_Q, AM_NOT_USED, "movhpd" } },
+ /* 0x17 */ { 0, IT_GENERIC, AM_W | OT_Q, AM_V | OT_Q, AM_NOT_USED, "movhps", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_W | OT_Q, AM_V | OT_Q, AM_NOT_USED, "movhpd" } },
+ /* 0x18 */ { 18, IT_REFERENCE, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x19 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1A */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1B */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1C */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1D */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1E */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1F */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x20 */ { 0, IT_GENERIC, AM_R | OT_D, AM_C | OT_D, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x21 */ { 0, IT_GENERIC, AM_R | OT_D, AM_D | OT_D, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x22 */ { 0, IT_GENERIC, AM_C | OT_D, AM_R | OT_D, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x23 */ { 0, IT_GENERIC, AM_D | OT_D, AM_R | OT_D, AM_NOT_USED, "mov", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x24 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x25 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x26 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x27 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x28 */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PS, AM_NOT_USED, "movaps", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_PD, AM_NOT_USED, "movapd" } },
+ /* 0x29 */ { 0, IT_GENERIC, AM_W | OT_PS, AM_V | OT_PS, AM_NOT_USED, "movaps", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_W | OT_PD, AM_V | OT_PD, AM_NOT_USED, "movapd" } },
+ /* 0x2A */ { 0, IT_GENERIC, AM_V | OT_PS, AM_Q | OT_Q, AM_NOT_USED, "cvtpi2ps", true,
+ /* F2h */ { 0, IT_GENERIC, AM_V | OT_SD, AM_E | OT_D, AM_NOT_USED, "cvtsi2sd" },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_SS, AM_E | OT_D, AM_NOT_USED, "cvtsi2ss" },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_Q | OT_DQ, AM_NOT_USED, "cvtpi2pd" } },
+ /* 0x2B */ { 0, IT_GENERIC, AM_W | OT_PS, AM_V | OT_PS, AM_NOT_USED, "movntps", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_W | OT_PD, AM_V | OT_PD, AM_NOT_USED, "movntpd" } },
+ /* 0x2C */ { 0, IT_GENERIC, AM_Q | OT_Q, AM_W | OT_PS, AM_NOT_USED, "cvttps2pi", true,
+ /* F2h */ { 0, IT_GENERIC, AM_G | OT_D, AM_W | OT_SD, AM_NOT_USED, "cvttsd2si" },
+ /* F3h */ { 0, IT_GENERIC, AM_G | OT_D, AM_W | OT_SS, AM_NOT_USED, "cvttss2si" },
+ /* 66h */ { 0, IT_GENERIC, AM_Q | OT_DQ, AM_W | OT_PD, AM_NOT_USED, "cvttpd2pi" } },
+ /* 0x2D */ { 0, IT_GENERIC, AM_Q | OT_Q, AM_W | OT_PS, AM_NOT_USED, "cvtps2pi", true,
+ /* F2h */ { 0, IT_GENERIC, AM_G | OT_D, AM_W | OT_SD, AM_NOT_USED, "cvtsd2si" },
+ /* F3h */ { 0, IT_GENERIC, AM_G | OT_D, AM_W | OT_SS, AM_NOT_USED, "cvtss2si" },
+ /* 66h */ { 0, IT_GENERIC, AM_Q | OT_DQ, AM_W | OT_PD, AM_NOT_USED, "cvtpd2pi" } },
+ /* 0x2E */ { 0, IT_GENERIC, AM_V | OT_SS, AM_W | OT_SS, AM_NOT_USED, "ucomiss", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_SD, AM_W | OT_SD, AM_NOT_USED, "ucomisd" } },
+ /* 0x2F */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_SS, AM_NOT_USED, "comiss", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_SD, AM_W | OT_SD, AM_NOT_USED, "comisd" } },
+ /* 0x30 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "wrmsr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x31 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "rdtsc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x32 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "rdmsr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x33 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "rdpmc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x34 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "sysenter", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x35 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "sysexit", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x36 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x37 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x38 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x39 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3A */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3B */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3C */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "movnti", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3D */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3E */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3F */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x40 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "cmovo", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x41 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "cmovno", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x42 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "cmovc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x43 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "cmovnc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x44 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "cmovz", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x45 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "cmovnz", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x46 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "cmovbe", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x47 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "cmova", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x48 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "cmovs", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x49 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "cmovns", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4A */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "cmovpe", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4B */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "cmovpo", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4C */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "cmovl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4D */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "cmovge", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4E */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "cmovle", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4F */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "cmovg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x50 */ { 0, IT_GENERIC, AM_E | OT_D, AM_V | OT_PS, AM_NOT_USED, "movmskps", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_E | OT_D, AM_V | OT_PD, AM_NOT_USED, "movmskpd" } },
+ /* 0x51 */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PS, AM_NOT_USED, "sqrtps", true,
+ /* F2h */ { 0, IT_GENERIC, AM_V | OT_SD, AM_W | OT_SD, AM_NOT_USED, "sqrtsd" },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_SS, AM_W | OT_SS, AM_NOT_USED, "sqrtss" },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_PD, AM_NOT_USED, "sqrtpd" } },
+ /* 0x52 */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PS, AM_NOT_USED, "rsqrtps", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_SS, AM_W | OT_SS, AM_NOT_USED, "rsqrtss" },
+ /* 66h */ { 0 } },
+ /* 0x53 */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PS, AM_NOT_USED, "rcpps", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_SS, AM_W | OT_SS, AM_NOT_USED, "rcpss" },
+ /* 66h */ { 0 } },
+ /* 0x54 */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PS, AM_NOT_USED, "andps", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_PD, AM_NOT_USED, "andpd" } },
+ /* 0x55 */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PS, AM_NOT_USED, "andnps", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_PD, AM_NOT_USED, "andnpd" } },
+ /* 0x56 */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PS, AM_NOT_USED, "orps", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_PD, AM_NOT_USED, "orpd" } },
+ /* 0x57 */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PS, AM_NOT_USED, "xorps", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_PD, AM_NOT_USED, "xorpd" } },
+ /* 0x58 */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PS, AM_NOT_USED, "addps", true,
+ /* F2h */ { 0, IT_GENERIC, AM_V | OT_SD, AM_W | OT_SD, AM_NOT_USED, "addsd" },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_SS, AM_W | OT_SS, AM_NOT_USED, "addss" },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_PD, AM_NOT_USED, "addpd" } },
+ /* 0x59 */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PS, AM_NOT_USED, "mulps", true,
+ /* F2h */ { 0, IT_GENERIC, AM_V | OT_SD, AM_W | OT_SD, AM_NOT_USED, "mulsd" },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_SS, AM_W | OT_SS, AM_NOT_USED, "mulss" },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_PD, AM_NOT_USED, "mulpd" } },
+ /* 0x5A */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_PS, AM_NOT_USED, "cvtps2pd", true,
+ /* F2h */ { 0, IT_GENERIC, AM_V | OT_SD, AM_W | OT_SD, AM_NOT_USED, "cvtsd2ss" },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_SS, AM_W | OT_SS, AM_NOT_USED, "cvtss2sd" },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PD, AM_NOT_USED, "cvtpd2ps" } },
+ /* 0x5B */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_DQ, AM_NOT_USED, "cvtdq2ps", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_PS, AM_NOT_USED, "cvttps2dq" },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_PS, AM_NOT_USED, "cvtps2dq" } },
+ /* 0x5C */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PS, AM_NOT_USED, "subps", true,
+ /* F2h */ { 0, IT_GENERIC, AM_V | OT_SD, AM_W | OT_SD, AM_NOT_USED, "subsd" },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_SS, AM_W | OT_SS, AM_NOT_USED, "subss" },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_PD, AM_NOT_USED, "subpd" } },
+ /* 0x5D */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PS, AM_NOT_USED, "minps", true,
+ /* F2h */ { 0, IT_GENERIC, AM_V | OT_SD, AM_W | OT_SD, AM_NOT_USED, "minsd" },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_SS, AM_W | OT_SS, AM_NOT_USED, "minss" },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_PD, AM_NOT_USED, "minpd" } },
+ /* 0x5E */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PS, AM_NOT_USED, "divps", true,
+ /* F2h */ { 0, IT_GENERIC, AM_V | OT_SD, AM_W | OT_SD, AM_NOT_USED, "divsd" },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_SS, AM_W | OT_SS, AM_NOT_USED, "divss" },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_PD, AM_NOT_USED, "divpd" } },
+ /* 0x5F */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PS, AM_NOT_USED, "maxps", true,
+ /* F2h */ { 0, IT_GENERIC, AM_V | OT_SD, AM_W | OT_SD, AM_NOT_USED, "maxsd" },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_SS, AM_W | OT_SS, AM_NOT_USED, "maxss" },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_PD, AM_NOT_USED, "maxpd" } },
+ /* 0x60 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_D, AM_NOT_USED, "punpcklbw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "punpcklbw" } },
+ /* 0x61 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_D, AM_NOT_USED, "punpcklwd", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "punpcklwd" } },
+ /* 0x62 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_D, AM_NOT_USED, "punpckldq", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "punpckldq" } },
+ /* 0x63 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_D, AM_NOT_USED, "packsswb", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "packsswb" } },
+ /* 0x64 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_D, AM_NOT_USED, "pcmpgtb", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pcmpgtb" } },
+ /* 0x65 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_D, AM_NOT_USED, "pcmpgtw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pcmpgtw" } },
+ /* 0x66 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_D, AM_NOT_USED, "pcmpgtd", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pcmpgtd" } },
+ /* 0x67 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_D, AM_NOT_USED, "packuswb", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "packuswb" } },
+ /* 0x68 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_D, AM_NOT_USED, "punpckhbw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_P | OT_DQ, AM_Q | OT_DQ, AM_NOT_USED, "punpckhbw" } },
+ /* 0x69 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_D, AM_NOT_USED, "punpckhwd", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_P | OT_DQ, AM_Q | OT_DQ, AM_NOT_USED, "punpckhwd" } },
+ /* 0x6A */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_D, AM_NOT_USED, "punpckhdq", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_P | OT_DQ, AM_Q | OT_DQ, AM_NOT_USED, "punpckhdq" } },
+ /* 0x6B */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_D, AM_NOT_USED, "packssdw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_P | OT_DQ, AM_Q | OT_DQ, AM_NOT_USED, "packssdw" } },
+ /* 0x6C */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "not used without prefix", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "punpcklqdq" } },
+ /* 0x6D */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "not used without prefix", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "punpcklqdq" } },
+ /* 0x6E */ { 0, IT_GENERIC, AM_P | OT_D, AM_E | OT_D, AM_NOT_USED, "movd", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_E | OT_D, AM_NOT_USED, "movd" } },
+ /* 0x6F */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_D, AM_NOT_USED, "movq", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "movdqu" },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "movdqa" } },
+ /* 0x70 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_I | OT_B, "pshuf", true,
+ /* F2h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_I | OT_B, "pshuflw" },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_I | OT_B, "pshufhw" },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_I | OT_B, "pshufd" } },
+ /* 0x71 */ { 19, IT_REFERENCE, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x72 */ { 20, IT_REFERENCE, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x73 */ { 21, IT_REFERENCE, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x74 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pcmpeqb", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pcmpeqb" } },
+ /* 0x75 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pcmpeqw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pcmpeqw" } },
+ /* 0x76 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pcmpeqd", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pcmpeqd" } },
+ /* 0x77 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "emms", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+
+ // The following six opcodes are escapes into the MMX stuff, which this disassembler does not support.
+ /* 0x78 */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x79 */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7A */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7B */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7C */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7D */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+
+ /* 0x7E */ { 0, IT_GENERIC, AM_E | OT_D, AM_P | OT_D, AM_NOT_USED, "movd", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_Q, AM_W | OT_Q, AM_NOT_USED, "movq" },
+ /* 66h */ { 0, IT_GENERIC, AM_E | OT_D, AM_V | OT_DQ, AM_NOT_USED, "movd" } },
+ /* 0x7F */ { 0, IT_GENERIC, AM_Q | OT_Q, AM_P | OT_Q, AM_NOT_USED, "movq", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0, IT_GENERIC, AM_W | OT_DQ, AM_V | OT_DQ, AM_NOT_USED, "movdqu" },
+ /* 66h */ { 0, IT_GENERIC, AM_W | OT_DQ, AM_V | OT_DQ, AM_NOT_USED, "movdqa" } },
+ /* 0x80 */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "jo", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x81 */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "jno", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x82 */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "jc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x83 */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "jnc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x84 */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "jz", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x85 */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "jnz", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x86 */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "jbe", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x87 */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "ja", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x88 */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "js", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x89 */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "jns", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x8A */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "jpe", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x8B */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "jpo", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x8C */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "jl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x8D */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "jge", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x8E */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "jle", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x8F */ { 0, IT_JUMP, AM_J | OT_V, AM_NOT_USED, AM_NOT_USED, "jg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x90 */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "seto", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x91 */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "setno", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x92 */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "setc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x93 */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "setnc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x94 */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "setz", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x95 */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "setnz", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x96 */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "setbe", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x97 */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "seta", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x98 */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "sets", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x99 */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "setns", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x9A */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "setpe", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x9B */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "setpo", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x9C */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "setl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x9D */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "setge", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x9E */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "setle", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x9F */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "setg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA0 */ { 0, IT_GENERIC, AM_REGISTER | OT_W, AM_NOT_USED, AM_NOT_USED, "push", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA1 */ { 0, IT_GENERIC, AM_REGISTER | OT_W, AM_NOT_USED, AM_NOT_USED, "pop", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA2 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "cpuid", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA3 */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_NOT_USED, "bt", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA4 */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_I | OT_B, "shld", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA5 */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_I | OT_B | AM_REGISTER, "shld", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA6 */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA7 */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA8 */ { 0, IT_GENERIC, AM_REGISTER | OT_W, AM_NOT_USED, AM_NOT_USED, "push", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xA9 */ { 0, IT_GENERIC, AM_REGISTER | OT_W, AM_NOT_USED, AM_NOT_USED, "pop", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xAA */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "rsm", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xAB */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_NOT_USED, "bts", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xAC */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_I | OT_B, "shrd", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xAD */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_I | OT_B | AM_REGISTER, "shrd", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xAE */ { 22, IT_REFERENCE, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xAF */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "imul", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB0 */ { 0, IT_GENERIC, AM_E | OT_B, AM_G | OT_B, AM_NOT_USED, "cmpxchg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB1 */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_NOT_USED, "cmpxchg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB2 */ { 0, IT_GENERIC, AM_M | OT_P, AM_NOT_USED, AM_NOT_USED, "lss", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB3 */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_NOT_USED, "btr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB4 */ { 0, IT_GENERIC, AM_M | OT_P, AM_NOT_USED, AM_NOT_USED, "lfs", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB5 */ { 0, IT_GENERIC, AM_M | OT_P, AM_NOT_USED, AM_NOT_USED, "lgs", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB6 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_B, AM_NOT_USED, "movzx", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB7 */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_W, AM_NOT_USED, "movzx", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB8 */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xB9 */ { 0, IT_UNKNOWN, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "ud1", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xBA */ { 23, IT_REFERENCE, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xBB */ { 0, IT_GENERIC, AM_E | OT_V, AM_G | OT_V, AM_NOT_USED, "btc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xBC */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "bsf", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xBD */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_V, AM_NOT_USED, "bsr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xBE */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_B, AM_NOT_USED, "movsx", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xBF */ { 0, IT_GENERIC, AM_G | OT_V, AM_E | OT_W, AM_NOT_USED, "movsx", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC0 */ { 0, IT_GENERIC, AM_E | OT_B, AM_G | OT_B, AM_NOT_USED, "xadd", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC1 */ { 0, IT_GENERIC, AM_E | OT_V, AM_NOT_USED, AM_NOT_USED, "xadd", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC2 */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PS, AM_I | OT_B, "cmpps", true,
+ /* F2h */ { 0, IT_GENERIC, AM_V | OT_SD, AM_W | OT_SD, AM_I | OT_B, "cmpsd" },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_SS, AM_W | OT_SS, AM_I | OT_B, "cmpss" },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_PD, AM_I | OT_B, "cmppd" } },
+ /* 0xC3 */ { 0, IT_GENERIC, AM_E | OT_D, AM_G | OT_D, AM_NOT_USED, "movnti", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC4 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_E | OT_D, AM_I | OT_B, "pinsrw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_E | OT_D, AM_I | OT_B, "pinsrw" } },
+ /* 0xC5 */ { 0, IT_GENERIC, AM_G | OT_D, AM_P | OT_Q, AM_I | OT_B, "pextrw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_G | OT_D, AM_V | OT_DQ, AM_I | OT_B, "pextrw" } },
+ /* 0xC6 */ { 0, IT_GENERIC, AM_V | OT_PS, AM_W | OT_PS, AM_I | OT_B, "shufps", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_PD, AM_I | OT_B, "shufpd" } },
+ /* 0xC7 */ { 24, IT_REFERENCE, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC8 */ { 0, IT_GENERIC, AM_REGISTER | OT_D, AM_NOT_USED, AM_NOT_USED, "bswap", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xC9 */ { 0, IT_GENERIC, AM_REGISTER | OT_D, AM_NOT_USED, AM_NOT_USED, "bswap", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xCA */ { 0, IT_GENERIC, AM_REGISTER | OT_D, AM_NOT_USED, AM_NOT_USED, "bswap", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xCB */ { 0, IT_GENERIC, AM_REGISTER | OT_D, AM_NOT_USED, AM_NOT_USED, "bswap", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xCC */ { 0, IT_GENERIC, AM_REGISTER | OT_D, AM_NOT_USED, AM_NOT_USED, "bswap", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xCD */ { 0, IT_GENERIC, AM_REGISTER | OT_D, AM_NOT_USED, AM_NOT_USED, "bswap", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xCE */ { 0, IT_GENERIC, AM_REGISTER | OT_D, AM_NOT_USED, AM_NOT_USED, "bswap", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xCF */ { 0, IT_GENERIC, AM_REGISTER | OT_D, AM_NOT_USED, AM_NOT_USED, "bswap", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xD0 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xD1 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "psrlw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "psrlw" } },
+ /* 0xD2 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "psrld", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "psrld" } },
+ /* 0xD3 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "psrlq", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "psrlq" } },
+ /* 0xD4 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "paddq", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "paddq" } },
+ /* 0xD5 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pmullw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pmullw" } },
+ /* 0xD6 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "unused without prefix", true,
+ /* F2h */ { 0, IT_GENERIC, AM_P | OT_Q, AM_W | OT_Q, AM_NOT_USED, "movdq2q" },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_Q | OT_Q, AM_NOT_USED, "movq2dq" },
+ /* 66h */ { 0, IT_GENERIC, AM_W | OT_Q, AM_V | OT_Q, AM_NOT_USED, "movq" } },
+ /* 0xD7 */ { 0, IT_GENERIC, AM_G | OT_D, AM_P | OT_Q, AM_NOT_USED, "pmovmskb", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_G | OT_D, AM_V | OT_DQ, AM_NOT_USED, "pmovmskb" } },
+ /* 0xD8 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "psubusb", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "psubusb" } },
+ /* 0xD9 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "psubusw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "psubusw" } },
+ /* 0xDA */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pminub", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pminub" } },
+ /* 0xDB */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pand", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pand" } },
+ /* 0xDC */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "paddusb", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "paddusb" } },
+ /* 0xDD */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "paddusw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "paddusw" } },
+ /* 0xDE */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pmaxub", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pmaxub" } },
+ /* 0xDF */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pandn", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pandn" } },
+ /* 0xE0 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pavgb", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pavgb" } },
+ /* 0xE1 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "psraw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "psrqw" } },
+ /* 0xE2 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "psrad", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "psrad" } },
+ /* 0xE3 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pavgw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pavgw" } },
+ /* 0xE4 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pmulhuw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pmulhuw" } },
+ /* 0xE5 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pmulhuw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pmulhw" } },
+ /* 0xE6 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "not used without prefix", true,
+ /* F2h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_PD, AM_NOT_USED, "cvtpd2dq" },
+ /* F3h */ { 0, IT_GENERIC, AM_V | OT_PD, AM_W | OT_DQ, AM_NOT_USED, "cvtdq2pd" },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_PD, AM_NOT_USED, "cvttpd2dq" } },
+ /* 0xE7 */ { 0, IT_GENERIC, AM_W | OT_Q, AM_V | OT_Q, AM_NOT_USED, "movntq", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_W | OT_DQ, AM_V | OT_DQ, AM_NOT_USED, "movntdq" } },
+ /* 0xE8 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "psubsb", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "psubsb" } },
+ /* 0xE9 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "psubsw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "psubsw" } },
+ /* 0xEA */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pminsw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pminsw" } },
+ /* 0xEB */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "por", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "por" } },
+ /* 0xEC */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "paddsb", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "paddsb" } },
+ /* 0xED */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "paddsw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "paddsw" } },
+ /* 0xEE */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pmaxsw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pmaxsw" } },
+ /* 0xEF */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pxor", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pxor" } },
+ /* 0xF0 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0xF1 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "psllw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "psllw" } },
+ /* 0xF2 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pslld", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pslld" } },
+ /* 0xF3 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "psllq", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "psllq" } },
+ /* 0xF4 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pmuludq", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pmuludq" } },
+ /* 0xF5 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "pmaddwd", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "pmaddwd" } },
+ /* 0xF6 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "psadbw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "psadbw" } },
+ /* 0xF7 */ { 0, IT_GENERIC, AM_P | OT_PI, AM_Q | OT_PI, AM_NOT_USED, "maskmovq", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "maskmovdqu" } },
+ /* 0xF8 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "psubb", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "psubb" } },
+ /* 0xF9 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "psubw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "psubw" } },
+ /* 0xFA */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "psubd", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "psubd" } },
+ /* 0xFB */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "psubq", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "psubq" } },
+ /* 0xFC */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "paddb", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "paddb" } },
+ /* 0xFD */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "paddw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "paddw" } },
+ /* 0xFE */ { 0, IT_GENERIC, AM_P | OT_Q, AM_Q | OT_Q, AM_NOT_USED, "paddd", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_V | OT_DQ, AM_W | OT_DQ, AM_NOT_USED, "paddd" } },
+ /* 0xFF */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+// Sub-table for two-byte opcode 0F 00: apparently indexed by the 3-bit
+// ModR/M reg field (/0../7) — confirm against the decoder's lookup code.
+// Entry layout mirrors the main tables: { table_index, insn_type, operand-1
+// flags, operand-2 flags, operand-3 flags, mnemonic, has-prefix-forms,
+// F2h form, F3h form, 66h form }.
+const Opcode s_opcode_byte_after_0f00[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_E | OT_W, AM_NOT_USED, AM_NOT_USED, "sldt", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_E | OT_W, AM_NOT_USED, AM_NOT_USED, "str", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_E | OT_W, AM_NOT_USED, AM_NOT_USED, "lldt", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_E | OT_W, AM_NOT_USED, AM_NOT_USED, "ltr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, AM_E | OT_W, AM_NOT_USED, AM_NOT_USED, "verr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_GENERIC, AM_E | OT_W, AM_NOT_USED, AM_NOT_USED, "verw", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+// Sub-table for two-byte opcode 0F 01 (descriptor-table / MSW instructions),
+// indexed by the ModR/M reg field (/0../7) — presumably; see 0F 00 table.
+const Opcode s_opcode_byte_after_0f01[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_M | OT_S, AM_NOT_USED, AM_NOT_USED, "sgdt", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_M | OT_S, AM_NOT_USED, AM_NOT_USED, "sidt", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_M | OT_S, AM_NOT_USED, AM_NOT_USED, "lgdt", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_M | OT_S, AM_NOT_USED, AM_NOT_USED, "lidt", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, AM_E | OT_W, AM_NOT_USED, AM_NOT_USED, "smsw", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_E | OT_W, AM_NOT_USED, AM_NOT_USED, "lmsw", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_GENERIC, AM_M | OT_B, AM_NOT_USED, AM_NOT_USED, "invlpg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+// Sub-table for two-byte opcode 0F 18 (prefetch hints), indexed by the
+// ModR/M reg field. All defined forms share the generic "prefetch" mnemonic;
+// the per-hint variants (t0/t1/t2/nta) are not distinguished here.
+// NOTE(review): entries /1../3 use AM_REGISTER while /0 uses AM_M — confirm
+// this mixed addressing is intentional rather than a copy-paste artifact.
+const Opcode s_opcode_byte_after_0f18[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_M | OT_ADDRESS_MODE_M, AM_NOT_USED, AM_NOT_USED, "prefetch", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_REGISTER | OT_D, AM_NOT_USED, AM_NOT_USED, "prefetch", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_REGISTER | OT_D, AM_NOT_USED, AM_NOT_USED, "prefetch", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_REGISTER | OT_D, AM_NOT_USED, AM_NOT_USED, "prefetch", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+// Sub-table for two-byte opcode 0F 71 (MMX/SSE word-shift-by-immediate
+// group), indexed by the ModR/M reg field. The 66h rows give the SSE (xmm)
+// forms of the same mnemonics.
+// NOTE(review): the 66h forms here use AM_P | OT_DQ, whereas the sibling
+// 0F 72 / 0F 73 tables use AM_W | OT_DQ for their 66h forms — confirm which
+// is intended; this looks inconsistent.
+const Opcode s_opcode_byte_after_0f71[] = {
+ /* 0x0 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_I | OT_B, AM_NOT_USED, "psrlw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_P | OT_DQ, AM_I | OT_B, AM_NOT_USED, "psrlw" } },
+ /* 0x3 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_I | OT_B, AM_NOT_USED, "psraw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_P | OT_DQ, AM_I | OT_B, AM_NOT_USED, "psraw" } },
+ /* 0x5 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_I | OT_B, AM_NOT_USED, "psllw", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_P | OT_DQ, AM_I | OT_B, AM_NOT_USED, "psllw" } },
+ /* 0x7 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+// Sub-table for two-byte opcode 0F 72 (MMX/SSE doubleword-shift-by-immediate
+// group), indexed by the ModR/M reg field; 66h rows are the xmm forms.
+const Opcode s_opcode_byte_after_0f72[] = {
+ /* 0x0 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_I | OT_B, AM_NOT_USED, "psrld", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_W | OT_DQ, AM_I | OT_B, AM_NOT_USED, "psrld" } },
+ /* 0x3 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_I | OT_B, AM_NOT_USED, "psrad", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_W | OT_DQ, AM_I | OT_B, AM_NOT_USED, "psrad" } },
+ /* 0x5 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_I | OT_B, AM_NOT_USED, "pslld", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_W | OT_DQ, AM_I | OT_B, AM_NOT_USED, "pslld" } },
+ /* 0x7 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+// Sub-table for two-byte opcode 0F 73 (MMX/SSE quadword-shift-by-immediate
+// group), indexed by the ModR/M reg field; 66h rows are the xmm forms.
+// NOTE(review): the /7 entry lists a no-prefix "pslldq" with AM_W | OT_DQ —
+// pslldq normally exists only with the 66h prefix; confirm the no-prefix
+// form here is a deliberate fallback and not an error.
+const Opcode s_opcode_byte_after_0f73[] = {
+ /* 0x0 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_I | OT_B, AM_NOT_USED, "psrlq", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_W | OT_DQ, AM_I | OT_B, AM_NOT_USED, "psrlq" } },
+ /* 0x3 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_P | OT_Q, AM_I | OT_B, AM_NOT_USED, "psllq", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_W | OT_DQ, AM_I | OT_B, AM_NOT_USED, "psllq" } },
+ /* 0x7 */ { 0, IT_GENERIC, AM_W | OT_DQ, AM_I | OT_B, AM_NOT_USED, "pslldq", true,
+ /* F2h */ { 0 },
+ /* F3h */ { 0 },
+ /* 66h */ { 0, IT_GENERIC, AM_W | OT_DQ, AM_I | OT_B, AM_NOT_USED, "pslldq" } },
+};
+
+// Sub-table for two-byte opcode 0F AE (fxsave/fence group), indexed by the
+// ModR/M reg field. Note the /7 entry labels both forms with the single
+// string "clflush/sfence": this table does not distinguish the memory form
+// (clflush) from the register form (sfence) — presumably acceptable for this
+// disassembler's purposes, since all operands are AM_NOT_USED here.
+const Opcode s_opcode_byte_after_0fae[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "fxsave", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "fxrstor", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "ldmxcsr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "stmxcsr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "lfence", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "mfence", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_GENERIC, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, "clflush/sfence", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+};
+
+// Sub-table for two-byte opcode 0F BA (bit-test-with-immediate group),
+// indexed by the ModR/M reg field; /0../3 are undefined encodings.
+const Opcode s_opcode_byte_after_0fba[] = {
+ /* 0x0 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "bt", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "bts", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "btr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "btc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+// Sub-table for two-byte opcode 0F C7 (cmpxchg8b group).
+// NOTE(review): this table has only 2 entries, while the other group tables
+// have 8 (one per 3-bit ModR/M reg value). If the lookup indexes by the raw
+// reg field, /2../7 would read out of bounds — verify the decoder clamps the
+// index or otherwise guards this table's length.
+const Opcode s_opcode_byte_after_0fc7[] = {
+ /* 0x0 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_M | OT_Q, AM_NOT_USED, AM_NOT_USED, "cmpxch8b", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+// Sub-table for one-byte opcode 80 (ALU ops, byte operand with byte
+// immediate), indexed by the ModR/M reg field.
+const Opcode s_opcode_byte_after_80[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "add", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "or", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "adc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "sbb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "and", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "sub", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "xor", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "cmp", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+// Sub-table for one-byte opcode 81 (ALU ops, word/dword operand with
+// word/dword immediate), indexed by the ModR/M reg field.
+const Opcode s_opcode_byte_after_81[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_V, AM_NOT_USED, "add", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_V, AM_NOT_USED, "or", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_V, AM_NOT_USED, "adc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_V, AM_NOT_USED, "sbb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_V, AM_NOT_USED, "and", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_V, AM_NOT_USED, "sub", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_V, AM_NOT_USED, "xor", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_V, AM_NOT_USED, "cmp", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+// Sub-table for one-byte opcode 82 (ALU ops with sign-extended byte
+// immediate — a legacy alias of opcode 80/83 encodings), indexed by the
+// ModR/M reg field. Identical operand layout to the 83 table below.
+const Opcode s_opcode_byte_after_82[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "add", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "or", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "adc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "sbb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "and", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "sub", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "xor", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "cmp", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+// Sub-table for one-byte opcode 83 (ALU ops, word/dword operand with
+// sign-extended byte immediate), indexed by the ModR/M reg field.
+const Opcode s_opcode_byte_after_83[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "add", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "or", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "adc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "sbb", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "and", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "sub", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "xor", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "cmp", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+const Opcode s_opcode_byte_after_c0[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "rol", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "ror", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "rcl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "rcr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "shl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "shr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "sal", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "sar", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+const Opcode s_opcode_byte_after_c1[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "rol", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "ror", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "rcl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "rcr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "shl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "shr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "sal", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_B, AM_NOT_USED, "sar", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+const Opcode s_opcode_byte_after_d0[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_E | OT_B, AM_IMPLICIT, AM_NOT_USED, "rol", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_E | OT_B, AM_IMPLICIT, AM_NOT_USED, "ror", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_E | OT_B, AM_IMPLICIT, AM_NOT_USED, "rcl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_E | OT_B, AM_IMPLICIT, AM_NOT_USED, "rcr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, AM_E | OT_B, AM_IMPLICIT, AM_NOT_USED, "shl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_GENERIC, AM_E | OT_B, AM_IMPLICIT, AM_NOT_USED, "shr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_E | OT_B, AM_IMPLICIT, AM_NOT_USED, "sal", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_GENERIC, AM_E | OT_B, AM_IMPLICIT, AM_NOT_USED, "sar", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+const Opcode s_opcode_byte_after_d1[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_E | OT_V, AM_IMPLICIT, AM_NOT_USED, "rol", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_E | OT_V, AM_IMPLICIT, AM_NOT_USED, "ror", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_E | OT_V, AM_IMPLICIT, AM_NOT_USED, "rcl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_E | OT_V, AM_IMPLICIT, AM_NOT_USED, "rcr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, AM_E | OT_V, AM_IMPLICIT, AM_NOT_USED, "shl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_GENERIC, AM_E | OT_V, AM_IMPLICIT, AM_NOT_USED, "shr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_E | OT_V, AM_IMPLICIT, AM_NOT_USED, "sal", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_GENERIC, AM_E | OT_V, AM_IMPLICIT, AM_NOT_USED, "sar", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+const Opcode s_opcode_byte_after_d2[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_E | OT_B, AM_REGISTER | OT_B, AM_NOT_USED, "rol", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_E | OT_B, AM_REGISTER | OT_B, AM_NOT_USED, "ror", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_E | OT_B, AM_REGISTER | OT_B, AM_NOT_USED, "rcl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_E | OT_B, AM_REGISTER | OT_B, AM_NOT_USED, "rcr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, AM_E | OT_B, AM_REGISTER | OT_B, AM_NOT_USED, "shl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_GENERIC, AM_E | OT_B, AM_REGISTER | OT_B, AM_NOT_USED, "shr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_E | OT_B, AM_REGISTER | OT_B, AM_NOT_USED, "sal", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_GENERIC, AM_E | OT_B, AM_REGISTER | OT_B, AM_NOT_USED, "sar", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+const Opcode s_opcode_byte_after_d3[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_E | OT_V, AM_REGISTER | OT_B, AM_NOT_USED, "rol", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_E | OT_V, AM_REGISTER | OT_B, AM_NOT_USED, "ror", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_E | OT_V, AM_REGISTER | OT_B, AM_NOT_USED, "rcl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_E | OT_V, AM_REGISTER | OT_B, AM_NOT_USED, "rcr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, AM_E | OT_V, AM_REGISTER | OT_B, AM_NOT_USED, "shl", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_GENERIC, AM_E | OT_V, AM_REGISTER | OT_B, AM_NOT_USED, "shr", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_E | OT_V, AM_REGISTER | OT_B, AM_NOT_USED, "sal", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_GENERIC, AM_E | OT_V, AM_REGISTER | OT_B, AM_NOT_USED, "sar", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+const Opcode s_opcode_byte_after_f6[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "test", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_E | OT_B, AM_I | OT_B, AM_NOT_USED, "test", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "not", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "neg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, OT_B | AM_REGISTER, AM_E | OT_B, AM_NOT_USED, "mul", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_GENERIC, OT_B | AM_REGISTER, AM_E | OT_B, AM_NOT_USED, "imul", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_E | OT_B, AM_NOT_USED, "div", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_GENERIC, AM_REGISTER | OT_B, AM_E | OT_B, AM_NOT_USED, "idiv", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+const Opcode s_opcode_byte_after_f7[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_V, AM_NOT_USED, "test", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_E | OT_V, AM_I | OT_V, AM_NOT_USED, "test", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_GENERIC, AM_E | OT_V, AM_NOT_USED, AM_NOT_USED, "not", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_GENERIC, AM_E | OT_V, AM_NOT_USED, AM_NOT_USED, "neg", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_E | OT_V, AM_NOT_USED, "mul", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_E | OT_V, AM_NOT_USED, "imul", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_E | OT_V, AM_NOT_USED, "div", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_GENERIC, AM_REGISTER | OT_V, AM_E | OT_V, AM_NOT_USED, "idiv", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+const Opcode s_opcode_byte_after_fe[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "inc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_E | OT_B, AM_NOT_USED, AM_NOT_USED, "dec", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+const Opcode s_opcode_byte_after_ff[] = {
+ /* 0x0 */ { 0, IT_GENERIC, AM_E | OT_V, AM_NOT_USED, AM_NOT_USED, "inc", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x1 */ { 0, IT_GENERIC, AM_E | OT_V, AM_NOT_USED, AM_NOT_USED, "dec", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x2 */ { 0, IT_JUMP, AM_E | OT_V, AM_NOT_USED, AM_NOT_USED, "call", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x3 */ { 0, IT_JUMP, AM_E | OT_P, AM_NOT_USED, AM_NOT_USED, "call", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x4 */ { 0, IT_JUMP, AM_E | OT_V, AM_NOT_USED, AM_NOT_USED, "jmp", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x5 */ { 0, IT_JUMP, AM_E | OT_P, AM_NOT_USED, AM_NOT_USED, "jmp", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x6 */ { 0, IT_GENERIC, AM_E | OT_V, AM_NOT_USED, AM_NOT_USED, "push", false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } },
+ /* 0x7 */ { 0, IT_UNUSED, AM_NOT_USED, AM_NOT_USED, AM_NOT_USED, 0, false, /* F2h */ { 0 }, /* F3h */ { 0 }, /* 66h */ { 0 } }
+};
+
+/*
+* A table of all the other tables, containing some extra information, e.g.
+* how to mask out the byte we're looking at.
+*/
+const OpcodeTable MiniDisassembler::s_ia32_opcode_map_[]={
+ // One-byte opcodes and jumps to larger
+ /* 0 */ {s_first_opcode_byte, 0, 0xff, 0, 0xff},
+ // Two-byte opcodes (second byte)
+ /* 1 */ {s_opcode_byte_after_0f, 0, 0xff, 0, 0xff},
+ // Start of tables for opcodes using ModR/M bits as extension
+ /* 2 */ {s_opcode_byte_after_80, 3, 0x07, 0, 0x07},
+ /* 3 */ {s_opcode_byte_after_81, 3, 0x07, 0, 0x07},
+ /* 4 */ {s_opcode_byte_after_82, 3, 0x07, 0, 0x07},
+ /* 5 */ {s_opcode_byte_after_83, 3, 0x07, 0, 0x07},
+ /* 6 */ {s_opcode_byte_after_c0, 3, 0x07, 0, 0x07},
+ /* 7 */ {s_opcode_byte_after_c1, 3, 0x07, 0, 0x07},
+ /* 8 */ {s_opcode_byte_after_d0, 3, 0x07, 0, 0x07},
+ /* 9 */ {s_opcode_byte_after_d1, 3, 0x07, 0, 0x07},
+ /* 10 */ {s_opcode_byte_after_d2, 3, 0x07, 0, 0x07},
+ /* 11 */ {s_opcode_byte_after_d3, 3, 0x07, 0, 0x07},
+ /* 12 */ {s_opcode_byte_after_f6, 3, 0x07, 0, 0x07},
+ /* 13 */ {s_opcode_byte_after_f7, 3, 0x07, 0, 0x07},
+ /* 14 */ {s_opcode_byte_after_fe, 3, 0x07, 0, 0x01},
+ /* 15 */ {s_opcode_byte_after_ff, 3, 0x07, 0, 0x07},
+ /* 16 */ {s_opcode_byte_after_0f00, 3, 0x07, 0, 0x07},
+ /* 17 */ {s_opcode_byte_after_0f01, 3, 0x07, 0, 0x07},
+ /* 18 */ {s_opcode_byte_after_0f18, 3, 0x07, 0, 0x07},
+ /* 19 */ {s_opcode_byte_after_0f71, 3, 0x07, 0, 0x07},
+ /* 20 */ {s_opcode_byte_after_0f72, 3, 0x07, 0, 0x07},
+ /* 21 */ {s_opcode_byte_after_0f73, 3, 0x07, 0, 0x07},
+ /* 22 */ {s_opcode_byte_after_0fae, 3, 0x07, 0, 0x07},
+ /* 23 */ {s_opcode_byte_after_0fba, 3, 0x07, 0, 0x07},
+ /* 24 */ {s_opcode_byte_after_0fc7, 3, 0x07, 0, 0x01}
+};
+
+}; // namespace sidestep
diff --git a/libchrome/sandbox/win/src/sidestep/mini_disassembler.cpp b/libchrome/sandbox/win/src/sidestep/mini_disassembler.cpp
new file mode 100644
index 0000000..1e8e0bd
--- /dev/null
+++ b/libchrome/sandbox/win/src/sidestep/mini_disassembler.cpp
@@ -0,0 +1,395 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implementation of MiniDisassembler.
+
+#ifdef _WIN64
+#error The code in this file should not be used on 64-bit Windows.
+#endif
+
+#include "sandbox/win/src/sidestep/mini_disassembler.h"
+
+namespace sidestep {
+
+MiniDisassembler::MiniDisassembler(bool operand_default_is_32_bits,
+ bool address_default_is_32_bits)
+ : operand_default_is_32_bits_(operand_default_is_32_bits),
+ address_default_is_32_bits_(address_default_is_32_bits) {
+ Initialize();
+}
+
+MiniDisassembler::MiniDisassembler()
+ : operand_default_is_32_bits_(true),
+ address_default_is_32_bits_(true) {
+ Initialize();
+}
+
+InstructionType MiniDisassembler::Disassemble(
+ unsigned char* start_byte,
+ unsigned int* instruction_bytes) {
+ // Clean up any state from previous invocations.
+ Initialize();
+
+ // Start by processing any prefixes.
+ unsigned char* current_byte = start_byte;
+ unsigned int size = 0;
+ InstructionType instruction_type = ProcessPrefixes(current_byte, &size);
+
+ if (IT_UNKNOWN == instruction_type)
+ return instruction_type;
+
+ current_byte += size;
+ size = 0;
+
+ // Invariant: We have stripped all prefixes, and the operand_is_32_bits_
+ // and address_is_32_bits_ flags are correctly set.
+
+ instruction_type = ProcessOpcode(current_byte, 0, &size);
+
+ // Check for error processing instruction
+ if ((IT_UNKNOWN == instruction_type_) || (IT_UNUSED == instruction_type_)) {
+ return IT_UNKNOWN;
+ }
+
+ current_byte += size;
+
+ // Invariant: operand_bytes_ indicates the total size of operands
+ // specified by the opcode and/or ModR/M byte and/or SIB byte.
+ // pCurrentByte points to the first byte after the ModR/M byte, or after
+ // the SIB byte if it is present (i.e. the first byte of any operands
+ // encoded in the instruction).
+
+ // We get the total length of any prefixes, the opcode, and the ModR/M and
+ // SIB bytes if present, by taking the difference of the original starting
+ // address and the current byte (which points to the first byte of the
+ // operands if present, or to the first byte of the next instruction if
+ // they are not). Adding the count of bytes in the operands encoded in
+ // the instruction gives us the full length of the instruction in bytes.
+ *instruction_bytes += operand_bytes_ + (current_byte - start_byte);
+
+ // Return the instruction type, which was set by ProcessOpcode().
+ return instruction_type_;
+}
+
+void MiniDisassembler::Initialize() {
+ operand_is_32_bits_ = operand_default_is_32_bits_;
+ address_is_32_bits_ = address_default_is_32_bits_;
+ operand_bytes_ = 0;
+ have_modrm_ = false;
+ should_decode_modrm_ = false;
+ instruction_type_ = IT_UNKNOWN;
+ got_f2_prefix_ = false;
+ got_f3_prefix_ = false;
+ got_66_prefix_ = false;
+}
+
+InstructionType MiniDisassembler::ProcessPrefixes(unsigned char* start_byte,
+ unsigned int* size) {
+ InstructionType instruction_type = IT_GENERIC;
+ const Opcode& opcode = s_ia32_opcode_map_[0].table_[*start_byte];
+
+ switch (opcode.type_) {
+ case IT_PREFIX_ADDRESS:
+ address_is_32_bits_ = !address_default_is_32_bits_;
+ goto nochangeoperand;
+ case IT_PREFIX_OPERAND:
+ operand_is_32_bits_ = !operand_default_is_32_bits_;
+ nochangeoperand:
+ case IT_PREFIX:
+
+ if (0xF2 == (*start_byte))
+ got_f2_prefix_ = true;
+ else if (0xF3 == (*start_byte))
+ got_f3_prefix_ = true;
+ else if (0x66 == (*start_byte))
+ got_66_prefix_ = true;
+
+ instruction_type = opcode.type_;
+ (*size)++;
+ // we got a prefix, so add one and check next byte
+ ProcessPrefixes(start_byte + 1, size);
+ default:
+ break; // not a prefix byte
+ }
+
+ return instruction_type;
+}
+
+InstructionType MiniDisassembler::ProcessOpcode(unsigned char* start_byte,
+ unsigned int table_index,
+ unsigned int* size) {
+ const OpcodeTable& table = s_ia32_opcode_map_[table_index]; // Get our table
+ unsigned char current_byte = (*start_byte) >> table.shift_;
+ current_byte = current_byte & table.mask_; // Mask out the bits we will use
+
+ // Check whether the byte we have is inside the table we have.
+ if (current_byte < table.min_lim_ || current_byte > table.max_lim_) {
+ instruction_type_ = IT_UNKNOWN;
+ return instruction_type_;
+ }
+
+ const Opcode& opcode = table.table_[current_byte];
+ if (IT_UNUSED == opcode.type_) {
+ // This instruction is not used by the IA-32 ISA, so we indicate
+ // this to the user. Probably means that we were pointed to
+ // a byte in memory that was not the start of an instruction.
+ instruction_type_ = IT_UNUSED;
+ return instruction_type_;
+ } else if (IT_REFERENCE == opcode.type_) {
+ // We are looking at an opcode that has more bytes (or is continued
+ // in the ModR/M byte). Recursively find the opcode definition in
+ // the table for the opcode's next byte.
+ (*size)++;
+ ProcessOpcode(start_byte + 1, opcode.table_index_, size);
+ return instruction_type_;
+ }
+
+ const SpecificOpcode* specific_opcode = reinterpret_cast<
+ const SpecificOpcode*>(&opcode);
+ if (opcode.is_prefix_dependent_) {
+ if (got_f2_prefix_ && opcode.opcode_if_f2_prefix_.mnemonic_ != 0) {
+ specific_opcode = &opcode.opcode_if_f2_prefix_;
+ } else if (got_f3_prefix_ && opcode.opcode_if_f3_prefix_.mnemonic_ != 0) {
+ specific_opcode = &opcode.opcode_if_f3_prefix_;
+ } else if (got_66_prefix_ && opcode.opcode_if_66_prefix_.mnemonic_ != 0) {
+ specific_opcode = &opcode.opcode_if_66_prefix_;
+ }
+ }
+
+ // Inv: The opcode type is known.
+ instruction_type_ = specific_opcode->type_;
+
+ // Let's process the operand types to see if we have any immediate
+ // operands, and/or a ModR/M byte.
+
+ ProcessOperand(specific_opcode->flag_dest_);
+ ProcessOperand(specific_opcode->flag_source_);
+ ProcessOperand(specific_opcode->flag_aux_);
+
+ // Inv: We have processed the opcode and incremented operand_bytes_
+ // by the number of bytes of any operands specified by the opcode
+ // that are stored in the instruction (not registers etc.). Now
+ // we need to return the total number of bytes for the opcode and
+ // for the ModR/M or SIB bytes if they are present.
+
+ if (table.mask_ != 0xff) {
+ if (have_modrm_) {
+ // we're looking at a ModR/M byte so we're not going to
+ // count that into the opcode size
+ ProcessModrm(start_byte, size);
+ return IT_GENERIC;
+ } else {
+ // need to count the ModR/M byte even if it's just being
+ // used for opcode extension
+ (*size)++;
+ return IT_GENERIC;
+ }
+ } else {
+ if (have_modrm_) {
+ // The ModR/M byte is the next byte.
+ (*size)++;
+ ProcessModrm(start_byte + 1, size);
+ return IT_GENERIC;
+ } else {
+ (*size)++;
+ return IT_GENERIC;
+ }
+ }
+}
+
+bool MiniDisassembler::ProcessOperand(int flag_operand) {
+ bool succeeded = true;
+ if (AM_NOT_USED == flag_operand)
+ return succeeded;
+
+ // Decide what to do based on the addressing mode.
+ switch (flag_operand & AM_MASK) {
+ // No ModR/M byte indicated by these addressing modes, and no
+ // additional (e.g. immediate) parameters.
+ case AM_A: // Direct address
+ case AM_F: // EFLAGS register
+ case AM_X: // Memory addressed by the DS:SI register pair
+ case AM_Y: // Memory addressed by the ES:DI register pair
+ case AM_IMPLICIT: // Parameter is implicit, occupies no space in
+ // instruction
+ break;
+
+ // There is a ModR/M byte but it does not necessarily need
+ // to be decoded.
+ case AM_C: // reg field of ModR/M selects a control register
+ case AM_D: // reg field of ModR/M selects a debug register
+ case AM_G: // reg field of ModR/M selects a general register
+ case AM_P: // reg field of ModR/M selects an MMX register
+ case AM_R: // mod field of ModR/M may refer only to a general register
+ case AM_S: // reg field of ModR/M selects a segment register
+ case AM_T: // reg field of ModR/M selects a test register
+ case AM_V: // reg field of ModR/M selects a 128-bit XMM register
+ have_modrm_ = true;
+ break;
+
+ // In these addressing modes, there is a ModR/M byte and it needs to be
+ // decoded. No other (e.g. immediate) params than indicated in ModR/M.
+ case AM_E: // Operand is either a general-purpose register or memory,
+ // specified by ModR/M byte
+ case AM_M: // ModR/M byte will refer only to memory
+ case AM_Q: // Operand is either an MMX register or memory (complex
+ // evaluation), specified by ModR/M byte
+ case AM_W: // Operand is either a 128-bit XMM register or memory (complex
+ // eval), specified by ModR/M byte
+ have_modrm_ = true;
+ should_decode_modrm_ = true;
+ break;
+
+ // These addressing modes specify an immediate or an offset value
+ // directly, so we need to look at the operand type to see how many
+ // bytes.
+ case AM_I: // Immediate data.
+ case AM_J: // Jump to offset.
+ case AM_O: // Operand is at offset.
+ switch (flag_operand & OT_MASK) {
+ case OT_B: // Byte regardless of operand-size attribute.
+ operand_bytes_ += OS_BYTE;
+ break;
+ case OT_C: // Byte or word, depending on operand-size attribute.
+ if (operand_is_32_bits_)
+ operand_bytes_ += OS_WORD;
+ else
+ operand_bytes_ += OS_BYTE;
+ break;
+ case OT_D: // Doubleword, regardless of operand-size attribute.
+ operand_bytes_ += OS_DOUBLE_WORD;
+ break;
+ case OT_DQ: // Double-quadword, regardless of operand-size attribute.
+ operand_bytes_ += OS_DOUBLE_QUAD_WORD;
+ break;
+ case OT_P: // 32-bit or 48-bit pointer, depending on operand-size
+ // attribute.
+ if (operand_is_32_bits_)
+ operand_bytes_ += OS_48_BIT_POINTER;
+ else
+ operand_bytes_ += OS_32_BIT_POINTER;
+ break;
+ case OT_PS: // 128-bit packed single-precision floating-point data.
+ operand_bytes_ += OS_128_BIT_PACKED_SINGLE_PRECISION_FLOATING;
+ break;
+ case OT_Q: // Quadword, regardless of operand-size attribute.
+ operand_bytes_ += OS_QUAD_WORD;
+ break;
+ case OT_S: // 6-byte pseudo-descriptor.
+ operand_bytes_ += OS_PSEUDO_DESCRIPTOR;
+ break;
+ case OT_SD: // Scalar Double-Precision Floating-Point Value
+ case OT_PD: // Unaligned packed double-precision floating point value
+ operand_bytes_ += OS_DOUBLE_PRECISION_FLOATING;
+ break;
+ case OT_SS:
+ // Scalar element of a 128-bit packed single-precision
+ // floating data.
+ // We simply report failure since we don't have to support
+ // floating point.
+ succeeded = false;
+ break;
+ case OT_V: // Word or doubleword, depending on operand-size attribute.
+ if (operand_is_32_bits_)
+ operand_bytes_ += OS_DOUBLE_WORD;
+ else
+ operand_bytes_ += OS_WORD;
+ break;
+ case OT_W: // Word, regardless of operand-size attribute.
+ operand_bytes_ += OS_WORD;
+ break;
+
+ // Can safely ignore these.
+ case OT_A: // Two one-word operands in memory or two double-word
+ // operands in memory
+ case OT_PI: // Quadword MMX technology register (e.g. mm0)
+ case OT_SI: // Doubleword integer register (e.g., eax)
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return succeeded;
+}
+
+bool MiniDisassembler::ProcessModrm(unsigned char* start_byte,
+ unsigned int* size) {
+ // If we don't need to decode, we just return the size of the ModR/M
+ // byte (there is never a SIB byte in this case).
+ if (!should_decode_modrm_) {
+ (*size)++;
+ return true;
+ }
+
+ // We never care about the reg field, only the combination of the mod
+ // and r/m fields, so let's start by packing those fields together into
+ // 5 bits.
+ unsigned char modrm = (*start_byte);
+ unsigned char mod = modrm & 0xC0; // mask out top two bits to get mod field
+ modrm = modrm & 0x07; // mask out bottom 3 bits to get r/m field
+ mod = mod >> 3; // shift the mod field to the right place
+ modrm = mod | modrm; // combine the r/m and mod fields as discussed
+ mod = mod >> 3; // shift the mod field to bits 2..0
+
+ // Invariant: modrm contains the mod field in bits 4..3 and the r/m field
+ // in bits 2..0, and mod contains the mod field in bits 2..0
+
+ const ModrmEntry* modrm_entry = 0;
+ if (address_is_32_bits_)
+ modrm_entry = &s_ia32_modrm_map_[modrm];
+ else
+ modrm_entry = &s_ia16_modrm_map_[modrm];
+
+ // Invariant: modrm_entry points to information that we need to decode
+ // the ModR/M byte.
+
+ // Add to the count of operand bytes, if the ModR/M byte indicates
+ // that some operands are encoded in the instruction.
+ if (modrm_entry->is_encoded_in_instruction_)
+ operand_bytes_ += modrm_entry->operand_size_;
+
+ // Process the SIB byte if necessary, and return the count
+ // of ModR/M and SIB bytes.
+ if (modrm_entry->use_sib_byte_) {
+ (*size)++;
+ return ProcessSib(start_byte + 1, mod, size);
+ } else {
+ (*size)++;
+ return true;
+ }
+}
+
+bool MiniDisassembler::ProcessSib(unsigned char* start_byte,
+ unsigned char mod,
+ unsigned int* size) {
+ // Get the base field from bits 2..0 of the SIB byte.
+ unsigned char sib_base = (*start_byte) & 0x07;
+ if (0x05 == sib_base) {
+ switch (mod) {
+ case 0x00: // mod == 00
+ case 0x02: // mod == 10
+ operand_bytes_ += OS_DOUBLE_WORD;
+ break;
+ case 0x01: // mod == 01
+ operand_bytes_ += OS_BYTE;
+ break;
+ case 0x03: // mod == 11
+ // According to the IA-32 docs, there does not seem to be a disp
+ // value for this value of mod
+ default:
+ break;
+ }
+ }
+
+ (*size)++;
+ return true;
+}
+
+}; // namespace sidestep
diff --git a/libchrome/sandbox/win/src/sidestep/mini_disassembler.h b/libchrome/sandbox/win/src/sidestep/mini_disassembler.h
new file mode 100644
index 0000000..202c4ec
--- /dev/null
+++ b/libchrome/sandbox/win/src/sidestep/mini_disassembler.h
@@ -0,0 +1,156 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Definition of MiniDisassembler.
+
+#ifndef SANDBOX_SRC_SIDESTEP_MINI_DISASSEMBLER_H__
+#define SANDBOX_SRC_SIDESTEP_MINI_DISASSEMBLER_H__
+
+#include "sandbox/win/src/sidestep/mini_disassembler_types.h"
+
+namespace sidestep {
+
+// This small disassembler is very limited
+// in its functionality, and in fact does only the bare minimum required by the
+// preamble patching utility. It may be useful for other purposes, however.
+//
+// The limitations include at least the following:
+// -# No support for coprocessor opcodes, MMX, etc.
+// -# No machine-readable identification of opcodes or decoding of
+// assembly parameters. The name of the opcode (as a string) is given,
+// however, to aid debugging.
+//
+// You may ask what this little disassembler actually does, then? The answer is
+// that it does the following, which is exactly what the patching utility needs:
+// -# Indicates if opcode is a jump (any kind) or a return (any kind)
+// because this is important for the patching utility to determine if
+// a function is too short or there are jumps too early in it for it
+// to be preamble patched.
+// -# The opcode length is always calculated, so that the patching utility
+// can figure out where the next instruction starts, and whether it
+// already has enough instructions to replace with the absolute jump
+// to the patching code.
+//
+// The usage is quite simple; just create a MiniDisassembler and use its
+// Disassemble() method.
+//
+// If you would like to extend this disassembler, please refer to the
+// IA-32 Intel Architecture Software Developer's Manual Volume 2:
+// Instruction Set Reference for information about operand decoding
+// etc.
+class MiniDisassembler {
+ public:
+
+ // Creates a new instance and sets defaults.
+ //
+ // operand_default_32_bits: If true, the default operand size is
+ // set to 32 bits, which is the default under Win32. Otherwise it is 16 bits.
+ // address_default_32_bits: If true, the default address size is
+ // set to 32 bits, which is the default under Win32. Otherwise it is 16 bits.
+ MiniDisassembler(bool operand_default_32_bits,
+ bool address_default_32_bits);
+
+ // Equivalent to MiniDisassembler(true, true);
+ MiniDisassembler();
+
+ // Attempts to disassemble a single instruction starting from the
+ // address in memory it is pointed to.
+ //
+ // start: Address where disassembly should start.
+ // instruction_bytes: Variable that will be incremented by
+ // the length in bytes of the instruction.
+ // Returns IT_JUMP, IT_RETURN or IT_GENERIC on success. IT_UNKNOWN
+ // if unable to disassemble, IT_UNUSED if this seems to be an unused
+ // opcode. In the last two (error) cases, *instruction_bytes is
+ // left unchanged.
+ //
+ // Postcondition: This instance of the disassembler is ready to be used again,
+ // with unchanged defaults from creation time.
+ InstructionType Disassemble(unsigned char* start,
+ unsigned int* instruction_bytes);
+
+ private:
+
+ // Makes the disassembler ready for reuse.
+ void Initialize();
+
+ // Sets the flags for address and operand sizes, and increments
+ // *size by the number of prefix bytes. Returns the prefix type.
+ InstructionType ProcessPrefixes(unsigned char* start, unsigned int* size);
+
+ // Sets the flag for whether we have ModR/M, and increments
+ // operand_bytes_ if any are specified by the opcode directly.
+ // Returns the decoded instruction's InstructionType.
+ InstructionType ProcessOpcode(unsigned char* start,
+ unsigned int table,
+ unsigned int* size);
+
+ // Checks the type of the supplied operand. Increments
+ // operand_bytes_ if it directly indicates an immediate etc.
+ // operand. Sets have_modrm_ if the operand specifies
+ // a ModR/M byte.
+ bool ProcessOperand(int flag_operand);
+
+ // Increments operand_bytes_ by size specified by ModR/M and
+ // by SIB if present.
+ // Returns true on success; *size is incremented for the ModR/M
+ // byte and for the SIB byte if one is present.
+ bool ProcessModrm(unsigned char* start, unsigned int* size);
+
+ // Processes the SIB byte it is pointed to.
+ // start: Pointer to the SIB byte.
+ // mod: The mod field from the ModR/M byte.
+ // Returns true to indicate success; *size is incremented by one.
+ bool ProcessSib(unsigned char* start, unsigned char mod, unsigned int* size);
+
+ // The instruction type we have decoded from the opcode.
+ InstructionType instruction_type_;
+
+ // Counts the number of bytes that is occupied by operands in
+ // the current instruction (note: we don't care about how large
+ // operands stored in registers etc. are).
+ unsigned int operand_bytes_;
+
+ // True iff there is a ModR/M byte in this instruction.
+ bool have_modrm_;
+
+ // True iff we need to decode the ModR/M byte (sometimes it just
+ // points to a register, we can tell by the addressing mode).
+ bool should_decode_modrm_;
+
+ // Current operand size is 32 bits if true, 16 bits if false.
+ bool operand_is_32_bits_;
+
+ // Default operand size is 32 bits if true, 16 bits if false.
+ bool operand_default_is_32_bits_;
+
+ // Current address size is 32 bits if true, 16 bits if false.
+ bool address_is_32_bits_;
+
+ // Default address size is 32 bits if true, 16 bits if false.
+ bool address_default_is_32_bits_;
+
+ // Huge big opcode table based on the IA-32 manual, defined
+ // in Ia32OpcodeMap.cpp
+ static const OpcodeTable s_ia32_opcode_map_[];
+
+ // Somewhat smaller table to help with decoding ModR/M bytes
+ // when 16-bit addressing mode is being used. Defined in
+ // Ia32ModrmMap.cpp
+ static const ModrmEntry s_ia16_modrm_map_[];
+
+ // Somewhat smaller table to help with decoding ModR/M bytes
+ // when 32-bit addressing mode is being used. Defined in
+ // Ia32ModrmMap.cpp
+ static const ModrmEntry s_ia32_modrm_map_[];
+
+ // Indicators of whether we got certain prefixes that certain
+ // silly Intel instructions depend on in nonstandard ways for
+ // their behaviors.
+ bool got_f2_prefix_, got_f3_prefix_, got_66_prefix_;
+};
+
+}; // namespace sidestep
+
+#endif // SANDBOX_SRC_SIDESTEP_MINI_DISASSEMBLER_H__
diff --git a/libchrome/sandbox/win/src/sidestep/mini_disassembler_types.h b/libchrome/sandbox/win/src/sidestep/mini_disassembler_types.h
new file mode 100644
index 0000000..1c10626
--- /dev/null
+++ b/libchrome/sandbox/win/src/sidestep/mini_disassembler_types.h
@@ -0,0 +1,197 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Several simple types used by the disassembler and some of the patching
+// mechanisms.
+
+#ifndef SANDBOX_SRC_SIDESTEP_MINI_DISASSEMBLER_TYPES_H__
+#define SANDBOX_SRC_SIDESTEP_MINI_DISASSEMBLER_TYPES_H__
+
+namespace sidestep {
+
+// Categories of instructions that we care about
+enum InstructionType {
+ // This opcode is not used
+ IT_UNUSED,
+ // This disassembler does not recognize this opcode (error)
+ IT_UNKNOWN,
+ // This is not an instruction but a reference to another table
+ IT_REFERENCE,
+ // This byte is a prefix byte that we can ignore
+ IT_PREFIX,
+ // This is a prefix byte that switches to the nondefault address size
+ IT_PREFIX_ADDRESS,
+ // This is a prefix byte that switches to the nondefault operand size
+ IT_PREFIX_OPERAND,
+ // A jump or call instruction
+ IT_JUMP,
+ // A return instruction
+ IT_RETURN,
+ // Any other type of instruction (in this case we don't care what it is)
+ IT_GENERIC,
+};
+
+// Lists IA-32 operand sizes in multiples of 8 bits
+enum OperandSize {
+ OS_ZERO = 0,
+ OS_BYTE = 1,
+ OS_WORD = 2,
+ OS_DOUBLE_WORD = 4,
+ OS_QUAD_WORD = 8,
+ OS_DOUBLE_QUAD_WORD = 16,
+ OS_32_BIT_POINTER = 32/8,
+ OS_48_BIT_POINTER = 48/8,
+ OS_SINGLE_PRECISION_FLOATING = 32/8,
+ OS_DOUBLE_PRECISION_FLOATING = 64/8,
+ OS_DOUBLE_EXTENDED_PRECISION_FLOATING = 80/8,
+ OS_128_BIT_PACKED_SINGLE_PRECISION_FLOATING = 128/8,
+ OS_PSEUDO_DESCRIPTOR = 6
+};
+
+// Operand addressing methods from the IA-32 manual. The AM_MASK value
+// is a mask for the rest. The other enumeration values are named for the
+// names given to the addressing methods in the manual, e.g. AM_D is for
+// the D addressing method.
+//
+// The reason we use a full 4 bytes and a mask is that we need to combine
+// these flags with the OperandType values to store the details
+// on the operand in a single integer.
+enum AddressingMethod {
+ AM_NOT_USED = 0, // This operand is not used for this instruction
+ AM_MASK = 0x00FF0000, // Mask for the rest of the values in this enumeration
+ AM_A = 0x00010000, // A addressing type
+ AM_C = 0x00020000, // C addressing type
+ AM_D = 0x00030000, // D addressing type
+ AM_E = 0x00040000, // E addressing type
+ AM_F = 0x00050000, // F addressing type
+ AM_G = 0x00060000, // G addressing type
+ AM_I = 0x00070000, // I addressing type
+ AM_J = 0x00080000, // J addressing type
+ AM_M = 0x00090000, // M addressing type
+ AM_O = 0x000A0000, // O addressing type
+ AM_P = 0x000B0000, // P addressing type
+ AM_Q = 0x000C0000, // Q addressing type
+ AM_R = 0x000D0000, // R addressing type
+ AM_S = 0x000E0000, // S addressing type
+ AM_T = 0x000F0000, // T addressing type
+ AM_V = 0x00100000, // V addressing type
+ AM_W = 0x00110000, // W addressing type
+ AM_X = 0x00120000, // X addressing type
+ AM_Y = 0x00130000, // Y addressing type
+ AM_REGISTER = 0x00140000, // Specific register is always used as this op
+ AM_IMPLICIT = 0x00150000, // An implicit, fixed value is used
+};
+
+// Operand types from the IA-32 manual. The OT_MASK value is
+// a mask for the rest. The rest of the values are named for the
+// names given to these operand types in the manual, e.g. OT_PS
+// is for the ps operand type in the manual.
+//
+// The reason we use a full 4 bytes and a mask is that we need
+// to combine these flags with the AddressingMethod values to store the details
+// on the operand in a single integer.
+enum OperandType {
+ OT_MASK = 0xFF000000,
+ OT_A = 0x01000000,
+ OT_B = 0x02000000,
+ OT_C = 0x03000000,
+ OT_D = 0x04000000,
+ OT_DQ = 0x05000000,
+ OT_P = 0x06000000,
+ OT_PI = 0x07000000,
+ OT_PS = 0x08000000, // actually unsupported for (we don't know its size)
+ OT_Q = 0x09000000,
+ OT_S = 0x0A000000,
+ OT_SS = 0x0B000000,
+ OT_SI = 0x0C000000,
+ OT_V = 0x0D000000,
+ OT_W = 0x0E000000,
+ OT_SD = 0x0F000000, // scalar double-precision floating-point value
+ OT_PD = 0x10000000, // double-precision floating point
+ // dummy "operand type" for address mode M - which doesn't specify
+ // operand type
+ OT_ADDRESS_MODE_M = 0x80000000
+};
+
+// Everything that's in an Opcode (see below) except the three
+// alternative opcode structs for different prefixes.
+struct SpecificOpcode {
+ // Index to continuation table, or 0 if this is the last
+ // byte in the opcode.
+ int table_index_;
+
+ // The opcode type
+ InstructionType type_;
+
+ // Description of the type of the dest, src and aux operands,
+ // put together from an enOperandType flag and an enAddressingMethod
+ // flag.
+ int flag_dest_;
+ int flag_source_;
+ int flag_aux_;
+
+ // We indicate the mnemonic for debugging purposes
+ const char* mnemonic_;
+};
+
+// The information we keep in our tables about each of the different
+// valid instructions recognized by the IA-32 architecture.
+struct Opcode {
+ // Index to continuation table, or 0 if this is the last
+ // byte in the opcode.
+ int table_index_;
+
+ // The opcode type
+ InstructionType type_;
+
+ // Description of the type of the dest, src and aux operands,
+ // put together from an enOperandType flag and an enAddressingMethod
+ // flag.
+ int flag_dest_;
+ int flag_source_;
+ int flag_aux_;
+
+ // We indicate the mnemonic for debugging purposes
+ const char* mnemonic_;
+
+  // Alternative opcode info if certain prefixes are specified.
+  // In most cases, all of these are zeroed-out. Only used if
+  // is_prefix_dependent_ is true.
+ bool is_prefix_dependent_;
+ SpecificOpcode opcode_if_f2_prefix_;
+ SpecificOpcode opcode_if_f3_prefix_;
+ SpecificOpcode opcode_if_66_prefix_;
+};
+
+// Information about each table entry.
+struct OpcodeTable {
+ // Table of instruction entries
+ const Opcode* table_;
+ // How many bytes left to shift ModR/M byte <b>before</b> applying mask
+ unsigned char shift_;
+ // Mask to apply to byte being looked at before comparing to table
+ unsigned char mask_;
+ // Minimum/maximum indexes in table.
+ unsigned char min_lim_;
+ unsigned char max_lim_;
+};
+
+// Information about each entry in table used to decode ModR/M byte.
+struct ModrmEntry {
+ // Is the operand encoded as bytes in the instruction (rather than
+ // if it's e.g. a register in which case it's just encoded in the
+ // ModR/M byte)
+ bool is_encoded_in_instruction_;
+
+ // Is there a SIB byte? In this case we always need to decode it.
+ bool use_sib_byte_;
+
+ // What is the size of the operand (only important if it's encoded
+ // in the instruction)?
+ OperandSize operand_size_;
+};
+
+}; // namespace sidestep
+
+#endif // SANDBOX_SRC_SIDESTEP_MINI_DISASSEMBLER_TYPES_H__
diff --git a/libchrome/sandbox/win/src/sidestep/preamble_patcher.h b/libchrome/sandbox/win/src/sidestep/preamble_patcher.h
new file mode 100644
index 0000000..3a0985c
--- /dev/null
+++ b/libchrome/sandbox/win/src/sidestep/preamble_patcher.h
@@ -0,0 +1,111 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Definition of PreamblePatcher
+
+#ifndef SANDBOX_SRC_SIDESTEP_PREAMBLE_PATCHER_H__
+#define SANDBOX_SRC_SIDESTEP_PREAMBLE_PATCHER_H__
+
+#include <stddef.h>
+
+namespace sidestep {
+
+// Maximum size of the preamble stub. We overwrite at least the first 5
+// bytes of the function. Considering the worst case scenario, we need 4
+// bytes + the max instruction size + 5 more bytes for our jump back to
+// the original code. With that in mind, 32 is a good number :)
+const size_t kMaxPreambleStubSize = 32;
+
+// Possible results of patching/unpatching
+enum SideStepError {
+ SIDESTEP_SUCCESS = 0,
+ SIDESTEP_INVALID_PARAMETER,
+ SIDESTEP_INSUFFICIENT_BUFFER,
+ SIDESTEP_JUMP_INSTRUCTION,
+ SIDESTEP_FUNCTION_TOO_SMALL,
+ SIDESTEP_UNSUPPORTED_INSTRUCTION,
+ SIDESTEP_NO_SUCH_MODULE,
+ SIDESTEP_NO_SUCH_FUNCTION,
+ SIDESTEP_ACCESS_DENIED,
+ SIDESTEP_UNEXPECTED,
+};
+
+// Implements a patching mechanism that overwrites the first few bytes of
+// a function preamble with a jump to our hook function, which is then
+// able to call the original function via a specially-made preamble-stub
+// that imitates the action of the original preamble.
+//
+// Note that there are a number of ways that this method of patching can
+// fail. The most common are:
+// - If there is a jump (jxx) instruction in the first 5 bytes of
+// the function being patched, we cannot patch it because in the
+// current implementation we do not know how to rewrite relative
+// jumps after relocating them to the preamble-stub. Note that
+// if you really really need to patch a function like this, it
+// would be possible to add this functionality (but at some cost).
+// - If there is a return (ret) instruction in the first 5 bytes
+// we cannot patch the function because it may not be long enough
+// for the jmp instruction we use to inject our patch.
+// - If there is another thread currently executing within the bytes
+// that are copied to the preamble stub, it will crash in an undefined
+// way.
+//
+// If you get any other error than the above, you're either pointing the
+// patcher at an invalid instruction (e.g. into the middle of a multi-
+// byte instruction, or not at memory containing executable instructions)
+// or, there may be a bug in the disassembler we use to find
+// instruction boundaries.
+class PreamblePatcher {
+ public:
+ // Patches target_function to point to replacement_function using a provided
+ // preamble_stub of stub_size bytes.
+ // Returns An error code indicating the result of patching.
+ template <class T>
+ static SideStepError Patch(T target_function, T replacement_function,
+ void* preamble_stub, size_t stub_size) {
+ return RawPatchWithStub(target_function, replacement_function,
+ reinterpret_cast<unsigned char*>(preamble_stub),
+ stub_size, NULL);
+ }
+
+ private:
+
+ // Patches a function by overwriting its first few bytes with
+ // a jump to a different function. This is similar to the RawPatch
+ // function except that it uses the stub allocated by the caller
+ // instead of allocating it.
+ //
+ // To use this function, you first have to call VirtualProtect to make the
+ // target function writable at least for the duration of the call.
+ //
+ // target_function: A pointer to the function that should be
+ // patched.
+ //
+ // replacement_function: A pointer to the function that should
+ // replace the target function. The replacement function must have
+ // exactly the same calling convention and parameters as the original
+ // function.
+ //
+ // preamble_stub: A pointer to a buffer where the preamble stub
+ // should be copied. The size of the buffer should be sufficient to
+ // hold the preamble bytes.
+ //
+ // stub_size: Size in bytes of the buffer allocated for the
+ // preamble_stub
+ //
+ // bytes_needed: Pointer to a variable that receives the minimum
+ // number of bytes required for the stub. Can be set to NULL if you're
+ // not interested.
+ //
+ // Returns An error code indicating the result of patching.
+ static SideStepError RawPatchWithStub(void* target_function,
+ void *replacement_function,
+ unsigned char* preamble_stub,
+ size_t stub_size,
+ size_t* bytes_needed);
+};
+
+}; // namespace sidestep
+
+#endif // SANDBOX_SRC_SIDESTEP_PREAMBLE_PATCHER_H__
diff --git a/libchrome/sandbox/win/src/sidestep/preamble_patcher_with_stub.cpp b/libchrome/sandbox/win/src/sidestep/preamble_patcher_with_stub.cpp
new file mode 100644
index 0000000..b501600
--- /dev/null
+++ b/libchrome/sandbox/win/src/sidestep/preamble_patcher_with_stub.cpp
@@ -0,0 +1,181 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implementation of PreamblePatcher
+
+#include "sandbox/win/src/sidestep/preamble_patcher.h"
+
+#include <stddef.h>
+
+#include "sandbox/win/src/sandbox_nt_util.h"
+#include "sandbox/win/src/sidestep/mini_disassembler.h"
+
+// Definitions of assembly statements we need
+#define ASM_JMP32REL 0xE9
+#define ASM_INT3 0xCC
+
+namespace {
+
+// Very basic memcpy. We are copying 4 to 12 bytes most of the time, so there
+// is no attempt to optimize this code or have a general purpose function.
+// We don't want to call the crt from this code.
+inline void* RawMemcpy(void* destination, const void* source, size_t bytes) {
+ const char* from = reinterpret_cast<const char*>(source);
+ char* to = reinterpret_cast<char*>(destination);
+
+ for (size_t i = 0; i < bytes ; i++)
+ to[i] = from[i];
+
+ return destination;
+}
+
+// Very basic memset. We are filling 1 to 7 bytes most of the time, so there
+// is no attempt to optimize this code or have a general purpose function.
+// We don't want to call the crt from this code.
+inline void* RawMemset(void* destination, int value, size_t bytes) {
+ char* to = reinterpret_cast<char*>(destination);
+
+ for (size_t i = 0; i < bytes ; i++)
+ to[i] = static_cast<char>(value);
+
+ return destination;
+}
+
+} // namespace
+
+#define ASSERT(a, b) DCHECK_NT(a)
+
+namespace sidestep {
+
+SideStepError PreamblePatcher::RawPatchWithStub(
+ void* target_function,
+ void* replacement_function,
+ unsigned char* preamble_stub,
+ size_t stub_size,
+ size_t* bytes_needed) {
+ if ((NULL == target_function) ||
+ (NULL == replacement_function) ||
+ (NULL == preamble_stub)) {
+ ASSERT(false, (L"Invalid parameters - either pTargetFunction or "
+ L"pReplacementFunction or pPreambleStub were NULL."));
+ return SIDESTEP_INVALID_PARAMETER;
+ }
+
+ // TODO(V7:joi) Siggi and I just had a discussion and decided that both
+ // patching and unpatching are actually unsafe. We also discussed a
+ // method of making it safe, which is to freeze all other threads in the
+ // process, check their thread context to see if their eip is currently
+ // inside the block of instructions we need to copy to the stub, and if so
+ // wait a bit and try again, then unfreeze all threads once we've patched.
+ // Not implementing this for now since we're only using SideStep for unit
+ // testing, but if we ever use it for production code this is what we
+ // should do.
+ //
+ // NOTE: Stoyan suggests we can write 8 or even 10 bytes atomically using
+ // FPU instructions, and on newer processors we could use cmpxchg8b or
+ // cmpxchg16b. So it might be possible to do the patching/unpatching
+ // atomically and avoid having to freeze other threads. Note though, that
+ // doing it atomically does not help if one of the other threads happens
+ // to have its eip in the middle of the bytes you change while you change
+ // them.
+ unsigned char* target = reinterpret_cast<unsigned char*>(target_function);
+
+ // Let's disassemble the preamble of the target function to see if we can
+ // patch, and to see how much of the preamble we need to take. We need 5
+ // bytes for our jmp instruction, so let's find the minimum number of
+ // instructions to get 5 bytes.
+ MiniDisassembler disassembler;
+ unsigned int preamble_bytes = 0;
+ while (preamble_bytes < 5) {
+ InstructionType instruction_type =
+ disassembler.Disassemble(target + preamble_bytes, &preamble_bytes);
+ if (IT_JUMP == instruction_type) {
+ ASSERT(false, (L"Unable to patch because there is a jump instruction "
+ L"in the first 5 bytes."));
+ return SIDESTEP_JUMP_INSTRUCTION;
+ } else if (IT_RETURN == instruction_type) {
+ ASSERT(false, (L"Unable to patch because function is too short"));
+ return SIDESTEP_FUNCTION_TOO_SMALL;
+ } else if (IT_GENERIC != instruction_type) {
+ ASSERT(false, (L"Disassembler encountered unsupported instruction "
+ L"(either unused or unknown"));
+ return SIDESTEP_UNSUPPORTED_INSTRUCTION;
+ }
+ }
+
+ if (NULL != bytes_needed)
+ *bytes_needed = preamble_bytes + 5;
+
+  // Inv: preamble_bytes is the number of bytes (at least 5) that we need to
+  // take from the preamble to have whole instructions that are 5 bytes or more
+  // in size total. The size of the stub required is preamble_bytes + size of
+  // jmp (5)
+ if (preamble_bytes + 5 > stub_size) {
+ NOTREACHED_NT();
+ return SIDESTEP_INSUFFICIENT_BUFFER;
+ }
+
+ // First, copy the preamble that we will overwrite.
+ RawMemcpy(reinterpret_cast<void*>(preamble_stub),
+ reinterpret_cast<void*>(target), preamble_bytes);
+
+ // Now, make a jmp instruction to the rest of the target function (minus the
+ // preamble bytes we moved into the stub) and copy it into our preamble-stub.
+ // find address to jump to, relative to next address after jmp instruction
+#pragma warning(push)
+#pragma warning(disable:4244)
+ // This assignment generates a warning because it is 32 bit specific.
+ int relative_offset_to_target_rest
+ = ((reinterpret_cast<unsigned char*>(target) + preamble_bytes) -
+ (preamble_stub + preamble_bytes + 5));
+#pragma warning(pop)
+ // jmp (Jump near, relative, displacement relative to next instruction)
+ preamble_stub[preamble_bytes] = ASM_JMP32REL;
+ // copy the address
+ RawMemcpy(reinterpret_cast<void*>(preamble_stub + preamble_bytes + 1),
+ reinterpret_cast<void*>(&relative_offset_to_target_rest), 4);
+
+  // Inv: preamble_stub points to assembly code that will execute the
+  // original function by first executing the first preamble_bytes bytes of
+  // the preamble, then jumping to the rest of the function.
+
+ // Overwrite the first 5 bytes of the target function with a jump to our
+ // replacement function.
+ // (Jump near, relative, displacement relative to next instruction)
+ target[0] = ASM_JMP32REL;
+
+ // Find offset from instruction after jmp, to the replacement function.
+#pragma warning(push)
+#pragma warning(disable:4244)
+ int offset_to_replacement_function =
+ reinterpret_cast<unsigned char*>(replacement_function) -
+ reinterpret_cast<unsigned char*>(target) - 5;
+#pragma warning(pop)
+ // complete the jmp instruction
+ RawMemcpy(reinterpret_cast<void*>(target + 1),
+ reinterpret_cast<void*>(&offset_to_replacement_function), 4);
+ // Set any remaining bytes that were moved to the preamble-stub to INT3 so
+ // as not to cause confusion (otherwise you might see some strange
+ // instructions if you look at the disassembly, or even invalid
+ // instructions). Also, by doing this, we will break into the debugger if
+ // some code calls into this portion of the code. If this happens, it
+ // means that this function cannot be patched using this patcher without
+ // further thought.
+ if (preamble_bytes > 5) {
+ RawMemset(reinterpret_cast<void*>(target + 5), ASM_INT3,
+ preamble_bytes - 5);
+ }
+
+  // Inv: The memory pointed to by target_function now points to a relative
+  // jump instruction that jumps over to the preamble_stub. The preamble
+  // stub contains the first preamble_bytes bytes of the original target
+  // function's preamble code, followed by a relative jump back to the next
+  // instruction after the first preamble_bytes bytes.
+
+ return SIDESTEP_SUCCESS;
+}
+
+}; // namespace sidestep
+
+#undef ASSERT
diff --git a/libchrome/sandbox/win/tests/integration_tests/sbox_integration_tests.vcproj b/libchrome/sandbox/win/tests/integration_tests/sbox_integration_tests.vcproj
new file mode 100644
index 0000000..53816e7
--- /dev/null
+++ b/libchrome/sandbox/win/tests/integration_tests/sbox_integration_tests.vcproj
@@ -0,0 +1,242 @@
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+ ProjectType="Visual C++"
+ Version="8.00"
+ Name="sbox_integration_tests"
+ ProjectGUID="{542D4B3B-98D4-4233-B68D-0103891508C6}"
+ RootNamespace="unit_tests"
+ Keyword="Win32Proj"
+ >
+ <Platforms>
+ <Platform
+ Name="Win32"
+ />
+ </Platforms>
+ <ToolFiles>
+ </ToolFiles>
+ <Configurations>
+ <Configuration
+ Name="Debug|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets="$(SolutionDir)..\build\debug.vsprops;$(SolutionDir)..\build\common.vsprops;$(SolutionDir)..\testing\using_gtest.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ PreprocessorDefinitions="_CONSOLE"
+ UsePrecompiledHeader="2"
+ ForcedIncludeFiles="stdafx.h"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ AdditionalOptions="/safeseh /dynamicbase /ignore:4199 $(NoInherit)"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ <Configuration
+ Name="Release|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets="$(SolutionDir)..\build\release.vsprops;$(SolutionDir)..\build\common.vsprops;$(SolutionDir)..\testing\using_gtest.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ PreprocessorDefinitions="_CONSOLE"
+ UsePrecompiledHeader="0"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ AdditionalOptions="/safeseh /dynamicbase /ignore:4199 $(NoInherit)"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ </Configurations>
+ <References>
+ </References>
+ <Files>
+ <Filter
+ Name="Common"
+ Filter="h;hpp;hxx;hm;inl;inc;xsd"
+ UniqueIdentifier="{49F2D231-E141-4455-B241-7D37C09B6EEB}"
+ >
+ <File
+ RelativePath="..\common\controller.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\common\controller.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\..\testing\gtest\src\gtest.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\integration_tests.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\stdafx.cc"
+ >
+ <FileConfiguration
+ Name="Debug|Win32"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="1"
+ />
+ </FileConfiguration>
+ <FileConfiguration
+ Name="Release|Win32"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="0"
+ />
+ </FileConfiguration>
+ </File>
+ <File
+ RelativePath=".\stdafx.h"
+ >
+ </File>
+ </Filter>
+ <File
+ RelativePath="..\..\src\dep_test.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\file_policy_test.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\integration_tests_test.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\integrity_level_test.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\ipc_ping_test.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\named_pipe_policy_test.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\policy_target_test.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\process_policy_test.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\registry_policy_test.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\sync_policy_test.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\unload_dll_test.cc"
+ >
+ </File>
+ </Files>
+ <Globals>
+ </Globals>
+</VisualStudioProject>
diff --git a/libchrome/sandbox/win/tests/unit_tests/sbox_unittests.vcproj b/libchrome/sandbox/win/tests/unit_tests/sbox_unittests.vcproj
new file mode 100644
index 0000000..a2df792
--- /dev/null
+++ b/libchrome/sandbox/win/tests/unit_tests/sbox_unittests.vcproj
@@ -0,0 +1,258 @@
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+ ProjectType="Visual C++"
+ Version="8.00"
+ Name="sbox_unittests"
+ ProjectGUID="{883553BE-2A9D-418C-A121-61FE1DFBC562}"
+ RootNamespace="unit_tests"
+ Keyword="Win32Proj"
+ >
+ <Platforms>
+ <Platform
+ Name="Win32"
+ />
+ </Platforms>
+ <ToolFiles>
+ </ToolFiles>
+ <Configurations>
+ <Configuration
+ Name="Debug|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets="$(SolutionDir)..\build\debug.vsprops;$(SolutionDir)..\build\common.vsprops;$(SolutionDir)..\testing\using_gtest.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ PreprocessorDefinitions="_CONSOLE"
+ UsePrecompiledHeader="2"
+ WarningLevel="3"
+ ForcedIncludeFiles="stdafx.h"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ <Configuration
+ Name="Release|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets="$(SolutionDir)..\build\release.vsprops;$(SolutionDir)..\build\common.vsprops;$(SolutionDir)..\testing\using_gtest.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ PreprocessorDefinitions="_CONSOLE"
+ UsePrecompiledHeader="0"
+ WarningLevel="3"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ </Configurations>
+ <References>
+ </References>
+ <Files>
+ <Filter
+ Name="Common"
+ Filter="h;hpp;hxx;hm;inl;inc;xsd"
+ UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}"
+ >
+ <File
+ RelativePath="..\..\..\testing\gtest\src\gtest.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\stdafx.cc"
+ >
+ <FileConfiguration
+ Name="Debug|Win32"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="1"
+ />
+ </FileConfiguration>
+ <FileConfiguration
+ Name="Release|Win32"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="0"
+ />
+ </FileConfiguration>
+ </File>
+ <File
+ RelativePath=".\stdafx.h"
+ >
+ </File>
+ <File
+ RelativePath=".\unit_tests.cc"
+ >
+ </File>
+ </Filter>
+ <Filter
+ Name="TestInterception"
+ >
+ <File
+ RelativePath="..\..\src\interception_unittest.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\pe_image_unittest.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\service_resolver_unittest.cc"
+ >
+ </File>
+ </Filter>
+ <Filter
+ Name="TestRestrictedToken"
+ >
+ <File
+ RelativePath="..\..\src\restricted_token_unittest.cc"
+ >
+ </File>
+ </Filter>
+ <Filter
+ Name="TestJob"
+ >
+ <File
+ RelativePath="..\..\src\job_unittest.cc"
+ >
+ </File>
+ </Filter>
+ <Filter
+ Name="Sid"
+ >
+ <File
+ RelativePath="..\..\src\sid_unittest.cc"
+ >
+ </File>
+ </Filter>
+ <Filter
+ Name="Policy"
+ >
+ <File
+ RelativePath="..\..\src\policy_engine_unittest.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\policy_low_level_unittest.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\policy_opcodes_unittest.cc"
+ >
+ </File>
+ </Filter>
+ <Filter
+ Name="IPC"
+ >
+ <File
+ RelativePath="..\..\src\ipc_unittest.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\threadpool_unittest.cc"
+ >
+ </File>
+ </Filter>
+ </Files>
+ <Globals>
+ </Globals>
+</VisualStudioProject>
diff --git a/libchrome/sandbox/win/tests/validation_tests/sbox_validation_tests.vcproj b/libchrome/sandbox/win/tests/validation_tests/sbox_validation_tests.vcproj
new file mode 100644
index 0000000..9b7b599
--- /dev/null
+++ b/libchrome/sandbox/win/tests/validation_tests/sbox_validation_tests.vcproj
@@ -0,0 +1,216 @@
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+ ProjectType="Visual C++"
+ Version="8.00"
+ Name="sbox_validation_tests"
+ ProjectGUID="{B9CC7B0D-145A-49C2-B887-84E43CFA0F27}"
+ RootNamespace="unit_tests"
+ Keyword="Win32Proj"
+ >
+ <Platforms>
+ <Platform
+ Name="Win32"
+ />
+ </Platforms>
+ <ToolFiles>
+ </ToolFiles>
+ <Configurations>
+ <Configuration
+ Name="Debug|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets="$(SolutionDir)..\build\debug.vsprops;$(SolutionDir)..\build\common.vsprops;$(SolutionDir)..\testing\using_gtest.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ PreprocessorDefinitions="_CONSOLE"
+ UsePrecompiledHeader="2"
+ WarningLevel="3"
+ ForcedIncludeFiles="stdafx.h"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ AdditionalDependencies="shlwapi.lib"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ <Configuration
+ Name="Release|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets="$(SolutionDir)..\build\release.vsprops;$(SolutionDir)..\build\common.vsprops;$(SolutionDir)..\testing\using_gtest.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ PreprocessorDefinitions="_CONSOLE"
+ UsePrecompiledHeader="0"
+ WarningLevel="3"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ AdditionalDependencies="shlwapi.lib"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ </Configurations>
+ <References>
+ </References>
+ <Files>
+ <Filter
+ Name="Common"
+ Filter="h;hpp;hxx;hm;inl;inc;xsd"
+ UniqueIdentifier="{2E6C7E35-7538-4883-B80C-C89961A80D66}"
+ >
+ <File
+ RelativePath="..\common\controller.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\common\controller.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\..\testing\gtest\src\gtest.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\stdafx.cc"
+ >
+ <FileConfiguration
+ Name="Debug|Win32"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="1"
+ />
+ </FileConfiguration>
+ <FileConfiguration
+ Name="Release|Win32"
+ ExcludedFromBuild="true"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ />
+ </FileConfiguration>
+ </File>
+ <File
+ RelativePath=".\stdafx.h"
+ >
+ </File>
+ <File
+ RelativePath=".\unit_tests.cc"
+ >
+ </File>
+ </Filter>
+ <Filter
+ Name="Suite"
+ >
+ <File
+ RelativePath=".\commands.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\commands.h"
+ >
+ </File>
+ <File
+ RelativePath=".\suite.cc"
+ >
+ </File>
+ </Filter>
+ </Files>
+ <Globals>
+ </Globals>
+</VisualStudioProject>
diff --git a/libchrome/sandbox/win/tools/finder/finder.vcproj b/libchrome/sandbox/win/tools/finder/finder.vcproj
new file mode 100644
index 0000000..787c847
--- /dev/null
+++ b/libchrome/sandbox/win/tools/finder/finder.vcproj
@@ -0,0 +1,201 @@
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+ ProjectType="Visual C++"
+ Version="8.00"
+ Name="finder"
+ ProjectGUID="{ACDC2E06-0366-41A4-A646-C37E130A605D}"
+ RootNamespace="finder"
+ Keyword="Win32Proj"
+ >
+ <Platforms>
+ <Platform
+ Name="Win32"
+ />
+ </Platforms>
+ <ToolFiles>
+ </ToolFiles>
+ <Configurations>
+ <Configuration
+ Name="Debug|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets="$(SolutionDir)..\build\debug.vsprops;$(SolutionDir)..\build\common.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="2"
+ ForcedIncludeFiles="stdafx.h"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ <Configuration
+ Name="Release|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets="$(SolutionDir)..\build\release.vsprops;$(SolutionDir)..\build\common.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="0"
+ ForcedIncludeFiles="stdafx.h"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ </Configurations>
+ <References>
+ </References>
+ <Files>
+ <File
+ RelativePath=".\finder.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\finder.h"
+ >
+ </File>
+ <File
+ RelativePath=".\finder_fs.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\finder_kernel.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\finder_registry.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\main.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\ntundoc.h"
+ >
+ </File>
+ <File
+ RelativePath=".\stdafx.cc"
+ >
+ <FileConfiguration
+ Name="Debug|Win32"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="1"
+ />
+ </FileConfiguration>
+ <FileConfiguration
+ Name="Release|Win32"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="0"
+ />
+ </FileConfiguration>
+ </File>
+ <File
+ RelativePath=".\stdafx.h"
+ >
+ </File>
+ </Files>
+ <Globals>
+ </Globals>
+</VisualStudioProject>
diff --git a/libchrome/sandbox/win/tools/finder/ntundoc.h b/libchrome/sandbox/win/tools/finder/ntundoc.h
new file mode 100644
index 0000000..dc8c3a5
--- /dev/null
+++ b/libchrome/sandbox/win/tools/finder/ntundoc.h
@@ -0,0 +1,275 @@
+// Copyright (c) 2006-2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_TOOLS_FINDER_NTUNDOC_H__
+#define SANDBOX_TOOLS_FINDER_NTUNDOC_H__
+
+#define NTSTATUS ULONG
+#define STATUS_SUCCESS 0x00000000
+#define STATUS_INFO_LENGTH_MISMATCH 0xC0000004
+#define STATUS_ACCESS_DENIED 0xC0000022
+#define STATUS_BUFFER_OVERFLOW 0x80000005
+
+typedef struct _LSA_UNICODE_STRING {
+ USHORT Length;
+ USHORT MaximumLength;
+ PWSTR Buffer;
+} UNICODE_STRING;
+
+typedef struct _OBJDIR_INFORMATION {
+ UNICODE_STRING ObjectName;
+ UNICODE_STRING ObjectTypeName;
+ BYTE Data[1];
+} OBJDIR_INFORMATION;
+
+typedef struct _OBJECT_ATTRIBUTES {
+ ULONG Length;
+ HANDLE RootDirectory;
+ UNICODE_STRING *ObjectName;
+ ULONG Attributes;
+ PVOID SecurityDescriptor;
+ PVOID SecurityQualityOfService;
+} OBJECT_ATTRIBUTES;
+
+typedef struct _PUBLIC_OBJECT_BASIC_INFORMATION {
+ ULONG Attributes;
+ ACCESS_MASK GrantedAccess;
+ ULONG HandleCount;
+ ULONG PointerCount;
+ ULONG Reserved[10]; // reserved for internal use
+ } PUBLIC_OBJECT_BASIC_INFORMATION, *PPUBLIC_OBJECT_BASIC_INFORMATION;
+
+typedef struct __PUBLIC_OBJECT_TYPE_INFORMATION {
+ UNICODE_STRING TypeName;
+ ULONG Reserved [22]; // reserved for internal use
+} PUBLIC_OBJECT_TYPE_INFORMATION, *PPUBLIC_OBJECT_TYPE_INFORMATION;
+
+typedef enum _POOL_TYPE {
+ NonPagedPool,
+ PagedPool,
+ NonPagedPoolMustSucceed,
+ ReservedType,
+ NonPagedPoolCacheAligned,
+ PagedPoolCacheAligned,
+ NonPagedPoolCacheAlignedMustS
+} POOL_TYPE;
+
+typedef struct _OBJECT_TYPE_INFORMATION {
+ UNICODE_STRING Name;
+ ULONG TotalNumberOfObjects;
+ ULONG TotalNumberOfHandles;
+ ULONG TotalPagedPoolUsage;
+ ULONG TotalNonPagedPoolUsage;
+ ULONG TotalNamePoolUsage;
+ ULONG TotalHandleTableUsage;
+ ULONG HighWaterNumberOfObjects;
+ ULONG HighWaterNumberOfHandles;
+ ULONG HighWaterPagedPoolUsage;
+ ULONG HighWaterNonPagedPoolUsage;
+ ULONG HighWaterNamePoolUsage;
+ ULONG HighWaterHandleTableUsage;
+ ULONG InvalidAttributes;
+ GENERIC_MAPPING GenericMapping;
+ ULONG ValidAccess;
+ BOOLEAN SecurityRequired;
+ BOOLEAN MaintainHandleCount;
+ USHORT MaintainTypeList;
+ POOL_TYPE PoolType;
+ ULONG PagedPoolUsage;
+ ULONG NonPagedPoolUsage;
+} OBJECT_TYPE_INFORMATION, *POBJECT_TYPE_INFORMATION;
+
+typedef struct _OBJECT_NAME_INFORMATION {
+ UNICODE_STRING ObjectName;
+} OBJECT_NAME_INFORMATION, *POBJECT_NAME_INFORMATION;
+
+typedef enum _OBJECT_INFORMATION_CLASS {
+ ObjectBasicInformation,
+ ObjectNameInformation,
+ ObjectTypeInformation,
+ ObjectAllInformation,
+ ObjectDataInformation
+} OBJECT_INFORMATION_CLASS, *POBJECT_INFORMATION_CLASS;
+
+typedef struct _FILE_NAME_INFORMATION {
+ ULONG FileNameLength;
+ WCHAR FileName[1];
+} FILE_NAME_INFORMATION, *PFILE_NAME_INFORMATION;
+
+typedef enum _FILE_INFORMATION_CLASS {
+ // end_wdm
+ FileDirectoryInformation = 1,
+ FileFullDirectoryInformation, // 2
+ FileBothDirectoryInformation, // 3
+ FileBasicInformation, // 4 wdm
+ FileStandardInformation, // 5 wdm
+ FileInternalInformation, // 6
+ FileEaInformation, // 7
+ FileAccessInformation, // 8
+ FileNameInformation, // 9
+ FileRenameInformation, // 10
+ FileLinkInformation, // 11
+ FileNamesInformation, // 12
+ FileDispositionInformation, // 13
+ FilePositionInformation, // 14 wdm
+ FileFullEaInformation, // 15
+ FileModeInformation, // 16
+ FileAlignmentInformation, // 17
+ FileAllInformation, // 18
+ FileAllocationInformation, // 19
+ FileEndOfFileInformation, // 20 wdm
+ FileAlternateNameInformation, // 21
+ FileStreamInformation, // 22
+ FilePipeInformation, // 23
+ FilePipeLocalInformation, // 24
+ FilePipeRemoteInformation, // 25
+ FileMailslotQueryInformation, // 26
+ FileMailslotSetInformation, // 27
+ FileCompressionInformation, // 28
+ FileObjectIdInformation, // 29
+ FileCompletionInformation, // 30
+ FileMoveClusterInformation, // 31
+ FileQuotaInformation, // 32
+ FileReparsePointInformation, // 33
+ FileNetworkOpenInformation, // 34
+ FileAttributeTagInformation, // 35
+ FileTrackingInformation, // 36
+ FileMaximumInformation
+ // begin_wdm
+} FILE_INFORMATION_CLASS, *PFILE_INFORMATION_CLASS;
+
+typedef enum _SYSTEM_INFORMATION_CLASS {
+ SystemHandleInformation = 16
+} SYSTEM_INFORMATION_CLASS;
+
+typedef struct _IO_STATUS_BLOCK {
+ union {
+ NTSTATUS Status;
+ PVOID Pointer;
+ };
+ ULONG_PTR Information;
+} IO_STATUS_BLOCK, *PIO_STATUS_BLOCK;
+
+#define InitializeObjectAttributes( p, n, a, r, s ) { \
+ (p)->Length = sizeof( OBJECT_ATTRIBUTES ); \
+ (p)->RootDirectory = r; \
+ (p)->Attributes = a; \
+ (p)->ObjectName = n; \
+ (p)->SecurityDescriptor = s; \
+ (p)->SecurityQualityOfService = NULL; \
+}
+
+typedef struct _SYSTEM_HANDLE_INFORMATION {
+ USHORT ProcessId;
+ USHORT CreatorBackTraceIndex;
+ UCHAR ObjectTypeNumber;
+ UCHAR Flags;
+ USHORT Handle;
+ PVOID Object;
+ ACCESS_MASK GrantedAccess;
+} SYSTEM_HANDLE_INFORMATION, *PSYSTEM_HANDLE_INFORMATION;
+
+typedef struct _SYSTEM_HANDLE_INFORMATION_EX {
+ ULONG NumberOfHandles;
+ SYSTEM_HANDLE_INFORMATION Information[1];
+} SYSTEM_HANDLE_INFORMATION_EX, *PSYSTEM_HANDLE_INFORMATION_EX;
+
+#define POBJECT_ATTRIBUTES OBJECT_ATTRIBUTES*
+
+typedef NTSTATUS (WINAPI* NTQUERYDIRECTORYOBJECT)(
+ HANDLE,
+ OBJDIR_INFORMATION*,
+ DWORD,
+ DWORD,
+ DWORD,
+ DWORD*,
+ DWORD*);
+
+typedef NTSTATUS (WINAPI* NTOPENDIRECTORYOBJECT)(
+ HANDLE *,
+ DWORD,
+ OBJECT_ATTRIBUTES* );
+
+typedef NTSTATUS (WINAPI* NTGENERICOPEN) (
+ OUT PHANDLE EventHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes);
+
+typedef NTSTATUS (WINAPI* NTOPENEVENT)(
+ OUT PHANDLE EventHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes);
+
+typedef NTSTATUS (WINAPI* NTOPENJOBOBJECT)(
+ OUT PHANDLE JobHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes);
+
+typedef NTSTATUS (WINAPI* NTOPENKEYEDEVENT)(
+ OUT PHANDLE KeyedEventHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes);
+
+typedef NTSTATUS (WINAPI* NTOPENMUTANT)(
+ OUT PHANDLE MutantHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes);
+
+typedef NTSTATUS (WINAPI* NTOPENSECTION)(
+ OUT PHANDLE SectionHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes);
+
+typedef NTSTATUS (WINAPI* NTOPENSEMAPHORE)(
+ OUT PHANDLE SemaphoreHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes);
+
+typedef NTSTATUS (WINAPI* NTOPENSYMBOLICLINKOBJECT)(
+ OUT PHANDLE SymbolicLinkHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes);
+
+typedef NTSTATUS (WINAPI* NTOPENTIMER)(
+ OUT PHANDLE TimerHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes);
+
+typedef NTSTATUS (WINAPI* NTOPENFILE)(
+ HANDLE *,
+ DWORD,
+ OBJECT_ATTRIBUTES *,
+ IO_STATUS_BLOCK *,
+ DWORD,
+ DWORD);
+
+typedef NTSTATUS (WINAPI* NTQUERYINFORMATIONFILE)(
+ HANDLE,
+ PIO_STATUS_BLOCK,
+ PVOID,
+ ULONG,
+ FILE_INFORMATION_CLASS);
+
+typedef NTSTATUS (WINAPI* NTQUERYSYSTEMINFORMATION)(
+ SYSTEM_INFORMATION_CLASS SystemInformationClass,
+ PVOID SystemInformation,
+ ULONG SystemInformationLength,
+ PULONG ReturnLength);
+
+typedef NTSTATUS (WINAPI* NTQUERYOBJECT)(
+ HANDLE Handle,
+ OBJECT_INFORMATION_CLASS ObjectInformationClass,
+ PVOID ObjectInformation,
+ ULONG ObjectInformationLength,
+ PULONG ReturnLength);
+
+typedef NTSTATUS (WINAPI* NTCLOSE) (HANDLE);
+
+#define DIRECTORY_QUERY 0x0001
+#define DIRECTORY_TRAVERSE 0x0002
+#define DIRECTORY_CREATE_OBJECT 0x0004
+#define DIRECTORY_CREATE_SUBDIRECTORY 0x0008
+#define DIRECTORY_ALL_ACCESS (STANDARD_RIGHTS_REQUIRED | 0xF)
+
+#endif // SANDBOX_TOOLS_FINDER_NTUNDOC_H__
diff --git a/libchrome/sandbox/win/tools/launcher/launcher.vcproj b/libchrome/sandbox/win/tools/launcher/launcher.vcproj
new file mode 100644
index 0000000..71ed011
--- /dev/null
+++ b/libchrome/sandbox/win/tools/launcher/launcher.vcproj
@@ -0,0 +1,177 @@
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+ ProjectType="Visual C++"
+ Version="8.00"
+ Name="launcher"
+ ProjectGUID="{386FA217-FBC2-4461-882D-CDAD221ED800}"
+ RootNamespace="launcher"
+ Keyword="Win32Proj"
+ >
+ <Platforms>
+ <Platform
+ Name="Win32"
+ />
+ </Platforms>
+ <ToolFiles>
+ </ToolFiles>
+ <Configurations>
+ <Configuration
+ Name="Debug|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets="$(SolutionDir)..\build\debug.vsprops;$(SolutionDir)..\build\common.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="2"
+ ForcedIncludeFiles="stdafx.h"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ <Configuration
+ Name="Release|Win32"
+ ConfigurationType="1"
+ InheritedPropertySheets="$(SolutionDir)..\build\release.vsprops;$(SolutionDir)..\build\common.vsprops"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="0"
+ ForcedIncludeFiles="stdafx.h"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ </Configurations>
+ <References>
+ </References>
+ <Files>
+ <File
+ RelativePath=".\launcher.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\stdafx.cc"
+ >
+ <FileConfiguration
+ Name="Debug|Win32"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="1"
+ />
+ </FileConfiguration>
+ <FileConfiguration
+ Name="Release|Win32"
+ >
+ <Tool
+ Name="VCCLCompilerTool"
+ UsePrecompiledHeader="0"
+ />
+ </FileConfiguration>
+ </File>
+ <File
+ RelativePath=".\stdafx.h"
+ >
+ </File>
+ </Files>
+ <Globals>
+ </Globals>
+</VisualStudioProject>
diff --git a/libchrome/sandbox/win/wow_helper.sln b/libchrome/sandbox/win/wow_helper.sln
new file mode 100644
index 0000000..26d0da2
--- /dev/null
+++ b/libchrome/sandbox/win/wow_helper.sln
@@ -0,0 +1,19 @@
+Microsoft Visual Studio Solution File, Format Version 9.00
+# Visual Studio 2005
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "wow_helper", "wow_helper\wow_helper.vcproj", "{BCF3A457-39F1-4DAA-9A65-93CFCD559036}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|x64 = Debug|x64
+ Release|x64 = Release|x64
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {BCF3A457-39F1-4DAA-9A65-93CFCD559036}.Debug|x64.ActiveCfg = Debug|x64
+ {BCF3A457-39F1-4DAA-9A65-93CFCD559036}.Debug|x64.Build.0 = Debug|x64
+ {BCF3A457-39F1-4DAA-9A65-93CFCD559036}.Release|x64.ActiveCfg = Release|x64
+ {BCF3A457-39F1-4DAA-9A65-93CFCD559036}.Release|x64.Build.0 = Release|x64
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+EndGlobal
diff --git a/libchrome/sandbox/win/wow_helper/wow_helper.exe b/libchrome/sandbox/win/wow_helper/wow_helper.exe
new file mode 100755
index 0000000..f9bfb4b
--- /dev/null
+++ b/libchrome/sandbox/win/wow_helper/wow_helper.exe
Binary files differ
diff --git a/libchrome/sandbox/win/wow_helper/wow_helper.pdb b/libchrome/sandbox/win/wow_helper/wow_helper.pdb
new file mode 100644
index 0000000..9cb67d0
--- /dev/null
+++ b/libchrome/sandbox/win/wow_helper/wow_helper.pdb
Binary files differ
diff --git a/libchrome/sandbox/win/wow_helper/wow_helper.vcproj b/libchrome/sandbox/win/wow_helper/wow_helper.vcproj
new file mode 100644
index 0000000..c8e7c9e
--- /dev/null
+++ b/libchrome/sandbox/win/wow_helper/wow_helper.vcproj
@@ -0,0 +1,215 @@
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+ ProjectType="Visual C++"
+ Version="8.00"
+ Name="wow_helper"
+ ProjectGUID="{BCF3A457-39F1-4DAA-9A65-93CFCD559036}"
+ RootNamespace="wow_helper"
+ Keyword="Win32Proj"
+ >
+ <Platforms>
+ <Platform
+ Name="x64"
+ />
+ </Platforms>
+ <ToolFiles>
+ </ToolFiles>
+ <Configurations>
+ <Configuration
+ Name="Debug|x64"
+ OutputDirectory="$(ProjectDir)"
+ IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
+ ConfigurationType="1"
+ CharacterSet="1"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ TargetEnvironment="3"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ Optimization="0"
+ AdditionalIncludeDirectories="$(SolutionDir)..;$(SolutionDir)..\third_party\platformsdk_win2008_6_1\files\Include;$(VSInstallDir)\VC\atlmfc\include"
+ PreprocessorDefinitions="_WIN32_WINNT=0x0501;WINVER=0x0501;WIN32;_DEBUG"
+ MinimalRebuild="true"
+ BasicRuntimeChecks="0"
+ RuntimeLibrary="1"
+ BufferSecurityCheck="false"
+ RuntimeTypeInfo="false"
+ UsePrecompiledHeader="0"
+ WarningLevel="3"
+ Detect64BitPortabilityProblems="true"
+ DebugInformationFormat="3"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ LinkIncremental="1"
+ GenerateDebugInformation="true"
+ SubSystem="2"
+ TargetMachine="17"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ <Configuration
+ Name="Release|x64"
+ OutputDirectory="$(ProjectDir)"
+ IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
+ ConfigurationType="1"
+ CharacterSet="1"
+ WholeProgramOptimization="1"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ TargetEnvironment="3"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ AdditionalIncludeDirectories="$(SolutionDir)..;$(SolutionDir)..\third_party\platformsdk_win2008_6_1\files\Include;$(VSInstallDir)\VC\atlmfc\include"
+ PreprocessorDefinitions="_WIN32_WINNT=0x0501;WINVER=0x0501;WIN32;NDEBUG"
+ RuntimeLibrary="0"
+ BufferSecurityCheck="false"
+ RuntimeTypeInfo="false"
+ UsePrecompiledHeader="0"
+ WarningLevel="3"
+ Detect64BitPortabilityProblems="true"
+ DebugInformationFormat="3"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ LinkIncremental="1"
+ GenerateDebugInformation="true"
+ SubSystem="2"
+ OptimizeReferences="2"
+ EnableCOMDATFolding="2"
+ TargetMachine="17"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCWebDeploymentTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ </Configurations>
+ <References>
+ </References>
+ <Files>
+ <Filter
+ Name="sandbox"
+ >
+ <File
+ RelativePath="..\src\nt_internals.h"
+ >
+ </File>
+ <File
+ RelativePath="..\src\resolver.h"
+ >
+ </File>
+ </Filter>
+ <File
+ RelativePath=".\service64_resolver.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\service64_resolver.h"
+ >
+ </File>
+ <File
+ RelativePath=".\target_code.cc"
+ >
+ </File>
+ <File
+ RelativePath=".\target_code.h"
+ >
+ </File>
+ <File
+ RelativePath=".\wow_helper.cc"
+ >
+ </File>
+ </Files>
+ <Globals>
+ </Globals>
+</VisualStudioProject>
diff --git a/libchrome/testing/gmock/include/gmock/gmock.h b/libchrome/testing/gmock/include/gmock/gmock.h
new file mode 100644
index 0000000..9678b68
--- /dev/null
+++ b/libchrome/testing/gmock/include/gmock/gmock.h
@@ -0,0 +1 @@
+#include <gmock/gmock.h>
diff --git a/libchrome/testing/gtest/include/gtest/gtest.h b/libchrome/testing/gtest/include/gtest/gtest.h
new file mode 100644
index 0000000..2180533
--- /dev/null
+++ b/libchrome/testing/gtest/include/gtest/gtest.h
@@ -0,0 +1 @@
+#include <gtest/gtest.h>
diff --git a/libchrome/testing/gtest/include/gtest/gtest_prod.h b/libchrome/testing/gtest/include/gtest/gtest_prod.h
new file mode 100644
index 0000000..00174fc
--- /dev/null
+++ b/libchrome/testing/gtest/include/gtest/gtest_prod.h
@@ -0,0 +1 @@
+#include <gtest/gtest_prod.h>
diff --git a/libchrome/testing/multiprocess_func_list.cc b/libchrome/testing/multiprocess_func_list.cc
new file mode 100644
index 0000000..f96c2b5
--- /dev/null
+++ b/libchrome/testing/multiprocess_func_list.cc
@@ -0,0 +1,57 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "multiprocess_func_list.h"
+
+#include <map>
+
+// Helper functions to maintain mapping of "test name"->test func.
+// The information is accessed via a global map.
+namespace multi_process_function_list {
+
+namespace {
+
+struct ProcessFunctions {
+ ProcessFunctions() : main(NULL), setup(NULL) {}
+ ProcessFunctions(TestMainFunctionPtr main, SetupFunctionPtr setup)
+ : main(main),
+ setup(setup) {
+ }
+ TestMainFunctionPtr main;
+ SetupFunctionPtr setup;
+};
+
+typedef std::map<std::string, ProcessFunctions> MultiProcessTestMap;
+
+// Retrieve a reference to the global 'func name' -> func ptr map.
+MultiProcessTestMap& GetMultiprocessFuncMap() {
+ static MultiProcessTestMap test_name_to_func_ptr_map;
+ return test_name_to_func_ptr_map;
+}
+
+} // namespace
+
+AppendMultiProcessTest::AppendMultiProcessTest(
+ std::string test_name,
+ TestMainFunctionPtr main_func_ptr,
+ SetupFunctionPtr setup_func_ptr) {
+ GetMultiprocessFuncMap()[test_name] =
+ ProcessFunctions(main_func_ptr, setup_func_ptr);
+}
+
+int InvokeChildProcessTest(const std::string& test_name) {
+ MultiProcessTestMap& func_lookup_table = GetMultiprocessFuncMap();
+ MultiProcessTestMap::iterator it = func_lookup_table.find(test_name);
+ if (it != func_lookup_table.end()) {
+ const ProcessFunctions& process_functions = it->second;
+ if (process_functions.setup)
+ (*process_functions.setup)();
+ if (process_functions.main)
+ return (*process_functions.main)();
+ }
+
+ return -1;
+}
+
+} // namespace multi_process_function_list
diff --git a/libchrome/testing/multiprocess_func_list.h b/libchrome/testing/multiprocess_func_list.h
new file mode 100644
index 0000000..c3d2f1f
--- /dev/null
+++ b/libchrome/testing/multiprocess_func_list.h
@@ -0,0 +1,70 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TESTING_MULTIPROCESS_FUNC_LIST_H_
+#define TESTING_MULTIPROCESS_FUNC_LIST_H_
+
+#include <string>
+
+// This file provides the plumbing to register functions to be executed
+// as the main function of a child process in a multi-process test.
+// This complements the MultiProcessTest class which provides facilities
+// for launching such tests.
+//
+// The MULTIPROCESS_TEST_MAIN() macro registers a string -> func_ptr mapping
+// by creating a new global instance of the AppendMultiProcessTest class;
+// this means that by the time we reach our main() function the mapping
+// is already in place.
+//
+// Example usage:
+// MULTIPROCESS_TEST_MAIN(a_test_func) {
+// // Code here runs in a child process.
+// return 0;
+// }
+//
+// The prototype of a_test_func is implicitly
+// int test_main_func_name();
+
+namespace multi_process_function_list {
+
+// Type for child process main functions.
+typedef int (*TestMainFunctionPtr)();
+
+// Type for child setup functions.
+typedef void (*SetupFunctionPtr)();
+
+// Helper class to append a test function to the global mapping.
+// Used by the MULTIPROCESS_TEST_MAIN macro.
+class AppendMultiProcessTest {
+ public:
+ // |main_func_ptr| is the main function that is run in the child process.
+  // |setup_func_ptr| is run in the child process just before |main_func_ptr|.
+ AppendMultiProcessTest(std::string test_name,
+ TestMainFunctionPtr main_func_ptr,
+ SetupFunctionPtr setup_func_ptr);
+};
+
+// Invoke the main function of a test previously registered with
+// MULTIPROCESS_TEST_MAIN()
+int InvokeChildProcessTest(const std::string& test_name);
+
+// This macro creates a global multi_process_function_list::AppendMultiProcessTest
+// whose constructor does the work of adding the global mapping.
+#define MULTIPROCESS_TEST_MAIN(test_main) \
+ MULTIPROCESS_TEST_MAIN_WITH_SETUP(test_main, NULL)
+
+// Same as above but lets callers specify a setup method that is run in the
+// child process, just before the main function is run. This facilitates
+// adding a generic one-time setup function for multiple tests.
+#define MULTIPROCESS_TEST_MAIN_WITH_SETUP(test_main, test_setup) \
+ int test_main(); \
+ namespace { \
+ multi_process_function_list::AppendMultiProcessTest \
+ AddMultiProcessTest##_##test_main(#test_main, (test_main), (test_setup)); \
+ } \
+ int test_main()
+
+} // namespace multi_process_function_list
+
+#endif // TESTING_MULTIPROCESS_FUNC_LIST_H_
diff --git a/libchrome/testing/platform_test.h b/libchrome/testing/platform_test.h
new file mode 100644
index 0000000..04fc845
--- /dev/null
+++ b/libchrome/testing/platform_test.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TESTING_PLATFORM_TEST_H_
+#define TESTING_PLATFORM_TEST_H_
+
+#include <gtest/gtest.h>
+
+#if defined(GTEST_OS_MAC)
+#ifdef __OBJC__
+@class NSAutoreleasePool;
+#else
+class NSAutoreleasePool;
+#endif
+
+// The purpose of this class is to provide a hook for platform-specific
+// operations across unit tests. For example, on the Mac, it creates and
+// releases an outer NSAutoreleasePool for each test case. For now, it's only
+// implemented on the Mac. To enable this for another platform, just adjust
+// the #ifdefs and add a platform_test_<platform>.cc implementation file.
+class PlatformTest : public testing::Test {
+ public:
+ virtual ~PlatformTest();
+
+ protected:
+ PlatformTest();
+
+ private:
+ NSAutoreleasePool* pool_;
+};
+#else
+typedef testing::Test PlatformTest;
+#endif // GTEST_OS_MAC
+
+#endif // TESTING_PLATFORM_TEST_H_
diff --git a/libchrome/testrunner.cc b/libchrome/testrunner.cc
new file mode 100644
index 0000000..07117a5
--- /dev/null
+++ b/libchrome/testrunner.cc
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <gtest/gtest.h>
+
+#include "base/at_exit.h"
+#include "base/command_line.h"
+
+int main(int argc, char** argv) {
+ base::AtExitManager at_exit_manager;
+ base::CommandLine::Init(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/libchrome/third_party/protobuf/src/google/protobuf/message_lite.h b/libchrome/third_party/protobuf/src/google/protobuf/message_lite.h
new file mode 100644
index 0000000..c472844
--- /dev/null
+++ b/libchrome/third_party/protobuf/src/google/protobuf/message_lite.h
@@ -0,0 +1 @@
+#include <google/protobuf/message_lite.h>